
Commit b7b1893

Merge pull request #22 from aobolensk/03-collectives
2 parents: 10722cd + 233016e

File tree

2 files changed: +95 -7

03-mpi-api/03-mpi-api.tex

+95 -4
@@ -97,6 +97,7 @@ \section{Advanced Send/Receive API}
 
 \begin{frame}{\texttt{MPI\_Irecv}}
 Non-Blocking Receive function. Initiates a receive operation that returns immediately.
+
 \texttt{int MPI\_Irecv(void *buf, int count, MPI\_Datatype datatype, int source, int tag, MPI\_Comm comm, MPI\_Request *request);}
 
 Parameters:
@@ -150,27 +151,113 @@ \section{Synchronization}
 \section{Collective operations}
 
 \begin{frame}{Collective operations}
+Operations involving all processes within a communicator.
+
+Characteristics:
+\begin{itemize}
+\item Implicit synchronization among processes.
+\item Cannot be initiated between subsets unless a new communicator is created.
+\end{itemize}
+
+Examples:
+\begin{itemize}
+\item Data movement operations (e.g., \texttt{MPI\_Bcast}, \texttt{MPI\_Gather}).
+\item Reduction operations (e.g., \texttt{MPI\_Reduce}, \texttt{MPI\_Allreduce}).
+\end{itemize}
+
+Benefits (why use them instead of send/recv?):
+\begin{itemize}
+\item Optimized for underlying hardware and common user scenarios.
+\item Simplifies code and improves readability.
+\end{itemize}
+\end{frame}
+
+\begin{frame}{Broadcast (\texttt{MPI\_Bcast})}
+Send data from one process to all other processes.
+
+\texttt{int MPI\_Bcast(void *buffer, int count, MPI\_Datatype datatype, int root, MPI\_Comm comm);}
+
+Parameters:
+\begin{itemize}
+\item \texttt{buffer}: Starting address of buffer.
+\item \texttt{count}: Number of entries in buffer.
+\item \texttt{datatype}: Data type of buffer elements.
+\item \texttt{root}: Rank of broadcast root.
+\item \texttt{comm}: Communicator.
+\end{itemize}
 \end{frame}
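For illustration only (not part of the committed slides), a minimal C sketch of this broadcast, assuming MPI_COMM_WORLD as the communicator and rank 0 as the root:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int value = 0;
    if (rank == 0) value = 42;  /* only the root holds the data initially */
    MPI_Bcast(&value, 1, MPI_INT, 0, MPI_COMM_WORLD);
    printf("rank %d sees value %d\n", rank, value);  /* every rank prints 42 */

    MPI_Finalize();
    return 0;
}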
 
-\begin{frame}{Broadcast}
+\begin{frame}{Reduction}
+Perform a global reduction operation (e.g., sum, max) across all processes, for example to compute the total sum of values distributed across processes.
+
+Can be seen as the opposite operation to broadcast.
+
+\texttt{int MPI\_Reduce(const void *sendbuf, void *recvbuf, int count, MPI\_Datatype datatype, MPI\_Op op, int root, MPI\_Comm comm);}
+
+Commonly used operations:
+\begin{itemize}
+\item \texttt{MPI\_SUM}
+\item \texttt{MPI\_PROD}
+\item \texttt{MPI\_MAX}
+\item \texttt{MPI\_MIN}
+\end{itemize}
 \end{frame}
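A comparable sketch of the reduction, again assuming MPI_COMM_WORLD and root 0: each rank contributes its rank number, and only the root holds the sum after the call.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int local = rank;   /* each process contributes its own value */
    int total = 0;      /* meaningful only at the root after the call */
    MPI_Reduce(&local, &total, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    if (rank == 0)
        printf("sum of ranks 0..%d = %d\n", size - 1, total);

    MPI_Finalize();
    return 0;
}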
 
 \begin{frame}{\texttt{MPI\_Gather}}
+Collect data from all processes to a single root process.
+
+\texttt{int MPI\_Gather(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, int root, MPI\_Comm comm);}
+
+Parameters:
+\begin{itemize}
+\item \texttt{sendbuf}: Starting address of send buffer.
+\item \texttt{recvbuf}: Starting address of receive buffer (significant only at root).
+\end{itemize}
 \end{frame}
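A gather sketch under the same assumptions (MPI_COMM_WORLD, root 0): each rank sends one int, and the root collects one value per process.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int mine = rank * rank;                /* one value per process */
    int *all = NULL;
    if (rank == 0)
        all = malloc(size * sizeof(int));  /* receive buffer matters only at root */

    MPI_Gather(&mine, 1, MPI_INT, all, 1, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        for (int i = 0; i < size; ++i)
            printf("from rank %d: %d\n", i, all[i]);
        free(all);
    }
    MPI_Finalize();
    return 0;
}

Note that recvcount is the number of elements received from each process, not the total.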
 
 \begin{frame}{\texttt{MPI\_Scatter}}
+Distribute distinct chunks of data from root to all processes.
+
+\texttt{int MPI\_Scatter(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, int root, MPI\_Comm comm);}
+
+Parameters:
+\begin{itemize}
+\item \texttt{sendbuf}: Starting address of send buffer (significant only at root).
+\item \texttt{recvbuf}: Starting address of receive buffer.
+\end{itemize}
 \end{frame}
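A scatter sketch mirroring the gather above (assumed setup: MPI_COMM_WORLD, root 0, one int per rank); the root prepares one chunk per process.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int *chunks = NULL;
    if (rank == 0) {                        /* send buffer matters only at root */
        chunks = malloc(size * sizeof(int));
        for (int i = 0; i < size; ++i) chunks[i] = 100 + i;
    }

    int mine;
    MPI_Scatter(chunks, 1, MPI_INT, &mine, 1, MPI_INT, 0, MPI_COMM_WORLD);
    printf("rank %d received %d\n", rank, mine);

    if (rank == 0) free(chunks);
    MPI_Finalize();
    return 0;
}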
 
 \begin{frame}{\texttt{MPI\_Allgather}}
-\end{frame}
+Gather data from all processes and distribute the combined data to all processes.
+
+\texttt{int MPI\_Allgather(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, MPI\_Comm comm);}
 
-\begin{frame}{All-to-All}
+Using this function avoids the need for separate gather and broadcast operations.
 \end{frame}
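An all-gather sketch under the same assumptions; unlike MPI_Gather, every rank ends up with the full array.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int mine = rank + 1;
    int *all = malloc(size * sizeof(int));  /* every rank gets the full array */

    MPI_Allgather(&mine, 1, MPI_INT, all, 1, MPI_INT, MPI_COMM_WORLD);

    printf("rank %d sees:", rank);
    for (int i = 0; i < size; ++i) printf(" %d", all[i]);
    printf("\n");

    free(all);
    MPI_Finalize();
    return 0;
}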
 
-\begin{frame}{Reduction}
+\begin{frame}{All-to-All (\texttt{MPI\_Alltoall})}
+Each process sends data to and receives data from all other processes. It can be seen as transposing a matrix that is distributed across processes.
+
+\texttt{int MPI\_Alltoall(const void *sendbuf, int sendcount, MPI\_Datatype sendtype, void *recvbuf, int recvcount, MPI\_Datatype recvtype, MPI\_Comm comm);}
+
+Note: This operation is communication-intensive.
 \end{frame}
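An all-to-all sketch with one int exchanged per pair of ranks (values chosen arbitrarily for illustration): element i of the send buffer goes to rank i, and element j of the receive buffer came from rank j.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int *sendbuf = malloc(size * sizeof(int));
    int *recvbuf = malloc(size * sizeof(int));
    for (int i = 0; i < size; ++i) sendbuf[i] = rank * 100 + i;

    MPI_Alltoall(sendbuf, 1, MPI_INT, recvbuf, 1, MPI_INT, MPI_COMM_WORLD);

    printf("rank %d received:", rank);
    for (int j = 0; j < size; ++j) printf(" %d", recvbuf[j]);  /* j*100 + rank */
    printf("\n");

    free(sendbuf);
    free(recvbuf);
    MPI_Finalize();
    return 0;
}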
 
 \begin{frame}{All collectives have non-blocking versions}
+Non-blocking collective operations allow overlapping communication with computation.
+
+Examples:
+\begin{itemize}
+\item \texttt{MPI\_Ibcast}: Non-blocking broadcast.
+\item \texttt{MPI\_Ireduce}: Non-blocking reduction.
+\item \texttt{MPI\_Iallgather}: Non-blocking all-gather.
+\end{itemize}
+
+\texttt{int MPI\_Ibcast(void *buffer, int count, MPI\_Datatype datatype, int root, MPI\_Comm comm, MPI\_Request *request);}
+
+\texttt{int MPI\_Ireduce(const void *sendbuf, void *recvbuf, int count, MPI\_Datatype datatype, MPI\_Op op, int root, MPI\_Comm comm, MPI\_Request *request);}
+
+The usage flow is the same as for \texttt{MPI\_Isend}/\texttt{MPI\_Irecv}: initiate the operation and later wait for its completion using \texttt{MPI\_Wait} or \texttt{MPI\_Test}.
 \end{frame}
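A sketch of that flow using MPI_Ibcast with MPI_Wait (assumed: MPI_COMM_WORLD, root 0; the overlap region is left as a comment):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int value = (rank == 0) ? 42 : 0;
    MPI_Request request;

    /* start the broadcast, do independent work, then wait for completion */
    MPI_Ibcast(&value, 1, MPI_INT, 0, MPI_COMM_WORLD, &request);

    /* ... computation that does not touch 'value' could go here ... */

    MPI_Wait(&request, MPI_STATUS_IGNORE);
    printf("rank %d has value %d\n", rank, value);

    MPI_Finalize();
    return 0;
}

MPI_Test could replace MPI_Wait to poll for completion without blocking.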
 
 \begin{frame}
@@ -179,6 +266,10 @@ \section{Collective operations}
 \end{frame}
 
 \begin{frame}{References}
+\begin{enumerate}
+\item MPI Standard: \href{https://www.mpi-forum.org/docs/}{https://www.mpi-forum.org/docs/}
+\item Open MPI v4.0.7 documentation: \href{https://www.open-mpi.org/doc/v4.0/}{https://www.open-mpi.org/doc/v4.0/}
+\end{enumerate}
 \end{frame}
 
 \end{document}

03-mpi-api/03-mpi-api.toc

-3
This file was deleted.
