Documenti di Didattica
Documenti di Professioni
Documenti di Cultura
• What is MPI
• MPI Forum
• Goals and Scope of MPI
• MPI on OSC Parallel Platforms
DATA
MPI_COMM_WORLD
5
1 2
3
0
4
C:
#include <mpi.h>
Fortran:
include 'mpif.h'
C:
error = MPI_Xxxxx(parameter,...);
MPI_Xxxxx(parameter,...);
Fortran:
CALL MPI_XXXXX(parameter,...,IERROR)
C:
int MPI_Init(int *argc, char ***argv)
Fortran:
CALL MPI_INIT(IERROR)
INTEGER IERROR
C:
MPI_Comm_size(MPI_Comm comm, int *size)
Fortran:
CALL MPI_COMM_SIZE(COMM, SIZE, IERROR)
C:
MPI_Comm_rank(MPI_Comm comm, int *rank)
Fortran:
CALL MPI_COMM_RANK(COMM, RANK, IERROR)
C:
MPI_Finalize()
Fortran:
CALL MPI_FINALIZE(IERROR)
MPI_Finalize ();
}
CALL MPI_FINALIZE(ierror)
END
$ mpicc myprog.c
$ mpif90 myprog.f
• To run:
• Messages
• MPI Basic Datatypes - C
• MPI Basic Datatypes - Fortran
• Rules and Rationale
• MPI Datatypes:
– Basic types
– Derived types
MPI_INTEGER INTEGER
MPI_REAL REAL
MPI_COMPLEX COMPLEX
MPI_LOGICAL LOGICAL
MPI_CHARACTER CHARACTER(1)
MPI_BYTE
MPI_PACKED
• Definitions • Wildcarding
• Communication Modes • Communication Envelope
• Routine Names (blocking) • Received Message Count
• Sending a Message • Message Order Preservation
• Memory Mapping • Sample Programs
• Synchronous Send • Timers
• Buffered Send • Class Exercise: Processor Ring
• Standard Send • Extra Exercise 1: Ping Pong
• Ready Send • Extra Exercise 2: Broadcast
• Receiving a Message
C:
int MPI_Send(void *buf, int count, MPI_Datatype datatype,
int dest, int tag, MPI_Comm comm)
Fortran:
CALL MPI_SEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR)
<type> BUF(*)
INTEGER COUNT, DATATYPE, DEST, TAG
INTEGER COMM, IERROR
MPI_SEND(data,500,MPI_REAL,6,33,MPI_COMM_WORLD,IERROR)
2,1
The Fortran 2-D array
1,1 1,2 1,3 3,1
1,2
2,1 2,2 2,3
Is stored in memory 2,2
1,3
2,3
3,3
C:
int MPI_Recv(void *buf, int count, MPI_Datatype datatype, \
int source, int tag, MPI_Comm comm, MPI_Status *status)
Fortran:
CALL MPI_RECV(BUF, COUNT, DATATYPE, SOURCE, TAG, COMM,
STATUS, IERROR)
<type> BUF(*)
INTEGER COUNT, DATATYPE, SOURCE, TAG
INTEGER COMM, STATUS(MPI_STATUS_SIZE), IERROR
MPI_ANY_SOURCE
MPI_ANY_TAG
Sender’s Address
Destination
Address Data
Item 1
Item 2
Item 3
• Information includes:
– Source: status.MPI_SOURCE or status(MPI_SOURCE)
– Tag: status.MPI_TAG or status(MPI_TAG)
– Count: MPI_Get_count or MPI_GET_COUNT
C:
int MPI_Get_count (MPI_Status *status,
MPI_Datatype datatype, int *count)
Fortran:
CALL MPI_GET_COUNT(STATUS,DATATYPE,COUNT,IERROR)
communicator 2
5
1
3
4
0
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
if(rank==1) {
for(i=0;i<100;++i) data[i]=i;
MPI_Send(data,100,MPI_FLOAT,0,55,MPI_COMM_WORLD);
} else {
MPI_Recv(value,200,MPI_FLOAT,MPI_ANY_SOURCE,55,MPI_COMM_WORLD,&status);
printf("P:%d Got data from processor %d \n",rank, status.MPI_SOURCE);
MPI_Get_count(&status,MPI_FLOAT,&count);
printf("P:%d Got %d elements \n",rank,count);
printf("P:%d value[5]=%f \n",rank,value[5]);
}
MPI_Finalize();
}
C:
double MPI_Wtime(void);
Fortran:
DOUBLE PRECISION MPI_WTIME()
2
5 in
1
3
4
out
0
communicator 2
5
1 in
3
4
out
0
Fortran:
CALL MPI_ISSEND(BUF,COUNT,DATATYPE,DEST,TAG,COMM,REQUEST,IERROR)
<type> BUF(*)
INTEGER COUNT,DATATYPE,DEST,TAG,COMM
INTEGER REQUEST,IERROR
Fortran:
CALL MPI_IRECV(BUF,COUNT,DATATYPE,SOURCE,TAG,COMM,REQUEST,IERROR)
<type> BUF(*)
INTEGER COUNT,DATATYPE,SOURCE,TAG,COMM
INTEGER REQUEST,IERROR
Fortran:
CALL MPI_WAIT(REQUEST,STATUS,IERR)
INTEGER REQUEST,STATUS(MPI_STATUS_SIZE),IERR
CALL MPI_TEST(REQUEST,FLAG,STATUS,IERR)
LOGICAL FLAG
INTEGER REQUEST,STATUS(MPI_STATUS_SIZE),IERR
process in
in
in
… …
Basic datatype n-1 Displacement of datatype n-1
C:
int MPI_Type_contiguous (int count,
MPI_Datatype oldtype, MPI_Datatype *newtype)
Fortran:
CALL MPI_TYPE_CONTIGUOUS(COUNT,OLDTYPE,NEWTYPE,IERROR)
C:
int MPI_Type_vector(int count, int blocklength, int stride,
MPI_Datatype oldtype, MPI_Datatype *newtype)
Fortran:
CALL MPI_TYPE_VECTOR(COUNT, BLOCKLENGTH, STRIDE,
OLDTYPE, NEWTYPE, IERROR)
oldtype
5 element stride
between blocks
newtype
2 blocks
• count = 2
• stride = 5
• blocklength = 3
C:
MPI_Type_extent (MPI_Datatype datatype, MPI_Aint* extent)
Fortran:
CALL MPI_TYPE_EXTENT (DATATYPE, EXTENT, IERROR)
Fortran:
CALL MPI_TYPE_STRUCT(COUNT,ARRAY_OF_BLOCKLENGTHS,
ARRAY_OF_DISPLACEMENTS,ARRAY_OF_TYPES,
NEWTYPE,IERROR)
MPI_DOUBLE
Block 0 Block 1
newtype
array_of_displacements[1]
array_of_displacements[0]
• count = 2
• array_of_blocklengths = {1,3}
• array_of_types = {MPI_INT, MPI_DOUBLE}
• array_of_displacements = {0, extent(MPI_INT)}
Program output
C:
int MPI_Type_commit (MPI_Datatype *datatype)
Fortran:
CALL MPI_TYPE_COMMIT (DATATYPE, IERROR)
• Examples:
– Broadcast, scatter, gather (Data Distribution)
– Global sum, global maximum, etc. (Collective Operations)
– Barrier synchronization
• No tags
C:
int MPI_Barrier (MPI_Comm comm)
Fortran:
CALL MPI_BARRIER (COMM,IERROR)
INTEGER COMM,IERROR
• C:
int MPI_Bcast (void *buffer, int count,
MPI_Datatype datatype,int root, MPI_Comm comm)
• Fortran:
CALL MPI_BCAST(BUFFER, COUNT, DATATYPE, ROOT, COMM, IERROR)
Program Output
P:0 after broadcast parameter is 23.000000
P:6 after broadcast parameter is 23.000000
P:5 after broadcast parameter is 23.000000
P:2 after broadcast parameter is 23.000000
P:3 after broadcast parameter is 23.000000
P:7 after broadcast parameter is 23.000000
P:1 after broadcast parameter is 23.000000
P:4 after broadcast parameter is 23.000000
Program Output
P:1 after broadcast parameter is 23.
P:3 after broadcast parameter is 23.
P:4 after broadcast parameter is 23
P:0 after broadcast parameter is 23
P:5 after broadcast parameter is 23.
P:6 after broadcast parameter is 23.
P:7 after broadcast parameter is 23.
P:2 after broadcast parameter is 23.
C:
int MPI_Scatter(void* sendbuf, int sendcount,
MPI_Datatype sendtype, void* recvbuf, int recvcount,
MPI_Datatype recvtype, int root, MPI_Comm comm)
Fortran:
CALL MPI_SCATTER(SENDBUF, SENDCOUNT, SENDTYPE, RECVBUF,
RECVCOUNT, RECVTYPE, ROOT, COMM, IERROR)
<type> SENDBUF(*), RECVBUF(*)
ABCDE
ABCDE
A B C D E
Program Output
P:0 mine is 23.000000
P:1 mine is 24.000000
P:2 mine is 25.000000
P:3 mine is 26.000000
Program Output
P:1 mine is 25.
P:3 mine is 27.
P:0 mine is 24.
P:2 mine is 26.
A B C D E
ABCDE
A B C D E
• MPI_Alltoall
C:
int MPI_Barrier (MPI_Comm comm)
Fortran:
CALL MPI_BARRIER (COMM,IERROR)
INTEGER COMM,IERROR
C:
int MPI_Reduce(void* sendbuf, void* recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, int root,
MPI_Comm comm)
Fortran:
CALL MPI_REDUCE(SENDBUF,RECVBUF,COUNT,DATATYPE,OP,ROOT,COMM,IERROR)
<type> SENDBUF(*), RECVBUF(*)
Fortran:
MPI_2REAL, MPI_2DOUBLEPRECISION, MPI_2INTEGER
struct {
double value;
Program Output
int rank;
P:7 max=16.000000 at rank 15
} in, out;
P:7 min=1.000000 at rank 0
int root;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
in.value=rank+1;
in.rank=rank;
root=7;
MPI_Reduce(&in,&out,1,MPI_DOUBLE_INT,MPI_MAXLOC,root,MPI_COMM_WORLD);
if(rank==root) printf("PE:%d max=%lf at rank %d\n",rank,out.value,out.rank);
MPI_Reduce(&in,&out,1,MPI_DOUBLE_INT,MPI_MINLOC,root,MPI_COMM_WORLD);
if(rank==root) printf("PE:%d min=%lf at rank %d\n",rank,out.value,out.rank);
MPI_Finalize();
}
call MPI_REDUCE(in,out,1,MPI_2INTEGER,MPI_MINLOC,
& 2,MPI_COMM_WORLD,err)
CALL MPI_FINALIZE(err)
END
<type> INVEC(LEN),INOUTVEC(LEN)
INTEGER LEN,DATATYPE
C:
int MPI_Op_create (MPI_User_function *function, int commute,
MPI_Op *op)
Fortran:
CALL MPI_OP_CREATE (FUNC, COMMUTE, OP, IERROR)
EXTERNAL FUNC
INTEGER OP,IERROR
LOGICAL COMMUTE
MPI_Type_contiguous(2,MPI_DOUBLE,&ctype);
MPI_Type_commit(&ctype);
MPI_Op_create(cprod,TRUE,&myop);
root=2;
source.real=rank+1;
source.imag=rank+2;
MPI_Reduce(&source,&result,1,ctype,myop,root,MPI_COMM_WORLD);
if(rank==root) printf ("PE:%d result is %lf + %lfi\n",rank, result.real, result.imag);
MPI_Finalize();
}
--------------------------------------------
• Graph topologies
– General graphs
C:
int MPI_Cart_create (MPI_Comm comm_old, int ndims,
int *dims, int *periods, int reorder,
MPI_Comm *comm_cart)
Fortran:
CALL MPI_CART_CREATE(COMM_OLD,NDIMS,DIMS,PERIODS,REORDER,
COMM_CART,IERROR)
INTEGER COMM_OLD,NDIMS,DIMS(*),COMM_CART,IERROR
LOGICAL PERIODS(*),REORDER
MPI_Comm vu;
int dim[2], period[2], reorder;
dim[0]=4; dim[1]=3;
period[0]=TRUE; period[1]=FALSE;
reorder=TRUE;
MPI_Cart_create(MPI_COMM_WORLD,2,dim,period,reorder,&vu);
C:
int MPI_Cart_rank (MPI_Comm comm, int *coords, int *rank)
Fortran:
CALL MPI_CART_RANK(COMM,COORDS,RANK,IERROR)
INTEGER COMM,COORDS(*),RANK,IERROR
C:
int MPI_Cart_coords (MPI_Comm comm, int rank, int maxdims,
int *coords)
Fortran:
CALL MPI_CART_COORDS(COMM,RANK,MAXDIMS,COORDS,IERROR)
INTEGER COMM,RANK,MAXDIMS,COORDS(*),IERROR
C:
int MPI_Cart_shift (MPI_Comm comm, int direction, int disp,
int *rank_source, int *rank_dest)
Fortran:
CALL MPI_CART_SHIFT(COMM,DIRECTION,DISP,RANK_SOURCE,
RANK_DEST,IERROR)
INTEGER COMM,DIRECTION,DISP,RANK_SOURCE,RANK_DEST,IERROR
Fortran:
CALL MPI_CART_SUB(COMM,REMAIN_DIMS,NEWCOMM,IERROR)
INTEGER COMM,NEWCOMM,IERROR
LOGICAL REMAIN_DIMS(*)