Computing pi with MPI
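All four versions compute the same approximation: pi is the integral of 4/(1+x^2) over [0, 1], evaluated with the midpoint rule, with the n midpoints split across the MPI ranks:

\pi = \int_0^1 \frac{4}{1+x^2}\,dx \;\approx\; h \sum_{i=1}^{n} \frac{4}{1+x_i^2},
\qquad x_i = \Bigl(i - \tfrac{1}{2}\Bigr) h,\quad h = \frac{1}{n}.

The versions differ only in how the partial sums are collected: explicit sends and receives, a broadcast plus reduce, a gather, or a reduce.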
1. Point-to-point communication
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

double func(double xi)
{
    return 4.0 / (1.0 + xi * xi);
}

int main(int argc, char *argv[])
{
    int n = 1000000000, myid, numprocs, i;
    double pi = 0.0, h, xi, res = 0.0, startTime, endTime;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);

    h = 1.0 / (double)n;
    startTime = MPI_Wtime();

    if (myid != 0) {
        /* Workers split the n midpoints round-robin and send their
         * partial sums to rank 0. */
        for (i = myid; i <= n; i += (numprocs - 1)) {
            xi = h * ((double)i - 0.5);
            res += func(xi);
        }
        res = h * res;
        MPI_Send(&res, 1, MPI_DOUBLE, 0, 99, MPI_COMM_WORLD);
    } else {
        /* Rank 0 does no integration itself; it collects one partial
         * sum from each worker, in rank order. */
        for (i = 1; i < numprocs; i++) {
            MPI_Recv(&res, 1, MPI_DOUBLE, i, 99, MPI_COMM_WORLD, &status);
            pi += res;
        }
        endTime = MPI_Wtime();
        printf("\nPI is %f\nTime is : %f\n", pi, endTime - startTime);
    }

    MPI_Finalize();
    return 0;
}
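Rank 0 above receives the partial sums strictly in rank order. Since the sums are only added together, a small variation (a sketch, not in the original program) is to accept them in whatever order the workers finish, using MPI_ANY_SOURCE in place of the explicit source rank:

/* Drop-in replacement for the receive loop above: take partial sums in
 * arrival order; the total is the same because addition is commutative. */
for (i = 1; i < numprocs; i++) {
    MPI_Recv(&res, 1, MPI_DOUBLE, MPI_ANY_SOURCE, 99, MPI_COMM_WORLD, &status);
    pi += res;
}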
2. Broadcast

#include <stdio.h>
#include <math.h>
#include "mpi.h"

double f(double a)
{
    return 4.0 / (1.0 + a * a);
}

int main(int argc, char *argv[])
{
    int done = 0, n, myid, numprocs, i;
    double PI25DT = 3.141592653589793238462643;
    double mypi, pi, h, sum, x;
    double startwtime = 0.0, endwtime;
    int namelen;
    char processor_name[MPI_MAX_PROCESSOR_NAME];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    MPI_Get_processor_name(processor_name, &namelen);
    fprintf(stderr, "Process %d on %s\n", myid, processor_name);

    n = 1000000000;
    while (!done) {
        if (myid == 0)
            startwtime = MPI_Wtime();
        /* Rank 0 broadcasts the interval count to all ranks. */
        MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
        if (n == 0) {
            done = 1;
        } else {
            h = 1.0 / (double)n;
            sum = 0.0;
            /* Each rank sums every numprocs-th midpoint, starting at its rank. */
            for (i = myid + 1; i <= n; i += numprocs) {
                x = h * ((double)i - 0.5);
                sum += f(x);
            }
            mypi = h * sum;
            /* Partial sums are combined on rank 0. */
            MPI_Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
            done = 1;
        }
    }

    if (myid == 0) {
        printf("pi is approximately %.16f, Error is %.16f\n", pi, fabs(pi - PI25DT));
        endwtime = MPI_Wtime();
        printf("wall clock time = %f\n", endwtime - startwtime);
    }

    MPI_Finalize();
    return 0;
}
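The while (!done) / MPI_Bcast structure appears to come from the classic cpi example, where rank 0 reads the interval count from stdin on each pass and n == 0 ends the run; here n is hard-coded and done = 1 makes it a single pass. A sketch of the interactive variant, assuming standard stdio input (the prompt text is illustrative):

while (!done) {
    if (myid == 0) {
        printf("Enter the number of intervals (0 quits): ");
        fflush(stdout);
        scanf("%d", &n);
        startwtime = MPI_Wtime();
    }
    /* Every rank must learn n before deciding whether to quit. */
    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if (n == 0)
        done = 1;
    else {
        /* ... same midpoint sum and MPI_Reduce as in the program above ... */
    }
}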
3. Gather

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "mpi.h"

double func(double xi)
{
    return 4.0 / (1.0 + xi * xi);
}

int main(int argc, char *argv[])
{
    int n = 1000000000, myid, numprocs, root, i, sendnum;
    double pi = 0.0, h, xi, res = 0.0, startTime, endTime, *recvbuf;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);

    root = 0;
    h = 1.0 / (double)n;
    sendnum = numprocs;
    startTime = MPI_Wtime();

    /* Every rank, including the root, integrates its share; i is 0-based,
     * so the midpoint of subinterval i is (i + 0.5) * h. */
    for (i = myid; i < n; i += numprocs) {
        xi = h * ((double)i + 0.5);
        res += func(xi);
    }
    res = h * res;

    /* Only the root needs a buffer for the gathered partial sums. */
    if (myid == root)
        recvbuf = (double *)malloc(sendnum * sizeof(double));
    MPI_Gather(&res, 1, MPI_DOUBLE, recvbuf, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);

    if (myid == root) {
        for (i = 0; i < numprocs; i++)
            pi += recvbuf[i];
        free(recvbuf);
        printf("pi is approximately %f\n", pi);
        endTime = MPI_Wtime();
        printf("wall clock time = %f\n", endTime - startTime);
    }

    MPI_Finalize();
    return 0;
}
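One optional refinement, not used above: MPI_Gather accepts MPI_IN_PLACE as the send buffer at the root, so the root does not send its partial sum to itself; it only has to store the value in its own slot of recvbuf before the call. A sketch of that variant of the gather step:

/* MPI_IN_PLACE at the root: the root's contribution must already be
 * stored at recvbuf[root] (root is rank 0 here). */
if (myid == root) {
    recvbuf[root] = res;
    MPI_Gather(MPI_IN_PLACE, 1, MPI_DOUBLE,
               recvbuf, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
} else {
    MPI_Gather(&res, 1, MPI_DOUBLE,
               recvbuf, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
}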
4. Reduce

#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

double func(double xi)
{
    return 4.0 / (1.0 + xi * xi);
}

int main(int argc, char *argv[])
{
    int n = 1000000000, myid, numprocs;
    double pi, h, xi, res, startTime, endTime;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);

    if (myid == 0)
        startTime = MPI_Wtime();

    h = 1.0 / (double)n;
    res = 0.0;
    /* Each rank sums its share of the midpoints; i is 0-based, so the
     * midpoint of subinterval i is (i + 0.5) * h. */
    for (int i = myid; i < n; i += numprocs) {
        xi = h * ((double)i + 0.5);
        res += func(xi);
    }
    res = h * res;

    /* Sum the partial results into pi on rank 0. */
    MPI_Reduce(&res, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

    if (myid == 0) {
        endTime = MPI_Wtime();
        printf("\nPI is %f\nTime is : %f\n", pi, endTime - startTime);
    }

    MPI_Finalize();
    return 0;
}
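If every rank needs the final value rather than only rank 0, MPI_Allreduce combines the reduction with a broadcast of the result. A minimal sketch, replacing the MPI_Reduce call above:

/* Every rank ends up with the summed value in pi, not just rank 0. */
MPI_Allreduce(&res, &pi, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
printf("rank %d: pi is approximately %f\n", myid, pi);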
