C++ MPI_Irecv Function Code Examples


This article collects typical usage examples of the MPI_Irecv function in C++. If you are wondering exactly how MPI_Irecv is used in C++, how to call it, or what working examples look like, the curated code samples below should help.



The following presents a selection of MPI_Irecv code examples, sorted by popularity by default.
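
Before the project-specific examples, here is a minimal, self-contained sketch of the canonical MPI_Irecv pattern: post the non-blocking receive, let the partner rank perform the matching send, then complete the request with MPI_Wait. This sketch is not taken from any of the projects below; the ranks, tag, and payload are illustrative only.

#include <mpi.h>
#include <stdio.h>

int main (int argc, char *argv[])
{
  int rank, size, value = 0;
  MPI_Request req;

  MPI_Init (&argc, &argv);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Comm_size (MPI_COMM_WORLD, &size);

  if (size >= 2) {
    if (rank == 0) {
      // post the non-blocking receive first, then block until it completes
      MPI_Irecv (&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &req);
      MPI_Wait (&req, MPI_STATUS_IGNORE);
      printf ("rank 0 received %d\n", value);
    } else if (rank == 1) {
      // matching blocking send from rank 1
      value = 42;
      MPI_Send (&value, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
    }
  }

  MPI_Finalize ();
  return 0;
}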

Example 1: main

int main (int argc, char *argv[])
{

  MPI_Init (&argc, &argv);
  
  int nProc, iProc;

  MPI_Comm_rank (MPI_COMM_WORLD, &iProc);
  MPI_Comm_size (MPI_COMM_WORLD, &nProc);

  // number of threads
  const int NTHREADS = 1;

  // number of buffers
  const int NWAY     = 2;

  // left neighbour
  const int left  = LEFT(iProc, nProc);

  // right neighbour
  const int right = RIGHT(iProc, nProc);

  // allocate array of for local vector, left halo and right halo
  double* array = malloc (NWAY * (NTHREADS+2) * 2 * VLEN * sizeof (double));
  ASSERT (array != 0);

  // initial buffer id
  int buffer_id = 0;

  // initialize data
  data_init (NTHREADS, iProc, buffer_id, array);

  MPI_Barrier (MPI_COMM_WORLD);

  double time = -now();

  for (int k = 0; k < NITER; ++k)
  {
    for (int i = 0; i < nProc; ++i)
    {
      MPI_Request send_req[2];
      MPI_Request recv_req[2];
      int slice_id    = 1;
      int left_halo   = 0;
      int right_halo  = 2;
	
      // post recv
      MPI_Irecv ( &array_ELEM_right (buffer_id, left_halo, 0), VLEN, MPI_DOUBLE
		  , left, i, MPI_COMM_WORLD, &recv_req[0]);

      MPI_Irecv ( &array_ELEM_left (buffer_id, right_halo, 0), VLEN, MPI_DOUBLE
		  , right, i, MPI_COMM_WORLD, &recv_req[1]);

      // issue send
      MPI_Isend ( &array_ELEM_right (buffer_id, right_halo - 1, 0), VLEN, MPI_DOUBLE
		  , right, i, MPI_COMM_WORLD, &send_req[0]);

      MPI_Isend ( &array_ELEM_left (buffer_id, left_halo + 1, 0), VLEN, MPI_DOUBLE
		  , left, i, MPI_COMM_WORLD, &send_req[1]);

      // wait for recv
      MPI_Waitall (2, recv_req, MPI_STATUSES_IGNORE);

      // compute data, read from id "buffer_id", write to id "1 - buffer_id"
      data_compute (NTHREADS, array, 1 - buffer_id, buffer_id, slice_id);

      // wait for send
      MPI_Waitall (2, send_req, MPI_STATUSES_IGNORE);

      // alternate the buffer
      buffer_id = 1 - buffer_id;

    }
  }

  time += now();

  data_verify (NTHREADS, iProc, (NITER * nProc) % NWAY, array);

  printf ("# mpi %s nProc %d vlen %i niter %d nthreads %i nway %i time %g\n"
         , argv[0], nProc, VLEN, NITER, NTHREADS, NWAY, time
         );
  
  MPI_Finalize();

  free (array);

  return EXIT_SUCCESS;
}
Developer: LenaO, Project: GPI-2, Lines: 89, Source: left_right_double_buffer.c


Example 2: MPI_Request

 void peano::applications::poisson::multigrid::records::RegularGridCellPacked::receive(int source, int tag) {
    MPI_Request* sendRequestHandle = new MPI_Request();
    MPI_Status   status;
    int          flag = 0;
    int          result;
    
    clock_t      timeOutWarning   = -1;
    clock_t      timeOutShutdown  = -1;
    bool         triggeredTimeoutWarning = false;
    
    result = MPI_Irecv(
       this, 1, Datatype, source, tag,
       tarch::parallel::Node::getInstance().getCommunicator(), sendRequestHandle
    );
    if ( result != MPI_SUCCESS ) {
       std::ostringstream msg;
       msg << "failed to start to receive peano::applications::poisson::multigrid::records::RegularGridCellPacked from node "
       << source << ": " << tarch::parallel::MPIReturnValueToString(result);
       _log.error( "receive(int)", msg.str() );
    }
    
    result = MPI_Test( sendRequestHandle, &flag, &status );
    while (!flag) {
       if (timeOutWarning==-1)   timeOutWarning   = tarch::parallel::Node::getInstance().getDeadlockWarningTimeStamp();
       if (timeOutShutdown==-1)  timeOutShutdown  = tarch::parallel::Node::getInstance().getDeadlockTimeOutTimeStamp();
       result = MPI_Test( sendRequestHandle, &flag, &status );
       if (result!=MPI_SUCCESS) {
          std::ostringstream msg;
          msg << "testing for finished receive task for peano::applications::poisson::multigrid::records::RegularGridCellPacked failed: "
          << tarch::parallel::MPIReturnValueToString(result);
          _log.error("receive(int)", msg.str() );
       }
       
       // deadlock aspect
       if (
          tarch::parallel::Node::getInstance().isTimeOutWarningEnabled() &&
          (clock()>timeOutWarning) &&
          (!triggeredTimeoutWarning)
       ) {
          tarch::parallel::Node::getInstance().writeTimeOutWarning(
          "peano::applications::poisson::multigrid::records::RegularGridCellPacked",
          "receive(int)", source
          );
          triggeredTimeoutWarning = true;
       }
       if (
          tarch::parallel::Node::getInstance().isTimeOutDeadlockEnabled() &&
          (clock()>timeOutShutdown)
       ) {
          tarch::parallel::Node::getInstance().triggerDeadlockTimeOut(
          "peano::applications::poisson::multigrid::records::RegularGridCellPacked",
          "receive(int)", source
          );
       }
       tarch::parallel::Node::getInstance().receiveDanglingMessages();
    }
    
    delete sendRequestHandle;
    
    _senderRank = status.MPI_SOURCE;
    #ifdef Debug
    _log.debug("receive(int,int)", "received " + toString() ); 
    #endif
    
 }
Developer: p-hoffmann, Project: madpac, Lines: 65, Source: RegularGridCell.cpp


Example 3: main

int main(int argc, char** argv) {
    int rank, size;
    int arraySize, chunkSize;
    int *array, *chunk, *y;
    int localHigh, i, elements;
    double start, stop;
    MPI_Request *requests;
    MPI_Status *status;

    if (MPI_Init(&argc, &argv) != MPI_SUCCESS) {
        fprintf(stderr, "Unable to initialize MPI!\n");
        return -1;
    }
    
    if(argc != 2 || (elements = atoi(argv[1])) == 0) {
        fprintf(stdout, "No element count parameter given!\n");
        return -1;
    }

    // get rank and size from communicator
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);



    MPI_Barrier(MPI_COMM_WORLD);
    arraySize = elements - elements % size;
    chunkSize = arraySize / size;

    if (rank == 0) {
        requests = (MPI_Request*) malloc(sizeof (MPI_Request) * (size - 1));
        status = (MPI_Status*) malloc(sizeof (MPI_Status) * (size - 1));
        array = (int*) malloc(sizeof (int) * arraySize);
        init(array, arraySize);
        start = MPI_Wtime();
    }

    y = (int*) malloc(sizeof (int) * size);
    chunk = (int*) malloc(sizeof (int) * chunkSize);

    MPI_Scatter(array, chunkSize, MPI_INT,
            chunk, chunkSize, MPI_INT, 0, MPI_COMM_WORLD);

    // Compute local prefix sums
    localHigh = scanIterativeLocal(chunk, chunkSize);

    if (rank != 0) {
        MPI_Send(&localHigh, 1, MPI_INT, 0, PREFIXSUM_TAG, MPI_COMM_WORLD);
    } else {
        y[0] = localHigh;

        for (i = 1; i < size; i++) {
            MPI_Irecv(&y[i], 1, MPI_INT, i, PREFIXSUM_TAG, MPI_COMM_WORLD, &requests[i - 1]);
        }

        MPI_Waitall(size - 1, requests, status);
    }

    MPI_Bcast(y, size, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank != 0) {
        sumIterativeLocal(chunk, y, rank, chunkSize);
    }

    MPI_Gather(chunk, chunkSize, MPI_INT,
            array, chunkSize, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        stop = MPI_Wtime();
        fprintf(stdout, "%d;%d;%f\n", size, arraySize, stop - start);
        verify(array, arraySize);
    }

    free(chunk);

    if (rank == 0) {
        free(requests);
        free(array);
    }

    MPI_Finalize();
    fflush(stdout);
    return (EXIT_SUCCESS);
}
Developer: beikov, Project: ParallelComputingC, Lines: 84, Source: prefixsum.c


Example 4: data


//......... part of the code is omitted here .........
    ierr = PetscFree(len_r1);
    CHKERRQ(ierr);
    ierr = PetscFree(id_r1);
    CHKERRQ(ierr);

    for (proc_id=0; proc_id<size; proc_id++) len_s[proc_id] = iwork[proc_id] = 0;

    ierr = PetscMalloc1(len+1,&odata1);
    CHKERRQ(ierr);
    ierr = PetscMalloc1(size,&odata2_ptr);
    CHKERRQ(ierr);
    ierr = PetscBTCreate(Mbs,&otable);
    CHKERRQ(ierr);

    len_max = ois_max*(Mbs+1); /* max space storing all is[] for each receive */
    len_est = 2*len_max;       /* estimated space of storing is[] for all receiving messages */
    ierr    = PetscMalloc1(len_est+1,&odata2);
    CHKERRQ(ierr);
    nodata2 = 0;               /* nodata2+1: num of PetscMalloc(,&odata2_ptr[]) called */

    odata2_ptr[nodata2] = odata2;

    len_unused = len_est; /* unused space in the array odata2_ptr[nodata2]-- needs to be >= len_max  */

    k = 0;
    while (k < nrqr) {
        /* Receive messages */
        ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);
        CHKERRQ(ierr);
        if (flag) {
            ierr    = MPI_Get_count(&r_status,MPIU_INT,&len);
            CHKERRQ(ierr);
            proc_id = r_status.MPI_SOURCE;
            ierr    = MPI_Irecv(odata1,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);
            CHKERRQ(ierr);
            ierr    = MPI_Wait(&r_req,&r_status);
            CHKERRQ(ierr);

            /*  Process messages */
            /*  make sure there is enough unused space in odata2 array */
            if (len_unused < len_max) { /* allocate more space for odata2 */
                ierr = PetscMalloc1(len_est+1,&odata2);
                CHKERRQ(ierr);

                odata2_ptr[++nodata2] = odata2;

                len_unused = len_est;
            }

            ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);
            CHKERRQ(ierr);
            len  = 1 + odata2[0];
            for (i=0; i<odata2[0]; i++) len += odata2[1 + i];

            /* Send messages back */
            ierr = MPI_Isend(odata2,len,MPIU_INT,proc_id,tag2,comm,s_waits2+k);
            CHKERRQ(ierr);
            k++;
            odata2        += len;
            len_unused    -= len;
            len_s[proc_id] = len; /* num of messages sending back to [proc_id] by this proc */
        }
    }
    ierr = PetscFree(odata1);
    CHKERRQ(ierr);
    ierr = PetscBTDestroy(&otable);
Developer: petsc, Project: petsc, Lines: 67, Source: sbaijov.c


Example 5: BroadCast_ComplexMatrix

void BroadCast_ComplexMatrix(MPI_Comm MPI_Curret_Comm_WD, 
                             dcomplex **Mat, int n, int *is1, int *ie1, int myid, int numprocs, 
                             MPI_Status *stat_send,
                             MPI_Request *request_send,
                             MPI_Request *request_recv)
{
  int tag=999;
  long long int i,j,ID,N;
  long long int k,k0,k1,num0,num1;
  double *Mat1;

  N = n;

  /*********************************************
     Elements are stored from 1 to n in Mat. 
  **********************************************/

  if (numprocs!=1){

    Mat1 = (double*)malloc(sizeof(double)*(N+1)*(N+1));

    /********************************
           Real part of Mat 
    ********************************/

    for (i=is1[myid]; i<=ie1[myid]; i++){
      for (j=1; j<=N; j++){
	k = (i-1)*N + j - 1;
	Mat1[k] = Mat[i][j].r;
      }
    }

    /* receiving */

    for (ID=0; ID<numprocs; ID++){
      k1 = (is1[ID]-1)*N;
      if (k1<0) k1 = 0;  
      num1 = (ie1[ID] - is1[ID] + 1)*N;
      if (num1<0 || ID==myid) num1 = 0;
      MPI_Irecv(&Mat1[k1], num1, MPI_DOUBLE, ID, tag, MPI_Curret_Comm_WD, &request_recv[ID]);
    }

    /* sending */

    k0 = (is1[myid]-1)*N;
    if (k0<0) k0 = 0;  
    num0 = (ie1[myid] - is1[myid] + 1)*N;
    if (num0<0) num0 = 0;

    for (ID=0; ID<numprocs; ID++){
      if (ID!=myid)
        MPI_Isend(&Mat1[k0], num0, MPI_DOUBLE, ID, tag, MPI_Curret_Comm_WD, &request_send[ID]);
      else 
        MPI_Isend(&Mat1[k0], 0,    MPI_DOUBLE, ID, tag, MPI_Curret_Comm_WD, &request_send[ID]);
    }

    /* waitall */

    MPI_Waitall(numprocs,request_recv,stat_send);
    MPI_Waitall(numprocs,request_send,stat_send);

    for (ID=0; ID<numprocs; ID++){
      for (i=is1[ID]; i<=ie1[ID]; i++){
	for (j=1; j<=N; j++){
	  k = (i-1)*N + j - 1;
	  Mat[i][j].r = Mat1[k];
	}
      }
    }

    /********************************
          Imaginary part of Mat 
    ********************************/

    for (i=is1[myid]; i<=ie1[myid]; i++){
      for (j=1; j<=N; j++){
	k = (i-1)*N + j - 1;
	Mat1[k] = Mat[i][j].i;
      }
    }

    /* receiving */

    for (ID=0; ID<numprocs; ID++){
      k1 = (is1[ID]-1)*N;
      if (k1<0) k1 = 0;  
      num1 = (ie1[ID] - is1[ID] + 1)*N;
      if (num1<0 || ID==myid) num1 = 0;
      MPI_Irecv(&Mat1[k1], num1, MPI_DOUBLE, ID, tag, MPI_Curret_Comm_WD, &request_recv[ID]);
    }

    /* sending */

    k0 = (is1[myid]-1)*N;
    if (k0<0) k0 = 0;  
    num0 = (ie1[myid] - is1[myid] + 1)*N;
    if (num0<0) num0 = 0;

    for (ID=0; ID<numprocs; ID++){
      if (ID!=myid)
//......... part of the code is omitted here .........
Developer: RafaelDexter, Project: OpenMX, Lines: 101, Source: BroadCast_ComplexMatrix.c


Example 6: master_main


//......... part of the code is omitted here .........
							if(workers[i]->output)
								free(workers[i]->output);
							workers[i]->output = op->output_buffer;
							op->output_buffer = NULL;
							workers[i]->output_length = op->output_length;
							workers[i]->result = op->result;
							if(op->result < 0) {
								workers[i]->status = MPI_QUEUE_JOB_FAILED | op->type;
								op->type = MPI_QUEUE_OP_CLOSE;
								list_push_head(workers[i]->operations, op);
								op = NULL;
							}
						}
						if(op) {
							if(op->buffer)
								free(op->buffer);
							if(op->output_buffer)
								free(op->output_buffer);
							free(op);
						}
					}
				}
			}
			
			if( workers[i]->status != MPI_QUEUE_JOB_BUSY && list_size(workers[i]->operations)) {
				op = list_peek_head(workers[i]->operations);
				
				if(op->type == MPI_QUEUE_OP_CLOSE) {
					itable_remove(active_jobs, workers[i]->jobid);
					list_push_tail(complete_jobs, workers[i]);
					if(!(workers[i]->status & MPI_QUEUE_JOB_FAILED))
						workers[i]->status = MPI_QUEUE_JOB_COMPLETE;
					workers[i] = NULL;
					i--;
					continue;
				}
				
				MPI_Send(op, sizeof(*op), MPI_BYTE, workers[i]->worker_rank, 0, MPI_COMM_WORLD);
				if(op->buffer_length) {
					MPI_Send(op->buffer, op->buffer_length, MPI_BYTE, workers[i]->worker_rank, 0, MPI_COMM_WORLD);
					free(op->buffer);
					op->buffer_length = 0;
					op->buffer = NULL;
				}
				MPI_Irecv(op, sizeof(*op), MPI_BYTE, workers[i]->worker_rank, 0, MPI_COMM_WORLD, &workers[i]->request);
				workers[i]->status = MPI_QUEUE_JOB_BUSY;
			}
		}
	}


	/** Clean up waiting & complete jobs, send Exit commands to each worker */
	if(!master) {
		// If the master link hasn't been set up yet
		// the workers will be waiting for the working directory
		char line[MPI_QUEUE_LINE_MAX];
		memset(line, 0, MPI_QUEUE_LINE_MAX);
		MPI_Bcast(line, MPI_QUEUE_LINE_MAX, MPI_CHAR, 0, MPI_COMM_WORLD);
	} else {
		link_close(master);
	}

	for(i = 1; i < num_workers; i++) {
		struct mpi_queue_operation *op, close;
		memset(&close, 0, sizeof(close));
		close.type = MPI_QUEUE_OP_EXIT;
		
		if(workers[i]) {
			if(workers[i]->status == MPI_QUEUE_JOB_BUSY) {
				MPI_Wait(&workers[i]->request, &workers[i]->mpi_status);
				op = list_peek_head(workers[i]->operations);
				
				if(op->output_length) {
					op->output_buffer = malloc(op->output_length);
					MPI_Recv(op->output_buffer, op->output_length, MPI_BYTE, workers[i]->worker_rank, 0, MPI_COMM_WORLD, &workers[i]->mpi_status);
				}
			}
			itable_remove(active_jobs, workers[i]->jobid);
			list_push_tail(complete_jobs, workers[i]);
		}
		MPI_Send(&close, sizeof(close), MPI_BYTE, i, 0, MPI_COMM_WORLD);
	}

	itable_firstkey(waiting_jobs);
	while(itable_size(waiting_jobs)) {
		struct mpi_queue_job *job;
		UINT64_T jobid;

		itable_nextkey(waiting_jobs, &jobid, (void **)&job);
		itable_remove(waiting_jobs, jobid);
		list_push_tail(complete_jobs, job);
	}

	while(list_size(complete_jobs)) {
		mpi_queue_job_delete(list_pop_head(complete_jobs));
	}

	MPI_Finalize();
	return abort_flag;
}
Developer: liblit, Project: Murphy, Lines: 101, Source: mpi_queue_worker.c


Example 7: main


//......... part of the code is omitted here .........
			size_t sny = subdomain->grid[0].ny, sns = subdomain->grid[0].ns;
			size_t sby = subdomain->grid[0].by, sbs = subdomain->grid[0].bs;
			size_t sey = subdomain->grid[0].ey, ses = subdomain->grid[0].es;

			size_t soffset = sbx + (sbx + snx + sex) *
				(sby + sbs * (sby + sny + sey));

			struct grid_domain_t obuf;
			memset(&obuf, 0, sizeof(struct grid_domain_t));
			obuf.arrays = subdomain->arrays + 1;
			obuf.narrays = 1;
			obuf.offset = 0;
			obuf.grid[0].nx = dnx;
			obuf.grid[0].ny = dny;
			obuf.grid[0].ns = dns;
			obuf.grid->size = dnx * dny * dns;
		
			struct grid_domain_t scpy = *subdomain;
			scpy.arrays = subdomain->arrays + 2;
			scpy.narrays = 1;
			scpy.offset = soffset;
			scpy.grid[0].nx = sbx + snx + sex;
			scpy.grid[0].ny = sby + sny + sey;
			scpy.grid[0].ns = sbs + sns + ses;
			
			// Copy data to the temporary buffer.
			grid_subcpy(dnx, dny, dns, &obuf, &scpy);

			// Exchange temporary buffers with the subdomain neighbour.
			int subdomain_rank = grid_rank1d(subdomain->parent->parent, subdomain->parent->grid);
			int neighbor_rank = grid_rank1d(neighbor->parent->parent, neighbor->parent->grid);
			MPI_SAFE_CALL(MPI_Isend(subdomain->arrays[1], obuf.grid->size,
				MPI_BYTE, neighbor_rank, 0, MPI_COMM_WORLD, &reqs[2 * i]));
			MPI_SAFE_CALL(MPI_Irecv(subdomain->arrays[0], obuf.grid->size,
				MPI_BYTE, neighbor_rank, 0, MPI_COMM_WORLD, &reqs[2 * i + 1]));
#ifdef VERBOSE
			printf("sharing: send %d->%d\n", subdomain_rank, neighbor_rank);
			printf("sharing: recv %d->%d\n", neighbor_rank, subdomain_rank);
#endif
		}
#endif // MPI
		// Compute inner grid points of the subdomain.
		int nx = t->cpu.grid->bx + t->cpu.grid->nx + t->cpu.grid->ex;
		int ny = t->cpu.grid->by + t->cpu.grid->ny + t->cpu.grid->ey;
		int ns = t->cpu.grid->bs + t->cpu.grid->ns + t->cpu.grid->es;

		if (cpu)
		{
			isum13pt_cpu(nx, ny, ns,
				(integer(*)[ny][nx])t->cpu.arrays[0],
				(integer(*)[ny][nx])t->cpu.arrays[1],
				(integer(*)[ny][nx])t->cpu.arrays[2]);	
		}
#ifdef CUDA
		if (gpu)
		{
			isum13pt_gpu(nx, ny, ns,
				(integer*)t->gpu.arrays[0],
				(integer*)t->gpu.arrays[1],
				(integer*)t->gpu.arrays[2]);
#ifdef VISUALIZE
#ifndef CUDA_MAPPED
			// If GPU is not using mapped host memory, then need to fetch
			// the current iteration solution explicitly.
			// TODO: in case of MPI/CUDA/!MAPPED this copy must go AFTER
			// boundaries gathering.
Developer: alysondp, Project: stenfw, Lines: 67, Source: isum13pt_test.c


Example 8: Local


//......... part of the code is omitted here .........
    if (((peer_rank     == MPI_UNDEFINED) && (mpi_errno = MPI_ERR_RANK)))
	return MPIR_ERROR( local_comm_ptr, mpi_errno, myname );

    if (((remote_leader >= peer_size)     && (mpi_errno = MPI_ERR_RANK)) || 
        ((remote_leader <  0)             && (mpi_errno = MPI_ERR_RANK))) {
	mpi_errno = MPIR_Err_setmsg( MPI_ERR_RANK, MPIR_ERR_REMOTE_RANK, 
				     myname, 
				     "Error specifying remote_leader", 
"Error specifying remote_leader; value %d not between 0 and %d", remote_leader, peer_size );
       return MPIR_ERROR( local_comm_ptr, mpi_errno, myname );
    }
  }

  if (((local_leader  >= local_size)    && (mpi_errno = MPI_ERR_RANK)) || 
      ((local_leader  <  0)             && (mpi_errno = MPI_ERR_RANK))) {
	mpi_errno = MPIR_Err_setmsg( MPI_ERR_RANK, MPIR_ERR_LOCAL_RANK, 
				     myname, 
				     "Error specifying local_leader", 
"Error specifying local_leader; value %d not in between 0 and %d", local_leader, local_size );
       return MPIR_ERROR( local_comm_ptr, mpi_errno, myname );
    }

  /* Allocate send context, inter-coll context and intra-coll context */
  MPIR_Context_alloc ( local_comm_ptr, 3, &context );

  
  /* If I'm the local leader, then exchange information */
  if (local_rank == local_leader) {
      MPIR_ERROR_PUSH(peer_comm_ptr);

      /* Post the receives for the information from the remote_leader */
      /* We don't post a receive for the remote group yet, because we */
      /* don't know how big it is yet. */
      MPIR_CALL_POP(MPI_Irecv (&remote_size, 1, MPI_INT, remote_leader, tag,
			       peer_comm, &(req[2])),peer_comm_ptr,myname);
      MPIR_CALL_POP(MPI_Irecv (&send_context, 1, MPIR_CONTEXT_TYPE, 
			       remote_leader,tag, peer_comm, &(req[3])),
		    peer_comm_ptr,myname);
    
      /* Send the lrank_to_grank table of the local_comm and an allocated */
      /* context. Currently I use multiple messages to send this info.    */
      /* Eventually, this will change(?) */
      MPIR_CALL_POP(MPI_Isend (&local_size, 1, MPI_INT, remote_leader, tag, 
               peer_comm, &(req[0])),peer_comm_ptr,myname);
      MPIR_CALL_POP(MPI_Isend (&context, 1, MPIR_CONTEXT_TYPE, remote_leader, 
               tag, peer_comm, &(req[1])),peer_comm_ptr,myname);
    
      /* Wait on the communication requests to finish */
      MPIR_CALL_POP(MPI_Waitall ( 4, req, status ),peer_comm_ptr,myname);
    
      /* We now know how big the remote group is, so create it */
      remote_group_ptr = MPIR_CreateGroup ( remote_size );
      remote_group_ptr->self = 
	  (MPI_Group) MPIR_FromPointer( remote_group_ptr );

      /* Post the receive for the group information */
      MPIR_CALL_POP(MPI_Irecv (remote_group_ptr->lrank_to_grank, remote_size, 
			       MPI_INT, remote_leader, tag, peer_comm, 
			       &(req[5])),peer_comm_ptr,myname);
    
      /* Send the local group info to the remote group */
      MPIR_CALL_POP(MPI_Isend (local_comm_ptr->group->lrank_to_grank, local_size, 
			       MPI_INT, remote_leader, tag, peer_comm, 
			       &(req[4])),peer_comm_ptr,myname);
    
      /* wait on the send and the receive for the group information */
Developer: carsten-clauss, Project: MP-MPICH, Lines: 67, Source: ic_create.c


Example 9: compute

void compute(int rank, int size)
{
	double *row1a = (double *) malloc(sizeof(double) * size_x);
	double *row1b = (double *) malloc(sizeof(double) * size_x);
	memset(row1a, 0, sizeof(double) * size_x);
	double *row2a = (double *) malloc(sizeof(double) * size_x);
	double *row2b = (double *) malloc(sizeof(double) * size_x);
	memset(row2a, 0, sizeof(double) * size_x);
	int position = id_to_position(size_y, size, rank);
	int height = id_to_size(size_y, size, rank);
	MPI_Request req[2];
	MPI_Request req2[2];
	DoubleMatrix matrix(size_x, height);

	set_fixed_temp(matrix, size_y, position, temp);
	matrix.swap();
	compute_new_values(matrix, row1a, row2a);
	set_fixed_temp(matrix, size_y, position, temp);
	matrix.swap();
	for (int i = 1; i < iterations; i++) {
		MPI_Isend(row1a, size_x, MPI_DOUBLE, (rank + size - 1) % size, TAG_ROW1,
			MPI_COMM_WORLD, &req[0]);
		MPI_Isend(row2a, size_x, MPI_DOUBLE, (rank + 1) % size, TAG_ROW2,
			MPI_COMM_WORLD, &req[1]);
		MPI_Irecv(row1b, size_x, MPI_DOUBLE, (rank + size - 1) % size, TAG_ROW2,
			MPI_COMM_WORLD, &req2[0]);
		MPI_Irecv(row2b, size_x, MPI_DOUBLE, (rank + 1) % size, TAG_ROW1,
			MPI_COMM_WORLD, &req2[1]);

		MPI_Waitall(2, req2, MPI_STATUSES_IGNORE);
		double *tmp;
		tmp = row1a; // swap row1a <-> row1b
		row1a = row1b;
		row1b = tmp;
		tmp = row2a; // swap row2a <-> row2b
		row2a = row2b;
		row2b = tmp;

		compute_new_values(matrix, row1a, row2a);
		set_fixed_temp(matrix, size_y, position, temp);
		matrix.swap();
		MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
	}

	free(row1a);
	free(row1b);
	free(row2a);
	free(row2b);

	if (rank == 0) {
		DoubleMatrix out(size_x, size_y);
		out.set_data(matrix.get_data(), size_x * position, matrix.get_data_size());

		for (int rank = 1; rank < size; rank++) {
			int position = id_to_position(size_y, size, rank);
			int height = id_to_size(size_y, size, rank);
			MPI_Recv(out.get_write_pointer(0, position),
						size_x * height, MPI_DOUBLE, rank, TAG_MATRIX,
						MPI_COMM_WORLD, MPI_STATUSES_IGNORE);
		}
		out.swap();
		out.write_to_file("result2.html");
	} else {
		MPI_Send(matrix.get_data(),
				 matrix.get_size_x() * matrix.get_size_y(),
				 MPI_DOUBLE, 0, TAG_MATRIX, MPI_COMM_WORLD);
	}
}
Developer: DiPi22, Project: kaira, Lines: 68, Source: heatflow.cpp


Example 10: va_start

va_list ap;

va_start(ap, unknown);
buf = unknown;
if (_numargs() == NUMPARAMS+1) {
        buflen = va_arg(ap, int) /8;          /* This is in bits. */
}
count =         va_arg (ap, int *);
datatype =      va_arg(ap, MPI_Datatype*);
source =          va_arg(ap, int *);
tag =           va_arg(ap, int *);
comm =          va_arg(ap, MPI_Comm*);
request =       va_arg(ap, MPI_Request *);
__ierr =        va_arg(ap, int *);

*__ierr = MPI_Irecv(MPIR_F_PTR(buf),*count,*datatype,*source,*tag,*comm,
		    &lrequest);
*(int*)request = MPI_Request_c2f(lrequest);
}

#else
void mpi_irecv_( buf, count, datatype, source, tag, comm, request, __ierr )
void             *buf;
int*count;
MPI_Datatype    * datatype;
int*source;
int*tag;
MPI_Comm         *comm;
MPI_Request      *request;
int *__ierr;
{
MPI_Request lrequest;
Developer: hpc, Project: mvapich-cce, Lines: 32, Source: irecvf.c


Example 11: MPI_Irecv

void Grid3D::Load_and_Send_MPI_Comm_Buffers_BLOCK(int dir, int *flags)
{
  int i, j, k, ii;
  int gidx;
  int idx;
  int offset;
  int ireq;
  ireq = 0;

  /* x boundaries */
  if(dir == 0)
  {
    if (flags[0]==5) { 
      // load left x communication buffer
      // 1D
      if (H.ny == 1 && H.nz == 1) {
        offset = H.n_ghost;
        for (i=0;i<H.n_ghost;i++) {
          idx = (i+H.n_ghost);
          gidx = i;
          for (ii=0; ii<H.n_fields; ii++) {
            *(send_buffer_x0 + gidx + ii*offset) = C.density[idx + ii*H.n_cells];
          }
        }
      }
      // 2D
      if (H.ny > 1 && H.nz == 1) {
        offset = H.n_ghost*(H.ny-2*H.n_ghost);
        for (i=0;i<H.n_ghost;i++) {
          for (j=0;j<H.ny-2*H.n_ghost;j++) {
            idx = (i+H.n_ghost) + (j+H.n_ghost)*H.nx;
            gidx = i + j*H.n_ghost;
            for (ii=0; ii<H.n_fields; ii++) {
              *(send_buffer_x0 + gidx + ii*offset) = C.density[idx + ii*H.n_cells];
            } 
          }
        }
      }
      // 3D
      if (H.ny > 1 && H.nz > 1) { 
        offset = H.n_ghost*(H.ny-2*H.n_ghost)*(H.nz-2*H.n_ghost);
        for(i=0;i<H.n_ghost;i++)
        {
          for(j=0;j<H.ny-2*H.n_ghost;j++)
          {
            for(k=0;k<H.nz-2*H.n_ghost;k++)
            {
              idx  = (i+H.n_ghost) + (j+H.n_ghost)*H.nx + (k+H.n_ghost)*H.nx*H.ny;
              gidx = i + j*H.n_ghost + k*H.n_ghost*(H.ny-2*H.n_ghost);
              for (ii=0; ii<H.n_fields; ii++) {
                *(send_buffer_x0 + gidx + ii*offset) = C.density[idx + ii*H.n_cells];
              }
            }
          }
        }
      }

   
      //post non-blocking receive left x communication buffer
      MPI_Irecv(recv_buffer_x0, x_buffer_length, MPI_CHREAL, source[0], 0, world, &recv_request[ireq]);

      //non-blocking send left x communication buffer
      MPI_Isend(send_buffer_x0, x_buffer_length, MPI_CHREAL, dest[0],   1, world, &send_request[0]);

      //keep track of how many sends and receives are expected
      ireq++;
    }

    if(flags[1]==5)
    {
      // load right x communication buffer
      // 1D
      if (H.ny == 1 && H.nz == 1) {
        offset = H.n_ghost;
        for (i=0;i<H.n_ghost;i++) {
          idx = (i+H.nx-2*H.n_ghost);
          gidx = i;
          for (ii=0; ii<H.n_fields; ii++) {
            *(send_buffer_x1 + gidx + ii*offset) = C.density[idx + ii*H.n_cells];
          }
        }
      }
      // 2D
      if (H.ny > 1 && H.nz == 1) {
        offset = H.n_ghost*(H.ny-2*H.n_ghost);
        for (i=0;i<H.n_ghost;i++) {
          for (j=0;j<H.ny-2*H.n_ghost;j++) {
            idx = (i+H.nx-2*H.n_ghost) + (j+H.n_ghost)*H.nx;
            gidx = i + j*H.n_ghost;
            for (ii=0; ii<H.n_fields; ii++) {
              *(send_buffer_x1 + gidx + ii*offset) = C.density[idx + ii*H.n_cells];
            }
          }
        }
      }
      // 3D
      if (H.ny > 1 && H.nz > 1) { 
        offset = H.n_ghost*(H.ny-2*H.n_ghost)*(H.nz-2*H.n_ghost);
        for(i=0;i<H.n_ghost;i++)
        {
//......... part of the code is omitted here .........
Developer: evaneschneider, Project: cholla, Lines: 101, Source: mpi_boundaries.cpp


Example 12: ADIOI_LUSTRE_W_Exchange_data


//......... part of the code is omitted here .........
                        ADIO_EXPLICIT_OFFSET, off, &status, &err);
        // --BEGIN ERROR HANDLING--
        if (err != MPI_SUCCESS) {
            *error_code = MPIO_Err_create_code(err,
                                               MPIR_ERR_RECOVERABLE,
                                               myname, __LINE__,
                                               MPI_ERR_IO,
                                               "**ioRMWrdwr", 0);
            ADIOI_Free(recv_types);
            return;
        }
        // --END ERROR HANDLING--
    }

    nprocs_send = 0;
    for (i = 0; i < nprocs; i++)
	if (send_size[i])
	    nprocs_send++;

    if (fd->atomicity) {
	/* bug fix from Wei-keng Liao and Kenin Coloma */
	requests = (MPI_Request *) ADIOI_Malloc((nprocs_send + 1) *
                                                sizeof(MPI_Request));
	send_req = requests;
    } else {
	requests = (MPI_Request *) ADIOI_Malloc((nprocs_send + nprocs_recv + 1)*
                                                sizeof(MPI_Request));
	/* +1 to avoid a 0-size malloc */

	/* post receives */
	j = 0;
	for (i = 0; i < nprocs; i++) {
	    if (recv_size[i]) {
		MPI_Irecv(MPI_BOTTOM, 1, recv_types[j], i,
			  myrank + i + 100 * iter, fd->comm, requests + j);
		j++;
	    }
	}
	send_req = requests + nprocs_recv;
    }

    /* post sends.
     * if buftype_is_contig, data can be directly sent from
     * user buf at location given by buf_idx. else use send_buf.
     */
    if (buftype_is_contig) {
	j = 0;
	for (i = 0; i < nprocs; i++)
	    if (send_size[i]) {
                ADIOI_Assert(buf_idx[i] != -1);
		MPI_Isend(((char *) buf) + buf_idx[i], send_size[i],
			  MPI_BYTE, i, myrank + i + 100 * iter, fd->comm,
			  send_req + j);
		j++;
	    }
    } else
        if (nprocs_send) {
	/* buftype is not contig */
	send_buf = (char **) ADIOI_Malloc(nprocs * sizeof(char *));
	for (i = 0; i < nprocs; i++)
	    if (send_size[i])
		send_buf[i] = (char *) ADIOI_Malloc(send_size[i]);

	ADIOI_LUSTRE_Fill_send_buffer(fd, buf, flat_buf, send_buf, offset_list,
                                      len_list, send_size, send_req,
                                      sent_to_proc, nprocs, myrank,
Developer: ORNL, Project: ompi, Lines: 67, Source: ad_lustre_wrcoll.c


Example 13: main

int main (int argc, char *argv[])
{
	void inidat();
	float  ***array;        /* array for grid */
	int	taskid,                     /* this task's unique id */
		numtasks,                   /* number of tasks */
		averow,rows,offset,extra,   /* for sending rows of data */
		dest, source,               /* to - from for message send-receive */
		left,right,        /* neighbor tasks */
		msgtype,                    /* for message types */
		rc,start,end,               /* misc */
		i,x,y,z,it,size,t_sqrt;              /* loop variables */
	MPI_Status status;
   	MPI_Datatype dt,dt2; 
    MPI_Request req, req2,req3,req4,req5;
    double t1,t2;

/* First, find out my taskid and how many tasks are running */
   	MPI_Init(&argc,&argv);
   	MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
   	MPI_Comm_rank(MPI_COMM_WORLD,&taskid);

   	/*Set number of threads */
	omp_set_num_threads(atoi(argv[1])); // Use n threads for all consecutive parallel regions
	omp_set_nested(1);

	if (taskid == 0)
   	{
   		//printf("Grid size: X= %d  Y= %d  Time steps= %d\n",NXPROB,NYPROB,STEPS);
   		t1 = MPI_Wtime();
   	}
   	i = 0;
   	while(i*i < (NXPROB*NYPROB)/numtasks)
   		i++;
   	size = i;
   	i = 0;
   	while(i*i<numtasks)
   		i++;
   	t_sqrt = i;
   	MPI_Type_contiguous(size+2,MPI_FLOAT, &dt); 
	MPI_Type_commit(&dt);
	MPI_Type_vector(size+2,1,size+2,MPI_FLOAT,&dt2);
	MPI_Type_commit(&dt2); 
	array = malloc(2*sizeof(float**));
	for (i = 0;i<2;i++){
		array[i] = malloc((2+size)*sizeof(float*));
		array[i][0] = malloc(((2+size)*(2+size))*sizeof(float));
		for (x = 1;x<2+size;x++){
			array[i][x] = &(array[i][0][x*(2+size)]);
		}
	}
	for (z=0; z<2; z++){
		for (x=0; x<2+size; x++){
			for (y=0; y<2+size; y++){
				array[z][x][y] = 0.0;
			}
		}
	}
	z = 0;
	inidat(NXPROB,NYPROB,array[z],size*(taskid/t_sqrt),size*(taskid%t_sqrt),size);
	for (i = 1; i <= STEPS; i++)
	{
		if (taskid/t_sqrt != 0) //not first row
		{
			MPI_Isend(array[z][1],1,dt,taskid-t_sqrt,100, MPI_COMM_WORLD, &req);
			MPI_Irecv(array[z][0],1,dt,taskid-t_sqrt,100, MPI_COMM_WORLD, &req2);
		}
		if (taskid/t_sqrt != t_sqrt-1) //not last row
		{
			MPI_Isend(array[z][size],1,dt,taskid+t_sqrt,100, MPI_COMM_WORLD, &req);
			MPI_Irecv(array[z][size+1],1,dt,taskid+t_sqrt,100, MPI_COMM_WORLD, &req3);
		}
		if(taskid%t_sqrt != 0) //not first column
		{
			MPI_Isend(&array[z][0][1],1,dt2,taskid-1,100, MPI_COMM_WORLD, &req);
			MPI_Irecv(&array[z][0][0],1,dt2,taskid-1,100, MPI_COMM_WORLD, &req4);
		}
		if(taskid%t_sqrt != t_sqrt-1) //not last column
		{
			MPI_Isend(&array[z][0][size],1,dt2,taskid+1,100, MPI_COMM_WORLD, &req);
			MPI_Irecv(&array[z][0][size+1],1,dt2,taskid+1,100, MPI_COMM_WORLD, &req5);
		}
		inner_update(size,array[z],array[1-z]);
		if (taskid/t_sqrt != 0) 
			MPI_Wait(&req2,&status);
		if (taskid/t_sqrt != t_sqrt-1) 
			MPI_Wait(&req3,&status);
		if(taskid%t_sqrt != 0) 
			MPI_Wait(&req4,&status);
		if(taskid%t_sqrt != t_sqrt-1) 
			MPI_Wait(&req5,&status);
		outer_update(size,taskid,t_sqrt,array[z],array[1-z]);
		z = 1-z;
	}
	if (taskid == 0){
		t2 = MPI_Wtime();
		printf("MPI_Wtime measured: %1.2f\n", t2-t1);
	} 
	for (i = 0;i<2;i++){
		free(array[i][0]);
//......... part of the code is omitted here .........
Developer: Tmichailidis, Project: Time-Dependent-Heat-Equation-Problem, Lines: 101, Source: Mpi_OpenMP_dynamic.c


Example 14: START_TRACE

int HPL_sdrv
(
   double *                         SBUF,
   int                              SCOUNT,
   int                              STAG,
   double *                         RBUF,
   int                              RCOUNT,
   int                              RTAG,
   int                              PARTNER,
   MPI_Comm                         COMM
)
{
/* 
 * Purpose
 * =======
 *
 * HPL_sdrv is a simple wrapper around MPI_Sendrecv. Its main purpose is
 * to allow for some experimentation and tuning of this simple function.
 * Messages  of  length  less than  or  equal to zero  are not sent  nor
 * received.  Successful completion  is  indicated by the returned error
 * code HPL_SUCCESS.
 *
 * Arguments
 * =========
 *
 * SBUF    (local input)                 double *
 *         On entry, SBUF specifies the starting address of buffer to be
 *         sent.
 *
 * SCOUNT  (local input)                 int
 *         On entry,  SCOUNT  specifies  the number  of double precision
 *         entries in SBUF. SCOUNT must be at least zero.
 *
 * STAG    (local input)                 int
 *         On entry,  STAG  specifies the message tag to be used for the
 *         sending communication operation.
 *
 * RBUF    (local output)                double *
 *         On entry, RBUF specifies the starting address of buffer to be
 *         received.
 *
 * RCOUNT  (local input)                 int
 *         On entry,  RCOUNT  specifies  the number  of double precision
 *         entries in RBUF. RCOUNT must be at least zero.
 *
 * RTAG    (local input)                 int
 *         On entry,  RTAG  specifies the message tag to be used for the
 *         receiving communication operation.
 *
 * PARTNER (local input)                 int
 *         On entry,  PARTNER  specifies  the rank of the  collaborative
 *         process in the communication space defined by COMM.
 *
 * COMM    (local input)                 MPI_Comm
 *         The MPI communicator identifying the communication space.
 *
 * ---------------------------------------------------------------------
 */ 
START_TRACE( SDRV )

/*
 * .. Local Variables ..
 */
#ifdef HPL_USE_MPI_DATATYPE
   MPI_Datatype               type[2];
#endif
   MPI_Request                request;
   MPI_Status                 status;
   int                        ierr;
/* ..
 * .. Executable Statements ..
 */
   if( RCOUNT > 0 )
   {
      if( SCOUNT > 0 )
      {
#ifdef HPL_USE_MPI_DATATYPE
/*
 * Post asynchronous receive
 */
         ierr =      MPI_Type_contiguous( RCOUNT, MPI_DOUBLE, &type[0] );
         if( ierr == MPI_SUCCESS ) {
            ierr =   MPI_Type_commit( &type[0] );
         }
         if( ierr == MPI_SUCCESS ) {
            ierr =   MPI_Irecv( (void *)(RBUF), 1, type[0], PARTNER,
                                RTAG, COMM, &request );
         }
/*
 * Blocking send
 */
         if( ierr == MPI_SUCCESS ) {
            ierr =   MPI_Type_contiguous( SCOUNT, MPI_DOUBLE, &type[1] );
         }
         if( ierr == MPI_SUCCESS ) {
            ierr =   MPI_Type_commit( &type[1] );
         }
         if( ierr == MPI_SUCCESS ) {
            ierr =   MPI_Send( (void *)(SBUF), 1, type[1], PARTNER,
                               STAG, COMM );
//......... part of the code is omitted here .........
Developer: davidrohr, Project: hpl-gpu, Lines: 101, Source: HPL_sdrv.c
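
Since the header comment above describes HPL_sdrv as a simple wrapper around MPI_Sendrecv, the exchange it performs corresponds roughly to the sketch below. This is not the HPL implementation: it skips the optional derived-datatype path shown above and the zero-length-message handling, and the function name sdrv_sketch is purely illustrative.

#include <mpi.h>

// A rough, illustrative equivalent of one HPL_sdrv exchange expressed
// with plain MPI_Sendrecv (not the HPL code; no zero-count handling).
static int sdrv_sketch (double *sbuf, int scount, int stag,
                        double *rbuf, int rcount, int rtag,
                        int partner, MPI_Comm comm)
{
  MPI_Status status;
  return MPI_Sendrecv (sbuf, scount, MPI_DOUBLE, partner, stag,
                       rbuf, rcount, MPI_DOUBLE, partner, rtag,
                       comm, &status);
}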


Example 15: exchange_direc1

static int exchange_direc1( double *direc1, int *local_global_index, idx_t *epart,
                            int neighbors_count, int *send_count, int **send_list,
                            int *recv_count, int **recv_list ) {
    MPI_Request *send_request, *recv_request;
    double **send_buf, **recv_buf;
    int n, i;

    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    // Start sending
    if( ( send_request = malloc( neighbors_count * sizeof( MPI_Request ) ) ) == NULL ) {
        return -1;
    }
    if( ( send_buf = malloc( neighbors_count * sizeof( double * ) ) ) == NULL ) {
        return -1;
    }
    for( n = 0; n < neighbors_count; n++ ) {
        if( ( send_buf[n] = malloc( send_count[n] * sizeof( double ) ) ) == NULL ) {
            return -1;
        }
        for( i = 0; i < send_count[n]; i++ ) {
            send_buf[n][i] = direc1[send_list[n][i]];
        }

        int dest = epart[local_global_index[recv_list[n][0]]];
        MPI_Isend( send_buf[n], send_count[n], MPI_DOUBLE, dest, 0, MPI_COMM_WORLD,
                   &send_request[n] );
    }

    // Start receiving
    if( ( recv_request = malloc( neighbors_count * sizeof( MPI_Request ) ) ) == NULL ) {
        return -1;
    }
    if( ( recv_buf = malloc( neighbors_count * sizeof( double * ) ) ) == NULL ) {
        return -1;
    }
    for( n = 0; n < neighbors_count; n++ ) {
        if( ( recv_buf[n] = malloc( recv_count[n] * sizeof( double ) ) ) == NULL ) {
            return -1;
        }

        int source = epart[local_global_index[recv_list[n][0]]];
        MPI_Irecv( recv_buf[n], recv_count[n], MPI_DOUBLE, source, 0, MPI_COMM_WORLD,
                   &recv_request[n] );
    }

    // Wait for data to be received
    for( n = 0; n < neighbors_count; n++ ) {
        MPI_Wait( &recv_request[n], MPI_STATUS_IGNORE );
        for( i = 0; i < recv_count[n]; i++ ) {
            direc1[recv_list[n][i]] = recv_buf[n][i];
        }
        free( recv_buf[n] );
    }
    free( recv_buf );
    free( recv_request );

    // Wait for data to be sent
    for( n = 0; n < neighbors_count; n++ ) {
        MPI_Wait( &send_request[n], MPI_STATUS_IGNORE );
        free( send_buf[n] );
    }
    free( send_buf );
    free( send_request );

    return 0;
}
Developer: tusharuiit, Project: 2013_2014_ProgrammingOfSuperComputers, Lines: 68, Source: compute_solution.c


Example 16: main

int main (int argc, char *argv[])
{
int numtasks, rank, buf, tag1=1, i, rc, dest, src, offset, nreqs;
double T1, T2;
MPI_Request reqs[REPS*2];
MPI_Status stats[REPS*2];

MPI_Init(&argc,&argv);
MPI_Comm_size(COMM, &numtasks);
MPI_Comm_rank(COMM, &rank);

/* Require 4 tasks */
if (rank == 0 ) {
  if (numtasks != 4) {
    printf("ERROR: Number of tasks must be 4. Quitting.\n");
    MPI_Abort(COMM, rc);
    }
  printf("Starting isend/irecv send/irecv test...\n");
  }

/* Use barriers for clean output */
MPI_Barrier(COMM);
printf("Task %d starting...\n", rank);
MPI_Barrier(COMM);

T1 = MPI_Wtime();     /* start the clock */

/* Tasks 0 and 1 do the isend/irecv test. 
*  Determine who to send/receive with. nreqs specifies how many non-blocking
*  operation request handles to capture. offset is where the task should
*  store each request as it is captured in the reqs() array.         */
if (rank < 2) {
  nreqs = REPS*2;
  if (rank == 0) {
    src = 1;
    offset = 0;
    }
  if (rank == 1) {
    src = 0;
    offset = 0;
    }
  dest = src;

/* Do the non-blocking send and receive operations */
  for (i=0; i<REPS; i++) {
    MPI_Isend(&rank, 1, MPI_INT, dest, tag1, COMM, &reqs[offset]);
    MPI_Irecv(&buf, 1, MPI_INT, src, tag1, COMM, &reqs[offset+1]);
    offset += 2;
    if ((i+1)%DISP == 0)
      printf("Task %d has done %d isends/irecvs\n", rank, i+1);
    }
  }

/* Tasks 2 and 3 do the send/irecv test. 
   Determine who to send/receive with. nreqs specifies how many non-blocking
   operation request handles to capture. offset is where the task should
   store each request as it is captured in the reqs() array.  */
if (rank > 1) {
  nreqs = REPS;

/* Task 2 does the blocking send operation */
  if (rank == 2) {
    dest = 3;
    for (i=0; i<REPS; i++) {
      MPI_Send(&rank, 1, MPI_INT, dest, tag1, COMM);
      if ((i+1)%DISP == 0)
        printf("Task %d has done %d sends\n", rank, i+1);
      }
    }

/* Task 3 does the non-blocking receive operation */
  if (rank == 3) {
    src = 2;
    offset = 0;
    for (i=0; i<REPS; i++) {
      MPI_Irecv(&buf, 1, MPI_INT, src, tag1, COMM, &reqs[offset]);
      offset += 1;
      if ((i+1)%DISP == 0)
        printf("Task %d has done %d irecvs\n", rank, i+1);
      }
    }

  }

/* Wait for all non-blocking operations to complete and record time */
MPI_Waitall(nreqs, reqs, stats);
T2 = MPI_Wtime();     /* end time */
MPI_Barrier(COMM);

printf("Task %d time(wall)= %lf sec\n", rank, T2-T1);

MPI_Finalize();
}
Developer: hpc12, Project: hw6-problem1, Lines: 93, Source: mpi_bug6.c

