C++ MPI_Waitall Function Code Examples

This article collects typical usage examples of the MPI_Waitall function in C++. If you have been wondering what exactly MPI_Waitall does, how to call it, or where to find working examples of it, the hand-picked code samples below should help.



The text below presents 20 code examples of the MPI_Waitall function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
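Before diving into the examples, it may help to see the basic pattern that almost all of them share: post a batch of non-blocking operations with MPI_Irecv/MPI_Isend, then complete the whole batch with a single MPI_Waitall call. The following is a minimal, self-contained sketch of that pattern for a ring exchange; the neighbour layout, tag, and buffer sizes are illustrative assumptions and are not taken from any specific example below.

/* Minimal illustrative sketch: ring exchange completed with MPI_Waitall. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Exchange one int with the left and right neighbours in a ring. */
    int left  = (rank - 1 + size) % size;
    int right = (rank + 1) % size;
    int sendbuf = rank, recv_left = -1, recv_right = -1;

    MPI_Request reqs[4];
    MPI_Irecv(&recv_left,  1, MPI_INT, left,  0, MPI_COMM_WORLD, &reqs[0]);
    MPI_Irecv(&recv_right, 1, MPI_INT, right, 0, MPI_COMM_WORLD, &reqs[1]);
    MPI_Isend(&sendbuf, 1, MPI_INT, left,  0, MPI_COMM_WORLD, &reqs[2]);
    MPI_Isend(&sendbuf, 1, MPI_INT, right, 0, MPI_COMM_WORLD, &reqs[3]);

    /* Block until all four requests (two receives, two sends) complete. */
    MPI_Waitall(4, reqs, MPI_STATUSES_IGNORE);

    printf("rank %d received %d from the left and %d from the right\n",
           rank, recv_left, recv_right);

    MPI_Finalize();
    return 0;
}

If per-request error details are needed (as in Example 6 below), pass an array of MPI_Status instead of MPI_STATUSES_IGNORE and inspect each status when MPI_Waitall returns an error of class MPI_ERR_IN_STATUS.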

Example 1: SetUp


//......... part of the code omitted here .........
  for (i=0; i<nnbrs; i++) {
    recvrequests[peind[i]].key = recvptr[i+1]-recvptr[i];
    recvrequests[peind[i]].val = nvtxs+recvptr[i];
  }
  MPI_Alltoall((void *)recvrequests, 2, IDX_DATATYPE, (void *)sendrequests, 2, IDX_DATATYPE, ctrl->comm);


  sendptr = graph->sendptr = idxmalloc(npes+1, "SetUp: sendptr");
  startsind = wspace->pv2;
  for (j=i=0; i<npes; i++) {
    if (sendrequests[i].key > 0) {
      sendptr[j] = sendrequests[i].key;
      startsind[j] = sendrequests[i].val;
      j++;
    }
  }
  ASSERT(ctrl, nnbrs == j);
  MAKECSR(i, j, sendptr);

  nsend = sendptr[nnbrs];
  sendind = graph->sendind = idxmalloc(nsend, "SetUp: sendind");


  /* Issue the receives for sendind */
  for (i=0; i<nnbrs; i++) {
    MPI_Irecv((void *)(sendind+sendptr[i]), sendptr[i+1]-sendptr[i], IDX_DATATYPE, 
              peind[i], 1, ctrl->comm, ctrl->rreq+i);
  }

  /* Issue the sends. My recvind[penum] becomes penum's sendind[mype] */
  for (i=0; i<nnbrs; i++) {
    MPI_Isend((void *)(recvind+recvptr[i]), recvptr[i+1]-recvptr[i], IDX_DATATYPE,
              peind[i], 1, ctrl->comm, ctrl->sreq+i);
  }

  MPI_Waitall(nnbrs, ctrl->rreq, ctrl->statuses);
  MPI_Waitall(nnbrs, ctrl->sreq, ctrl->statuses);



  /* Create the peadjncy data structure for sparse boundary exchanges */
  pexadj = graph->pexadj = idxsmalloc(nvtxs+1, 0, "SetUp: pexadj");
  peadjncy = graph->peadjncy = idxmalloc(nsend, "SetUp: peadjncy");
  peadjloc = graph->peadjloc = idxmalloc(nsend, "SetUp: peadjloc");

  for (i=0; i<nsend; i++) {
    ASSERTP(ctrl, sendind[i] >= firstvtx && sendind[i] < lastvtx, (ctrl, "%d %d %d\n", sendind[i], firstvtx, lastvtx));
    pexadj[sendind[i]-firstvtx]++;
  }
  MAKECSR(i, nvtxs, pexadj);

  for (i=0; i<nnbrs; i++) {
    for (j=sendptr[i]; j<sendptr[i+1]; j++) {
      k = pexadj[sendind[j]-firstvtx]++;
      peadjncy[k] = i;  /* peind[i] is the actual PE number */
      peadjloc[k] = startsind[i]++;
    }
  }
  ASSERT(ctrl, pexadj[nvtxs] == nsend);

  for (i=nvtxs; i>0; i--)
    pexadj[i] = pexadj[i-1];
  pexadj[0] = 0;


  graph->nnbrs = nnbrs;
  graph->nrecv = nrecv;
  graph->nsend = nsend;
  graph->nlocal = nlocal;


  /* Create the inverse map from ladjncy to adjncy */
  imap = graph->imap = idxmalloc(nvtxs+nrecv, "SetUp: imap");
  for (i=0; i<nvtxs; i++)
    imap[i] = firstvtx+i;
  for (i=0; i<nrecv; i++)
    imap[nvtxs+i] = recvind[i];


  /* Check if wspace->nlarge is large enough for nrecv and nsend */
  if (wspace->nlarge < nrecv+nsend) {
    free(wspace->indices);
    free(wspace->pairs);
    wspace->nlarge = nrecv+nsend;
    wspace->indices = idxmalloc(wspace->nlarge, "SetUp: wspace->indices");
    wspace->pairs = (KeyValueType *)GKmalloc(sizeof(KeyValueType)*wspace->nlarge, "SetUp: wspace->pairs");
  }

  IFSET(ctrl->dbglvl, DBG_TIME, stoptimer(ctrl->SetupTmr));

#ifdef DEBUG_SETUPINFO
  rprintf(ctrl, "[%5d %5d] \tl:[%5d %5d] \ts:[%5d, %5d] \tr:[%5d, %5d]\n", 
            GlobalSEMin(ctrl, nvtxs), GlobalSEMax(ctrl, nvtxs),
            GlobalSEMin(ctrl, nlocal), GlobalSEMax(ctrl, nlocal),
            GlobalSEMin(ctrl, nsend), GlobalSEMax(ctrl, nsend),
            GlobalSEMin(ctrl, nrecv), GlobalSEMax(ctrl, nrecv));

  PrintSetUpInfo(ctrl, graph);
#endif
}
Developer: KnoooW, Project: gpgpu-sim, Lines of code: 101, Source file: setup.c


Example 2: xchange_deri

void xchange_deri() {
  int cntr=0;
#ifdef MPI
  MPI_Request request[8];
  MPI_Status status[8];
  int ix,mu, t, y, z, x;

#    if (defined PARALLELT || defined PARALLELXT || defined PARALLELXYT || defined PARALLELXYZT )
  /* send the data to the neighbour on the left in time direction */
  /* receive the data from the neighbour on the right in time direction */
  MPI_Isend(&df0[gI_m1_0_0_0][0].d1,    1, deri_time_slice_cont, g_nb_t_dn, 43,
	    g_cart_grid, &request[cntr]);
  
  MPI_Irecv(&ddummy[gI_Lm1_0_0_0][0].d1, 1, deri_time_slice_cont, g_nb_t_up, 43,
	    g_cart_grid, &request[cntr+1]);
  cntr=cntr+2;
#    endif

#    if (defined PARALLELXT || defined PARALLELXYT || defined PARALLELXYZT || defined PARALLELX || defined PARALLELXY || defined PARALLELXYZ )

  /* send the data to the neighbour on the left in x direction */
  /* receive the data from the neighbour on the right in x direction */
  MPI_Isend(&df0[gI_0_m1_0_0][0],    1, deri_x_slice_cont, g_nb_x_dn, 44,
	    g_cart_grid, &request[cntr]);
  MPI_Irecv(&ddummy[gI_0_Lm1_0_0][0],             1, deri_x_slice_gath, g_nb_x_up, 44,
	    g_cart_grid, &request[cntr+1]);
  cntr=cntr+2;
#    endif
#    if (defined PARALLELXYT || defined PARALLELXYZT || defined PARALLELXY || defined PARALLELXYZ )
  /* send the data to the neighbour on the left in y direction */
  /* receive the data from the neighbour on the right in y direction */
  MPI_Isend((void*)df0[gI_0_0_m1_0], 
	    1, deri_y_slice_cont, g_nb_y_dn, 45,
	    g_cart_grid, &request[cntr]);
  MPI_Irecv((void*)ddummy[gI_0_0_Lm1_0],
	    1, deri_y_slice_gath, g_nb_y_up, 45,
	    g_cart_grid, &request[cntr+1]);
  cntr=cntr+2;
#    endif
#    if (defined PARALLELXYZT || defined PARALLELXYZ )
  /* send the data to the neighbour on the left in z direction */
  /* receive the data from the neighbour on the right in z direction */
  MPI_Isend((void*)df0[gI_0_0_0_m1], 
	    1, deri_z_slice_cont, g_nb_z_dn, 46,
	    g_cart_grid, &request[cntr]);
  MPI_Irecv((void*)ddummy[gI_0_0_0_Lm1],
	    1, deri_z_slice_gath, g_nb_z_up, 46,
	    g_cart_grid, &request[cntr+1]);
  cntr=cntr+2;
#    endif
  MPI_Waitall(cntr, request, status);

#    if (defined PARALLELT || defined PARALLELXT || defined PARALLELXYT || defined PARALLELXYZT )

  /* add ddummy to df0 */
  for(x = 0; x < LX; x++) {
    for(y = 0; y < LY; y++) {
      for(z = 0; z < LZ; z++) {
	ix = g_ipt[T-1][x][y][z];
	for(mu=0;mu<4;mu++){ 
	  df0[ix][mu].d1 += ddummy[ix][mu].d1;
	  df0[ix][mu].d2 += ddummy[ix][mu].d2;
	  df0[ix][mu].d3 += ddummy[ix][mu].d3;
	  df0[ix][mu].d4 += ddummy[ix][mu].d4;
	  df0[ix][mu].d5 += ddummy[ix][mu].d5;
	  df0[ix][mu].d6 += ddummy[ix][mu].d6;
	  df0[ix][mu].d7 += ddummy[ix][mu].d7;
	  df0[ix][mu].d8 += ddummy[ix][mu].d8;
	}
      }
    }
  }

  /* send the data to the neighbour on the right is not needed*/
#    endif

#    if (defined PARALLELXT || defined PARALLELXYT || defined PARALLELXYZT || defined PARALLELX || defined PARALLELXY || defined PARALLELXYZ )

  /* add ddummy to df0 */
  for(t = 0; t < T; t++) {
    for(y = 0; y < LY; y++) {
      for(z = 0; z < LZ; z++) {
	ix = g_ipt[t][LX-1][y][z];
	for(mu=0;mu<4;mu++){
	  df0[ix][mu].d1 += ddummy[ix][mu].d1;
	  df0[ix][mu].d2 += ddummy[ix][mu].d2;
	  df0[ix][mu].d3 += ddummy[ix][mu].d3;
	  df0[ix][mu].d4 += ddummy[ix][mu].d4;
	  df0[ix][mu].d5 += ddummy[ix][mu].d5;
	  df0[ix][mu].d6 += ddummy[ix][mu].d6;
	  df0[ix][mu].d7 += ddummy[ix][mu].d7;
	  df0[ix][mu].d8 += ddummy[ix][mu].d8;
	}
      }
    }
  }
  /* send the data to the neighbour on the right is not needed*/  

#    endif

//......... part of the code omitted here .........
Developer: annube, Project: tmLQCD, Lines of code: 101, Source file: xchange_deri.c


Example 3: ADIOI_Exch_file_views


//......... part of the code omitted here .........
        if (fd->hints->cb_alltoall != ADIOI_HINT_DISABLE) {
            send_count_arr[tmp_agg_idx].count = flat_file_p->count;
            send_count_arr[tmp_agg_idx].fp_ind = disp_off_sz_ext_typesz[0];
            send_count_arr[tmp_agg_idx].disp = disp_off_sz_ext_typesz[1];
            send_count_arr[tmp_agg_idx].byte_off = disp_off_sz_ext_typesz[2];
            send_count_arr[tmp_agg_idx].sz = disp_off_sz_ext_typesz[3];
            send_count_arr[tmp_agg_idx].ext = disp_off_sz_ext_typesz[4];
            send_count_arr[tmp_agg_idx].type_sz = disp_off_sz_ext_typesz[5];
        }
    }

#ifdef DEBUG2
    fprintf(stderr, "my own flattened memtype: ");
    ADIOI_Print_flatlist_node(flat_mem_p);
    fprintf(stderr, "my own flattened filetype: ");
    ADIOI_Print_flatlist_node(flat_file_p);
#endif

    if (fd->hints->cb_alltoall != ADIOI_HINT_DISABLE) {
        ret = MPI_Alltoall(send_count_arr, sizeof(amount_and_extra_data_t),
                           MPI_BYTE,
                           recv_count_arr, sizeof(amount_and_extra_data_t), MPI_BYTE, fd->comm);
        if (ret != MPI_SUCCESS) {
            fprintf(stderr, "ADIOI_Exchange_file_views: MPI_Alltoall failed " "with error %d", ret);
            return;
        }
    } else {
#ifdef MPI_STATUSES_IGNORE
        statuses = MPI_STATUSES_IGNORE;
#else
        statuses = (MPI_Status *) ADIOI_Malloc(1 + nprocs * sizeof(MPI_Status));
#endif
        if (fd->is_agg) {
            MPI_Waitall(nprocs, recv_req_arr, statuses);
            ADIOI_Free(recv_req_arr);
        }
        MPI_Waitall(fd->hints->cb_nodes, send_req_arr, statuses);
#ifndef MPI_STATUSES_IGNORE
        ADIOI_Free(statuses);
#endif
        ADIOI_Free(send_req_arr);
    }
#ifdef DEBUG2
    if (fd->hints->cb_alltoall != ADIOI_HINT_DISABLE) {
        fprintf(stderr, "send_count_arr:");
        for (i = 0; i < nprocs; i++) {
            fprintf(stderr, "[%d]=%d ", i, send_count_arr[i].count);
        }
        fprintf(stderr, "\n");
        fprintf(stderr, "recv_count_arr:");
        for (i = 0; i < nprocs; i++) {
            fprintf(stderr, "[%d]=%d ", i, recv_count_arr[i].count);
        }
        fprintf(stderr, "\n");
    } else {
        fprintf(stderr, "send_count_arr:");
        for (i = 0; i < fd->hints->cb_nodes; i++) {
            fprintf(stderr, "[%d]=%d ", i, send_count_arr[i].count);
        }
        fprintf(stderr, "\n");
        if (fd->is_agg) {
            fprintf(stderr, "recv_count_arr:");
            for (i = 0; i < nprocs; i++) {
                fprintf(stderr, "[%d]=%d ", i, recv_count_arr[i].count);
            }
            fprintf(stderr, "\n");
Developer: jeffhammond, Project: mpich, Lines of code: 67, Source file: ad_coll_exch_new.c


Example 4: main

int main(int argc, char **argv) {
  int myRank, numPes;

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &numPes);
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
  MPI_Request sreq[2], rreq[2];

  int blockDimX, arrayDimX, arrayDimY;

  if (argc != 2 && argc != 3) {
    printf("%s [array_size] \n", argv[0]);
    printf("%s [array_size_X] [array_size_Y] \n", argv[0]);
    MPI_Abort(MPI_COMM_WORLD, -1);
  }

  if(argc == 2) {
    arrayDimY = arrayDimX = atoi(argv[1]);
  }
  else {
    arrayDimX = atoi(argv[1]);
    arrayDimY = atoi(argv[2]);
  }

  if (arrayDimX % numPes != 0) {
    printf("array_size_X % numPes != 0!\n");
    MPI_Abort(MPI_COMM_WORLD, -1);
  }

  blockDimX = arrayDimX / numPes;

  int iterations = 0, i, j;
  double error = 1.0, max_error = 0.0;

  if(myRank == 0) {
    printf("Running Jacobi on %d processors\n", numPes);
    printf("Array Dimensions: %d %d\n", arrayDimX, arrayDimY);
    printf("Block Dimensions: %d\n", blockDimX);
  }

  double **temperature;
  double **new_temperature;

  /* allocate two dimensional arrays */
  temperature = new double*[blockDimX+2];
  new_temperature = new double*[blockDimX+2];
  for (i=0; i<blockDimX+2; i++) {
    temperature[i] = new double[arrayDimY];
    new_temperature[i] = new double[arrayDimY];
  }
  for(i=0; i<blockDimX+2; i++) {
    for(j=0; j<arrayDimY; j++) {
      temperature[i][j] = 0.5;
      new_temperature[i][j] = 0.5;
    }
  }

  // boundary conditions
  if(myRank < numPes/2) {
    for(i=1; i<=blockDimX; i++)
      temperature[i][0] = 1.0;
  }

  if(myRank == numPes-1) {
    for(j=arrayDimY/2; j<arrayDimY; j++)
      temperature[blockDimX][j] = 0.0;
  }

  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Pcontrol(1);
  startTime = MPI_Wtime();

  while(/*error > 0.001 &&*/ iterations < MAX_ITER) {
    iterations++;

    /* Receive my bottom and top edge */
    MPI_Irecv(&temperature[blockDimX+1][0], arrayDimY, MPI_DOUBLE, wrap_x(myRank+1), BOTTOM, MPI_COMM_WORLD, &rreq[BOTTOM-1]);
    MPI_Irecv(&temperature[0][0], arrayDimY, MPI_DOUBLE, wrap_x(myRank-1), TOP, MPI_COMM_WORLD, &rreq[TOP-1]);

    /* Send my top and bottom edge */
    MPI_Isend(&temperature[1][0], arrayDimY, MPI_DOUBLE, wrap_x(myRank-1), BOTTOM, MPI_COMM_WORLD, &sreq[BOTTOM-1]);
    MPI_Isend(&temperature[blockDimX][0], arrayDimY, MPI_DOUBLE, wrap_x(myRank+1), TOP, MPI_COMM_WORLD, &sreq[TOP-1]);

    MPI_Waitall(2, rreq, MPI_STATUSES_IGNORE);
    MPI_Waitall(2, sreq, MPI_STATUSES_IGNORE);

    for(i=1; i<blockDimX+1; i++) {
      for(j=0; j<arrayDimY; j++) {
        /* update my value based on the surrounding values */
        new_temperature[i][j] = (temperature[i-1][j]+temperature[i+1][j]+temperature[i][wrap_y(j-1)]+temperature[i][wrap_y(j+1)]+temperature[i][j]) * 0.2;
      }
    }

    max_error = error = 0.0;
    for(i=1; i<blockDimX+1; i++) {
      for(j=0; j<arrayDimY; j++) {
	error = fabs(new_temperature[i][j] - temperature[i][j]);
	if(error > max_error)
	  max_error = error;
      }
//......... part of the code omitted here .........
Developer: bhatele, Project: CommProxies, Lines of code: 101, Source file: jacobi1d.C


Example 5: mpi_waitall

void  mpi_waitall(int *count, int *request, int *status, int *ierr)
{
    *ierr = MPI_Waitall(*count, (MPI_Request *) request, (MPI_Status *) status);
    return;
}
Developer: hadimontakhabi, Project: VolpexMPI-HPX, Lines of code: 5, Source file: volpex_fAPI.c


Example 6: main

int main( int argc, char *argv[] )
{
    int errs = 0;
    MPI_Comm comm;
    MPI_Request r[2];
    MPI_Status  s[2];
    int errval, errclass;
    int b1[20], b2[20], rank, size, src, dest, i;

    MTest_Init( &argc, &argv );

    /* Create some receive requests.  tags 0-9 will succeed, tags 10-19 
       will be used for ERR_TRUNCATE (fewer than 20 messages will be used) */
    comm = MPI_COMM_WORLD;

    MPI_Comm_rank( comm, &rank );
    MPI_Comm_size( comm, &size );

    src  = 1;
    dest = 0;
    if (rank == dest) {
	MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
	errval = MPI_Irecv( b1, 10, MPI_INT, src, 0, comm, &r[0] );
	if (errval) {
	    errs++;
	    MTestPrintError( errval );
	    printf( "Error returned from Irecv\n" );
	}
	errval = MPI_Irecv( b2, 10, MPI_INT, src, 10, comm, &r[1] );
	if (errval) {
	    errs++;
	    MTestPrintError( errval );
	    printf( "Error returned from Irecv\n" );
	}

	errval = MPI_Barrier(comm);
	if (errval) {
	    errs++;
	    MTestPrintError( errval );
	    printf( "Error returned from Barrier\n" );
	}
	for (i=0; i<2; i++) {
	    s[i].MPI_ERROR = -1;
	}
	errval = MPI_Waitall( 2, r, s );
        MPI_Error_class( errval, &errclass );
	if (errclass != MPI_ERR_IN_STATUS) {
	    errs++;
	    printf( "Did not get ERR_IN_STATUS in Waitall\n" );
	}
	else {
	    /* Check for success */
	    /* We allow ERR_PENDING (neither completed nor in error) in case
	       the MPI implementation exits the Waitall when an error 
	       is detected. Thanks to Jim Hoekstra of Iowa State University
	       and Kim McMahon for finding this bug in the test. */
	    for (i=0; i<2; i++) {
		if (s[i].MPI_TAG < 10 && (s[i].MPI_ERROR != MPI_SUCCESS &&
			                  s[i].MPI_ERROR != MPI_ERR_PENDING)) {
		    char msg[MPI_MAX_ERROR_STRING];
		    int msglen = MPI_MAX_ERROR_STRING;
		    errs++;
		    printf( "correct msg had error code %d\n", 
			    s[i].MPI_ERROR );
		    MPI_Error_string( s[i].MPI_ERROR, msg, &msglen );
		    printf( "Error message was %s\n", msg );
		}
		else if (s[i].MPI_TAG >= 10 && s[i].MPI_ERROR == MPI_SUCCESS) {
		    errs++;
		    printf( "truncated msg had MPI_SUCCESS\n" );
		}
	    }
	}

    }
    else if (rank == src) {
	/* Send messages, then barrier so that the wait does not start 
	   until we are sure that the sends have begun */
	MPI_Send( b1, 10, MPI_INT, dest, 0, comm );
	MPI_Send( b2, 11, MPI_INT, dest, 10, comm );
	MPI_Barrier(comm);
    }
    else {
	MPI_Barrier(comm);
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
  
}
Developer: qingu, Project: WRF-Libraries, Lines of code: 91, Source file: errinstatwa.c


Example 7: main

int main(int argc, char **argv)
{
    int errs = 0;
    MPI_Status status, *status_array = 0;
    int count = 0, flag, idx, rc, errlen, *indices=0, outcnt;
    MPI_Request *reqs = 0;
    char errmsg[MPI_MAX_ERROR_STRING];

    MTest_Init(&argc, &argv);

    MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );

    rc = MPI_Testall( count, reqs, &flag, status_array );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Testall returned failure: %s\n", errmsg );
	errs ++;
    }
    else if (!flag) {
	printf( "MPI_Testall( 0, ... ) did not return a true flag\n") ;
	errs++;
    }

    rc = MPI_Waitall( count, reqs, status_array );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Waitall returned failure: %s\n", errmsg );
	errs ++;
    }

    rc = MPI_Testany( count, reqs, &idx, &flag, &status );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Testany returned failure: %s\n", errmsg );
	errs ++;
    }
    else if (!flag) {
	printf( "MPI_Testany( 0, ... ) did not return a true flag\n") ;
	errs++;
    }

    rc = MPI_Waitany( count, reqs, &idx, &status );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Waitany returned failure: %s\n", errmsg );
	errs ++;
    }

    rc = MPI_Testsome( count, reqs, &outcnt, indices, status_array );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Testsome returned failure: %s\n", errmsg );
	errs ++;
    }

    rc = MPI_Waitsome( count, reqs, &outcnt, indices, status_array );
    if (rc != MPI_SUCCESS) {
	MPI_Error_string( rc, errmsg, &errlen );
	printf( "MPI_Waitsome returned failure: %s\n", errmsg );
	errs ++;
    }
    
    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Developer: Julio-Anjos, Project: simgrid, Lines of code: 66, Source file: waittestnull.c


Example 8: boundary_conditions

void  boundary_conditions(double ****f)
{
   int i, j, k, l, req_numS=0, req_numR=0;
   int r1,r2,z1,z2;
   int flag,cnt,z[7],ztemp,tag=10;
   double vrho,vphi,vth,An;
   char msg_err[100];       //for putlog+mpi_error
   int reslen;

   z[0]=1; z[1]=z[2]=z[3]=-1; //  affects the type of boundary conditions (-1: rigid, 1: free)
   z[4]=-1; z[5]=1;          // rather for Anorm&Atau not Ar,Aphi,Az

  /*============================ divertor =================================*/
  if(t_cur>100)                  // divertors are off till t sec
  if(n[2]==0)
  for(i=0;i<m1;i++)
    for(j=ghost;j<=ghost;j++)
      for(k=0;k<m3;k++)
         {
         vrho = f[3][i][j][k]*costh[i][k]+f[1][i][j][k]*sinth[i][k];
         vth  = -f[3][i][j][k]*sinth[i][k]+f[1][i][j][k]*costh[i][k];
         vphi = sqrt(pow(f[2][i][j][k],2)+pow(vth,2));                    //=sqrt(vfi^2+vth^2)
         f[1][i][j][k] = vrho*sinth[i][k]+vphi*costh[i][k]*sin(chi[i][k]);
         f[2][i][j][k] = vphi*cos(chi[i][k]);
         f[3][i][j][k] = vrho*costh[i][k]-vphi*sinth[i][k]*sin(chi[i][k]);
         }

  /*-------------------------------- exchanging of ghosts -------------------------------------*/
// exchanging in phi-direction - periodical directions first
 if(pr_neighbour[2]>-1)
  if(pr_neighbour[2]==rank) CopyGridToBuffer(f,nut,buf_recv[2],0,n2,0,m1-1,mm2-1,m3-1);
         else { CopyGridToBuffer(f,nut,buf_send[2],0,ghost,0,m1-1,2*ghost-1,m3-1);
                MPI_Isend(buf_send[2],buf_size[1],MPI_DOUBLE,pr_neighbour[2],tag+2,MPI_COMM_WORLD,&SendRequest[req_numS++]);
                MPI_Irecv(buf_recv[2],buf_size[1],MPI_DOUBLE,pr_neighbour[2],tag+3,MPI_COMM_WORLD,&RecvRequest[req_numR++]);
                }
 if(pr_neighbour[3]>-1)
  if(pr_neighbour[3]==rank) CopyGridToBuffer(f,nut,buf_recv[3],0,ghost,0,m1-1,2*ghost-1,m3-1);
         else { CopyGridToBuffer(f,nut,buf_send[3],0,n2,0,m1-1,mm2-1,m3-1);
                MPI_Isend(buf_send[3],buf_size[1],MPI_DOUBLE,pr_neighbour[3],tag+3,MPI_COMM_WORLD,&SendRequest[req_numS++]);
                MPI_Irecv(buf_recv[3],buf_size[1],MPI_DOUBLE,pr_neighbour[3],tag+2,MPI_COMM_WORLD,&RecvRequest[req_numR++]);
                }
   MPI_Waitall(req_numR,RecvRequest,statuses);
   if(statuses[0].MPI_ERROR) {putlog("bc:error during transfer=",numlog++);
                               MPI_Error_string(statuses[0].MPI_ERROR,msg_err,&reslen);
                               msg_err[reslen++] = ','; msg_err[reslen]= 0;
                               putlog(msg_err,numlog++);
                               }    else numlog++;
  if(pr_neighbour[2]>-1) CopyBufferToGrid(f,nut,buf_recv[2],0,0,0,m1-1,ghost-1,m3-1);
  if(pr_neighbour[3]>-1) CopyBufferToGrid(f,nut,buf_recv[3],0,mm2,0,m1-1,m2-1,m3-1);

// exchanging in r-direction
 if(pr_neighbour[0]>-1)
  if(pr_neighbour[0]==rank) CopyGridToBuffer(f,nut,buf_recv[0],n1,0,0,mm1-1,m2-1,m3-1);
         else { CopyGridToBuffer(f,nut,buf_send[0],ghost,0,0,2*ghost-1,m2-1,m3-1);
                MPI_Isend(buf_send[0],buf_size[0],MPI_DOUBLE,pr_neighbour[0],tag,MPI_COMM_WORLD,&SendRequest[req_numS++]);
                MPI_Irecv(buf_recv[0],buf_size[0],MPI_DOUBLE,pr_neighbour[0],tag+1,MPI_COMM_WORLD,&RecvRequest[req_numR++]);
                }
 if(pr_neighbour[1]>-1)
  if(pr_neighbour[1]==rank) CopyGridToBuffer(f,nut,buf_recv[1],ghost,0,0,2*ghost-1,m2-1,m3-1);
         else { CopyGridToBuffer(f,nut,buf_send[1],n1,0,0,mm1-1,m2-1,m3-1);
                MPI_Isend(buf_send[1],buf_size[0],MPI_DOUBLE,pr_neighbour[1],tag+1,MPI_COMM_WORLD,&SendRequest[req_numS++]);
                MPI_Irecv(buf_recv[1],buf_size[0],MPI_DOUBLE,pr_neighbour[1],tag,MPI_COMM_WORLD,&RecvRequest[req_numR++]);
                }
  MPI_Waitall(req_numR,RecvRequest,statuses);
  if(statuses[0].MPI_ERROR) {putlog("bc:error during transfer=",numlog++);
                               MPI_Error_string(statuses[0].MPI_ERROR,msg_err,&reslen);
                               msg_err[reslen++] = ','; msg_err[reslen]= 0;
                               putlog(msg_err,numlog++);
                               }    else numlog++;
  if(pr_neighbour[0]>-1) CopyBufferToGrid(f,nut,buf_recv[0],0,0,0,ghost-1,m2-1,m3-1);
  if(pr_neighbour[1]>-1) CopyBufferToGrid(f,nut,buf_recv[1],mm1,0,0,m1-1,m2-1,m3-1);

// exchanging in z-direction
 if(pr_neighbour[4]>-1)
  if(pr_neighbour[4]==rank) CopyGridToBuffer(f,nut,buf_recv[4],0,0,mm3,m1-1,m2-1,m3-1);
         else { CopyGridToBuffer(f,nut,buf_send[4],0,0,ghost,m1-1,m2-1,2*ghost-1);
                MPI_Isend(buf_send[4],buf_size[2],MPI_DOUBLE,pr_neighbour[4],tag+4,MPI_COMM_WORLD,&SendRequest[req_numS++]);
                MPI_Irecv(buf_recv[4],buf_size[2],MPI_DOUBLE,pr_neighbour[4],tag+5,MPI_COMM_WORLD,&RecvRequest[req_numR++]);
                }
 if(pr_neighbour[5]>-1)
  if(pr_neighbour[5]==rank) CopyGridToBuffer(f,nut,buf_recv[5],0,0,0,m1-1,m2-1,ghost-1);
        else { CopyGridToBuffer(f,nut,buf_send[5],0,0,n3,m1-1,m2-1,mm3-1);
               MPI_Isend(buf_send[5],buf_size[2],MPI_DOUBLE,pr_neighbour[5],tag+5,MPI_COMM_WORLD,&SendRequest[req_numS++]);
               MPI_Irecv(buf_recv[5],buf_size[2],MPI_DOUBLE,pr_neighbour[5],tag+4,MPI_COMM_WORLD,&RecvRequest[req_numR++]);
               }
  MPI_Waitall(req_numR,RecvRequest,statuses);
  if(statuses[0].MPI_ERROR) {putlog("bc:error during transfer=",numlog++);
                               MPI_Error_string(statuses[0].MPI_ERROR,msg_err,&reslen);
                               msg_err[reslen++] = ','; msg_err[reslen]= 0;
                               putlog(msg_err,numlog++);
                               }    else numlog++;
  if(pr_neighbour[4]>-1) CopyBufferToGrid(f,nut,buf_recv[4],0,0,0,m1-1,m2-1,ghost-1);
  if(pr_neighbour[5]>-1) CopyBufferToGrid(f,nut,buf_recv[5],0,0,mm3,m1-1,m2-1,m3-1);

//    MPI_Barrier(MPI_COMM_WORLD);
//    MPI_Startall(req_numR,RecvRequest);
//    MPI_Iprobe(MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&flag,statuses);
//    if(flag==0) putlog("bc:error during transfer=",numlog++);
//    MPI_Testall(req_numR,RecvRequest,&flag,statuses);
//    MPI_Get_count(statuses,MPI_DOUBLE,&cnt);
//......... part of the code omitted here .........
Developer: Infovarius, Project: findif, Lines of code: 101, Source file: pde0.c


Example 9: scount

void parallelComm::sendRecvPacketsV(std::vector<VPACKET> &sndPack, std::vector<VPACKET> &rcvPack)
{
  std::vector<int> scount(2*nsend);
  std::vector<int> rcount(2*nrecv);
  std::vector<MPI_Request> request(2*(nsend+nrecv));
  std::vector<MPI_Status> status(2*(nsend+nrecv));

  for (int i = 0; i < nsend; i++) {
    scount[2*i] = sndPack[i].nints;
    scount[2*i+1] = sndPack[i].nreals;
  }

  int irnum = 0;

  for (int i = 0; i < nrecv; i++)
    MPI_Irecv(&(rcount[2*i]),2,MPI_INT,rcvMap[i],10,scomm,&request[irnum++]);

  for (int i = 0; i < nsend; i++)
    MPI_Isend(&(scount[2*i]),2,MPI_INT,sndMap[i],10,scomm,&request[irnum++]);

  MPI_Waitall(irnum,request.data(),status.data());

  for (int i = 0; i < nrecv; i++)
  {
    rcvPack[i].nints = rcount[2*i];
    rcvPack[i].nreals = rcount[2*i+1];
  }

  irnum = 0;
  for (int i = 0; i < nrecv; i++)
  {
    if (rcvPack[i].nints > 0)
    {
      rcvPack[i].intData.resize(rcvPack[i].nints);
      MPI_Irecv(rcvPack[i].intData.data(),rcvPack[i].nints,MPI_INT,
                rcvMap[i],10,scomm,&request[irnum++]);
    }

    if (rcvPack[i].nreals > 0)
    {
      rcvPack[i].realData.resize(rcvPack[i].nreals);
      MPI_Irecv(rcvPack[i].realData.data(),rcvPack[i].nreals,MPI_DOUBLE,
                rcvMap[i],20,scomm,&request[irnum++]);
    }
  }

  for (int i = 0; i < nsend;i++)
  {
    if (sndPack[i].nints > 0)
    {
      MPI_Isend(sndPack[i].intData.data(),sndPack[i].nints,MPI_INT,
                sndMap[i],10,scomm,&request[irnum++]);
    }
    if (sndPack[i].nreals > 0)
    {
      MPI_Isend(sndPack[i].realData.data(),sndPack[i].nreals,MPI_DOUBLE,
                sndMap[i],20,scomm,&request[irnum++]);
    }
  }
  MPI_Waitall(irnum,request.data(),status.data());
}
Developer: JacobCrabill, Project: tioga, Lines of code: 61, Source file: parallelComm.C


Example 10: main

int main(int argc, char *argv[]) {

  int rank;
  int size;

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD,&size);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
  
  // Build a 4x3 grid of subgrids
  /* 
     +-----+-----+-----+
     |  0  |  1  |  2  |
     |(0,0)|(0,1)|(0,2)|
     +-----+-----+-----+
     |  3  |  4  |  5  |
     |(1,0)|(1,1)|(1,2)|
     +-----+-----+-----+
     |  6  |  7  |  8  |
     |(2,0)|(2,1)|(2,2)|
     +-----+-----+-----+
     |  9  |  10 |  11 |
     |(3,0)|(3,1)|(3,2)|
     +-----+-----+-----+
   */

  MPI_Comm Comm2d;
  int ndim;
  int dim[2];
  int period[2]; // for periodic conditions
  int reorder;
  int coord[2];
  
  // Setup and build cartesian grid
  ndim=2; dim[0]=4; dim[1]=3; period[0]=false; period[1]=false; reorder=true;
  MPI_Cart_create(MPI_COMM_WORLD,ndim,dim,period,reorder,&Comm2d);

  // Every processor prints its rank and coordinates
  MPI_Cart_coords(Comm2d,rank,2,coord);
  printf("P:%2d My coordinates are %d %d\n",rank,coord[0],coord[1]);

  // In Root mode: ask the rank of processor at coordinates (3,1)
  if(rank==0) {
    int id; // the requested processor id
    coord[0]=3;
    coord[1]=1;
    MPI_Cart_rank(Comm2d,coord,&id);
    printf("The processor at coords (%d, %d) has rank %d\n",coord[0],coord[1],id);
  }

  // Every processor builds its neighbour map
  int nbrs[4];
  MPI_Cart_shift(Comm2d,0,1,&nbrs[UP],&nbrs[DOWN]);
  MPI_Cart_shift(Comm2d,1,1,&nbrs[LEFT],&nbrs[RIGHT]);
   
  // prints its neighbours
  if (rank==7) {
    printf("P:%2d has neighbours (u,d,l,r): %2d %2d %2d %2d\n",
	   rank,nbrs[UP],nbrs[DOWN],nbrs[LEFT],nbrs[RIGHT]);
  } 
  // if everything looks good up to here, I'll perform a communication test.
  MPI_Barrier(MPI_COMM_WORLD);

  // Making a communication test
  MPI_Request reqSendRecv[8]; // every processor sends 4 INTs and receives 4 INTs
  MPI_Status status[8];

  int out = rank; // communicate the rank number
  int in[4] = {}; // empty array
  int tag = 2; // tag

  for (int i = 0; i < 4; i++) { // following the neighbours order!!
    MPI_Isend( &out ,1,MPI_INT,nbrs[i],tag,MPI_COMM_WORLD,&reqSendRecv[ i ]);
    MPI_Irecv(&in[i],1,MPI_INT,nbrs[i],tag,MPI_COMM_WORLD,&reqSendRecv[i+4]);
  }
  MPI_Waitall(8,reqSendRecv,status);

  // print the communication output
  printf("P:%2d recived from ngbr(u,d,l,r): %2d %2d %2d %2d\n",
	   rank,in[UP],in[DOWN],in[LEFT],in[RIGHT]);

  MPI_Finalize();

  return 0;
}
Developer: Haider-BA, Project: Matlab2CPP, Lines of code: 85, Source file: main.cpp


Example 11: main

int main( int argc, char *argv[] )
{
    int errs = 0;
    int rank, size, dest, source;
    int i, indices[40];
    MPI_Aint extent;
    int *buf, *bufs[MAX_MSGS];
    MPI_Comm      comm;
    MPI_Datatype  dtype;
    MPI_Request   req[MAX_MSGS];

    MTest_Init( &argc, &argv );

    comm = MPI_COMM_WORLD;
    MPI_Comm_rank( comm, &rank );
    MPI_Comm_size( comm, &size );
    source = 0;
    dest   = size - 1;
    
    /* Setup by creating a blocked datatype that is likely to be processed
       in a piecemeal fashion */
    for (i=0; i<30; i++) {
	indices[i] = i*40;
    }

    /* 30 blocks of size 10 */
    MPI_Type_create_indexed_block( 30, 10, indices, MPI_INT, &dtype );
    MPI_Type_commit( &dtype );
    
    /* Create the corresponding message buffers */
    MPI_Type_extent( dtype, &extent );
    for (i=0; i<MAX_MSGS; i++) {
	bufs[i] = (int *)malloc( extent );
	if (!bufs[i]) {
	    fprintf( stderr, "Unable to allocate buffer %d of size %ld\n", 
		    	i, (long)extent );
	    MPI_Abort( MPI_COMM_WORLD, 1 );
	}
    }
    buf = (int *)malloc( 10 * 30 * sizeof(int) );
    
    MPI_Barrier( MPI_COMM_WORLD );
    if (rank == dest) {
	MTestSleep( 2 );
	for (i=0; i<MAX_MSGS; i++) {
	    MPI_Recv( buf, 10*30, MPI_INT, source, i, comm, 
		      MPI_STATUS_IGNORE );
	}
    }
    else if (rank == source ) {
	for (i=0; i<MAX_MSGS; i++) {
	    MPI_Isend( bufs[i], 1, dtype, dest, i, comm, &req[i] );
	}
	MPI_Waitall( MAX_MSGS, req, MPI_STATUSES_IGNORE );
    }

    MPI_Type_free( &dtype );
    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Developer: abhinavvishnu, Project: matex, Lines of code: 61, Source file: eagerdt.c


Example 12: main

int main( int argc, char **argv )
{
    MPI_Comm comm;
    MPI_Request r[MAX_REQ];
    MPI_Status  s[MAX_REQ];
    int msgsize, maxmsg, root, i, j, size, rank, err = 0, msgcnt, toterr;
    int *sbuf, *rbuf;

    MPI_Init( &argc, &argv );
    
    comm = MPI_COMM_WORLD;

    MPI_Comm_size( comm, &size );
    MPI_Comm_rank( comm, &rank );

    if (size < 2) {
	printf( "This test requires at least 2 processors\n" );
	MPI_Abort( comm, 1 );
    }

    /* First, try large blocking sends to root */
    root = 0;
    
    maxmsg =  MAX_MSG;
    msgsize = 128;
    msgcnt  = MAX_MSG_CNT;
    if (rank == root && verbose) printf( "Blocking sends: " );
    while (msgsize <= maxmsg) {
	if (rank == root) {
	    if (verbose) { printf( "%d ", msgsize ); fflush( stdout ); }
	    rbuf = (int *)malloc( msgsize * sizeof(int) );
	    if (!rbuf) {
		printf( "Could not allocate %d words\n", msgsize );
		MPI_Abort( comm, 1 );
	    }
	    for (i=0; i<size; i++) {
		if (i == rank) continue;
		for (j=0; j<msgcnt; j++) {
		    SetupRdata( rbuf, msgsize );
		    MPI_Recv( rbuf, msgsize, MPI_INT, i, 2*i, comm, s );
		    err += CheckData( rbuf, msgsize, 2*i, s );
		}
	    }
	    free( rbuf );
	}
	else {
	    sbuf = (int *)malloc( msgsize * sizeof(int) );
	    if (!sbuf) {
		printf( "Could not allocate %d words\n", msgsize );
		MPI_Abort( comm, 1 );
	    }
	    SetupData( sbuf, msgsize, 2*rank );
	    for (j=0; j<msgcnt; j++) 
		MPI_Send( sbuf, msgsize, MPI_INT, root, 2*rank, comm );
	    free( sbuf );
	}
	msgsize *= 4;
    }
    if (rank == 0 && verbose) { printf( "\n" ); fflush( stdout ); }

    /* Next, try unexpected messages with Isends */
    msgsize = 128;
    maxmsg  = MAX_MSG;
    msgcnt  = MAX_REQ;
    if (rank == root && verbose) printf( "Unexpected recvs: " );
    while (msgsize <= maxmsg) {
	if (rank == root) {
	    if (verbose) { printf( "%d ", msgsize ); fflush( stdout ); }
	    rbuf = (int *)malloc( msgsize * sizeof(int) );
	    if (!rbuf) {
		printf( "Could not allocate %d words\n", msgsize );
		MPI_Abort( comm, 1 );
	    }
	    MPI_Barrier( comm );
	    for (i=0; i<size; i++) {
		if (i == rank) continue;
		for (j=0; j<msgcnt; j++) {
		    SetupRdata( rbuf, msgsize );
		    MPI_Recv( rbuf, msgsize, MPI_INT, i, 2*i, comm, s );
		    err += CheckData( rbuf, msgsize, 2*i, s );
		}
	    }
	    free( rbuf );
	}
	else {
	    sbuf = (int *)malloc( msgsize * sizeof(int) );
	    if (!sbuf) {
		printf( "Could not allocate %d words\n", msgsize );
		MPI_Abort( comm, 1 );
	    }
	    SetupData( sbuf, msgsize, 2*rank );
	    for (j=0; j<msgcnt; j++) {
		MPI_Isend( sbuf, msgsize, MPI_INT, root, 2*rank, comm, &r[j] );
	    }
	    MPI_Barrier( comm );
	    MPI_Waitall( msgcnt, r, s );
	    free( sbuf );
	}
	msgsize *= 4;
    }
//......... part of the code omitted here .........
Developer: MartinLidh, Project: tddc78, Lines of code: 101, Source file: flood2.c


Example 13: main

int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  char processor_name[128];
  int namelen = 128;
  int buf0[buf_size];
  int buf1[buf_size];
  MPI_Request aReq[2];
  MPI_Status aStatus[2];

  MPI_Status status;

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  MPI_Barrier (MPI_COMM_WORLD);

  if (nprocs < 2) {
      printf ("not enough tasks\n");
  }
  else {
    if (rank == 0) {
      memset (buf0, 0, buf_size);

      MPI_Send_init (buf0, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &aReq[0]);
      MPI_Recv_init (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &aReq[1]);

      MPI_Start (&aReq[0]);
      MPI_Start (&aReq[1]);

      MPI_Waitall (2, aReq, aStatus);

      memset (buf0, 1, buf_size);

      MPI_Startall (2, aReq);

      MPI_Waitall (2, aReq, aStatus);
    }
    else if (rank == 1) {
      memset (buf1, 1, buf_size);

      MPI_Recv_init (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &aReq[0]);
      MPI_Send_init (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &aReq[1]);

      MPI_Start (&aReq[0]);
      MPI_Start (&aReq[1]);

      MPI_Waitall (2, aReq, aStatus);

      memset (buf1, 0, buf_size);

      MPI_Startall (2, aReq);

      MPI_Waitall (2, aReq, aStatus);
    }
  }

  MPI_Barrier (MPI_COMM_WORLD);

  MPI_Request_free (&aReq[0]);
  MPI_Request_free (&aReq[1]);

  MPI_Finalize ();
  printf ("(%d) Finished normally\n", rank);
}
Developer: Julio-Anjos, Project: simgrid, Lines of code: 72, Source file: no-error4.c


Example 14: find_bibw

double find_bibw(int size, int num_pairs, char *s_buf, char *r_buf)
{
/*This function is the bandwidth test that was previously part of main in
  osu_bibw, with the additional modification of being able to stream
  multiple processors per node.  As of the InfiniPath 1.3 release
  the code can also dynamically determine which processes are on which
  node and set things up appropriately.*/
  double t_start = 0.0, t_end = 0.0, t = 0.0, max_time = 0.0, min_time = 0.0;
  double seconds_per_message_size, sum_loops, dloops;
  int i, j, myid, target, skip, loops, min_loops, max_loops, window_size;
  for (i = 0; i < size; i++)
  {
    s_buf[i] = 'a';
    r_buf[i] = 'b';
  }

  if (size < large_message_size)
  {
    skip = skip_small;
    min_loops = min_loops_small;
    max_loops = max_loops_small;
    window_size = window_size_small;
    seconds_per_message_size = seconds_per_message_size_small;
  }
  else
  {
    skip = skip_large;
    min_loops = min_loops_large;
    max_loops = max_loops_large;
    window_size = window_size_large;
    seconds_per_message_size = seconds_per_message_size_large;
  }
  MPI_Comm_rank (MPI_COMM_WORLD, &myid);
  MPI_Barrier (MPI_COMM_WORLD);
  if (pair_list[myid].sender == myid)
  {
    target = pair_list[myid].receiver;
    for (i = 0; i < max_loops + skip; i++)
    {
      if (i == skip)
      {
        MPI_Barrier (MPI_COMM_WORLD);
        t_start = MPI_Wtime();
      }
      for (j = 0; j < window_size; j++)
      {
        MPI_Irecv (r_buf, size, MPI_CHAR, target, TAG_DATA,
                  MPI_COMM_WORLD, recv_request + j);
      }
      for (j = 0; j < window_size; j++)
      {
        MPI_Isend (s_buf, size, MPI_CHAR, target, TAG_DATA,
                  MPI_COMM_WORLD, send_request + j);
      }
      MPI_Waitall (window_size, send_request, reqstat);
      MPI_Waitall (window_size, recv_request, reqstat);
      MPI_Recv (r_buf, 4, MPI_CHAR, target, MPI_ANY_TAG, MPI_COMM_WORLD,
               &reqstat[0]);
      if (reqstat[0].MPI_TAG == TAG_DONE)
      {
        t_end = MPI_Wtime();
	i++;
	break;
      }
    }
    if (t_end == 0.0)
    {
      t_end = MPI_Wtime();
    }
    loops = i - skip;
    t = t_end - t_start;
  }
  else if (pair_list[myid].receiver == myid)
  {
    int tag = TAG_SKIP;
    target = pair_list[myid].sender;
    for (i = 0; i < max_loops + skip; i++)
    {
      if (i == skip)
      {
	tag = TAG_LOOP;
        MPI_Barrier (MPI_COMM_WORLD);
        t_start = MPI_Wtime();
      }
      for (j = 0; j < window_size; j++)
      {
        MPI_Isend (s_buf, size, MPI_CHAR, target, TAG_DATA,
                  MPI_COMM_WORLD, send_request + j);
      }
      for (j = 0; j < window_size; j++)
      {
        MPI_Irecv (r_buf, size, MPI_CHAR, target, TAG_DATA,
                  MPI_COMM_WORLD, recv_request + j);
      }
      MPI_Waitall (window_size, send_request, reqstat);
      MPI_Waitall (window_size, recv_request, reqstat);
      if (tag == TAG_LOOP &&
          (i - skip) >= (min_loops - 1) &&
          MPI_Wtime() - t_start >= seconds_per_message_size)
      {
//......... part of the code omitted here .........
Developer: 01org, Project: opa-mpi-apps, Lines of code: 101, Source file: mpi_multibw.c


Example 15: gmx_pme_recv_coeffs_coords


//......... part of the code omitted here .........
            /* Special case, receive the new parameters and return */
            copy_ivec(cnb.grid_size, *grid_size);
            *ewaldcoeff_q  = cnb.ewaldcoeff_q;
            *ewaldcoeff_lj = cnb.ewaldcoeff_lj;

            status         = pmerecvqxSWITCHGRID;
        }

        if (cnb.flags & PP_PME_RESETCOUNTERS)
        {
            /* Special case, receive the step (set above) and return */
            status = pmerecvqxRESETCOUNTERS;
        }

        if (cnb.flags & (PP_PME_CHARGE | PP_PME_SQRTC6 | PP_PME_SIGMA))
        {
            *atomSetChanged = true;

            /* Receive the send counts from the other PP nodes */
            for (auto &sender : pme_pp->ppRanks)
            {
                if (sender.rankId == pme_pp->peerRankId)
                {
                    sender.numAtoms = cnb.natoms;
                }
                else
                {
                    MPI_Irecv(&sender.numAtoms, sizeof(sender.numAtoms),
                              MPI_BYTE,
                              sender.rankId, eCommType_CNB,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                }
            }
            MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
            messages = 0;

            nat = 0;
            for (const auto &sender : pme_pp->ppRanks)
            {
                nat += sender.numAtoms;
            }

            if (cnb.flags & PP_PME_CHARGE)
            {
                pme_pp->chargeA.resizeWithPadding(nat);
            }
            if (cnb.flags & PP_PME_CHARGEB)
            {
                pme_pp->chargeB.resize(nat);
            }
            if (cnb.flags & PP_PME_SQRTC6)
            {
                pme_pp->sqrt_c6A.resize(nat);
            }
            if (cnb.flags & PP_PME_SQRTC6B)
            {
                pme_pp->sqrt_c6B.resize(nat);
            }
            if (cnb.flags & PP_PME_SIGMA)
            {
                pme_pp->sigmaA.resize(nat);
            }
            if (cnb.flags & PP_PME_SIGMAB)
            {
                pme_pp->sigmaB.resize(nat);
            }
Developer: friforever, Project: gromacs, Lines of code: 67, Source file: pme-only.cpp


Example 16: malloc

void parallelComm::sendRecvPacketsAll(PACKET *sndPack, PACKET *rcvPack)
{
  int i;
  int *sint,*sreal,*rint,*rreal;
  int tag,irnum;
  MPI_Request *request;
  MPI_Status *status;
  //
  sint=(int *)malloc(sizeof(int)*numprocs);
  sreal=(int *) malloc(sizeof(int)*numprocs);
  rint=(int *)malloc(sizeof(int)*numprocs);
  rreal=(int *) malloc(sizeof(int)*numprocs);
  request=(MPI_Request *) malloc(sizeof(MPI_Request)*4*numprocs);
  status=(MPI_Status *) malloc(sizeof(MPI_Status)*4*numprocs);
  //
  for(i=0;i<numprocs;i++){
    sint[i]=sndPack[i].nints;			
    sreal[i]=sndPack[i].nreals;
  }
  //
  MPI_Alltoall(sint,1,MPI_INT,rint,1,MPI_INT,scomm);
  MPI_Alltoall(sreal,1,MPI_INT,rreal,1,MPI_INT,scomm);
  //
  for(i=0;i<numprocs;i++) {
    rcvPack[i].nints=rint[i];
    rcvPack[i].nreals=rreal[i];
  }
  //
  irnum=0;
  for(i=0;i<numprocs;i++)
    {
      if (rcvPack[i].nints > 0) {
	tag=1;
	rcvPack[i].intData=(int *) malloc(sizeof(int)*rcvPack[i].nints);
	MPI_Irecv(rcvPack[i].intData,rcvPack[i].nints,
		  MPI_INT,i,
		  tag,scomm,&request[irnum++]);
      }
      if (rcvPack[i].nreals > 0) {
	tag=2;
	rcvPack[i].realData=(REAL *) malloc(sizeof(REAL)*rcvPack[i].nreals);
	MPI_Irecv(rcvPack[i].realData,rcvPack[i].nreals,
		  MPI_DOUBLE,i,
		  tag,scomm,&request[irnum++]);
      }
    }
  for(i=0;i<numprocs;i++)
    {
      if (sndPack[i].nints > 0){
	tag=1;
	MPI_Isend(sndPack[i].intData,sndPack[i].nints,
		  MPI_INT,i,
		  tag,scomm,&request[irnum++]);
      }
      if (sndPack[i].nreals > 0){
	tag=2;
	MPI_Isend(sndPack[i].realData,sndPack[i].nreals,
		  MPI_DOUBLE,i,
		  tag,scomm,&request[irnum++]);
      }
    }
  MPI_Pcontrol(1, "tioga_pc_waitall");
  MPI_Waitall(irnum,request,status);
  MPI_Pcontrol(-
//......... part of the code omitted here .........
