
C++ MPI_Waitany Function Code Examples


This article collects and summarizes typical usage examples of the C++ MPI_Waitany function. If you have been wondering what exactly MPI_Waitany does, how to call it, or what it looks like in real code, the hand-picked examples below should help.



Listed below are 20 code examples of the MPI_Waitany function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code samples.
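
Before turning to the real-world examples, here is a minimal, self-contained sketch of the canonical MPI_Waitany pattern: rank 0 posts one nonblocking receive per peer rank and then handles each message in completion order rather than rank order. This program was written for this article purely as an illustration; the tag value and the payload (rank * 10) are arbitrary choices, not taken from any of the projects below.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank, nprocs;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    if (rank == 0) {
        int nreq = nprocs - 1;
        MPI_Request *reqs = malloc(nreq * sizeof(MPI_Request));
        int *data = malloc(nreq * sizeof(int));

        /* Post one nonblocking receive per peer rank. */
        for (int i = 0; i < nreq; i++)
            MPI_Irecv(&data[i], 1, MPI_INT, i + 1, 0, MPI_COMM_WORLD, &reqs[i]);

        /* Drain the requests in completion order. MPI_Waitany resets the
           finished entry to MPI_REQUEST_NULL, so calling it nreq times
           processes every request exactly once. */
        for (int i = 0; i < nreq; i++) {
            int index;
            MPI_Status status;
            MPI_Waitany(nreq, reqs, &index, &status);
            printf("received %d from rank %d\n", data[index], status.MPI_SOURCE);
        }

        free(reqs);
        free(data);
    } else {
        int val = rank * 10;
        MPI_Send(&val, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
    }

    MPI_Finalize();
    return 0;
}

Compared with MPI_Waitall, this pattern lets rank 0 start processing the earliest arrival immediately instead of blocking until the slowest sender finishes; that is the property most of the examples below exploit.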

Example 1: MEX_post_recv

void NEKTAR_MEX::MEX_max_fabs(double *val){

  double a,b;
  double *dp;
  int *map;
  int i, j,partner,index;

  MEX_post_recv();


  for (partner = 0; partner < Npartners; ++partner){
    dp = send_buffer[partner];
    map = message_send_map[partner];
    for (i = 0; i < message_size[partner]; ++i)
       dp[i] = val[map[i]];
  }

  MEX_post_send();


  for (partner = 0; partner < Npartners; partner++){
     MPI_Waitany(Npartners,request_recv,&index,MPI_STATUS_IGNORE);
     dp = recv_buffer[index];
     map = message_recv_map[index];
     for (i = 0; i < message_size[index]; ++i){
        j = map[i];
        a = fabs(val[j]);
        b = fabs(dp[i]);
        if (b>a)
           val[j] = dp[i];
     }
  }
  MPI_Waitall(Npartners,request_send,MPI_STATUSES_IGNORE);
}
Developer ID: HerculesCE, Project: ParaView, Lines: 34, Source file: MC3D_nektar_mex.C


Example 2: trace_begin


//......... some of the code has been omitted here .........


    if (!(rank >= n_proc - column)){
        dbg++;
        printf("rank[%d], iter[%d] ------- send [%d] fish to neig[6], \n", rank, iter, n_send_fish[6]);
        // receive from neighbor 6
        MPI_Irecv(receive_fish[6], n_receive_fish, fishtype, rankNeighbor[6], MPI_ANY_TAG, comm, &recvReqArray[6]);
        // send to neighbor 6
        MPI_Isend(send_fish[6], n_send_fish[6], fishtype, rankNeighbor[6], mesTag, comm, &sendReqArray[6]);
    } else {
        recvReqArray[6] = MPI_REQUEST_NULL;
    }

    // sendrecv from neighbors 2 and 5
    if (!(rank < column) && ((rank + 1) % column != 0)) {
        dbg++;
        printf("rank[%d], iter[%d] ------- send [%d] fish to neig[2], \n", rank, iter, n_send_fish[2]);
        // send to neighbor 2
        MPI_Isend(send_fish[2], n_send_fish[2], fishtype, rankNeighbor[2], mesTag, comm, &sendReqArray[2]);
        // receive from neighbor 2
        MPI_Irecv(receive_fish[2], n_receive_fish, fishtype, rankNeighbor[2], MPI_ANY_TAG, comm, &recvReqArray[2]);
    } else {
        recvReqArray[2] = MPI_REQUEST_NULL;
    }

    if (!(rank >= n_proc - column) && (rank % column != 0)) {
        dbg++;
        printf("rank[%d], iter[%d] ------- send [%d] fish to neig[5], \n", rank, iter, n_send_fish[5]);
        // receive from neighbor 5
        MPI_Irecv(receive_fish[5], n_receive_fish, fishtype, rankNeighbor[5], MPI_ANY_TAG, comm, &recvReqArray[5]);
        // send to neighbor 5
        MPI_Isend(send_fish[5], n_send_fish[5], fishtype, rankNeighbor[5], mesTag, comm, &sendReqArray[5]);
    } else {
        recvReqArray[5] = MPI_REQUEST_NULL;
    }

    // sendrecv from neighbors 3 and 4
    if (rank % column != 0) {
        dbg++;
        printf("rank[%d], iter[%d] ------- send [%d] fish to neig[3], \n", rank, iter, n_send_fish[3]);
        // send to neighbor 3
        MPI_Isend(send_fish[3], n_send_fish[3], fishtype, rankNeighbor[3], mesTag, comm, &sendReqArray[3]);
        // receive from neighbor 3
        MPI_Irecv(receive_fish[3], n_receive_fish, fishtype, rankNeighbor[3], MPI_ANY_TAG, comm, &recvReqArray[3]);
    } else {
        recvReqArray[3] = MPI_REQUEST_NULL;
    }

    if ((rank + 1) % column != 0){
        dbg++;
        printf("rank[%d], iter[%d] ------- send [%d] fish to neig[4], \n", rank, iter, n_send_fish[4]);
        // receive from neighbor 4
        MPI_Irecv(receive_fish[4], n_receive_fish, fishtype, rankNeighbor[4], MPI_ANY_TAG, comm, &recvReqArray[4]);
        // send to neighbor 4
        MPI_Isend(send_fish[4], n_send_fish[4], fishtype, rankNeighbor[4], mesTag, comm, &sendReqArray[4]);
    } else {
        recvReqArray[4] = MPI_REQUEST_NULL;
    }
}


//*************************************************************************************/
////////////////////////////////////////////////////////////////////////////////////////
// wait_for_fish()
////////////////////////////////////////////////////////////////////////////////////////
//*************************************************************************************/
void wait_for_fish(MPI_Request* recvReqArray, int* n_fish) {
    // Now wait for any recv to come back.
    int k;
    int arrayIndex;
    int numNeighbor = NUM_NEIGHBOR;

    if ((rank < column) || ((rank + 1) % column == 0) || (rank >= n_proc - column) || (rank % column == 0))
        numNeighbor = 5;
    if (rank == 0 || rank == column - 1 || rank == n_proc - 1 || rank == n_proc - column)
        numNeighbor = 3;

    MPI_Status statusArray[numNeighbor];

    // Zero the count array.
    for (k = 0; k < NUM_NEIGHBOR; k++) {
        n_fish[k] = 0;
    }

    for (k = 0; k < numNeighbor; ++k) {
        MPI_Waitany(NUM_NEIGHBOR, recvReqArray, &arrayIndex, &statusArray[k]);
        //recvReqArray[arrayIndex] = MPI_REQUEST_NULL;

        if (arrayIndex != MPI_UNDEFINED) {
            assert(arrayIndex >= 0 && arrayIndex <= 7);
            dbg--;
            MPI_Get_count(&statusArray[k], fishtype, &n_fish[arrayIndex]);
        }
    }
}
Developer ID: blickly, Project: ptii, Lines: 101, Source file: fish.c


Example 3: spmd_waitany

SEXP spmd_waitany(SEXP R_count, SEXP R_status){
	int index;
	spmd_errhandler(
		MPI_Waitany(INTEGER(R_count)[0], request, &index,
			&status[INTEGER(R_status)[0]]));
	return(AsInt(index));
} /* End of spmd_waitany(). */
Developer ID: RBigData, Project: pbdMPI, Lines: 7, Source file: spmd_wait.c


Example 4: event_loop

static void event_loop(event_queue_t queue,int block){
    while(queue->pending){
        Debug("MPI waiting for %d events",queue->pending);
        int index[queue->pending];
        int completed;
        MPI_Status status[queue->pending];
        if (block) {
            Debug("MPI_Waitsome");
            //int res = MPI_Waitsome(queue->pending,queue->request,&completed,index,status);
            int res = MPI_Waitany(queue->pending,queue->request,index,status);
            completed=1;
            Debug("MPI_Waitsome : %d",res);
            if (res != MPI_SUCCESS) Abort("MPI_Waitsome");
            queue->wait_some_calls++;
            if (completed>1) queue->wait_some_multi++;
            block=0;
        } else {
            Debug("MPI_Testsome");
            //int res = MPI_Testsome(queue->pending,queue->request,&completed,index,status);
            int flag;
            int res = MPI_Testany(queue->pending,queue->request,index,&flag,status);
            completed=flag?1:0;
            Debug("MPI_Testsome : %d",res);
            if (res != MPI_SUCCESS) Abort("MPI_Testsome");
            queue->test_some_calls++;
            if (completed==0) {
                queue->test_some_none++;
                Debug("MPI exit event loop");
                return;
            }
            if (completed>1) queue->test_some_multi++;
        }
        Debug("MPI completion of %d events",completed);
        event_callback cb[completed];
        void *ctx[completed];
        for(int i=0;i<completed;i++){
            cb[i]=queue->cb[index[i]];
            queue->cb[index[i]]=NULL;
            ctx[i]=queue->context[index[i]];
        }
        int k=0;
        for(int i=0;i<queue->pending;i++){
            if (queue->cb[i]) {
                if (k<i) {
                    queue->request[k]=queue->request[i];
                    queue->cb[k]=queue->cb[i];
                    queue->context[k]=queue->context[i];
                }
                k++;
            }
        }
        queue->pending=k;
        for(int i=0;i<completed;i++) {
            Debug("MPI call back");
            cb[i](ctx[i],&status[i]);
            Debug("MPI call back done");
        }
    }
    Debug("MPI exit loop");
}
Developer ID: Meijuh, Project: ltsmin, Lines: 60, Source file: mpi_event_loop.c


Example 5: Zoltan_Comm_Do_Wait

int       Zoltan_Comm_Do_Wait(
ZOLTAN_COMM_OBJ * plan,		/* communication data structure */
int tag,			/* message tag for communicating */
char *send_data,		/* array of data I currently own */
int nbytes,			/* multiplier for sizes */
char *recv_data)		/* array of data I'll own after comm */
{
    MPI_Status status;		/* return from Waitany */
    int       my_proc;		/* processor ID */
    int       self_num;		/* where in send list my_proc appears */
    int       i, j, k, jj;	/* loop counters */

    MPI_Comm_rank(plan->comm, &my_proc);    
    
    /* Wait for messages to arrive & unpack them if necessary. */
    /* Note: since request is in plan, could wait in later routine. */

    if (plan->indices_from == NULL) {	/* No copying required */
        if (plan->nrecvs > 0) {
	    MPI_Waitall(plan->nrecvs, plan->request, plan->status);
	}
    }

    else {			 	/* Need to copy into recv_data. */
	if (plan->self_msg) {		/* Unpack own data before waiting */
	    for (self_num = 0; self_num < plan->nrecvs + plan->self_msg; self_num++) 
		if (plan->procs_from[self_num] == my_proc) break;
	    k = plan->starts_from[self_num];
            if (!plan->sizes_from || plan->sizes_from[self_num]) {
	        for (j = plan->lengths_from[self_num]; j; j--) {
		    memcpy(&recv_data[plan->indices_from[k] * nbytes],
		        &plan->recv_buff[k * nbytes], nbytes);
		    k++;
	        }
	    }
	}
	else
	    self_num = plan->nrecvs;

	for (jj = 0; jj < plan->nrecvs; jj++) {

	    MPI_Waitany(plan->nrecvs, plan->request, &i, &status);

            if (i == MPI_UNDEFINED) break;  /* No more receives */

	    if (i >= self_num) i++;

	    k = plan->starts_from[i];
	    for (j = plan->lengths_from[i]; j; j--) {
		memcpy(&recv_data[plan->indices_from[k] * nbytes],
		    &plan->recv_buff[k * nbytes], nbytes);
		k++;
	    }
	}

	ZOLTAN_FREE(&plan->recv_buff);
    }

    return (ZOLTAN_OK);
}
Developer ID: xunzhang, Project: ESMF_Regridding, Lines: 60, Source file: comm_do.c


Example 6: MPI_Waitany

/*!
    Waits for any receive to complete and returns the associated rank.

    If there are no active receives, the call returns MPI_UNDEFINED.

    \result The rank of the completed receive, or MPI_UNDEFINED if there
    were no active receives.
*/
int DataCommunicator::waitAnyRecv()
{
    // Wait for a receive to complete
    int id;
    MPI_Waitany(m_recvRequests.size(), m_recvRequests.data(), &id, MPI_STATUS_IGNORE);
    if (id == MPI_UNDEFINED) {
        return MPI_UNDEFINED;
    }

    // If the buffer is a double buffer, swap it
    RecvBuffer &recvBuffer = m_recvBuffers[id];
    if (recvBuffer.isDouble()) {
        recvBuffer.swap();
    }

    // Rank of the request
    int rank = m_recvRanks[id];

    // Restart the receive
    if (areRecvsContinuous()) {
        startRecv(rank);
    }

    // Return the rank associated to the completed receive
    return rank;
}
Developer ID: optimad, Project: bitpit, Lines: 35, Source file: communications.cpp


Example 7: main

/* Headers and the buf_size constant are not shown in the original
   snippet; the includes below are what the code needs, and the
   buf_size value is an assumption. */
#include <mpi.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

#define buf_size 128

int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  char processor_name[128];
  int namelen = 128;
  int buf0[buf_size];
  int buf1[buf_size];
  int buf2[buf_size];
  int i, flipbit, done;
  MPI_Status status;

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  MPI_Barrier (MPI_COMM_WORLD);

  if (nprocs < 2)
    {
      printf ("not enough tasks\n");
    }
  else if (rank == 0)
    {
      MPI_Request reqs[3];

      MPI_Irecv (buf0, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &reqs[0]);
      MPI_Irecv (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &reqs[1]);
      MPI_Irecv (buf2, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &reqs[2]);

      for (i = 3; i > 0; i--) {
	MPI_Waitany (i, reqs, &done, &status);

	assert (done == (i - 1));

	/* don't let next one start until after waitany call... */
	MPI_Send (&flipbit, 1, MPI_INT, 1, i, MPI_COMM_WORLD);
      }
    }
  else if (rank == 1)
    {
      memset (buf0, 1, buf_size*sizeof(int));

      for (i = 3; i > 0; i--) {
	MPI_Recv (&flipbit, 1, MPI_INT, 0, i, MPI_COMM_WORLD, &status);
	
	MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
      }
    }

  MPI_Barrier (MPI_COMM_WORLD);

  MPI_Finalize ();
  printf ("(%d) Finished normally\n", rank);
}
Developer ID: simgrid, Project: simgrid, Lines: 60, Source file: waitany-deadlock.c


Example 8: MPI_Waitany_Wrapper

int 	MPI_Waitany_Wrapper(int count, MPI_Request *array_of_requests, int *index, MPI_Status *status)
{
#ifdef COMMPI
  char *me = ft_mpi_routine_names[MPI_Waitany_cntr];
  int ierr;
  FT_INITIALIZE(me, ft_global_ht)
  ft_mpi_cntrs[MPI_Total_cntr]++;
  ft_mpi_cntrs[MPI_Waitany_cntr]++;
#ifdef TERRY_TRACE
  if (terry_trace_flag == TRUE) {
  TERRY_MPI_Waitany_cntr++;
  TRCHKGT(BEFORE_MPI_Waitany, cycle, TERRY_MPI_Waitany_cntr, 0, 0, 0); 
  }
#endif
  ierr = MPI_Waitany(count, array_of_requests, index, status);
#ifdef TERRY_TRACE
  if (terry_trace_flag == TRUE) {
  TRCHKGT(AFTER_MPI_Waitany, cycle, TERRY_MPI_Waitany_cntr, 0, 0, 0); 
  }
#endif
  FT_FINALIZE(me, ft_global_ht, 1)
  return(ierr);
#else
  return(0);
#endif
}
Developer ID: ngholka, Project: patki-power, Lines: 26, Source file: FunctionTimer_mpi_wrappers.c


Example 9: mpi_waitany

void  mpi_waitany (int *count, int *request, int *index, int *status, int *ierr)
{
    int c_index;

    *ierr = MPI_Waitany(*count, request, &c_index, (MPI_Status *)status);
    *index = c_index + 1; /* Fortran counts from one not from zero */
    return;
}
Developer ID: hadimontakhabi, Project: VolpexMPI-HPX, Lines: 8, Source file: volpex_fAPI.c


Example 10: main

/* Header added for completeness. Note that reqs is never initialized
   before the wait; this snippet appears to be a test input for analysis
   tools rather than a working program. */
#include <mpi.h>

int main(int argc, char **argv) {
  int a;

  MPI_Request reqs[2];

  MPI_Waitany(2, reqs, &a, MPI_STATUS_IGNORE);

  return 0;
}
Developer ID: 8l, Project: rose, Lines: 9, Source file: waitany.c


Example 11: MatStashScatterGetMesg_Ref

static PetscErrorCode MatStashScatterGetMesg_Ref(MatStash *stash,PetscMPIInt *nvals,PetscInt **rows,PetscInt **cols,PetscScalar **vals,PetscInt *flg)
{
  PetscErrorCode ierr;
  PetscMPIInt    i,*flg_v = stash->flg_v,i1,i2;
  PetscInt       bs2;
  MPI_Status     recv_status;
  PetscBool      match_found = PETSC_FALSE;

  PetscFunctionBegin;
  *flg = 0; /* When a message is discovered this is reset to 1 */
  /* Return if no more messages to process */
  if (stash->nprocessed == stash->nrecvs) PetscFunctionReturn(0);

  bs2 = stash->bs*stash->bs;
  /* If a matching pair of receives are found, process them, and return the data to
     the calling function. Until then keep receiving messages */
  while (!match_found) {
    if (stash->reproduce) {
      i    = stash->reproduce_count++;
      ierr = MPI_Wait(stash->recv_waits+i,&recv_status);CHKERRQ(ierr);
    } else {
      ierr = MPI_Waitany(2*stash->nrecvs,stash->recv_waits,&i,&recv_status);CHKERRQ(ierr);
    }
    if (recv_status.MPI_SOURCE < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Negative MPI source!");

    /* Now pack the received message into a structure which is usable by others */
    if (i % 2) {
      ierr = MPI_Get_count(&recv_status,MPIU_SCALAR,nvals);CHKERRQ(ierr);

      flg_v[2*recv_status.MPI_SOURCE] = i/2;

      *nvals = *nvals/bs2;
    } else {
      ierr = MPI_Get_count(&recv_status,MPIU_INT,nvals);CHKERRQ(ierr);

      flg_v[2*recv_status.MPI_SOURCE+1] = i/2;

      *nvals = *nvals/2; /* This message has both row indices and col indices */
    }

    /* Check if we have both messages from this proc */
    i1 = flg_v[2*recv_status.MPI_SOURCE];
    i2 = flg_v[2*recv_status.MPI_SOURCE+1];
    if (i1 != -1 && i2 != -1) {
      *rows = stash->rindices[i2];
      *cols = *rows + *nvals;
      *vals = stash->rvalues[i1];
      *flg  = 1;
      stash->nprocessed++;
      match_found = PETSC_TRUE;
    }
  }
  PetscFunctionReturn(0);
}
Developer ID: firedrakeproject, Project: petsc, Lines: 54, Source file: matstash.c


Example 12: mpi_waitany_

void mpi_waitany_(int* count, int* requests, int* index, MPI_Status* status, int* ierr) {
  MPI_Request* reqs;
  int i;

  reqs = xbt_new(MPI_Request, *count);
  for(i = 0; i < *count; i++) {
    reqs[i] = find_request(requests[i]);
  }
  *ierr = MPI_Waitany(*count, reqs, index, status);
  free(reqs);
}
Developer ID: ricardojrdez, Project: simgrid, Lines: 11, Source file: smpi_f77.c


Example 13: MPI_Waitany

/**
  * \brief Waits for any socket to complete its operation (\b irecv or \b isend). Used to process data in arrival order.
  * <b>Unlocking of the socket must be done by the client to free the socket</b>. For performance reasons, tests of the ID may be omitted.
  */
socket_t *socket_seekWait(const channel_t * ch, int direction)
{
	int num;
	MPI_Status status;
	MPI_Waitany(ch->socketsN[direction], ch->requests[direction], &num, &status);
	if(num != MPI_UNDEFINED) {
		socket_t *s = ch->sockets[direction] + num;
		if(!s->locked) error("socket_seekWait: MPI_Waitany pointed to the unlocked socket (cpu = %d, direction = %s).", s->cpu, (s->direction) ? "outcome" : "income");
		return s;
	}
	return NULL;
}
Developer ID: binarycode, Project: mandor2, Lines: 16, Source file: misc_socket.c


Example 14: sync_cells_direct

void sync_cells_direct(void (*copy_func)(int, int, int, int, int, int, vektor),
				void (*pack_func)(msgbuf*, int, int, int, vektor),
				void (*unpack_func)(msgbuf*, int, int, int), int all) {
	int i,k;

	int sendCells;
	int recvCells;
	int totalOperations;

	if (all){
		sendCells = lb_nTotalComms;
		recvCells = lb_nTotalComms;
	} else {
		sendCells = lb_nTotalComms-lb_nForceComms;
		recvCells = lb_nForceComms;
	}
	totalOperations = sendCells + recvCells;

	MPI_Status stat;

	empty_mpi_buffers();

	for (i = 0; i<sendCells;++i){
		/*Send data away*/
		lb_copyCellDataToSend(&lb_send_buf[i], lb_sendCells[i], lb_nSendCells[i], pack_func, lb_commIndexToCpu[i]);
		isend_buf(&lb_send_buf[i], lb_commIndexToCpu[i], &lb_req_send[i]);
		lb_requests[i] = lb_req_send[i];
		lb_request_indices[i] = -1; /* Indicates no processing required */
	}

	for (i = 0; i<recvCells;++i){
		/*Start receiving data*/
		k = (lb_nTotalComms-1)-i;
		irecv_buf(&lb_recv_buf[k], lb_commIndexToCpu[k], &lb_req_recv[k]);
		lb_requests[i+sendCells] = lb_req_recv[k];
		lb_request_indices[i+sendCells] = k;
	}

	/*Receive and process data as soon as something is available*/
	for (i = totalOperations; i>0; i--){
		int finished;
		MPI_Waitany(i, lb_requests, &finished, &stat);
		int ind = lb_request_indices[finished];
		if (ind != -1){
			MPI_Get_count(&stat, REAL, &lb_recv_buf[ind].n);

			lb_unpackCellDataFromBuffer(&lb_recv_buf[ind], lb_commIndexToCpu[ind], (*unpack_func));
		}
		lb_requests[finished] = lb_requests[i-1];
		lb_request_indices[finished] = lb_request_indices[i-1];
	}
}
Developer ID: CBegau, Project: imd, Lines: 52, Source file: imd_loadBalance_direct.c


Example 15: Java_mpi_Request_waitAny

JNIEXPORT jint JNICALL Java_mpi_Request_waitAny(
        JNIEnv *env, jclass clazz, jlongArray requests)
{
    int count = (*env)->GetArrayLength(env, requests);
    jlong* jReq;
    MPI_Request *cReq;
    ompi_java_getPtrArray(env, requests, &jReq, (void***)&cReq);
    int index;
    int rc = MPI_Waitany(count, cReq, &index, MPI_STATUS_IGNORE);
    ompi_java_exceptionCheck(env, rc);
    ompi_java_releasePtrArray(env, requests, jReq, (void**)cReq);
    return index;
}
Developer ID: Dissolubilis, Project: ompi-svn-mirror, Lines: 13, Source file: mpi_Request.c


Example 16: mirrorProcs

//------------------------------------------------------------------------
int mirrorProcs(MPI_Comm comm, std::vector<int>& toProcs, std::vector<int>& fromProcs)
{
  fromProcs.resize(0);
#ifdef FEI_SER
  fromProcs.push_back(0);
  return(0);
#else
  int num_procs = fei::numProcs(comm);
  std::vector<int> tmpIntData(num_procs*3, 0);

  int* buf = &tmpIntData[0];
  int* recvbuf = buf+num_procs;

  for(unsigned i=0; i<toProcs.size(); ++i) {
    buf[toProcs[i]] = 1;
  }

  for(int ii=2*num_procs; ii<3*num_procs; ++ii) {
    buf[ii] = 1;
  }

  CHK_MPI( MPI_Reduce_scatter(buf, &(buf[num_procs]), &(buf[2*num_procs]),
                              MPI_INT, MPI_SUM, comm) );

  int numRecvProcs = buf[num_procs];

  int tag = 11116;
  std::vector<MPI_Request> mpiReqs(numRecvProcs);

  int offset = 0;
  for(int ii=0; ii<numRecvProcs; ++ii) {
    CHK_MPI( MPI_Irecv(&(recvbuf[ii]), 1, MPI_INT, MPI_ANY_SOURCE, tag,
                       comm, &(mpiReqs[offset++])) );
  }

  for(unsigned i=0; i<toProcs.size(); ++i) {
    CHK_MPI( MPI_Send(&(toProcs[i]), 1, MPI_INT, toProcs[i], tag, comm) );
  }

  MPI_Status status;
  for(int ii=0; ii<numRecvProcs; ++ii) {
    int index;
    MPI_Waitany(numRecvProcs, &mpiReqs[0], &index, &status);
    fromProcs.push_back(status.MPI_SOURCE);
  }

  std::sort(fromProcs.begin(), fromProcs.end());

  return(0);
#endif
}
Developer ID: 00liujj, Project: trilinos, Lines: 52, Source file: fei_CommUtils.cpp


Example 17: main

/* Header added for completeness; the original snippet omits it. */
#include <mpi.h>

int main(int argc, char **argv)
{
	MPI_Init(&argc, &argv);

	int rank;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Request r[2];
	int d;
	int d2[2];
	if (rank == 0) {
		MPI_Isend(&d, 1, MPI_INT, 1, 10, MPI_COMM_WORLD, &r[0]);
		MPI_Isend(&d, 1, MPI_INT, 3, 10, MPI_COMM_WORLD, &r[1]);
		int i;
		MPI_Waitany(2, r, &i, MPI_STATUS_IGNORE);
		//MPI_Waitall(2, r, MPI_STATUSES_IGNORE);
		MPI_Recv(&d, 1, MPI_INT, 1, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		MPI_Recv(&d2, 1, MPI_INT, 3, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		MPI_Waitany(2, r, &i, MPI_STATUS_IGNORE);
	}

	if (rank == 1) {
		MPI_Ssend(&d, 1, MPI_INT, 0, 10, MPI_COMM_WORLD);
		MPI_Recv(&d, 1, MPI_INT, 0, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	}

	if (rank == 2) {
		MPI_Ssend(&d, 1, MPI_INT, 3, 20, MPI_COMM_WORLD);
	}

	if (rank == 3) {
		MPI_Recv(&d, 1, MPI_INT, 2, 20, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		MPI_Send(&d, 1, MPI_INT, 0, 10, MPI_COMM_WORLD);
		MPI_Recv(&d, 1, MPI_INT, 0, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	}

	MPI_Finalize();
	return 0;
}
Developer ID: msurkovsky, Project: aislinn, Lines: 38, Source file: waitany2.cpp


Example 18: Java_mpi_Request_waitAnyStatus

JNIEXPORT void JNICALL Java_mpi_Request_waitAnyStatus(
        JNIEnv *env, jclass clazz, jlongArray requests, jobject stat)
{
    int count = (*env)->GetArrayLength(env, requests);
    jlong* jReq;
    MPI_Request *cReq;
    ompi_java_getPtrArray(env, requests, &jReq, (void***)&cReq);
    int index;
    MPI_Status status;
    int rc = MPI_Waitany(count, cReq, &index, &status);
    ompi_java_exceptionCheck(env, rc);
    ompi_java_releasePtrArray(env, requests, jReq, (void**)cReq);
    ompi_java_status_setIndex(env, stat, &status, index);
}
Developer ID: Dissolubilis, Project: ompi-svn-mirror, Lines: 14, Source file: mpi_Request.c


Example 19: remap_2d

void remap_2d(double *in, double *out, double *buf,
	      struct remap_plan_2d *plan)

{
  MPI_Status status;
  int i,isend,irecv;
  double *scratch;

  if (plan->memory == 0)
    scratch = buf;
  else
    scratch = plan->scratch;

/* post all recvs into scratch space */

  for (irecv = 0; irecv < plan->nrecv; irecv++)
    MPI_Irecv(&scratch[plan->recv_bufloc[irecv]],plan->recv_size[irecv],
	      MPI_DOUBLE,plan->recv_proc[irecv],0,
	      plan->comm,&plan->request[irecv]);

/* send all messages to other procs */

  for (isend = 0; isend < plan->nsend; isend++) {
    plan->pack(&in[plan->send_offset[isend]],
	       plan->sendbuf,&plan->packplan[isend]);
    MPI_Send(plan->sendbuf,plan->send_size[isend],MPI_DOUBLE,
	     plan->send_proc[isend],0,plan->comm);
  }       

/* copy in -> scratch -> out for self data */

  if (plan->self) {
    isend = plan->nsend;
    irecv = plan->nrecv;
    plan->pack(&in[plan->send_offset[isend]],
	       &scratch[plan->recv_bufloc[irecv]],
	       &plan->packplan[isend]);
    plan->unpack(&scratch[plan->recv_bufloc[irecv]],
		 &out[plan->recv_offset[irecv]],&plan->unpackplan[irecv]);
  }

/* unpack all messages from scratch -> out */

  for (i = 0; i < plan->nrecv; i++) {
    MPI_Waitany(plan->nrecv,plan->request,&irecv,&status);
    plan->unpack(&scratch[plan->recv_bufloc[irecv]],
		 &out[plan->recv_offset[irecv]],&plan->unpackplan[irecv]);
  }
}
Developer ID: KulMari, Project: Parsek2D_MLMD, Lines: 49, Source file: remap_2d.c


Example 20: exchangeIntData

//------------------------------------------------------------------------
int exchangeIntData(MPI_Comm comm,
                    const std::vector<int>& sendProcs,
                    std::vector<int>& sendData,
                    const std::vector<int>& recvProcs,
                    std::vector<int>& recvData)
{
  if (sendProcs.size() == 0 && recvProcs.size() == 0) return(0);
  if (sendProcs.size() != sendData.size()) return(-1);
#ifndef FEI_SER
  recvData.resize(recvProcs.size());
  std::vector<MPI_Request> mpiReqs;
  mpiReqs.resize(recvProcs.size());

  int tag = 11114;
  MPI_Datatype mpi_dtype = MPI_INT;

  //launch Irecv's for recvData:

  int localProc = fei::localProc(comm);
  int numRecvProcs = recvProcs.size();
  int req_offset = 0;
  for(unsigned i=0; i<recvProcs.size(); ++i) {
    if (recvProcs[i] == localProc) {--numRecvProcs; continue; }

    CHK_MPI( MPI_Irecv(&(recvData[i]), 1, mpi_dtype, recvProcs[i], tag,
                       comm, &mpiReqs[req_offset++]) );
  }

  //send the sendData:

  for(unsigned i=0; i<sendProcs.size(); ++i) {
    if (sendProcs[i] == localProc) continue;

    CHK_MPI( MPI_Send(&(sendData[i]), 1, mpi_dtype,
                      sendProcs[i], tag, comm) );
  }

  //complete the Irecv's:

  for(int ii=0; ii<numRecvProcs; ++ii) {
    int index;
    MPI_Status status;
    CHK_MPI( MPI_Waitany(numRecvProcs, &mpiReqs[0], &index, &status) );
  }

#endif
  return(0);
}
Developer ID: 00liujj, Project: trilinos, Lines: 49, Source file: fei_CommUtils.cpp



Note: The MPI_Waitany function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not republish without permission.

