C++ MPI_Test Function Code Examples


This article collects typical usage examples of the MPI_Test function in C++. If you have been wondering how exactly MPI_Test is used, how to call it, or what working examples look like, the hand-picked code examples below may help.



A total of 20 MPI_Test code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
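Before the project examples, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below) of the typical MPI_Test pattern: post a nonblocking receive with MPI_Irecv, then poll the request with MPI_Test until the completion flag becomes true; real applications do useful work between tests instead of spinning.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, size, buf = 0, flag = 0;
    MPI_Request request;
    MPI_Status  status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size < 2) MPI_Abort(MPI_COMM_WORLD, 1);

    if (rank == 0) {
        buf = 42;
        MPI_Send(&buf, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
    } else if (rank == 1) {
        /* Post a nonblocking receive, then poll it with MPI_Test. */
        MPI_Irecv(&buf, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &request);
        while (!flag) {
            /* A real application would overlap useful computation here. */
            MPI_Test(&request, &flag, &status);
        }
        printf("rank 1 received %d from rank %d\n", buf, status.MPI_SOURCE);
    }

    MPI_Finalize();
    return 0;
}

Build with an MPI compiler wrapper such as mpicc and run with at least two processes, e.g. mpirun -np 2 ./a.out.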

Example 1: chang_robert

//********************************************************************
//
// Chang-Roberts algorithm function
//
// This function implements the Chang-Roberts ring election algorithm
//
// Return Value
// ------------
// void                         
//
//
// Reference Parameters
// --------------------
// rank                     INT                         MPI process rank number
// id                       INT*                        MPI process random id
// size                     INT                         Number of MPI processes
// right                    INT                         The right neighbor of the process
// left                     INT                         The left neighbor of the process
// state_status             INT*                        Whether the process participates in the election
// count                    INT*                        Counts how many phases the process has been through
// election                 INT*                        The election message array
// elected                  INT*                        The elected message array
// requests                 MPI_Request                 The MPI request used for nonblocking messages
// status                   MPI_Status                  The MPI status
// flag                     INT*                        The flag array used by MPI_Test
//
// Local Variables
// ---------------
// NONE
//*******************************************************************
void chang_robert(int rank, int size, int* id, int right, int left, int* count, int* flag, int* election, int* elected, int* state_status, MPI_Request requests, MPI_Status status)
{
	MPI_Irecv(election, 2, MPI_INT, left, 100, MPI_COMM_WORLD, &requests);
    
	MPI_Test(&requests, &flag[left], &status);
	while (!flag[left])
		MPI_Test(&requests, &flag[left], &status);

	if (election[1] == -1)
	{
		(*count)++;

		if (election[0] > *id)
		{
			printf("Phase: %d, Rank:  %d, Identifier: %d, Status: Passive\n", *count+1, rank, *id);
			fflush(stdout);

			if (*state_status == NONPARTICIPANT)
				*state_status = PARTICIPANT;
			MPI_Send(election, 2, MPI_INT, right, 100, MPI_COMM_WORLD);
		}
		else if (election[0] < *id)
		{
			printf("Phase: %d, Rank:  %d, Identifier: %d, Status: Active\n", *count+1, rank, *id);
			fflush(stdout);

			if (*state_status == NONPARTICIPANT)
			{
				*state_status = PARTICIPANT; 
				election[0] = *id;
				MPI_Send(election, 2, MPI_INT, right, 100, MPI_COMM_WORLD);
			}	
		}
		else
		{
            printf("Phase: %d, Rank:  %d, Identifier: %d, Status: Active\n", *count+1, rank, *id);
            fflush(stdout);

			*state_status = NONPARTICIPANT;
			election[1] = rank;
			MPI_Send(election, 2, MPI_INT, right, 100, MPI_COMM_WORLD);	
			fflush(stdout);
		}
	}
	else
	{
		elected[0] = election[0];
		elected[1] = election[1];
		election = NULL;

		if (elected[0] != *id)
		{
			*state_status = NONPARTICIPANT;
			MPI_Send(elected, 2, MPI_INT, right, 100, MPI_COMM_WORLD);
		}
        
		else if(elected[0] == *id)
        {
			printf("I am the Leader and my Rank is: %d and my Identifier is: %d\n", rank, *id);
            fflush(stdout);
        }

	}
}
Developer: succiz, Project: schoolProject, Lines: 94, Source: aosproj4.c


Example 2: MPI_Request

 void peano::applications::faxen::repositories::FaxenBatchJobRepositoryStatePacked::receive(int source, int tag) {
    MPI_Request* sendRequestHandle = new MPI_Request();
    MPI_Status   status;
    int          flag = 0;
    int          result;
    
    clock_t      timeOutWarning   = -1;
    clock_t      timeOutShutdown  = -1;
    bool         triggeredTimeoutWarning = false;
    
    result = MPI_Irecv(
       this, 1, Datatype, source, tag,
       tarch::parallel::Node::getInstance().getCommunicator(), sendRequestHandle
    );
    if ( result != MPI_SUCCESS ) {
       std::ostringstream msg;
       msg << "failed to start to receive peano::applications::faxen::repositories::FaxenBatchJobRepositoryStatePacked from node "
       << source << ": " << tarch::parallel::MPIReturnValueToString(result);
       _log.error( "receive(int)", msg.str() );
    }
    
    result = MPI_Test( sendRequestHandle, &flag, &status );
    while (!flag) {
       if (timeOutWarning==-1)   timeOutWarning   = tarch::parallel::Node::getInstance().getDeadlockWarningTimeStamp();
       if (timeOutShutdown==-1)  timeOutShutdown  = tarch::parallel::Node::getInstance().getDeadlockTimeOutTimeStamp();
       result = MPI_Test( sendRequestHandle, &flag, &status );
       if (result!=MPI_SUCCESS) {
          std::ostringstream msg;
          msg << "testing for finished receive task for peano::applications::faxen::repositories::FaxenBatchJobRepositoryStatePacked failed: "
          << tarch::parallel::MPIReturnValueToString(result);
          _log.error("receive(int)", msg.str() );
       }
       
       // deadlock aspect
       if (
          tarch::parallel::Node::getInstance().isTimeOutWarningEnabled() &&
          (clock()>timeOutWarning) &&
          (!triggeredTimeoutWarning)
       ) {
          tarch::parallel::Node::getInstance().writeTimeOutWarning(
          "peano::applications::faxen::repositories::FaxenBatchJobRepositoryStatePacked",
          "receive(int)", source
          );
          triggeredTimeoutWarning = true;
       }
       if (
          tarch::parallel::Node::getInstance().isTimeOutDeadlockEnabled() &&
          (clock()>timeOutShutdown)
       ) {
          tarch::parallel::Node::getInstance().triggerDeadlockTimeOut(
          "peano::applications::faxen::repositories::FaxenBatchJobRepositoryStatePacked",
          "receive(int)", source
          );
       }
       tarch::parallel::Node::getInstance().receiveDanglingMessages();
    }
    
    delete sendRequestHandle;
    
    _senderRank = status.MPI_SOURCE;
    #ifdef Debug
    _log.debug("receive(int,int)", "received " + toString() ); 
    #endif
    
 }
Developer: p-hoffmann, Project: madpac, Lines: 65, Source: FaxenBatchJobRepositoryState.cpp


Example 3: test_impl

 inline bool test_impl(request_impl* r){
     int f = 0; MPI_Test(&r->mpi_request, &f, MPI_STATUS_IGNORE); return f;
 }
Developer: akosenkov, Project: bind, Lines: 3, Source: channel.hpp


Example 4: PetscCommBuildTwoSided_Ibarrier

static PetscErrorCode PetscCommBuildTwoSided_Ibarrier(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
{
  PetscErrorCode ierr;
  PetscMPIInt    nrecvs,tag,done,i;
  MPI_Aint       lb,unitbytes;
  char           *tdata;
  MPI_Request    *sendreqs,barrier;
  PetscSegBuffer segrank,segdata;
  PetscBool      barrier_started;

  PetscFunctionBegin;
  ierr = PetscCommDuplicate(comm,&comm,&tag);CHKERRQ(ierr);
  ierr = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  tdata = (char*)todata;
  ierr  = PetscMalloc1(nto,&sendreqs);CHKERRQ(ierr);
  for (i=0; i<nto; i++) {
    ierr = MPI_Issend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);CHKERRQ(ierr);
  }
  ierr = PetscSegBufferCreate(sizeof(PetscMPIInt),4,&segrank);CHKERRQ(ierr);
  ierr = PetscSegBufferCreate(unitbytes,4*count,&segdata);CHKERRQ(ierr);

  nrecvs  = 0;
  barrier = MPI_REQUEST_NULL;
  /* MPICH-3.2 sometimes does not create a request in some "optimized" cases.  This is arguably a standard violation,
   * but we need to work around it. */
  barrier_started = PETSC_FALSE;
  for (done=0; !done; ) {
    PetscMPIInt flag;
    MPI_Status  status;
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag,comm,&flag,&status);CHKERRQ(ierr);
    if (flag) {                 /* incoming message */
      PetscMPIInt *recvrank;
      void        *buf;
      ierr      = PetscSegBufferGet(segrank,1,&recvrank);CHKERRQ(ierr);
      ierr      = PetscSegBufferGet(segdata,count,&buf);CHKERRQ(ierr);
      *recvrank = status.MPI_SOURCE;
      ierr      = MPI_Recv(buf,count,dtype,status.MPI_SOURCE,tag,comm,MPI_STATUS_IGNORE);CHKERRQ(ierr);
      nrecvs++;
    }
    if (!barrier_started) {
      PetscMPIInt sent,nsends;
      ierr = PetscMPIIntCast(nto,&nsends);CHKERRQ(ierr);
      ierr = MPI_Testall(nsends,sendreqs,&sent,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
      if (sent) {
#if defined(PETSC_HAVE_MPI_IBARRIER)
        ierr = MPI_Ibarrier(comm,&barrier);CHKERRQ(ierr);
#elif defined(PETSC_HAVE_MPIX_IBARRIER)
        ierr = MPIX_Ibarrier(comm,&barrier);CHKERRQ(ierr);
#endif
        barrier_started = PETSC_TRUE;
        ierr = PetscFree(sendreqs);CHKERRQ(ierr);
      }
    } else {
      ierr = MPI_Test(&barrier,&done,MPI_STATUS_IGNORE);CHKERRQ(ierr);
    }
  }
  *nfrom = nrecvs;
  ierr   = PetscSegBufferExtractAlloc(segrank,fromranks);CHKERRQ(ierr);
  ierr   = PetscSegBufferDestroy(&segrank);CHKERRQ(ierr);
  ierr   = PetscSegBufferExtractAlloc(segdata,fromdata);CHKERRQ(ierr);
  ierr   = PetscSegBufferDestroy(&segdata);CHKERRQ(ierr);
  ierr   = PetscCommDestroy(&comm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
Developer: firedrakeproject, Project: petsc, Lines: 65, Source: mpits.c


Example 5: ADIOI_GEN_irc_poll_fn

static int ADIOI_GEN_irc_poll_fn(void *extra_state, MPI_Status *status)
{
    ADIOI_NBC_Request *nbc_req;
    ADIOI_GEN_IreadStridedColl_vars *rsc_vars = NULL;
    ADIOI_Icalc_others_req_vars     *cor_vars = NULL;
    ADIOI_Iread_and_exch_vars       *rae_vars = NULL;
    ADIOI_R_Iexchange_data_vars     *red_vars = NULL;
    int errcode = MPI_SUCCESS;
    int flag;

    nbc_req = (ADIOI_NBC_Request *)extra_state;

    switch (nbc_req->data.rd.state) {
        case ADIOI_IRC_STATE_GEN_IREADSTRIDEDCOLL:
            rsc_vars = nbc_req->data.rd.rsc_vars;
            errcode = MPI_Testall(2, rsc_vars->req_offset, &flag,
                                  MPI_STATUSES_IGNORE);
            if (errcode == MPI_SUCCESS && flag) {
                ADIOI_GEN_IreadStridedColl_inter(nbc_req, &errcode);
            }
            break;

        case ADIOI_IRC_STATE_GEN_IREADSTRIDEDCOLL_INDIO:
            rsc_vars = nbc_req->data.rd.rsc_vars;
            errcode = MPI_Test(&rsc_vars->req_ind_io, &flag, MPI_STATUS_IGNORE);
            if (errcode == MPI_SUCCESS && flag) {
                /* call the last function */
                ADIOI_GEN_IreadStridedColl_fini(nbc_req, &errcode);
            }
            break;

        case ADIOI_IRC_STATE_ICALC_OTHERS_REQ:
            cor_vars = nbc_req->cor_vars;
            errcode = MPI_Test(&cor_vars->req1, &flag, MPI_STATUS_IGNORE);
            if (errcode == MPI_SUCCESS && flag) {
                ADIOI_Icalc_others_req_main(nbc_req, &errcode);
            }
            break;

        case ADIOI_IRC_STATE_ICALC_OTHERS_REQ_MAIN:
            cor_vars = nbc_req->cor_vars;
            if (cor_vars->num_req2) {
                errcode = MPI_Testall(cor_vars->num_req2, cor_vars->req2,
                                      &flag, MPI_STATUSES_IGNORE);
                if (errcode == MPI_SUCCESS && flag) {
                    ADIOI_Icalc_others_req_fini(nbc_req, &errcode);
                }
            } else {
                ADIOI_Icalc_others_req_fini(nbc_req, &errcode);
            }
            break;

        case ADIOI_IRC_STATE_IREAD_AND_EXCH:
            rae_vars = nbc_req->data.rd.rae_vars;
            errcode = MPI_Test(&rae_vars->req1, &flag, MPI_STATUS_IGNORE);
            if (errcode == MPI_SUCCESS && flag) {
                rae_vars->m = 0;
                ADIOI_Iread_and_exch_l1_begin(nbc_req, &errcode);
            }
            break;

        case ADIOI_IRC_STATE_IREAD_AND_EXCH_L1_BEGIN:
            rae_vars = nbc_req->data.rd.rae_vars;
            errcode = MPI_Test(&rae_vars->req2, &flag, MPI_STATUS_IGNORE);
            if (errcode == MPI_SUCCESS && flag) {
                ADIOI_R_Iexchange_data(nbc_req, &errcode);
            }
            break;

        case ADIOI_IRC_STATE_R_IEXCHANGE_DATA:
            red_vars = nbc_req->data.rd.red_vars;
            errcode = MPI_Test(&red_vars->req1, &flag, MPI_STATUS_IGNORE);
            if (errcode == MPI_SUCCESS && flag) {
                ADIOI_R_Iexchange_data_recv(nbc_req, &errcode);
            }
            break;

        case ADIOI_IRC_STATE_R_IEXCHANGE_DATA_RECV:
            red_vars = nbc_req->data.rd.red_vars;
            errcode = MPI_Testall(red_vars->nprocs_recv, red_vars->req2, &flag,
                                  MPI_STATUSES_IGNORE);
            if (errcode == MPI_SUCCESS && flag) {
                ADIOI_R_Iexchange_data_fill(nbc_req, &errcode);
            }
            break;

        case ADIOI_IRC_STATE_R_IEXCHANGE_DATA_FILL:
            red_vars = nbc_req->data.rd.red_vars;
            errcode = MPI_Testall(red_vars->nprocs_send,
                                  red_vars->req2 + red_vars->nprocs_recv,
                                  &flag, MPI_STATUSES_IGNORE);
            if (errcode == MPI_SUCCESS && flag) {
                ADIOI_R_Iexchange_data_fini(nbc_req, &errcode);
            }
            break;

        default:
            break;
    }

//......... part of the code is omitted here .........
Developer: ORNL, Project: ompi, Lines: 101, Source: ad_iread_coll.c


Example 6: do_compute_and_probe

double
do_compute_and_probe(double seconds, MPI_Request* request)
{
    double t1 = 0.0, t2 = 0.0;
    double test_time = 0.0;
    int num_tests = 0;
    double target_seconds_for_compute = 0.0;
    int flag = 0;
    MPI_Status status;

    if (options.num_probes) {
        target_seconds_for_compute = (double) seconds/options.num_probes;
        if (DEBUG) fprintf(stderr, "setting target seconds to %f\n", (target_seconds_for_compute * 1e6 ));
    } 
    else {
        target_seconds_for_compute = seconds;
        if (DEBUG) fprintf(stderr, "setting target seconds to %f\n", (target_seconds_for_compute * 1e6 ));
    }

#ifdef _ENABLE_CUDA_KERNEL_
    if (options.target == gpu) {
        if (options.num_probes) {
            /* Do the dummy compute on GPU only */
            do_compute_gpu(target_seconds_for_compute);
            num_tests = 0;
            while (num_tests < options.num_probes) {
                t1 = MPI_Wtime();
                MPI_Test(request, &flag, &status);
                t2 = MPI_Wtime();
                test_time += (t2-t1);
                num_tests++;
            }
        }
        else {
            do_compute_gpu(target_seconds_for_compute);
        }
    }
    else if (options.target == both) {
        if (options.num_probes) {
            /* Do the dummy compute on GPU and CPU*/
            do_compute_gpu(target_seconds_for_compute);
            num_tests = 0;
            while (num_tests < options.num_probes) {
                t1 = MPI_Wtime();
                MPI_Test(request, &flag, &status);
                t2 = MPI_Wtime();
                test_time += (t2-t1);
                num_tests++;
                do_compute_cpu(target_seconds_for_compute);
            }
        } 
        else {
            do_compute_gpu(target_seconds_for_compute);
            do_compute_cpu(target_seconds_for_compute);
        }        
    }
    else
#endif
    if (options.target == cpu) {
        if (options.num_probes) {
            num_tests = 0;
            while (num_tests < options.num_probes) {
                do_compute_cpu(target_seconds_for_compute);
                t1 = MPI_Wtime();
                MPI_Test(request, &flag, &status);
                t2 = MPI_Wtime();
                test_time += (t2-t1);
                num_tests++;
            }
        }
        else {
            do_compute_cpu(target_seconds_for_compute);
        }
    }

#ifdef _ENABLE_CUDA_KERNEL_
    if (options.target == gpu || options.target == both) {
        cudaDeviceSynchronize();    
        cudaStreamDestroy(stream);
    }
#endif
    
    return test_time;
}
Developer: allevin, Project: sst-macro, Lines: 84, Source: osu_coll.c


Example 7: MPI_Request

 void tarch::parallel::messages::NodePoolAnswerMessagePacked::send(int destination, int tag, bool exchangeOnlyAttributesMarkedWithParallelise) {
    MPI_Request* sendRequestHandle = new MPI_Request();
    MPI_Status   status;
    int          flag = 0;
    int          result;
    
    clock_t      timeOutWarning   = -1;
    clock_t      timeOutShutdown  = -1;
    bool         triggeredTimeoutWarning = false;
    
    #ifdef Asserts
    _senderRank = -1;
    #endif
    
    if (exchangeOnlyAttributesMarkedWithParallelise) {
       result = MPI_Isend(
          this, 1, Datatype, destination,
          tag, tarch::parallel::Node::getInstance().getCommunicator(),
          sendRequestHandle
       );
       
    }
    else {
       result = MPI_Isend(
          this, 1, FullDatatype, destination,
          tag, tarch::parallel::Node::getInstance().getCommunicator(),
          sendRequestHandle
       );
       
    }
    if  (result!=MPI_SUCCESS) {
       std::ostringstream msg;
       msg << "was not able to send message tarch::parallel::messages::NodePoolAnswerMessagePacked "
       << toString()
       << " to node " << destination
       << ": " << tarch::parallel::MPIReturnValueToString(result);
       _log.error( "send(int)",msg.str() );
    }
    result = MPI_Test( sendRequestHandle, &flag, &status );
    while (!flag) {
       if (timeOutWarning==-1)   timeOutWarning   = tarch::parallel::Node::getInstance().getDeadlockWarningTimeStamp();
       if (timeOutShutdown==-1)  timeOutShutdown  = tarch::parallel::Node::getInstance().getDeadlockTimeOutTimeStamp();
       result = MPI_Test( sendRequestHandle, &flag, &status );
       if (result!=MPI_SUCCESS) {
          std::ostringstream msg;
          msg << "testing for finished send task for tarch::parallel::messages::NodePoolAnswerMessagePacked "
          << toString()
          << " sent to node " << destination
          << " failed: " << tarch::parallel::MPIReturnValueToString(result);
          _log.error("send(int)", msg.str() );
       }
       
       // deadlock aspect
       if (
          tarch::parallel::Node::getInstance().isTimeOutWarningEnabled() &&
          (clock()>timeOutWarning) &&
          (!triggeredTimeoutWarning)
       ) {
          tarch::parallel::Node::getInstance().writeTimeOutWarning(
          "tarch::parallel::messages::NodePoolAnswerMessagePacked",
          "send(int)", destination,tag,1
          );
          triggeredTimeoutWarning = true;
       }
       if (
          tarch::parallel::Node::getInstance().isTimeOutDeadlockEnabled() &&
          (clock()>timeOutShutdown)
       ) {
          tarch::parallel::Node::getInstance().triggerDeadlockTimeOut(
          "tarch::parallel::messages::NodePoolAnswerMessagePacked",
          "send(int)", destination,tag,1
          );
       }
       tarch::parallel::Node::getInstance().receiveDanglingMessages();
    }
    
    delete sendRequestHandle;
    #ifdef Debug
    _log.debug("send(int,int)", "sent " + toString() );
    #endif
    
 }
Developer: Alexander-Shukaev, Project: precice, Lines: 82, Source: NodePoolAnswerMessage.cpp


Example 8: worker

/* slave process */
void worker()
{
    printf("\tProcessor %d at %s begin work..\n", myid, processor_name);

    MPI_Status status;
    MPI_Request handle;

    int recv_flag = 0;
    int count = 0;
    int upload = 0;

    // Post a nonblocking receive for messages from the master process
    MPI_Irecv(selectedGenes, n, MPI_GENETYPE, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &handle);

    while(1)
    {
        // Evolve independently for count generations
        count = generations;
        while(count--)
        {
            select();
            crossover();
            mutate();
            evaluate();
            prefer();
            // If the termination condition is met, send the best path to the master and exit
            if(population[CARDINALITY].fitness <= optimal+margin)
            {
                printf("\tProcessor %d at %s Terminated\n", myid, processor_name);
                MPI_Send(&population[CARDINALITY], 1, MPI_GENETYPE, 0, DONE_TAG, MPI_COMM_WORLD);
                printf("\tProcessor %d at %s exit\n", myid, processor_name);
                return;
            }
            // Test whether a message from the master has arrived
            MPI_Test(&handle, &recv_flag, &status);
            // If a message from the master was received
            if(recv_flag)
            {
                printf("\tProcessor %d at %s recv %d\n", myid, processor_name, status.MPI_TAG);

                // Reset the receive flag
                recv_flag = 0;
                // If DONE_TAG was received, exit the process
                if(status.MPI_TAG == DONE_TAG)
                {
                    printf("\tProcessor %d at %s exit\n", myid, processor_name);
                    return;
                }
                // Otherwise, replace the worst individuals in the population with the received elites
                qsort(population, CARDINALITY, sizeof(GeneType), compare);
                for(int i=1; i <= n; i++)
                    assign(&population[CARDINALITY-i], &selectedGenes[i-1]);
                if(selectedGenes[0].fitness < population[CARDINALITY].fitness)
                    assign(&population[CARDINALITY], &selectedGenes[0]);

                // Post another nonblocking receive for the next master message
                MPI_Irecv(selectedGenes, n, MPI_GENETYPE, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &handle);
            }
        }
        // After count generations without termination, send the best individuals to the master
        select_N_best(n);
        MPI_Send(selectedGenes, n, MPI_GENETYPE, 0, PUT_BETTER_TAG, MPI_COMM_WORLD);
        printf("\tProcessor %d at %s upload %d\n", myid, processor_name, upload++);
    }
}
Developer: elan2wang, Project: parallel_tsp, Lines: 66, Source: application.cpp


Example 9: main


//......... part of the code is omitted here .........
			rank+(rank % x_partition_count ? 0 : x_partition_count)-1, LTRTAG, MPI_COMM_WORLD, req_recv+2);

		MPI_Isend(u[iu]+c(x_length,2,0), 1, right_left_type,\
			rank+((rank+1) % x_partition_count ? 0 : -x_partition_count)+1, LTRTAG, MPI_COMM_WORLD, req_send+3);
		MPI_Irecv(u[iu]+c(x_length+2,2,0), 1, right_left_type,\
			rank+((rank+1) % x_partition_count ? 0 : -x_partition_count)+1, RTLTAG, MPI_COMM_WORLD, req_recv+3);
		//printf("Rank %d has finished nonblocking sendrecvs\n", rank);
		duration_sendrecv += MPI_Wtime() - time_start_sendrecv;
			
		//begin update of internal elements
		time_start_internal = MPI_Wtime();
		#pragma omp parallel
		{
			#pragma omp for
				for(iz=0; iz<z_length; ++iz){ //full z range
					//printf("Iteration %d is assigned to thread %d\n", iz, omp_get_thread_num());
					//disregard both the data waiting to be received (width 2 perimeter) and the ones 
					//who need them to be calculated (another 2 width perimeter)(central elements)
					for(iy=4; iy<y_length; ++iy) 
						for(ix=4; ix<x_length; ++ix)
							update(ix, iy, iz, u[iu], u[1-iu]);
				}
		}
		duration_internal += MPI_Wtime() - time_start_internal;
		// printf("Rank %d has finished internal elements\n", rank);
		// finished update of internal elements
	
		time_start_busywait = MPI_Wtime();
		done_count = 0;                  
		memset(done, 0, 4*sizeof(int)); 
		while(done_count<4){
			for(i=0; i<4; ++i)
				if(!done[i]){
					MPI_Test(req_recv+i, done+i, MPI_STATUS_IGNORE);
					if(done[i]){
						switch(i){
						case 0:
							for(iz=0; iz<z_length; ++iz) //full z range
								for(iy=y_length; iy<y_length+2; ++iy)
									for(ix=2; ix<x_length+2; ++ix)
										update(ix,iy,iz,u[iu],u[1-iu]);//update top row except corners
							break;
						case 1:
							for(iz=0; iz<z_length; ++iz) //full z range
								for(iy=2; iy<4; ++iy)
									for(ix=2; ix<x_length+2; ++ix)
										update(ix,iy,iz,u[iu],u[1-iu]);//update bottom row except corners
							break;
						case 2:
							for(iz=0; iz<z_length; ++iz) //full z range
								for(ix=2; ix<4; ++ix)
									for(iy=2; iy<y_length+2; ++iy)
										update(ix,iy,iz,u[iu],u[1-iu]);//update left column except corners
							break;
						case 3:
							for(iz=0;iz<z_length;iz++) //full z range
								for(ix=x_length;ix<x_length+2;ix++)
									for(iy=2;iy<y_length+2;iy++)
										update(ix,iy,iz,u[iu],u[1-iu]);//update right column except corners
						}
						++done_count;
					}//end if(done[i])
				}//end if(!done[i]).
		}//end while(done_count<4)
		//printf("Rank %d has finished busywait phase\n", rank);
		duration_busywait += MPI_Wtime() - time_start_busywait;
Developer: GeorgePapageorgakis, Project: Atmospheric-model-MPI, Lines: 67, Source: atmosphere.c


Example 10: main


//......... part of the code is omitted here .........
        check(s2.MPI_SOURCE == MPI_PROC_NULL);
        check(s2.MPI_TAG == MPI_ANY_TAG);
        check(s2.MPI_ERROR == MPI_ERR_TOPOLOGY);
        check(msg == MPI_MESSAGE_NULL);
        count = -1;
        MPI_Get_count(&s2, MPI_INT, &count);
        check(count == 0);
    }

    /* test 5: mprobe+imrecv with MPI_PROC_NULL */
    {
        memset(&s1, 0xab, sizeof(MPI_Status));
        memset(&s2, 0xab, sizeof(MPI_Status));
        /* the error field should remain unmodified */
        s1.MPI_ERROR = MPI_ERR_DIMS;
        s2.MPI_ERROR = MPI_ERR_TOPOLOGY;

        msg = MPI_MESSAGE_NULL;
        MPI_Mprobe(MPI_PROC_NULL, 5, MPI_COMM_WORLD, &msg, &s1);
        check(s1.MPI_SOURCE == MPI_PROC_NULL);
        check(s1.MPI_TAG == MPI_ANY_TAG);
        check(s1.MPI_ERROR == MPI_ERR_DIMS);
        check(msg == MPI_MESSAGE_NO_PROC);
        count = -1;
        MPI_Get_count(&s1, MPI_INT, &count);
        check(count == 0);

        rreq = MPI_REQUEST_NULL;
        recvbuf[0] = 0x01234567;
        recvbuf[1] = 0x89abcdef;
        MPI_Imrecv(recvbuf, count, MPI_INT, &msg, &rreq);
        check(rreq != MPI_REQUEST_NULL);
        completed = 0;
        MPI_Test(&rreq, &completed, &s2);       /* single test should always succeed */
        check(completed);
        /* recvbuf should remain unmodified */
        check(recvbuf[0] == 0x01234567);
        check(recvbuf[1] == 0x89abcdef);
        /* should get back "proc null status" */
        check(s2.MPI_SOURCE == MPI_PROC_NULL);
        check(s2.MPI_TAG == MPI_ANY_TAG);
        check(s2.MPI_ERROR == MPI_ERR_TOPOLOGY);
        check(msg == MPI_MESSAGE_NULL);
        count = -1;
        MPI_Get_count(&s2, MPI_INT, &count);
        check(count == 0);
    }

    /* test 6: improbe+mrecv with MPI_PROC_NULL */
    {
        memset(&s1, 0xab, sizeof(MPI_Status));
        memset(&s2, 0xab, sizeof(MPI_Status));
        /* the error field should remain unmodified */
        s1.MPI_ERROR = MPI_ERR_DIMS;
        s2.MPI_ERROR = MPI_ERR_TOPOLOGY;

        msg = MPI_MESSAGE_NULL;
        found = 0;
        MPI_Improbe(MPI_PROC_NULL, 5, MPI_COMM_WORLD, &found, &msg, &s1);
        check(found);
        check(msg == MPI_MESSAGE_NO_PROC);
        check(s1.MPI_SOURCE == MPI_PROC_NULL);
        check(s1.MPI_TAG == MPI_ANY_TAG);
        check(s1.MPI_ERROR == MPI_ERR_DIMS);
        count = -1;
        MPI_Get_count(&s1, MPI_INT, &count);
Developer: jeffhammond, Project: mpich, Lines: 67, Source: mprobe.c


Example 11: master_main


//......... part of the code is omitted here .........
				list_push_tail(job->operations, op);
			}
			idle_stoptime = time(0) + idle_timeout;
		} else {
			link_close(master);
			master = 0;
			sleep(5);
		}
		
		int num_waiting_jobs = itable_size(waiting_jobs);
		int num_unvisited_jobs = itable_size(active_jobs);
		for(i = 1; i < num_workers && (num_unvisited_jobs > 0 || num_waiting_jobs > 0); i++) {
			struct mpi_queue_job *job;
			struct mpi_queue_operation *op;
			int flag = 0;
			UINT64_T jobid;

			if(!workers[i]) {
				if(num_waiting_jobs) {
					itable_firstkey(waiting_jobs);
					itable_nextkey(waiting_jobs, &jobid, (void **)&job);
					itable_remove(waiting_jobs, jobid);
					itable_insert(active_jobs, jobid, job);
					workers[i] = job;
					num_waiting_jobs--;
					job->worker_rank = i;
					job->status = MPI_QUEUE_JOB_READY;
				} else {
					continue;
				}
			} else {
				num_unvisited_jobs--;
				if(workers[i]->status == MPI_QUEUE_JOB_BUSY) {
					MPI_Test(&workers[i]->request, &flag, &workers[i]->mpi_status);
					if(flag) {
						op = list_pop_head(workers[i]->operations);
						if(op->output_length) {
							op->output_buffer = malloc(op->output_length);
							MPI_Recv(op->output_buffer, op->output_length, MPI_BYTE, workers[i]->worker_rank, 0, MPI_COMM_WORLD, &workers[i]->mpi_status);
						}
						
						workers[i]->status = MPI_QUEUE_JOB_READY;

						if(op->type == MPI_QUEUE_OP_WORK || op->result < 0) {
							if(workers[i]->output)
								free(workers[i]->output);
							workers[i]->output = op->output_buffer;
							op->output_buffer = NULL;
							workers[i]->output_length = op->output_length;
							workers[i]->result = op->result;
							if(op->result < 0) {
								workers[i]->status = MPI_QUEUE_JOB_FAILED | op->type;
								op->type = MPI_QUEUE_OP_CLOSE;
								list_push_head(workers[i]->operations, op);
								op = NULL;
							}
						}
						if(op) {
							if(op->buffer)
								free(op->buffer);
							if(op->output_buffer)
								free(op->output_buffer);
							free(op);
						}
					}
				}
Developer: liblit, Project: Murphy, Lines: 67, Source: mpi_queue_worker.c


Example 12: do_master_stuff


//......... part of the code is omitted here .........
    assignment_time[slave-2] = MPI_Wtime();

    // update next_work_node
    if(next_work_node->next == NULL)
    {
      list_end = next_work_node;
    }
    next_work_node=next_work_node->next;

    DEBUG_PRINT(("work sent to slave"));
  }

  // send time array to supervisor
  DEBUG_PRINT(("Sending supervisor first time update"));
  MPI_Send(assignment_time, number_of_slaves-2, MPI_DOUBLE, 1, SUPERVISOR_TAG, MPI_COMM_WORLD);

  // failure id
  int failure_id;

  MPI_Status status_fail, status_res;
  MPI_Request request_fail, request_res;
  int flag_fail = 0, flag_res = 0;

  // receive failure from supervisor as non-blocking recv
  MPI_Irecv(&failure_id, 1, MPI_INT, 1, FAIL_TAG, MPI_COMM_WORLD, &request_fail);

  // receive result from workers as non-blocking recv
  MPI_Irecv(&received_results[num_results_received], f->res_sz, MPI_CHAR, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &request_res);

  // send units of work while haven't received all results
  while(num_results_received < num_work_units)
  {
    // check for flag_fail again
    MPI_Test(&request_fail, &flag_fail, &status_fail);

    // check for flag_res again
    MPI_Test(&request_res, &flag_res, &status_res);
    
    // send work if have failures or got results
    if (flag_fail)
    {
      // change inactive workers array
      //inactive_workers[status_fail.MPI_SOURCE-2] = 1;
      DEBUG_PRINT(("received failure from supervisor, process %d", failure_id));

      // get work_unit that needs to be reassigned
      LinkedList * work_unit = assignment_ptrs[failure_id];

      if(work_unit != NULL)
      {
        DEBUG_PRINT(("Moving assignment at %p to end of the queue", work_unit));
        move_node_to_end(work_unit);
        if(next_work_node == NULL)
        {
          next_work_node = work_unit;
        }
        assert(next_work_node != NULL);
      }
      if(assignment_time[failure_id] == 0.0)
      {
        DEBUG_PRINT(("Failure on idle process %d. WTF??", failure_id));
      }
      if(are_you_down[failure_id] == 1)
      {
        DEBUG_PRINT(("Failure on a process which is already failed. WTF??"));
      }
Developer: kawakita, Project: parallel, Lines: 67, Source: master.c


Example 13: main


//......... part of the code is omitted here .........
    }
    MPI_Barrier(MPI_COMM_WORLD);

    if (I) {
      TRACE_smpi_set_category("I");
      for (i = 0; i < 2 * N; i++) {
        if (i < N) {
          MPI_Send(r, DATATOSENT, MPI_INT, 2, tag, MPI_COMM_WORLD);
        } else {
          MPI_Send(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD);
        }
      }
      MPI_Barrier(MPI_COMM_WORLD);
      for (i = 0; i < 2 * N; i++) {
        if (i < N) {
          MPI_Irecv(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD,
                    &req[i]);
        } else {
          MPI_Irecv(r, DATATOSENT, MPI_INT, 2, tag, MPI_COMM_WORLD,
                    &req[i]);
        }
      }
      MPI_Waitall(2 * N, req, sta);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    if (J) {
      TRACE_smpi_set_category("J");
      for (i = 0; i < N; i++) {
        MPI_Isend(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &req[i]);
      }
      for (i = 0; i < N; i++) {
        int flag;
        MPI_Test(&req[i], &flag, &sta[i]);
      }
      for (i = 0; i < N; i++) {
        MPI_Wait(&req[i], &sta[i]);
      }
    }
    free(r);
/////////////////////////////////////////
////////////////// RANK 1
///////////////////////////////////
  } else if (rank == 1) {
    MPI_Request request;
    MPI_Status status;
    MPI_Request req[N];
    MPI_Status sta[N];
    int *r = (int *) malloc(sizeof(int) * DATATOSENT);

    if (A) {
      TRACE_smpi_set_category("A");
      MPI_Recv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    if (B) {
      TRACE_smpi_set_category("B");
      MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &request);
      MPI_Wait(&request, &status);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    if (C) {
      TRACE_smpi_set_category("C");
      for (i = 0; i < N; i++) {
Developer: Shurakai, Project: SimGrid, Lines: 67, Source: smpi_traced.c


Example 14: complete_something_somehow

static void complete_something_somehow(unsigned int rndnum, int numreqs, MPI_Request reqs[], int *outcount, int indices[])
{
    int i, idx, flag;

#define COMPLETION_CASES (8)
    switch (rand_range(rndnum, 0, COMPLETION_CASES)) {
        case 0:
            MPI_Waitall(numreqs, reqs, MPI_STATUSES_IGNORE);
            *outcount = numreqs;
            for (i = 0; i < numreqs; ++i) {
                indices[i] = i;
            }
            break;

        case 1:
            MPI_Testsome(numreqs, reqs, outcount, indices, MPI_STATUSES_IGNORE);
            if (*outcount == MPI_UNDEFINED) {
                *outcount = 0;
            }
            break;

        case 2:
            MPI_Waitsome(numreqs, reqs, outcount, indices, MPI_STATUSES_IGNORE);
            if (*outcount == MPI_UNDEFINED) {
                *outcount = 0;
            }
            break;

        case 3:
            MPI_Waitany(numreqs, reqs, &idx, MPI_STATUS_IGNORE);
            if (idx == MPI_UNDEFINED) {
                *outcount = 0;
            }
            else {
                *outcount = 1;
                indices[0] = idx;
            }
            break;

        case 4:
            MPI_Testany(numreqs, reqs, &idx, &flag, MPI_STATUS_IGNORE);
            if (idx == MPI_UNDEFINED) {
                *outcount = 0;
            }
            else {
                *outcount = 1;
                indices[0] = idx;
            }
            break;

        case 5:
            MPI_Testall(numreqs, reqs, &flag, MPI_STATUSES_IGNORE);
            if (flag) {
                *outcount = numreqs;
                for (i = 0; i < numreqs; ++i) {
                    indices[i] = i;
                }
            }
            else {
                *outcount = 0;
            }
            break;

        case 6:
            /* select a new random index and wait on it */
            rndnum = gen_prn(rndnum);
            idx = rand_range(rndnum, 0, numreqs);
            MPI_Wait(&reqs[idx], MPI_STATUS_IGNORE);
            *outcount = 1;
            indices[0] = idx;
            break;

        case 7:
            /* select a new random index and wait on it */
            rndnum = gen_prn(rndnum);
            idx = rand_range(rndnum, 0, numreqs);
            MPI_Test(&reqs[idx], &flag, MPI_STATUS_IGNORE);
            *outcount = (flag ? 1 : 0);
            indices[0] = idx;
            break;

        default:
            assert(0);
            break;
    }
#undef COMPLETION_CASES
}
Developer: qingu, Project: WRF-Libraries, Lines: 87, Source: nonblocking3.c


Example 15: async_mpi

enum async_status async_mpi(void* session) {
	async_mpi_session* ses = (async_mpi_session*) session;
	switch(ses->state) {
		case ASYNC_MPI_STATE_SEND_OR_RECV: {
			PRINT_SES(ses);
			if((ses->flags & ASYNC_MPI_FLAG_SHOULD_SEND_COUNT) != 0) {
				int r;
				if((ses->flags & ASYNC_MPI_FLAG_IS_SENDING) != 0) {
					//printf("MPI_Isend(%p[%i], %i, MPI_INT, %i, %i, MPI_COMM_WORLD, %p)\n", &(ses->count), ses->count, 1, ses->peer, 0, ses->request);
					// fprintf(stderr, "Isend(1) to %d\n", ses->peer);
					//r = MPI_Isend(&(ses->count), 2, MPI_INT, ses->peer, 0, ses->comm, ses->request);
					ses->tmp[0] = ses->count;
					ses->tmp[1] = ses->tag;
					r = MPI_Isend(ses->tmp, 2, MPI_INT, ses->peer, 0, ses->comm, ses->request);
				} else {
					//printf("MPI_Irecv(%p[%i], %i, MPI_INT, %i, %i, MPI_COMM_WORLD, %p)\n", &(ses->count), ses->count, 1, ses->peer, 0, ses->request);
					//r = MPI_Irecv(&(ses->count), 2, MPI_INT, ses->peer, 0, ses->comm, ses->request);
					r = MPI_Irecv(ses->tmp, 2, MPI_INT, ses->peer, 0, ses->comm, ses->request);
				}
				if(r != MPI_SUCCESS) {
					ses->state = ASYNC_MPI_STATE_FAILURE;
					return ASYNC_FAILURE;
				}
			} else {
				ses->state = ASYNC_MPI_STATE_SEND_OR_RECV2;
				return async_mpi(ses);
			}
			ses->state = ASYNC_MPI_STATE_TEST;
			// fall-through
		}
		case ASYNC_MPI_STATE_TEST: {
			PRINT_SES(ses);
			int flag;
			MPI_Status status;
			MPI_Test(ses->request, &flag, &status);
			if(!flag) {
				return ASYNC_PENDING;
			}
			if((ses->flags & ASYNC_MPI_FLAG_IS_SENDING) == 0) {
				ses->count = ses->tmp[0];
				ses->tag = ses->tmp[1];
				//printf("count=%i source=%i tag=%i error=%i\n", ses->count, status.MPI_SOURCE, status.MPI_TAG, status.MPI_ERROR);
				ses->peer = status.MPI_SOURCE;
				//ses->tag = status.MPI_TAG;
				if(safe_realloc(&(ses->buf), ses->buf, ses->count) <= 0) {
					ses->state = ASYNC_MPI_STATE_FAILURE;
					return ASYNC_FAILURE;
				}
//				fprintf(stderr, "%s:%u: recv message of size %u\n", __FILE__, __LINE__, ses->count);
			}
			ses->state = ASYNC_MPI_STATE_SEND_OR_RECV2;
			// fall-through
		}
		case ASYNC_MPI_STATE_SEND_OR_RECV2: {
			PRINT_SES(ses);
			int r;
			if((ses->flags & ASYNC_MPI_FLAG_IS_SENDING) != 0) {
				//fprintf(stderr, "MPI_Isend(%p[%i,%i], %i, MPI_BYTE, %i, %i, MPI_COMM_WORLD, %p)\n", ses->buf, ((int*)ses->buf)[0], ((int*)ses->buf)[1], ses->count, ses->peer, ses->tag, ses->request);
				// fprintf(stderr, "Isend(2) to %d\n", ses->peer);
				r = MPI_Isend(ses->buf, ses->count, ses->datatype, ses->peer, ses->tag, ses->comm, ses->request);
			} else {
				//fprintf(stderr, "MPI_Irecv(%p[%i,%i], %i, MPI_BYTE, %i, %i, MPI_COMM_WORLD, %p)\n", ses->buf, ((int*)ses->buf)[0], ((int*)ses->buf)[1], ses->count, ses->peer, ses->tag, ses->request);
				r = MPI_Irecv(ses->buf, ses->count, ses->datatype, ses->peer, ses->tag, ses->comm, ses->request);
			}
			if(r != MPI_SUCCESS) {
				//printf("FAILURE! (from async_mpi)\n");
				ses->state = ASYNC_MPI_STATE_FAILURE;
				return ASYNC_FAILURE;
			}
			ses->state = ASYNC_MPI_STATE_TEST2;
			// fall-through
		}
		case ASYNC_MPI_STATE_TEST2: {
			PRINT_SES(ses);
			int flag = 1;
			MPI_Status status;
			MPI_Test(ses->request, &flag, &status);
			//fprintf(stderr, "MPI_Test(%p[%i,%i], %i, MPI_BYTE, %i, %i, MPI_COMM_WORLD, %p)\n", ses->buf, ((int*)ses->buf)[0], ((int*)ses->buf)[1], ses->count, ses->peer, ses->tag, ses->request);
			if(!flag) {
				return ASYNC_PENDING;
			}
			ses->peer = status.MPI_SOURCE;
			//printf("flag = %i\tSOURCE = %i\tTAG = %i\tERROR = %i\n", flag, status.MPI_SOURCE, status.MPI_TAG, status.MPI_ERROR);
			ses->state = ASYNC_MPI_STATE_SUCCESS;
			// fall-through
		}
		case ASYNC_MPI_STATE_SUCCESS: {
			PRINT_SES(ses);
			return ASYNC_SUCCESS;
		}
		case ASYNC_MPI_STATE_FAILURE: {
			PRINT_SES(ses);
			return ASYNC_FAILURE;
		}
		default: {
			printf("UNHANDLED CASE!\n");
			return ASYNC_FAILURE;
		}
	}
}
Developer: kasei, Project: hexastore, Lines: 100, Source: async_mpi.c


Example 16: main

int main( int argc, char **argv )
{
    MPI_Request r1;
    int         size, rank;
    int         err = 0;
    int         partner, buf[10], flag, idx, index;
    MPI_Status  status;

    MPI_Init( &argc, &argv );

    MPI_Comm_size( MPI_COMM_WORLD, &size );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    
    if (size < 2) {
	printf( "Cancel test requires at least 2 processes\n" );
	MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    /* 
     * Here is the test.  First, we ensure an unsatisfied Irecv:
     *       process 0             process size-1
     *       Sendrecv              Sendrecv
     *       Irecv                    ----
     *       Cancel                   ----
     *       Sendrecv              Sendrecv
     * Next, we confirm receipt before canceling
     *       Irecv                 Send
     *       Sendrecv              Sendrecv
     *       Cancel
     */
    if (rank == 0) {
	partner = size - 1;
	/* Cancel succeeds for wait/waitall */
	MPI_Send_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 );
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	MPI_Start( &r1 );
	MPI_Cancel( &r1 );
	MPI_Wait( &r1, &status );
	MPI_Test_cancelled( &status, &flag ); 
	MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_BOTTOM, 0, MPI_INT, partner, 1,
		      MPI_COMM_WORLD, &status );
	if (!flag) {
	    err++; 
	    printf( "Cancel of a send failed where it should succeed (Wait).\n" );
	}
	//......... the rest of this example is truncated .........
