
C++ MPI_Get_count Function Code Examples


This article collects typical usage examples of the MPI_Get_count function in C++. If you are wondering how exactly MPI_Get_count is used, how to call it, or what real code that uses it looks like, the curated examples below may help.



The following presents a selection of MPI_Get_count code examples drawn from open-source projects, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
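
Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the canonical pattern most of them build on: probe for an incoming message, ask MPI_Get_count how many elements of a given datatype it carries, then allocate exactly that much space and receive. It assumes the job is run with at least two ranks.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        /* Send a message whose length the receiver does not know in advance. */
        int payload[5] = {1, 2, 3, 4, 5};
        MPI_Send(payload, 5, MPI_INT, 1, 0, MPI_COMM_WORLD);
    } else if (rank == 1) {
        MPI_Status status;

        /* Block until a message from rank 0 is pending, without receiving it yet. */
        MPI_Probe(0, 0, MPI_COMM_WORLD, &status);

        /* Ask how many MPI_INT elements the pending message contains. */
        int count = 0;
        MPI_Get_count(&status, MPI_INT, &count);

        /* Allocate exactly enough space and receive. */
        int *buf = malloc(count * sizeof(int));
        MPI_Recv(buf, count, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("rank 1 received %d ints\n", count);
        free(buf);
    }

    MPI_Finalize();
    return 0;
}

Note that MPI_Get_count reports the count in units of the datatype you pass it, so the same MPI_Status can yield different counts for MPI_BYTE and MPI_INT.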

Example 1: MPI_Iprobe

bool Channel::Receive(Message & message) {

	// do a bunch of MPI_Test
	ring.TestBuffers();

	int source = MPI_ANY_SOURCE;
	int tag = MPI_ANY_TAG;

	MPI_Status status;
	int flag = 0;

	/*
	MPI_Iprobe(source, tag, messagingCommunicator, &flag,
			               &status);
	*/

	MPI_Probe(source, tag, messagingCommunicator, &status);

	flag = 1;

	probeOperations ++;

	if(!flag)
		return false;

	source = status.MPI_SOURCE;
	tag = status.MPI_TAG;

	message.SetTag(tag);

	MPI_Datatype datatype = MPI_BYTE;

	int count = 0;
	MPI_Get_count(&status, datatype, &count);

	MPI_Recv(receivingBuffer, count, datatype, source, tag,
			messagingCommunicator, &status);

	int sourceActor = -1;
	int destinationActor = -1;

	memcpy(&sourceActor, receivingBuffer + sourceActorOffset, sizeof(int));
	memcpy(&destinationActor, receivingBuffer + destinationActorOffset, sizeof(int));

	char * content = message.GetContent();
	count -= contentOffset;

	memcpy(content, receivingBuffer + contentOffset, count * sizeof(char));

	message.SetContentSize(count);
	message.SetSource(sourceActor);
	message.SetDestination(destinationActor);

	/*
	cout << "Channel got a message with tag ";
	cout << tag << ", ";
	cout << message.GetSource() << " -> " << source;
	cout << " " << message.GetDestination();
	cout << " -> " << rank << endl;
	*/

	messagesReceived++;
	return true;
}
Developer: sebhtml, Project: BioActors, Lines: 64, Source: Channel.cpp


Example 2: mpi_get_count_

void mpi_get_count_(MPI_Status * status, int* datatype, int *count, int* ierr){
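  // Fortran binding shim: get_datatype appears to be SimGrid's internal lookup that maps the
  // Fortran integer datatype handle to the corresponding C MPI_Datatype; the error code is
  // returned through ierr instead of a C return value.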
  *ierr = MPI_Get_count(status, get_datatype(*datatype), count);
}
Developer: ricardojrdez, Project: simgrid, Lines: 3, Source: smpi_f77.c


Example 3: main

int main(int argc, const char *argv[])
{
    // init mpi
    MPI_Init(NULL,NULL);
    
    // get the number of processes in the communicator
    int rank = 0, world = 0;
    MPI_Comm_size(MPI_COMM_WORLD, &world);
    
    // get rank of this processor
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    
    // send some random numbers
    int number_amount;
    if(rank == 0)
    {
        const int MAX_NUMBERS = 10;
        int numbers[MAX_NUMBERS];

        Random rg;
        number_amount = rg.getRandom(0,MAX_NUMBERS-1);
        
        // make numbers
        for(int i = 0; i < number_amount; i++)
          numbers[i] = rg.getRandom(-10,10);
        // send
        MPI_Send(numbers,number_amount,MPI_INT,1,0,MPI_COMM_WORLD);
        printf("0 sends %d numbers to 1\n",number_amount);
        
        for(int i = 0; i < number_amount; i++)
          printf("%d, ", numbers[i]);
        printf("\n");      

    }else if(rank == 1)
    {
        MPI_Status status;
        
        // probe for message and size
       // MPI_Probe(0,0,MPI_COMM_WORLD,&status);
        
        // check status out to find out how many numbers were actually sent
        //MPI_Get_count(&status, MPI_INT, &number_amount);
        
        //int * numbers = new int[number_amount];
        // receive from 0
        //MPI_Recv(numbers,number_amount,MPI_INT,0,0,MPI_COMM_WORLD, &status);
        void * numbers = NULL;
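        // Note: MPI_ProbeRecv is not part of the MPI standard; it appears to be a
        // project-specific helper that probes, allocates a buffer, and receives into it,
        // replacing the commented-out Probe/Get_count/Recv sequence above.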
        MPI_ProbeRecv(&numbers,MPI_INT,0,0,MPI_COMM_WORLD,&status);

        int number_amount = 0;
        MPI_Get_count(&status,MPI_INT,&number_amount);
        printf("1 received %d numbers from 0. Message source = %d, "
                "tag = %d\n",number_amount, status.MPI_SOURCE, status.MPI_TAG);
        
        // print numbers
        printf("Try to print numbers...\n");
        int * tmp = (int *)numbers;
        for(int i = 0; i < number_amount; i++)
          printf("%d, ", tmp[i]);
        printf("\n"); 
    
        free(numbers);     
    }
    
    // end session
    MPI_Finalize();
}
Developer: Davidsob, Project: mpi-projects, Lines: 67, Source: check_status.cpp


Example 4: mpi_recv

/*
 * Implements a blocking receive operation. 
 *  mpi_recv(?Source,?Tag,-Data).
 */
static YAP_Bool 
mpi_recv(term_t YAP_ARG1,...) {
  YAP_Term t1 = YAP_Deref(YAP_ARG1), 
    t2 = YAP_Deref(YAP_ARG2), 
    t3 = YAP_Deref(YAP_ARG3), 
    t4;
  int tag, orig;
  int len=0;
  MPI_Status status;

  //The third argument (data) must be unbound
  if(!YAP_IsVarTerm(t3)) {
    return false;
  }
  /* The first argument (Source) must be bound to an integer
     (the rank of the source) or left unbound (i.e. any source
     is OK) */
  if (YAP_IsVarTerm(t1)) orig = MPI_ANY_SOURCE;
  else if( !YAP_IsIntTerm(t1) ) return  false;
  else orig = YAP_IntOfTerm(t1);
  
  /* The second argument must be bound to an integer (the tag)
     or left unbound (i.e. any tag is OK) */
  if (YAP_IsVarTerm(t2)) tag = MPI_ANY_TAG;
  else if( !YAP_IsIntTerm(t2) ) return  false;
  else  tag  = YAP_IntOfTerm( t2 );

  CONT_TIMER();
  // probe for the term's size
  if( MPI_CALL(MPI_Probe( orig, tag, MPI_COMM_WORLD, &status )) != MPI_SUCCESS) {
    PAUSE_TIMER();
    return false;
  }
  if( MPI_CALL(MPI_Get_count( &status, MPI_CHAR, &len )) != MPI_SUCCESS || 
      status.MPI_TAG==MPI_UNDEFINED || 
      status.MPI_SOURCE==MPI_UNDEFINED) { 
    PAUSE_TIMER();
    return false;
  }
  //realloc memory buffer
  change_buffer_size((size_t)(len+1));
  BUFFER_LEN=len; 
  // Already know the source from MPI_Probe()
  if( orig == MPI_ANY_SOURCE ) {
    orig = status.MPI_SOURCE;
    if( !YAP_Unify(t1, YAP_MkIntTerm(orig))) {
      PAUSE_TIMER();
      return false;
    }
  }
  // Already know the tag from MPI_Probe()
  if( tag == MPI_ANY_TAG ) {
    tag = status.MPI_TAG;
    if( !YAP_Unify(t2, YAP_MkIntTerm(status.MPI_TAG))) {
      PAUSE_TIMER();
      return false; 
    }
  }
  // Receive the message as a string
  if( MPI_CALL(MPI_Recv( BUFFER_PTR, BUFFER_LEN, MPI_CHAR,  orig, tag,
			 MPI_COMM_WORLD, &status )) != MPI_SUCCESS ) {
    /* Getting in here should never happen; it means that the first
       package (containing size) was sent properly, but there was a glitch with
       the actual content! */
    PAUSE_TIMER();
    return false;
  }
#ifdef DEBUG
  write_msg(__FUNCTION__,__FILE__,__LINE__,"%s(%s,%u, MPI_CHAR,%d,%d)\n",__FUNCTION__,BUFFER_PTR, BUFFER_LEN, orig, tag);
#endif
  MSG_RECV(BUFFER_LEN);
  t4=string2term(BUFFER_PTR,&BUFFER_LEN);
  PAUSE_TIMER();
  return(YAP_Unify(YAP_ARG3,t4));
}
Developer: friguzzi, Project: mpi, Lines: 79, Source: pl_mpi.c


Example 5: do_MPImaster_cluster

/* Master distributes work to slaves */
void do_MPImaster_cluster(WorkPtr work) {
#ifdef MPI
  FILE * checkfile;
  int i,k,nlen,tranche,client,maxlen,err,rlen,maxload,minload;
  int bound[2];
  int checkpoint;
  int round=0;
  int  w=(SEQELTWIDTH/2);
  int  *last_sent, *last_got;
  MPI_Status status;
  int seq_data[2];

  maxlen = MPIBUFRECSZ*num_seqs;
  buffer =  (int32_t *) calloc(maxlen,sizeof(int32_t));  
  last_sent = (int *) calloc(numprocs,sizeof(int));
  last_got  = (int *) calloc(numprocs,sizeof(int));
  bzero(last_sent,sizeof(int)*numprocs);
  bzero(last_got,sizeof(int)*numprocs);
  seq_data[0]=num_seqs;
  seq_data[1]=data_size;

  mpierr(MPI_Bcast(seq_data,2,MPI_INT,0,MPI_COMM_WORLD));

  mpierr(MPI_Bcast(seqInfo,2*num_seqs,MPI_INT,0,MPI_COMM_WORLD));
  mpierr(MPI_Bcast(data,data_size,MPI_SHORT,0,MPI_COMM_WORLD));


  /* divide work up */
  tranche = (num_seqs+8)/16/numprocs;
  /* Now wait for requests from slaves */
  for(i=0; i<num_seqs; i=i+tranche) {
    round++;
    if (round < prog_opts.restore) continue;
    checkpoint = 0;
    bound[0]=i;
    bound[1]=MIN(num_seqs,i+tranche);
    if (prog_opts.checkpoint) bound[1]=-bound[1];

    //printf("Master waits for client answer <%d,%d>\n",bound[0],bound[1]);
    merr =MPI_Recv(&client, 1, 
		   MPI_INT, MPI_ANY_SOURCE, WORKTAG, MPI_COMM_WORLD,&status); 
    mpierr(merr);
    //printf("Master gets note from client %d\n",client);
    if (client < 0) { // client wants to send checkpoint 
      client = -client;
      checkpoint = 1;
    }
    // The round previously sent to the client is done
    last_got[client] = last_sent[client];
    // Record the current round send to the client
    last_sent[client]=round;
    // DBg printf("Master sends new work to client %d\n",client);
    merr =MPI_Send(bound, 2, MPI_INT, client, WORKTAG, MPI_COMM_WORLD);
    mpierr(merr);
    if (prog_opts.checkpoint) {
      if (checkpoint) { 
	printf("Master to receive checkpoint\n");
	err = MPI_Recv(buffer, maxlen, MPI_INT, MPI_ANY_SOURCE, 
		       ANSTAG, MPI_COMM_WORLD, &status);
	mpierr(err);
	MPI_Get_count(&status, MPI_INT, &rlen);
	MergeSlaveClusterTable(buffer,rlen);
	checkfile = fopen(prog_opts.checkpoint,"w");
        for(k=1; k<numprocs; k++)
	  fprintf(checkfile,"%d\n",last_got[k]);
	show_clusters(checkfile);
	fclose(checkfile);
      }
      printf("Slave %d sent tranche %d of %d\n",
	     client,round,(num_seqs+1)/tranche);
    }
  }
  bound[0]=-1;
  for(i=1; i< numprocs; i++) {
    // Tell them no more work
    merr = 
      MPI_Recv(&client, 1, MPI_INT, MPI_ANY_SOURCE, 
	       WORKTAG, MPI_COMM_WORLD,&status); 
    //printf("Client %d told to finish\n",client);
    mpierr(merr);
    if (client < 0)  
      client = -client;   
    mpierr(MPI_Send(bound, 2, MPI_INT, client, WORKTAG, MPI_COMM_WORLD));
    err = MPI_Recv(buffer, maxlen, MPI_INT, MPI_ANY_SOURCE, 
		   ANSTAG, MPI_COMM_WORLD, &status);
    mpierr(err);
    MPI_Get_count(&status, MPI_INT, &rlen);
    MergeSlaveClusterTable(buffer,rlen);
  } 
#endif
}
Developer: shaze, Project: wcdest, Lines: 92, Source: mpistuff.c


Example 6: PCSetUp_Redistribute


//......... part of the code omitted .........
    nsends = 0;
    for (i=rstart; i<rend; i++) {
      if (i < nmap->range[j]) j = 0;
      for (; j<size; j++) {
        if (i < nmap->range[j+1]) {
          if (!nprocs[j]++) nsends++;
          owner[i-rstart] = j;
          break;
        }
      }
    }
    /* inform other processors of number of messages and max length*/
    ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,nprocs,&nrecvs);CHKERRQ(ierr);
    ierr = PetscGatherMessageLengths(comm,nsends,nrecvs,nprocs,&onodes1,&olengths1);CHKERRQ(ierr);
    ierr = PetscSortMPIIntWithArray(nrecvs,onodes1,olengths1);CHKERRQ(ierr);
    recvtotal = 0; for (i=0; i<nrecvs; i++) recvtotal += olengths1[i];

    /* post receives:  rvalues - rows I will own; count - nu */
    ierr = PetscMalloc3(recvtotal,PetscInt,&rvalues,nrecvs,PetscInt,&source,nrecvs,MPI_Request,&recv_waits);CHKERRQ(ierr);
    count  = 0;
    for (i=0; i<nrecvs; i++) {
      ierr  = MPI_Irecv((rvalues+count),olengths1[i],MPIU_INT,onodes1[i],tag,comm,recv_waits+i);CHKERRQ(ierr);
      count += olengths1[i];
    }

    /* do sends:
       1) starts[i] gives the starting index in svalues for stuff going to
       the ith processor
    */
    ierr = PetscMalloc3(cnt,PetscInt,&svalues,nsends,MPI_Request,&send_waits,size,PetscInt,&starts);CHKERRQ(ierr);
    starts[0]  = 0;
    for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
    for (i=0; i<cnt; i++) {
      svalues[starts[owner[i]]++] = rows[i];
    }
    for (i=0; i<cnt; i++) rows[i] = rows[i] - rstart;
    red->drows = drows;
    red->dcnt  = dcnt;
    ierr = PetscFree(rows);CHKERRQ(ierr);

    starts[0] = 0;
    for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
    count = 0;
    for (i=0; i<size; i++) {
      if (nprocs[i]) {
        ierr = MPI_Isend(svalues+starts[i],nprocs[i],MPIU_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
      }
    }

    /*  wait on receives */
    count  = nrecvs;
    slen   = 0;
    while (count) {
      ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
      /* unpack receives into our local space */
      ierr = MPI_Get_count(&recv_status,MPIU_INT,&n);CHKERRQ(ierr);
      slen += n;
      count--;
    }
    if (slen != recvtotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Total message lengths %D not expected %D",slen,recvtotal);

    ierr = ISCreateGeneral(comm,slen,rvalues,PETSC_COPY_VALUES,&red->is);CHKERRQ(ierr);

    /* free up all work space */
    ierr = PetscFree(olengths1);CHKERRQ(ierr);
    ierr = PetscFree(onodes1);CHKERRQ(ierr);
    ierr = PetscFree3(rvalues,source,recv_waits);CHKERRQ(ierr);
    ierr = PetscFree2(nprocs,owner);CHKERRQ(ierr);
    if (nsends) {   /* wait on sends */
      ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
      ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
      ierr = PetscFree(send_status);CHKERRQ(ierr);
    }
    ierr = PetscFree3(svalues,send_waits,starts);CHKERRQ(ierr);
    ierr = PetscLayoutDestroy(&map);CHKERRQ(ierr);
    ierr = PetscLayoutDestroy(&nmap);CHKERRQ(ierr);

    ierr = VecCreateMPI(comm,slen,PETSC_DETERMINE,&red->b);CHKERRQ(ierr);
    ierr = VecDuplicate(red->b,&red->x);CHKERRQ(ierr);
    ierr = MatGetVecs(pc->pmat,&tvec,PETSC_NULL);CHKERRQ(ierr);
    ierr = VecScatterCreate(tvec,red->is,red->b,PETSC_NULL,&red->scatter);CHKERRQ(ierr);
    ierr = VecDestroy(&tvec);CHKERRQ(ierr);
    ierr = MatGetSubMatrix(pc->pmat,red->is,red->is,MAT_INITIAL_MATRIX,&tmat);CHKERRQ(ierr);
    ierr = KSPSetOperators(red->ksp,tmat,tmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
    ierr = MatDestroy(&tmat);CHKERRQ(ierr);
  }

  /* get diagonal portion of matrix */
  ierr = PetscMalloc(red->dcnt*sizeof(PetscScalar),&red->diag);CHKERRQ(ierr);
  ierr = MatGetVecs(pc->pmat,&diag,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatGetDiagonal(pc->pmat,diag);CHKERRQ(ierr);
  ierr = VecGetArrayRead(diag,&d);CHKERRQ(ierr);
  for (i=0; i<red->dcnt; i++) {
    red->diag[i] = 1.0/d[red->drows[i]];
  }
  ierr = VecRestoreArrayRead(diag,&d);CHKERRQ(ierr);
  ierr = VecDestroy(&diag);CHKERRQ(ierr);
  ierr = KSPSetUp(red->ksp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
Developer: erdc-cm, Project: petsc-dev, Lines: 101, Source: redistribute.c


Example 7: scr_swap_files_copy

static int scr_swap_files_copy(
  int have_outgoing, const char* file_send, scr_meta* meta_send, int rank_send, uLong* crc32_send,
  int have_incoming, const char* file_recv, scr_meta* meta_recv, int rank_recv, uLong* crc32_recv,
  MPI_Comm comm)
{
  int rc = SCR_SUCCESS;
  MPI_Request request[2];
  MPI_Status  status[2];

  /* allocate MPI send buffer */
  char *buf_send = NULL;
  if (have_outgoing) {
    buf_send = (char*) scr_align_malloc(scr_mpi_buf_size, scr_page_size);
    if (buf_send == NULL) {
      scr_abort(-1, "Allocating memory: malloc(%ld) errno=%d %s @ %s:%d",
              scr_mpi_buf_size, errno, strerror(errno), __FILE__, __LINE__
      );
      return SCR_FAILURE;
    }
  }

  /* allocate MPI recv buffer */
  char *buf_recv = NULL;
  if (have_incoming) {
    buf_recv = (char*) scr_align_malloc(scr_mpi_buf_size, scr_page_size);
    if (buf_recv == NULL) {
      scr_abort(-1, "Allocating memory: malloc(%ld) errno=%d %s @ %s:%d",
              scr_mpi_buf_size, errno, strerror(errno), __FILE__, __LINE__
      );
      return SCR_FAILURE;
    }
  }

  /* open the file to send: read-only mode */
  int fd_send = -1;
  if (have_outgoing) {
    fd_send = scr_open(file_send, O_RDONLY);
    if (fd_send < 0) {
      scr_abort(-1, "Opening file for send: scr_open(%s, O_RDONLY) errno=%d %s @ %s:%d",
              file_send, errno, strerror(errno), __FILE__, __LINE__
      );
    }
  }

  /* open the file to recv: truncate, write-only mode */
  int fd_recv = -1;
  if (have_incoming) {
    mode_t mode_file = scr_getmode(1, 1, 0);
    fd_recv = scr_open(file_recv, O_WRONLY | O_CREAT | O_TRUNC, mode_file);
    if (fd_recv < 0) {
      scr_abort(-1, "Opening file for recv: scr_open(%s, O_WRONLY | O_CREAT | O_TRUNC, ...) errno=%d %s @ %s:%d",
              file_recv, errno, strerror(errno), __FILE__, __LINE__
      );
    }
  }

  /* exchange file chunks */
  int nread, nwrite;
  int sending = 0;
  if (have_outgoing) {
    sending = 1;
  }
  int receiving = 0;
  if (have_incoming) {
    receiving = 1;
  }
  while (sending || receiving) {
    /* if we are still receiving a file, post a receive */
    if (receiving) {
      MPI_Irecv(buf_recv, scr_mpi_buf_size, MPI_BYTE, rank_recv, 0, comm, &request[0]);
    }

    /* if we are still sending a file, read a chunk, send it, and wait */
    if (sending) {
      nread = scr_read(file_send, fd_send, buf_send, scr_mpi_buf_size);
      if (scr_crc_on_copy && nread > 0) {
        *crc32_send = crc32(*crc32_send, (const Bytef*) buf_send, (uInt) nread);
      }
      if (nread < 0) {
        nread = 0;
      }
      MPI_Isend(buf_send, nread, MPI_BYTE, rank_send, 0, comm, &request[1]);
      MPI_Wait(&request[1], &status[1]);
      if (nread < scr_mpi_buf_size) {
        sending = 0;
      }
    }

    /* if we are still receiving a file,
     * wait on our receive to complete and write the data */
    if (receiving) {
      MPI_Wait(&request[0], &status[0]);
      MPI_Get_count(&status[0], MPI_BYTE, &nwrite);
      if (scr_crc_on_copy && nwrite > 0) {
        *crc32_recv = crc32(*crc32_recv, (const Bytef*) buf_recv, (uInt) nwrite);
      }
      scr_write(file_recv, fd_recv, buf_recv, nwrite);
      if (nwrite < scr_mpi_buf_size) {
        receiving = 0;
      }
//......... part of the code omitted .........
Developer: c-a-h, Project: scr, Lines: 101, Source: scr_cache_rebuild.c


Example 8: set_up_BD


//......... part of the code omitted .........
    if ( info!=0 ) {
        printf ( "Error in closing open streams" );
        return -1;
    }
    if(filenameT != NULL)
        free(filenameT);
    filenameT=NULL;

    //Each process only has calculated some parts of B
    //All parts are collected by the root process (iam==0), which assembles B
    //Each process then receives BT_i and B_j corresponding to the D_ij available to the process
    if ( iam!=0 ) {
        //Each process other than root sends its X' * T and Z' * T to the root process.
        MPI_Send ( & ( XtT_sparse.nonzeros ),1, MPI_INT,0,iam,MPI_COMM_WORLD );
        MPI_Send ( & ( XtT_sparse.pRows[0] ),XtT_sparse.nrows + 1, MPI_INT,0,iam+size,MPI_COMM_WORLD );
        MPI_Send ( & ( XtT_sparse.pCols[0] ),XtT_sparse.nonzeros, MPI_INT,0,iam+2*size,MPI_COMM_WORLD );
        MPI_Send ( & ( XtT_sparse.pData[0] ),XtT_sparse.nonzeros, MPI_DOUBLE,0,iam+3*size,MPI_COMM_WORLD );
        XtT_sparse.clear();
        MPI_Send ( & ( ZtT_sparse.nonzeros ),1, MPI_INT,0,iam,MPI_COMM_WORLD );
        MPI_Send ( & ( ZtT_sparse.pRows[0] ),ZtT_sparse.nrows + 1, MPI_INT,0,4*size + iam,MPI_COMM_WORLD );
        MPI_Send ( & ( ZtT_sparse.pCols[0] ),ZtT_sparse.nonzeros, MPI_INT,0,iam+ 5*size,MPI_COMM_WORLD );
        MPI_Send ( & ( ZtT_sparse.pData[0] ),ZtT_sparse.nonzeros, MPI_DOUBLE,0,iam+6*size,MPI_COMM_WORLD );
        ZtT_sparse.clear();
        //printf("Process %d sent ZtT and XtT\n",iam);

        // And eventually receives the necessary BT_i and B_j
        // Blocking sends are used, which is why the order of the receives is critical depending on the coordinates of the process
        int nonzeroes;
        if (*position >= pcol) {
            MPI_Recv ( &nonzeroes,1,MPI_INT,0,iam,MPI_COMM_WORLD,&status );
            BT_i.allocate ( blocksize*Drows,m+l,nonzeroes );
            MPI_Recv ( & ( BT_i.pRows[0] ),blocksize*Drows + 1, MPI_INT,0,iam + size,MPI_COMM_WORLD,&status );
            int count;
            MPI_Get_count(&status,MPI_INT,&count);
            BT_i.nrows=count-1;
            MPI_Recv ( & ( BT_i.pCols[0] ),nonzeroes, MPI_INT,0,iam+2*size,MPI_COMM_WORLD,&status );
            MPI_Recv ( & ( BT_i.pData[0] ),nonzeroes, MPI_DOUBLE,0,iam+3*size,MPI_COMM_WORLD,&status );

            MPI_Recv ( &nonzeroes,1, MPI_INT,0,iam+4*size,MPI_COMM_WORLD,&status );

            B_j.allocate ( blocksize*Dcols,m+l,nonzeroes );

            MPI_Recv ( & ( B_j.pRows[0] ),blocksize*Dcols + 1, MPI_INT,0,iam + 5*size,MPI_COMM_WORLD,&status );
            MPI_Get_count(&status,MPI_INT,&count);
            B_j.nrows=count-1;
            MPI_Recv ( & ( B_j.pCols[0] ),nonzeroes, MPI_INT,0,iam+6*size,MPI_COMM_WORLD,&status );
            MPI_Recv ( & ( B_j.pData[0] ),nonzeroes, MPI_DOUBLE,0,iam+7*size,MPI_COMM_WORLD,&status );

            //Actually BT_j is sent, so it still needs to be transposed
            B_j.transposeIt ( 1 );
        }
        else {
            MPI_Recv ( &nonzeroes,1, MPI_INT,0,iam+4*size,MPI_COMM_WORLD,&status );

            B_j.allocate ( blocksize*Dcols,m+l,nonzeroes );

            MPI_Recv ( & ( B_j.pRows[0] ),blocksize*Dcols + 1, MPI_INT,0,iam + 5*size,MPI_COMM_WORLD,&status );
            int count;
            MPI_Get_count(&status,MPI_INT,&count);
            B_j.nrows=count-1;

            MPI_Recv ( & ( B_j.pCols[0] ),nonzeroes, MPI_INT,0,iam+6*size,MPI_COMM_WORLD,&status );

            MPI_Recv ( & ( B_j.pData[0] ),nonzeroes, MPI_DOUBLE,0,iam+7*size,MPI_COMM_WORLD,&status );

            B_j.transposeIt ( 1 );
Developer: arnedc, Project: sparsedense, Lines: 67, Source: readdist.cpp


Example 9: update_particle_ghosts


//......... part of the code omitted .........
  }
  else{
    for( i = m_end-m_start+1; i <= m_end-m_start+ghosts; i++ )
      for( j = 0; j <= n_end-n_start+2*ghosts; j++ )
	for( k = 0; k <= o_end-o_start+2*ghosts; k++ ){
	  p = hoc[i][j][k];
	  while( p >= 0 ){
	    particles[start+count]  = particles[p];
	    count++;
	    p = ll[p];
	  }
	}
  }
  
  if( mpi_right == mpi_self ){
    /*
    if( periodic ){
    */
    for( p = 0; p < count; p++ ){
      particles[*n_stored_particles+p]  = particles[start+p];
    }
    *n_stored_particles += count;
    /*
    }
    */
  }
  else{
    MPI_Isend( &particles[start], count, mpi_type_particle, mpi_right, 
	       1, mpi_comm_cart, &mpi_req[0] );
    MPI_Irecv( &particles[*n_stored_particles],
	       *max_particles-*n_stored_particles, mpi_type_particle, mpi_left, 1,
	       mpi_comm_cart, &mpi_req[1] );
    MPI_Waitall( 2, mpi_req, mpi_stat );
    MPI_Get_count( &mpi_stat[1], mpi_type_particle, &mpi_count );
    count = mpi_count;
    *n_stored_particles += count;
  }

  if( *n_stored_particles >= start )
    {
      printf("Buffer too small!\n");
      exit(1);
    }
  
  /* Updating linked list */
  if( count > 0 )
    update_linked_list( ll, hoc, particles,*n_stored_particles-count, *n_stored_particles, 
			x, y, z, m, n, o, 
			m_start-ghosts, m_end+ghosts,
			n_start-ghosts, n_end+ghosts,
			o_start-ghosts, o_end+ghosts );
  
  /* --------------------------------------------------------------------------
   *
   * Sending particles to left neighbor
   *
   * -------------------------------------------------------------------------- */
  
  count = 0;
  for( i = ghosts; i <= 2*ghosts-1; i++ )
    for( j = 0; j <= n_end-n_start+2*ghosts; j++ )
      for( k = 0; k <= o_end-o_start+2*ghosts; k++ ){
	p = hoc[i][j][k];
	while( p >= 0 ){
	  count++;
	  p = ll[p];
Developer: arnolda, Project: scafacos, Lines: 67, Source: particle.c


Example 10: mpi_master


//......... part of the code omitted .........
   * also, catch error states and die later, after clean shutdown of workers.
   * 
   * When a recoverable error occurs, have_work = FALSE, xstatus !=
   * eslOK, and errmsg is set to an informative message. No more
   * errmsg's can be received after the first one. We wait for all the
   * workers to clear their work units, then send them shutdown signals,
   * then finally print our errmsg and exit.
   * 
   * Unrecoverable errors just crash us out with p7_Fail().
   */
  wi = 1;
  while (have_work || nproc_working)
    {
      if (have_work) 
	{
	  if ((status = esl_msa_Read(cfg->afp, &msa)) == eslOK) 
	    {
	      cfg->nali++;  
	      ESL_DPRINTF1(("MPI master read MSA %s\n", msa->name == NULL? "" : msa->name));
	    }
	  else 
	    {
	      have_work = FALSE;
	      if      (status == eslEFORMAT)  { xstatus = eslEFORMAT; snprintf(errmsg, eslERRBUFSIZE, "Alignment file parse error:\n%s\n", cfg->afp->errbuf); }
	      else if (status == eslEINVAL)   { xstatus = eslEFORMAT; snprintf(errmsg, eslERRBUFSIZE, "Alignment file parse error:\n%s\n", cfg->afp->errbuf); }
	      else if (status != eslEOF)      { xstatus = status;     snprintf(errmsg, eslERRBUFSIZE, "Alignment file read unexpectedly failed with code %d\n", status); }
	      ESL_DPRINTF1(("MPI master has run out of MSAs (having read %d)\n", cfg->nali));
	    } 
	}

      if ((have_work && nproc_working == cfg->nproc-1) || (!have_work && nproc_working > 0))
	{
	  if (MPI_Probe(MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &mpistatus) != 0) { MPI_Finalize(); p7_Fail("mpi probe failed"); }
	  if (MPI_Get_count(&mpistatus, MPI_PACKED, &n)                != 0) { MPI_Finalize(); p7_Fail("mpi get count failed"); }
	  wi = mpistatus.MPI_SOURCE;
	  ESL_DPRINTF1(("MPI master sees a result of %d bytes from worker %d\n", n, wi));

	  if (n > bn) {
	    if ((buf = realloc(buf, sizeof(char) * n)) == NULL) p7_Fail("reallocation failed");
	    bn = n; 
	  }
	  if (MPI_Recv(buf, bn, MPI_PACKED, wi, 0, MPI_COMM_WORLD, &mpistatus) != 0) { MPI_Finalize(); p7_Fail("mpi recv failed"); }
	  ESL_DPRINTF1(("MPI master has received the buffer\n"));

	  /* If we're in a recoverable error state, we're only clearing worker results;
           * just receive them, don't unpack them or print them.
           * But if our xstatus is OK, go ahead and process the result buffer.
	   */
	  if (xstatus == eslOK)	
	    {
	      pos = 0;
	      if (MPI_Unpack(buf, bn, &pos, &xstatus, 1, MPI_INT, MPI_COMM_WORLD)     != 0) { MPI_Finalize();  p7_Fail("mpi unpack failed");}
	      if (xstatus == eslOK) /* worker reported success. Get the HMM. */
		{
		  ESL_DPRINTF1(("MPI master sees that the result buffer contains an HMM\n"));
		  if (p7_hmm_MPIUnpack(buf, bn, &pos, MPI_COMM_WORLD, &(cfg->abc), &hmm) != eslOK) {  MPI_Finalize(); p7_Fail("HMM unpack failed"); }
		  ESL_DPRINTF1(("MPI master has unpacked the HMM\n"));

		  if (cfg->postmsafile != NULL) {
		    if (esl_msa_MPIUnpack(cfg->abc, buf, bn, &pos, MPI_COMM_WORLD, &postmsa) != eslOK) { MPI_Finalize(); p7_Fail("postmsa unpack failed");}
		  } 

		  entropy = p7_MeanMatchRelativeEntropy(hmm, bg);
		  if ((status = output_result(cfg, errmsg, msaidx[wi], msalist[wi], hmm, postmsa, entropy)) != eslOK) xstatus = status;

		  esl_msa_Destroy(postmsa); postmsa = NULL;
Developer: TuftsBCB, Project: SMURFBuild, Lines: 67, Source: hmmbuild.c


Example 11: master

void master(const struct fracInfo info)
{
    int ntasks, dest, msgsize;
    struct fracData *work = malloc(sizeof(*work));
    MPI_Status status;
    int rowsTaken = 0;

    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);    

    size_t size = sizeof(unsigned char) * (unsigned long)info.nCols * (unsigned long)info.nRows;
    unsigned char *fractal = (unsigned char*)malloc(size);
    if(!fractal) {
        printf("fractal allocation failed, %lu bytes\n", size);
        exit(1);
    }

    // Allocate buffer
    int membersize, emptysize, fullsize;
    int position;
    char *buffer;
    MPI_Pack_size(1, MPI_INT, MPI_COMM_WORLD, &membersize);
    emptysize = membersize;
    MPI_Pack_size(1, MPI_INT, MPI_COMM_WORLD, &membersize);
    emptysize += membersize;
    MPI_Pack_size(get_max_work_size(&info), MPI_UNSIGNED_CHAR, MPI_COMM_WORLD, &membersize);
    fullsize = emptysize + membersize;

    buffer = malloc(fullsize);    
    if(!buffer) {
        printf("buffer allocation failed, %d bytes\n",fullsize);
        exit(1);
    }

    // Send initial data
    for (dest = 1; dest < ntasks; dest++) {
        //Get next work item
        get_work(&info,&rowsTaken,work);
        
        //pack and send work       
        position = 0;
        MPI_Pack(&work->startRow,1,MPI_INT,buffer,emptysize,&position,MPI_COMM_WORLD);
        MPI_Pack(&work->nRows,1,MPI_INT,buffer,emptysize,&position,MPI_COMM_WORLD);
        MPI_Send(buffer, position, MPI_PACKED, dest, WORKTAG, MPI_COMM_WORLD);
    }

    printf("sent initial work\n");
    //Get next work item
    get_work(&info,&rowsTaken,work);
    int startRow, nRows;
    while(work->nRows) {
        // Receive and unpack work
        MPI_Recv(buffer, fullsize, MPI_PACKED, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        position = 0;
        MPI_Get_count(&status, MPI_PACKED, &msgsize);
        MPI_Unpack(buffer, msgsize, &position, &startRow,1,MPI_INT,MPI_COMM_WORLD);
        MPI_Unpack(buffer, msgsize, &position, &nRows,1,MPI_INT,MPI_COMM_WORLD);    
        MPI_Unpack(buffer, msgsize, &position, fractal+((unsigned long)startRow*info.nCols), nRows*info.nCols, MPI_UNSIGNED_CHAR, MPI_COMM_WORLD);

        //pack and send work       
        position = 0;
        MPI_Pack(&work->startRow,1,MPI_INT,buffer,emptysize,&position,MPI_COMM_WORLD);
        MPI_Pack(&work->nRows,1,MPI_INT,buffer,emptysize,&position,MPI_COMM_WORLD);
        MPI_Send(buffer, position, MPI_PACKED, status.MPI_SOURCE, WORKTAG, MPI_COMM_WORLD);

        //Get next work item
        get_work(&info,&rowsTaken,work);

        if(status.MPI_SOURCE==1)
            printf("%d\n",work->startRow);
    }

    // Receive all remaining work
    for (dest = 1; dest < ntasks; dest++) {
        // Receive and unpack work
        MPI_Recv(buffer, fullsize, MPI_PACKED, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        position = 0;
        MPI_Get_count(&status, MPI_PACKED, &msgsize);

        MPI_Unpack(buffer, msgsize, &position, &startRow,1,MPI_INT,MPI_COMM_WORLD);
        MPI_Unpack(buffer, msgsize, &position, &nRows,1,MPI_INT,MPI_COMM_WORLD);
        // unpack pixel data
        MPI_Unpack(buffer, msgsize, &position, fractal+((unsigned long)startRow*info.nCols), nRows*info.nCols, MPI_UNSIGNED_CHAR, MPI_COMM_WORLD);

        // Kill slaves
        MPI_Send(0,0,MPI_INT,dest,DIETAG,MPI_COMM_WORLD);
    }

    free(work);
    free(buffer);

    //Save image as TIFF
    unsigned int nx = info.nCols;
    unsigned int ny = info.nRows;
    char fileName[] = "/home/pi/Mandelbrot/Mandelbrot.tiff";
    TIFF *out = TIFFOpen(fileName, "w");
    uint32 tileDim = 256;
    tsize_t tileBytes = tileDim*tileDim*sizeof(char);
    unsigned char *buf = (unsigned char *)_TIFFmalloc(tileBytes);
    char description[1024];
    snprintf(description, sizeof(description),"xStart:%f yStart:%f spacing:%f AAx:%d",info.xStart,info.yStart,info.spacing,info.AA);
//......... part of the code omitted .........
Developer: AdamSimpson, Project: Mandelbrot, Lines: 101, Source: mandel-DEM-BD-AA-hybrid.c


Example 12: IrcvBuf

int IrcvBuf(intbuf_t* bufs, intbuf_t serversbuf, int* w, MPI_Comm comm, int* tag, int* size){
  // if there is a send request pending,
  //     receive UPDATE_END and UPDATE_MSG
  // else
  //     if there's processing pending
  //        receive UPDATE_END, UPDATE_MSG, NEWIDS_END
  //     else 
  //        receive UPDATE_END, UPDATE_MSG, NEWIDS_END, NEWIDS_MSG 

  intbuf_t bb = NULL;
  MPI_Status status; 
  int flag;
  //  Warning(info,"$$$$$$$$$ (W%d) SP: %d   PP: %d  ", mpi_me, send_pending, bufnewids_pending);
  if (send_pending) {
      MPI_Iprobe( MPI_ANY_SOURCE, UPDATE_END, comm, &flag, &status );
      if (!flag) 
	MPI_Iprobe( MPI_ANY_SOURCE, UPDATE_MSG, comm, &flag, &status );
      //   if (!flag) printf("\n...%d probing...", mpi_me);
  }
  else { 
    if (bufnewids_pending) {
      MPI_Iprobe( MPI_ANY_SOURCE, UPDATE_END, comm, &flag, &status );
      if (!flag) 
	MPI_Iprobe( MPI_ANY_SOURCE, UPDATE_MSG, comm, &flag, &status );
      if (!flag) 
	MPI_Iprobe( MPI_ANY_SOURCE, NEWIDS_END, comm, &flag, &status );
      //      if (!flag) printf("\n...%d PROBING...", mpi_me);
    }
    else {
       MPI_Probe(MPI_ANY_SOURCE,MPI_ANY_TAG,comm, &status);
      //MPI_Iprobe(MPI_ANY_SOURCE,MPI_ANY_TAG,comm, &flag, &status);
      flag = 1;
    }
  };


  if (!flag) return 0;
  
  MPI_Get_count(&status, MPI_INT, size);
  *w = status.MPI_SOURCE;
  *tag = status.MPI_TAG;
  
   if ((*tag == NEWIDS_MSG) || (*tag == NEWIDS_END))
     bb = serversbuf;
   else 
     bb = bufs[*w];
   

   MPI_Recv(bb->b, *size, 
	    MPI_INT, status.MPI_SOURCE, 
	    *tag, comm, &status);

   bb->index = 0;
   bb->size = *size;

#ifdef DEBUG  
   Warning(info,"\n      %d IrcvBuf %s (size %d) from %d", 
	   mpi_me, tname[*tag], *size, *w);
#endif

   return 1;
}
Developer: graydon, Project: ltsmin, Lines: 62, Source: bufs.c


Example 13: main


//......... part of the code omitted .........
  bottomBound = topBound + rowSep;
  leftBound = columnSep * columnIndex;
  rightBound = leftBound + columnSep;

  assert(n_proc % row == 0);

  // Add n_proc # of arrays each holding ID of local fishes
  fish_t fishProc[n_proc][n_fish];
  int n_fish_proc[n_proc];
  int k;
  for (k = 0; k < n_proc; k++) n_fish_proc[k] = 0;
  //////////////////////////////////

  init_fish (rank, fish_off, n_fish_split, row, column, fishProc, n_fish_proc);

  // distribute initial conditions to all processes
  if (rank == 0) {
    local_fish = fishProc[0];
    n_local_fish = n_fish_proc[0];

    // Functionality of MPI_Scatterv is done here with Isends
    //MPI_Request request[n_proc-1];
    
	int mesTag = 0;
    MPI_Request *req;
    for (k = 1; k < n_proc; ++k) {
	//printf("n_fish_proc[%d], %d\n", k, n_fish_proc[k]);
	    MPI_Isend(fishProc[k], n_fish_proc[k], fishtype, k, mesTag, comm, req);
    }
  } else {
    MPI_Status status;
    // Processors of rank != 0 receives.
    MPI_Recv( local_fish, n_fish, fishtype, 0, MPI_ANY_TAG, comm, &status);
    MPI_Get_count(&status, fishtype, &n_local_fish);
  }
  printf("rank[%d], n_local_fish = %d\n", rank, n_local_fish);
  ///*
  //MPI_Scatterv (fish, n_fish_split, fish_off, fishtype,
  //		local_fish, n_local_fish, fishtype,
  //		0, comm);
  //*/

#ifdef TRACE_WITH_VAMPIR
    tracingp = 1;
    VT_traceon();
#endif

  start_mpi_timer(&total_timer);



  for (output_time = 0.0, curr_time = 0.0, steps = 0;
       curr_time <= end_time && steps < max_steps;
       curr_time += dt, ++steps) {

#ifdef TRACE_WITH_VAMPIR
    if (steps >= STEPS_TO_TRACE) {
      tracingp = 0; VT_traceoff();
    }
#endif

    trace_begin(TRACE_FISH_GATHER);
    start_mpi_timer (&gather_timer);
    start_mpi_timer (&mpi_timer);
    /* 
       Pull in all the fish.  Obviously, this is not a good idea.
Developer: blickly, Project: ptii, Lines: 67, Source: fish.c


Example 14: m


//......... part of the code omitted .........
      myParts[cnt++] = it->first;

    // Step 1: Find out how many processors send me data
    // partsIndexBase starts from zero, as the processors ids start from zero
    GO partsIndexBase = 0;
    RCP<Map>    partsIHave  = MapFactory   ::Build(lib, Teuchos::OrdinalTraits<Xpetra::global_size_t>::invalid(), myParts(), partsIndexBase, comm);
    RCP<Map>    partsIOwn   = MapFactory   ::Build(lib,                                                 numProcs,  myPart(), partsIndexBase, comm);
    RCP<Export> partsExport = ExportFactory::Build(partsIHave, partsIOwn);

    RCP<GOVector> partsISend    = Xpetra::VectorFactory<GO, LO, GO, NO>::Build(partsIHave);
    RCP<GOVector> numPartsIRecv = Xpetra::VectorFactory<GO, LO, GO, NO>::Build(partsIOwn);
    if (numSend) {
      ArrayRCP<GO> partsISendData = partsISend->getDataNonConst(0);
      for (int i = 0; i < numSend; i++)
        partsISendData[i] = 1;
    }
    (numPartsIRecv->getDataNonConst(0))[0] = 0;

    numPartsIRecv->doExport(*partsISend, *partsExport, Xpetra::ADD);
    numRecv = (numPartsIRecv->getData(0))[0];

    // Step 2: Get my GIDs from everybody else
    MPI_Datatype MpiType = MpiTypeTraits<GO>::getType();
    int msgTag = 12345;  // TODO: use Comm::dup for all internal messaging

    // Post sends
    Array<MPI_Request> sendReqs(numSend);
    cnt = 0;
    for (typename map_type::iterator it = sendMap.begin(); it != sendMap.end(); it++)
      MPI_Isend(static_cast<void*>(it->second.getRawPtr()), it->second.size(), MpiType, Teuchos::as<GO>(it->first), msgTag, *rawMpiComm, &sendReqs[cnt++]);

    map_type recvMap;
    size_t totalGIDs = myGIDs.size();
    for (int i = 0; i < numRecv; i++) {
      MPI_Status status;
      MPI_Probe(MPI_ANY_SOURCE, msgTag, *rawMpiComm, &status);

      // Get rank and number of elements from status
      int fromRank = status.MPI_SOURCE, count;
      MPI_Get_count(&status, MpiType, &count);

      recvMap[fromRank].resize(count);
      MPI_Recv(static_cast<void*>(recvMap[fromRank].getRawPtr()), count, MpiType, fromRank, msgTag, *rawMpiComm, &status);

      totalGIDs += count;
    }

    // Do waits on send requests
    if (numSend) {
      Array<MPI_Status> sendStatuses(numSend);
      MPI_Waitall(numSend, sendReqs.getRawPtr(), sendStatuses.getRawPtr());
    }

    // Merge GIDs
    myGIDs.reserve(totalGIDs);
    for (typename map_type::const_iterator it = recvMap.begin(); it != recvMap.end(); it++) {
      int offset = myGIDs.size(), len = it->second.size();
      if (len) {
        myGIDs.resize(offset + len);
        memcpy(myGIDs.getRawPtr() + offset, it->second.getRawPtr(), len*sizeof(GO));
      }
    }
    // NOTE 2: The general sorting algorithm could be sped up by using the knowledge that original myGIDs and all received chunks
    // (i.e. it->second) are sorted. Therefore, a merge sort would work well in this situation.
    std::sort(myGIDs.begin(), myGIDs.end());

    // Step 3: Construct importer
    RCP<Map>          newRowMap      = MapFactory   ::Build(lib, rowMap->getGlobalNumElements(), myGIDs(), indexBase, origComm);
    RCP<const Import> rowMapImporter;
    {
      SubFactoryMonitor m1(*this, "Import construction", currentLevel);
      rowMapImporter = ImportFactory::Build(rowMap, newRowMap);
    }

    Set(currentLevel, "Importer", rowMapImporter);

    // ======================================================================================================
    // Print some data
    // ======================================================================================================
    if (pL.get<bool>("repartition: print partition distribution") && IsPrint(Statistics2)) {
      // Print the grid of processors
      GetOStream(Statistics2) << "Partition distribution over cores (ownership is indicated by '+')" << std::endl;

      char amActive = (myGIDs.size() ? 1 : 0);
      std::vector<char> areActive(numProcs, 0);
      MPI_Gather(&amActive, 1, MPI_CHAR, &areActive[0], 1, MPI_CHAR, 0, *rawMpiComm);

      int rowWidth = std::min(Teuchos::as<int>(ceil(sqrt(numProcs))), 100);
      for (int proc = 0; proc < numProcs; proc += rowWidth) {
        for (int j = 0; j < rowWidth; j++)
          if (proc + j < numProcs)
            GetOStream(Statistics2) << (areActive[proc + j] ? "+" : ".");
          else
          GetOStream(Statistics2) << " ";

        GetOStream(Statistics2) << "      " << proc << ":" << std::min(proc + rowWidth, numProcs) - 1 << std::endl;
      }
    }

  } // Build
Developer: Russell-Jones-OxPhys, Project: Trilinos, Lines: 101, Source: MueLu_RepartitionFactory_def.hpp


Example 15: MPI_Comm_rank

  /**
   * This method contains most of the MPI code that coordinates the efforts
   * among the crawlers. This method doesn't return until the root MPI process
   * receives a SIGTERM signal.
   *
   * One of the crawlers is designated the root crawler based on its MPI rank.
   * The root crawler instantiates a KeyspaceMapping object to coordinate the
   * keyspace mapping among the crawlers. In reality the root crawler forks its
   * tripcode generating thread and continues to listen for KeyspacePool
   * requests in the main thread.
   *
   * When the root crawler receives a SIGTERM signal, it signals all of the
   * crawlers to finish their current pools and optionally serialize the
   * KeyspaceMapping object to disk to allow for the search to be resumed in the
   * future.
   *
   * \fixme Catching the SIGTERM signal in a thread that makes MPI calls might
   * not be safe. See section 2.9.2 of the MPI specification.
   */
  void TripcodeCrawler::run()
  {
    int worldRank, worldSize;
    MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);
    MPI_Comm_size(MPI_COMM_WORLD, &worldSize);

    if(worldRank == ROOT_RANK)
    {
      /// \todo Spawn a thread so the root process can compute tripcodes and
      /// coordinate the threads at the same time.

      while(true)
      {
        cout << "doing things" << endl;
        MPI_Status status;

        // blocking receive for requests for keyspace pools
        MPI_Recv(NULL, 0, MPI_INT, MPI_ANY_SOURCE, KEYSPACE_REQUEST, MPI_COMM_WORLD, &status);

        /// \todo Need to document the ownership of a lot of these buffers.
        // construct a KeyspacePool object suitable for serialization and
        // transmission
        assert(m_keyspaceMapping != NULL);
        KeyspacePool *keyspacePool = m_keyspaceMapping->getNextPool();
        size_t poolDataSize;
        uint8_t *poolData = keyspacePool->serialize(&poolDataSize);
        // blocking response to keyspace pool request with serialized
        // KeyspacePool object
        MPI_Send(poolData, static_cast<int>(poolDataSize), MPI_BYTE, status.MPI_SOURCE, KEYSPACE_RESPONSE, MPI_COMM_WORLD);
        delete keyspacePool;
        delete poolData;
      }
    }
    else
    {
      while(true)
      {
        MPI_Status status;

        // request a new keyspace pool
        MPI_Send(NULL, 0, MPI_INT, ROOT_RANK, KEYSPACE_REQUEST, MPI_COMM_WORLD);

        // receive the serialized KeyspacePool object
        MPI_Probe(ROOT_RANK, KEYSPACE_RESPONSE, MPI_COMM_WORLD, &status);
        // MPI_Get_count writes an int, so receive the count into an int and then widen it.
        int poolDataCount = 0;
        MPI_Get_count(&status, MPI_BYTE, &poolDataCount);
        size_t poolDataSize = static_cast<size_t>(poolDataCount);
        uint8_t *poolData = new uint8_t[poolDataSize];
        MPI_Recv(poolData, static_cast<int>(poolDataSize), MPI_BYTE, ROOT_RANK, KEYSPACE_RESPONSE, MPI_COMM_WORLD, &status);
        KeyspacePool *keyspacePool = KeyspacePoolFactory::singleton()->createKeyspacePool(poolData, poolDataSize);
        delete[] poolData;  /// \todo We might want to explore the speed benefit
        /// of a custom memory allocator here and a few other places.

        TripcodeContainer tripcodes, matches;
        KeyBlock *currentBlock;
        while((currentBlock = keyspacePool->getNextBlock()) != NULL)
        {
          m_tripcodeAlgorithm->computeTripcodes(currentBlock, &tripcodes);
          m_matchingAlgorithm->matchTripcodes(&tripcodes, &matches);
        }

        // TODO: send TripcodeSearchResult to ROOT_RANK

        delete keyspacePool;

        // TODO: check for termination signal
      }
    }
  }
Developer: auntieNeo, Project: TripRipper, Lines: 87, Source: tripcodeCrawler.cpp


