C++ MPI_Alltoallv Function Code Examples


This article collects typical usage examples of the C++ function MPI_Alltoallv, drawn from open-source projects. If you are wondering what MPI_Alltoallv does, how to call it, or want to see it used in real code, the hand-picked examples below should help.



The following presents 20 code examples of MPI_Alltoallv, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better C++ code examples.
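Before diving into the examples, here is a minimal, self-contained sketch of the MPI_Alltoallv calling convention, written for this article rather than taken from any of the projects below: each rank sends sendcounts[i] elements of sendtype, starting sdispls[i] elements into the send buffer, to rank i, and receives recvcounts[i] elements at offset rdispls[i] from rank i.

/* Minimal MPI_Alltoallv sketch (illustrative only): every rank sends
   one int to every rank, with blocks laid out contiguously. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int *sendbuf = malloc(size * sizeof(int));
    int *recvbuf = malloc(size * sizeof(int));
    int *counts  = malloc(size * sizeof(int));
    int *displs  = malloc(size * sizeof(int));
    for (int i = 0; i < size; ++i) {
        sendbuf[i] = rank;  /* payload: this rank's id */
        counts[i]  = 1;     /* one element to/from each rank */
        displs[i]  = i;     /* block for rank i starts at offset i */
    }

    /* The pattern is symmetric, so the same counts and displacements
       serve both the send and the receive side. */
    MPI_Alltoallv(sendbuf, counts, displs, MPI_INT,
                  recvbuf, counts, displs, MPI_INT, MPI_COMM_WORLD);

    printf("rank %d: recvbuf[0] = %d\n", rank, recvbuf[0]);
    free(sendbuf); free(recvbuf); free(counts); free(displs);
    MPI_Finalize();
    return 0;
}

After the call, recvbuf[i] on every rank holds the value i, because each rank filled its entire send buffer with its own id. Many of the examples below exploit exactly this kind of symmetry by reusing one counts/displacements pair for both directions.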

Example 1: sg_route

void sg_route(struct Domain *theDomain,struct poisson *thePoisson){
//	struct Tridiagsys* thetridiag=alloc_tridiagsys(thePoisson);
	tridiag_setup(thePoisson);
	int N_r=thePoisson->N_r;
	int N_z=thePoisson->N_z;
	int N_p=thePoisson->N_p;
	int N_k=thePoisson->N_k;
	int i,j,k;
	int size=thePoisson->size;
	int rank=theDomain->rank;
	cylinder_interp(theDomain,thePoisson);
	set_bndry(theDomain,thePoisson);
	density_fft(thePoisson);
	mpi_arrange(thePoisson->density,thePoisson->buffer,thePoisson);
	double *buffersend=thePoisson->buffer;
	double *bufferstore=thePoisson->density;

	int *sendcnts=thePoisson->sendcnts;
	int *sdispls=thePoisson->sdispls;
	int *recvcnts=thePoisson->recvcnts;
	int *rdispls=thePoisson->rdispls;

	MPI_Alltoallv(buffersend,sendcnts,sdispls,MPI_DOUBLE,bufferstore,recvcnts,rdispls,MPI_DOUBLE,MPI_COMM_WORLD);
	sinefft(thePoisson);
	solveVp(rank,thePoisson);
	sinefft(thePoisson);
/*
	if(rank==0){
	int i,j,k;
	i=0;j=0;k=0;
	FILE *f;
	f=fopen("poten.dat","w");
	for(i=0;i<thePoisson->N_r_glob;i++)
		for(k=0;k<thePoisson->N_z_glob;k++)
			fprintf(f,"%f   %d   %f\n",i+0.5,k,thePoisson->density[in_lookup(thePoisson,k,i,0)]);
	fclose(f);
	}
*/

	buffersend=thePoisson->density;
	bufferstore=thePoisson->buffer;
	// this is the inverse MPI communication, so the sendcnts and recvcnts are exchanged
	MPI_Alltoallv(buffersend,recvcnts,rdispls,MPI_DOUBLE,bufferstore,sendcnts,sdispls,MPI_DOUBLE,MPI_COMM_WORLD);
	inverse_mpi_arrange(thePoisson->buffer,thePoisson->density,thePoisson);
	inverse_fft(thePoisson);
	int direction=0;
	for(direction=0;direction<3;direction++){
		cal_force(thePoisson,direction);	
		disco_force_interp(theDomain,thePoisson,direction);
	}
//	disco_interp(theSim,theCells,thePoisson);
//	destroy_tridiagsys(thetridiag);

}
Developer: julywater, Project: self_gravity, Lines: 54, Source: sg_routine.c


Example 2: apply

static void apply(const plan *ego_, R *I, R *O)
{
     const P *ego = (const P *) ego_;
     plan_rdft *cld1, *cld2, *cld2rest, *cld3;

     /* transpose locally to get contiguous chunks */
     cld1 = (plan_rdft *) ego->cld1;
     if (cld1) {
	  cld1->apply(ego->cld1, I, O);
	  
	  /* transpose chunks globally */
	  if (ego->equal_blocks)
	       MPI_Alltoall(O, ego->send_block_sizes[0], FFTW_MPI_TYPE,
			    I, ego->recv_block_sizes[0], FFTW_MPI_TYPE,
			    ego->comm);
	  else
	       MPI_Alltoallv(O, ego->send_block_sizes, ego->send_block_offsets,
			     FFTW_MPI_TYPE,
			     I, ego->recv_block_sizes, ego->recv_block_offsets,
			     FFTW_MPI_TYPE,
			     ego->comm);
     }
     else { /* TRANSPOSED_IN, no need to destroy input */
	  /* transpose chunks globally */
	  if (ego->equal_blocks)
	       MPI_Alltoall(I, ego->send_block_sizes[0], FFTW_MPI_TYPE,
			    O, ego->recv_block_sizes[0], FFTW_MPI_TYPE,
			    ego->comm);
	  else
	       MPI_Alltoallv(I, ego->send_block_sizes, ego->send_block_offsets,
			     FFTW_MPI_TYPE,
			     O, ego->recv_block_sizes, ego->recv_block_offsets,
			     FFTW_MPI_TYPE,
			     ego->comm);
	  I = O; /* final transpose (if any) is in-place */
     }
     
     /* transpose locally, again, to get ordinary row-major */
     cld2 = (plan_rdft *) ego->cld2;
     if (cld2) {
	  cld2->apply(ego->cld2, I, O);
	  cld2rest = (plan_rdft *) ego->cld2rest;
	  if (cld2rest) { /* leftover from unequal block sizes */
	       cld2rest->apply(ego->cld2rest,
			       I + ego->rest_Ioff, O + ego->rest_Ooff);
	       cld3 = (plan_rdft *) ego->cld3;
	       if (cld3)
		    cld3->apply(ego->cld3, O, O);
	       /* else TRANSPOSED_OUT is true and user wants O transposed */
	  }
     }
}
Developer: 376473984, Project: fftw3, Lines: 52, Source: transpose-alltoall.c


Example 3: time_alltoallv

double time_alltoallv(struct collParams* p)
{
    int i, j, size2;
    int disp = 0;
    for ( i = 0; i < p->nranks; i++) {
        size2 = (i+p->myrank) % (p->size+1);
        sendcounts[i] = size2;
        recvcounts[i] = size2;
        sdispls[i] = disp;
        rdispls[i] = disp;
        disp += size2;
    }
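    /* The pattern is symmetric: this rank's recvcounts[i] equals rank i's
       sendcounts entry for this rank, both (i + myrank) % (size+1), so
       sends and receives match up. */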
    MPI_Barrier(MPI_COMM_WORLD);

    size2 = p->myrank % (p->size+1);
    __TIME_START__;
    for (i = 0; i < p->iter; i++) {
        MPI_Alltoallv(sbuffer, sendcounts, sdispls, p->type, rbuffer, recvcounts, rdispls, p->type, p->comm);
        __BAR__(p->comm);
    }
    __TIME_END__;

    if (check_buffers) {
        check_sbuffer(p->myrank);
        for (i = 0; i < p->nranks; i++) {
            disp = 0;
            for (j = 0; j < p->myrank; j++) { disp += (j+i) % (p->size+1); }
            check_rbuffer(rbuffer, rdispls[i], i, disp, recvcounts[i]);
        }
    }

    return __TIME_USECS__ / (double)p->iter;
}
Developer: 8l, Project: insieme, Lines: 33, Source: bCast.c


Example 4: remap

void remap(){
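    // Note: num, mask, cnt, spf, sendcounts, sdispls, recvcounts, rdispls,
    // tmpData, data, size, rank, len and the MPI_DT_ datatype macro are
    // globals defined elsewhere in this project.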
    MPI_Allgather(num,mask,MPI_INT,cnt,mask,MPI_INT,MPI_COMM_WORLD);
    int arrStart = 0;
    int arrEnd = 0;
    int allStart = 0;
    for (int i=0;i<mask;++i){
        spf[0] = allStart;
        for (int j=0;j<size;++j){
            spf[j+1] = spf[j]+cnt[j*mask+i];
        }
        for (int j=0;j<size;++j){
            if (spf[rank]>j*len+len-1 || spf[rank+1]-1<j*len){
                sdispls[j] = arrStart;
                sendcounts[j] = 0;
            } else {
                sdispls[j] = arrStart;
                sendcounts[j] = std::min(spf[rank+1],j*len+len)-std::max(j*len,spf[rank]);
                arrStart += sendcounts[j];
            }
            if (spf[j]>rank*len+len-1 || spf[j+1]-1<rank*len){
                rdispls[j] = arrEnd;
                recvcounts[j] = 0;
            } else {
                rdispls[j] = arrEnd;
                recvcounts[j] = std::min(spf[j+1],rank*len+len)-std::max(rank*len,spf[j]);
                arrEnd += recvcounts[j];
            }
        }
        MPI_Alltoallv(tmpData,sendcounts,sdispls,MPI_DT_,data,recvcounts,rdispls,MPI_DT_,MPI_COMM_WORLD);
        allStart = spf[size];
    }
}
Developer: Cueroqu, Project: ParallelComputingHomework, Lines: 32, Source: 2.cpp


Example 5: main

int main( int argc, char* argv[] )
{
    int i, j;
    int myrank, nprocs;
    char *sbuf,  *rbuf;
    int *scnt, *rcnt;
    int *sdpl, *rdpl;
    int dsize;
    int ssize, rsize;

    MPI_Init( &argc, &argv );

    MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
    MPI_Comm_size( MPI_COMM_WORLD, &nprocs );
    MPI_Type_size(DATATYPE, &dsize);

    scnt = malloc( sizeof(int)*nprocs );
    sdpl = malloc( sizeof(int)*nprocs );
    rcnt = malloc( sizeof(int)*nprocs );
    rdpl = malloc( sizeof(int)*nprocs );

    for( i=0; i<nprocs; i++ )
    {
        scnt[i]=SIZE*(i+1)*(myrank+1);
        rcnt[i]=SIZE*(i+1)*(myrank+1);
        sdpl[i]=SIZE*((i*(i+1))/2)*(myrank+1);
        rdpl[i]=SIZE*((i*(i+1))/2)*(myrank+1);
    }
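    /* sdpl[i]/rdpl[i] = SIZE*((i*(i+1))/2)*(myrank+1) is the prefix sum of
       the counts SIZE*(k+1)*(myrank+1) for k < i, so blocks are contiguous. */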

    ssize=0;
    for(i=0; i<nprocs; i++) ssize+=scnt[i];
    rsize=0;
    for(i=0; i<nprocs; i++) rsize+=rcnt[i];

    sbuf = (char*) malloc( SIZE*dsize*ssize );
    rbuf = (char*) malloc( SIZE*dsize*rsize );

    for( i=0; i<REPEAT; i++ )
    {
        MPI_Alltoallv( sbuf, scnt, sdpl, DATATYPE,
                       rbuf, rcnt, rdpl, DATATYPE,
                       MPI_COMM_WORLD );
    }

    fprintf(stdout, "DONE (rank %d)!\n", myrank);

    MPI_Finalize();
}
Developer: RSE-Cambridge, Project: IPM, Lines: 48, Source: main.c


Example 6: ZMPI_Alltoall_int_proclists_alltoallv

int ZMPI_Alltoall_int_proclists_alltoallv(int *sendbuf, int nsprocs, int *sprocs, int *recvbuf, int nrprocs, int *rprocs, MPI_Comm comm) /* zmpi_func ZMPI_Alltoall_int_proclists_alltoallv */
{
  int i, size;

  int *scounts2, *sdispls2, *rcounts2, *rdispls2;


  MPI_Comm_size(comm, &size);

  scounts2 = z_alloc(4 * size, sizeof(int));
  sdispls2 = scounts2 + 1 * size;
  rcounts2 = scounts2 + 2 * size;
  rdispls2 = scounts2 + 3 * size;

  for (i = 0; i < size; ++i)
  {
    scounts2[i] = rcounts2[i] = DEFAULT_INT;
    sdispls2[i] = rdispls2[i] = i;
    recvbuf[i] = 0;
  }

  for (i = 0; i < nsprocs; ++i) scounts2[sprocs[i]] = 1;
  for (i = 0; i < nrprocs; ++i) rcounts2[rprocs[i]] = 1;

  MPI_Alltoallv(sendbuf, scounts2, sdispls2, MPI_INT, recvbuf, rcounts2, rdispls2, MPI_INT, comm);

  z_free(scounts2);

  return MPI_SUCCESS;
}
Developer: fweik, Project: scafacos, Lines: 30, Source: zmpi_tools.c


Example 7: transpose_mpi_out_of_place

/* Out-of-place version of transpose_mpi (or rather, in place using
   a scratch array): */
static void transpose_mpi_out_of_place(transpose_mpi_plan p, int el_size,
				       TRANSPOSE_EL_TYPE *local_data,
				       TRANSPOSE_EL_TYPE *work)
{
     local_transpose_copy(local_data, work, el_size, p->local_nx, p->ny);

     if (p->all_blocks_equal)
	  MPI_Alltoall(work, p->send_block_size * el_size, p->el_type,
		       local_data, p->recv_block_size * el_size, p->el_type,
		       p->comm);
     else {
	  int i, n_pes = p->n_pes;
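	  /* The stored counts/offsets are in units of TRANSPOSE_EL_TYPE elements;
	     scale them by el_size for the MPI call, then scale back afterwards. */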

	  for (i = 0; i < n_pes; ++i) {
	       p->send_block_sizes[i] *= el_size;
	       p->recv_block_sizes[i] *= el_size;
	       p->send_block_offsets[i] *= el_size;
	       p->recv_block_offsets[i] *= el_size;
	  }
	  MPI_Alltoallv(work, p->send_block_sizes, p->send_block_offsets,
			p->el_type,
			local_data, p->recv_block_sizes, p->recv_block_offsets,
			p->el_type,
			p->comm);
	  for (i = 0; i < n_pes; ++i) {
	       p->send_block_sizes[i] /= el_size;
	       p->recv_block_sizes[i] /= el_size;
	       p->send_block_offsets[i] /= el_size;
	       p->recv_block_offsets[i] /= el_size;
	  }
     }

     do_permutation(local_data, p->perm_block_dest, p->num_perm_blocks,
		    p->perm_block_size * el_size);
}
Developer: JonBoley, Project: peaqb-fast, Lines: 37, Source: transpose_mpi.c


Example 8: exchange_fragment_images

//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
void Image_Exchanger::exchange_fragment_images(unsigned int* databuf,
                                               int nviewer,
                                               ImageFragment_Tile* ift)
{
//    fprintf(stderr, "**** %s:%s() ****\n", __FILE__, __func__);
 
#ifdef _DEBUG7
    fprintf(stderr, "**** %s:%s() ****\n", __FILE__, __func__);
#endif


    unsigned int* sendbuf = databuf + m_sbuf_offset;
    unsigned int* recvbuf = databuf + m_rbuf_offset;


    if(nviewer == 1)
    {
        MPI_Gatherv((int*)sendbuf, m_scounts[0], MPI_INT,
                    (int*)recvbuf, m_rcounts, m_rdispls, MPI_INT,
                    0, MPI_COMM_WORLD);
    }
    else
    {
        MPI_Alltoallv( (int*)sendbuf, m_scounts, m_sdispls, MPI_INT,
                       (int*)recvbuf, m_rcounts, m_rdispls, MPI_INT, 
                       MPI_COMM_WORLD);
    }

    ift->address_fragments(m_rbuf_offset, m_rdispls);
}
Developer: jinghuage, Project: pcaster, Lines: 33, Source: image_exchanger.cpp


Example 9: mpi_alltoallv

void mpi_alltoallv(void *sendbuf, int *sendcount, int *sdispls,  int *sendtype,
                   void *recvbuf, int *recvcount, int *rdispls,  int *recvtype,
                   int *comm, int *ierr)
{
    *ierr = MPI_Alltoallv(sendbuf, sendcount, sdispls, *sendtype, recvbuf, recvcount,
                          rdispls, *recvtype, *comm);
    return;
}
Developer: hadimontakhabi, Project: VolpexMPI-HPX, Lines: 8, Source: volpex_fAPI.c


Example 10: mpi_alltoallv_

FORT_DLL_SPEC void FORT_CALL mpi_alltoallv_ ( void*v1, MPI_Fint *v2, MPI_Fint *v3, MPI_Fint *v4, void*v5, MPI_Fint *v6, MPI_Fint *v7, MPI_Fint *v8, MPI_Fint *v9, MPI_Fint *ierr ){

#ifndef HAVE_MPI_F_INIT_WORKS_WITH_C
    if (MPIR_F_NeedInit){ mpirinitf_(); MPIR_F_NeedInit = 0; }
#endif
    if (v1 == MPIR_F_MPI_IN_PLACE) v1 = MPI_IN_PLACE;
    *ierr = MPI_Alltoallv( v1, v2, v3, (MPI_Datatype)(*v4), v5, v6, v7, (MPI_Datatype)(*v8), (MPI_Comm)(*v9) );
}
Developer: mpifl, Project: mpich3newforfile, Lines: 8, Source: alltoallvf.c


Example 11: mpiColumnMatrixTranspose

void mpiColumnMatrixTranspose(ColumnMatrix recvData,ColumnMatrix recvBuffer, ColumnMatrix sendData, ColumnMatrix sendBuffer)
{
	packTansposeData(sendData, sendBuffer);	

	//All processors send and receive the same amount of doubles with the same processor.
	MPI_Alltoallv(sendBuffer->data,sendBuffer->blockSize,sendBuffer->displacement,MPI_DOUBLE,recvBuffer->data,sendBuffer->blockSize,sendBuffer->displacement,MPI_DOUBLE,*sendData->comm); 

	unpackTansposeData(recvData, recvBuffer);
}
Developer: raymondt8, Project: ex6_2016, Lines: 9, Source: parallelPoisson.c


Example 12: binGraph::exchange_edges

int binGraph::exchange_edges(uint64_t m_read, uint64_t* read_edges,
                             int32_t* ranks,etype t)
{
  int32_t* scounts = (int32_t*)malloc(PCU_Comm_Peers()*sizeof(int32_t));
  int32_t* rcounts = (int32_t*)malloc(PCU_Comm_Peers()*sizeof(int32_t));
  int32_t* sdispls = (int32_t*)malloc(PCU_Comm_Peers()*sizeof(int32_t));
  int32_t* sdispls_cpy = (int32_t*)malloc(PCU_Comm_Peers()*sizeof(int32_t));
  int32_t* rdispls = (int32_t*)malloc(PCU_Comm_Peers()*sizeof(int32_t));
  for (int i = 0; i < PCU_Comm_Peers(); ++i)
  {
    scounts[i] = 0;
    rcounts[i] = 0;
    sdispls[i] = 0;
    sdispls_cpy[i] = 0;
    rdispls[i] = 0;
  }

  uint64_t n_per_rank = num_global_verts / PCU_Comm_Peers() + 1;
  for (uint64_t i = 0; i < m_read*2; i+=2)
  {
    uint64_t vert = read_edges[i];
    int vert_task = ranks[vert];
    scounts[vert_task] += 2;
  }

  MPI_Alltoall(scounts, 1, MPI_INT32_T,
               rcounts, 1, MPI_INT32_T, PCU_Get_Comm());

  for (uint64_t i = 1; i < PCU_Comm_Peers(); ++i) {
    sdispls[i] = sdispls[i-1] + scounts[i-1];
    sdispls_cpy[i] = sdispls[i];
    rdispls[i] = rdispls[i-1] + rcounts[i-1];
  }
 
  int32_t total_send = sdispls[PCU_Comm_Peers()-1] + scounts[PCU_Comm_Peers()-1];
  int32_t total_recv = rdispls[PCU_Comm_Peers()-1] + rcounts[PCU_Comm_Peers()-1];
  uint64_t* sendbuf = (uint64_t*)malloc(total_send*sizeof(uint64_t));
  edge_list[t] = (uint64_t*)malloc(total_recv*sizeof(uint64_t));
  num_local_edges[t] = total_recv / 2;
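  /* sdispls_cpy[] serves as a per-destination write cursor while the
     vertex pairs are packed into sendbuf below. */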

  for (uint64_t i = 0; i < m_read*2; i+=2)
  {
    uint64_t vert1 = read_edges[i];
    uint64_t vert2 = read_edges[i+1];
    int vert_task = ranks[vert1];

    sendbuf[sdispls_cpy[vert_task]++] = vert1;
    sendbuf[sdispls_cpy[vert_task]++] = vert2;
  }

  MPI_Alltoallv(sendbuf, scounts, sdispls, MPI_UINT64_T,
                edge_list[t], rcounts, rdispls, MPI_UINT64_T, PCU_Get_Comm());
  free(sendbuf);

  return 0;
}
Developer: SCOREC, Project: EnGPar, Lines: 56, Source: binGraph.cpp


Example 13: mpi_alltoallv

void mpi_alltoallv (void *sendbuf, MPI_Fint *sendcnts, MPI_Fint *sdispls, 
		    MPI_Fint *sendtype, void *recvbuf, MPI_Fint *recvcnts,
		    MPI_Fint *rdispls, MPI_Fint *recvtype, MPI_Fint *comm, 
		    MPI_Fint *__ierr)
{
  *__ierr = MPI_Alltoallv (sendbuf, sendcnts, sdispls, 
			   MPI_Type_f2c (*sendtype), recvbuf,
			   recvcnts, rdispls, MPI_Type_f2c (*recvtype),
			   MPI_Comm_f2c (*comm));
}
Developer: JeremyFyke, Project: cime, Lines: 10, Source: f_wrappers_pmpi.c


Example 14: run_collective_dummy_operations

void dummy_operations::run_collective_dummy_operations() {
        int rank, size;
        MPI_Comm_rank( MPI_COMM_WORLD, &rank);
        MPI_Comm_size( MPI_COMM_WORLD, &size);
        
        // Run Broadcast
        {
                int x;
                MPI_Comm_rank( MPI_COMM_WORLD, &x);
                MPI_Bcast(&x, 1, MPI_INT, 0, MPI_COMM_WORLD);
        }
        // Run Allgather.
        {
                int x, size;
                MPI_Comm_rank( MPI_COMM_WORLD, &x);
                MPI_Comm_size( MPI_COMM_WORLD, &size);
                
                std::vector<int> rcv(size);
                MPI_Allgather(&x, 1, MPI_INT, &rcv[0], 1, MPI_INT, MPI_COMM_WORLD);
        }

        // Run Allreduce.
        {
                int x;
                MPI_Comm_rank( MPI_COMM_WORLD, &x);
                
                int y = 0;
                MPI_Allreduce(&x, &y, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
        }

        // Dummy Prefix Sum
        {
                int x  = 1;
                int y  = 0;

                MPI_Scan(&x, &y, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); 
        }

        // Run Alltoallv.
        {
                std::vector<int> snd(size);
                std::vector<int> rcv(size);
                std::vector<int> scounts(size, 1);
                std::vector<int> rcounts(size, 1);
                std::vector<int> sdispls(size);
                std::vector<int> rdispls(size);
                for (int i = 0, iend = sdispls.size(); i < iend; ++i) {
                        sdispls[i] = rdispls[i] = i;
                }
                MPI_Alltoallv(&snd[0], &scounts[0], &sdispls[0], MPI_INT,
                              &rcv[0], &rcounts[0], &rdispls[0], MPI_INT, MPI_COMM_WORLD);
        }
        

}
Developer: SebastianSchlag, Project: KaHIP, Lines: 55, Source: dummy_operations.cpp


Example 15: FC_FUNC

FC_FUNC( mpi_alltoallv , MPI_ALLTOALLV )
           ( void *sendbuf, int *sendcounts, int *sdispls, int *sendtype,
	     void *recvbuf, int *recvcounts, int *rdispls, int *recvtype,
             int *comm, int *ierror )
{

  *ierror=MPI_Alltoallv(sendbuf, sendcounts, sdispls, *sendtype,
			recvbuf, recvcounts, rdispls, *recvtype,
			*comm);

}
Developer: ACME-Climate, Project: cime, Lines: 11, Source: collective.c


Example 16: mpiMatrix_transpose

void mpiMatrix_transpose(struct mpiMatrix *matrix, struct mpi_com *uplink) {
  mpiMatrix_serialiseForSending(matrix, uplink);
  int *SRcounts = mpiMatrix_genCounts(matrix, uplink);
  int *SRdispl = mpiMatrix_genDispl(uplink, SRcounts);

  MPI_Alltoallv(matrix->data, SRcounts , SRdispl, MPI_DOUBLE,
                matrix->aux, SRcounts, SRdispl, MPI_DOUBLE,  uplink->comm);
  mpiMatrix_swapDataAux(matrix);
  mpiMatrix_deserialiseAfterReception(matrix);

  free(SRcounts);
  free(SRdispl);
}
Developer: somaen, Project: superdupercomputers, Lines: 13, Source: transpose.c


Example 17: avtSamplePointCommunicator::CommunicateMessages

char *
avtSamplePointCommunicator::CommunicateMessages(char **sendmessages,
                                                int   *sendcount,
                                                char **recvmessages,
                                                int   *recvcount)
{
#ifdef PARALLEL
    //
    // Figure out how much each processor needs to send/receive.
    //
    MPI_Alltoall(sendcount, 1, MPI_INT, recvcount, 1, MPI_INT, VISIT_MPI_COMM);

    //
    // Create a buffer we can receive into.
    //
    char *recvConcatList = CreateMessageStrings(recvmessages, recvcount,
                                                numProcs);
    
    //
    // Calculate the displacement lists.
    //
    int *senddisp = new int[numProcs];
    int *recvdisp = new int[numProcs];
    senddisp[0] = 0;
    recvdisp[0] = 0;
    for (int i = 1 ; i < numProcs ; i++)
    {
        senddisp[i] = senddisp[i-1] + sendcount[i-1];
        recvdisp[i] = recvdisp[i-1] + recvcount[i-1];
    }

    //
    // Do the actual transfer of sample points.   The messages arrays are
    // actually indexes into one big array.  Since MPI expects that big
    // array, give that (which is at location 0).
    //
    MPI_Alltoallv(sendmessages[0], sendcount, senddisp, MPI_CHAR,
                  recvmessages[0], recvcount, recvdisp, MPI_CHAR,
                  VISIT_MPI_COMM);

    delete [] senddisp;
    delete [] recvdisp;

    //
    // We need to return this buffer so the calling function can delete it.
    //
    return recvConcatList;
#else
    return 0;
#endif
}
Developer: HarinarayanKrishnan, Project: VisIt26RC_Trunk, Lines: 51, Source: avtSamplePointCommunicator.C


Example 18: COMALL_Repeat

/* communicate integers and doubles according
 * to the pattern computed by COMALL_Pattern */
int COMALL_Repeat (void *pattern)
{
  COMALLPATTERN *pp = pattern;
  COMDATA *cd;
  int i;

  for (i = 0; i < pp->ncpu; i ++) pp->send_position [i] = pp->recv_position [i] = 0;

  /* pack ints */
  for (i = 0, cd = pp->send; i < pp->nsend; i ++, cd ++)
  {
    if (cd->ints)
    {
      MPI_Pack (cd->i, cd->ints, MPI_INT, &pp->send_data [pp->send_disps [cd->rank]], pp->send_counts [cd->rank], &pp->send_position [cd->rank], pp->comm);
    }
  }

  /* pack doubles */
  for (i = 0, cd = pp->send; i < pp->nsend; i ++, cd ++)
  {
    if (cd->doubles)
    {
      MPI_Pack (cd->d, cd->doubles, MPI_DOUBLE, &pp->send_data [pp->send_disps [cd->rank]], pp->send_counts [cd->rank], &pp->send_position [cd->rank], pp->comm); 
    }
  }

#if DEBUG
  for (i = 0; i < pp->ncpu; i ++)
  {
    ASSERT_DEBUG (pp->send_position [i] <= pp->send_counts [i], "Incorrect packing");
  }
#endif

  /* all to all send and receive */
  MPI_Alltoallv (pp->send_data, pp->send_counts, pp->send_disps, MPI_PACKED, pp->recv_data, pp->recv_counts, pp->recv_disps, MPI_PACKED, pp->comm);

  if (pp->recv_size)
  {
    /* unpack data */
    for (i = 0; i < pp->ncpu; i ++)
    {
      MPI_Unpack (&pp->recv_data [pp->recv_disps [i]], pp->recv_counts [i], &pp->recv_position [i], pp->recv [i].i, pp->recv [i].ints, MPI_INT, pp->comm);
      MPI_Unpack (&pp->recv_data [pp->recv_disps [i]], pp->recv_counts [i], &pp->recv_position [i], pp->recv [i].d, pp->recv [i].doubles, MPI_DOUBLE, pp->comm);
    }
  }

  return pp->send_size;
}
Developer: KonstantinosKr, Project: solfec, Lines: 50, Source: com.c


Example 19: ZMPI_Reduce_scatter_block_intsum_proclists_alltoallv

int ZMPI_Reduce_scatter_block_intsum_proclists_alltoallv(const int *sendbuf, int nsendprocs, int *sendprocs, int *recvbuf, int recvcount, int nrecvprocs, int *recvprocs, MPI_Comm comm) /* zmpi_func ZMPI_Reduce_scatter_block_intsum_proclists_alltoallv */
{
  int i, j, size, rank;

  int *recvbuf_full;
  int *scounts, *sdispls, *rcounts, *rdispls;


  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  recvbuf_full = z_alloc(nrecvprocs * recvcount, sizeof(int));

  scounts = z_alloc(4 * size, sizeof(int));
  sdispls = scounts + 1 * size;
  rcounts = scounts + 2 * size;
  rdispls = scounts + 3 * size;

  memset(scounts, 0, 4 * size * sizeof(int));

  for (j = 0; j < nrecvprocs; ++j)
  {
    rcounts[recvprocs[j]] = recvcount;
    rdispls[recvprocs[j]] = j * recvcount;
  }

  for (j = 0; j < nsendprocs; ++j)
  {
    scounts[sendprocs[j]] = recvcount;
    sdispls[sendprocs[j]] = sendprocs[j] * recvcount;
  }
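  /* sendbuf is laid out as one recvcount-sized block per destination rank,
     so sdispls indexes directly by rank. */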

  MPI_Alltoallv((void *) sendbuf, scounts, sdispls, MPI_INT, recvbuf_full, rcounts, rdispls, MPI_INT, comm);

  for (i = 0; i < recvcount; ++i) recvbuf[i] = DEFAULT_INT;

  for (j = 0; j < nrecvprocs; ++j)
    for (i = 0; i < recvcount; ++i) recvbuf[i] += recvbuf_full[j * recvcount + i];

  z_free(scounts);

  z_free(recvbuf_full);

  return MPI_SUCCESS;
}
Developer: fweik, Project: scafacos, Lines: 45, Source: zmpi_tools.c


Example 20: transpose

void transpose (Real **b, int size, int *len, int *disp, int rank, int m){
  int *sendcounts, *rdispls;
  Real  *sendbuf, *recvbuf;
  sendbuf = createRealArray (m * len[rank]);
  recvbuf = createRealArray (m * len[rank]);
  sendcounts = calloc(size,sizeof(int));
  rdispls = calloc(size,sizeof(int));
  matrixToVector(b,sendbuf,len,disp, size, rank);

  int index = 0;
  for (int i = 0; i < size; ++i)
  {
    sendcounts[i]= len[rank]*len[i];
    rdispls[i]=index;
    index=index+sendcounts[i];
  }
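  /* The exchange is symmetric (rank r and rank i swap len[r]*len[i] doubles),
     so the same counts and displacements are used for send and receive. */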
  MPI_Alltoallv(sendbuf, sendcounts, rdispls, MPI_DOUBLE, recvbuf, sendcounts, rdispls, MPI_DOUBLE, MPI_COMM_WORLD);
  vectorToMatrix(b,recvbuf,len,disp, size, rank);
}
Developer: hgranlund, Project: superComp, Lines: 19, Source: common.c



Note: The MPI_Alltoallv function examples in this article were compiled by 纯净天空 from source code and documentation on platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their developers; copyright of the source code belongs to the original authors, and any use or redistribution should follow the corresponding project's License. Please do not repost without permission.

