C++ MPI_Free_mem Function Code Examples


This article collects typical usage examples of the C++ MPI_Free_mem function. If you have been wondering what MPI_Free_mem does, how to call it, and what real code that uses it looks like, the curated examples below should help.



A total of 20 MPI_Free_mem code examples are shown below, ordered by popularity.
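MPI_Free_mem releases memory that was obtained from MPI_Alloc_mem; the two calls must be paired on the same base pointer, and such memory must never be passed to free(). As a warm-up before the collected examples, here is a minimal self-contained sketch of the pairing (written for this article, not drawn from the projects below):

#include <mpi.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);

    /* Ask the MPI library for 1024 bytes; implementations may return
     * specially registered memory that accelerates RMA operations. */
    char *buf = NULL;
    MPI_Alloc_mem(1024, MPI_INFO_NULL, &buf);

    memset(buf, 'x', 1024);           /* use it like any other buffer */
    printf("first byte: %c\n", buf[0]);

    /* Memory from MPI_Alloc_mem must be released with MPI_Free_mem,
     * never with free(). */
    MPI_Free_mem(buf);

    MPI_Finalize();
    return 0;
}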

Example 1: main

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* SMP_Bcast is defined elsewhere in the PersistentCollectives project;
 * the prototype below is assumed for this excerpt. */
int SMP_Bcast(void *buf, int count, MPI_Datatype type, int root, MPI_Comm comm);

int main(int argc, char* argv[])
{
    MPI_Comm MPI_COMM_NODE; /* node-local communicator, created next */
    MPI_Init(&argc,&argv);
    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &MPI_COMM_NODE);

    int n = (argc>1) ? atoi(argv[1]) : 1000;

    int wrank, wsize;
    MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
    MPI_Comm_size(MPI_COMM_WORLD, &wsize);

    int nrank, nsize;
    /* node-local rank/size must come from MPI_COMM_NODE, not MPI_COMM_WORLD */
    MPI_Comm_rank(MPI_COMM_NODE, &nrank);
    MPI_Comm_size(MPI_COMM_NODE, &nsize);

    char * buf1 = NULL;
    char * buf2 = NULL;
    MPI_Alloc_mem(n, MPI_INFO_NULL, &buf1);
    MPI_Alloc_mem(n, MPI_INFO_NULL, &buf2);

    memset(buf1, nrank==0 ? 'Z' : 'A', n);
    memset(buf2, nrank==0 ? 'Z' : 'A', n);

    double t0, t1, dt;
    for (int r=0; r<20; r++) {
        MPI_Barrier(MPI_COMM_WORLD);
        t0 = MPI_Wtime();
        MPI_Bcast(buf1, n, MPI_CHAR, 0, MPI_COMM_NODE);
        t1 = MPI_Wtime();
        dt = t1-t0;
        printf("%d: MPI_Bcast: %lf seconds, %lf MB/s \n", wrank, dt, n*(1.e-6/dt));
        fflush(stdout);

        MPI_Barrier(MPI_COMM_WORLD);
        t0 = MPI_Wtime();
        SMP_Bcast(buf2, n, MPI_CHAR, 0, MPI_COMM_NODE);
        t1 = MPI_Wtime();
        dt = t1-t0;
        printf("%d: SMP_Bcast: %lf seconds, %lf MB/s \n", wrank, dt, n*(1.e-6/dt));
        fflush(stdout);

        if (r==0) {
            char * tmp = malloc(n);
            memset(tmp, 'Z', n);
            int err1 = memcmp(tmp, buf1, n);
            int err2 = memcmp(tmp, buf2, n);
            /* memcmp can return a negative value, so test for nonzero */
            if (err1!=0 || err2!=0) {
                printf("%d: errors: MPI (%d), SMP (%d) \n", wrank, err1, err2);
            }
            free(tmp);
        }
    }

    MPI_Free_mem(buf1);
    MPI_Free_mem(buf2);

    MPI_Comm_free(&MPI_COMM_NODE);

    MPI_Finalize();
    return 0;
}
Developer: jeffhammond, Project: PersistentCollectives, Lines: 60, Source: intranode-bcast.c


Example 2: main

#include <mpi.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* BigMPI_Get_max_int and MPIX_Scatter_x come from the BigMPI library;
 * verify_buffer is a helper from its test harness (header name assumed). */
#include "bigmpi.h"

int main(int argc, char * argv[])
{
    const MPI_Count test_int_max = BigMPI_Get_max_int();

    MPI_Init(&argc, &argv);

    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (size<1) {
        printf("Use 1 or more processes. \n");
        MPI_Finalize();
        return 1;
    }

    int l = (argc > 1) ? atoi(argv[1]) : 2;
    int m = (argc > 2) ? atoi(argv[2]) : 17777;
    MPI_Count n = l * test_int_max + m;

    char * buf_send = NULL;
    char * buf_recv = NULL;

    MPI_Alloc_mem((MPI_Aint)n * size, MPI_INFO_NULL, &buf_send);
    assert(buf_send!=NULL);
    MPI_Alloc_mem((MPI_Aint)n,        MPI_INFO_NULL, &buf_recv);
    assert(buf_recv!=NULL);

    if (rank==0) {
        for (int i = 0; i < size; ++i) {
            for (MPI_Count j = 0; j < n; ++j) {
                buf_send[i*n+j] = (unsigned char)i;
            }
        }
    }
    memset(buf_recv, -1, (size_t)n);

    /* collective communication */
    MPIX_Scatter_x(buf_send, n, MPI_CHAR,
                   buf_recv, n, MPI_CHAR,
                   0 /* root */, MPI_COMM_WORLD);

    size_t errors = verify_buffer(buf_recv, n, rank);

    MPI_Free_mem(buf_send);
    MPI_Free_mem(buf_recv);

    if (rank==0 && errors==0) {
        printf("SUCCESS\n");
    }

    MPI_Finalize();

    return 0;
}
Developer: caomw, Project: BigMPI, Lines: 55, Source: test_scatter_x.c


Example 3: main

int main (int argc, char *argv[])
{
	/* init_mpi, check_usage, print_header, allocate_memory, and benchmark
	 * (as well as MAX_MSG_SZ and ITERS_LARGE) are defined elsewhere in
	 * the oshmpi benchmark source. */
	struct pe_vars v;
	long * msg_buffer;
	/*
	 * Initialize
	 */
	init_mpi(&v);
	check_usage(argc, argv, v.npes, v.me);
	print_header(v.me);

	if (v.me == 0) printf("Total processes = %d\n",v.npes);
	/*
	 * Allocate Memory
	 */
	msg_buffer = allocate_memory(v.me, &(v.win) );
	memset(msg_buffer, 0, MAX_MSG_SZ * ITERS_LARGE * sizeof(long));
	/*
	 * Time Put Message Rate
	 */
	benchmark(msg_buffer, v.me, v.pairs, v.nxtpe, v.win);
	/*
	 * Finalize
	 */
	MPI_Win_unlock_all(v.win);
	MPI_Win_free(&v.win); 	
	MPI_Free_mem(msg_buffer);

	MPI_Finalize();

	return EXIT_SUCCESS;
}
Developer: coti, Project: oshmpi, Lines: 32, Source: osu_mpi_put_mr.c


Example 4: main

#include <mpi.h>

#define NEL    1000  /* array length (value assumed for this excerpt) */
#define NTIMES 100   /* create/free iterations (value assumed) */

int main (int argc,char *argv[]) {
  int       i;
  double    w[NEL];
  MPI_Aint  win_size,warr_size;
  MPI_Win  *win;

  win_size=sizeof(MPI_Win);
  /* sizeof(double), not sizeof(MPI_DOUBLE): MPI_DOUBLE is a datatype
   * handle, whose size is unrelated to the C double type */
  warr_size=sizeof(double)*NEL;

  MPI_Init (&argc, &argv);
  
  for(i=0;i<NTIMES;i++) {
      MPI_Alloc_mem(win_size,MPI_INFO_NULL,&win);

      MPI_Win_create(w,warr_size,sizeof(double),MPI_INFO_NULL,MPI_COMM_WORLD,win);
      MPI_Win_free(win);

      MPI_Free_mem(win);
  }

  MPI_Finalize();

  return 0;

}
Developer: gpaulsen, Project: ompi-www, Lines: 26, Source: mleak.win.c


Example 5: IMB_del_r_buf

void IMB_del_r_buf(struct comm_info* c_info)
/*
   Deletes the recv buffer component of c_info.

   In/out variables:
   -c_info (type struct comm_info*)
      Collection of all base data for MPI; see [1] for more information.
*/
{
    /* July 2002 V2.2.1 change: use MPI_Free_mem */
    if (c_info->r_alloc > 0)
    {
#if (defined EXT || defined MPIIO || defined RMA)
        MPI_Free_mem( c_info->r_buffer );
#else
        IMB_v_free( (void**)&c_info->r_buffer );
#endif

        c_info->r_alloc  = 0;
        c_info->r_buffer = NULL;
    }
}
Developer: hoelzlw, Project: hpc-lab, Lines: 32, Source: IMB_mem_manager.c


Example 6: MTestFreeWin

/* Free the storage associated with a window object */
void MTestFreeWin(MPI_Win * win)
{
    void *addr;
    int flag, merr;

    merr = MPI_Win_get_attr(*win, MPI_WIN_BASE, &addr, &flag);
    if (merr)
        MTestPrintError(merr);
    if (!flag) {
        MTestError("Could not get WIN_BASE from window");
    }
    if (addr) {
        void *val;
        merr = MPI_Win_get_attr(*win, mem_keyval, &val, &flag);
        if (merr)
            MTestPrintError(merr);
        if (flag) {
            if (val == (void *) 1) {
                free(addr);
            }
            else if (val == (void *) 2) {
                merr = MPI_Free_mem(addr);
                if (merr)
                    MTestPrintError(merr);
            }
            /* if val == (void *)0, then static data that must not be freed */
        }
    }
    merr = MPI_Win_free(win);
    if (merr)
        MTestPrintError(merr);
}
Developer: mpoquet, Project: simgrid, Lines: 33, Source: mtest.c


Example 7: main

int main(int argc, char *argv[])
{
    int errs = 0, err;
    int j, count;
    char *ap;

    MTest_Init(&argc, &argv);

    /* MPI_Errhandler_set is deprecated; use MPI_Comm_set_errhandler */
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    for (count = 1; count < 128000; count *= 2) {

        err = MPI_Alloc_mem(count, MPI_INFO_NULL, &ap);
        if (err) {
            int errclass;
            /* An error of  MPI_ERR_NO_MEM is allowed */
            MPI_Error_class(err, &errclass);
            if (errclass != MPI_ERR_NO_MEM) {
                errs++;
                MTestPrintError(err);
            }

        } else {
            /* Access all of this memory */
            for (j = 0; j < count; j++) {
                ap[j] = (char) (j & 0x7f);
            }
            MPI_Free_mem(ap);
        }
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
Developer: ParaStation, Project: psmpi2, Lines: 33, Source: allocmem.c


Example 8: updateRemoteConstants

void SweptDiscretization2D::updateRemoteConstants(unsigned char *buffer)
{
	void *sendingBuffer = NULL;

	if(pg.rank == 0)
	{
		int bufferSize = this->remoteConstantsCount * n * n * pg.mpiSize * sizeof(double);
		MPI_Alloc_mem(bufferSize, MPI_INFO_NULL, &sendingBuffer);
		
		for(int r=0;r<pg.mpiSize;r++)
		{
			double *processing = (double*)sendingBuffer + (this->remoteConstantsCount * n * n * r);
			int jIndex = (r % (pg.xNodes*pg.yNodes)) / pg.xNodes;
			int iIndex = r % pg.xNodes;
			for(int j=0;j<n;j++)
			{
				for(int i=0;i<n;i++)
				{
					int iGlobal = n*iIndex + (i);
					int jGlobal = n*jIndex + (j);
					int index   = this->ijToConstantIndex(i,j);
					int globalIndex = this->remoteConstantsCount * (iGlobal + jGlobal * n * pg.xNodes);
					for(int k=0;k<this->remoteConstantsCount;k++)
					{
						processing[index + k] = ((double*)buffer)[k + globalIndex];
					}					
				}
			}		
		}
	}
	
	MPI_Win_fence(MPI_MODE_NOPRECEDE, this->constantsWindow);
	if(pg.rank == 0)
	{
		for(int r=0;r<pg.mpiSize;r++)
		{
			MPI_Put((unsigned char*)sendingBuffer + (r * remoteConstantsCount * n * n * sizeof(double)), remoteConstantsCount * n * n * sizeof(double), MPI_BYTE, r, 0, remoteConstantsCount * n * n * sizeof(double), MPI_BYTE, constantsWindow);			
		}
	}
	MPI_Win_fence((MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED), this->constantsWindow);

	if(pg.rank == 0)
	{
		MPI_Free_mem(sendingBuffer);					
	}
	for(int i=1;i<n+1;i++)
	{
		for(int j=1;j<n+1;j++)
		{
			for(int k=0;k<this->remoteConstantsCount;k++)
			{
				int windowIndex    = this->ijToConstantIndex(i-1,j-1);
				int foundationIndex = this->ijToIndex(i,j);
				this->foundation[foundationIndex + k] = this->remoteConstants[windowIndex + k];
			}
		}
	}
}
Developer: hubailmm, Project: Euler2D, Lines: 59, Source: SweptDiscretization2D.cpp


Example 9: MPI_Free_mem

int MPI_Free_mem(void* baseptr)
{
    if (max_ep > 0)
    {
        EPLIB_free(baseptr);
        return MPI_SUCCESS;
    }
    /* Fall through to the MPI library's implementation via the profiling
     * interface; calling MPI_Free_mem here would recurse infinitely. */
    return PMPI_Free_mem(baseptr);
}
Developer: rscohn2, Project: MLSL, Lines: 9, Source: wrapper.c


Example 10: main

// The globals array[], items, and sum, plus sum__sequential_reference_implementation(),
// are defined elsewhere in the tutorial source file.
int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  int my_rank; // Number of the node
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

  int node_count; // Total number of nodes
  MPI_Comm_size(MPI_COMM_WORLD, &node_count);
  
  // The root must load the input data to distribute to the other nodes
  if(my_rank == 0) {
    // In our case it generates a random array as input data
    srand(time(NULL));
    for(int item = 0; item < items; ++item)
      array[item] = rand();
  }
  
  int items_per_rank = items / node_count;
  int remainder_items = items % node_count;
  int* my_work;
  MPI_Alloc_mem(items_per_rank * sizeof(int), MPI_INFO_NULL, &my_work);
 
  // MPI_Scatter is a collective operation which distributes an equal-sized part of the given array to each node.
  MPI_Scatter(&array[remainder_items] /* send buffer */, items_per_rank /* send count per node */, MPI_INT /* send type */,
	      my_work /* receive buffer on each node */, items_per_rank /* receive count */ , MPI_INT /* receive type */, 
	      0 /* send buffer is stored on this rank */, MPI_COMM_WORLD /* communication channel */);
 
  // This is the actual working-loop
  long sub_sum = 0;
  for(int i=0; i < items_per_rank; i++)
    sub_sum += my_work[i];

  if(my_rank == 0) { // Scatter cannot deal with a division remainder so we manually deal with it
    while(remainder_items > 0)
      sub_sum += array[--remainder_items];
  }

  MPI_Free_mem(my_work);

  // MPI_Reduce with op-code MPI_SUM is a collective operation which sums up the input sub_sum of each node
  // into single a resulting output sum on the master.
  MPI_Reduce(&sub_sum /* input to sum up */, &sum /* output */, 1 /* input count */, MPI_LONG /* input type */,
	     MPI_SUM /* operation */, 0 /* output is stored on this rank */, MPI_COMM_WORLD /* communication channel */);
 
  if(my_rank == 0) {
    // The result of the computation now is available on rank 0.
    // We compare it with the sequential reference implementation to test our parallel implementation.
    if(sum == sum__sequential_reference_implementation())
      fprintf(stderr, "Test OK.\n");
    else
      fprintf(stderr, "Test FAILED!\n");
  }

  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();
  return EXIT_SUCCESS;
}
Developer: him-28, Project: mpi-debug-tutorial, Lines: 57, Source: debug-tutorial-3-fixed.c


Example 11: allGatherAllOutputToFile

void SweptDiscretization2D::allGatherAllOutputToFile(string filename)
{

	void *buffer = NULL;
	FILE *output;
	if(pg.rank == 0)
	{
		MPI_Alloc_mem(foundationSize * pg.mpiSize * sizeof(double), MPI_INFO_NULL, &buffer);		
		output = fopen(filename.c_str(),"wb");
	}

	MPI_Win_fence((MPI_MODE_NOPUT | MPI_MODE_NOPRECEDE), foundationWindow);
	if(pg.rank == 0)
	{
		for(int r=0;r<pg.mpiSize;r++)
		{
			MPI_Get((char*)buffer + (r * foundationSize * sizeof(double)), foundationSize * sizeof(double), MPI_BYTE, r, 0, foundationSize * sizeof(double), MPI_BYTE, foundationWindow);			
		}
	}
	MPI_Win_fence(MPI_MODE_NOSUCCEED, foundationWindow);
	
	if(pg.rank == 0)
	{
		int w = (n * pg.xNodes);
		int h = (n * pg.yNodes);
		int resultArraySize = w * h;
		if(resultArray == NULL)
			resultArray = (double*) malloc(resultArraySize * sizeof(double) * outputLength);
		
		for(int r=0;r<pg.mpiSize;r++)
		{
			double *processing = (double*)buffer + (foundationSize * r);			
			int jIndex = (r % (pg.xNodes*pg.yNodes)) / pg.xNodes;
			int iIndex = r % pg.xNodes;
			for(int j=1;j<n+1;j++)
			{
				for(int i=1;i<n+1;i++)
				{
					int iGlobal = n*iIndex + (i-1);
					int jGlobal = n*jIndex + (j-1);
					int index   = this->ijToIndex(i,j);
					for(int point=0;point<outputLength;point++)
					{
						double val  = processing[index + constants + point];
						int resultIndex = (iGlobal + jGlobal * n * pg.xNodes) * outputLength + point;
						resultArray[resultIndex] = val;
					}					
				}
			}		
		}
		// write every output component for every grid point
		fwrite((const void*)resultArray,sizeof(double),(size_t)resultArraySize * outputLength,output);
		fclose(output);
		MPI_Free_mem(buffer);
				
	}
	MPI_Barrier(MPI_COMM_WORLD);
}
Developer: hubailmm, Project: Euler2D, Lines: 57, Source: SweptDiscretization2D.cpp


Example 12: _ZMPI_Alltoall_int_proclists_put

static int _ZMPI_Alltoall_int_proclists_put(int alloc_mem, int nphases, int *sendbuf, int nsprocs, int *sprocs, int *recvbuf, int nrprocs, int *rprocs, MPI_Comm comm)
{
  int i, p, size, rank, *rcounts_put;

  MPI_Win win;


  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  if (alloc_mem) MPI_Alloc_mem(size * sizeof(int), MPI_INFO_NULL, &rcounts_put);
  else rcounts_put = recvbuf;

  if (nrprocs >= 0)
    for (i = 0; i < nrprocs; ++i) rcounts_put[rprocs[i]] = DEFAULT_INT;
  else
    for (i = 0; i < size; ++i) rcounts_put[i] = DEFAULT_INT;

  MPI_Win_create(rcounts_put, size * sizeof(int), sizeof(int), MPI_INFO_NULL, comm, &win);
  MPI_Win_fence(MPI_MODE_NOSTORE|MPI_MODE_NOPRECEDE, win);

  for (p = 0; p < nphases; ++p)
  {
/*    printf("%d: phase = %d of %d\n", rank, p, nphases);*/
  
    if (rank % nphases == p)
    {
      if (nsprocs >= 0)
      {
        for (i = 0; i < nsprocs; ++i)
          if (sendbuf[sprocs[i]] != DEFAULT_INT) MPI_Put(&sendbuf[sprocs[i]], 1, MPI_INT, sprocs[i], rank, 1, MPI_INT, win);

      } else
      {
        for (i = 0; i < size; ++i)
          if (sendbuf[i] != DEFAULT_INT) MPI_Put(&sendbuf[i], 1, MPI_INT, i, rank, 1, MPI_INT, win);
      }
    }

    if (p < nphases - 1) MPI_Win_fence(0, win);
  }

  MPI_Win_fence(MPI_MODE_NOPUT|MPI_MODE_NOSUCCEED, win);
  MPI_Win_free(&win);

  if (alloc_mem)
  {
    if (nrprocs >= 0)
      for (i = 0; i < nrprocs; ++i) recvbuf[rprocs[i]] = rcounts_put[rprocs[i]];
    else
      for (i = 0; i < size; ++i) recvbuf[i] = rcounts_put[i];

    MPI_Free_mem(rcounts_put);    
  }

  return MPI_SUCCESS;
}
Developer: fweik, Project: scafacos, Lines: 57, Source: zmpi_tools.c


Example 13: mpp_free

void mpp_free (void *buf)
{
#if HAVE_MPI_ALLOC_MEM
    if (use_mpi_alloc) 
	MPI_Free_mem (buf);
    else 
#endif	
	free (buf);
    return;
}
Developer: RWTH-OS, Project: MP-MPICH, Lines: 10, Source: mem.c


Example 14: socket_freeMem

/**
  * Wrappers for MPI_Free_mem for computers which may not have them (MPI-1 computers).
  */
static void socket_freeMem(socket_t * s)
{
	assert(s->buffer);
#ifndef MC_NO_MPI_ALLOC_MEM
	MPI_Free_mem(s->buffer);
#else
	free(s->buffer);
#endif
	s->buffer = 0;
}
Developer: binarycode, Project: mandor2, Lines: 13, Source: misc_socket.c
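The wrapper above has a natural allocation-side twin that picks MPI_Alloc_mem or malloc under the same guard. The following is a hypothetical sketch, not taken from the mandor2 source; the name socket_allocMem and the size parameter are assumptions:

/* Hypothetical counterpart to socket_freeMem above; not from mandor2. */
static void socket_allocMem(socket_t * s, size_t size)
{
	assert(!s->buffer);               /* must not leak a previous buffer */
#ifndef MC_NO_MPI_ALLOC_MEM
	MPI_Alloc_mem((MPI_Aint) size, MPI_INFO_NULL, &s->buffer);
#else
	s->buffer = malloc(size);
#endif
	assert(s->buffer);
}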


Example 15: mpiofstream::flush

void mpiofstream::flush()
{
  MPI_Status status;
  const std::string s = ss.str();  // snapshot once; ss.str() builds a fresh copy on every call
  char* buf;
  MPI_Alloc_mem(s.length()+1, MPI_INFO_NULL, &buf);
  strcpy(buf, s.c_str());
  MPI_File_write_shared(fh, buf, s.length(), MPI_CHAR, &status);
  MPI_Free_mem(buf);
  ss.str("");
}
Developer: ConstantinV, Project: Mauve-Analysis, Lines: 10, Source: mpiutils.cpp


Example 16: PARMCI_Acc

/** One-sided accumulate operation.
  *
  * @param[in] datatype ARMCI data type for the accumulate operation (see armci.h)
  * @param[in] scale    Pointer for a scalar of type datatype that will be used to
  *                     scale values in the source buffer
  * @param[in] src      Source address (local)
  * @param[in] dst      Destination address (remote, on process proc)
  * @param[in] bytes    Number of bytes to transfer
  * @param[in] proc     Process id to target
  * @return             0 on success, non-zero on failure
  */
int PARMCI_Acc(int datatype, void *scale, void *src, void *dst, int bytes, int proc) {
  void  *src_buf;
  int    count, type_size, scaled;
  MPI_Datatype type;
  gmr_t *src_mreg, *dst_mreg;

  /* If NOGUARD is set, assume the buffer is not shared */
  if (ARMCII_GLOBAL_STATE.shr_buf_method != ARMCII_SHR_BUF_NOGUARD)
    src_mreg = gmr_lookup(src, ARMCI_GROUP_WORLD.rank);
  else
    src_mreg = NULL;

  dst_mreg = gmr_lookup(dst, proc);

  ARMCII_Assert_msg(dst_mreg != NULL, "Invalid remote pointer");

  /* Prepare the input data: Apply scaling if needed and acquire the DLA lock if
   * needed.  We hold the DLA lock if (src_buf == src && src_mreg != NULL). */

  scaled = ARMCII_Buf_acc_is_scaled(datatype, scale);

  if (scaled) {
      MPI_Alloc_mem(bytes, MPI_INFO_NULL, &src_buf);
      ARMCII_Assert(src_buf != NULL);
      ARMCII_Buf_acc_scale(src, src_buf, bytes, datatype, scale);
  } else {
    src_buf = src;
  }

  /* Check if we need to copy: user requested it or same mem region */
  if (   (src_buf == src) /* buf_prepare didn't make a copy */
      && (ARMCII_GLOBAL_STATE.shr_buf_method == ARMCII_SHR_BUF_COPY || src_mreg == dst_mreg) )
  {
    MPI_Alloc_mem(bytes, MPI_INFO_NULL, &src_buf);
    ARMCII_Assert(src_buf != NULL);
    ARMCI_Copy(src, src_buf, bytes);
  }

  ARMCII_Acc_type_translate(datatype, &type, &type_size);
  count = bytes/type_size;

  ARMCII_Assert_msg(bytes % type_size == 0, 
      "Transfer size is not a multiple of the datatype size");

  /* TODO: Support a local accumulate operation more efficiently */

  gmr_accumulate(dst_mreg, src_buf, dst, count, type, proc);
  gmr_flush(dst_mreg, proc, 1); /* flush_local */

  if (src_buf != src)
    MPI_Free_mem(src_buf);

  return 0;
}
Developer: jeffhammond, Project: armci-mpi, Lines: 65, Source: onesided.c


Example 17: ARMCII_Buf_finish_acc_vec

/** Finish a set of prepared buffers.  Will perform communication and copies as
  * needed to ensure results are in the original buffers.  Temporary space will be
  * freed.
  *
  * @param[in]  orig_bufs Original set of buffers.
  * @param[out] new_bufs  Set of private buffers.
  * @param[in]  count     Number of entries in the buffer list.
  * @param[in]  size      The size of the buffers (all are of the same size).
  */
void ARMCII_Buf_finish_acc_vec(void **orig_bufs, void **new_bufs, int count, int size) {
  int i;

  for (i = 0; i < count; i++) {
    if (orig_bufs[i] != new_bufs[i]) {
      MPI_Free_mem(new_bufs[i]);
    }
  }

  free(new_bufs);
}
Developer: abhinavvishnu, Project: matex, Lines: 20, Source: buffer.c


Example 18: ARMCIX_Destroy_mutexes_hdl

/** Destroy/free a mutex group.  Collective.
  * 
  * @param[in] hdl Group to destroy
  */
int ARMCIX_Destroy_mutexes_hdl(armcix_mutex_hdl_t hdl) {
  MPI_Win_free(&hdl->window);
  
  if (hdl->base) 
    MPI_Free_mem(hdl->base);

  MPI_Comm_free(&hdl->comm);

  free(hdl);
  
  return 0;
}
Developer: jeffhammond, Project: armci-mpi, Lines: 16, Source: mutex_hdl_spin.c


Example 19: test_put

void test_put(void)
{
    int me, nproc;
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_rank(MPI_COMM_WORLD, &me);
    MPI_Win dst_win;
    double *dst_buf;
    double src_buf[MAXELEMS];
    int i, j;

    MPI_Alloc_mem(sizeof(double) * nproc * MAXELEMS, MPI_INFO_NULL, &dst_buf);
    MPI_Win_create(dst_buf, sizeof(double) * nproc * MAXELEMS, 1, MPI_INFO_NULL, MPI_COMM_WORLD,
                   &dst_win);

    for (i = 0; i < MAXELEMS; i++)
        src_buf[i] = me + 1.0;

    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, me, 0, dst_win);

    for (i = 0; i < nproc * MAXELEMS; i++)
        dst_buf[i] = 0.0;

    MPI_Win_unlock(me, dst_win);

    MPI_Barrier(MPI_COMM_WORLD);

    for (i = 0; i < nproc; i++) {
        int target = i;

        for (j = 0; j < COUNT; j++) {
            if (verbose)
                printf("%2d -> %2d [%2d]\n", me, target, j);
            MPI_Win_lock(MPI_LOCK_EXCLUSIVE, target, 0, dst_win);
            MPI_Put(&src_buf[j], sizeof(double), MPI_BYTE, target,
                    (me * MAXELEMS + j) * sizeof(double), sizeof(double), MPI_BYTE, dst_win);
            MPI_Win_unlock(target, dst_win);
        }

        for (j = 0; j < COUNT; j++) {
            if (verbose)
                printf("%2d <- %2d [%2d]\n", me, target, j);
            MPI_Win_lock(MPI_LOCK_EXCLUSIVE, target, 0, dst_win);
            MPI_Get(&src_buf[j], sizeof(double), MPI_BYTE, target,
                    (me * MAXELEMS + j) * sizeof(double), sizeof(double), MPI_BYTE, dst_win);
            MPI_Win_unlock(target, dst_win);
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);

    MPI_Win_free(&dst_win);
    MPI_Free_mem(dst_buf);
}
Developer: mpoquet, Project: simgrid, Lines: 53, Source: contention_putget.c


Example 20: ARMCI_Put

/** One-sided put operation.
  *
  * @param[in] src    Source address (local)
  * @param[in] dst    Destination address (remote, on process target)
  * @param[in] size   Number of bytes to transfer
  * @param[in] target Process id to target
  * @return           0 on success, non-zero on failure
  */
int ARMCI_Put(void *src, void *dst, int size, int target) {
  gmr_t *src_mreg, *dst_mreg;

  src_mreg = gmr_lookup(src, ARMCI_GROUP_WORLD.rank);
  dst_mreg = gmr_lookup(dst, target);

  ARMCII_Assert_msg(dst_mreg != NULL, "Invalid remote pointer");

  /* Local operation */
  if (target == ARMCI_GROUP_WORLD.rank) {
    if (ARMCII_GLOBAL_STATE.shr_buf_method != ARMCII_SHR_BUF_NOGUARD) {
      gmr_dla_lock(dst_mreg);
      if (src_mreg) gmr_dla_lock(src_mreg);
    }

    ARMCI_Copy(src, dst, size);
    
    if (ARMCII_GLOBAL_STATE.shr_buf_method != ARMCII_SHR_BUF_NOGUARD) {
      gmr_dla_unlock(dst_mreg);
      if (src_mreg) gmr_dla_unlock(src_mreg);
    }
  }

  /* Origin buffer is private */
  else if (src_mreg == NULL || ARMCII_GLOBAL_STATE.shr_buf_method == ARMCII_SHR_BUF_NOGUARD) {
    gmr_lock(dst_mreg, target);
    gmr_put(dst_mreg, src, dst, size, target);
    gmr_unlock(dst_mreg, target);
  }

  /* COPY: Either origin and target buffers are in the same window and we can't
   * lock the same window twice (MPI semantics) or the user has requested
   * always-copy mode. */
  else {
    void *src_buf;

    MPI_Alloc_mem(size, MPI_INFO_NULL, &src_buf);
    ARMCII_Assert(src_buf != NULL);

    gmr_dla_lock(src_mreg);
    ARMCI_Copy(src, src_buf, size);
    gmr_dla_unlock(src_mreg);

    gmr_lock(dst_mreg, target);
    gmr_put(dst_mreg, src_buf, dst, size, target);
    gmr_unlock(dst_mreg, target);

    MPI_Free_mem(src_buf);
  }

  return 0;
}
Developer: addy004, Project: mpich2-yarn, Lines: 60, Source: onesided.c



Note: The MPI_Free_mem examples in this article were compiled from open-source code hosted on GitHub, MSDocs, and similar platforms. Each snippet is copyright its original author; consult the corresponding project's license before reusing or redistributing the code. Do not republish without permission.

