C++ MPI_Alloc_mem Function Code Examples


This article collects typical usage examples of the MPI_Alloc_mem function in C/C++. If you are looking for concrete answers to questions such as how MPI_Alloc_mem is called and what real-world code that uses it looks like, the selected examples below should help.



A total of 20 MPI_Alloc_mem code examples are shown below, sorted by popularity by default.
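Before the collected examples, here is a minimal, self-contained sketch of the canonical allocate/use/free pattern; the buffer size, error handling, and output below are illustrative choices and are not taken from any of the projects quoted later.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    double *buf = NULL;
    int err;

    MPI_Init(&argc, &argv);

    /* Return errors instead of aborting, so the MPI_Alloc_mem result can be checked. */
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    /* Request memory from MPI; implementations may return registered/pinned
       memory that speeds up RMA and other communication. */
    err = MPI_Alloc_mem(1024 * sizeof(double), MPI_INFO_NULL, &buf);
    if (err != MPI_SUCCESS) {
        /* An error class of MPI_ERR_NO_MEM means the request could not be satisfied. */
        fprintf(stderr, "MPI_Alloc_mem failed\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    buf[0] = 3.14;      /* use it like any other buffer */

    MPI_Free_mem(buf);  /* must be released with MPI_Free_mem, not free() */
    MPI_Finalize();
    return 0;
}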

Example 1: main

int main(int argc, char *argv[])
{
    int errs = 0, err;
    int j, count;
    char *ap;

    MTest_Init(&argc, &argv);

    MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    for (count = 1; count < 128000; count *= 2) {

        err = MPI_Alloc_mem(count, MPI_INFO_NULL, &ap);
        if (err) {
            int errclass;
            /* An error of  MPI_ERR_NO_MEM is allowed */
            MPI_Error_class(err, &errclass);
            if (errclass != MPI_ERR_NO_MEM) {
                errs++;
                MTestPrintError(err);
            }

        } else {
            /* Access all of this memory */
            for (j = 0; j < count; j++) {
                ap[j] = (char) (j & 0x7f);
            }
            MPI_Free_mem(ap);
        }
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
Developer ID: ParaStation, Project: psmpi2, Lines of code: 33, Source file: allocmem.c


Example 2: ARMCIX_Create_mutexes_hdl

/** Create a mutex group.  Collective.
  *
  * @param[in] count Number of mutexes to create on the calling process
  * @return          Handle to the mutex group
  */
armcix_mutex_hdl_t ARMCIX_Create_mutexes_hdl(int count, ARMCI_Group *pgroup) {
  int         ierr, i;
  armcix_mutex_hdl_t hdl;

  hdl = malloc(sizeof(struct armcix_mutex_hdl_s));
  ARMCII_Assert(hdl != NULL);

  MPI_Comm_dup(pgroup->comm, &hdl->comm);

  if (count > 0) {
    MPI_Alloc_mem(count*sizeof(long), MPI_INFO_NULL, &hdl->base);
    ARMCII_Assert(hdl->base != NULL);
  } else {
    hdl->base = NULL;
  }

  hdl->count = count;

  // Initialize mutexes to 0
  for (i = 0; i < count; i++)
    hdl->base[i] = 0;

  ierr = MPI_Win_create(hdl->base, count*sizeof(long), sizeof(long) /* displacement unit */,
                        MPI_INFO_NULL, hdl->comm, &hdl->window);
  ARMCII_Assert(ierr == MPI_SUCCESS);

  return hdl;
}
Developer ID: jeffhammond, Project: armci-mpi, Lines of code: 33, Source file: mutex_hdl_spin.c
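Example 2 only shows creation of the mutex group; whatever the real armci-mpi code does, a matching teardown has to release the window, the MPI-allocated backing memory, the duplicated communicator, and the handle itself. The following is a minimal hypothetical sketch (the function name and its placement are assumptions, and it relies on the same struct fields and headers as the code above):

/* Hypothetical cleanup counterpart (not part of the quoted source): releases
 * everything that ARMCIX_Create_mutexes_hdl above allocated. */
int ARMCIX_Destroy_mutexes_hdl_sketch(armcix_mutex_hdl_t hdl)
{
  MPI_Win_free(&hdl->window);                      /* window created by MPI_Win_create */
  if (hdl->base != NULL) MPI_Free_mem(hdl->base);  /* memory came from MPI_Alloc_mem */
  MPI_Comm_free(&hdl->comm);                       /* communicator came from MPI_Comm_dup */
  free(hdl);                                       /* handle itself came from malloc */
  return 0;
}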


Example 3: ARMCII_Buf_prepare_write_vec

/** Prepare a set of buffers for use with a get operation.  The returned set of
  * buffers is guaranteed to be in private space.  Copies will be made if needed,
  * and the operation should be completed by the corresponding finish call.
  *
  * @param[in]  orig_bufs Original set of buffers.
  * @param[out] new_bufs  Pointer to the set of private buffers.
  * @param[in]  count     Number of entries in the buffer list.
  * @param[in]  size      The size of the buffers (all are of the same size).
  * @return               Number of buffers that were moved.
  */
int ARMCII_Buf_prepare_write_vec(void **orig_bufs, void ***new_bufs_ptr, int count, int size) {
  int num_moved = 0;

  if (ARMCII_GLOBAL_STATE.shr_buf_method != ARMCII_SHR_BUF_NOGUARD) {
    void **new_bufs = malloc(count*sizeof(void*));
    int i;

    for (i = 0; i < count; i++)
      new_bufs[i] = NULL;

    for (i = 0; i < count; i++) {
      // Check if the destination buffer is within a shared region.  If not, create
      // a temporary private buffer to hold the result.
      gmr_t *mreg = gmr_lookup(orig_bufs[i], ARMCI_GROUP_WORLD.rank);

      if (mreg != NULL) {
        MPI_Alloc_mem(size, MPI_INFO_NULL, &new_bufs[i]);
        ARMCII_Assert(new_bufs[i] != NULL);
        num_moved++;
      } else {
        new_bufs[i] = orig_bufs[i];
      }
    }

    *new_bufs_ptr = new_bufs;
  } else {
    *new_bufs_ptr = orig_bufs;
  }
  
  return num_moved;
}
Developer ID: abhinavvishnu, Project: matex, Lines of code: 41, Source file: buffer.c


Example 4: main

int main (int argc,char *argv[]) {
  int       i;
  double    w[NEL];
  MPI_Aint  win_size,warr_size;
  MPI_Win  *win;

  win_size=sizeof(MPI_Win);
  warr_size=sizeof(double)*NEL;   /* element size of the window array; sizeof(MPI_DOUBLE) would give the size of the datatype handle instead */

  MPI_Init (&argc, &argv);
  
  for(i=0;i<NTIMES;i++) {
      MPI_Alloc_mem(win_size,MPI_INFO_NULL,&win);

      MPI_Win_create(w,warr_size,sizeof(double),MPI_INFO_NULL,MPI_COMM_WORLD,win);
      MPI_Win_free(win);

      MPI_Free_mem(win);
  }

  MPI_Finalize();

  return 0;

}
Developer ID: gpaulsen, Project: ompi-www, Lines of code: 26, Source file: mleak.win.c


Example 5: MPI_Info_create

void *mpp_alloc (size_t len)
{
    MPI_Info info;
    void *buf = NULL;

#if HAVE_MPI_ALLOC_MEM    
    if (use_mpi_alloc) {
	MPI_Info_create (&info);
#if 0
	MPI_Info_set (info, "alignment", "4096");
	MPI_Info_set (info, "type", "private");
#endif
	MPI_Alloc_mem (len, info, &buf);
	MPI_Info_free (&info);
   } else 
#endif	
	buf = malloc(len);

    if (buf == NULL) {
	fprintf (stderr, "Could not allocate %zu byte buffer\n", len);
	MPI_Abort (MPI_COMM_WORLD, -1);
    }
    
    return buf;
}
Developer ID: RWTH-OS, Project: MP-MPICH, Lines of code: 25, Source file: mem.c


Example 6: sizeof

void SweptDiscretization2D::updateRemoteConstants(unsigned char *buffer)
{
	void *sendingBuffer = NULL;
	FILE *inFile = NULL;

	if(pg.rank == 0)
	{
		int bufferSize = this->remoteConstantsCount * n * n * pg.mpiSize * sizeof(double);
		MPI_Alloc_mem(bufferSize, MPI_INFO_NULL, &sendingBuffer);
		
		for(int r=0;r<pg.mpiSize;r++)
		{
			double *processing = (double*)sendingBuffer + (this->remoteConstantsCount * n * n * r);
			int jIndex = (r % (pg.xNodes*pg.yNodes)) / pg.xNodes;
			int iIndex = r % pg.xNodes;
			for(int j=0;j<n;j++)
			{
				for(int i=0;i<n;i++)
				{
					int iGlobal = n*iIndex + (i);
					int jGlobal = n*jIndex + (j);
					int index   = this->ijToConstantIndex(i,j);
					int globalIndex = this->remoteConstantsCount * (iGlobal + jGlobal * n * pg.xNodes);
					for(int k=0;k<this->remoteConstantsCount;k++)
					{
						processing[index + k] = ((double*)buffer)[k + globalIndex];
					}					
				}
			}		
		}
	}
	
	MPI_Win_fence(MPI_MODE_NOPRECEDE, this->constantsWindow);
	if(pg.rank == 0)
	{
		for(int r=0;r<pg.mpiSize;r++)
		{
			MPI_Put((unsigned char*)sendingBuffer + (r * remoteConstantsCount * n * n * sizeof(double)), remoteConstantsCount * n * n * sizeof(double), MPI_BYTE, r, 0, remoteConstantsCount * n * n * sizeof(double), MPI_BYTE, constantsWindow);			
		}
	}
	MPI_Win_fence((MPI_MODE_NOSTORE | MPI_MODE_NOSUCCEED), this->constantsWindow);

	if(pg.rank == 0)
	{
		MPI_Free_mem(sendingBuffer);					
	}
	for(int i=1;i<n+1;i++)
	{
		for(int j=1;j<n+1;j++)
		{
			for(int k=0;k<this->remoteConstantsCount;k++)
			{
				int windowIndex    = this->ijToConstantIndex(i-1,j-1);
				int foundationIndex = this->ijToIndex(i,j);
				this->foundation[foundationIndex + k] = this->remoteConstants[windowIndex + k];
			}
		}
	}
}
Developer ID: hubailmm, Project: Euler2D, Lines of code: 59, Source file: SweptDiscretization2D.cpp


Example 7: ompi_alloc_mem_f

void ompi_alloc_mem_f(MPI_Aint *size, MPI_Fint *info, char *baseptr, MPI_Fint *ierr)
{
    int ierr_c;
    MPI_Info c_info = MPI_Info_f2c(*info);

    ierr_c = MPI_Alloc_mem(*size, c_info, baseptr);
    if (NULL != ierr) *ierr = OMPI_INT_2_FINT(ierr_c);
}
Developer ID: Dissolubilis, Project: ompi-svn-mirror, Lines of code: 8, Source file: alloc_mem_f.c


Example 8: main

int main(int argc, char *argv[])
{
    int rank, nprocs, i, *A, *B;
    MPI_Comm CommDeuce;
    MPI_Win win;
    int errs = 0;

    MTest_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (nprocs < 2) {
        printf("Run this program with 2 or more processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Comm_split(MPI_COMM_WORLD, (rank < 2), rank, &CommDeuce);

    if (rank < 2) {
        i = MPI_Alloc_mem(SIZE * sizeof(int), MPI_INFO_NULL, &A);
        if (i) {
            printf("Can't allocate memory in test program\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        i = MPI_Alloc_mem(SIZE * sizeof(int), MPI_INFO_NULL, &B);
        if (i) {
            printf("Can't allocate memory in test program\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        if (rank == 0) {
            for (i = 0; i < SIZE; i++)
                B[i] = 500 + i;
            MPI_Win_create(B, SIZE * sizeof(int), sizeof(int), MPI_INFO_NULL, CommDeuce, &win);
            MPI_Win_fence(0, win);
            for (i = 0; i < SIZE; i++) {
                A[i] = i + 100;
                MPI_Get(&A[i], 1, MPI_INT, 1, i, 1, MPI_INT, win);
            }
            MPI_Win_fence(0, win);
            for (i = 0; i < SIZE; i++)
                if (A[i] != 1000 + i) {
                    SQUELCH(printf("Rank 0: A[%d] is %d, should be %d\n", i, A[i], 1000 + i););
                    errs++;
                }
Developer ID: NexMirror, Project: MPICH, Lines of code: 46, Source file: test5_am.c


Example 9: main

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  int my_rank; // Number of the node
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

  int node_count; // Total number of nodes
  MPI_Comm_size(MPI_COMM_WORLD, &node_count);
  
  // The root must load the input data to distribute to the other nodes
  if(my_rank == 0) {
    // In our case it generates a random array as input data
    srand(time(NULL));
    for(int item = 0; item < items; ++item)
      array[item] = rand();
  }
  
  int items_per_rank = items / node_count;
  int remainder_items = items % node_count;
  int* my_work;
  MPI_Alloc_mem(items_per_rank * sizeof(int), MPI_INFO_NULL, &my_work);
 
  // MPI_Scatter is a collective operation which distributes an equal-sized part of the given array to each node.
  MPI_Scatter(&array[remainder_items] /* send buffer */, items_per_rank /* send count per node */, MPI_INT /* send type */,
	      my_work /* receive buffer on each node */, items_per_rank /* receive count */ , MPI_INT /* receive type */, 
	      0 /* send buffer is stored on this rank */, MPI_COMM_WORLD /* communication channel */);
 
  // This is the actual working-loop
  long sub_sum = 0;
  for(int i=0; i < items_per_rank; i++)
    sub_sum += my_work[i];

  if(my_rank == 0) { // Scatter cannot deal with a division remainder so we manually deal with it
    while(remainder_items > 0)
      sub_sum += array[--remainder_items];
  }

  MPI_Free_mem(my_work);

  // MPI_Reduce with op-code MPI_SUM is a collective operation which sums up the input sub_sum of each node
  // into a single resulting output sum on the master.
  MPI_Reduce(&sub_sum /* input to sum up */, &sum /* output */, 1 /* input count */, MPI_LONG /* input type */,
	     MPI_SUM /* operation */, 0 /* output is stored on this rank */, MPI_COMM_WORLD /* communication channel */);
 
  if(my_rank == 0) {
    // The result of the computation now is available on rank 0.
    // We compare it with the sequential reference implementation to test our parallel implementation.
    if(sum == sum__sequential_reference_implementation())
      fprintf(stderr, "Test OK.\n");
    else
      fprintf(stderr, "Test FAILED!\n");
  }

  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();
  return EXIT_SUCCESS;
}
Developer ID: him-28, Project: mpi-debug-tutorial, Lines of code: 57, Source file: debug-tutorial-3-fixed.c


Example 10: MPI_Alloc_mem

void SweptDiscretization2D::allGatherAllOutputToFile(string filename)
{

	void *buffer = NULL;
	FILE *output;
	if(pg.rank == 0)
	{
		MPI_Alloc_mem(foundationSize * pg.mpiSize * sizeof(double), MPI_INFO_NULL, &buffer);		
		output = fopen(filename.c_str(),"wb");
	}

	MPI_Win_fence((MPI_MODE_NOPUT | MPI_MODE_NOPRECEDE), foundationWindow);
	if(pg.rank == 0)
	{
		for(int r=0;r<pg.mpiSize;r++)
		{
			MPI_Get((char*)buffer + (r * foundationSize * sizeof(double)), foundationSize * sizeof(double), MPI_BYTE, r, 0, foundationSize * sizeof(double), MPI_BYTE, foundationWindow);			
		}
	}
	MPI_Win_fence(MPI_MODE_NOSUCCEED, foundationWindow);
	
	if(pg.rank == 0)
	{
		int w = (n * pg.xNodes);
		int h = (n * pg.yNodes);
		int resultArraySize = w * h;
		if(resultArray == NULL)
			resultArray = (double*) malloc(resultArraySize * sizeof(double) * outputLength);
		
		for(int r=0;r<pg.mpiSize;r++)
		{
			double *processing = (double*)buffer + (foundationSize * r);			
			int jIndex = (r % (pg.xNodes*pg.yNodes)) / pg.xNodes;
			int iIndex = r % pg.xNodes;
			for(int j=1;j<n+1;j++)
			{
				for(int i=1;i<n+1;i++)
				{
					int iGlobal = n*iIndex + (i-1);
					int jGlobal = n*jIndex + (j-1);
					int index   = this->ijToIndex(i,j);
					for(int point=0;point<outputLength;point++)
					{
						double val  = processing[index + constants + point];
						int resultIndex = (iGlobal + jGlobal * n * pg.xNodes) * outputLength + point;
						resultArray[resultIndex] = val;
					}					
				}
			}		
		}
		fwrite((const void*)resultArray,sizeof(double),resultArraySize,output);
		fclose(output);
		MPI_Free_mem(buffer);
				
	}
	MPI_Barrier(MPI_COMM_WORLD);
}
Developer ID: hubailmm, Project: Euler2D, Lines of code: 57, Source file: SweptDiscretization2D.cpp


Example 11: _ZMPI_Alltoall_int_proclists_put

static int _ZMPI_Alltoall_int_proclists_put(int alloc_mem, int nphases, int *sendbuf, int nsprocs, int *sprocs, int *recvbuf, int nrprocs, int *rprocs, MPI_Comm comm)
{
  int i, p, size, rank, *rcounts_put;

  MPI_Win win;


  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  if (alloc_mem) MPI_Alloc_mem(size * sizeof(int), MPI_INFO_NULL, &rcounts_put);
  else rcounts_put = recvbuf;

  if (nrprocs >= 0)
    for (i = 0; i < nrprocs; ++i) rcounts_put[rprocs[i]] = DEFAULT_INT;
  else
    for (i = 0; i < size; ++i) rcounts_put[i] = DEFAULT_INT;

  MPI_Win_create(rcounts_put, size * sizeof(int), sizeof(int), MPI_INFO_NULL, comm, &win);
  MPI_Win_fence(MPI_MODE_NOSTORE|MPI_MODE_NOPRECEDE, win);

  for (p = 0; p < nphases; ++p)
  {
/*    printf("%d: phase = %d of %d\n", rank, p, nphases);*/
  
    if (rank % nphases == p)
    {
      if (nsprocs >= 0)
      {
        for (i = 0; i < nsprocs; ++i)
          if (sendbuf[sprocs[i]] != DEFAULT_INT) MPI_Put(&sendbuf[sprocs[i]], 1, MPI_INT, sprocs[i], rank, 1, MPI_INT, win);

      } else
      {
        for (i = 0; i < size; ++i)
          if (sendbuf[i] != DEFAULT_INT) MPI_Put(&sendbuf[i], 1, MPI_INT, i, rank, 1, MPI_INT, win);
      }
    }

    if (p < nphases - 1) MPI_Win_fence(0, win);
  }

  MPI_Win_fence(MPI_MODE_NOPUT|MPI_MODE_NOSUCCEED, win);
  MPI_Win_free(&win);

  if (alloc_mem)
  {
    if (nrprocs >= 0)
      for (i = 0; i < nrprocs; ++i) recvbuf[rprocs[i]] = rcounts_put[rprocs[i]];
    else
      for (i = 0; i < size; ++i) recvbuf[i] = rcounts_put[i];

    MPI_Free_mem(rcounts_put);    
  }

  return MPI_SUCCESS;
}
Developer ID: fweik, Project: scafacos, Lines of code: 57, Source file: zmpi_tools.c


Example 12: xMPI_Alloc_mem

void* xMPI_Alloc_mem(size_t nbytes) {
  void* p;
  MPI_Alloc_mem(nbytes, MPI_INFO_NULL, &p);
  if (nbytes != 0 && !p) {
    fprintf(stderr, "MPI_Alloc_mem failed for size %zu\n", nbytes);
    throw "OutOfMemoryExpception";
  }
  return p;
}
Developer ID: anshumang, Project: bfs_titech, Lines of code: 9, Source file: utils.hpp


Example 13: MPI_Alloc_mem

void mpiofstream::flush()
{
  MPI_Status status;
  char* buf;
  MPI_Alloc_mem(ss.str().length()+1, MPI_INFO_NULL, &buf);
  strcpy(buf, ss.str().c_str());
  MPI_File_write_shared(fh, buf, ss.str().length(), MPI_CHAR, &status);
  MPI_Free_mem(buf);
  ss.str("");
}
Developer ID: ConstantinV, Project: Mauve-Analysis, Lines of code: 10, Source file: mpiutils.cpp


Example 14: MPI_Comm_rank

MPIMutex::MPIMutex(MPI_Comm _comm) {
	int nproc, rank;
	id = details::mutex_count++;
	comm = _comm;
	MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &nproc);
	MPI_Alloc_mem(nproc, MPI_INFO_NULL, &lock_vector);
	bzero(lock_vector, nproc);
	MPI_Win_create(lock_vector, nproc, sizeof(byte), MPI_INFO_NULL, comm, &win);
};
Developer ID: atrantan, Project: Distributed-Futures, Lines of code: 10, Source file: MPIMutex.cpp


Example 15: xMPI_Alloc_mem

void* xMPI_Alloc_mem(size_t nbytes) {
  void* p;
  MPI_Alloc_mem(nbytes, MPI_INFO_NULL, &p);
  if (nbytes != 0 && !p) {
    fprintf(stderr, "MPI_Alloc_mem failed for size %zu\n", nbytes);
    abort();
  }
  // if (rank == 0) fprintf(stderr, "MEMORY: alloc %zu %p mpi\n", nbytes, p);
  return p;
}
Developer ID: cwmoo740, Project: graph500, Lines of code: 10, Source file: utils.cpp


Example 16: MPI_Alloc_mem

/** Allocate a local buffer suitable for use in one-sided communication
  *
  * @param[in] size Number of bytes to allocate
  * @return         Pointer to the local buffer
  */
void *PARMCI_Malloc_local(armci_size_t size) {
  void *buf;

  MPI_Alloc_mem((MPI_Aint) size, MPI_INFO_NULL, &buf);

  if (ARMCII_GLOBAL_STATE.debug_alloc) {
    ARMCII_Bzero(buf, size);
  }

  return buf;
}
Developer ID: jeffhammond, Project: armci-mpi, Lines of code: 16, Source file: malloc.c


Example 17: socket_allocateMem

/**
  * Wrapper for MPI_Alloc_mem, for machines whose MPI implementation may not provide it (MPI-1 systems).
  */
static void socket_allocateMem(socket_t * s, long long int size, const char *chName)
{
#ifndef MC_NO_MPI_ALLOC_MEM
	MPI_Alloc_mem(size, MPI_INFO_NULL, &(s->buffer));
	if(!s->buffer) error("%s(%s): cannot 'MPI_Alloc_mem' %.2f Kb.", __func__, chName, size / 1024.0);
#else
	s->buffer = malloc(size);
	if(!s->buffer) error("%s(%s): cannot 'malloc' %.2f Kb.", __func__, chName, size / 1024.0);
	msg_fsend("%s(%s): fallback (malloc) is used to allocate %.2f Kb.", __func__, chName, size / 1024.0);
#endif
}
Developer ID: binarycode, Project: mandor2, Lines of code: 14, Source file: misc_socket.c
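The wrapper above only covers allocation. Because memory obtained via MPI_Alloc_mem must be returned with MPI_Free_mem while the malloc fallback must be released with free, a matching release wrapper is needed; the following is a hypothetical sketch (socket_freeMem does not appear in the quoted source) that mirrors the same preprocessor switch:

static void socket_freeMem(socket_t * s)
{
#ifndef MC_NO_MPI_ALLOC_MEM
	if (s->buffer) MPI_Free_mem(s->buffer);   /* buffer came from MPI_Alloc_mem */
#else
	free(s->buffer);                          /* fallback path used malloc */
#endif
	s->buffer = NULL;
}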


Example 18: test_put

void test_put(void)
{
    int me, nproc;
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_rank(MPI_COMM_WORLD, &me);
    MPI_Win dst_win;
    double *dst_buf;
    double src_buf[MAXELEMS];
    int i, j;

    MPI_Alloc_mem(sizeof(double) * nproc * MAXELEMS, MPI_INFO_NULL, &dst_buf);
    MPI_Win_create(dst_buf, sizeof(double) * nproc * MAXELEMS, 1, MPI_INFO_NULL, MPI_COMM_WORLD,
                   &dst_win);

    for (i = 0; i < MAXELEMS; i++)
        src_buf[i] = me + 1.0;

    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, me, 0, dst_win);

    for (i = 0; i < nproc * MAXELEMS; i++)
        dst_buf[i] = 0.0;

    MPI_Win_unlock(me, dst_win);

    MPI_Barrier(MPI_COMM_WORLD);

    for (i = 0; i < nproc; i++) {
        int target = i;

        for (j = 0; j < COUNT; j++) {
            if (verbose)
                printf("%2d -> %2d [%2d]\n", me, target, j);
            MPI_Win_lock(MPI_LOCK_EXCLUSIVE, target, 0, dst_win);
            MPI_Put(&src_buf[j], sizeof(double), MPI_BYTE, target,
                    (me * MAXELEMS + j) * sizeof(double), sizeof(double), MPI_BYTE, dst_win);
            MPI_Win_unlock(target, dst_win);
        }

        for (j = 0; j < COUNT; j++) {
            if (verbose)
                printf("%2d <- %2d [%2d]\n", me, target, j);
            MPI_Win_lock(MPI_LOCK_EXCLUSIVE, target, 0, dst_win);
            MPI_Get(&src_buf[j], sizeof(double), MPI_BYTE, target,
                    (me * MAXELEMS + j) * sizeof(double), sizeof(double), MPI_BYTE, dst_win);
            MPI_Win_unlock(target, dst_win);
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);

    MPI_Win_free(&dst_win);
    MPI_Free_mem(dst_buf);
}
Developer ID: mpoquet, Project: simgrid, Lines of code: 53, Source file: contention_putget.c


Example 19: ARMCI_Put

/** One-sided put operation.
  *
  * @param[in] src    Source address (remote)
  * @param[in] dst    Destination address (local)
  * @param[in] size   Number of bytes to transfer
  * @param[in] target Process id to target
  * @return           0 on success, non-zero on failure
  */
int ARMCI_Put(void *src, void *dst, int size, int target) {
  gmr_t *src_mreg, *dst_mreg;

  src_mreg = gmr_lookup(src, ARMCI_GROUP_WORLD.rank);
  dst_mreg = gmr_lookup(dst, target);

  ARMCII_Assert_msg(dst_mreg != NULL, "Invalid remote pointer");

  /* Local operation */
  if (target == ARMCI_GROUP_WORLD.rank) {
    if (ARMCII_GLOBAL_STATE.shr_buf_method != ARMCII_SHR_BUF_NOGUARD) {
      gmr_dla_lock(dst_mreg);
      if (src_mreg) gmr_dla_lock(src_mreg);
    }

    ARMCI_Copy(src, dst, size);
    
    if (ARMCII_GLOBAL_STATE.shr_buf_method != ARMCII_SHR_BUF_NOGUARD) {
      gmr_dla_unlock(dst_mreg);
      if (src_mreg) gmr_dla_unlock(src_mreg);
    }
  }

  /* Origin buffer is private */
  else if (src_mreg == NULL || ARMCII_GLOBAL_STATE.shr_buf_method == ARMCII_SHR_BUF_NOGUARD) {
    gmr_lock(dst_mreg, target);
    gmr_put(dst_mreg, src, dst, size, target);
    gmr_unlock(dst_mreg, target);
  }

  /* COPY: Either origin and target buffers are in the same window and we can't
   * lock the same window twice (MPI semantics) or the user has requested
   * always-copy mode. */
  else {
    void *src_buf;

    MPI_Alloc_mem(size, MPI_INFO_NULL, &src_buf);
    ARMCII_Assert(src_buf != NULL);

    gmr_dla_lock(src_mreg);
    ARMCI_Copy(src, src_buf, size);
    gmr_dla_unlock(src_mreg);

    gmr_lock(dst_mreg, target);
    gmr_put(dst_mreg, src_buf, dst, size, target);
    gmr_unlock(dst_mreg, target);

    MPI_Free_mem(src_buf);
  }

  return 0;
}
Developer ID: addy004, Project: mpich2-yarn, Lines of code: 60, Source file: onesided.c


Example 20: main

int main( int argc, char *argv[] )
{
    int errs = 0;
    int rank, size, i;
    MPI_Comm      comm;
    MPI_Win       win;
    int           *winbuf, count;

    MTest_Init( &argc, &argv );

    comm = MPI_COMM_WORLD;

    MPI_Comm_rank( comm, &rank );
    MPI_Comm_size( comm, &size );

    /* Allocate and initialize buf */
    count  = 1000;

    MPI_Alloc_mem( count*sizeof(int), MPI_INFO_NULL, &winbuf );

    MPI_Win_create( winbuf, count * sizeof(int), sizeof(int), MPI_INFO_NULL, 
		    comm, &win );

    /* Clear winbuf */
    memset( winbuf, 0, count*sizeof(int) );

    /* Note that for i == rank, this is a useful operation - it allows 
       the programmer to use direct loads and stores, rather than 
       put/get/accumulate, to access the local memory window. */
    for (i=0; i<size; i++) {
	MPI_Win_lock( MPI_LOCK_EXCLUSIVE, i, 0, win );
	MPI_Win_unlock( i, win );
    }

    for (i=0; i<size; i++) {
	MPI_Win_lock( MPI_LOCK_SHARED, i, 0, win );
	MPI_Win_unlock( i, win );
    }

    MPI_Win_free( &win );
    MPI_Free_mem( winbuf );

    /* If this test completes, no error has been found */
    /* A more complete test may ensure that local locks in fact block
       remote, exclusive locks */
    MTest_Finalize( errs );

    MPI_Finalize();
    return 0;
}
Developer ID: Julio-Anjos, Project: simgrid, Lines of code: 50, Source file: locknull.c



Note: the MPI_Alloc_mem examples in this article were collected by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets come from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution or use should follow the license of the corresponding project. Do not reproduce without permission.

