
C++ MPI_Finalize Function Code Examples


This article collects typical usage examples of the C++ MPI_Finalize function. If you have been wondering how MPI_Finalize is used in practice — what concrete calls to it look like in real code — the hand-picked examples below should help.



A total of 20 code examples of the MPI_Finalize function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
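Before diving into the examples, here is a minimal sketch of the canonical pattern they all share: MPI_Init is the first MPI call in the program, MPI_Finalize is the last, and every rank must call both exactly once. (This skeleton is illustrative and not taken from any of the projects below.)

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int rank, size;

    MPI_Init(&argc, &argv);                /* must precede all other MPI calls */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    printf("Hello from rank %d of %d\n", rank, size);

    MPI_Finalize();                        /* last MPI call; releases MPI's internal state */
    return 0;
}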

Example 1: main


//......... [part of the code omitted] .........
  typedef std::chrono::duration<int,std::milli> millisecs_t ;

  int numprocs, rank, edge, pixel_count, start, end;
  double max_values_sq;
  Uint32 max_iter;
  
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if(numprocs <= 1)
  {
    std::cerr << argv[0] << ": error: requires at least two MPI processes\n";
    MPI_Finalize(); // shut MPI down cleanly before the early return
    return 1;
  }
  
  max_values_sq = 4.0;
  max_iter = 5000;

  edge = (MAX_X * MAX_Y) / (numprocs - 1);

  if(rank > 0)
  {
    int tile = rank - 1;

    Uint32* pixels;

    start = tile * edge;
    end = (tile == numprocs - 2) ? MAX_X * MAX_Y : (tile + 1) * edge;
    pixel_count = end - start;

    pixels = (Uint32*) malloc(pixel_count * sizeof(Uint32));
    calc_lines(start, end, pixels, max_values_sq, max_iter);

    MPI_Send((void*)pixels, pixel_count, MPI_UINT32_T, 0, 0, MPI_COMM_WORLD); // Uint32 payload, so use the matching MPI type
    free(pixels);
  }
  else /* rank == 0 */
  {

    int tile, recv_count = (edge + 1);
    char title[100] = ""; // initialized: passed to SDL_WM_SetCaption as the icon title

    Uint32* field = (Uint32*) malloc(MAX_X * MAX_Y * sizeof(Uint32));
    Uint32* fieldpos;

    SDL_Surface* sdlSurface;
    SDL_Event event;
        
    MPI_Status status;

    tStart = std::chrono::high_resolution_clock::now();
    for(tile = 1; tile < numprocs; tile++)
    {
      start = (tile - 1) * edge;
      end = (tile == numprocs - 1) ? MAX_X * MAX_Y : tile * edge;

      pixel_count = end - start;
      recv_count = pixel_count;

      fieldpos = field+start;

      MPI_Recv(fieldpos, recv_count, MPI_UINT32_T, tile, MPI_ANY_TAG, MPI_COMM_WORLD, &status); // type matches the MPI_Send above
    }
    tStop = std::chrono::high_resolution_clock::now();
    millisecs_t duration( std::chrono::duration_cast<millisecs_t>(tStop-tStart) ) ;
    long elapsed = duration.count();
    
    SDL_Init(SDL_INIT_EVERYTHING);

    sdlSurface = SDL_SetVideoMode(MAX_X, MAX_Y, 32, SDL_HWSURFACE | SDL_DOUBLEBUF);
    
    std::stringstream ss;
    ss << argv[0] << " "
       << numprocs << " processes "
       << elapsed*1.e-3 << " sec."
       << "\n";

    SDL_WM_SetCaption(ss.str().c_str(), title);
    std::cout << ss.str().c_str() << "\n";

    draw(sdlSurface, field);

    SDL_Flip(sdlSurface);
   
    do {
      SDL_Delay(50);
      SDL_PollEvent(&event);
    } while( event.type != SDL_QUIT && event.type != SDL_KEYDOWN );
        
    SDL_FreeSurface(sdlSurface);
    SDL_Quit();

    free(field);
  }

  MPI_Finalize();

  return 0;
}
Developer: DanBrennan33, Project: SenecaOOP345-attic, Lines: 101, Source: mandelbrot_mpi.cpp
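A note on the early-return fix in Example 1: every rank evaluates the numprocs check identically there, so calling MPI_Finalize before returning is safe. When only some ranks can detect a failure, the usual escape hatch is MPI_Abort, which tears down the whole job instead of waiting on the other ranks. A minimal sketch of that alternative (not taken from the project above):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int numprocs;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);

    if (numprocs <= 1) {
        fprintf(stderr, "error: requires at least two MPI processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);   /* terminates all ranks; does not return */
    }

    /* ... normal work ... */

    MPI_Finalize();
    return 0;
}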


Example 2: main

int main(int argc, char *argv[])
{
	int rank;
	int n_ranks, start_rank;
	int i,j;
	float gamma = 0.25, rho = -0.495266;
	float GLOB_SUM = 0, sum = 0;

	MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD, &n_ranks);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);

	printf("before get data in id %d\n", rank);

	get_data(rank%4);
	start_rank = 6;
	n_ranks = 4;

	printf("done getting dat rank %d\n", rank);

	MPI_Barrier(MPI_COMM_WORLD);
//	printf("crossing bar1 %d\n", rank);
	
	for (j = 0; j < INPUT_SIZE; ++j)
	{
		get_input(rank, start_rank, n_ranks);
		sum = compute_svm_sum(rank%4, gamma);
		if(rank == start_rank)
		{
			float tempBuff;
			GLOB_SUM = sum;
			for (i = start_rank+1; i < start_rank + n_ranks; ++i) {
				MPI_Recv(&tempBuff, 1, MPI_FLOAT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
				GLOB_SUM = GLOB_SUM + tempBuff;
			}
			GLOB_SUM -= rho;
		}
		else {
			MPI_Send((float*)&sum, 1, MPI_FLOAT, start_rank, 0, MPI_COMM_WORLD);
		}
	}

	//if(rank != 6)
	//printf("before bar2 %d\n", rank);

	MPI_Barrier(MPI_COMM_WORLD);
	if(rank == 6)
	{
		#ifdef DUMP
			m5_dump_stats(0, 0);
			m5_reset_stats(0, 0);
		#endif
	}

	//printf("done with thread %d\n", rank);

	if(rank == 6)	
		printf("global sum = %f\n", GLOB_SUM);
//	free_data();
	MPI_Finalize();
	return 0;
}
Developer: iot-locus, Project: kernels, Lines: 62, Source: svm_mpi.c


Example 3: main

int main(int argc, char** argv)
{
  int my_rank, p;
  int i, dest;
  mpz_t currentPrime;
  unsigned long int product;
  if (argc < 2) {
    fprintf(stderr, "usage: %s <number-to-factor>\n", argv[0]);
    return 1;
  }
  sscanf(argv[1], "%lu", &product);
  unsigned long int secondFactor = 0; /* received via MPI_UNSIGNED_LONG below, so it must be an unsigned long */
  int bcastStatus;
  int equals;

  /** GMP library variables **/
  mpz_t nextPrimeNumber;
  mpz_t testFactor;
  mpz_init(nextPrimeNumber);
  mpz_init_set_str (nextPrimeNumber, argv[1], 10);
  mpz_init(testFactor);
  mpz_init_set_ui(currentPrime, 2);
  mpz_nextprime(nextPrimeNumber, nextPrimeNumber);
  mpz_t testProduct;
  mpz_init(testProduct);

  /** MPI Initialization **/
  MPI_Request finalValue;
  MPI_File out;
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &p);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
  MPI_Status status;

  /** Get Ready to receive a factor if another process finds one */
  MPI_Irecv(&secondFactor, 1, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &finalValue);
  
  /** Prepare initial offset for each process **/
  for (i=0 ; i < my_rank ; i++) {
    mpz_nextprime(currentPrime, currentPrime);
  }
  /** Start Timing **/
  double start = MPI_Wtime(), diff;
  while (!secondFactor) {
    /** Check if another process has found the factors **/
    MPI_Test (&finalValue, &bcastStatus, &status);
    if(bcastStatus) {
      /** Somebody else has found the factors, we are done **/
      MPI_Wait(&finalValue, &status);
      break;
    }
      /** Skip P primes before checking again **/
    for (i=0 ; i < p ; i++) {
      mpz_nextprime(currentPrime, currentPrime);
    }
    
    /** Brute force check if the current working prime is a factor of the input number **/
    for (mpz_set_ui(testFactor , 2) ; mpz_get_ui(testFactor) <= mpz_get_ui(currentPrime); mpz_nextprime(testFactor, testFactor)) {
      /** Check if another process has found the factors **/
      MPI_Test (&finalValue, &bcastStatus, &status);
      if(bcastStatus) {
        MPI_Wait(&finalValue, &status);
        break;
      }
      mpz_mul_ui(testProduct, currentPrime, mpz_get_ui(testFactor));
      equals = mpz_cmp_ui(testProduct, product);
      if (equals == 0){
        /** We've found the factor; compute the second number and send it to the other processes **/
        secondFactor = mpz_get_ui(testFactor);
        printf("done by process %d, factors are %lu and %lu \n", my_rank, mpz_get_ui(currentPrime), secondFactor);
        fflush(stdout);
        for (dest = 0 ; dest < p ; dest++) {
          if (dest != my_rank) {
            MPI_Send(&secondFactor, 1, MPI_UNSIGNED_LONG, dest, 0, MPI_COMM_WORLD);
          }
        }
      }
    }
  }

  diff = MPI_Wtime() - start;
  /** End Timing **/

  /** Prepare file contents **/
  char fileName[200], fileContents[200];
  sprintf(fileName, "time_%lu", product);
  sprintf(fileContents, "%d\t%f\n", my_rank, diff);

  /** Write File **/
  MPI_File_open( MPI_COMM_WORLD, fileName, MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &out );
  MPI_File_seek(out, my_rank*strlen ( fileContents ) , MPI_SEEK_SET);
  MPI_File_write_all(out , &fileContents, strlen ( fileContents ), MPI_CHAR, &status );
  MPI_File_close(&out);

  /** Fin **/
  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();
  return(0);
}
Developer: challett, Project: primeFactor, Lines: 95, Source: main.c


Example 4: main

int main(int argc, char **argv)
{
  MPI_File fp;

  LemonWriter *w;
  LemonReader *r;
  LemonRecordHeader *h;

  double *data;
  double tick, tock;
  double *timesRead;
  double *timesWrite;
  double stdRead = 0.0;
  double stdWrite = 0.0;
  int mpisize;
  int rank;
  char const *type;
  int ldsize;
  unsigned long long int fsize;
  int *hashMatch, *hashMatchAll;
  double const rscale = 1.0 / RAND_MAX;

  int ME_flag=1, MB_flag=1, status=0;

  int latDist[] = {0, 0, 0, 0};
  int periods[] = {1, 1, 1, 1};
  int locSizes[4];
  int latSizes[4];
  int localVol = 1;
  int latVol = localVol;

  MPI_Comm cartesian;
  int i, j;

  md5_state_t state;
  md5_byte_t before[16];
  md5_byte_t after[16];
  
  int L;
  int iters; 

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &mpisize);

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  
  if (argc != 3)
  {
    usage(rank, argv);
    MPI_Finalize();
    return 1;
  }
  
  L = atoi(argv[1]);
  if (L <= 0)
    usage(rank, argv);

  iters = atoi(argv[2]);
  if (iters <= 0)
    usage(rank, argv);

  timesWrite = (double*)calloc(iters, sizeof(double));
  if (timesWrite == (double*)NULL)
  {
    fprintf(stderr, "ERROR: Could not allocate memory.\n");
    return 1;
  }
  timesRead = (double*)calloc(iters, sizeof(double));
  if (timesRead == (double*)NULL)
  {
    fprintf(stderr, "ERROR: Could not allocate memory.\n");
    return 1;
  }
  hashMatch = (int*)calloc(iters, sizeof(int));
  if (hashMatch == (int*)NULL)
  {
    fprintf(stderr, "ERROR: Could not allocate memory.\n");
    return 1;
  }
  hashMatchAll = (int*)calloc(iters, sizeof(int));
  if (hashMatchAll == (int*)NULL)
  {
    fprintf(stderr, "ERROR: Could not allocate memory.\n");
    return 1;
  }
  
  /* Construct a Cartesian topology, adjust lattice sizes where needed */
  MPI_Dims_create(mpisize, 4, latDist);
  
  for (i = 0; i < 4; ++i)
  {
    int div = (i == 3 ? (2 * L) : L) / latDist[i];
    locSizes[i] = div ? div : 1;
    localVol *= locSizes[i];
    latSizes[i] = locSizes[i] * latDist[i];
  }
  latVol = mpisize * localVol;
  ldsize = localVol * 72 * sizeof(double);
  fsize = (unsigned long long int)latVol * 72 * sizeof(double);
 
//......... [part of the code omitted] .........
Developer: kostrzewa, Project: lemon, Lines: 101, Source: lemon_benchmark.c


Example 5: main


//......... [part of the code omitted] .........
    
    // set boundary conditions
    for (int j = 0; j < N; j++)
    {
        float y0     = sinf( 2.0 * pi * j / (N-1));
        A[j][0]      = y0;
        A[j][M-1]    = y0;
        Aref[j][0]   = y0;
        Aref[j][M-1] = y0;
    }
    
#if _OPENACC
    int ngpus=acc_get_num_devices(acc_device_nvidia);
    int devicenum=rank%ngpus;
    acc_set_device_num(devicenum,acc_device_nvidia);

    // Call acc_init after acc_set_device_num to avoid multiple contexts on device 0 in multi GPU systems
    acc_init(acc_device_nvidia);
#endif /*_OPENACC*/

    // Ensure correctness if N%size != 0
    int chunk_size = ceil( (1.0*N)/size );
    
    int jstart = rank * chunk_size;
    int jend   = jstart + chunk_size;
    
    // Do not process boundaries
    jstart = max( jstart, 1 );
    jend = min( jend, N - 1 );
    
    if ( rank == 0) printf("Jacobi relaxation Calculation: %d x %d mesh\n", N, M);

    if ( rank == 0) printf("Calculate reference solution and time serial execution.\n");
    StartTimer();
    laplace2d_serial( rank, iter_max, tol );
    double runtime_serial = GetTimer();

    //Wait for all processes to ensure correct timing of the parallel version
    MPI_Barrier( MPI_COMM_WORLD );
    if ( rank == 0) printf("Parallel execution.\n");
    StartTimer();
    int iter  = 0;
    float error = 1.0f;
    
    #pragma acc data copy(A) create(Anew)
    while ( error > tol && iter < iter_max )
    {
        error = 0.f;

        #pragma acc kernels
        for (int j = jstart; j < jend; j++)
        {
            for( int i = 1; i < M-1; i++ )
            {
                Anew[j][i] = 0.25f * ( A[j][i+1] + A[j][i-1]
                                     + A[j-1][i] + A[j+1][i]);
                error = fmaxf( error, fabsf(Anew[j][i]-A[j][i]));
            }
        }
        float globalerror = 0.0f;
        MPI_Allreduce( &error, &globalerror, 1, MPI_FLOAT, MPI_MAX, MPI_COMM_WORLD );
        error = globalerror;
        
        #pragma acc kernels
        for (int j = jstart; j < jend; j++)
        {
            for( int i = 1; i < M-1; i++ )
            {
                A[j][i] = Anew[j][i];
            }
        }

        //Periodic boundary conditions
        int top    = (rank == 0) ? (size-1) : rank-1;
        int bottom = (rank == (size-1)) ? 0 : rank+1;

        #pragma acc host_data use_device( A )
        {
            //1. Send row jstart (the first modified row) to top; receive the lower boundary row (jend) from bottom
            MPI_Sendrecv( A[jstart], M, MPI_FLOAT, top   , 0, A[jend], M, MPI_FLOAT, bottom, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE );

            //2. Send row (jend-1) (the last modified row) to bottom; receive the upper boundary row (jstart-1) from top
            MPI_Sendrecv( A[(jend-1)], M, MPI_FLOAT, bottom, 0, A[(jstart-1)], M, MPI_FLOAT, top   , 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE );
        }
        
        if(rank == 0 && (iter % 100) == 0) printf("%5d, %0.6f\n", iter, error);
        
        iter++;
    }
    MPI_Barrier( MPI_COMM_WORLD );
    double runtime = GetTimer();

    if (check_results( rank, jstart, jend, tol ) && rank == 0)
    {
        printf( "Num GPUs: %d\n", size );
        printf( "%dx%d: 1 GPU: %8.4f s, %d GPUs: %8.4f s, speedup: %8.2f, efficiency: %8.2f%\n", N,M, runtime_serial/ 1000.f, size, runtime/ 1000.f, runtime_serial/runtime, runtime_serial/(size*runtime)*100 );
    }
    MPI_Finalize();
    return 0;
}
Developer: Hopobcn, Project: nvidia-openacc-course-sources, Lines: 101, Source: laplace2d.solution.c


Example 6: main

int main(int argc, char* argv[])
{
  //
  // Get a default output stream from the Teuchos::VerboseObjectBase
  //
  Teuchos::RCP<Teuchos::FancyOStream>
    out = Teuchos::VerboseObjectBase::getDefaultOStream();
  
  Teuchos::GlobalMPISession mpiSession(&argc,&argv);

#ifdef HAVE_COMPLEX
  typedef std::complex<double> ST;  // Scalar-type typedef
#elif defined(HAVE_COMPLEX_H)
  typedef std::complex<double> ST;     // Scalar-type typedef
#else
  typedef double ST;                // Scalar-type typedef
#endif
  
  typedef Teuchos::ScalarTraits<ST>::magnitudeType MT;  // Magnitude-type typedef
  typedef int OT;                   // Ordinal-type typedef
  ST one = Teuchos::ScalarTraits<ST>::one(); 
  ST zero = Teuchos::ScalarTraits<ST>::zero(); 
  
#ifdef HAVE_MPI
  MPI_Comm mpiComm = MPI_COMM_WORLD;
  const Tpetra::MpiPlatform<OT,OT>  ordinalPlatform(mpiComm);
  const Tpetra::MpiPlatform<OT,ST>   scalarPlatform(mpiComm);
#else
  const Tpetra::SerialPlatform<OT,OT>  ordinalPlatform;
  const Tpetra::SerialPlatform<OT,ST>   scalarPlatform;
#endif
  
  //
  // Get the data from the HB file
  //
  
  // Name of input matrix file
  std::string matrixFile = "mhd1280b.cua";
  
  int info=0;
  int dim,dim2,nnz;
  MT *dvals;
  int *colptr,*rowind;
  ST *cvals;
  nnz = -1;
  info = readHB_newmat_double(matrixFile.c_str(),&dim,&dim2,&nnz,
                              &colptr,&rowind,&dvals);

  if (info == 0 || nnz < 0) {
    *out << "Error reading '" << matrixFile << "'" << std::endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return -1;  // without this return, execution would continue with no matrix data
  }

  // Convert interleaved doubles to std::complex values
  cvals = new ST[nnz];
  for (int ii=0; ii<nnz; ii++) {
    cvals[ii] = ST(dvals[ii*2],dvals[ii*2+1]);
  }
  
  // Declare global dimension of the linear operator
  OT globalDim = dim;
  
  // Create the element space and std::vector space
  const Tpetra::ElementSpace<OT> elementSpace(globalDim,0,ordinalPlatform);
  const Tpetra::VectorSpace<OT,ST> vectorSpace(elementSpace,scalarPlatform);
  
  // Create my implementation of a Tpetra::Operator
  RCP<Tpetra::Operator<OT,ST> >
    tpetra_A = rcp( new MyOperator<OT,ST>(vectorSpace,dim,colptr,nnz,rowind,cvals) );

  // Create a Thyra linear operator (A) using the Tpetra::CisMatrix (tpetra_A).
  RCP<Thyra::LinearOpBase<ST> >
    A = Teuchos::rcp( new Thyra::TpetraLinearOp<OT,ST>(tpetra_A) );

  //
  // Set the parameters for the Belos LOWS Factory and create a parameter list.
  //
  int             blockSize              = 1;
  int             maxIterations          = globalDim;
  int             maxRestarts            = 15;
  int             gmresKrylovLength      = 50;
  int             outputFrequency        = 100;
  bool            outputMaxResOnly       = true;
  MT              maxResid               = 1e-5;

  Teuchos::RCP<Teuchos::ParameterList>
    belosLOWSFPL = Teuchos::rcp( new Teuchos::ParameterList() );
 
  belosLOWSFPL->set("Solver Type","Block GMRES");

  Teuchos::ParameterList& belosLOWSFPL_solver =
    belosLOWSFPL->sublist("Solver Types");

  Teuchos::ParameterList& belosLOWSFPL_gmres =
    belosLOWSFPL_solver.sublist("Block GMRES");

  belosLOWSFPL_gmres.set("Maximum Iterations",int(maxIterations));
  belosLOWSFPL_gmres.set("Convergence Tolerance",MT(maxResid));
//......... [part of the code omitted] .........
开发者ID:haripandey,项目名称:trilinos,代码行数:101,代码来源:belos_tpetra_thyra_lowsf_hb.cpp


Example 7: main

int main(int argc, char *argv[] ) {
	double time1, time2;
	
	time1 = MPI_Wtime();

	int rank, processors;
	
	int j;	// number of iterations
	int k;	// number of iterations to perform before creating a checkpoint
	int l;  // number of random samples per grid point
	int checkpoint_resume = 0;	// 1 = resume from last checkpoint

	int c;		// used to hold a character
	int i=0, row = 0, col = 0, pln = 0;	// array iterators

	char ***local_array;		   
	char **local_array_2nd;		   
	char *local_array_pointer; 

	char ***local_array_copy;		   
	char **local_array_copy_2nd;		   
	char *local_array_copy_pointer; 

	char ***temp, *temp_pointer;
	
	int file_open_error;
	int command_line_incomplete = 0;

	int grid_size[3] 	  = {0,0,0};
	int proc_size[3] 	  = {0,0,0};
	int local_size[3] 	  = {0,0,0};
	int remainder_size[3] = {0,0,0};
	int coords[3] 		  = {0,0,0};
	int start_indices[3]  = {0,0,0};
	int periods[3]        = {0,0,0};
	int mem_size[3]       = {0,0,0};
	
	MPI_Status status;
	MPI_Datatype filetype, memtype;
	MPI_File fh;
	
	MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD, &processors);	
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);	

	// Interpret the command line arguments --------------------------------
  	if (rank == 0) {  	  	
		
		if (argc < 7 || argc > 8) {	/* x y z j k l are required, r is optional */
			fputs("usage: x y z j k l r\n", stderr);
			fputs("where: x,y,z = x, y and z dimensions\n", stderr);
			fputs("       j = how many times the game of life is played\n", stderr);
			fputs("       k = checkpoint every k iterations\n", stderr);
			fputs("       l = number of random samples per grid point\n", stderr);
			fputs("       r = resume from the last checkpoint\n", stderr);
			fputs(INITIAL, stderr);
			fputs(" must be present.\n", stderr);
			fputs(CHECKPOINT, stderr);
			fputs(" must be present if resuming from the last checkpoint.\n", stderr);
			MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);	/* abort all ranks; exit() on rank 0 alone would strand the others */
		}

  	}

	j = (int) strtol(argv[4], NULL, 10);
	k = (int) strtol(argv[5], NULL, 10);
	l = (int) strtol(argv[6], NULL, 10);		
	if ( argc == 8 )	/* the optional resume flag follows l, so it is argv[7] */
		if ( argv[7][0] == 'r' )
			checkpoint_resume = 1;

	if (rank == 0)
		printf("%d iterations \ncheckpoint every %d iterations \n%d samples per grid point \ncheckpoint resume = %d\n", j,k,l,checkpoint_resume);				
	
	grid_size[0] = (int) strtol(argv[1], NULL, 10);
	grid_size[1] = (int) strtol(argv[2], NULL, 10);
	grid_size[2] = (int) strtol(argv[3], NULL, 10);
	if (rank==0) printf("grid_size: %d, %d, %d\n", grid_size[0], grid_size[1], grid_size[2]);

	MPI_Dims_create(processors, 3, proc_size);
	if (rank==0) printf("proc_size: %d, %d, %d\n", proc_size[0], proc_size[1], proc_size[2]);

	local_size[0] = grid_size[0] / proc_size[0];
	local_size[1] = grid_size[1] / proc_size[1];
	local_size[2] = grid_size[2] / proc_size[2];
	if (rank==0) printf("local_size: %d, %d, %d\n", local_size[0], local_size[1], local_size[2]);

	remainder_size[0] = grid_size[0] % proc_size[0];
	remainder_size[1] = grid_size[1] % proc_size[1];
	remainder_size[2] = grid_size[2] % proc_size[2];
	if (rank==0) printf("remainder_size: %d, %d, %d\n", remainder_size[0], remainder_size[1], remainder_size[2]);
	if (remainder_size[0] != 0 || remainder_size[1] != 0 || remainder_size[2] != 0) {
		fputs("remainder size != 0, check your dimensions", stderr);
		MPI_Finalize();
		exit(EXIT_FAILURE);
	}

	MPI_Comm comm;
	MPI_Cart_create(MPI_COMM_WORLD, 3, proc_size, periods, 0, &comm);
	MPI_Comm_rank(comm, &rank);
//......... [part of the code omitted] .........
Developer: AnthonyDiGirolamo, Project: massive_heat, Lines: 101, Source: life3d.c


Example 8: main


//......... [part of the code omitted] .........

  //------------------------------------------------------
  // SIP Step 1 of 6: Instantiate the parameter space
  //------------------------------------------------------
  QUESO::VectorSpace<> paramSpace(env, "param_", 1, NULL);

  //------------------------------------------------------
  // SIP Step 2 of 6: Instantiate the parameter domain
  //------------------------------------------------------
  QUESO::GslVector paramMinValues(paramSpace.zeroVector());
  QUESO::GslVector paramMaxValues(paramSpace.zeroVector());

  paramMinValues[0] = 8.;
  paramMaxValues[0] = 11.;

  QUESO::BoxSubset<> paramDomain("param_", paramSpace, paramMinValues,
      paramMaxValues);

  //------------------------------------------------------
  // SIP Step 3 of 6: Instantiate the likelihood function
  // object to be used by QUESO.
  //------------------------------------------------------
  Likelihood<> lhood("like_", paramDomain);

  //------------------------------------------------------
  // SIP Step 4 of 6: Define the prior RV
  //------------------------------------------------------
  QUESO::UniformVectorRV<> priorRv("prior_", paramDomain);

  //------------------------------------------------------
  // SIP Step 5 of 6: Instantiate the inverse problem
  //------------------------------------------------------
  // Extra prefix before the default "rv_" prefix
  QUESO::GenericVectorRV<> postRv("post_", paramSpace);

  // No extra prefix before the default "ip_" prefix
  QUESO::StatisticalInverseProblem<> ip("", NULL, priorRv, lhood, postRv);

  //------------------------------------------------------
  // SIP Step 6 of 6: Solve the inverse problem, that is,
  // set the 'pdf' and the 'realizer' of the posterior RV
  //------------------------------------------------------
  QUESO::GslVector paramInitials(paramSpace.zeroVector());
  priorRv.realizer().realization(paramInitials);

  QUESO::GslMatrix proposalCovMatrix(paramSpace.zeroVector());
  proposalCovMatrix(0,0) = std::pow(std::abs(paramInitials[0]) / 20.0, 2.0);

  ip.solveWithBayesMetropolisHastings(NULL, paramInitials, &proposalCovMatrix);

  //================================================================
  // Statistical forward problem (SFP): find the max distance
  // traveled by an object in projectile motion; input pdf for 'g'
  // is the solution of the SIP above.
  //================================================================

  //------------------------------------------------------
  // SFP Step 1 of 6: Instantiate the parameter *and* qoi spaces.
  // SFP input RV = FIP posterior RV, so SFP parameter space
  // has been already defined.
  //------------------------------------------------------
  QUESO::VectorSpace<> qoiSpace(env, "qoi_", 1, NULL);

  //------------------------------------------------------
  // SFP Step 2 of 6: Instantiate the parameter domain
  //------------------------------------------------------

  // Not necessary because input RV of the SFP = output RV of SIP.
  // Thus, the parameter domain has been already defined.

  //------------------------------------------------------
  // SFP Step 3 of 6: Instantiate the qoi object
  // to be used by QUESO.
  //------------------------------------------------------
  Qoi<> qoi("qoi_", paramDomain, qoiSpace);

  //------------------------------------------------------
  // SFP Step 4 of 6: Define the input RV
  //------------------------------------------------------

  // Not necessary because input RV of SFP = output RV of SIP
  // (postRv).

  //------------------------------------------------------
  // SFP Step 5 of 6: Instantiate the forward problem
  //------------------------------------------------------
  QUESO::GenericVectorRV<> qoiRv("qoi_", qoiSpace);

  QUESO::StatisticalForwardProblem<> fp("", NULL, postRv, qoi, qoiRv);

  //------------------------------------------------------
  // SFP Step 6 of 6: Solve the forward problem
  //------------------------------------------------------
  fp.solveWithMonteCarlo(NULL);

  MPI_Finalize();

  return 0;
#endif  // QUESO_HAS_MPI
}
Developer: pbauman, Project: queso, Lines: 101, Source: gravity_main.C


Example 9: main

// main driver
int main(int argc, char *argv[])
{

#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  if (Comm.NumProc() != 2) {
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return(0);
  }

  int NumMyElements = 0;         // NODES assigned to this processor
  int NumMyExternalElements = 0; // nodes used by this proc, but not hosted
  int NumMyTotalElements = 0;
  int FE_NumMyElements = 0;      // TRIANGLES assigned to this processor
  int * MyGlobalElements = 0;    // nodes assigned to this processor
  Epetra_IntSerialDenseMatrix T; // store the grid connectivity

  int MyPID=Comm.MyPID();

  cout << MyPID << endl;

  switch( MyPID ) {

  case 0:
    NumMyElements = 3;
    NumMyExternalElements = 2;
    NumMyTotalElements = NumMyElements + NumMyExternalElements;
    FE_NumMyElements = 3;

    MyGlobalElements = new int[NumMyTotalElements];
    MyGlobalElements[0] = 0;
    MyGlobalElements[1] = 4;
    MyGlobalElements[2] = 3;
    MyGlobalElements[3] = 1;
    MyGlobalElements[4] = 5;

    break;
  case 1:
    NumMyElements = 3;
    NumMyExternalElements = 2;
    NumMyTotalElements = NumMyElements + NumMyExternalElements;
    FE_NumMyElements = 3;

    MyGlobalElements = new int[NumMyTotalElements];
    MyGlobalElements[0] = 1;
    MyGlobalElements[1] = 2;
    MyGlobalElements[2] = 5;
    MyGlobalElements[3] = 0;
    MyGlobalElements[4] = 4;
    break;

  }

  // build Map corresponding to update
  Epetra_Map Map(-1,NumMyElements,MyGlobalElements,0,Comm);

  // vector containing coordinates BEFORE exchanging external nodes
  Epetra_Vector CoordX_noExt(Map);
  Epetra_Vector CoordY_noExt(Map);

  switch( MyPID ) {

  case 0:
    T.Shape(3,FE_NumMyElements);

    // fill x-coordinates
    CoordX_noExt[0] = 0.0; 
    CoordX_noExt[1] = 1.0; 
    CoordX_noExt[2] = 0.0;
    // fill y-coordinates
    CoordY_noExt[0] = 0.0; 
    CoordY_noExt[1] = 1.0; 
    CoordY_noExt[2] = 1.0;
    // fill connectivity
    T(0,0) = 0; T(0,1) = 4; T(0,2) = 3;
    T(1,0) = 0; T(1,1) = 1; T(1,2) = 4;
    T(2,0) = 4; T(2,1) = 1; T(2,2) = 5;
    break;
    
  case 1:

    T.Shape(3,FE_NumMyElements);

    // fill x-coordinates
    CoordX_noExt[0] = 1.0; 
    CoordX_noExt[1] = 2.0; 
    CoordX_noExt[2] = 2.0;
    // fill y-coordinates
    CoordY_noExt[0] = 0.0; 
    CoordY_noExt[1] = 0.0; 
    CoordY_noExt[2] = 1.0;
    // fill connectivity
//......... [part of the code omitted] .........
Developer: 10341074, Project: pacs, Lines: 101, Source: ex13.cpp


Example 10: main

int main(int argc, char *argv[])
{
  long N=20, M=30;      // number of cells NxM
  int n=2,  m=3;        // number of blocks nxm 
  int tpi=16, tpj=18;   // test pressure coordinates
  int tai=7, taj=9;     // test average coordinates
  int i, j, I, J;       // local and global i,j
  int myi, myj;         // my i,j in neighbor map
  int bi, bj;           // block size in y and x direction
  int numprocs, myid;   // number of processors and my rank id
  double **P, **A;      // 2D array of pressures and averages
  int **B;              // 2D array with map of neighbors

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &myid);

  // get command line arguments if any
  if (argc > 1) {
    if (argc != 5) {
      if (myid==0) {
        fprintf(stderr, "usage: prog [N M n m]\n");
        fprintf(stderr, "Parameters:\n");
        fprintf(stderr, "\tN: number of rows or cells in y direction. Default: %ld\n", N);
        fprintf(stderr, "\tM: number of columns or cells in x direction. Default: %ld\n", M);
        fprintf(stderr, "\tn: number of blocks in y direction. Default: %d\n", n);
        fprintf(stderr, "\tm: number of blocks in x direction. Default %d\n", m);
      }
      MPI_Finalize();
      exit(3);
    } 
    N = atoi(argv[1]);
    M = atoi(argv[2]);
    n = atoi(argv[3]);
    m = atoi(argv[4]);
  }

  bi = N/n;
  bj = M/m;

  // start message
  if (myid==0) {
    printf("Terapressure v0.1\n");
    printf("=================\n");
    printf("Number of cells: %lu (%lu x %lu)\n", N*M, N, M);
    printf("Number of blocks: %d (%d x %d)\n", n*m, n, m);
    printf("Number of processors %d\n", numprocs);
    printf("Block size: (%d x %d)\n", bi, bj);
  }
  
  // validate parameters   
  if (N % n != 0 || M % m != 0) {
    if(myid==0) 
      fprintf(stderr,"Number of blocks in x or y axis do not fit.\n"); 
    MPI_Finalize();
    exit(1);
  }  
  if (numprocs != n*m) {
    if (myid==0) 
      fprintf(stderr,"Number of processors must be the same as number of blocks: %d\n", n*m);
    MPI_Finalize();
    exit(2);
  }

  double t = MPI_Wtime();

  // memory allocation
  // stack allocation is simple but limited in size
  // double   P[bi][bj];
  // double   A[bi][bj];
  // int     B[n][m];      
  
  // heap allocation
  P = malloc(sizeof(double*) * bi); 
  A = malloc(sizeof(double*) * bi); 
  for (i=0; i < bi; i++) {
    P[i] = malloc(sizeof(double) * bj);
    A[i] = malloc(sizeof(double) * bj);
  }  
  B = malloc(sizeof(int*) * n); 
  for (i=0; i < n; i++) {
    B[i] = malloc(sizeof(int) * m);
  }
  
  // domain decomposition
  int rank = 0;    
  //printf("Neighbors map:\n");
  for (i=0; i < n; i++) {
    for (j=0; j < m; j++) {
      if (rank == myid) {
        myi = i; 
        myj = j;
      }
      B[i][j] = rank++;
      //printf ("%3d ",  W[i][j]);
    }
    //printf ("\n");
  }
  //printf("%d: my i,j in neighbor map: %d,%d\n", myid, myi, myj);

//......... [part of the code omitted] .........
Developer: sganis, Project: terapressure, Lines: 101, Source: terapressure-0.1.c


Example 11: main

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    int rank = 0, size = 1;
#ifdef ADIOS2_HAVE_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif

    /** Application variable */
    std::vector<float> myFloats = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
    const std::size_t Nx = myFloats.size();

    try
    {
        /** ADIOS class factory of IO class objects, DebugON is recommended */
        adios2::ADIOS adios(MPI_COMM_WORLD, adios2::DebugON);

        /*** IO class object: settings and factory of Settings: Variables,
         * Parameters, Transports, and Execution: Engines */
        adios2::IO &adios1IO = adios.DeclareIO("ADIOS1IO");
        adios1IO.SetEngine("ADIOS1Writer");
        adios1IO.AddTransport("file", {{"library", "MPI"}});

        /** global array : name, { shape (total) }, { start (local) }, { count
         * (local) }, all are constant dimensions */
        adios2::Variable<float> &bpFloats = adios1IO.DefineVariable<float>(
            "bpFloats", {size * Nx}, {rank * Nx}, {Nx}, adios2::ConstantDims);

        /** Engine derived class, spawned to start IO operations */
        adios2::Engine &adios1Writer =
            adios1IO.Open("myVector.bp", adios2::Mode::Write);

        /** Write variable for buffering */
        adios1Writer.PutSync<float>(bpFloats, myFloats.data());

        /** Create bp file, engine becomes unreachable after this*/
        adios1Writer.Close();
    }
    catch (std::invalid_argument &e)
    {
        std::cout << "Invalid argument exception, STOPPING PROGRAM from rank "
                  << rank << "\n";
        std::cout << e.what() << "\n";
    }
    catch (std::ios_base::failure &e)
    {
        std::cout
            << "IO System base failure exception, STOPPING PROGRAM from rank "
            << rank << "\n";
        std::cout << e.what() << "\n";
    }
    catch (std::exception &e)
    {
        std::cout << "Exception, STOPPING PROGRAM from rank " << rank << "\n";
        std::cout << e.what() << "\n";
    }

    MPI_Finalize();

    return 0;
}
Developer: Bella42, Project: ADIOS2, Lines: 62, Source: helloADIOS1Writer.cpp
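Example 11 highlights an ordering rule that matters whenever MPI_Finalize meets C++ RAII: the adios2::ADIOS object is scoped inside the try block, so its destructor runs before the MPI_Finalize call at the end of main. Destroying MPI-backed objects after finalization is an error. A minimal sketch of the same scoping pattern, using a hypothetical CommGuard wrapper (not part of any library above):

#include <mpi.h>

// Hypothetical RAII wrapper: duplicates a communicator in its constructor
// and frees it in its destructor (both are MPI calls).
struct CommGuard {
    MPI_Comm comm;
    explicit CommGuard(MPI_Comm parent) { MPI_Comm_dup(parent, &comm); }
    ~CommGuard() { MPI_Comm_free(&comm); }
};

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    {
        CommGuard guard(MPI_COMM_WORLD);
        // ... use guard.comm ...
    }   // ~CommGuard runs here, while MPI is still initialized
    MPI_Finalize(); // safe: no MPI-dependent objects outlive this call
    return 0;
}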


Example 12: main


//......... [part of the code omitted] .........
  if(err != MPI_SUCCESS) {
    fprintf(stderr, "unable to create cartestian communicator\n");
    goto die_finalize_mpi;
  }

  dsfmt_t *prng = malloc(sizeof(dsfmt_t));
  if(prng == NULL) {
    fprintf(stderr, "unable to allocate PRNG\n");
    goto die_free_cart_comm;
  }
  dsfmt_init_gen_rand(prng, SEED + rank);

  int const net_elems = proc_elems[0]*proc_elems[1];
  // Allocate master source array for FFT.
  double *const master = fftw_malloc(net_elems*sizeof(double));
  if(master == NULL) {
    fprintf(stderr, "unable to allocate master array\n");
    goto die_free_prng;
  }
  for(unsigned int i = 0; i < net_elems; ++i) {
    master[i] = dsfmt_genrand_open_close(prng) * 10;
  }

  /* Allocate source array for serial array. We copy the master array to this
   * array, then transform it in place, then reverse transform it. The idea is
   * that we should get the original data back, and we use this as a consistency
   * check. We need the original data to compare to.
   */
  double *const source = fftw_malloc(net_elems*sizeof(double));
  if(source == NULL) {
    fprintf(stderr, "unable to allocate source array\n");
    goto die_free_master;
  }
  for(int i = 0; i < net_elems; ++i) source[i] = master[i];

  /* Allocate the destination array */
  double complex *const dest = fftw_malloc(net_elems*sizeof(double complex));
  if(dest == NULL) {
    fprintf(stderr, "unable to allocate destination array\n");
    goto die_free_source;
  }

  /* Allocate a plan to compute the FFT */
  fft_par_plan plan = fft_par_plan_r2c(cart, proc_elems, 1, source,
      dest, &err);
  if(plan == NULL) {
    fprintf(stderr, "unable to initialize parallel FFT plan\n");
    goto die_free_dest;
  }

  /* Execute the forward plan */
  err = fft_par_execute_fwd(plan);
  if(err != MPI_SUCCESS) {
    fprintf(stderr, "error computing forward plan\n");
    goto die_free_plan;
  }

  /* Execute the reverse plan */
  err = fft_par_execute_rev(plan);
  if(err != MPI_SUCCESS) {
    fprintf(stderr, "error computing reverse plan\n");
    goto die_free_plan;
  }

  /* Compare source to master, use supremum norm */
  double norm = 0.0; /* must be floating point; an int would truncate the fmax results below */
  for(int i = 0; i < net_elems; ++i) {
    /* Each FFT effectively multiplies by sqrt(net_elems*num_procs) */
    norm = fmax(norm, fabs(master[i] - source[i]/net_elems/size));
  }
  if(norm < 1.0e-6) {
    ret = EXIT_SUCCESS;
  }

die_free_plan:
  fft_par_plan_destroy(plan);
die_free_dest:
  fftw_free(dest);
die_free_source:
  fftw_free(source);
die_free_master:
  fftw_free(master);
die_free_prng:
  free(prng);
die_free_cart_comm:
  if(err == MPI_SUCCESS) err = MPI_Comm_free(&cart);
  if(err != MPI_SUCCESS) {
    fprintf(stderr, "unable to free cartestian communicator\n");
    ret = EXIT_FAILURE;
  }
die_finalize_mpi:
  if(err == MPI_SUCCESS) err = MPI_Finalize();
  if(err != MPI_SUCCESS) {
    fprintf(stderr, "unable to finalize MPI\n");
    ret = EXIT_FAILURE;
  }
die_immed:
  fftw_cleanup();
  return ret;
}
Developer: mcox, Project: distfft, Lines: 101, Source: fft_par_rev_2d.c


Example 13: main


//......... [part of the code omitted] .........
	
	BaseFoundation_Init( &argc, &argv );
	BaseIO_Init( &argc, &argv );
	BaseContainer_Init( &argc, &argv );
	BaseAutomation_Init( &argc, &argv );
	BaseExtensibility_Init( &argc, &argv );
	BaseContext_Init( &argc, &argv );
	
	stream =  Journal_Register( InfoStream_Type, "myStream" );

	if( argc >= 2 ) {
		procToWatch = atoi( argv[1] );
	}
	else {
		procToWatch = 0;
	}
	if( rank == procToWatch ) Journal_Printf( (void*) stream, "Watching rank: %i\n", rank );
	
	/* Read input */
	dictionary = Dictionary_New();
	
	/* Build the context */
	abstractContext = _AbstractContext_New( 
		sizeof(AbstractContext), 
		"TestContext", 
		MyDelete, 
		MyPrint, 
		NULL,
		NULL, 
		NULL, 
		_AbstractContext_Build, 
		_AbstractContext_Initialise, 
		_AbstractContext_Execute, 
		_AbstractContext_Destroy, 
		"context", 
		True, 
		MySetDt, 
		0, 
		10, 
		CommWorld, 
		dictionary );

	/* add hooks to existing entry points */
	ContextEP_ReplaceAll( abstractContext, AbstractContext_EP_Build, MyBuild );
	ContextEP_ReplaceAll( abstractContext, AbstractContext_EP_Initialise, MyInitialConditions );
	ContextEP_ReplaceAll( abstractContext, AbstractContext_EP_Solve, MySolve );
	ContextEP_ReplaceAll( abstractContext, AbstractContext_EP_Dt, MyDt );

	if( rank == procToWatch ) {
		Journal_Printf( 
			(void*)stream, 
			"abstractContext->entryPointList->_size: %lu\n", 
			abstractContext->entryPoint_Register->_size );
		Journal_Printf( 
			(void*)stream, 
			"abstractContext->entryPointList->count: %u\n", 
			abstractContext->entryPoint_Register->count );
	}
	
	ContextEP_Append( abstractContext, AbstractContext_EP_Solve, MySolve2 );
	ContextEP_ReplaceAll( abstractContext, AbstractContext_EP_Initialise, MyInitialConditions2 ); 

	if( rank == procToWatch ) {
		stream = Journal_Register( InfoStream_Type, AbstractContext_Type );
		AbstractContext_PrintConcise( abstractContext, stream );
		
		Journal_Printf( 
			(void*)stream, 
			"abstractContext->entryPointList->_size: %lu\n", 
			abstractContext->entryPoint_Register->_size );
		Journal_Printf( 
			(void*)stream, 
			"abstractContext->entryPointList->count: %u\n", 
			abstractContext->entryPoint_Register->count );
	}

	/* Run the context */
	if( rank == procToWatch ) {
		Stg_Component_Build( abstractContext, 0 /* dummy */, False );
		Stg_Component_Initialise( abstractContext, 0 /* dummy */, False );
		Stg_Component_Execute( abstractContext, 0 /* dummy */, False );
		Stg_Component_Destroy( abstractContext, 0 /* dummy */, False );
	}
	
	/* Stg_Class_Delete stuff */
	Stg_Class_Delete( abstractContext );
	Stg_Class_Delete( dictionary );
	
	BaseContext_Finalise();
	BaseExtensibility_Finalise();
	BaseAutomation_Finalise();
	BaseContainer_Finalise();
	BaseIO_Finalise();
	BaseFoundation_Finalise();
	
	/* Close off MPI */
	MPI_Finalize();
	
	return 0; /* success */
}
Developer: bmi-forum, Project: bmi-pyre, Lines: 101, Source: testAbstractContext1.c


Example 14: TRAN_Input_std

void TRAN_Input_std(
  MPI_Comm comm1, 
  int Solver,          /* input */
  int SpinP_switch,  
  char *filepath,
  double kBvalue,
  double TRAN_eV2Hartree,
  double Electronic_Temperature,
                      /* output */
  int *output_hks
)
{
  FILE *fp;
  int i,po;
  int i_vec[20],i_vec2[20];
  double r_vec[20];
  char *s_vec[20];
  char buf[MAXBUF];
  int myid;

  MPI_Comm_rank(comm1,&myid);

  /****************************************************
               parameters for TRANSPORT
  ****************************************************/

  input_logical("NEGF.Output_HKS",&TRAN_output_hks,0);
  *output_hks = TRAN_output_hks;

  /* printf("NEGF.OutputHKS=%d\n",TRAN_output_hks); */
  input_string("NEGF.filename.HKS",TRAN_hksoutfilename,"NEGF.hks");
  /* printf("TRAN_hksoutfilename=%s\n",TRAN_hksoutfilename); */

  input_logical("NEGF.Output.for.TranMain",&TRAN_output_TranMain,0);

  if ( Solver!=4 ) { return; }

  /**** show transport credit ****/
  TRAN_Credit(comm1);

  input_string("NEGF.filename.hks.l",TRAN_hksfilename[0],"NEGF.hks.l");
  input_string("NEGF.filename.hks.r",TRAN_hksfilename[1],"NEGF.hks.r");

  /* read data of leads */

  TRAN_RestartFile(comm1, "read","left", filepath,TRAN_hksfilename[0]);
  TRAN_RestartFile(comm1, "read","right",filepath,TRAN_hksfilename[1]);

  /* check b-, and c-axes of the unit cell of leads. */

  po = 0;
  for (i=2; i<=3; i++){
    if (1.0e-10<fabs(tv_e[0][i][1]-tv_e[1][i][1])) po = 1;
    if (1.0e-10<fabs(tv_e[0][i][2]-tv_e[1][i][2])) po = 1;
    if (1.0e-10<fabs(tv_e[0][i][3]-tv_e[1][i][3])) po = 1;
  }

  if (po==1){

    if (myid==Host_ID){
      printf("Warning: The b- or c-axis of the unit cell for the left lead is not same as that for the right lead.\n");
    }

    MPI_Finalize();
    exit(1);
  }

  /* show chemical potentials */

  if (myid==Host_ID){
    printf("\n");
    printf("Intrinsic chemical potential (eV) of the leads\n");
    printf("  Left lead:  %15.12f\n",ChemP_e[0]*TRAN_eV2Hartree);
    printf("  Right lead: %15.12f\n",ChemP_e[1]*TRAN_eV2Hartree);
  }

  /* check the conflict of SpinP_switch */

  if ( (SpinP_switch!=SpinP_switch_e[0]) || (SpinP_switch!=SpinP_switch_e[1]) ){

    if (myid==Host_ID){
      printf ("scf.SpinPolarization conflicts between leads or lead and center.\n");
    }

    MPI_Finalize();
    exit(0);
  }

  input_int(   "NEGF.Surfgreen.iterationmax", &tran_surfgreen_iteration_max, 600);
  input_double("NEGF.Surfgreen.convergeeps", &tran_surfgreen_eps, 1.0e-12); 

  /****  k-points parallel to the layer, which are used for the SCF calc. ****/
  
  i_vec2[0]=1;
  i_vec2[1]=1;
  input_intv("NEGF.scf.Kgrid",2,i_vec,i_vec2);
  TRAN_Kspace_grid2 = i_vec[0];
  TRAN_Kspace_grid3 = i_vec[1];

  if (TRAN_Kspace_grid2<=0){
//......... [part of the code omitted] .........
Developer: rigarash, Project: openmx, Lines: 101, Source: TRAN_Input_std.c


Example 15: main


//......... [part of the code omitted] .........
            fprintf(stderr, "%s\n", USAGE);
            exit(1);
        }

        if ( strcmp(argv[elem], "-i"  ) == 0) {
            ITERATION_SIZE  = atoi(argv[next_elem]);
        } else if ( strcmp(argv[elem], "-m"  ) == 0) {
            MAX_SEND_COUNT  = atoi(argv[next_elem]);
        } else if ( strcmp(argv[elem], "-p"  ) == 0) {
            PUB_ELEM_COUNT  = atoi(argv[next_elem]);
        } else if ( strcmp(argv[elem], "-j"  ) == 0) {
            JITTER_INTERVAL = strtod(argv[next_elem], NULL);
            JITTER_ENABLED = 1;
        } else {
            fprintf(stderr, "Unknown flag: %s %s\n", argv[elem], argv[next_elem]);
        }
        elem = next_elem + 1;
    }

    if ( (MAX_SEND_COUNT < 1)
         || (ITERATION_SIZE < 1)
         || (PUB_ELEM_COUNT < 1)
         || (JITTER_INTERVAL < 0.0) )
        { fprintf(stderr, "%s\n", USAGE); exit(1); }


    /* Example variables. */
    char    *str_node_id  = getenv("HOSTNAME");
    char    *str_prog_ver = "1.0";
    char     var_string[100] = {0};
    int      var_int;
    double   var_double;
    
    my_sos = SOS_init( &argc, &argv, SOS_ROLE_CLIENT, SOS_LAYER_APP);
    SOS_SET_CONTEXT(my_sos, "demo_app.main");

    srandom(my_sos->my_guid);

    printf("demo_app starting...\n"); fflush(stdout);
    
    dlog(0, "Creating a pub...\n");

    pub = SOS_pub_create(my_sos, "demo", SOS_NA
//......... [remainder of the code omitted] .........
