
C++ MPI_Get_processor_name Function Code Examples


This article collects typical usage examples of the C++ MPI_Get_processor_name function. If you have been wondering what exactly MPI_Get_processor_name does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help.



Below, 20 code examples of the MPI_Get_processor_name function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
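Before diving into the collected examples, here is a minimal, self-contained sketch of the canonical call pattern (an illustrative program written for this article, not taken from any of the projects below). MPI_Get_processor_name fills a caller-supplied buffer of at least MPI_MAX_PROCESSOR_NAME characters with the name of the node the calling process runs on, and returns the actual name length through its second argument:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* The buffer must provide at least MPI_MAX_PROCESSOR_NAME characters;
     * the call stores a null-terminated name and its length. */
    char name[MPI_MAX_PROCESSOR_NAME];
    int len;
    MPI_Get_processor_name(name, &len);

    printf("Rank %d is running on %s (name length %d)\n", rank, name, len);

    MPI_Finalize();
    return 0;
}

Built with an MPI compiler wrapper (typically mpicc) and launched with mpirun or mpiexec, each rank prints the host it landed on, which is essentially the first thing most of the examples below do before their real work starts.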

Example 1: main

int
main(int argc, char **argv)
{
  int status;
  char* env = NULL;

  env = getenv("GPAW_OFFLOAD");
  if (env) {
      errno = 0;
      gpaw_offload_enabled = strtol(env, NULL, 10);
      if (errno) {
        fprintf(stderr, 
                "Wrong value for for GPAW_OFFLOAD.\nShould be either 0 or 1, but was %s\n",
                env);
      }
  }
  fprintf(stderr, "GPAW info: GPAW_OFFLOAD=%d\n", gpaw_offload_enabled);
  
#ifdef CRAYPAT
  PAT_region_begin(1, "C-Initializations");
#endif

#ifndef GPAW_OMP
  MPI_Init(&argc, &argv);
#else
  int granted;
  MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &granted);
  if(granted != MPI_THREAD_MULTIPLE) exit(1);
#endif // GPAW_OMP

// Get initial timing
  double t0 = MPI_Wtime();

#ifdef GPAW_PERFORMANCE_REPORT
  gpaw_perf_init();
#endif

#ifdef GPAW_MPI_MAP
  int tag = 99;
  int myid, numprocs, i, procnamesize;
  char procname[MPI_MAX_PROCESSOR_NAME];
  MPI_Comm_size(MPI_COMM_WORLD, &numprocs );
  MPI_Comm_rank(MPI_COMM_WORLD, &myid );
  MPI_Get_processor_name(procname, &procnamesize);
  if (myid > 0) {
      MPI_Send(&procnamesize, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
      MPI_Send(procname, procnamesize, MPI_CHAR, 0, tag, MPI_COMM_WORLD);
  }
  else {
      printf("MPI_COMM_SIZE is %d \n", numprocs);
      printf("%s \n", procname);
      
      for (i = 1; i < numprocs; ++i) {
          MPI_Recv(&procnamesize, 1, MPI_INT, i, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
          MPI_Recv(procname, procnamesize, MPI_CHAR, i, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
          printf("%s \n", procname);
      }
  }
#endif // GPAW_MPI_MAP

#ifdef GPAW_MPI_DEBUG
  // Default Errhandler is MPI_ERRORS_ARE_FATAL
  MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
#endif

  // Progname seems to be needed in some circumstances to resolve
  // correct default sys.path
  Py_SetProgramName(argv[0]);

  Py_Initialize();

#pragma offload target(mic) if(gpaw_offload_enabled)
    {
        init_openmp();
    }
  
  if (PyType_Ready(&MPIType) < 0)
    return -1;

  if (PyType_Ready(&LFCType) < 0)
    return -1;
  if (PyType_Ready(&LocalizedFunctionsType) < 0)
    return -1;
  if (PyType_Ready(&OperatorType) < 0)
    return -1;
  if (PyType_Ready(&SplineType) < 0)
    return -1;
  if (PyType_Ready(&TransformerType) < 0)
    return -1;
  if (PyType_Ready(&XCFunctionalType) < 0)
    return -1;
  if (PyType_Ready(&lxcXCFunctionalType) < 0)
    return -1;

  PyObject* m = Py_InitModule3("_gpaw", functions,
             "C-extension for GPAW\n\n...\n");
  if (m == NULL)
    return -1;

  Py_INCREF(&MPIType);
//......... part of the code omitted here .........
Developer ID: ryancoleman, Project: lotsofcoresbook2code, Lines of code: 101, Source file: _gpaw.c


Example 2: main

int main(int argc, char* argv[]){
  time_t time1 = time(0), time2;

  //-------MPI initialization-------------

  int numprocs, myid, namelen;
  char processor_name[MPI_MAX_PROCESSOR_NAME];

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &myid);

  MPI_Get_processor_name(processor_name, &namelen);
  fprintf(stderr, "Process %d running on %s\n", myid, processor_name);

  string numbers = "0123456789";  // !!!!! np <= 10
  string myid_str(numbers, myid, 1);

  MPI_Status status;

  // define a new MPI data type for particles
  MPI_Datatype particletype;
  MPI_Type_contiguous(18, MPI_DOUBLE, &particletype);  // !!! 14->18 changed
  MPI_Type_commit(&particletype);

  //-------- end MPI init----------------

  // wait for gdb
  waitforgdb(myid);

  // read input file (e.g. patric.cfg):

  if(argv[1] == 0){
    printf("No input file name !\n");
    MPI_Abort(MPI_COMM_WORLD, 0);
  }
  input_from_file(argv[1], myid);
  double eps_x = rms_emittance_x0;  // handy abbreviation
  double eps_y = rms_emittance_y0;  // same

  // Synchronous particle:

  SynParticle SP;
  SP.Z = Z;
  SP.A = A;
  SP.gamma0 = 1.0 + (e_kin*1e6*qe)/(mp*clight*clight) ;
  SP.beta0 = sqrt((SP.gamma0*SP.gamma0-1.0)/(SP.gamma0*SP.gamma0)) ;
  SP.eta0 = 1.0/pow(gamma_t, 2)-1.0/pow(SP.gamma0, 2);

  //-------Init Lattice-------

  BeamLine lattice;
  double tunex, tuney; 
  SectorMap CF(CF_advance_h/NCF, CF_advance_v/NCF, CF_R, CF_length/NCF, SP.gamma0);
  BeamLine CF_cell;
  if(madx_input_file == 1){
    // read madx sectormap and twiss files 
	cout << "madx sectormap" << endl;
    string data_dir_in = input;
    lattice.init(data_dir_in+"/mad/", circum, tunex, tuney); 
  }
  else{
    // init constant focusing (CF) sectormap and cell:
	cout << "constsnt focusing" << endl;
    for(int j=0; j<NCF; j++)
      CF_cell.add_map(CF);
    lattice.init(CF_cell);
  }

  // Other variables:
  double dx = 2.0*piperadius/(NX-1.0);  // needed for Poisson solver and grids
  double dy = 2.0*piperadius/(NY-1.0);  // needed for Poisson solver and grids
  double dz = circum/NZ;
  double ds = 0.4;  // value needed here only for setting dxs, dys.
  double dxs = 4.0*(dx/ds)/(NX-1.0);  // only for plotting xs, not for tracking
  double dys = 4.0*(dx/ds)/(NX-1.0);  // only for plotting ys, not for tracking
  double charge = current*circum/(NPIC*SP.beta0*clight*qe);  // macro-particle charge Q/e
  double zm = 0.5*circum*bunchfactor;  // (initial) bunch length
  if(init_pic_z == 1 || init_pic_z == 3 || init_pic_z == 4 || init_pic_z == 6)
    zm = 1.5*0.5*circum*bunchfactor;  // for parabolic bunch
  double zm1 = -zm*1.0;  // left bunch boundary
  double zm2 = zm*1.0;  // right bunch boundary
  if(init_pic_z==7)
	zm=0.25;
  double rmsToFull;  // ratio of rms to full emittance for Bump; SP

  // open output file patric.dat:

  string data_dir = ausgabe;
  data_dir = data_dir + "/";
  string outfile = data_dir + "patric.dat";
  FILE *out = fopen(outfile.c_str(), "w"); 

  // init random number generator:
  long d = -11*(myid+1);  // was -1021  transverse distribution: each slice needs a different initialization !
  long dl = -103;  // was -103   longitudinal plane: same random set needed
  long dran = -101;  // for BTF noise excitation: same random sets needed


  // set some global lattice parameters
//......... part of the code omitted here .........
Developer ID: sappel, Project: patric_mti, Lines of code: 101, Source file: Main.cpp


Example 3: main

int
main(int argc, char *argv[]) {
    struct plat_opts_config_mpilogme config;
    SDF_boolean_t success = SDF_TRUE;
    uint32_t numprocs;
    int tmp, namelen, mpiv = 0, mpisubv = 0, i;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int msg_init_flags = SDF_MSG_MPI_INIT;
    config.inputarg = 0;
    config.msgtstnum = 500;

    /* We may not need to gather anything from here but what the heck */
    loadProperties("/opt/schooner/config/schooner-med.properties"); // TODO get filename from command line

    /* make sure this is first in order to get the mpi init args */
    success = plat_opts_parse_mpilogme(&config, argc, argv) ? SDF_FALSE : SDF_TRUE;

    printf("input arg %d msgnum %d success %d\n", config.inputarg, config.msgtstnum, success);
    fflush(stdout);
    myid = sdf_msg_init_mpi(argc, argv, &numprocs, &success, msg_init_flags);

    if ((!success) || (myid < 0)) {
        printf("Node %d: MPI Init failure... exiting - errornum %d\n", myid, success);
        fflush(stdout);
        MPI_Finalize();
        return (EXIT_FAILURE);
    }

    int debug = 0;
    while(debug);

    tmp = init_msgtest_sm((uint32_t)myid);

    /* Enable this process to run threads across 2 cpus, MPI will default to running all threads
     * on only one core which is not what we really want as it forces the msg thread to time slice
     * with the fth threads that send and receive messages
     * first arg is the number of the processor you want to start off on and arg #2 is the sequential
     * number of processors from there
     */
    lock_processor(0, 7);
    sleep(1);
    msg_init_flags =  msg_init_flags | SDF_MSG_RTF_DISABLE_MNGMT;

    /* Startup SDF Messaging Engine FIXME - dual node mode still - pnodeid is passed and determined
     * from the number of processes mpirun sees.
     */
    sdf_msg_init(myid, &pnodeid, msg_init_flags);

    MPI_Get_version(&mpiv, &mpisubv);
    MPI_Get_processor_name(processor_name, &namelen);

    printf("Node %d: MPI Version: %d.%d Name %s \n", myid, mpiv, mpisubv, processor_name);
    fflush(stdout);

    plat_log_msg(
            PLAT_LOG_ID_INITIAL,
            LOG_CAT,
            PLAT_LOG_LEVEL_TRACE,
            "\nNode %d: Completed Msg Init.. numprocs %d pnodeid %d Starting Test\n",
            myid, numprocs, pnodeid);

    for (i = 0; i < 2; i++) {
        sleep(2);
        plat_log_msg(PLAT_LOG_ID_INITIAL, LOG_CAT, PLAT_LOG_LEVEL_TRACE,
                "\nNode %d: Number of sleeps %d\n", myid, i);
    }

    fthInit();
    sdf_msg_startmsg(myid, 0, NULL); 


    /* SAVE THIS may need to play with the priority later */
#if 0
    struct sched_param param;
    int newprio = 60;
    pthread_attr_t hi_prior_attr;

    pthread_attr_init(&hi_prior_attr);
    pthread_attr_setschedpolicy(&hi_prior_attr, SCHED_FIFO);
    pthread_attr_getschedparam(&hi_prior_attr, &param);
    param.sched_priority = newprio;
    pthread_attr_setschedparam(&hi_prior_attr, &param);
    pthread_create(&fthPthread, &hi_prior_attr, &fthPthreadRoutine, NULL);
#endif

    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_create(&fthPthread, &attr, &MultiNodeMultiPtlMstosrPthreadRoutine, &numprocs);

    plat_log_msg(PLAT_LOG_ID_INITIAL, LOG_CAT, PLAT_LOG_LEVEL_TRACE,
                 "\nNode %d: Created pthread for FTH %d\n", myid, i);

    pthread_join(fthPthread, NULL);

    plat_log_msg(PLAT_LOG_ID_INITIAL, LOG_CAT, PLAT_LOG_LEVEL_TRACE,
                 "\nNode %d: SDF Messaging Test Complete - i %d\n", myid, i);

    /* Lets stop the messaging engine this will block until they complete */
    /* FIXME arg is the threadlvl */
#if 0
//......... part of the code omitted here .........
Developer ID: AlfredChenxf, Project: zetascale, Lines of code: 101, Source file: fcnl_multinode_multiptl_test2.c


Example 4: main

int main (int argc, char *argv[])
{
    int rank, nprocs, ilen;
    char processor[MPI_MAX_PROCESSOR_NAME];
    double tstart = 0.0, tend = 0.0;

    MPI_Status reqstat;
    MPI_Request send_request;
    MPI_Request recv_request;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Get_processor_name(processor, &ilen);

    if (nprocs != 2)
    {
        if(rank == 0) printf("This test requires exactly two processes\n");

        MPI_Finalize();
        exit(EXIT_FAILURE);
    }

    int other_proc = (rank == 1 ? 0 : 1);

    // Hard code GPU affinity since this example only works with 2 GPUs.
    int igpu = 0;

   // if(rank == 0 )
       /* printf("%s allocates %d MB pinned memory with regual mpi and "
               "bidirectional bandwidth\n", argv[0],
               MAX_MSG_SIZE / 1024 / 1024);
	*/
    /*printf("node=%d(%s): my other _proc = %d and using GPU=%d\n", rank,
            processor, other_proc, igpu);
	*/
    char *h_src, *h_rcv;
   // h_src=(char *)malloc(MYBUFSIZE*100*sizeof(char));
   // h_rcv=(char *)malloc(MYBUFSIZE*100*sizeof(char));
    CHECK(cudaSetDevice(igpu));
    CHECK(cudaMallocHost((void**)&h_src, MYBUFSIZE));
    CHECK(cudaMallocHost((void**)&h_rcv, MYBUFSIZE));

    char *d_src, *d_rcv;
    CHECK(cudaSetDevice(igpu));
    CHECK(cudaMalloc((void **)&d_src, MYBUFSIZE));
    CHECK(cudaMalloc((void **)&d_rcv, MYBUFSIZE));

    initalData(h_src, h_rcv, MYBUFSIZE);

    CHECK(cudaMemcpy(d_src, h_src, MYBUFSIZE, cudaMemcpyDefault));
    CHECK(cudaMemcpy(d_rcv, h_rcv, MYBUFSIZE, cudaMemcpyDefault));

    // latency test
    for(int size = 1; size <= MAX_MSG_SIZE; size = size * 2)
    {
        MPI_Barrier(MPI_COMM_WORLD);

        if(rank == 0)
        {
            tstart = MPI_Wtime();

            for(int i = 0; i < loop; i++)
            {
                /*
                 * Transfer data from the GPU to the host to be transmitted to
                 * the other MPI process.
                 */
                CHECK(cudaMemcpy(h_src, d_src, size, cudaMemcpyDeviceToHost));

                // bi-directional transmission
                MPI_Isend(h_src, size, MPI_CHAR, other_proc, 100,
                          MPI_COMM_WORLD, &send_request);
                MPI_Irecv(h_rcv, size, MPI_CHAR, other_proc, 10, MPI_COMM_WORLD,
                          &recv_request);


                MPI_Waitall(1, &recv_request, &reqstat);
                MPI_Waitall(1, &send_request, &reqstat);

                /*
                 * Transfer the data received from the other MPI process to
                 * the device.
                 */
                CHECK(cudaMemcpy(d_rcv, h_rcv, size, cudaMemcpyHostToDevice));
            }

            tend = MPI_Wtime();
        }
        else
        {
            for(int i = 0; i < loop; i++)
            {
                /*
                 * Transfer data from the GPU to the host to be transmitted to
                 * the other MPI process.
                 */
                CHECK(cudaMemcpy(h_src, d_src, size, cudaMemcpyDeviceToHost));

                // bi-directional transmission
//......... part of the code omitted here .........
Developer ID: achuthpv, Project: me766, Lines of code: 101, Source file: simpleP2P_bk.c


Example 5: ADIOI_cb_gather_name_array

/* ADIOI_cb_gather_name_array() - gather a list of processor names from all processes
 *                          in a communicator and store them on rank 0.
 *
 * This is a collective call on the communicator(s) passed in.
 *
 * Obtains a rank-ordered list of processor names from the processes in
 * "dupcomm".
 *
 * Returns 0 on success, -1 on failure.
 *
 * NOTE: Needs some work to cleanly handle out of memory cases!  
 */
int ADIOI_cb_gather_name_array(MPI_Comm comm,
			       MPI_Comm dupcomm,
			       ADIO_cb_name_array *arrayp)
{
    char my_procname[MPI_MAX_PROCESSOR_NAME], **procname = 0;
    int *procname_len = NULL, my_procname_len, *disp = NULL, i;
    int commsize, commrank, found;
    ADIO_cb_name_array array = NULL;
    int alloc_size;

    if (ADIOI_cb_config_list_keyval == MPI_KEYVAL_INVALID) {
        /* cleaned up by ADIOI_End_call */
	MPI_Keyval_create((MPI_Copy_function *) ADIOI_cb_copy_name_array, 
			  (MPI_Delete_function *) ADIOI_cb_delete_name_array,
			  &ADIOI_cb_config_list_keyval, NULL);
    }
    else {
	MPI_Attr_get(comm, ADIOI_cb_config_list_keyval, (void *) &array, &found);
        if (found) {
            ADIOI_Assert(array != NULL);
	    *arrayp = array;
	    return 0;
	}
    }

    MPI_Comm_size(dupcomm, &commsize);
    MPI_Comm_rank(dupcomm, &commrank);

    MPI_Get_processor_name(my_procname, &my_procname_len);

    /* allocate space for everything */
    array = (ADIO_cb_name_array) ADIOI_Malloc(sizeof(*array));
    if (array == NULL) {
	return -1;
    }
    array->refct = 2; /* we're going to associate this with two comms */

    if (commrank == 0) {
	/* process 0 keeps the real list */
	array->namect = commsize;

	array->names = (char **) ADIOI_Malloc(sizeof(char *) * commsize);
	if (array->names == NULL) {
	    return -1;
	}
	procname = array->names; /* simpler to read */

	procname_len = (int *) ADIOI_Malloc(commsize * sizeof(int));
	if (procname_len == NULL) { 
	    return -1;
	}
    }
    else {
	/* everyone else just keeps an empty list as a placeholder */
	array->namect = 0;
	array->names = NULL;
    }
    /* gather lengths first */
    MPI_Gather(&my_procname_len, 1, MPI_INT, 
	       procname_len, 1, MPI_INT, 0, dupcomm);

    if (commrank == 0) {
#ifdef CB_CONFIG_LIST_DEBUG
	for (i=0; i < commsize; i++) {
	    FPRINTF(stderr, "len[%d] = %d\n", i, procname_len[i]);
	}
#endif

	alloc_size = 0;
	for (i=0; i < commsize; i++) {
	    /* add one to the lengths because we need to count the
	     * terminator, and we are going to use this list of lengths
	     * again in the gatherv.  
	     */
	    alloc_size += ++procname_len[i];
	}
	
	procname[0] = ADIOI_Malloc(alloc_size);
	if (procname[0] == NULL) {
	    return -1;
	}

	for (i=1; i < commsize; i++) {
	    procname[i] = procname[i-1] + procname_len[i-1];
	}
	
	/* create our list of displacements for the gatherv.  we're going
	 * to do everything relative to the start of the region allocated
//......... part of the code omitted here .........
Developer ID: abhinavvishnu, Project: matex, Lines of code: 101, Source file: cb_config_list.c
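The MPI_Gatherv stage of the routine above is cut off by the excerpt. For comparison, here is a simplified, self-contained sketch (not the ADIOI implementation) of the same idea: every rank reports its processor name and rank 0 collects them in rank order. Using fixed-size slots of MPI_MAX_PROCESSOR_NAME characters trades a little memory for a single MPI_Gather in place of the length gather plus MPI_Gatherv used in cb_config_list.c:

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    char myname[MPI_MAX_PROCESSOR_NAME] = {0};
    int len;
    MPI_Get_processor_name(myname, &len);

    char *all = NULL;
    if (rank == 0)
        all = (char *) malloc((size_t) size * MPI_MAX_PROCESSOR_NAME);

    /* Every rank contributes one fixed-size slot; rank 0 receives them in
     * rank order, so &all[i * MPI_MAX_PROCESSOR_NAME] is rank i's name. */
    MPI_Gather(myname, MPI_MAX_PROCESSOR_NAME, MPI_CHAR,
               all, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        for (int i = 0; i < size; i++)
            printf("rank %d runs on %s\n", i, &all[i * MPI_MAX_PROCESSOR_NAME]);
        free(all);
    }

    MPI_Finalize();
    return 0;
}

The fixed-slot variant is usually adequate for modest process counts; the two-phase approach in the original avoids transferring the unused tail of each name buffer.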


Example 6: main

int main (int argc, char **argv)
{
  char pname[MPI_MAX_PROCESSOR_NAME];

  int iter;
  int counter;
  int c;
  int tnum = 0;
  int resultlen;
  int ret;
  double value;
  extern char *optarg;

  while ((c = getopt (argc, argv, "p:")) != -1) {
    switch (c) {
    case 'p':
      if ((ret = GPTLevent_name_to_code (optarg, &counter)) != 0) {
	printf ("Failure from GPTLevent_name_to_code\n");
	return 1;
      }
      if (GPTLsetoption (counter, 1) < 0) {
	printf ("Failure from GPTLsetoption (%s,1)\n", optarg);
	return 1;
      }
      break;
    default:
      printf ("unknown option %c\n", c);
      printf ("Usage: %s [-p option_name]\n", argv[0]);
      return 2;
    }
  }
  
  ret = GPTLsetoption (GPTLabort_on_error, 1);
  ret = GPTLsetoption (GPTLoverhead, 1);
  ret = GPTLsetoption (GPTLnarrowprint, 1);

  if (MPI_Init (&argc, &argv) != MPI_SUCCESS) {
    printf ("Failure from MPI_Init\n");
    return 1;
  }

  ret = GPTLinitialize ();
  ret = GPTLstart ("total");
	 
  ret = MPI_Comm_rank (MPI_COMM_WORLD, &iam);
  ret = MPI_Comm_size (MPI_COMM_WORLD, &nproc);

  ret = MPI_Get_processor_name (pname, &resultlen);
  printf ("Rank %d is running on processor %s\n", iam, pname);

#ifdef THREADED_OMP
  nthreads = omp_get_max_threads ();
#pragma omp parallel for private (iter, ret, tnum)
#endif

  for (iter = 1; iter <= nthreads; iter++) {
#ifdef THREADED_OMP
    tnum = omp_get_thread_num ();
#endif
    printf ("Thread %d of rank %d on processor %s\n", tnum, iam, pname);
    value = sub (iter);
  }

  ret = GPTLstop ("total");
  ret = GPTLpr (iam);

  if (iam == 0) {
    printf ("summary: testing GPTLpr_summary...\n");
    printf ("Number of threads was %d\n", nthreads);
    printf ("Number of tasks was %d\n", nproc);
  }

  // NOTE: if ENABLE_PMPI is set, 2nd pr call below will show some extra send/recv calls
  // due to MPI calls from within GPTLpr_summary_file
  if (GPTLpr_summary (MPI_COMM_WORLD) != 0)
    return 1;

  if (GPTLpr_summary_file (MPI_COMM_WORLD, "timing.summary.duplicate") != 0)
    return 1;

  ret = MPI_Finalize ();

  if (GPTLfinalize () != 0)
    return 1;

  return 0;
}
Developer ID: jmrosinski, Project: GPTL, Lines of code: 87, Source file: summary.c


Example 7: main

int main(int argc, char *argv[]) {
	info = (struct test_info *)malloc(sizeof(struct test_info));
	test_info_init(info);
	info->test_type = 0;
    info->msg_count=50;
    
    struct plat_opts_config_mpilogme config;
    SDF_boolean_t success = SDF_TRUE;
    uint32_t numprocs;
    int tmp, namelen, mpiv = 0, mpisubv = 0;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int msg_init_flags = SDF_MSG_MPI_INIT;

    config.inputarg = 0;
    config.msgtstnum = 50;

    /* We may not need to gather anything from here but what the heck */
    loadProperties("/opt/schooner/config/schooner-med.properties"); // TODO get filename from command line

    /* make sure this is first in order to get the mpi init args */
    success = plat_opts_parse_mpilogme(&config, argc, argv) ? SDF_FALSE : SDF_TRUE;

    printf("input arg %d msgnum %d success %d\n", config.inputarg, config.msgtstnum, success);
    fflush(stdout);
    myid = sdf_msg_init_mpi(argc, argv, &numprocs, &success, msg_init_flags);
    info->myid = myid;
    if ((!success)||(myid < 0)) {
	printf("Node %d: MPI Init failure... exiting - errornum %d\n", myid, success);
	fflush(stdout);
        MPI_Finalize();
        return (EXIT_FAILURE);
    }

    tmp = init_msgtest_sm((uint32_t)myid);

    /* Enable this process to run threads across 2 cpus, MPI will default to running all threads
     * on only one core which is not what we really want as it forces the msg thread to time slice
     * with the fth threads that send and receive messages
     * first arg is the number of the processor you want to start off on and arg #2 is the sequential
     * number of processors from there
     */
    lock_processor(0, 2);
    info->lock_cpu = 2;
    /* Startup SDF Messaging Engine FIXME - dual node mode still - pnodeid is passed and determined
     * from the number of processes mpirun sees.
     */
    sleep(1);
    msg_init_flags =  msg_init_flags | SDF_MSG_RTF_DISABLE_MNGMT;
    sdf_msg_init(myid, &pnodeid, msg_init_flags);

    MPI_Get_version(&mpiv, &mpisubv);
    MPI_Get_processor_name(processor_name, &namelen);

    printf("Node %d: MPI Version: %d.%d Name %s \n", myid, mpiv, mpisubv,
            processor_name);
    fflush(stdout);

    plat_log_msg(
            PLAT_LOG_ID_INITIAL,
            LOG_CAT,
            PLAT_LOG_LEVEL_TRACE,
            "\nNode %d: Completed Msg Init.. numprocs %d pnodeid %d Starting Test\n",
            myid, numprocs, pnodeid);
    info->pnodeid = pnodeid;
    for (msgCount = 0; msgCount < 2; msgCount++) {
        sleep(2);
        plat_log_msg(PLAT_LOG_ID_INITIAL, LOG_CAT, PLAT_LOG_LEVEL_TRACE,
                "\nNode %d: Number of sleeps %d\n", myid, msgCount);
    }

    /* create the fth test threads */

    fthInit(); // Init

    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_create(&fthPthread, &attr, &SystemPthreadRoutine, &myid);

    plat_log_msg(PLAT_LOG_ID_INITIAL, LOG_CAT, PLAT_LOG_LEVEL_TRACE,
            "\nNode %d: Created pthread for System protocol\n", myid);
    info->pthread_info = 1;
    info->fth_info = 2;
    pthread_join(fthPthread, NULL);

    plat_log_msg(PLAT_LOG_ID_INITIAL, LOG_CAT, PLAT_LOG_LEVEL_TRACE,
            "\nNode %d: SDF Messaging Test Complete\n", myid);

    /* Lets stop the messaging engine this will block until they complete */
    /* FIXME arg is the threadlvl */
    sdf_msg_stopmsg(myid, SYS_SHUTDOWN_SELF);

    plat_shmem_detach();
    info->success++;
    if (myid == 0) {
        sched_yield();
        printf("Node %d: Exiting message test after yielding... Calling MPI_Finalize\n", myid);
        fflush(stdout);
        sched_yield();
        MPI_Finalize();
        print_test_info(info);
//......... part of the code omitted here .........
Developer ID: AlfredChenxf, Project: zetascale, Lines of code: 101, Source file: fcnl_system_test1.c


Example 8: main

int main(int argc,char* argv[])
{
  int numtasks, rank, rc;
  int micros=35;
  int minsec=42;
  const int buf_size = 60;		/* Size of the buffer for timestamp */
  /* initialize MPI and check for success*/
  rc = MPI_Init(&argc,&argv);
  if (rc != MPI_SUCCESS)
    {
      printf ("Error starting MPI programm. Termianting.\n");
      MPI_Abort(MPI_COMM_WORLD, rc);
    }
  /* get size of comm and rank in that comm */
  MPI_Comm_size(MPI_COMM_WORLD,&numtasks );
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
  /* Make sure we have at least 2 processes (we need at least that many). */
  if (numtasks < 2) 
    {
      fprintf(stderr, "World size must be at least two for %s to run properly!\n", argv[0]);
      MPI_Abort(MPI_COMM_WORLD, 1); 
    }
  /* get hostname */
  char hostname[MPI_MAX_PROCESSOR_NAME];
  int resultlength=0;
  MPI_Get_processor_name(hostname,&resultlength);
  /* get current time */
  struct tm *Tm;
  struct timeval detail_time;
  time_t timer = time(NULL);
  Tm=localtime(&timer);
  gettimeofday(&detail_time,NULL);
  micros = detail_time.tv_usec;
  /* workernodes do */
  if (rank != 0)
    {
      /* make formatted string from time */
      char timestamp[buf_size];
      snprintf(timestamp,buf_size,"%s(%d):%d %d %d, %d:%d:%d and %d us\n",
	       //Tm->tm_wday, /* Mon - Sun */
	       hostname,
	       rank,
	       Tm->tm_mday,
	       Tm->tm_mon+1,
	       Tm->tm_year+1900,
	       Tm->tm_hour,
	       Tm->tm_min,
	       Tm->tm_sec,
	       (int) detail_time.tv_usec); /* /1000 for ms */
      
      /* send timestamp to Master */
      MPI_Send(timestamp, buf_size, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
    }else if (rank == 0)
    {
      /* print received messages */
      printf("The master node received the following timestamps:\n");
      char buf[buf_size];
      for (int i = 1; i < numtasks; i++)
	{
	  MPI_Recv(buf, buf_size, MPI_CHAR, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	  fprintf(stdout, "%s", buf);
	}
    }
  /* collect the minimum from all processes */
  MPI_Reduce(&micros, &minsec, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD);
  if (rank == 0)
    {
      printf("Minimum of all microsecond counts was:%dns\n",minsec);
    }
  MPI_Barrier(MPI_COMM_WORLD);
  fprintf(stdout,"Rang %d beendet jetzt!\n",rank);
  /* finalize the MPI environment */
  MPI_Finalize();
  return 0;
}
Developer ID: oostlander, Project: hlr, Lines of code: 75, Source file: timempi2.c


Example 9: initParallelEnv

int initParallelEnv(){
    omp_set_num_threads(THREADS);

    /* Setup MPI programming environment */
	MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, &threadSupport);

	comm = MPI_COMM_WORLD;
	MPI_Comm_size(comm, &numMPIprocs);
	MPI_Comm_rank(comm, &myMPIRank);

	/*Find the number of bytes for an int */
	sizeInteger = sizeof(int);

	/* Find the processor name of each MPI process */
    MPI_Get_processor_name(myProcName, &procNameLen);

	/* Use processor name to create a communicator
	 * across node boundaries.
	 */
	setupCommunicators();

	/* setup OpenMP programming environment */
    #pragma omp parallel shared(numThreads,globalIDarray,myMPIRank)
   {
	   numThreads = omp_get_num_threads();
	   myThreadID = omp_get_thread_num();

	   /* Allocate space for globalIDarray */
        #pragma omp single
       {
           globalIDarray = (int *)malloc(numThreads * sizeof(int));
       }

	   /*calculate the globalID for each thread */
	   globalIDarray[myThreadID] = (myMPIRank * numThreads) + myThreadID;
   }
    MPI_Barrier(comm);

    gaspi_config_t config;
    GASPI(config_get(&config));
    config.qp_count = THREADS;
    GASPI(config_set(config));
    /* GASPI setup */
    GASPI(proc_init(GASPI_BLOCK));

    gaspi_rank_t totalRanks;
    GASPI(proc_num(&totalRanks));

    gaspi_rank_t rank;
    GASPI(proc_rank(&rank));

    gaspi_number_t q_num;
    GASPI(queue_num(&q_num));
    assert (q_num == THREADS);

    GASPI(barrier (GASPI_GROUP_ALL, GASPI_BLOCK));
    // ok, we will continue to use the MPI ranks, just make sure GASPI and MPI ranks are identical
    // this is not guaranteed, so depending on the setup this may fail.
    assert (totalRanks == numMPIprocs);
    assert (rank == myMPIRank);

   /* set parallel info in benchmark report type */
   setParallelInfo(numMPIprocs,threadSupport,numThreads);

return 0;
}
Developer ID: jbreitbart, Project: OpenMP-GASPI-MicroBenchmark-Suite, Lines of code: 66, Source file: parallelEnvironment.c
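Example 9 only calls setupCommunicators() to build a communicator "across node boundaries" from the processor name; the helper itself is not shown in the excerpt. A common way to implement that step, sketched below under the assumption that ranks on the same node report identical processor names, is to derive an integer color from the name and feed it to MPI_Comm_split (on MPI-3 and later, MPI_Comm_split_type with MPI_COMM_TYPE_SHARED achieves the same without any string handling):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    char name[MPI_MAX_PROCESSOR_NAME];
    int len;
    MPI_Get_processor_name(name, &len);

    /* Hash the node name into a non-negative color; ranks that report the
     * same name get the same color and thus land in the same
     * per-node communicator. */
    unsigned color = 0;
    for (int i = 0; i < len; i++)
        color = color * 31u + (unsigned char) name[i];
    color &= 0x7fffffffu;

    MPI_Comm node_comm;
    MPI_Comm_split(MPI_COMM_WORLD, (int) color, world_rank, &node_comm);

    int node_rank, node_size;
    MPI_Comm_rank(node_comm, &node_rank);
    MPI_Comm_size(node_comm, &node_size);
    printf("global rank %d is local rank %d of %d on %s\n",
           world_rank, node_rank, node_size, name);

    MPI_Comm_free(&node_comm);
    MPI_Finalize();
    return 0;
}

Note that a hash collision between two distinct node names would incorrectly merge them into one sub-communicator, so robust implementations compare the names themselves or rely on MPI_Comm_split_type.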


Example 10: main

int main (int argc, char *argv[])
{
    int  numproc, rank, len,i;
    char hostname[MPI_MAX_PROCESSOR_NAME];
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numproc);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Get_processor_name(hostname, &len);
    
    FP_PREC *yc, *dyc, *derr, *fullerr;
    FP_PREC *xc, dx, intg, davg_err, dstd_dev, intg_err;
    FP_PREC globalSum = 0.0;

    // MPI variables
    MPI_Request *requestList,request;
    MPI_Status  *status;

    //"real" grid indices
    int imin, imax;
    
    imin = 1 + (rank * (NGRID/numproc));
    
    if(rank == numproc - 1)
    imax = NGRID;
    
    else
    imax = (rank+1) * (NGRID/numproc);
       
    int range = imax - imin + 1;
    
    xc =  (FP_PREC*) malloc((range + 2) * sizeof(FP_PREC));
    yc =  (FP_PREC*) malloc((range + 2) * sizeof(FP_PREC));
    dyc =  (FP_PREC*) malloc((range + 2) * sizeof(FP_PREC));
    dx = (XF - XI)/(double)NGRID;
    for (i = 1; i <= range ; i++)
    {
        //xc[i] = imin + (XF - XI) * (FP_PREC)(i - 1)/(FP_PREC)(NGRID - 1);
        xc[i] = XI + dx * (imin + i - 2);
    }
    
    xc[0] = xc[1] - dx;
    xc[range + 1] = xc[range] + dx;
    
    for( i = 1; i <= range; i++ )
    {
        yc[i] = fn(xc[i]);
    }
    
    yc[0] = fn(xc[0]);
    yc[range + 1] = fn(xc[range + 1]);
    
    for (i = 1; i <= range; i++)
    {
        dyc[i] = (yc[i + 1] - yc[i - 1])/(2.0 * dx);
    }
    
    intg = 0.0;
    for (i = 1; i <= range; i++)
    {
        intg += 0.5 * (xc[i + 1] - xc[i]) * (yc[i + 1] + yc[i]);
    }
    
    MPI_Reduce(&intg, &globalSum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    
    
    //compute the error, average error of the derivatives
    derr = (FP_PREC*)malloc((range + 2) * sizeof(FP_PREC));
    
    //compute the errors
    for(i = 1; i <= range; i++)
    {
        derr[i] = fabs((dyc[i] - dfn(xc[i]))/dfn(xc[i]));
    }
    
    derr[0] = derr[range + 1] = 0.0;
    
    if(rank == 0)
    {
        fullerr = (FP_PREC *)malloc(sizeof(FP_PREC)*NGRID);
        requestList =(MPI_Request*)malloc((numproc-1)*sizeof(MPI_Request));
        for(i = 0;i<range;i++)
        {
            fullerr[i] = derr[i+1];
        }
        for(i = 1; i<numproc; i++)
        {
            int rmin, rmax, *indx;
            rmin = 1 + (i * (NGRID/numproc));
            if(i == numproc - 1)
                rmax = NGRID;
            else
                rmax = (i+1) * (NGRID/numproc);
            MPI_Irecv(fullerr+rmin-1, rmax-rmin+1, MPI_DOUBLE, i, 1, MPI_COMM_WORLD, &(requestList[i-1]));
        }
        double sum = 0.0;
        for(i=0; i<NGRID; i++)
        {
            sum+=fullerr[i];
        }
        davg_err = sum/(FP_PREC)NGRID;
//......... part of the code omitted here .........
Developer ID: rahlk, Project: Parallel-Systems, Lines of code: 101, Source file: p2_nblk.c


Example 11: main

int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  char processor_name[128];
  int namelen = 128;
  int buf0[buf_size];
  int buf1[buf_size];
  MPI_Status status;
  MPI_Comm comm;
  int drank, dnprocs;

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  MPI_Barrier (MPI_COMM_WORLD);

  if (nprocs < 3) {
    printf ("not enough tasks\n");
  }
  else {
    MPI_Comm_split (MPI_COMM_WORLD, rank % 2, nprocs - rank, &comm);
    
    if (comm != MPI_COMM_NULL) {
      MPI_Comm_size (comm, &dnprocs);
      MPI_Comm_rank (comm, &drank);

      if (dnprocs > 1) {
	if (drank == 0) {
	  memset (buf0, 0, buf_size);

	  MPI_Recv (buf1, buf_size, MPI_INT, 1, 0, comm, &status);
	
	  MPI_Send (buf0, buf_size, MPI_INT, 1, 0, comm);
	}
	else if (drank == 1) {
	  memset (buf1, 1, buf_size);

	  MPI_Recv (buf0, buf_size, MPI_INT, 0, 0, comm, &status);

	  MPI_Send (buf1, buf_size, MPI_INT, 0, 0, comm);
	}
      }
      else {
	printf ("(%d) Derived communicator too small (size = %d)\n",
		rank, dnprocs);
      }

      MPI_Comm_free (&comm);
    }
    else {
      printf ("(%d) Got MPI_COMM_NULL\n", rank);
    }
  }

  MPI_Barrier (MPI_COMM_WORLD);

  MPI_Finalize ();
  printf ("(%d) Finished normally\n", rank);
}
Developer ID: Julio-Anjos, Project: simgrid, Lines of code: 66, Source file: basic-deadlock-comm_split.c


Example 12: main

int
main ( int argc, char *argv[] )
{
  int *messList = NULL;
  int testIdx, doTestLoop;
  int i;

  executableName = "com";

  MPI_Init ( &argc, &argv );
  MPI_Get_processor_name ( hostName, &i );

  /* Set global wsize and rank values */
  MPI_Comm_size ( MPI_COMM_WORLD, &wsize );
  MPI_Comm_rank ( MPI_COMM_WORLD, &rank );

  if ( !initAllTestTypeParams ( &testParams ) )
  {
    MPI_Finalize (  );
    exit ( 1 );
  }

  argStruct.testList = "Bidirectional, BidirAsync";

  if ( !processArgs ( argc, argv ) )
  {
    if ( rank == 0 )
      printUse (  );

    MPI_Finalize (  );
    exit ( 1 );
  }

  /* If using a source directory of process rank target files,
   * get the next appropriate file.
   */
  if ( targetDirectory != NULL && getNextTargetFile (  ) == 0 )
  {
    prestaAbort ( "Failed to open target file in target directory %s\n",
                  targetDirectory );
  }

  doTestLoop = 1;
  while ( doTestLoop )
  {
    if ( !setupTestListParams (  ) || !initAllTestTypeParams ( &testParams ) )
    {
      if ( rank == 0 )
        printUse (  );

      MPI_Finalize (  );
      exit ( 1 );
    }

#ifdef PRINT_ENV
    if ( rank == 0 )
      printEnv();
#endif

    printReportHeader (  );

    for ( testIdx = 0; testIdx < TYPETOT; testIdx++ )
    {
      if ( argStruct.testList == NULL
           || ( argStruct.testList != NULL
                && strstr ( argStruct.testList,
                            testParams[testIdx].name ) != NULL ) )
      {
        prestaRankDebug ( 0, "running test index %d\n", testIdx );
        runTest ( &testParams[testIdx] );
      }
    }

    if ( presta_check_data == 1 )
    {
      MPI_Reduce ( &presta_data_err_total, &presta_global_data_err_total,
                   1, MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD );
    }

    if ( targetDirectory == NULL || getNextTargetFile (  ) == 0 )
    {
      doTestLoop = 0;
    }
  }

  printSeparator (  );

  freeBuffers ( &testParams );
  free ( messList );

  MPI_Finalize (  );

  exit ( 0 );
}
Developer ID: 8l, Project: insieme, Lines of code: 94, Source file: com.c


Example 13: main

int main(int argc, char *argv[])
{
    int sendbuf[COUNT], recvbuf[COUNT], i;
    int err = 0, rank, nprocs, errs = 0;
    MPI_Comm intercomm;
    int listenfd, connfd, port, namelen;
    struct sockaddr_in cliaddr, servaddr;
    struct hostent *h;
    char hostname[MPI_MAX_PROCESSOR_NAME];
    socklen_t len, clilen;

    MTest_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (nprocs != 2) {
        printf("Run this program with 2 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    if (rank == 1) {
        /* server */
        listenfd = socket(AF_INET, SOCK_STREAM, 0);
        if (listenfd < 0) {
            printf("server cannot open socket\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        memset(&servaddr, 0, sizeof(servaddr));
        servaddr.sin_family = AF_INET;
        servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
        servaddr.sin_port = 0;

        err = bind(listenfd, (struct sockaddr *) &servaddr, sizeof(servaddr));
        if (err < 0) {
            errs++;
            printf("bind failed\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        len = sizeof(servaddr);
        err = getsockname(listenfd, (struct sockaddr *) &servaddr, &len);
        if (err < 0) {
            errs++;
            printf("getsockname failed\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        port = ntohs(servaddr.sin_port);
        MPI_Get_processor_name(hostname, &namelen);

        err = listen(listenfd, 5);
        if (err < 0) {
            errs++;
            printf("listen failed\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        MPI_Send(hostname, namelen + 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
        MPI_Send(&port, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);

        MPI_Barrier(MPI_COMM_WORLD);

        clilen = sizeof(cliaddr);

        connfd = accept(listenfd, (struct sockaddr *) &cliaddr, &clilen);
        if (connfd < 0) {
            printf("accept failed\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
    }
    else {
        /* client */

        MPI_Recv(hostname, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 1, 0,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Recv(&port, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

        MPI_Barrier(MPI_COMM_WORLD);

        h = gethostbyname(hostname);
        if (h == NULL) {
            printf("gethostbyname failed\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        servaddr.sin_family = h->h_addrtype;
        memcpy((char *) &servaddr.sin_addr.s_addr, h->h_addr_list[0], h->h_length);
        servaddr.sin_port = htons(port);

        /* create socket */
        connfd = socket(AF_INET, SOCK_STREAM, 0);
        if (connfd < 0) {
            printf("client cannot open socket\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        /* connect to server */
        err = connect(connfd, (struct sockaddr *) &servaddr, sizeof(servaddr));
//......... part of the code omitted here .........
Developer ID: NexMirror, Project: MPICH, Lines of code: 101, Source file: join.c


Example 14: main

int main(int argc, char *argv[])
{
	int             ret;
	char           *buf;
	char            processor_name[MPI_MAX_PROCESSOR_NAME];
	int             namelen;
	double          start_time;
        double          used_time;
        double          avg_time;
        double          barrier_time;
        double          us_rate;
        int             max_len, lenbuf;
        int             j;
	int             me, nproc;
        FILE           *fparam ;


        /*
         *  begining ...
         */

	setbuf(stdout, NULL) ;

        /*
         *  max_len  ...
         */
/*
	if (argc != 2)
        {
                printf("Use: bcast <max_len> \n") ;
		exit(1) ;
        }
	max_len =atoi(argv[1]) ;
*/
/*
#if defined(__LINUX__)
        fparam = fopen("bcast.in","rt") ;
#endif
#if defined(__SUNOS__)
        fparam = fopen("bcast.in","rt") ;
#endif
#if defined(__SP2__)
        fparam = fopen("/u/fperez/XMP/MiMPI/test/mp/mpi/performance/bcast/bcast.in","rt") ;
#endif
        if (fparam == NULL)
        {
                printf("ERROR: can not open bcast.in, sorry.\n") ;
		exit(1) ;
        }
        ret = fscanf(fparam,"max_len=%i",&max_len) ;
        fclose(fparam) ;
        if (ret != 1)
        {
                printf("ERROR: can not read a valid 'max_len' value from bcast.in, sorry.\n") ;
		exit(1) ;
        }
*/
	max_len = 1024 * 1024;

        if ( (max_len <= 0) || (max_len >= 8*1024*1024) )
        {
                printf("ERROR: (max_len <= 0) || (max_len >= 8*1024*1024)\n") ;
                exit(1) ;
        }

        /*
         *  MPI init  ...
         */
	ret = MPI_Init(&argc, &argv);	
	if (ret < 0)
	{
		printf("Can't init\n") ;
		exit(1) ;
	}

	MPI_Comm_rank(MPI_COMM_WORLD,&me) ;
	MPI_Get_processor_name(processor_name,&namelen) ;
	MPI_Comm_size(MPI_COMM_WORLD, &nproc) ;

#if (0)
	printf("Process %d; total %d is alive on %s\n",me,nproc,processor_name) ;
#endif


        buf = (char *) malloc((unsigned) max_len) ;
        if (buf == NULL)
        {
                perror("Error en malloc") ;
                exit(1) ;
        }
	memset(buf,'x',max_len) ;

	printf("barrier\n") ;
	 MPI_Barrier(MPI_COMM_WORLD) ;

        /* ... Barrier ... */
	start_time = MPI_Wtime() ;
	for(j = 0; j < 10; j++)
        {
	  MPI_Barrier(MPI_COMM_WORLD) ;
//......... part of the code omitted here .........
Developer ID: acaldero, Project: MiMPI, Lines of code: 101, Source file: bcast.c


Example 15: processor_name

	std::string processor_name() const {
	  char name[MPI_MAX_PROCESSOR_NAME];
	  int len;
	  MPI_Get_processor_name(name, &len);
	  return std::string(name);
	}
Developer ID: jsharpe, Project: mpl, Lines of code: 6, Source file: environment.hpp


Example 16: main

// argc = argument count, argv = argument vector (program name, input file, ...)
int main(int argc, char *argv[])
{
	// create win object, this is used for locks
	MPI_Win win;
	// needed for MPI
	int namelen = 0;
	int myid, numprocs = 0;
	// processor name
	char processor_name[MPI_MAX_PROCESSOR_NAME];
	//initialize MPI execution environment
	MPI_Init(&argc, &argv);
	//each process gets the total # of processes
	//the total # of processes is specified in mpirun -np n
	MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
	//each process gets its own id
	MPI_Comm_rank(MPI_COMM_WORLD, &myid);
	// needed for times
	double program_start = 0;
	double program_end = 0;
	double process_start = 0;
	double process_end = 0;
	// take time
	if (myid == 0)
		// get start program time
		program_start = MPI_Wtime();
	// Gets the name of the processor
	MPI_Get_processor_name(processor_name, &namelen);
	// number of processes
	int n = 0;
	// display info
	fprintf(stderr, "process %d on %s\n", myid, processor_name);
	fflush(stderr);
	// create win object for locks
	MPI_Win_create(NULL, 0, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &win);
	// declare array to hold char from words plus \0
	char* arr;
	// list to keep track of length of each word
	short* list;
	// size of entire array
	int arr_size = 0;
	// size of the list
	int list_size = 0;
	// new list of words that are not palindromes
	char* new_words;
	// size of new array of words each process will
	// have inorder to send back to root after finding
	// all none plaindrome words
	int new_size = 0;
	// this will be the total size of non-palindrome words
	// which will be recieved from each process
	int total_size = 0;
	// temp vector to hold arrays in file
	std::vector<std::string>* words;
	// root does
	if (myid == 0)
	{
		// stream to open file
		std::fstream in;
		// vector to dynamically grow as we add strings to it
		// this makes it so we don't need to open file twice since 
		// we would normally open file and count number of words
		// then reopen it to get the actual words to put in an array
		// we just declared based off the size we got the first time
		words = new std::vector<std::string>();
		// open file as instream
		in.open("Palindromes.txt", std::ios::in);
		// if error opening file
		if (in.fail())
		{
			// display message and close
			std::cout << "Error Opening File" << std::endl;
			MPI_Abort(MPI_COMM_WORLD, 1);
		}
		// no error while opening file
		else
		{
			// temp string to hold each word
			std::string temp;
			// grab each word from each line
			while (getline(in, temp))
			{
				// put word into vector
				words->push_back(temp);
				// loop over each string
//......... part of the code omitted here .........
