C++ MPI_Comm_free Function Code Examples


This article collects typical usage examples of the MPI_Comm_free function in C++. If you are wondering what exactly MPI_Comm_free does, how to call it, or what real-world uses look like, the hand-picked code examples below may help.



The following presents 20 code examples of the MPI_Comm_free function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
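Before the collected examples, here is a minimal sketch of the pattern they all share: a communicator obtained from MPI_Comm_dup, MPI_Comm_split, MPI_Cart_create, or a similar constructor is used and then released with MPI_Comm_free, which also resets the handle to MPI_COMM_NULL. This sketch is not taken from any of the listed projects; the names in it are illustrative only.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int wrank, sub_rank;
    MPI_Comm sub_comm;   /* illustrative name, not from the examples below */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &wrank);

    /* Create a new communicator by splitting MPI_COMM_WORLD on rank parity */
    MPI_Comm_split(MPI_COMM_WORLD, wrank % 2, wrank, &sub_comm);
    MPI_Comm_rank(sub_comm, &sub_rank);
    printf("world rank %d has rank %d in its sub-communicator\n", wrank, sub_rank);

    /* Release the communicator; MPI sets the handle back to MPI_COMM_NULL */
    MPI_Comm_free(&sub_comm);

    MPI_Finalize();
    return 0;
}

Every example below follows some variant of this create/use/free cycle.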

Example 1: main


//......... part of the code is omitted here .........
        else
            check(recvbuf[1] == wrank - 1);

        if (wrank == wsize - 1)
            check(recvbuf[0] == 0xdeadbeef);
        else
            check(recvbuf[0] == wrank + 1);
    }

    /* alltoall */
    {
        int sendbuf[2]    = { -(wrank+1), wrank+1 };
        int recvbuf[2]    = { 0xdeadbeef, 0xdeadbeef };

        /* should see one send to each neighbor (rank-1 and rank+1) and one
         * receive each from same */
        MPI_Neighbor_alltoall(sendbuf, 1, MPI_INT, recvbuf, 1, MPI_INT, cart);

        if (wrank == 0)
            check(recvbuf[0] == 0xdeadbeef);
        else
            check(recvbuf[0] == wrank);

        if (wrank == wsize - 1)
            check(recvbuf[1] == 0xdeadbeef);
        else
            check(recvbuf[1] == -(wrank + 2));
    }

    /* alltoallv */
    {
        int sendbuf[2]    = { -(wrank+1), wrank+1 };
        int recvbuf[2]    = { 0xdeadbeef, 0xdeadbeef };
        int sendcounts[2] = { 1, 1 };
        int recvcounts[2] = { 1, 1 };
        int sdispls[2]    = { 0, 1 };
        int rdispls[2]    = { 1, 0 };

        /* should see one send to each neighbor (rank-1 and rank+1) and one receive
         * each from same, but put them in opposite slots in the buffer */
        MPI_Neighbor_alltoallv(sendbuf, sendcounts, sdispls, MPI_INT,
                               recvbuf, recvcounts, rdispls, MPI_INT,
                               cart);

        if (wrank == 0)
            check(recvbuf[1] == 0xdeadbeef);
        else
            check(recvbuf[1] == wrank);

        if (wrank == wsize - 1)
            check(recvbuf[0] == 0xdeadbeef);
        else
            check(recvbuf[0] == -(wrank + 2));
    }

    /* alltoallw */
    {
        int sendbuf[2]            = { -(wrank+1), wrank+1 };
        int recvbuf[2]            = { 0xdeadbeef, 0xdeadbeef };
        int sendcounts[2]         = { 1, 1 };
        int recvcounts[2]         = { 1, 1 };
        MPI_Aint sdispls[2]       = { 0, sizeof(int) };
        MPI_Aint rdispls[2]       = { sizeof(int), 0 };
        MPI_Datatype sendtypes[2] = { MPI_INT, MPI_INT };
        MPI_Datatype recvtypes[2] = { MPI_INT, MPI_INT };

        /* should see one send to each neighbor (rank-1 and rank+1) and one receive
         * each from same, but put them in opposite slots in the buffer */
        MPI_Neighbor_alltoallw(sendbuf, sendcounts, sdispls, sendtypes,
                               recvbuf, recvcounts, rdispls, recvtypes,
                               cart);

        if (wrank == 0)
            check(recvbuf[1] == 0xdeadbeef);
        else
            check(recvbuf[1] == wrank);

        if (wrank == wsize - 1)
            check(recvbuf[0] == 0xdeadbeef);
        else
            check(recvbuf[0] == -(wrank + 2));
    }


    MPI_Comm_free(&cart);
#endif /* defined(TEST_NEIGHB_COLL) */

    MPI_Reduce((wrank == 0 ? MPI_IN_PLACE : &errs), &errs, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    if (wrank == 0) {
        if (errs) {
            printf("found %d errors\n", errs);
        }
        else {
            printf(" No errors\n");
        }
    }
    MPI_Finalize();

    return 0;
}
Developer: glesserd, Project: simgrid, Lines: 101, Source: neighb_coll.c


Example 2: main

int main(int argc, char **argv)
{
    int np[2];
    ptrdiff_t n[3];
    ptrdiff_t alloc_local;
    ptrdiff_t local_ni[3], local_i_start[3];
    ptrdiff_t local_no[3], local_o_start[3];
    long double err;
    pfftl_complex *in, *out;
    pfftl_plan plan_forw=NULL, plan_back=NULL;
    MPI_Comm comm_cart_2d;

    /* Set size of FFT and process mesh */
    n[0] = 29;
    n[1] = 27;
    n[2] = 31;
    np[0] = 2;
    np[1] = 2;

    /* Initialize MPI and PFFT */
    MPI_Init(&argc, &argv);
    pfftl_init();

    /* Create two-dimensional process grid of size np[0] x np[1], if possible */
    if( pfftl_create_procmesh_2d(MPI_COMM_WORLD, np[0], np[1], &comm_cart_2d) ) {
        pfftl_fprintf(MPI_COMM_WORLD, stderr, "Error: This test file only works with %d processes.\n", np[0]*np[1]);
        MPI_Finalize();
        return 1;
    }

    /* Get parameters of data distribution */
    alloc_local = pfftl_local_size_dft_3d(n, comm_cart_2d, PFFT_TRANSPOSED_NONE,
                                          local_ni, local_i_start, local_no, local_o_start);

    /* Allocate memory */
    in  = pfftl_alloc_complex(alloc_local);
    out = pfftl_alloc_complex(alloc_local);

    /* Plan parallel forward FFT */
    plan_forw = pfftl_plan_dft_3d(
                    n, in, out, comm_cart_2d, PFFT_FORWARD, PFFT_TRANSPOSED_NONE| PFFT_MEASURE| PFFT_DESTROY_INPUT);

    /* Plan parallel backward FFT */
    plan_back = pfftl_plan_dft_3d(
                    n, out, in, comm_cart_2d, PFFT_BACKWARD, PFFT_TRANSPOSED_NONE| PFFT_MEASURE| PFFT_DESTROY_INPUT);

    /* Initialize input with random numbers */
    pfftl_init_input_complex_3d(n, local_ni, local_i_start,
                                in);

    /* execute parallel forward FFT */
    pfftl_execute(plan_forw);

    /* execute parallel backward FFT */
    pfftl_execute(plan_back);

    /* Scale data */
    ptrdiff_t l;
    for(l=0; l < local_ni[0] * local_ni[1] * local_ni[2]; l++)
        in[l] /= (n[0]*n[1]*n[2]);

    /* Print error of back transformed data */
    err = pfftl_check_output_complex_3d(n, local_ni, local_i_start, in, comm_cart_2d);
    pfftl_printf(comm_cart_2d, "Error after one forward and backward trafo of size n=(%td, %td, %td):\n", n[0], n[1], n[2]);
    pfftl_printf(comm_cart_2d, "maxerror = %6.2Le;\n", err);

    /* free mem and finalize */
    pfftl_destroy_plan(plan_forw);
    pfftl_destroy_plan(plan_back);
    MPI_Comm_free(&comm_cart_2d);
    pfftl_free(in);
    pfftl_free(out);
    MPI_Finalize();
    return 0;
}
Developer: rainwoodman, Project: pfft, Lines: 75, Source: simple_check_c2c_ldouble.c


Example 3: main


//......... part of the code is omitted here .........

  board = malloc( ldlboard * ldlboard * sizeof(int) );
  nbngb = malloc( ldnbngb * ldnbngb * sizeof(int) );

  if (rank == 0){
    global_board = malloc( ldboard * ldboard * sizeof(int) );
    num_alive = generate_initial_board( &global_cell( 1, 1), ldboard );
    printf("Starting number of living cells = %d\n", num_alive);
    t1 = mytimer();
  }

  matrix_placement_proc(nb_proc_row, nb_in_block, &comm, &(global_cell( 1, 1)), &(cell( 1, 1)), SCATTER, ldlboard);

  mpi_grid_init(&comm, &grid, rank);
  //printf("rank #%d: %d %d\n", rank, grid.rank_I, grid.rank_J);


  //output_lboard( nb_in_block, board, ldlboard, 0, rank );

  for (loop = 1; loop <= maxloop; loop++) {

    MPI_Datatype blocktype; // we need a specific type for row exchange
    MPI_Type_vector(nb_in_block, 1, ldlboard, MPI_INT, &blocktype);
    MPI_Type_commit(&blocktype);
    // for upper/lower ghost row
    MPI_Sendrecv(&(cell( 1, 1)), 1, blocktype, grid.proc_above, 99, 
		 &(cell( nb_in_block+1, 1)), 1, blocktype, grid.proc_under, 99,
		 comm, MPI_STATUS_IGNORE);
    MPI_Sendrecv(&(cell( nb_in_block, 1)), 1, blocktype, grid.proc_under, 99,
		 &(cell( 0, 1)), 1, blocktype, grid.proc_above, 99, 
		 comm, MPI_STATUS_IGNORE);
    MPI_Type_free(&blocktype); // free the row datatype committed for this iteration to avoid a leak

    // for left/right ghost col
    MPI_Sendrecv(&(cell( 0, 1)), ldlboard, MPI_INT, grid.proc_left, 98, 
		 &(cell( 0, nb_in_block+1)), ldlboard, MPI_INT, grid.proc_right, 98,
		 comm, MPI_STATUS_IGNORE);
    MPI_Sendrecv(&(cell( 0, nb_in_block)), ldlboard, MPI_INT, grid.proc_right, 98,
		 &(cell( 0, 0)), ldlboard, MPI_INT, grid.proc_left, 98, 
		 comm, MPI_STATUS_IGNORE);

    //debug
    /* if (loop == 1) */
    /*   output_lboard( nb_in_block, board, ldlboard, 0, rank ); */

    // count the number of living neighbours
    for (j = 1; j <= nb_in_block; j++) {
      for (i = 1; i <= nb_in_block; i++) {
  	ngb( i, j ) =
  	  cell( i-1, j-1 ) + cell( i, j-1 ) + cell( i+1, j-1 ) +
  	  cell( i-1, j   ) +                  cell( i+1, j   ) +
  	  cell( i-1, j+1 ) + cell( i, j+1 ) + cell( i+1, j+1 );
      }
    }

    // update the matrix
    local_alive = 0;
    for (j = 1; j <= nb_in_block; j++) {
      for (i = 1; i <= nb_in_block; i++) {
  	if ( (ngb( i, j ) < 2) ||
  	     (ngb( i, j ) > 3) ) {
  	  cell(i, j) = 0;
  	}
  	else {
  	  if ((ngb( i, j )) == 3)
  	    cell(i, j) = 1;
  	}
  	if (cell(i, j) == 1) {
  	  local_alive ++;
  	}
      }
    }

    //output_lboard( nb_in_block, board, ldlboard, loop, rank );
#ifdef PRINT_ALIVE
    MPI_Reduce(&local_alive, &num_alive, 1, MPI_INT, MPI_SUM, 0, comm);
    if (rank == 0)
      printf("%d \n", num_alive);
#endif
  }

  matrix_placement_proc(nb_proc_row, nb_in_block, &comm, &(cell( 1, 1)), &(global_cell( 1, 1)), GATHER, ldlboard);
  MPI_Reduce(&local_alive, &num_alive, 1, MPI_INT, MPI_SUM, 0, comm);

  if (rank == 0){
    t2 = mytimer();
    temps = t2 - t1;
    printf("Final number of living cells = %d\n", num_alive);
    printf("time=%.2lf ms\n",(double)temps * 1.e3);
    
    //output_board( BS, &(global_cell(1, 1)), ldboard, maxloop);
    free(global_board);
  }
  free(board);
  free(nbngb);

  MPI_Comm_free(&comm);
  MPI_Finalize();

  return EXIT_SUCCESS;
}
Developer: ahonorat, Project: tdp, Lines: 101, Source: life_mpi.c


Example 4: main

/*
    Check that the MPI implementation properly handles zero-dimensional
    Cartesian communicators - the original standard implies that these
    should be consistent with higher dimensional topologies and thus
    these should work with any MPI implementation.  MPI 2.1 made this
    requirement explicit.
*/
int main(int argc, char *argv[])
{
    int errs = 0;
    int size, rank, ndims;
    MPI_Comm comm, newcomm;

    MTest_Init(&argc, &argv);

    /* Create a new cartesian communicator in a subset of the processes */
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (size < 2) {
        fprintf(stderr, "This test needs at least 2 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Cart_create(MPI_COMM_WORLD, 0, NULL, NULL, 0, &comm);

    if (comm != MPI_COMM_NULL) {
        int csize;
        MPI_Comm_size(comm, &csize);
        if (csize != 1) {
            errs++;
            fprintf(stderr, "Sizes is wrong in cart communicator.  Is %d, should be 1\n", csize);
        }

        /* This function is not meaningful, but should not fail */
        MPI_Dims_create(1, 0, NULL);

        ndims = -1;
        MPI_Cartdim_get(comm, &ndims);
        if (ndims != 0) {
            errs++;
            fprintf(stderr, "MPI_Cartdim_get: ndims is %d, should be 0\n", ndims);
        }

        /* this function should not fail */
        MPI_Cart_get(comm, 0, NULL, NULL, NULL);

        MPI_Cart_rank(comm, NULL, &rank);
        if (rank != 0) {
            errs++;
            fprintf(stderr, "MPI_Cart_rank: rank is %d, should be 0\n", rank);
        }

        /* this function should not fail */
        MPI_Cart_coords(comm, 0, 0, NULL);

        MPI_Cart_sub(comm, NULL, &newcomm);
        ndims = -1;
        MPI_Cartdim_get(newcomm, &ndims);
        if (ndims != 0) {
            errs++;
            fprintf(stderr, "MPI_Cart_sub did not return zero-dimensional communicator\n");
        }

        MPI_Barrier(comm);

        MPI_Comm_free(&comm);
        MPI_Comm_free(&newcomm);
    }
    else if (rank == 0) {
        errs++;
        fprintf(stderr, "Communicator returned is null!");
    }

    MTest_Finalize(errs);

    MPI_Finalize();

    return 0;
}
Developer: NexMirror, Project: MPICH, Lines: 79, Source: cartzero.c


Example 5: main

int main(int argc, char *argv[])
{
    int provided, wrank, wsize, nmsg, i, tag;
    int *(buf[MAX_TARGETS]), bufsize[MAX_TARGETS];
    MPI_Request r[MAX_TARGETS];
    MPI_Comm commDup, commEven;

    MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);
    MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
    MPI_Comm_size(MPI_COMM_WORLD, &wsize);

    if (wsize < 4) {
        fprintf(stderr, "This test requires at least 4 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Create several communicators */
    MPI_Comm_dup(MPI_COMM_WORLD, &commDup);
    MPI_Comm_set_name(commDup, "User dup of comm world");

    MPI_Comm_split(MPI_COMM_WORLD, wrank & 0x1, wrank, &commEven);
    if (wrank & 0x1)
        MPI_Comm_free(&commEven);
    else
        MPI_Comm_set_name(commEven, "User split to even ranks");

    /* Create a collection of pending sends and receives
     * We use tags on the sends and receives (when ANY_TAG isn't used)
     * to provide an easy way to check that the proper requests are present.
     * TAG values use fields, in decimal (for easy reading):
     * 0-99: send/recv type:
     * 0 - other
     * 1 - irecv
     * 2 - isend
     * 3 - issend
     * 4 - ibsend
     * 5 - irsend
     * 6 - persistent recv
     * 7 - persistent send
     * 8 - persistent ssend
     * 9 - persistent rsend
     * 10 - persistent bsend
     * 100-999: destination (for send) or source, if receive.  999 = any-source
     * (rank is value/100)
     * 1000-2G: other values
     */
    /* Create the send/receive buffers */
    nmsg = 10;
    for (i = 0; i < nmsg; i++) {
        bufsize[i] = i;
        if (i) {
            buf[i] = (int *) calloc(bufsize[i], sizeof(int));
            if (!buf[i]) {
                fprintf(stderr, "Unable to allocate %d words\n", bufsize[i]);
                MPI_Abort(MPI_COMM_WORLD, 2);
            }
        } else
            buf[i] = 0;
    }

    /* Partial implementation */
    if (wrank == 0) {
        nmsg = 0;
        tag = 2 + 1 * 100;
        MPI_Isend(buf[0], bufsize[0], MPI_INT, 1, tag, MPI_COMM_WORLD, &r[nmsg++]);
        tag = 3 + 2 * 100;
        MPI_Issend(buf[1], bufsize[1], MPI_INT, 2, tag, MPI_COMM_WORLD, &r[nmsg++]);
        tag = 1 + 3 * 100;
        MPI_Irecv(buf[2], bufsize[2], MPI_INT, 3, tag, MPI_COMM_WORLD, &r[nmsg++]);
    } else if (wrank == 1) {
    } else if (wrank == 2) {
    } else if (wrank == 3) {
    }

    /* provide a convenient place to wait */
    MPI_Barrier(MPI_COMM_WORLD);
    printf("Barrier 1 finished\n");

    /* Match up (or cancel) the requests */
    if (wrank == 0) {
        MPI_Waitall(nmsg, r, MPI_STATUSES_IGNORE);
    } else if (wrank == 1) {
        tag = 2 + 1 * 100;
        MPI_Recv(buf[0], bufsize[0], MPI_INT, 0, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    } else if (wrank == 2) {
        tag = 3 + 2 * 100;
        MPI_Recv(buf[1], bufsize[1], MPI_INT, 0, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    } else if (wrank == 3) {
        tag = 1 + 3 * 100;
        MPI_Send(buf[2], bufsize[2], MPI_INT, 0, tag, MPI_COMM_WORLD);
    }

    MPI_Barrier(MPI_COMM_WORLD);
    printf("Barrier 2 finished\n");

    MPI_Comm_free(&commDup);
    if (commEven != MPI_COMM_NULL)
        MPI_Comm_free(&commEven);

    MPI_Finalize();
//......... part of the code is omitted here .........
Developer: ParaStation, Project: psmpi2, Lines: 101, Source: dbexample.c


Example 6: main


//......... part of the code is omitted here .........
    MPI_Comm_size(shm_comm, &shm_nproc);

    /* Platform does not support shared memory, just return. */
    if (shm_nproc < 2) {
        goto exit;
    }

    /* Specify the last process in the node as the target process */
    dst_shm_rank = shm_nproc - 1;
    MPI_Comm_group(shm_comm, &shm_group);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Group_translate_ranks(shm_group, 1, &dst_shm_rank, world_group, &dst_world_rank);

    bases = calloc(shm_nproc, sizeof(int *));

    /* Allocate shm window among local processes, then create a global window with
     * those shm window buffers */
    MPI_Win_allocate_shared(sizeof(int) * ELEM_PER_PROC, sizeof(int), MPI_INFO_NULL,
                            shm_comm, &my_base, &shm_win);
    if (verbose)
        printf("%d -- allocate shared: my_base = %p, absolute base\n", shm_rank, my_base);

    for (i = 0; i < shm_nproc; i++) {
        MPI_Win_shared_query(shm_win, i, &size, &disp_unit, &bases[i]);
        if (verbose)
            printf("%d --    shared query: base[%d]=%p, size %ld, unit %d\n",
                   shm_rank, i, bases[i], size, disp_unit);
    }

#ifdef USE_INFO_ALLOC_SHM
    MPI_Info_create(&create_info);
    MPI_Info_set(create_info, "alloc_shm", "true");
#else
    create_info = MPI_INFO_NULL;
#endif

    MPI_Win_create(my_base, sizeof(int) * ELEM_PER_PROC, sizeof(int), create_info, MPI_COMM_WORLD,
                   &win);

    /* Reset data */
    for (i = 0; i < ELEM_PER_PROC; i++) {
        my_base[i] = 0;
        local_buf[i] = i + 1;
    }

    /* Do RMA through global window, then check value through shared window */
    MPI_Win_lock_all(0, win);
    MPI_Win_lock_all(0, shm_win);

    if (shm_rank == 0) {
        MPI_Put(&local_buf[0], 1, MPI_INT, dst_world_rank, 0, 1, MPI_INT, win);
        MPI_Put(&local_buf[ELEM_PER_PROC - 1], 1, MPI_INT, dst_world_rank, ELEM_PER_PROC - 1, 1,
                MPI_INT, win);
        MPI_Win_flush(dst_world_rank, win);
    }

    MPI_Win_sync(shm_win);
    MPI_Barrier(shm_comm);
    MPI_Win_sync(shm_win);

    if (bases[dst_shm_rank][0] != local_buf[0]) {
        errors++;
        printf("%d -- Got %d at rank %d index %d, expected %d\n", rank,
               bases[dst_shm_rank][0], dst_shm_rank, 0, local_buf[0]);
    }
    if (bases[dst_shm_rank][ELEM_PER_PROC - 1] != local_buf[ELEM_PER_PROC - 1]) {
        errors++;
        printf("%d -- Got %d at rank %d index %d, expected %d\n", rank,
               bases[dst_shm_rank][ELEM_PER_PROC - 1], dst_shm_rank,
               ELEM_PER_PROC - 1, local_buf[ELEM_PER_PROC - 1]);
    }

    MPI_Win_unlock_all(shm_win);
    MPI_Win_unlock_all(win);

    MPI_Reduce(&errors, &all_errors, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    MPI_Win_free(&win);
    MPI_Win_free(&shm_win);

  exit:
    if (rank == 0 && all_errors == 0)
        printf(" No Errors\n");

    if (create_info != MPI_INFO_NULL)
        MPI_Info_free(&create_info);
    if (shm_comm != MPI_COMM_NULL)
        MPI_Comm_free(&shm_comm);
    if (shm_group != MPI_GROUP_NULL)
        MPI_Group_free(&shm_group);
    if (world_group != MPI_GROUP_NULL)
        MPI_Group_free(&world_group);

    MPI_Finalize();

    if (bases)
        free(bases);

    return 0;
}
Developer: syftalent, Project: dist-sys-exercises-1, Lines: 101, Source: win_shared_create.c


Example 7: main


//......... part of the code is omitted here .........
    for(r = 0; r < size; r++) {
        MPI_Barrier(MPI_COMM_WORLD);
        if(rank == r) {
            printf("@[%d] X-> \n", rank);
            for(i=0; i<N; i++)
                print_row("Xnewtable",newtable,i, N, rank);
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }

/*    if(!rank)
        for(i=0; i<N; i++)
            print_row("newtable", newtable,i, N, rank);*/

    for(i=0; i <last; i++) ranks[i] = i;

    MPI_Comm_group(MPI_COMM_WORLD, &group_world);
    MPI_Group_incl(group_world, last, ranks, &group_last);
    MPI_Comm_create(MPI_COMM_WORLD, group_last, &COMM_LAST);

    for(i = 0; i < N; i+= size ) {
        if(!rank)printf("scatter <%d>\n", i);
        if (i < (N - last)) {
            MPI_Scatter(
                    &newtable[i * N + 0], 1, subarray,
                    &newtable[(i + rank) * N + 0], 1, subarray,
                    0,MPI_COMM_WORLD);
        } else {
            if (rank < last) {
                MPI_Scatter(
                        &newtable[i * N + 0], 1, subarray,
                        &newtable[ (i + rank) * N + 0], 1, subarray,
                        0, COMM_LAST);
            }
        }
    }
    MPI_Barrier(MPI_COMM_WORLD);
    
    for(r = 0; r < size; r++) {
        MPI_Barrier(MPI_COMM_WORLD);
        if(rank == r) {
            printf("@[%d] -> \n", rank);
            for(i=0; i<N; i++)
                print_row("Xnewtable",newtable,i, N, rank);
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }

    MPI_Barrier(MPI_COMM_WORLD);
    printf("@[%d] ......\n", rank);
    MPI_Barrier(MPI_COMM_WORLD);

    // reset our little matrix
    if (rank == 0) {
        int k = 0;
        for ( i=0; i<N; i++) 
            for ( j=0; j<N; j++ ) 
                newtable[i * N  + j] = 0;
        for(r = 0; r < size; r++) {
            printf("@[%d]\n", rank);
            if(rank == r) {
                for(i=0; i<N; i++)
                    print_row("newtable",newtable, i, N, rank);
            }
        }
    }
    MPI_Barrier(MPI_COMM_WORLD);

    for(i = 0; i < N; i+= size ) {
        if(!rank)printf("gather <%d>\n", i);
        if (i < (N - last)) {
            MPI_Gather(
                    &newtable[ ( i + rank ) * N + 0], 1, subarray,
                    &newtable[i * N + 0], 1, subarray,
                    0, MPI_COMM_WORLD);
        } else {
            if (rank < last) {
                MPI_Gather(
                        &newtable[(i + rank) * N + 0], 1, subarray,
                        &newtable[i*N + 0], 1, subarray,
                        0,COMM_LAST);
            }
        }
    }
    if (rank == 0) {
        for(r = 0; r < size; r++) {
            printf("@[%d]\n", rank);
            if(rank == r) {
                for(i=0; i<N; i++)
                    print_row("newtable",newtable,i, N, rank);
            }
        }
    }

    MPI_Group_free(&group_last);
    if (rank < last) 
        MPI_Comm_free(&COMM_LAST);
    MPI_Finalize();
    return 0;
}
Developer: rahulgopinath, Project: rahulgopinath.github.io, Lines: 101, Source: scattermatrix.c


Example 8: gmx_setup_nodecomm

void gmx_setup_nodecomm(FILE *fplog, t_commrec *cr)
{
    gmx_nodecomm_t *nc;
    int             n, rank, hostnum, ng, ni;

    /* Many MPI implementations do not optimize MPI_Allreduce
     * (and probably also other global communication calls)
     * for multi-core nodes connected by a network.
     * We can optimize such communication by using one MPI call
     * within each node and one between the nodes.
     * For MVAPICH2 and Intel MPI this reduces the time for
     * the global_stat communication by 25%
     * for 2x2-core 3 GHz Woodcrest connected by mixed DDR/SDR Infiniband.
     * B. Hess, November 2007
     */

    nc = &cr->nc;

    nc->bUse = FALSE;
#ifndef GMX_THREAD_MPI
#ifdef GMX_MPI
    MPI_Comm_size(cr->mpi_comm_mygroup, &n);
    MPI_Comm_rank(cr->mpi_comm_mygroup, &rank);

    hostnum = gmx_hostname_num();

    if (debug)
    {
        fprintf(debug, "In gmx_setup_nodecomm: splitting communicator of size %d\n", n);
    }


    /* The intra-node communicator, split on node number */
    MPI_Comm_split(cr->mpi_comm_mygroup, hostnum, rank, &nc->comm_intra);
    MPI_Comm_rank(nc->comm_intra, &nc->rank_intra);
    if (debug)
    {
        fprintf(debug, "In gmx_setup_nodecomm: node rank %d rank_intra %d\n",
                rank, nc->rank_intra);
    }
    /* The inter-node communicator, split on rank_intra.
     * We actually only need the one for rank=0,
     * but it is easier to create them all.
     */
    MPI_Comm_split(cr->mpi_comm_mygroup, nc->rank_intra, rank, &nc->comm_inter);
    /* Check if this really created two step communication */
    MPI_Comm_size(nc->comm_inter, &ng);
    MPI_Comm_size(nc->comm_intra, &ni);
    if (debug)
    {
        fprintf(debug, "In gmx_setup_nodecomm: groups %d, my group size %d\n",
                ng, ni);
    }

    if (getenv("GMX_NO_NODECOMM") == NULL &&
        ((ng > 1 && ng < n) || (ni > 1 && ni < n)))
    {
        nc->bUse = TRUE;
        if (fplog)
        {
            fprintf(fplog, "Using two step summing over %d groups of on average %.1f processes\n\n",
                    ng, (real)n/(real)ng);
        }
        if (nc->rank_intra > 0)
        {
            MPI_Comm_free(&nc->comm_inter);
        }
    }
    else
    {
        /* One group or all processes in a separate group, use normal summing */
        MPI_Comm_free(&nc->comm_inter);
        MPI_Comm_free(&nc->comm_intra);
        if (debug)
        {
            fprintf(debug, "In gmx_setup_nodecomm: not unsing separate inter- and intra-node communicators.\n");
        }
    }
#endif
#else
    /* tMPI runs only on a single node so just use the nodeid */
    nc->rank_intra = cr->nodeid;
#endif
}
Developer: alwanderer, Project: gromacs, Lines: 84, Source: network.c


Example 9: runTest


//......... part of the code is omitted here .........

  messList = testParams->messList;
  messListSize = testParams->messListSize;

  testParams->maxBW = 0.0;
  testParams->maxBWMessSize = 0;

  init_stats ( &sum, 0, 0, 0, 0 );

  for ( procIdx = 0; procIdx < argStruct.procListSize; procIdx++ )
  {
    procs = argStruct.procList[procIdx];
    if ( procs > wsize )
      procs = wsize;

    /*  Create Communicator of all active processes  */
    procs = createActiveComm ( procs, argStruct.procsPerNode,
                               argStruct.allocPattern,
                               argStruct.useNearestRank, &activeComm );

    prestaDebug ( "rank %d returned from createActiveCom\n", rank );
    prestaDebug ( "messListSize is %d \n", messListSize );

    for ( messIdx = 0; messIdx < messListSize; messIdx++ )
    {
      if ( argStruct.iterList != NULL && argStruct.iterList[messIdx] != 0 )
        iters = argStruct.iterList[messIdx];
      else
        iters = argStruct.iters;

      if ( argStruct.testCountList != NULL )
      {
        fprintf ( stderr,
                  "Before init_stats! messIdx is %d, procIdx is %d, testCountList is %p\n",
                  messIdx, procIdx, argStruct.testCountList );
        init_stats ( &dp, procs, messList[messIdx], iters,
                     argStruct.testCountList[procIdx] );
      }
      else
      {
        init_stats ( &dp, procs, messList[messIdx], iters, argStruct.samples );
      }

      for ( testCount = 0; testCount < dp.samples; testCount++ )
      {
        /*  Run test and save current result  */
        testParams->rankResult =
          testParams->testFunc ( dp.msize, iters, &activeComm );

        /*  TODO : Remove this if unnecessary   */
        if ( testParams->rankResult < minTime )
          minTime = testParams->rankResult;

        if ( !generateResults
             ( testParams, procs, dp.msize, iters, &result ) )
          prestaAbort ( "Failed to generate test results." );

        update_stats ( &dp, result );
        update_stats ( &sum, result );
      }

      if ( testParams->id == LATEN )
      {
        prestaRankPrint ( 0,
                          "(%6d, %9d, %6d, %6d):  %6.3f / %6.3f / %6.3f\n",
                          dp.tasks, dp.msize, dp.iters, dp.samples, dp.min,
                          dp.mean, dp.max );
      }
      else
      {
        prestaRankPrint ( 0,
                          "(%6d, %7d, %5d, %5d):  %12.3f / %12.3f / %12.3f\n",
                          dp.tasks, dp.msize, dp.iters, dp.samples, dp.min,
                          dp.mean, dp.max );
      }
    }
  }

  if ( testParams->id == LATEN )
  {
    prestaRankPrint ( 0,
                      "\nSummary  :             min/mean/max = %6.3f / %6.3f / %6.3f\n",
                      sum.min, sum.mean, sum.max );
  }
  else
  {
    prestaRankPrint ( 0,
                      "\nSummary  :         min/mean/max = %12.3f / %12.3f / %12.3f\n",
                      sum.min, sum.mean, sum.max );
  }

  if ( rank == 0 && argStruct.printPairs )
  {
    printActivePairs ( procs, argStruct.procsPerNode,
                       argStruct.allocPattern, argStruct.useNearestRank );
  }

  if ( activeComm != MPI_COMM_NULL )
    MPI_Comm_free ( &activeComm );
}
Developer: 8l, Project: insieme, Lines: 101, Source: com.c


Example 10: Balance_Partition


//......... part of the code is omitted here .........
                lwhere[agraph->label[i]] = fpart + part[i];
        }

        MPI_Allreduce((void *)lwhere, (void *)part, nvtxs, IDX_DATATYPE, MPI_SUM, srcomm);

        edgecut = ComputeSerialEdgeCut(&cgraph);
        Mc_ComputeSerialBalance(ctrl, &cgraph, part, lbvec);
        lbsum = ssum(ncon, lbvec);
        MPI_Allreduce((void *)&edgecut, (void *)&max_cut, 1, MPI_INT, MPI_MAX, ipcomm);
        MPI_Allreduce((void *)&lbsum, (void *)&min_lbsum, 1, MPI_FLOAT, MPI_MIN, ipcomm);
        lpecost.rank = ctrl->mype;
        lpecost.cost = lbsum;
        if (min_lbsum < UNBALANCE_FRACTION * (float)(ncon)) {
            if (lbsum < UNBALANCE_FRACTION * (float)(ncon))
                lpecost.cost = (float)edgecut;
            else
                lpecost.cost = (float)max_cut + lbsum;
        }
        MPI_Allreduce((void *)&lpecost, (void *)&gpecost, 1, MPI_FLOAT_INT, MPI_MINLOC, ipcomm);

        if (ctrl->mype == gpecost.rank && ctrl->mype != sr_pe) {
            MPI_Send((void *)part, nvtxs, IDX_DATATYPE, sr_pe, 1, ctrl->comm);
        }

        if (ctrl->mype != gpecost.rank && ctrl->mype == sr_pe) {
            MPI_Recv((void *)part, nvtxs, IDX_DATATYPE, gpecost.rank, 1, ctrl->comm, &status);
        }

        if (ctrl->mype == sr_pe) {
            idxcopy(nvtxs, part, lwhere);
            SerialRemap(&cgraph, ctrl->nparts, home, lwhere, part, ctrl->tpwgts);
        }

        MPI_Comm_free(&srcomm);
    }
    /**************************************/
    /* The other half do global diffusion */
    /**************************************/
    else {
        /******************************************************************/
        /* The next stmt is required to balance out the sr MPI_Comm_split */
        /******************************************************************/
        MPI_Comm_split(ipcomm, MPI_UNDEFINED, 0, &srcomm);

        if (ncon == 1) {
            rating = WavefrontDiffusion(&myctrl, agraph, home);
            Mc_ComputeSerialBalance(ctrl, &cgraph, part, lbvec);
            lbsum = ssum(ncon, lbvec);

            /* Determine which PE computed the best partitioning */
            MPI_Allreduce((void *)&rating, (void *)&max_rating, 1, MPI_FLOAT, MPI_MAX, ipcomm);
            MPI_Allreduce((void *)&lbsum, (void *)&min_lbsum, 1, MPI_FLOAT, MPI_MIN, ipcomm);

            lpecost.rank = ctrl->mype;
            lpecost.cost = lbsum;
            if (min_lbsum < UNBALANCE_FRACTION * (float)(ncon)) {
                if (lbsum < UNBALANCE_FRACTION * (float)(ncon))
                    lpecost.cost = rating;
                else
                    lpecost.cost = max_rating + lbsum;
            }

            MPI_Allreduce((void *)&lpecost, (void *)&gpecost, 1, MPI_FLOAT_INT, MPI_MINLOC, ipcomm);

            /* Now send this to the coordinating processor */
            if (ctrl->mype == gpecost.rank && ctrl->mype != gd_pe)
Developer: BijanZarif, Project: oomph-lib, Lines: 67, Source: initbalance.c


Example 11: MPI_Comm_free

 void TreeCommunicator::comm_destroy(void)
 {
     for (auto comm_it = m_comm.begin(); comm_it < m_comm.end(); ++comm_it) {
         MPI_Comm_free(&(*comm_it));
     }
 }
Developer: bgeltz, Project: geopm, Lines: 6, Source: TreeCommunicator.cpp


Example 12: main

int main( int argc, char *argv[] )
{
    int wrank, wsize, rank, size, color;
    int j, tmp;
    int err, toterrs, errs = 0;
    MPI_Comm newcomm;

    MPI_Init( &argc, &argv );

    MPI_Comm_size( MPI_COMM_WORLD, &wsize );
    MPI_Comm_rank( MPI_COMM_WORLD, &wrank );

    /* Color is 0 or 1; 1 will be the processes that "fault" */
    /* process 0 and wsize/2+1...wsize-1 are in non-faulting group */
    color = (wrank > 0) && (wrank <= wsize/2);
    MPI_Comm_split( MPI_COMM_WORLD, color, wrank, &newcomm );

    MPI_Comm_size( newcomm, &size );
    MPI_Comm_rank( newcomm, &rank );

    /* Set errors return on COMM_WORLD and the new comm */
    MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
    MPI_Comm_set_errhandler( newcomm, MPI_ERRORS_RETURN );

    err = MPI_Barrier( MPI_COMM_WORLD );
    if (err) errs += ReportErr( err, "Barrier" );
    if (color) {
	/* Simulate a fault on some processes */
	exit(1);
    }
    else {
	/* To improve the chance that the "faulted" processes will have
	   exited, wait for 1 second */
	sleep( 1 );
    }
    
    /* Can we still use newcomm? */
    for (j=0; j<rank; j++) {
	err = MPI_Recv( &tmp, 1, MPI_INT, j, 0, newcomm, MPI_STATUS_IGNORE );
	if (err) errs += ReportErr( err, "Recv" );
    }
    for (j=rank+1; j<size; j++) {
	err = MPI_Send( &rank, 1, MPI_INT, j, 0, newcomm );
	if (err) errs += ReportErr( err, "Recv" );
    }

    /* Now, try sending in MPI_COMM_WORLD on dead processes */
    /* There is a race condition here - we don't know for sure that the faulted
       processes have exited.  However, we can ensure a failure by using 
       synchronous sends - the sender will wait until the receiver 
       receives the message, which will not happen (the process will exit 
       without matching the message, even if it has not yet exited). */
    for (j=1; j<=wsize/2; j++) {
	err = MPI_Ssend( &rank, 1, MPI_INT, j, 0, MPI_COMM_WORLD );
	if (!err) {
	    errs++;
	    fprintf( stderr, "Ssend succeeded to dead process %d\n", j );
	}
    }

    err = MPI_Allreduce( &errs, &toterrs, 1, MPI_INT, MPI_SUM, newcomm );
    if (err) errs += ReportErr( err, "Allreduce" );
    MPI_Comm_free( &newcomm );

    MPI_Finalize();

    if (wrank == 0) {
	if (toterrs > 0) {
	    printf( " Found %d errors\n", toterrs );
	}
	else {
	    printf( " No Errors\n" );
	}
    }

    return 0;
}
Developer: OngOngoing, Project: 219351_homework, Lines: 77, Source: pt2ptf2.c


Example 13: Rhpc_gethandle

SEXP Rhpc_gethandle(SEXP procs)
{
  int num_procs;
  MPI_Comm *ptr;
  SEXP com;
  int num;
  MPI_Comm pcomm;

  if (RHPC_Comm == MPI_COMM_NULL){
    error("Rhpc_initialize is not called.");
    return(R_NilValue);
  }
  if(finalize){
    warning("Rhpc were already finalized.");
    return(R_NilValue);
  }
  if(!initialize){
    warning("Rhpc not initialized.");
    return(R_NilValue);
  }
 
  num_procs = INTEGER (procs)[0];

  ptr = Calloc(1,MPI_Comm);
  PROTECT(com = R_MakeExternalPtr(ptr, R_NilValue, R_NilValue));
  R_RegisterCFinalizer(com, comm_free);
  SXP2COMM(com) = RHPC_Comm;

  if (num_procs == NA_INTEGER){/* use mpirun */
    _M(MPI_Comm_size(SXP2COMM(com), &num));
    Rprintf("Detected communication size %d\n", num);
    if( num > 1 ){
      if ( num_procs > 0){
	warning("blind procs argument, return of MPI_COMM_WORLD");
      }
    }else{
      if ( num == 1){
	warning("only current master process. not found worker process.");
      }
      SXP2COMM(com)=MPI_COMM_NULL;
      warning("please pecifies the number of processes in mpirun or mpiexec, or provide a number of process to spawn");
    }
    UNPROTECT(1);
    return(com);
  }else{ /* spawn */
    if(num_procs < 1){
      warning("you need positive number of procs argument");
      UNPROTECT(1);
      return(com);
    }
    _M(MPI_Comm_size(SXP2COMM(com), &num));
    if(num > 1){ 
      warning("blind procs argument, return of last communicator");
      UNPROTECT(1);
      return(com);
    }
  }

  _M(MPI_Comm_spawn(RHPC_WORKER_CMD, MPI_ARGV_NULL, num_procs,
		    MPI_INFO_NULL, 0, MPI_COMM_SELF, &pcomm,  
		    MPI_ERRCODES_IGNORE));
  _M(MPI_Intercomm_merge( pcomm, 0, SXP2COMMP(com)));
  _M(MPI_Comm_free( &pcomm ));
  _M(MPI_Comm_size(SXP2COMM(com), &num));
  RHPC_Comm = SXP2COMM(com); /* rewrite RHPC_Comm */
  _M(MPI_Comm_set_errhandler(RHPC_Comm, MPI_ERRORS_RETURN));
  _M(MPI_Comm_rank(RHPC_Comm, &MPI_rank));
  _M(MPI_Comm_size(RHPC_Comm, &MPI_procs));
  DPRINT("Rhpc_getHandle(MPI_Comm_spawn : rank:%d size:%d\n", MPI_rank, MPI_procs);
  Rhpc_set_options( MPI_rank, MPI_procs,RHPC_Comm);
  UNPROTECT(1);
  return(com);
}
Developer: bamonroe, Project: Rhpc, Lines: 73, Source: RhpcMPI.c


Example 14: main

int main(int argc, char *argv[])
{
    int errs = 0;
    int rank, size, rsize;
    int nsize, nrank;
    int minsize = 2;
    int isLeft;
    MPI_Comm comm, comm1, comm2, comm3, comm4;

    MTest_Init(&argc, &argv);

    /* The following illustrates the use of the routines to
     * run through a selection of communicators and datatypes.
     * Use subsets of these for tests that do not involve combinations
     * of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntercomm(&comm, &isLeft, minsize)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_remote_size(comm, &rsize);
        MPI_Comm_size(comm, &size);

        /* Try building intercomms */
        MPI_Intercomm_merge(comm, isLeft, &comm1);
        /* Check the size and ranks */
        MPI_Comm_size(comm1, &nsize);
        MPI_Comm_rank(comm1, &nrank);
        if (nsize != size + rsize) {
            errs++;
            printf("(1) Comm size is %d but should be %d\n", nsize, size + rsize);
        }
        if (isLeft) {
            /* The left processes should be high */
            if (nrank != rsize + rank) {
                errs++;
                printf("(1) rank for high process is %d should be %d\n", nrank, rsize + rank);
            }
        }
        else {
            /* The right processes should be low */
            if (nrank != rank) {
                errs++;
                printf("(1) rank for low process is %d should be %d\n", nrank, rank);
            }
        }

        MPI_Intercomm_merge(comm, !isLeft, &comm2);
        /* Check the size and ranks */
        MPI_Comm_size(comm1, &nsize);
        MPI_Comm_rank(comm1, &nrank);
        if (nsize != size + rsize) {
            errs++;
            printf("(2) Comm size is %d but should be %d\n", nsize, size + rsize);
        }
        if (!isLeft) {
            /* The right processes should be high */
            if (nrank != rsize + rank) {
                errs++;
                printf("(2) rank for high process is %d should be %d\n", nrank, rsize + rank);
            }
        }
        else {
            /* The left processes should be low */
            if (nrank != rank) {
                errs++;
                printf("(2) rank for low process is %d should be %d\n", nrank, rank);
            }
        }


        MPI_Intercomm_merge(comm, 0, &comm3);

        MPI_Intercomm_merge(comm, 1, &comm4);

        MPI_Comm_free(&comm1);
        MPI_Comm_free(&comm2);
        MPI_Comm_free(&comm3);
        MPI_Comm_free(&comm4);

        MTestFreeComm(&comm);
    }

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Developer: NexMirror, Project: MPICH, Lines: 87, Source: icm.c


Example 15: nrnmpi_subworld_size

void nrnmpi_subworld_size(int n) {
	/* n is the size of a subworld, nrnmpi_numprocs (pc.nhost) */
	if (nrnmpi_use != 1) { return; }
	if (nrnmpi_comm != MPI_COMM_NULL) {asrt(MPI_Comm_free(&nrnmpi_comm)); }
	if (nrn_bbs_comm != MPI_COMM_NULL) {asrt(MPI_Comm_free(&nrn_bbs_comm)); }
	if (grp_bbs != MPI_GROUP_NULL) { asrt(MPI_Group_free(&grp_bbs)); }
	if (grp_net != MPI_GROUP_NULL) { asrt(MPI_Group_free(&grp_net)); }
	MPI_Group wg;
	asrt(MPI_Comm_group(nrnmpi_world_comm, &wg));
	int r = nrnmpi_myid_world;
	/* special cases */
	if (n == 1) {
		asrt(MPI_Group_incl(wg, 1, &r, &grp_net));
		asrt(MPI_Comm_dup(nrnmpi_world_comm, &nrn_bbs_comm));
		asrt(MPI_Comm_create(nrnmpi_world_comm, grp_net, &nrnmpi_comm));
		asrt(MPI_Comm_rank(nrnmpi_comm, &nrnmpi_myid));
		asrt(MPI_Comm_size(nrnmpi_comm, &nrnmpi_numprocs));
		asrt(MPI_Comm_rank(nrn_bbs_comm, &nrnmpi_myid_bbs));
		asrt(MPI_Comm_size(nrn_bbs_comm, &nrnmpi_numprocs_bbs));
	}else if (n == nrnmpi_numprocs_world) {
		asrt(MPI_Group_incl(wg, 1, &r, &grp_bbs));
		asrt(MPI_Comm_dup(nrnmpi_world_comm, &nrnmpi_comm));
		asrt(MPI_Comm_create(nrnmpi_world_comm, grp_bbs, &nrn_bbs_comm));
		asrt(MPI_Comm_rank(nrnmpi_comm, &nrnmpi_myid));
		asrt(MPI_Comm_size(nrnmpi_comm, &nrnmpi_numprocs));
		if (r == 0) {
			asrt(MPI_Comm_rank(nrn_bbs_comm, &nrnmpi_myid_bbs));
			asrt(MPI_Comm_size(nrn_bbs_comm, &nrnmpi_numprocs_bbs));
		}else{
			nrnmpi_myid_bbs = -1;
			nrnmpi_numprocs_bbs = -1;
		}
	}else{
		int nw = nrnmpi_numprocs_world;
		int nb = nw/n; /* nrnmpi_numprocs_bbs */
		int range[3];
		/* net is contiguous */
		range[0] = r/n;
		range[0] *= n; /* first */
		range[1] = range[0] + n - 1; /* last */
		range[2] = 1; /* stride */
		asrt(MPI_Group_range_incl(wg, 1, &range, &grp_net));
		asrt(MPI_Comm_create(nrnmpi_world_comm, grp_net, &nrnmpi_comm));
		asrt(MPI_Comm_rank(nrnmpi_comm, &nrnmpi_myid));
		asrt(MPI_Comm_size(nrnmpi_comm, &nrnmpi_numprocs));

		range[0] = 0; /* first */
		range[1] = nw - n; /* last */
		range[2] = n; /* stride */
		asrt(MPI_Group_range_incl(wg, 1, &range, &grp_bbs));
		asrt(MPI_Comm_create(nrnmpi_world_comm, grp_bbs, &nrn_bbs_comm));
		if (r%n == 0) { /* only rank 0 of the subworlds */
			asrt(MPI_Comm_rank(nrn_bbs_comm, &nrnmpi_myid_bbs));
			asrt(MPI_Comm_size(nrn_bbs_comm, &nrnmpi_numprocs_bbs));
		}else{
			nrnmpi_myid_bbs = -1;
			nrnmpi_numprocs_bbs = -1;
		}
	}
	asrt(MPI_Group_free(&wg));
}
Developer: bhache, Project: pkg-neuron, Lines: 61, Source: nrnmpi.c


Example 16: main

int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  char processor_name[128];
  int namelen = 128;
  int buf0[buf_size];
  int buf1[buf_size];
  MPI_Status status;
  MPI_Comm comm;
  int drank, dnprocs;

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  MPI_Barrier (MPI_COMM_WORLD);

  if (nprocs < 3) {
    printf ("not enough tasks\n");
  }
  else {
    MPI_Comm_split (MPI_COMM_WORLD, rank % 2, nprocs - rank, &comm);
    
    if (comm != MPI_COMM_NULL) {
//......... part of the code is omitted here .........
