C++ ops_timing_realloc Function Code Examples


This article collects typical usage examples of the C++ function ops_timing_realloc. If you are trying to work out what ops_timing_realloc does, how to call it, or what real uses of it look like, the curated examples below should help.



Twenty code examples of ops_timing_realloc are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
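Before the individual examples, here is a minimal sketch of the timing pattern they all share. The kernel index 42 and the name "my_kernel" are hypothetical placeholders (real OPS-generated stubs use fixed, generator-assigned indices); the range computation, halo exchanges, and kernel launch are elided:

// Minimal usage sketch of ops_timing_realloc, distilled from the examples
// below; index 42 and "my_kernel" are hypothetical placeholders.
void ops_par_loop_my_kernel(char const *name, ops_block block, int dim,
                            int *range, ops_arg arg0) {
  ops_arg args[1] = {arg0};

  double t1, t2, c1, c2;
  if (OPS_diags > 1) {
    // grow the global OPS_kernels timing table (if needed) so that slot 42
    // exists, and record the human-readable kernel name for reporting
    ops_timing_realloc(42, "my_kernel");
    OPS_kernels[42].count++;         // count this invocation
    ops_timers_core(&c1, &t1);       // start CPU/wall-clock timers
  }

  // ... range computation, halo exchanges and the kernel launch go here ...

  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[42].time += t2 - t1; // accumulate elapsed wall time
  }
}

Note that in some of the older generated stubs below, ops_timing_realloc is called unconditionally rather than behind the OPS_diags > 1 guard.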

Example 1: ops_par_loop_calc_dt_kernel_get

// host stub function
void ops_par_loop_calc_dt_kernel_get(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
  ops_arg args[4] = { arg0, arg1, arg2, arg3};


  #ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args,4,range,29)) return;
  #endif

  ops_timing_realloc(29,"calc_dt_kernel_get");
  OPS_kernels[29].count++;

  //compute locally allocated range for the sub-block
  int start[2];
  int end[2];
  #ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned) return;
  for ( int n=0; n<2; n++ ){
    start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
  #else //OPS_MPI
  for ( int n=0; n<2; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif //OPS_MPI

  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);


  int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
  int xdim1 = args[1].dat->size[0]*args[1].dat->dim;

  //build opencl kernel if not already built

  buildOpenCLKernels_calc_dt_kernel_get(
  xdim0,xdim1);

  //Timing
  double t1,t2,c1,c2;
  ops_timers_core(&c2,&t2);

  //set up OpenCL thread blocks
  size_t globalWorkSize[3] = {((x_size-1)/OPS_block_size_x+ 1)*OPS_block_size_x, ((y_size-1)/OPS_block_size_y + 1)*OPS_block_size_y, 1};
  size_t localWorkSize[3] =  {OPS_block_size_x,OPS_block_size_y,1};


  #ifdef OPS_MPI
  double *arg2h = (double *)(((ops_reduction)args[2].data)->data + ((ops_reduction)args[2].data)->size * block->index);
  #else //OPS_MPI
  double *arg2h = (double *)(((ops_reduction)args[2].data)->data);
  #endif //OPS_MPI
  #ifdef OPS_MPI
  double *arg3h = (double *)(((ops_reduction)args[3].data)->data + ((ops_reduction)args[3].data)->size * block->index);
  #else //OPS_MPI
  double *arg3h = (double *)(((ops_reduction)args[3].data)->data);
  #endif //OPS_MPI

  int nblocks = ((x_size-1)/OPS_block_size_x+ 1)*((y_size-1)/OPS_block_size_y + 1);
  int maxblocks = nblocks;
  int reduct_bytes = 0;

  reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
  reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));

  reallocReductArrays(reduct_bytes);
  reduct_bytes = 0;
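  // lay out one zero-initialised slot per work-group (maxblocks of them) for
  // each reduction argument inside the shared host/device reduction buffers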

  int r_bytes2 = reduct_bytes/sizeof(double);
  arg2.data = OPS_reduct_h + reduct_bytes;
  arg2.data_d = OPS_reduct_d;// + reduct_bytes;
  for (int b=0; b<maxblocks; b++)
  for (int d=0; d<1; d++) ((double *)arg2.data)[d+b*1] = ZERO_double;
  reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));

  int r_bytes3 = reduct_bytes/sizeof(double);
  arg3.data = OPS_reduct_h + reduct_bytes;
  arg3.data_d = OPS_reduct_d;// + reduct_bytes;
  for (int b=0; b<maxblocks; b++)
  for (int d=0; d<1; d++) ((double *)arg3.data)[d+b*1] = ZERO_double;
  reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));


//......... remainder of the code omitted .........
Developer: satyajammy | Project: OPS | Lines of code: 101 | Source: calc_dt_kernel_get_opencl_kernel.cpp


Example 2: ops_par_loop_update_halo_kernel5_plus_2_left

// host stub function
void ops_par_loop_update_halo_kernel5_plus_2_left(char const *name,
                                                  ops_block block, int dim,
                                                  int *range, ops_arg arg0,
                                                  ops_arg arg1, ops_arg arg2) {

  // Timing
  double t1, t2, c1, c2;

  ops_arg args[3] = {arg0, arg1, arg2};

#ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args, 3, range, 88))
    return;
#endif

  if (OPS_diags > 1) {
    ops_timing_realloc(88, "update_halo_kernel5_plus_2_left");
    OPS_kernels[88].count++;
    ops_timers_core(&c1, &t1);
  }

  // compute locally allocated range for the sub-block
  int start[3];
  int end[3];
#ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  for (int n = 0; n < 3; n++) {
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 3; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif

  int x_size = MAX(0, end[0] - start[0]);
  int y_size = MAX(0, end[1] - start[1]);
  int z_size = MAX(0, end[2] - start[2]);

  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];

  // build opencl kernel if not already built

  buildOpenCLKernels_update_halo_kernel5_plus_2_left(xdim0, ydim0, xdim1,
                                                     ydim1);

  // set up OpenCL thread blocks
  size_t globalWorkSize[3] = {
      ((x_size - 1) / OPS_block_size_x + 1) * OPS_block_size_x,
      ((y_size - 1) / OPS_block_size_y + 1) * OPS_block_size_y,
      ((z_size - 1) / OPS_block_size_z + 1) * OPS_block_size_z};
  size_t localWorkSize[3] = {OPS_block_size_x, OPS_block_size_y,
                             OPS_block_size_z};

  int *arg2h = (int *)arg2.data;
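  // arg2 holds NUM_FIELDS global integers; the lines below stage them in
  // OPS's constant buffers and copy them to the device before the launch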

  int consts_bytes = 0;

  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));

  reallocConstArrays(consts_bytes);

  consts_bytes = 0;
  arg2.data = OPS_consts_h + consts_bytes;
  arg2.data_d = OPS_consts_d + consts_bytes;
  for (int d = 0; d < NUM_FIELDS; d++)
    ((int *)arg2.data)[d] = arg2h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  mvConstArraysToDevice(consts_bytes);

  // set up initial pointers
  int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
//......... remainder of the code omitted .........
Developer: gihanmudalige | Project: OPS | Lines of code: 101 | Source: update_halo_kernel5_plus_2_left_opencl_kernel.cpp


Example 3: ops_par_loop_advec_cell_kernel4_ydir

// host stub function
void ops_par_loop_advec_cell_kernel4_ydir(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10) {

  ops_arg args[11] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10};


  ops_timing_realloc(36,"advec_cell_kernel4_ydir");
  OPS_kernels[36].count++;

  //compute locally allocated range for the sub-block
  int start[3];
  int end[3];
  #ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned) return;
  for ( int n=0; n<3; n++ ){
    start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
  #else //OPS_MPI
  for ( int n=0; n<3; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif //OPS_MPI

  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  int z_size = MAX(0,end[2]-start[2]);


  xdim0 = args[0].dat->size[0]*args[0].dat->dim;
  ydim0 = args[0].dat->size[1];
  xdim1 = args[1].dat->size[0]*args[1].dat->dim;
  ydim1 = args[1].dat->size[1];
  xdim2 = args[2].dat->size[0]*args[2].dat->dim;
  ydim2 = args[2].dat->size[1];
  xdim3 = args[3].dat->size[0]*args[3].dat->dim;
  ydim3 = args[3].dat->size[1];
  xdim4 = args[4].dat->size[0]*args[4].dat->dim;
  ydim4 = args[4].dat->size[1];
  xdim5 = args[5].dat->size[0]*args[5].dat->dim;
  ydim5 = args[5].dat->size[1];
  xdim6 = args[6].dat->size[0]*args[6].dat->dim;
  ydim6 = args[6].dat->size[1];
  xdim7 = args[7].dat->size[0]*args[7].dat->dim;
  ydim7 = args[7].dat->size[1];
  xdim8 = args[8].dat->size[0]*args[8].dat->dim;
  ydim8 = args[8].dat->size[1];
  xdim9 = args[9].dat->size[0]*args[9].dat->dim;
  ydim9 = args[9].dat->size[1];
  xdim10 = args[10].dat->size[0]*args[10].dat->dim;
  ydim10 = args[10].dat->size[1];

  //Timing
  double t1,t2,c1,c2;
  ops_timers_core(&c2,&t2);
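  // the xdim*/ydim* globals are mirrored on the device; re-upload them only
  // when they differ from the cached *_h host copies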

  if (xdim0 != xdim0_advec_cell_kernel4_ydir_h || ydim0 != ydim0_advec_cell_kernel4_ydir_h ||
      xdim1 != xdim1_advec_cell_kernel4_ydir_h || ydim1 != ydim1_advec_cell_kernel4_ydir_h ||
      xdim2 != xdim2_advec_cell_kernel4_ydir_h || ydim2 != ydim2_advec_cell_kernel4_ydir_h ||
      xdim3 != xdim3_advec_cell_kernel4_ydir_h || ydim3 != ydim3_advec_cell_kernel4_ydir_h ||
      xdim4 != xdim4_advec_cell_kernel4_ydir_h || ydim4 != ydim4_advec_cell_kernel4_ydir_h ||
      xdim5 != xdim5_advec_cell_kernel4_ydir_h || ydim5 != ydim5_advec_cell_kernel4_ydir_h ||
      xdim6 != xdim6_advec_cell_kernel4_ydir_h || ydim6 != ydim6_advec_cell_kernel4_ydir_h ||
      xdim7 != xdim7_advec_cell_kernel4_ydir_h || ydim7 != ydim7_advec_cell_kernel4_ydir_h ||
      xdim8 != xdim8_advec_cell_kernel4_ydir_h || ydim8 != ydim8_advec_cell_kernel4_ydir_h ||
      xdim9 != xdim9_advec_cell_kernel4_ydir_h || ydim9 != ydim9_advec_cell_kernel4_ydir_h ||
      xdim10 != xdim10_advec_cell_kernel4_ydir_h || ydim10 != ydim10_advec_cell_kernel4_ydir_h) {
    xdim0_advec_cell_kernel4_ydir = xdim0;
    xdim0_advec_cell_kernel4_ydir_h = xdim0;
    ydim0_advec_cell_kernel4_ydir = ydim0;
    ydim0_advec_cell_kernel4_ydir_h = ydim0;
    xdim1_advec_cell_kernel4_ydir = xdim1;
    xdim1_advec_cell_kernel4_ydir_h = xdim1;
    ydim1_advec_cell_kernel4_ydir = ydim1;
    ydim1_advec_cell_kernel4_ydir_h = ydim1;
    xdim2_advec_cell_kernel4_ydir = xdim2;
    xdim2_advec_cell_kernel4_ydir_h = xdim2;
    ydim2_advec_cell_kernel4_ydir = ydim2;
    ydim2_advec_cell_kernel4_ydir_h = ydim2;
    xdim3_advec_cell_kernel4_ydir = xdim3;
    xdim3_advec_cell_kernel4_ydir_h = xdim3;
    ydim3_advec_cell_kernel4_ydir = ydim3;
    ydim3_advec_cell_kernel4_ydir_h = ydim3;
    xdim4_advec_cell_kernel4_ydir = xdim4;
    xdim4_advec_cell_kernel4_ydir_h = xdim4;
    ydim4_advec_cell_kernel4_ydir = ydim4;
    ydim4_advec_cell_kernel4_ydir_h = ydim4;
    xdim5_advec_cell_kernel4_ydir = xdim5;
    xdim5_advec_cell_kernel4_ydir_h = xdim5;
    ydim5_advec_cell_kernel4_ydir = ydim5;
    ydim5_advec_cell_kernel4_ydir_h = ydim5;
    xdim6_advec_cell_kernel4_ydir = xdim6;
    xdim6_advec_cell_kernel4_ydir_h = xdim6;
    ydim6_advec_cell_kernel4_ydir = ydim6;
//......... remainder of the code omitted .........
Developer: satyajammy | Project: OPS | Lines of code: 101 | Source: advec_cell_kernel4_ydir_openacc_kernel.cpp


Example 4: ops_par_loop_revert_kernel

// host stub function
void ops_par_loop_revert_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
  ops_arg args[4] = { arg0, arg1, arg2, arg3};


  #ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args,4,range,0)) return;
  #endif

  ops_timing_realloc(0,"revert_kernel");
  OPS_kernels[0].count++;

  //compute locally allocated range for the sub-block
  int start[2];
  int end[2];
  #ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned) return;
  for ( int n=0; n<2; n++ ){
    start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
  #else //OPS_MPI
  for ( int n=0; n<2; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif //OPS_MPI

  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);


  int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
  int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
  int xdim2 = args[2].dat->size[0]*args[2].dat->dim;
  int xdim3 = args[3].dat->size[0]*args[3].dat->dim;

  //build opencl kernel if not already built

  buildOpenCLKernels_revert_kernel(
  xdim0,xdim1,xdim2,xdim3);

  //Timing
  double t1,t2,c1,c2;
  ops_timers_core(&c2,&t2);

  //set up OpenCL thread blocks
  size_t globalWorkSize[3] = {((x_size-1)/OPS_block_size_x+ 1)*OPS_block_size_x, ((y_size-1)/OPS_block_size_y + 1)*OPS_block_size_y, 1};
  size_t localWorkSize[3] =  {OPS_block_size_x,OPS_block_size_y,1};





  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;
  int dat2 = args[2].dat->elem_size;
  int dat3 = args[3].dat->elem_size;

  //set up initial pointers
  int d_m[OPS_MAX_DIM];
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
  #endif //OPS_MPI
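  // base0 is the flattened element offset of the iteration start inside
  // arg0's padded allocation (excluding the negative-direction halo d_m)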
  int base0 = 1 * 
  (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
  base0 = base0 + args[0].dat->size[0] *
  (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);

  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
  #endif //OPS_MPI
  int base1 = 1 * 
  (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
  base1 = base1 + args[1].dat->size[0] *
  (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);

  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d];
//......... remainder of the code omitted .........
Developer: satyajammy | Project: OPS | Lines of code: 101 | Source: revert_kernel_opencl_kernel.cpp


Example 5: ops_par_loop_set_val

// host stub function
void ops_par_loop_set_val(char const *name, ops_block block, int dim,
                          int *range, ops_arg arg0, ops_arg arg1) {

  ops_arg args[2] = {arg0, arg1};

#ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args, 2, range, 6))
    return;
#endif

  ops_timing_realloc(6, "set_val");
  OPS_kernels[6].count++;

  // compute locally allocated range for the sub-block
  int start[3];
  int end[3];
#ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  for (int n = 0; n < 3; n++) {
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 3; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif

  int x_size = MAX(0, end[0] - start[0]);
  int y_size = MAX(0, end[1] - start[1]);
  int z_size = MAX(0, end[2] - start[2]);

  xdim0 = args[0].dat->size[0];
  ydim0 = args[0].dat->size[1];

  // Timing
  double t1, t2, c1, c2;
  ops_timers_core(&c2, &t2);

  if (xdim0 != xdim0_set_val_h || ydim0 != ydim0_set_val_h) {
    xdim0_set_val = xdim0;
    xdim0_set_val_h = xdim0;
    ydim0_set_val = ydim0;
    ydim0_set_val_h = ydim0;
  }

  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);

  // set up initial pointers
  int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[0].dat->d_m[d];
#endif
  int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
                          args[0].dat->base[0] - d_m[0]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
                                         args[0].dat->base[1] - d_m[1]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
              (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
               d_m[2]);
  double *p_a0 = (double *)((char *)args[0].data + base0);

  double *p_a1 = (double *)args[1].data;

  ops_H_D_exchanges_host(args, 2);
  ops_halo_exchanges(args, 2, range);

  ops_timers_core(&c1, &t1);
  OPS_kernels[6].mpi_time += t1 - t2;

  set_val_c_wrapper(p_a0, p_a1, x_size, y_size, z_size);

  ops_timers_core(&c2, &t2);
  OPS_kernels[6].time += t2 - t1;
//......... remainder of the code omitted .........
Developer: gihanmudalige | Project: OPS | Lines of code: 101 | Source: set_val_mpiinline_kernel.cpp


Example 6: ops_par_loop_update_halo_kernel1_fr2

// host stub function
void ops_par_loop_update_halo_kernel1_fr2(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {

  ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};


  ops_timing_realloc(51,"update_halo_kernel1_fr2");
  OPS_kernels[51].count++;

  //compute locally allocated range for the sub-block
  int start[3];
  int end[3];
  #ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned) return;
  for ( int n=0; n<3; n++ ){
    start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
  #else //OPS_MPI
  for ( int n=0; n<3; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif //OPS_MPI

  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  int z_size = MAX(0,end[2]-start[2]);


  xdim0 = args[0].dat->size[0]*args[0].dat->dim;
  ydim0 = args[0].dat->size[1];
  xdim1 = args[1].dat->size[0]*args[1].dat->dim;
  ydim1 = args[1].dat->size[1];
  xdim2 = args[2].dat->size[0]*args[2].dat->dim;
  ydim2 = args[2].dat->size[1];
  xdim3 = args[3].dat->size[0]*args[3].dat->dim;
  ydim3 = args[3].dat->size[1];
  xdim4 = args[4].dat->size[0]*args[4].dat->dim;
  ydim4 = args[4].dat->size[1];
  xdim5 = args[5].dat->size[0]*args[5].dat->dim;
  ydim5 = args[5].dat->size[1];
  xdim6 = args[6].dat->size[0]*args[6].dat->dim;
  ydim6 = args[6].dat->size[1];

  //Timing
  double t1,t2,c1,c2;
  ops_timers_core(&c2,&t2);

  if (xdim0 != xdim0_update_halo_kernel1_fr2_h || ydim0 != ydim0_update_halo_kernel1_fr2_h ||
      xdim1 != xdim1_update_halo_kernel1_fr2_h || ydim1 != ydim1_update_halo_kernel1_fr2_h ||
      xdim2 != xdim2_update_halo_kernel1_fr2_h || ydim2 != ydim2_update_halo_kernel1_fr2_h ||
      xdim3 != xdim3_update_halo_kernel1_fr2_h || ydim3 != ydim3_update_halo_kernel1_fr2_h ||
      xdim4 != xdim4_update_halo_kernel1_fr2_h || ydim4 != ydim4_update_halo_kernel1_fr2_h ||
      xdim5 != xdim5_update_halo_kernel1_fr2_h || ydim5 != ydim5_update_halo_kernel1_fr2_h ||
      xdim6 != xdim6_update_halo_kernel1_fr2_h || ydim6 != ydim6_update_halo_kernel1_fr2_h) {
    xdim0_update_halo_kernel1_fr2 = xdim0;
    xdim0_update_halo_kernel1_fr2_h = xdim0;
    ydim0_update_halo_kernel1_fr2 = ydim0;
    ydim0_update_halo_kernel1_fr2_h = ydim0;
    xdim1_update_halo_kernel1_fr2 = xdim1;
    xdim1_update_halo_kernel1_fr2_h = xdim1;
    ydim1_update_halo_kernel1_fr2 = ydim1;
    ydim1_update_halo_kernel1_fr2_h = ydim1;
    xdim2_update_halo_kernel1_fr2 = xdim2;
    xdim2_update_halo_kernel1_fr2_h = xdim2;
    ydim2_update_halo_kernel1_fr2 = ydim2;
    ydim2_update_halo_kernel1_fr2_h = ydim2;
    xdim3_update_halo_kernel1_fr2 = xdim3;
    xdim3_update_halo_kernel1_fr2_h = xdim3;
    ydim3_update_halo_kernel1_fr2 = ydim3;
    ydim3_update_halo_kernel1_fr2_h = ydim3;
    xdim4_update_halo_kernel1_fr2 = xdim4;
    xdim4_update_halo_kernel1_fr2_h = xdim4;
    ydim4_update_halo_kernel1_fr2 = ydim4;
    ydim4_update_halo_kernel1_fr2_h = ydim4;
    xdim5_update_halo_kernel1_fr2 = xdim5;
    xdim5_update_halo_kernel1_fr2_h = xdim5;
    ydim5_update_halo_kernel1_fr2 = ydim5;
    ydim5_update_halo_kernel1_fr2_h = ydim5;
    xdim6_update_halo_kernel1_fr2 = xdim6;
    xdim6_update_halo_kernel1_fr2_h = xdim6;
    ydim6_update_halo_kernel1_fr2 = ydim6;
    ydim6_update_halo_kernel1_fr2_h = ydim6;
  }

  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;
  int dat2 = args[2].dat->elem_size;
  int dat3 = args[3].dat->elem_size;
  int dat4 = args[4].dat->elem_size;
//......... remainder of the code omitted .........
Developer: satyajammy | Project: OPS | Lines of code: 101 | Source: update_halo_kernel1_fr2_openacc_kernel.cpp


Example 7: ops_par_loop_update_halo_kernel2_xvel_minus_2_a

// host stub function
void ops_par_loop_update_halo_kernel2_xvel_minus_2_a(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2) {

  ops_arg args[3] = { arg0, arg1, arg2};


  #ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args,3,range,56)) return;
  #endif

  ops_timing_realloc(56,"update_halo_kernel2_xvel_minus_2_a");
  OPS_kernels[56].count++;

  //compute locally allocated range for the sub-block
  int start[2];
  int end[2];
  #ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned) return;
  for ( int n=0; n<2; n++ ){
    start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
  #else //OPS_MPI
  for ( int n=0; n<2; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif //OPS_MPI

  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);


  xdim0 = args[0].dat->size[0]*args[0].dat->dim;
  xdim1 = args[1].dat->size[0]*args[1].dat->dim;

  //Timing
  double t1,t2,c1,c2;
  ops_timers_core(&c2,&t2);

  if (xdim0 != xdim0_update_halo_kernel2_xvel_minus_2_a_h || xdim1 != xdim1_update_halo_kernel2_xvel_minus_2_a_h) {
    xdim0_update_halo_kernel2_xvel_minus_2_a = xdim0;
    xdim0_update_halo_kernel2_xvel_minus_2_a_h = xdim0;
    xdim1_update_halo_kernel2_xvel_minus_2_a = xdim1;
    xdim1_update_halo_kernel2_xvel_minus_2_a_h = xdim1;
  }

  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;

  int *arg2h = (int *)arg2.data;
  //Upload large globals
  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  args[2].data = OPS_consts_h + consts_bytes;
  args[2].data_d = OPS_consts_d + consts_bytes;
  for (int d=0; d<NUM_FIELDS; d++) ((int *)args[2].data)[d] = arg2h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
  mvConstArraysToDevice(consts_bytes);

  //set up initial pointers
  int d_m[OPS_MAX_DIM];
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
  #endif //OPS_MPI
  int base0 = dat0 * 1 * 
    (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
  #ifdef OPS_GPU
  double *p_a0 = (double *)((char *)args[0].data_d + base0);
  #else
  double *p_a0 = (double *)((char *)args[0].data + base0);
  #endif

  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
  #endif //OPS_MPI
//......... remainder of the code omitted .........
Developer: satyajammy | Project: OPS | Lines of code: 101 | Source: update_halo_kernel2_xvel_minus_2_a_openacc_kernel.cpp


Example 8: ops_par_loop_left_bndcon

// host stub function
void ops_par_loop_left_bndcon(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1) {

  //Timing
  double t1,t2,c1,c2;

  char *p_a[2];
  int  offs[2][2];
  ops_arg args[2] = { arg0, arg1};



  #ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args,2,range,2)) return;
  #endif

  if (OPS_diags > 1) {
    ops_timing_realloc(2,"left_bndcon");
    OPS_kernels[2].count++;
    ops_timers_core(&c2,&t2);
  }

  //compute locally allocated range for the sub-block
  int start[2];
  int end[2];

  #ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned) return;
  for ( int n=0; n<2; n++ ){
    start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
  #else
  for ( int n=0; n<2; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif
  #ifdef OPS_DEBUG
  ops_register_args(args, "left_bndcon");
  #endif

  offs[0][0] = args[0].stencil->stride[0]*1;  //unit step in x dimension
  offs[0][1] = off2D(1, &start[0],
      &end[0],args[0].dat->size, args[0].stencil->stride) - offs[0][0];
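  // offs[0][1] is the pointer jump needed to move from the end of one row of
  // the iteration range to the start of the next row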


  int arg_idx[2];
  #ifdef OPS_MPI
  arg_idx[0] = sb->decomp_disp[0]+start[0];
  arg_idx[1] = sb->decomp_disp[1]+start[1];
  #else
  arg_idx[0] = start[0];
  arg_idx[1] = start[1];
  #endif

  int off0_0 = offs[0][0];
  int off0_1 = offs[0][1];
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);

  //set up initial pointers and exchange halos if necessary
  int base0 = args[0].dat->base_offset + (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) * start[0] * args[0].stencil->stride[0];
  base0 = base0+ (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
    args[0].dat->size[0] *
    start[1] * args[0].stencil->stride[1];
  p_a[0] = (char *)args[0].data + base0;

  p_a[1] = (char *)arg_idx;



  //initialize global variable with the dimension of dats
  xdim0 = args[0].dat->size[0];

  //Halo Exchanges
  ops_H_D_exchanges_host(args, 2);
  ops_halo_exchanges(args,2,range);
  ops_H_D_exchanges_host(args, 2);

  if (OPS_diags > 1) {
    ops_timers_core(&c1,&t1);
    OPS_kernels[2].mpi_time += t1-t2;
  }

  int n_x;
//......... remainder of the code omitted .........
Developer: gihanmudalige | Project: OPS | Lines of code: 101 | Source: left_bndcon_seq_kernel.cpp


Example 9: ops_par_loop_advec_mom_kernel_mass_flux_y

// host stub function
void ops_par_loop_advec_mom_kernel_mass_flux_y(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1) {

  ops_arg args[2] = { arg0, arg1};


  ops_timing_realloc(21,"advec_mom_kernel_mass_flux_y");
  OPS_kernels[21].count++;

  //compute locally allocated range for the sub-block
  int start[3];
  int end[3];
  #ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned) return;
  for ( int n=0; n<3; n++ ){
    start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
  #else //OPS_MPI
  for ( int n=0; n<3; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif //OPS_MPI

  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  int z_size = MAX(0,end[2]-start[2]);


  xdim0 = args[0].dat->size[0]*args[0].dat->dim;
  ydim0 = args[0].dat->size[1];
  xdim1 = args[1].dat->size[0]*args[1].dat->dim;
  ydim1 = args[1].dat->size[1];

  //Timing
  double t1,t2,c1,c2;
  ops_timers_core(&c2,&t2);

  if (xdim0 != xdim0_advec_mom_kernel_mass_flux_y_h ||
      ydim0 != ydim0_advec_mom_kernel_mass_flux_y_h ||
      xdim1 != xdim1_advec_mom_kernel_mass_flux_y_h ||
      ydim1 != ydim1_advec_mom_kernel_mass_flux_y_h) {
    xdim0_advec_mom_kernel_mass_flux_y = xdim0;
    xdim0_advec_mom_kernel_mass_flux_y_h = xdim0;
    ydim0_advec_mom_kernel_mass_flux_y = ydim0;
    ydim0_advec_mom_kernel_mass_flux_y_h = ydim0;
    xdim1_advec_mom_kernel_mass_flux_y = xdim1;
    xdim1_advec_mom_kernel_mass_flux_y_h = xdim1;
    ydim1_advec_mom_kernel_mass_flux_y = ydim1;
    ydim1_advec_mom_kernel_mass_flux_y_h = ydim1;
  }

  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;


  //set up initial pointers
  int d_m[OPS_MAX_DIM];
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
  #endif //OPS_MPI
  int base0 = dat0 * 1 * 
    (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    args[0].dat->size[1] *
    (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
  #ifdef OPS_GPU
  double *p_a0 = (double *)((char *)args[0].data_d + base0);
  #else
  double *p_a0 = (double *)((char *)args[0].data + base0);
  #endif

  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
  #endif //OPS_MPI
  int base1 = dat1 * 1 * 
    (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
//......... remainder of the code omitted .........
Developer: satyajammy | Project: OPS | Lines of code: 101 | Source: advec_mom_kernel_mass_flux_y_openacc_kernel.cpp


Example 10: ops_par_loop_calc_dt_kernel_get

// host stub function
void ops_par_loop_calc_dt_kernel_get(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) {

  ops_arg args[6] = { arg0, arg1, arg2, arg3, arg4, arg5};


  ops_timing_realloc(128,"calc_dt_kernel_get");
  OPS_kernels[128].count++;

  //compute locally allocated range for the sub-block
  int start[3];
  int end[3];
  #ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned) return;
  for ( int n=0; n<3; n++ ){
    start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
  #else //OPS_MPI
  for ( int n=0; n<3; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif //OPS_MPI

  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  int z_size = MAX(0,end[2]-start[2]);


  xdim0 = args[0].dat->size[0]*args[0].dat->dim;
  ydim0 = args[0].dat->size[1];
  xdim1 = args[1].dat->size[0]*args[1].dat->dim;
  ydim1 = args[1].dat->size[1];
  xdim4 = args[4].dat->size[0]*args[4].dat->dim;
  ydim4 = args[4].dat->size[1];

  //Timing
  double t1,t2,c1,c2;
  ops_timers_core(&c2,&t2);

  if (xdim0 != xdim0_calc_dt_kernel_get_h || ydim0 != ydim0_calc_dt_kernel_get_h ||
      xdim1 != xdim1_calc_dt_kernel_get_h || ydim1 != ydim1_calc_dt_kernel_get_h ||
      xdim4 != xdim4_calc_dt_kernel_get_h || ydim4 != ydim4_calc_dt_kernel_get_h) {
    xdim0_calc_dt_kernel_get = xdim0;
    xdim0_calc_dt_kernel_get_h = xdim0;
    ydim0_calc_dt_kernel_get = ydim0;
    ydim0_calc_dt_kernel_get_h = ydim0;
    xdim1_calc_dt_kernel_get = xdim1;
    xdim1_calc_dt_kernel_get_h = xdim1;
    ydim1_calc_dt_kernel_get = ydim1;
    ydim1_calc_dt_kernel_get_h = ydim1;
    xdim4_calc_dt_kernel_get = xdim4;
    xdim4_calc_dt_kernel_get_h = xdim4;
    ydim4_calc_dt_kernel_get = ydim4;
    ydim4_calc_dt_kernel_get_h = ydim4;
  }

  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;
  int dat4 = args[4].dat->elem_size;

  #ifdef OPS_MPI
  double *arg2h = (double *)(((ops_reduction)args[2].data)->data + ((ops_reduction)args[2].data)->size * block->index);
  #else //OPS_MPI
  double *arg2h = (double *)(((ops_reduction)args[2].data)->data);
  #endif //OPS_MPI
  #ifdef OPS_MPI
  double *arg3h = (double *)(((ops_reduction)args[3].data)->data + ((ops_reduction)args[3].data)->size * block->index);
  #else //OPS_MPI
  double *arg3h = (double *)(((ops_reduction)args[3].data)->data);
  #endif //OPS_MPI
  #ifdef OPS_MPI
  double *arg5h = (double *)(((ops_reduction)args[5].data)->data + ((ops_reduction)args[5].data)->size * block->index);
  #else //OPS_MPI
  double *arg5h = (double *)(((ops_reduction)args[5].data)->data);
  #endif //OPS_MPI

  //set up initial pointers
  int d_m[OPS_MAX_DIM];
  #ifdef OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
  #else //OPS_MPI
  for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
  #endif //OPS_MPI
  int base0 = dat0 * 1 * 
    (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
//......... remainder of the code omitted .........
Developer: satyajammy | Project: OPS | Lines of code: 101 | Source: calc_dt_kernel_get_openacc_kernel.cpp


Example 11: ops_par_loop_update_halo_kernel2_zvel_plus_4_right

// host stub function
void ops_par_loop_update_halo_kernel2_zvel_plus_4_right(
    char const *name, ops_block block, int dim, int *range, ops_arg arg0,
    ops_arg arg1, ops_arg arg2) {

  // Timing
  double t1, t2, c1, c2;
  ops_arg args[3] = {arg0, arg1, arg2};

#ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args, 3, range, 53))
    return;
#endif

  if (OPS_diags > 1) {
    ops_timing_realloc(53, "update_halo_kernel2_zvel_plus_4_right");
    OPS_kernels[53].count++;
    ops_timers_core(&c1, &t1);
  }

  // compute locally allocated range for the sub-block

  int start[3];
  int end[3];
#ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
#endif // OPS_MPI

  int arg_idx[3];
  int arg_idx_base[3];
#ifdef OPS_MPI
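  // compute_ranges clips the global iteration range to this rank's sub-block
  // (the same logic the earlier examples spell out inline) and also fills in
  // the global index offsets arg_idx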
  if (compute_ranges(args, 3, block, range, start, end, arg_idx) < 0)
    return;
#else // OPS_MPI
  for (int n = 0; n < 3; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
    arg_idx[n] = start[n];
  }
#endif
  for (int n = 0; n < 3; n++) {
    arg_idx_base[n] = arg_idx[n];
  }

  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;

  int *arg2h = (int *)arg2.data;
// Upload large globals
#ifdef OPS_GPU
  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  args[2].data = OPS_consts_h + consts_bytes;
  args[2].data_d = OPS_consts_d + consts_bytes;
  for (int d = 0; d < NUM_FIELDS; d++)
    ((int *)args[2].data)[d] = arg2h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  mvConstArraysToDevice(consts_bytes);
#endif // OPS_GPU

  // set up initial pointers
  int base0 = args[0].dat->base_offset +
              (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
                  start[0] * args[0].stencil->stride[0];
  base0 = base0 +
          (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
              args[0].dat->size[0] * start[1] * args[0].stencil->stride[1];
  base0 = base0 +
          (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
              args[0].dat->size[0] * args[0].dat->size[1] * start[2] *
              args[0].stencil->stride[2];
#ifdef OPS_GPU
  double *p_a0 = (double *)((char *)args[0].data_d + base0);
#else
  double *p_a0 = (double *)((char *)args[0].data + base0);
#endif

  int base1 = args[1].dat->base_offset +
              (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size) *
                  start[0] * args[1].stencil->stride[0];
  base1 = base1 +
          (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size) *
              args[1].dat->size[0] * start[1] * args[1].stencil->stride[1];
  base1 = base1 +
          (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size) *
              args[1].dat->size[0] * args[1].dat->size[1] * start[2] *
              args[1].stencil->stride[2];
#ifdef OPS_GPU
  double *p_a1 = (double *)((char *)args[1].data_d + base1);
#else
  double *p_a1 = (double *)((char *)args[1].data + base1);
#endif

#ifdef OPS_GPU
  int *p_a2 = (int *)args[2].data_d;
#else
  int *p_a2 = arg2h;
#endif
//......... remainder of the code omitted .........
Developer: gihanmudalige | Project: OPS | Lines of code: 101 | Source: update_halo_kernel2_zvel_plus_4_right_openacc_kernel.cpp


Example 12: ops_par_loop_poisson_kernel_initialguess

// host stub function
void ops_par_loop_poisson_kernel_initialguess(char const *name, ops_block block,
                                              int dim, int *range,
                                              ops_arg arg0) {

  // Timing
  double t1, t2, c1, c2;
  ops_arg args[1] = {arg0};

#ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args, 1, range, 2))
    return;
#endif

  if (OPS_diags > 1) {
    ops_timing_realloc(2, "poisson_kernel_initialguess");
    OPS_kernels[2].count++;
    ops_timers_core(&c1, &t1);
  }

  // compute locally allocated range for the sub-block

  int start[2];
  int end[2];
#ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
#endif // OPS_MPI

  int arg_idx[2];
  int arg_idx_base[2];
#ifdef OPS_MPI
  if (compute_ranges(args, 1, block, range, start, end, arg_idx) < 0)
    return;
#else // OPS_MPI
  for (int n = 0; n < 2; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
    arg_idx[n] = start[n];
  }
#endif
  for (int n = 0; n < 2; n++) {
    arg_idx_base[n] = arg_idx[n];
  }

  int dat0 = args[0].dat->elem_size;

  // set up initial pointers
  int base0 = args[0].dat->base_offset +
              (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
                  start[0] * args[0].stencil->stride[0];
  base0 = base0 +
          (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size) *
              args[0].dat->size[0] * start[1] * args[0].stencil->stride[1];
#ifdef OPS_GPU
  double *p_a0 = (double *)((char *)args[0].data_d + base0);
#else
  double *p_a0 = (double *)((char *)args[0].data + base0);
#endif

  int x_size = MAX(0, end[0] - start[0]);
  int y_size = MAX(0, end[1] - start[1]);

  // initialize global variable with the dimension of dats
  xdim0 = args[0].dat->size[0];
  if (xdim0 != xdim0_poisson_kernel_initialguess_h) {
    xdim0_poisson_kernel_initialguess = xdim0;
    xdim0_poisson_kernel_initialguess_h = xdim0;
  }

// Halo Exchanges

#ifdef OPS_GPU
  ops_H_D_exchanges_device(args, 1);
#else
  ops_H_D_exchanges_host(args, 1);
#endif
  ops_halo_exchanges(args, 1, range);

#ifdef OPS_GPU
  ops_H_D_exchanges_device(args, 1);
#else
  ops_H_D_exchanges_host(args, 1);
#endif
  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[2].mpi_time += t2 - t1;
  }

  poisson_kernel_initialguess_c_wrapper(p_a0, x_size, y_size);

  if (OPS_diags > 1) {
    ops_timers_core(&c1, &t1);
    OPS_kernels[2].time += t1 - t2;
  }
#ifdef OPS_GPU
  ops_set_dirtybit_device(args, 1);
#else
  ops_set_dirtybit_host(args, 1);
#endif
  ops_set_halo_dirtybit3(&args[0], range);
//......... remainder of the code omitted .........
Developer: gihanmudalige | Project: OPS | Lines of code: 101 | Source: poisson_kernel_initialguess_openacc_kernel.cpp


Example 13: ops_par_loop_poisson_kernel_populate

// host stub function
void ops_par_loop_poisson_kernel_populate(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
 ops_arg arg4, ops_arg arg5) {

  //Timing
  double t1,t2,c1,c2;
  ops_timers_core(&c1,&t1);


  int  offs[6][2];
  ops_arg args[6] = { arg0, arg1, arg2, arg3, arg4, arg5};



  ops_timing_realloc(0,"poisson_kernel_populate");
  OPS_kernels[0].count++;

  //compute locally allocated range for the sub-block

  int start[2];
  int end[2];

  #ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned) return;
  for ( int n=0; n<2; n++ ){
    start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
  #else //OPS_MPI
  for ( int n=0; n<2; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif //OPS_MPI
  #ifdef OPS_DEBUG
  ops_register_args(args, "poisson_kernel_populate");
  #endif

  offs[3][0] = args[3].stencil->stride[0]*1;  //unit step in x dimension
  offs[3][1] = off2D(1, &start[0],
      &end[0],args[3].dat->size, args[3].stencil->stride) - offs[3][0];

  offs[4][0] = args[4].stencil->stride[0]*1;  //unit step in x dimension
  offs[4][1] = off2D(1, &start[0],
      &end[0],args[4].dat->size, args[4].stencil->stride) - offs[4][0];

  offs[5][0] = args[5].stencil->stride[0]*1;  //unit step in x dimension
  offs[5][1] = off2D(1, &start[0],
      &end[0],args[5].dat->size, args[5].stencil->stride) - offs[5][0];



  int off3_0 = offs[3][0];
  int off3_1 = offs[3][1];
  int dat3 = args[3].dat->elem_size;
  int off4_0 = offs[4][0];
  int off4_1 = offs[4][1];
  int dat4 = args[4].dat->elem_size;
  int off5_0 = offs[5][0];
  int off5_1 = offs[5][1];
  int dat5 = args[5].dat->elem_size;


  #ifdef _OPENMP
  int nthreads = omp_get_max_threads( );
  #else
  int nthreads = 1;
  #endif
  xdim3 = args[3].dat->size[0]*args[3].dat->dim;
  xdim4 = args[4].dat->size[0]*args[4].dat->dim;
  xdim5 = args[5].dat->size[0]*args[5].dat->dim;

  ops_H_D_exchanges_host(args, 6);

  //Halo Exchanges
  ops_halo_exchanges(args,6,range);


  ops_timers_core(&c2,&t2);
  OPS_kernels[0].mpi_time += t2-t1;


  #pragma omp parallel for
  for ( int thr=0; thr<nthreads; thr++ ){

//......... remainder of the code omitted .........
Developer: s
