C++ MPI_Type_struct Function Code Examples


This article collects typical usage examples of the MPI_Type_struct function in C/C++. If you are looking for concrete examples of how to call MPI_Type_struct, the selected code samples below should help.



The following presents 20 code examples of the MPI_Type_struct function, sorted by popularity by default.
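Before the examples, a quick orientation: MPI_Type_struct builds a derived datatype from arrays of block lengths, byte displacements, and element datatypes. It is an MPI-1 routine that was deprecated in MPI-2 (in favor of MPI_Type_create_struct and MPI_Get_address) and removed in MPI-3, so the examples below are typical of older codebases and may need a legacy-compatibility MPI build to compile today. A minimal sketch of the usual pattern follows; it is not taken from any example below, and the particle_t struct is hypothetical.

/* Minimal sketch: describe a C struct with mixed member types to MPI using
 * the classic MPI-1 calls that the examples in this article rely on.
 * With an MPI-2/3 library, use MPI_Type_create_struct and MPI_Get_address. */
#include <mpi.h>

typedef struct {
    double pos[3];   /* block of 3 doubles */
    int    id;       /* block of 1 int     */
} particle_t;

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);

    particle_t   p;
    int          blocklen[2] = { 3, 1 };
    MPI_Aint     disp[2], base;
    MPI_Datatype types[2]    = { MPI_DOUBLE, MPI_INT };
    MPI_Datatype particle_type;

    /* Displacements are byte offsets of each member from the struct start. */
    MPI_Address(&p,        &base);
    MPI_Address(&p.pos[0], &disp[0]);
    MPI_Address(&p.id,     &disp[1]);
    disp[0] -= base;
    disp[1] -= base;

    MPI_Type_struct(2, blocklen, disp, types, &particle_type);
    MPI_Type_commit(&particle_type);

    /* The committed type can now be used in communication calls, e.g.
     * MPI_Send(&p, 1, particle_type, dest, tag, MPI_COMM_WORLD);        */

    MPI_Type_free(&particle_type);
    MPI_Finalize();
    return 0;
}

Several of the examples below also append an MPI_UB entry whose displacement is taken from a second array element; this forces the extent of the derived type to the full struct size, a job that post-MPI-2 code does with MPI_Type_create_resized.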

Example 1: DefineMPITypes

int DefineMPITypes()
{
  Winspecs winspecs;
  Flags flags;
  rect rectangle;

  int len[3];
  MPI_Aint disp[3];    /* MPI_Type_struct expects MPI_Aint displacements */
  MPI_Datatype types[3];

  NUM_type = MPI_DOUBLE;

  MPI_Type_contiguous(6, MPI_INT, &winspecs_type);
  MPI_Type_commit(&winspecs_type);

  len[0] = 10;
  len[1] = 2;
  len[2] = 6;
  disp[0] = (char *) (&(flags.breakout)) - (char *) (&(flags));
  disp[1] = (char *) (&(flags.boundary_sq)) - (char *) (&(flags));
  disp[2] = (char *) (&(flags.rmin)) - (char *) (&(flags));
  types[0] = MPI_INT;
  types[1] = MPI_DOUBLE;
  types[2] = NUM_type;
  MPI_Type_struct(3, len, disp, types, &flags_type);
  MPI_Type_commit(&flags_type);

  len[0] = 5;
  disp[0] = (char *) (&(rectangle.l)) - (char *) (&(rectangle));
  types[0] = MPI_INT;
  MPI_Type_struct(1, len, disp, types, &rect_type);
  MPI_Type_commit(&rect_type);

  return 0;
}
Developer: MartinLidh, Project: tddc78, Lines: 34, Source: pm_genproc_cleanedup.c


Example 2: main

int main(int argc, char *argv[])
{
    MPI_Datatype mystruct, vecs[3];
    MPI_Aint stride = 5, displs[3];
    int i=0, blockcount[3];
    int errs=0;

    MTest_Init( &argc, &argv );

    for(i = 0; i < 3; i++)
    {
        MPI_Type_hvector(i, 1, stride, MPI_INT, &vecs[i]);
        MPI_Type_commit(&vecs[i]);
        blockcount[i]=1;
    }
    displs[0]=0; displs[1]=-100; displs[2]=-200; /* irrelevant */

    MPI_Type_struct(3, blockcount, displs, vecs, &mystruct);
    MPI_Type_commit(&mystruct);

    MPI_Type_free(&mystruct);
    for(i = 0; i < 3; i++)
    {
        MPI_Type_free(&vecs[i]);
    }

    /* this time with the first argument always 0 */
    for(i = 0; i < 3; i++)
    {
        MPI_Type_hvector(0, 1, stride, MPI_INT, &vecs[i]);
        MPI_Type_commit(&vecs[i]);
        blockcount[i]=1;
    }
    displs[0]=0; displs[1]=-100; displs[2]=-200; /* irrelevant */

    MPI_Type_struct(3, blockcount, displs, vecs, &mystruct);
    MPI_Type_commit(&mystruct);

    MPI_Type_free(&mystruct);
    for(i = 0; i < 3; i++)
    {
        MPI_Type_free(&vecs[i]);
    }

    MTest_Finalize( errs );
    MPI_Finalize();

    return 0;
}
Developer: OngOngoing, Project: 219351_homework, Lines: 49, Source: struct-derived-zeros.c


Example 3: MPI_Address

 void peano::applications::poisson::jacobitutorial::records::RegularGridCell::initDatatype() {
    const int Attributes = 1;
    MPI_Datatype subtypes[Attributes] = {
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    RegularGridCell dummyRegularGridCell[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridCell[0]))), &base);
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegularGridCell::Datatype );
    MPI_Type_commit( &RegularGridCell::Datatype );
    
 }
Developer: p-hoffmann, Project: madpac, Lines: 25, Source: RegularGridCell.cpp


Example 4: offsetof

void BBLSGraph::createDatatypes() {
	// BBLSNode struct
	int block_lengths[5];
	block_lengths[0] = 1;
	block_lengths[1] = 1;
	block_lengths[2] = 1;
	block_lengths[3] = 1;
	block_lengths[4] = 1;
	
	MPI_Aint displacements[5];
	displacements[0] = offsetof(BBLSNode, type);
	displacements[1] = offsetof(BBLSNode, output);
	displacements[2] = offsetof(BBLSNode, inputLeft);
	displacements[3] = offsetof(BBLSNode, inputRight);
	displacements[4] = sizeof(BBLSNode);
	
	MPI_Datatype types[5];
	types[0] = MPI_INT;
	types[1] = MPI_UNSIGNED;
	types[2] = MPI_UNSIGNED;
	types[3] = MPI_UNSIGNED;
	types[4] = MPI_UB;
	
	MPI_Type_struct(5, block_lengths, displacements, types, &mpi_nodeType);
	MPI_Type_commit(&mpi_nodeType);
	
	// 3 BBLSNodes
	MPI_Type_contiguous(3, mpi_nodeType, &mpi_threeNodes);
	MPI_Type_commit(&mpi_threeNodes);
}
Developer: Jnesselr, Project: BBLS, Lines: 30, Source: BBLSGraph.cpp


Example 5: FC_FUNC

FC_FUNC( mpi_type_struct, MPI_TYPE_STRUCT )
         (int * count,       int * blocklens, long * displacements,
          int *oldtypes_ptr, int *newtype,    int *ierror)
{
  *ierror=MPI_Type_struct(*count, blocklens, displacements, 
                                    oldtypes_ptr, newtype);
}
Developer: mfvalin, Project: serial-mpi, Lines: 7, Source: type.c


Example 6: MPI_Address

 void peano::applications::navierstokes::prototype1::repositories::PrototypeRepositoryStatePacked::initDatatype() {
    const int Attributes = 3;
    MPI_Datatype subtypes[Attributes] = {
       MPI_INT,		 //action
       MPI_CHAR,		 //reduceState
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //action
       1,		 //reduceState
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    PrototypeRepositoryStatePacked dummyPrototypeRepositoryStatePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]._persistentRecords._action))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]._persistentRecords._reduceState))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[1]._persistentRecords._action))), 		&disp[2] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &PrototypeRepositoryStatePacked::Datatype );
    MPI_Type_commit( &PrototypeRepositoryStatePacked::Datatype );
    
 }
Developer: p-hoffmann, Project: madpac, Lines: 33, Source: PrototypeRepositoryState.cpp


Example 7: MPI_Address

 void peano::kernel::regulargrid::tests::records::TestCell::initDatatype() {
    const int Attributes = 1;
    MPI_Datatype subtypes[Attributes] = {
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    TestCell dummyTestCell[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyTestCell[0]))), &base);
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &TestCell::Datatype );
    MPI_Type_commit( &TestCell::Datatype );
    
 }
Developer: p-hoffmann, Project: madpac, Lines: 25, Source: TestCell.cpp


Example 8: main

int main( int argc, char *argv[] )
{
    int    rank, size;
    double dbuff = 0x0;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    if ( rank != size-1 ) {
        /* create pathological case */
        MPI_Datatype types[2] = { MPI_INT, MPI_FLOAT };
        int          blks[2]  = { 1, 1};
        MPI_Aint     displs[2] = {0, sizeof(float) };
        MPI_Datatype flt_int_type;
        MPI_Type_struct( 2, blks, displs, types, &flt_int_type );
        MPI_Type_commit( &flt_int_type );
        MPI_Bcast( &dbuff, 1, flt_int_type, 0, MPI_COMM_WORLD );
        MPI_Type_free( &flt_int_type );
    }
    else
        MPI_Bcast( &dbuff, 1, MPI_FLOAT_INT, 0, MPI_COMM_WORLD );

    MPI_Finalize();
    return 0;
}
Developer: BoiseState, Project: CS430-resources, Lines: 26, Source: wrong_composite.c


Example 9: MPI_Address

 void peano::applications::latticeboltzmann::blocklatticeboltzmann::forcerecords::BlockPositionPacked::initDatatype() {
    const int Attributes = 2;
    MPI_Datatype subtypes[Attributes] = {
       MPI_DOUBLE,		 //_blockPosition
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       DIMENSIONS,		 //_blockPosition
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    BlockPositionPacked dummyBlockPositionPacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockPositionPacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockPositionPacked[0]._persistentRecords._blockPosition[0]))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyBlockPositionPacked[1]._persistentRecords._blockPosition[0])), 		&disp[1] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &BlockPositionPacked::Datatype );
    MPI_Type_commit( &BlockPositionPacked::Datatype );
    
 }
Developer: p-hoffmann, Project: madpac, Lines: 30, Source: BlockPosition.cpp


Example 10: Build_type

void Build_type( float* a, float* b, int* n, MPI_Datatype* point_t ) {
    /* n is transferred as MPI_INT below, so it must point to an int */
    int block_lengths[3];
    MPI_Aint displacements[3];
    MPI_Datatype typelist[3];
    MPI_Aint start_address;
    MPI_Aint address;

    block_lengths[0] = block_lengths[1] = block_lengths[2] = 1;
    typelist[0] = MPI_FLOAT;
    typelist[1] = MPI_FLOAT;
    typelist[2] = MPI_INT;

    displacements[0] = 0;
    MPI_Address(a, &start_address);
    MPI_Address(b, &address);
    displacements[1] = address - start_address;
    
    MPI_Address(n, &address);
    displacements[2] = address - start_address;

    MPI_Type_struct(3, block_lengths, displacements, typelist, point_t);
    MPI_Type_commit(point_t);

}
Developer: ajdecon, Project: play, Lines: 25, Source: derived.c


Example 11: MPI_Address

 void peano::applications::latticeboltzmann::blocklatticeboltzmann::repositories::BlockLatticeBoltzmannBatchJobRepositoryState::initDatatype() {
    const int Attributes = 3;
    MPI_Datatype subtypes[Attributes] = {
       MPI_INT,		 //action
       MPI_CHAR,		 //reduceState
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //action
       1,		 //reduceState
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    BlockLatticeBoltzmannBatchJobRepositoryState dummyBlockLatticeBoltzmannBatchJobRepositoryState[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]._persistentRecords._action))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]._persistentRecords._reduceState))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[1]._persistentRecords._action))), 		&disp[2] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &BlockLatticeBoltzmannBatchJobRepositoryState::Datatype );
    MPI_Type_commit( &BlockLatticeBoltzmannBatchJobRepositoryState::Datatype );
    
 }
Developer: p-hoffmann, Project: madpac, Lines: 33, Source: BlockLatticeBoltzmannBatchJobRepositoryState.cpp


Example 12: MPI_Address

 void peano::integration::partitioncoupling::builtin::records::ForceTorquePacked::initDatatype() {
    const int Attributes = 3;
    MPI_Datatype subtypes[Attributes] = {
       MPI_DOUBLE,		 //_translationalForce
       MPI_DOUBLE,		 //_torque
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       3,		 //_translationalForce
       3,		 //_torque
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    ForceTorquePacked dummyForceTorquePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]._persistentRecords._translationalForce[0]))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]._persistentRecords._torque[0]))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyForceTorquePacked[1]._persistentRecords._translationalForce[0])), 		&disp[2] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &ForceTorquePacked::Datatype );
    MPI_Type_commit( &ForceTorquePacked::Datatype );
    
 }
Developer: p-hoffmann, Project: madpac, Lines: 33, Source: ForceTorque.cpp


Example 13: rnemd_init

void rnemd_init(struct beads *b)
{
	inuse = 1;

	if (N % 2 == 1) 
		fatal(EINVAL, "rnemd: N must be even");
	if (dd < 0 || dd >= 3 || dg < 0 || dg >= 3)
		fatal(EINVAL, "rnemd: gradient / velocity invalid");

	printf("rnemd: slabs=%ld swaps=%ld gradient=%ld velocity=%ld\n",
								N, sw, dg, dd);
	assert(N && sw);
	int blocklens[2] = {1, 2};
	struct rnemd_list q;
	ptrdiff_t indices[2] = {(ptrdiff_t)&q.v - (ptrdiff_t)&q,
		(ptrdiff_t)&q.pos - (ptrdiff_t)&q};
	MPI_Datatype old_types[2] = {MPI_DOUBLE, MPI_INT};
	MPI_Type_struct(ARRAY_SIZE(blocklens), blocklens, indices, old_types, &rnemd_type);
	MPI_Type_commit(&rnemd_type);
	
	MPI_Comm_rank(comm_grid, &rank);
	MPI_Comm_size(comm_grid, &size);
				
	max = MAX(0x10000, 2 * sw * size);
	list = calloc(max, sizeof(*list));
	if (list == NULL) novm("rnemd: list");

	t0 = b->time;
}
Developer: sabeiro, Project: Allink, Lines: 29, Source: rnemd.c


Example 14: Build_derived_type

void Build_derived_type(border* indata, MPI_Datatype* message_type_ptr){
  int block_lengths[3];

  MPI_Aint displacements[3];
  MPI_Aint addresses[4];
  MPI_Datatype typelist[3];

  /* Build a derived datatype containing three ints */

  /* First, specify the element types */

  typelist[0]=MPI_INT;
  typelist[1]=MPI_INT; 
  typelist[2]=MPI_INT;

 

  /* Specify the number of elements of each type */
  block_lengths[0]=block_lengths[1]=block_lengths[2] = 1;
  
  /* Compute the element displacements relative to indata */
  MPI_Address(indata, &addresses[0]);
  MPI_Address(&(indata->left), &addresses[1]);
  MPI_Address(&(indata->right), &addresses[2]);
  MPI_Address(&(indata->length), &addresses[3]);

  displacements[0]=addresses[1]-addresses[0];
  displacements[1]=addresses[2]-addresses[0];
  displacements[2]=addresses[3]-addresses[0];
  
  /* Create the derived type */
  MPI_Type_struct(3, block_lengths, displacements,typelist, message_type_ptr);
  /* Commit it so it can be used in communication */
  MPI_Type_commit(message_type_ptr);
} /* Build_derived_type */
Developer: stardreamer, Project: ParallelLab, Lines: 35, Source: build_derived_type.c


Example 15: mytype_commit

void mytype_commit(struct mystruct value){

  MPI_Aint indices[3];
  int blocklens[3];
  MPI_Datatype old_types[3];

  old_types[0] = MPI_CHAR;
  old_types[1] = MPI_INT;
  old_types[2] = MPI_DOUBLE;

  blocklens[0] = 1;
  blocklens[1] = 3;
  blocklens[2] = 5;

  MPI_Address(&value.ch, &indices[0]);
  MPI_Address(&value.a, &indices[1]);
  MPI_Address(&value.x, &indices[2]);

  indices[2] = indices[2] - indices[0];
  indices[1] = indices[1] - indices[0];
  indices[0] = 0;

  MPI_Type_struct(3,blocklens,indices,old_types,&mpistruct);

  MPI_Type_commit(&mpistruct);
}
Developer: akihiko-fujii, Project: mpi1, Lines: 26, Source: b.c


Example 16: MPI_Address

 void peano::applications::faxen::repositories::FaxenBatchJobRepositoryStatePacked::initDatatype() {
    const int Attributes = 2;
    MPI_Datatype subtypes[Attributes] = {
       MPI_INT,		 //action
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //action
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    FaxenBatchJobRepositoryStatePacked dummyFaxenBatchJobRepositoryStatePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyFaxenBatchJobRepositoryStatePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyFaxenBatchJobRepositoryStatePacked[0]._persistentRecords._action))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyFaxenBatchJobRepositoryStatePacked[1]._persistentRecords._action))), 		&disp[1] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &FaxenBatchJobRepositoryStatePacked::Datatype );
    MPI_Type_commit( &FaxenBatchJobRepositoryStatePacked::Datatype );
    
 }
Developer: p-hoffmann, Project: madpac, Lines: 30, Source: FaxenBatchJobRepositoryState.cpp


Example 17: InitializeMPIStuff

static void 
InitializeMPIStuff(void)
{
    const int n = 5;
    int          lengths[n]       = {1, 1, 1, 1, 1};
    MPI_Aint     displacements[n] = {0, 0, 0, 0, 0};
    MPI_Datatype types[n] = {MPI_FLOAT,
                             MPI_UNSIGNED_CHAR,
                             MPI_UNSIGNED_CHAR,
                             MPI_UNSIGNED_CHAR,
                             MPI_UNSIGNED_CHAR};

    // create the MPI data type for Pixel
    Pixel onePixel;
    MPI_Address(&onePixel.z, &displacements[0]);
    MPI_Address(&onePixel.r, &displacements[1]);
    MPI_Address(&onePixel.g, &displacements[2]);
    MPI_Address(&onePixel.b, &displacements[3]);
    MPI_Address(&onePixel.a, &displacements[4]);
    for (int i = n-1; i >= 0; i--)
        displacements[i] -= displacements[0];
    MPI_Type_struct(n, lengths, displacements, types,
                    &mpiTypePixel);
    MPI_Type_commit(&mpiTypePixel);

    // and the merge operation for a reduction
    MPI_Op_create((MPI_User_function *)MergePixelBuffersOp, 1,
                  &mpiOpMergePixelBuffers);
}
Developer: mclarsen, Project: EAVL, Lines: 29, Source: eavlCompositor.cpp


Example 18: MPI_Address

 void tarch::parallel::messages::RegisterAtNodePoolMessagePacked::initDatatype() {
    const int Attributes = 2;
    MPI_Datatype subtypes[Attributes] = {
       MPI_SHORT,		 //nodeName
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       MPI_MAX_NAME_STRING_ADDED_ONE,		 //nodeName
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    RegisterAtNodePoolMessagePacked dummyRegisterAtNodePoolMessagePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegisterAtNodePoolMessagePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegisterAtNodePoolMessagePacked[0]._persistentRecords._nodeName[0]))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyRegisterAtNodePoolMessagePacked[1]._persistentRecords._nodeName[0])), 		&disp[1] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegisterAtNodePoolMessagePacked::Datatype );
    MPI_Type_commit( &RegisterAtNodePoolMessagePacked::Datatype );
    
 }
Developer: p-hoffmann, Project: madpac, Lines: 30, Source: RegisterAtNodePoolMessage.cpp


Example 19: make_fishtype

void
make_fishtype (MPI_Datatype* fishtype)
{
  int err, i;

  /* QQQ: How does the data type affect performance? */
#if 1
  MPI_Aint disp[8];
  MPI_Datatype types[8] = { MPI_LB, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE,
			    MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_UB };
  int blocklen[8] = { 1, 1, 1, 1, 1, 1, 1, 1 };
  fish_t example[2];

  MPI_Address (&example[0], &disp[0]);
  MPI_Address (&example[0].x, &disp[1]);
  MPI_Address (&example[0].y, &disp[2]);
  MPI_Address (&example[0].vx, &disp[3]);
  MPI_Address (&example[0].vy, &disp[4]);
  MPI_Address (&example[0].ax, &disp[5]);
  MPI_Address (&example[0].ay, &disp[6]);
  MPI_Address (&example[1], &disp[7]);
  for (i = 7; i >= 0; --i) disp[i] -= disp[0];

  err = MPI_Type_struct (8, &blocklen[0], &disp[0], &types[0], fishtype);
#elif 0
  MPI_Aint disp[2];
  MPI_Aint base;
  MPI_Datatype types[2] = { MPI_DOUBLE, MPI_UB };
  int blocklen[2] = { 6, 1 };
  fish_t example[2];

  MPI_Address (&example[0], &base);
  MPI_Address (&example[0].x, &disp[0]);
  MPI_Address (&example[1], &disp[1]);
  disp[0] -= base;
  disp[1] -= base;
  err = MPI_Type_struct (2, blocklen, disp, types, fishtype);
#else
  err = MPI_Type_contiguous (6, MPI_DOUBLE, fishtype);
#endif

  if (err) {
    fprintf (stderr, "Error creating type: %d\n", err);
    MPI_Abort (MPI_COMM_WORLD, -29);
  }
  MPI_Type_commit (fishtype);
}
Developer: blickly, Project: ptii, Lines: 47, Source: fish.c


Example 20: main

int main (int argc, char *argv[]){

	int   i,
		  numtasks, rank;
	int tag=1;
	float a[16] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0};
	float b[SIZE];
	int blockcounts[2] ={4, 2};
	MPI_Datatype oldtypes[2] = {MPI_FLOAT, MPI_INT};
	int offsets[2];
	MPI_Aint extent;
	MPI_Status status;
	MPI_Datatype particletype;
	Particle particles[NELEMENTS], p[NELEMENTS];

	MPI_Init(&argc,&argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank); 
	MPI_Comm_size(MPI_COMM_WORLD, &numtasks);


	MPI_Type_extent(MPI_FLOAT,&extent);
	offsets[0]=0;
	offsets[1]=4*extent;
	MPI_Type_struct(2,blockcounts,offsets,oldtypes,&particletype);
	MPI_Type_commit(&particletype);



	if (rank == RANK_MASTER) {

		for(i=0;i<NELEMENTS;i++){

			particles[i].x = 1;
			particles[i].y = 2;
			particles[i].z = 3;
			particles[i].velocity = 4;
			particles[i].n = 5;
			particles[i].type = 6;

		}


		for (i=0; i<numtasks; i++)
			MPI_Send(particles, NELEMENTS, particletype, i, tag, MPI_COMM_WORLD);
	}

	MPI_Recv(p, NELEMENTS, particletype, 0, tag, MPI_COMM_WORLD, &status);

	for(i=0;i<NELEMENTS;i++)
		printf("RANK #%d: %.1f %.1f %.1f %.1f %d %d\n", rank,p[i].x,
     p[i].y,p[i].z,p[i].velocity,p[i].n,p[i].type);


	MPI_Type_free(&particletype);

	MPI_Finalize();
	return 0;

}  /* end of main */
Developer: bertuccio, Project: ASP, Lines: 59, Source: structType.c



Note: The MPI_Type_struct examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors. For redistribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.

