• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python numpy.recarray函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中numpy.recarray函数的典型用法代码示例。如果您正苦于以下问题:Python recarray函数的具体用法?Python recarray怎么用?Python recarray使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了recarray函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: _get_subheaders

 def _get_subheaders(self):
     """Retrieve all subheaders and return a list of subheader recarrays.

     Returns
     -------
     subheaders : list of np.recarray
         One 0-d recarray per frame, decoded with the file's byte order.
     """
     # Use a dtype matching the file's byte order so fields decode correctly.
     # BUG FIX: original used ``not ... is``; ``is not`` is the idiom.
     dt = self._subhdrdtype
     if self.endianness is not native_code:
         dt = self._subhdrdtype.newbyteorder(self.endianness)

     def read_subheader(block_no):
         # Block numbers are 1-based; each subheader occupies one 512-byte
         # block.  (The original also did a redundant seek(0) first.)
         self.fileobj.seek((int(block_no) - 1) * 512)
         raw = self.fileobj.read(512)
         return np.recarray(shape=(), dtype=dt, buf=raw)

     subheaders = []
     if self._header['num_frames'] > 1:
         for item in self._mlist._mlist:
             if item[1] == 0:
                 break
             # copy() detaches the recarray from the raw read buffer
             subheaders.append(read_subheader(item[1]).copy())
     else:
         subheaders.append(read_subheader(self._mlist._mlist[0][1]))
     return subheaders
开发者ID:GaelVaroquaux,项目名称:nibabel,代码行数:29,代码来源:ecat.py


示例2: test_usecase1

    def test_usecase1(self):
        """Compiled and interpreted versions must mutate records identically."""
        pyfunc = usecase1

        # This is an unaligned dtype: three packed float64 fields.
        point_dt = numpy.dtype([('p', numpy.float64),
                                ('row', numpy.float64),
                                ('col', numpy.float64)])
        point_ty = numpy_support.from_dtype(point_dt)

        cres = compile_isolated(pyfunc, (point_ty[:], point_ty[:]))
        cfunc = cres.entry_point

        def make_records():
            # 3-element recarray with every field set to 1, 2, 3
            rec = numpy.recarray(3, dtype=point_dt)
            seq = numpy.arange(rec.size) + 1
            rec.p = seq
            rec.row = seq
            rec.col = seq
            return rec

        st1 = make_records()
        st2 = make_records()

        expect1, expect2 = st1.copy(), st2.copy()
        got1, got2 = expect1.copy(), expect2.copy()

        pyfunc(expect1, expect2)
        cfunc(got1, got2)

        self.assertTrue(numpy.all(expect1 == got1))
        self.assertTrue(numpy.all(expect2 == got2))
开发者ID:EGQM,项目名称:numba,代码行数:34,代码来源:test_recarray_usecases.py


示例3: __init__

    def __init__(self, analyzer):
        """Allocate the record arrays that hold the analyzer's per-frame state."""
        num_frames = analyzer.frames_in_flight

        frame_w, frame_h = analyzer.camera.frame_size
        channels = analyzer.camera.channels

        rect_w, rect_h = analyzer.rectifier.image_size

        # one sub-field per rod, named "<type>_<team>"
        rod_fields = [
            ("%s_%s" % (rod.type.name, rod.team.name), analyzer.rod_analyzer[i].dtype)
            for i, rod in enumerate(analyzer.table.rods)
        ]

        self.frames = numpy.recarray(num_frames, [
            ("index", "u4"),
            ("timestamp", "f8"),
            ("image", "u1", (frame_h, frame_w, channels)),
            ("image_f", "f4", (frame_h, frame_w, channels)),
            ("table", analyzer.table_tracker.dtype),
            ("rectification", "f4", (rect_h, rect_w, channels)),
            ("background", analyzer.background_analyzer.dtype),
            ("team_foosmen", analyzer.team_foosmen_analyzer.dtype, len(analyzer.table.teams)),
            ("ball", analyzer.ball_analyzer.dtype),
            ("rod", rod_fields)
        ])

        # scalar (0-d) record describing the estimated background
        self.background = numpy.recarray((), [
            ("color_mean", "f4", (rect_h, rect_w, channels)),
            ("variance", "f8", (channels, channels)),
            ("q_estimation", "f4", (rect_h, rect_w))
        ])

        # one color model per team
        self.team_foosmen = numpy.recarray(len(analyzer.table.teams), [
            ("color_mean", "f4", (rect_h, rect_w, channels)),
            ("variance", "f8", (channels, channels)),
        ])
开发者ID:subotto,项目名称:subtracker,代码行数:33,代码来源:analysis.py


示例4: test_structured_arrays

    def test_structured_arrays(self):
        """typeof() on structured arrays yields Array types with record dtypes."""
        def check(arr, dtype, ndim, layout, aligned):
            ty = typeof(arr)
            self.assertIsInstance(ty, types.Array)
            self.assertEqual(ty.dtype, dtype)
            self.assertEqual(ty.ndim, ndim)
            self.assertEqual(ty.layout, layout)
            self.assertEqual(ty.aligned, aligned)

        # unaligned record dtype: plain ndarray and recarray behave the same
        dtype = np.dtype([('m', np.int32), ('n', 'S5')])
        rec_ty = numpy_support.from_struct_dtype(dtype)
        for arr in (np.empty(4, dtype=dtype), np.recarray(4, dtype=dtype)):
            check(arr, rec_ty, 1, "C", False)

        # aligned record dtype
        dtype = np.dtype([('m', np.int32), ('n', 'S5')], align=True)
        rec_ty = numpy_support.from_struct_dtype(dtype)

        # On Numpy 1.6, align=True doesn't align the itemsize
        actual_aligned = numpy_support.version >= (1, 7)

        for arr in (np.empty(4, dtype=dtype), np.recarray(4, dtype=dtype)):
            check(arr, rec_ty, 1, "C", actual_aligned)
开发者ID:GaZ3ll3,项目名称:numba,代码行数:27,代码来源:test_typeof.py


示例5: addfield

def addfield(mrecord, newfield, newfieldname=None):
    """Adds a new field to the masked record array, using `newfield` as data
and `newfieldname` as name. If `newfieldname` is None, the new field name is
set to 'fi', where `i` is the number of existing fields.

    Parameters
    ----------
    mrecord : MaskedRecords
        The masked record array to extend (not modified in place).
    newfield : array_like
        Data for the new field; converted with `ma.array`.
    newfieldname : str, optional
        Name of the new field; auto-generated when None or reserved.

    Returns
    -------
    MaskedRecords
        A new masked record array with the extra field appended.
    """
    _data = mrecord._data
    _mask = mrecord._mask
    if newfieldname is None or newfieldname in reserved_fields:
        newfieldname = 'f%i' % len(_data.dtype)
    newfield = ma.array(newfield)
    # Get the new data ............
    # Create a new empty recarray
    newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
    newdata = recarray(_data.shape, newdtype)
    # Copy the existing fields.
    # IDIOM FIX: use plain for-loops rather than list comprehensions
    # executed purely for their side effects.
    for f in _data.dtype.fields.values():
        newdata.setfield(_data.getfield(*f), *f)
    # Add the new field
    newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
    newdata = newdata.view(MaskedRecords)
    # Get the new mask .............
    # Create a new empty recarray with one boolean column per field
    newmdtype = np.dtype([(n, bool_) for n in newdtype.names])
    newmask = recarray(_data.shape, newmdtype)
    # Copy the old masks
    for f in _mask.dtype.fields.values():
        newmask.setfield(_mask.getfield(*f), *f)
    # Add the mask of the new field
    newmask.setfield(getmaskarray(newfield),
                     *newmask.dtype.fields[newfieldname])
    newdata._mask = newmask
    return newdata
开发者ID:arthornsby,项目名称:numpy,代码行数:32,代码来源:mrecords.py


示例6: test_multiple_args_records

    def test_multiple_args_records(self):
        """Negative values passed for uint64 parameters must raise."""
        pyfunc = foobar

        mystruct_dt = np.dtype([('p', np.float64),
                                ('row', np.float64),
                                ('col', np.float64)])
        mystruct = numpy_support.from_dtype(mystruct_dt)

        cres = compile_isolated(pyfunc, [mystruct[:], types.uint64, types.uint64],
                return_type=mystruct[:])
        cfunc = cres.entry_point

        st1 = np.recarray(3, dtype=mystruct_dt)
        st2 = np.recarray(3, dtype=mystruct_dt)
        for rec in (st1, st2):
            seq = np.arange(rec.size) + 1
            rec.p = seq
            rec.row = seq
            rec.col = seq

        test_fail_args = ((st1, -1, st2), (st1, st2, -1))

        # Python 2.6 raises TypeError instead of OverflowError
        expected_exc = OverflowError if sys.version_info >= (2, 7) else TypeError

        # BUG FIX: the original wrapped the whole loop in a single
        # assertRaises block, so the loop aborted at the first raising call
        # and the second argument combination was never exercised.
        for a, b, c in test_fail_args:
            with self.assertRaises(expected_exc):
                cfunc(a, b, c)
开发者ID:genba,项目名称:numba,代码行数:34,代码来源:test_conversion.py


示例7: events

 def events(self):
     '''Return a rec array with all events of this channel (lazily built).'''
     # BUG FIX: the cached value is a recarray, and ``recarray == None``
     # performs an elementwise comparison (ambiguous truth value on modern
     # numpy); an identity check is what was intended.
     if self.__events is None:
         # the event data from where to extract
         data = self.__eventdata._data

         # select all events for this channel
         eventmask = data['chid'] == self.__index

         # if there are no events at all, cache and return an empty recarray
         if len(data) == 0:
             self.__events = numpy.recarray(shape=(0,),
                                            dtype=[('t', '<f8'), ('states', self.__model.state_type())])
             return self.__events

         # always include the first and last frame
         eventmask[0] = True
         eventmask[-1] = True

         # create recarray that stores the events of this channel
         self.__events = numpy.recarray(shape=(eventmask.sum(),),
                                        dtype=[('t', '<f8'), ('states', self.__model.state_type())])

         # copy time and the subspace of the state column for this channel
         # (for the DYK model the state slice would instead be
         # data[eventmask]['states'][:, self.__index, :])
         self.__events['t'] = data[eventmask]['t']
         self.__events['states'] = data[eventmask]['states'][:, self.clusterindex(), self.__index]

     return self.__events
开发者ID:aolsux,项目名称:calcdyn-python,代码行数:30,代码来源:channel.py


示例8: Xi2_line_ratios

def Xi2_line_ratios(obs_ratios, arxvPDR):
    '''Compute the Xi2 statistic given the observed line ratios and a PDR arxv.

    Returns a recarray with fields x, y, z (grid coordinates), t (the Av)
    and v (the Xi2 value), restricted to entries where v is finite.
    '''
    dtype = [('x', 'f8'), ('y', 'f8'), ('z', 'f8'), ('t', 'f8'), ('v', 'f8'), ]

    # 0-d seed entry; stripped again below (.. todo:: avoid this dummy entry)
    allData = numpy.recarray([], dtype)

    models = {}

    specStrs, codes = obs_ratios.species_and_codes()

    # Collect all the line intensities of the ratios involved in the
    # observations (obs_ratios) from the model database, one Av at a time.
    for i, AvStr in enumerate(arxvPDR.radexDbs):

        Av = numpy.float64(AvStr)

        # array which will hold the grid points and the values for this Av
        data = numpy.recarray((arxvPDR.nMeshes), allData.dtype.descr)

        # emissions for each line from the PDR database for all models at this Av
        for code in codes:
            models[code] = 10.0**arxvPDR.get_emissions_from_databases(line={'type':'radex-lvg', 'code':code}, Av_use=Av)

        # Xi2 for all the models at this Av
        Xi2 = numpy.zeros(arxvPDR.nMeshes, 'f8')

        # accumulate the Xi2 contribution of every observed ratio
        for obs_ratio in obs_ratios:

            # the line codes involved in this ratio
            code1, code2 = codes_from_ratio(obs_ratio)

            # the ratios for all the models at this Av for this line ratio
            model_ratio = models[code1] / models[code2]

            f_o = obs_ratios[obs_ratio]['v']
            f_e = obs_ratios[obs_ratio]['e']
            f_m = model_ratio

            Xi2 += ((f_m - f_o)/f_e)**2.0

        data.x = arxvPDR.grid_x
        data.y = arxvPDR.grid_y
        data.z = arxvPDR.grid_z
        data.t = Av
        data.v = Xi2

        allData = numpy.hstack((allData, data))

    # removing the dummy first entry
    allData = allData[1::]

    # filtering out the points which have Nans
    # BUG FIX: removed the unreachable ``return allData[1::]`` that
    # followed this return in the original.
    inds_not_nan = numpy.where(numpy.isfinite(allData['v']))

    return allData[inds_not_nan]
开发者ID:mherkazandjian,项目名称:ismcpak,代码行数:60,代码来源:line_ratio_utils.py


示例9: events

    def events(self):
        """Return a recarray with all events of this cluster (lazily built)."""
        # IDIOM FIX: ``isinstance(x, types.NoneType)`` is Python-2 only and
        # obscures intent; the identity check is equivalent and portable.
        if self.__events is None:
            # the indices of the channels within this cluster
            cidx = [channel.index() for channel in self]
            # the eventdata from where to extract
            data = self.__eventdata._data

            # select all events for this cluster
            eventmask = data['clid'] == self.__index
            # always include first and last frame
            eventmask[0] = True
            eventmask[-1] = True

            # create recarray that stores the events of this cluster;
            # the state column layout depends on the model
            if self.__model == dyk:
                self.__events = numpy.recarray(shape = (eventmask.sum()),dtype = [('t', '<f8'), ('noch', '<i2'), ('chid', int), ('states', '|i1', (len(cidx), 8))])
            elif self.__model == deterministic:
                self.__events = numpy.recarray(shape = (eventmask.sum()),dtype = [('t', '<f8'), ('noch', '<i2'), ('chid', int), ('states', bool, (len(cidx),))])

            # copy time, chid and the subspace of the state column
            self.__events['t'] = data[eventmask]['t']
            self.__events['chid'] = data[eventmask]['chid']
            self.__events['states'] = data[eventmask]['states'][:, self.__index, ...]

            # cache the number of open channels
            model = self.__eventdata.model()
            self.__events['noch'] = model.open(self.__events).sum(-1)

        return self.__events
开发者ID:aolsux,项目名称:calcdyn-python,代码行数:30,代码来源:cluster.py


示例10: selectOnSharpeRatio

    def selectOnSharpeRatio(self, ls_symbols, top_n_equities=10):
        ''' Choose the best portfolio over the stock universe,
        according to their sharpe ratio'''
        # NOTE(review): this method looks unfinished (see the TODOs below):
        # `data`, `file` and `dailyReturns` are referenced in the loop but
        # never defined in this scope, so the loop body raises NameError as
        # written -- confirm against the project history before relying on it.
        #TODO: change this to a DataAccess utilitie --------------
        symbols, files = getAllFromCSV()
        # Length of the first CSV's close-price series; used to size all arrays,
        # implicitly assuming every CSV has the same length -- TODO confirm.
        datalength = len(recfromcsv(files[0])['close'])
        print('Datalength: {}'.format(datalength))
        #---------------------------------------------------------
        #Initializing data arrays: one float column per symbol.
        closes = np.recarray((datalength,), dtype=[(symbol, 'float') for symbol in symbols])
        daily_ret = np.recarray((datalength - 1,), dtype=[(symbol, 'float') for symbol in symbols])
        average_returns = np.zeros(len(files))
        return_stdev = np.zeros(len(files))
        sharpe_ratios = np.zeros(len(files))
        # NOTE(review): cumulative_returns is allocated but never filled or read.
        cumulative_returns = np.recarray((datalength-1,), dtype=[(symbol, 'float') for symbol in symbols])

        # Here is the meat
        #TODO: data = dataobj.getData(ls_symbols)
        for i, symbol in enumerate(ls_symbols):
            # skip series whose length doesn't match the common length
            if len(data) != datalength:
                continue
            print('Processing {} file'.format(file))
            # closes are stored newest-first in the CSV, hence the reversal
            closes[symbols[i]] = data['close'][::-1]
            daily_ret[symbols[i]] = dailyReturns()
            # We now can compute:
            average_returns[i] = daily_ret[symbols[i]].mean()
            return_stdev[i] = daily_ret[symbols[i]].stdev()
            # annualization factor sqrt(datalength) -- presumably trading days;
            # verify against the course formula referenced below.
            sharpe_ratios[i] = (average_returns[i] / return_stdev[i]) * np.sqrt(datalength)   # compare to course
            print('\tavg: {}, stdev: {}, sharpe ratio: {}'.format(average_returns[i], return_stdev[i], sharpe_ratios[i]))

        # indices of the top_n_equities symbols, best Sharpe ratio first
        sorted_sharpe_indices = np.argsort(sharpe_ratios)[::-1][0:top_n_equities]
        #TODO: return a disct as {symbol: sharpe_ratio}, or a df with all 3 components
        return sorted_sharpe_indices
开发者ID:Mark1988huang,项目名称:ppQuanTrade,代码行数:33,代码来源:portfolio.py


示例11: test_add_data_then_read

 def test_add_data_then_read(self):
     """Two rows added one at a time must both be readable from the backend."""
     row_dtype = [("f0", "<f8"), ("f1", "<f8"), ("f2", "<f8")]
     first = np.recarray((1,), dtype=row_dtype)
     first[0] = (1, 2, 3)
     second = np.recarray((1,), dtype=row_dtype)
     second[0] = (4, 5, 6)
     self.data.addData(first)
     self.data.addData(second)
     self.assert_data_in_backend(self.data, [[1, 2, 3], [4, 5, 6]])
开发者ID:amopremcak,项目名称:servers,代码行数:8,代码来源:test_backend.py


示例12: create_neurohdf_file

def create_neurohdf_file(filename, data):
    """Write microcircuit vertex and connectivity data to a NeuroHDF file.

    Parameters
    ----------
    filename : str
        Path of the HDF5 file to create (overwritten if it exists).
    data : dict
        Must contain 'vert' and 'conn' dicts of column arrays; 'conn' may
        optionally carry 'type' and 'skeletonid'; an optional 'meta' dict
        maps skeleton ids to names.
    """
    # dtype for the (value, name) legend recarrays stored as HDF5 attributes
    # NOTE(review): h5py.new_vlen is the legacy variable-length-string API;
    # newer h5py uses h5py.special_dtype / string_dtype -- confirm version.
    my_dtype = np.dtype([('value', 'l'), ('name', h5py.new_vlen(str))])

    def recarray_from_items(mapping, dtype):
        # Build a two-column recarray from a dict; shared by the three
        # places that previously duplicated this loop.
        arr = np.recarray(len(mapping), dtype=dtype)
        for i, kv in enumerate(mapping.items()):
            arr[i][0] = kv[0]
            arr[i][1] = kv[1]
        return arr

    with closing(h5py.File(filename, 'w')) as hfile:
        hfile.attrs['neurohdf_version'] = '0.1'
        mcgroup = hfile.create_group("Microcircuit")
        mcgroup.attrs['node_type'] = 'irregular_dataset'
        vert = mcgroup.create_group("vertices")
        conn = mcgroup.create_group("connectivity")

        vert.create_dataset("id", data=data['vert']['id'])
        vert.create_dataset("location", data=data['vert']['location'])
        verttype = vert.create_dataset("type", data=data['vert']['type'])
        # legend attribute mapping vertex-type ids to their names
        verttype.attrs['value_name'] = recarray_from_items({
            VerticesTypeSkeletonRootNode['id']: VerticesTypeSkeletonRootNode['name'],
            VerticesTypeSkeletonNode['id']: VerticesTypeSkeletonNode['name'],
            VerticesTypeConnectorNode['id']: VerticesTypeConnectorNode['name']
        }, my_dtype)

        vert.create_dataset("confidence", data=data['vert']['confidence'])
        vert.create_dataset("userid", data=data['vert']['userid'])
        vert.create_dataset("radius", data=data['vert']['radius'])
        vert.create_dataset("skeletonid", data=data['vert']['skeletonid'])
        vert.create_dataset("creation_time", data=data['vert']['creation_time'])
        vert.create_dataset("modification_time", data=data['vert']['modification_time'])

        conn.create_dataset("id", data=data['conn']['id'])
        # IDIOM FIX: dict.has_key() is Python-2 only; ``in`` works on both.
        if 'type' in data['conn']:
            conntype = conn.create_dataset("type", data=data['conn']['type'])
            conntype.attrs['value_name'] = recarray_from_items({
                ConnectivityNeurite['id']: ConnectivityNeurite['name'],
                ConnectivityPresynaptic['id']: ConnectivityPresynaptic['name'],
                ConnectivityPostsynaptic['id']: ConnectivityPostsynaptic['name']
            }, my_dtype)

        if 'skeletonid' in data['conn']:
            conn.create_dataset("skeletonid", data=data['conn']['skeletonid'])

        if 'meta' in data:
            metadata = mcgroup.create_group('metadata')
            # recarray with two columns, skeletonid and name
            meta_dtype = np.dtype([('skeletonid', 'l'), ('name', h5py.new_vlen(str))])
            metadata.create_dataset('skeleton_name',
                                    data=recarray_from_items(data['meta'], meta_dtype))
开发者ID:RussTorres,项目名称:CATMAID,代码行数:57,代码来源:neurohdf.py


示例13: __init__

 def __init__(self, stid, nlat, elon, elev):
     """Store station metadata and allocate the measurement record arrays."""
     self.stid = stid
     self.nlat = nlat
     self.elon = elon
     self.elev = elev
     # Measured data: one record per sample, same layout for both arrays.
     measurement_dtype = {"names": ("flux", "sun_alt", "moon_phase"),
                          "formats": (np.int64, np.float64, np.float64)}
     self.datat = np.recarray((NPTSt,), dtype=measurement_dtype)
     self.datap = np.recarray((NPTSp,), dtype=measurement_dtype)
开发者ID:acbecker,项目名称:solar,代码行数:10,代码来源:regress7.py


示例14: make_polynomial_psf_params

def make_polynomial_psf_params(ntrain, nvalidate, nvisualize):
    """ Make training/testing data for PSF with params varying as polynomials.

    Parameters
    ----------
    ntrain, nvalidate : int
        Number of training and validation stars.
    nvisualize : int
        Side length of the regular visualization grid.

    Returns
    -------
    (training_data, validate_data, vis_data) : recarrays of dtype star_type;
        vis_data is reshaped to (nvisualize, nvisualize).
    """
    def params_at(u, v, coefs):
        # Evaluate the 2-d Chebyshev series at (u, v); /6 maps the values
        # into roughly [-0.5, 0.5], then each param is scaled/shifted into
        # its own range.  Shared by the three loops below, which previously
        # duplicated these five lines.
        vals = np.polynomial.chebyshev.chebval2d(u, v, coefs)/6
        hlr = vals[0] * 0.1 + 0.35
        g1 = vals[1] * 0.1
        g2 = vals[2] * 0.1
        u0 = vals[3]
        v0 = vals[4]
        return hlr, g1, g2, u0, v0

    bd = galsim.BaseDeviate(5772156649+314159)
    ud = galsim.UniformDeviate(bd)

    training_data = np.recarray((ntrain,), dtype=star_type)
    validate_data = np.recarray((nvalidate,), dtype=star_type)

    # Make randomish Chebyshev polynomial coefficients
    # 5 Different arrays (hlr, g1, g2, u0, v0), and up to 3rd order in each of x and y.
    coefs = np.empty((4, 4, 5), dtype=float)
    for (i, j, k), _ in np.ndenumerate(coefs):
        coefs[i, j, k] = 2*ud() - 1.0

    for i in range(ntrain):
        u = ud()
        v = ud()
        flux = ud()*50+100
        hlr, g1, g2, u0, v0 = params_at(u, v, coefs)
        training_data[i] = (u, v, hlr, g1, g2, u0, v0, flux)

    for i in range(nvalidate):
        # validation stars are confined to the central quarter of the field
        u = ud()*0.5 + 0.25
        v = ud()*0.5 + 0.25
        flux = 1.0
        hlr, g1, g2, u0, v0 = params_at(u, v, coefs)
        validate_data[i] = (u, v, hlr, g1, g2, u0, v0, flux)

    # visualization stars on a regular grid with unit flux
    vis_data = np.recarray((nvisualize*nvisualize), dtype=star_type)
    u = v = np.linspace(0, 1, nvisualize)
    u, v = np.meshgrid(u, v)
    for i, (u1, v1) in enumerate(zip(u.ravel(), v.ravel())):
        hlr, g1, g2, u0, v0 = params_at(u1, v1, coefs)
        vis_data[i] = (u1, v1, hlr, g1, g2, u0, v0, 1.0)

    return training_data, validate_data, vis_data.reshape((nvisualize, nvisualize))
开发者ID:rmjarvis,项目名称:Piff,代码行数:52,代码来源:test_gp_interp.py


示例15: test_save_results

    def test_save_results(self):
        # test for 1d
        # test for 2d
        # test for 3d
        # test for very large
        
        nr_experiments = 10000
        experiments = np.recarray((nr_experiments,),
                               dtype=[('x', float), ('y', float)])
        outcome_a = np.random.rand(nr_experiments,1)
        
        results = (experiments, {'a': outcome_a})
    
        fn = u'../data/test.tar.gz'
        
        save_results(results, fn)
        os.remove(fn)
#         ema_logging.info('1d saved successfully')
        
        nr_experiments = 10000
        nr_timesteps = 100
        experiments = np.recarray((nr_experiments,),
                               dtype=[('x', float), ('y', float)])
        outcome_a = np.zeros((nr_experiments,nr_timesteps))
        
        results = (experiments, {'a': outcome_a})
        save_results(results, fn)
        os.remove(fn)
#         ema_logging.info('2d saved successfully')
     
     
        nr_experiments = 10000
        nr_timesteps = 100
        nr_replications = 10
        experiments = np.recarray((nr_experiments,),
                               dtype=[('x', float), ('y', float)])
        outcome_a = np.zeros((nr_experiments,nr_timesteps,nr_replications))
         
        results = (experiments, {'a': outcome_a})
        save_results(results, fn)
        os.remove(fn)
#         ema_logging.info('3d saved successfully')
        
        nr_experiments = 500000
        nr_timesteps = 100
        experiments = np.recarray((nr_experiments,),
                               dtype=[('x', float), ('y', float)])
        outcome_a = np.zeros((nr_experiments,nr_timesteps))
        
        results = (experiments, {'a': outcome_a})
        save_results(results, fn)
        os.remove(fn)
开发者ID:JamesPHoughton,项目名称:EMAworkbench,代码行数:52,代码来源:test_utilities.py


示例16: test_record_write_2d_array

    def test_record_write_2d_array(self):
        """
        Test writing to a 2D array within a structured type
        """
        nbrecord = numpy_support.from_dtype(recordwith2darray)
        got = np.recarray(1, dtype=recordwith2darray)
        cfunc = self.get_cfunc(record_write_2d_array, (nbrecord,))
        cfunc(got[0])

        # the compiled function should have filled in these exact values
        expected = np.recarray(1, dtype=recordwith2darray)
        expected[0].i = 3
        values = np.asarray([5.0, 6.0, 7.0, 8.0, 9.0, 10.0], np.float32)
        expected[0].j[:] = values.reshape(3, 2)
        np.testing.assert_equal(expected, got)
开发者ID:maartenscholl,项目名称:numba,代码行数:13,代码来源:test_record_dtype.py


示例17: test_record_write_array

    def test_record_write_array(self):
        '''
        Testing writing to a 1D array within a structured type
        '''
        nbrecord = numpy_support.from_dtype(recordwitharray)
        got = np.recarray(1, dtype=recordwitharray)
        cfunc = self.get_cfunc(record_write_array, (nbrecord,))
        cfunc(got[0])

        # the compiled function should have filled in these exact values
        expected = np.recarray(1, dtype=recordwitharray)
        expected[0].g = 2
        expected[0].h[:] = [3.0, 4.0]
        np.testing.assert_equal(expected, got)
开发者ID:PierreBizouard,项目名称:numba,代码行数:14,代码来源:test_record_dtype.py


示例18: _load

    def _load(self, maxentries=None):
        """Parse the listing file and populate the ``inc`` and ``cum``
        recarrays with incremental and cumulative budget entries.

        Parameters
        ----------
        maxentries : int, optional
            Maximum number of index entries to build; None reads everything.
        """
        self._build_index(maxentries)
        incdict, cumdict = self._set_entries()
        if incdict is None and cumdict is None:
            return
        totim = []
        for ts, sp, seekpoint in self.idx_map:
            tinc, tcum = self._get_sp(ts, sp, seekpoint)
            for entry in self.entries:
                incdict[entry].append(tinc[entry])
                cumdict[entry].append(tcum[entry])

            # Get the time for this record
            seekpoint = self._seek_to_string('TIME SUMMARY AT END')
            tslen, sptim, tt = self._get_totim(ts, sp, seekpoint)
            totim.append(tt)

        # get kstp and kper
        idx_array = np.array(self.idx_map)

        # build dtype for recarray
        dtype_tups = [('totim', np.float32), ("time_step", np.int32),
                      ("stress_period", np.int32)]
        for entry in self.entries:
            dtype_tups.append((entry, np.float32))
        dtype = np.dtype(dtype_tups)

        # create recarrays
        # FIX: don't rely on the loop variable ``entry`` leaking out of the
        # loops above -- every entry's list has the same length, so use the
        # first one explicitly.
        nentries = len(incdict[self.entries[0]])
        self.inc = np.recarray(shape=(nentries,), dtype=dtype)
        self.cum = np.recarray(shape=(nentries,), dtype=dtype)

        # fill each column of the recarray
        for entry in self.entries:
            self.inc[entry] = incdict[entry]
            self.cum[entry] = cumdict[entry]

        # fill the totim, time_step, and stress_period columns for the
        # incremental and cumulative recarrays (zero-based kstp, kper)
        self.inc['totim'] = np.array(totim)[:]
        self.inc["time_step"] = idx_array[:, 0] - 1
        self.inc["stress_period"] = idx_array[:, 1] - 1

        self.cum['totim'] = np.array(totim)[:]
        self.cum["time_step"] = idx_array[:, 0] - 1
        self.cum["stress_period"] = idx_array[:, 1] - 1

        return
开发者ID:brclark-usgs,项目名称:flopy,代码行数:48,代码来源:mflistfile.py


示例19: for_shape

 def for_shape(self, shape):
     """Create a zero-initialised Statistics object for the given shape."""
     field_dtype = [(name, typ) for name, typ in self._stats_fields]
     buf = np.recarray(shape, dtype=field_dtype)
     buf[:] = 0
     result = Statistics(data=buf)
     result._pointer = 0
     return result
开发者ID:naymen,项目名称:MELA,代码行数:7,代码来源:statistics.py


示例20: CfxCentreLineSnapshot

def CfxCentreLineSnapshot(filename):
    """Factory function wrapping a CFX snapshot.
    
    Load the data with:
    >>> snap = CfxSnapshot(filename)

    Fields are constructed from the header line; a synthetic integer 'id'
    column is prepended and every data column is converted from the file's
    units into hlb units.
    """
    (__raw_row, fieldUnits) = parseHeader(filename, AllData=True)
    __raw_row = [('id', int),] + __raw_row
    fieldUnits['id'] = 1  # dimensionless

    # dtype without the synthetic id column (what genfromtxt can read)
    __readable_row = np.dtype(__raw_row[1:])
    # full dtype including the id column
    row = np.dtype(__raw_row)

    noindex = np.genfromtxt(filename, skip_header=findStart(filename, AllData=True)+2,
                           delimiter=',',
                           dtype=__readable_row).view(np.recarray)
    index = np.recarray(shape=noindex.shape, dtype=row)
    index.id = np.arange(len(noindex))
    # convert each data column into hlb units
    # IDIOM FIX: use setattr/getattr instead of calling the dunder methods
    # directly; dropped the no-op trailing ``continue``.
    for el in __raw_row[1:]:
        key = el[0]
        setattr(index, key, U.convert(getattr(noindex, key), fieldUnits[key], hlbUnits[key]))

    return index
开发者ID:jenshnielsen,项目名称:hemelb,代码行数:34,代码来源:Cfx.py



注:本文中的numpy.recarray函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python numpy.recfromcsv函数代码示例发布时间:2022-05-27
下一篇:
Python numpy.real_if_close函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap