Python tables.openFile Function Code Examples

This article collects typical usage examples of the openFile function from the Python tables (PyTables) module. If you have been wondering what openFile does, how to call it, or what real-world openFile code looks like, the hand-picked examples below should help.



Below are 19 code examples of the openFile function, drawn from open-source projects and ordered roughly by popularity.
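
Before the examples, a minimal sketch of the function itself may help. tables.openFile is the PyTables 2.x entry point for opening or creating an HDF5 file; it returns a File object whose methods (createGroup, createArray, getNode, and so on) appear throughout the examples below. The file name 'demo.h5' is purely illustrative.

import tables

# Create a new HDF5 file, add a group and an array, then close it.
h5file = tables.openFile("demo.h5", mode="w", title="Demo file")
group = h5file.createGroup("/", "data", "Example group")
h5file.createArray(group, "values", [1, 2, 3], "Example array")
h5file.close()

# Reopen read-only and read the array back.
h5file = tables.openFile("demo.h5", mode="r")
print h5file.getNode("/data/values").read()   # -> [1 2 3]
h5file.close()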

Example 1: main

import argparse
import tables as tb

def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', dest='one_name', help='track file 1')
    parser.add_argument('-b', dest='two_name', help='track file 2')
    parser.add_argument('--atrack', dest='atrack', help="track name 1")
    parser.add_argument('--btrack', dest='btrack', help="track name 2")
    parser.add_argument('-o', dest='out_name', help='out track file')
    parser.add_argument('--floor', required=False, default=False)
    args = parser.parse_args()
    
    one = tb.openFile(args.one_name)
    two = tb.openFile(args.two_name)
    out = tb.openFile(args.out_name, 'a')
    atrack = args.atrack
    btrack = args.btrack
    two_track = two.getNode("/" + btrack)
    if atrack == "all":
        for one_track in one.iterNodes("/"):
            run(one_track, two_track, out, args.floor)
    else:
        one_track = one.getNode("/" + atrack)
        run(one_track, two_track, out, args.floor)

    out.flush()    
    out.close()
Author: bradleycolquitt | Project: seqAnalysis | Lines: 25 | Source: subTracks.py


Example 2: _write_image_tables

    def _write_image_tables(self,images,setname,descr):
        """
        Write images to file
        """
        import tables
        images = num.array(images)  # 'num' is numpy, imported at module level in the source
        fname  = self._make_fname()
        if os.path.exists(fname):
            h = tables.openFile(fname,mode="a")
            if not hasattr(h.root,'image_data'):
                h.createGroup(h.root,'image_data',"Image Data")
        else:
            h = tables.openFile(fname,mode="w",title="Scan Data Archive")
            root = h.createGroup(h.root, "image_data", "Image Data")
        # if setname == None:
        #   look under '/image_data' for 'SXXX',
        #   find the highest one and
        #   auto-generate the set name as the next in the sequence
        if hasattr(h.root.image_data,setname):
            print "Warning: Image Archive File '%s'" % fname
            print "-->Setname '%s' already exists, data is not overwritten\n" % setname
        else:
            h.createGroup('/image_data',setname,"Image Data")
            grp = '/image_data/' + setname
            h.createArray(grp,'images',images,descr)
        h.close()
Author: FHe | Project: tdl | Lines: 26 | Source: image_data.py


Example 3: test03b_Compare64EArray

    def test03b_Compare64EArray(self):
        "Comparing several written and read 64-bit time values in an EArray."

        # Create test EArray with data.
        h5file = tables.openFile(
                self.h5fname, 'w', title = "Test for comparing Time64 E arrays")
        ea = h5file.createEArray(
                '/', 'test', tables.Time64Atom(), shape=(0, 2))

        # Size of the test.
        nrows = ea.nrowsinbuf + 34  # Add some more rows than buffer.
        # Only for home checks; the value above should check better
        # the I/O with multiple buffers.
        ##nrows = 10

        for i in xrange(nrows):
            j = i * 2
            ea.append(((j + 0.012, j + 1 + 0.012),))
        h5file.close()

        # Check the written data.
        h5file = tables.openFile(self.h5fname)
        arr = h5file.root.test.read()
        h5file.close()

        orig_val = numpy.arange(0, nrows*2, dtype=numpy.int32) + 0.012
        orig_val.shape = (nrows, 2)
        if common.verbose:
            print "Original values:", orig_val
            print "Retrieved values:", arr
        self.assertTrue(allequal(arr, orig_val),
                        "Stored and retrieved values do not match.")
Author: andreas-h | Project: PyTables | Lines: 32 | Source: test_timetype.py


Example 4: ComputeTargetStateZhuRabitzExperiment

def ComputeTargetStateZhuRabitzExperiment(prop, numEigs=0, eigFile=None, \
	eigDataSet = "/eigenvector", outFileName = "zhu_rabitz_final_state.h5"):
	"""
	Zhu and Rabitz use a gaussian projection operator to characterize their target space,

	    P = gamma / sqrt(pi) * exp[-gamma**2 * (x - x')**2 ]

	This function computes |ZR> = sum(<i|P|i>|i>, i), where |i> is the i'th eigenfunction 
	of the Morse oscillator.
	"""

	#Setup Zhu-Rabitz operator
	ZhuRabitzOperator = eval(prop.Config.ZhuRabitzOperator.classname + "_1()")
	ZhuRabitzOperator.ApplyConfigSection(prop.Config.ZhuRabitzOperator)

	#Get tmpPsi
	tmpPsi = prop.psi.Copy()

	#Create vector to hold Zhu-Rabitz state
	ZhuRabitzState = numpy.zeros(numEigs, dtype=complex)

	h5file = tables.openFile(eigFile, "r")

	for i in range(numEigs):
		tmpPsi.Clear()
		prop.psi.GetData()[:] = h5file.getNode("%s%03i" % (eigDataSet, i))
		ZhuRabitzOperator.MultiplyPotential(prop.psi, tmpPsi, 0, 0)
		ZhuRabitzState[i] = prop.psi.InnerProduct(tmpPsi)
		
	outFile = tables.openFile(outFileName, "w")
	try:
		outFile.createArray("/", "ZhuRabitzFinalState", ZhuRabitzState)
	finally:
		outFile.close()
Author: AtomAleks | Project: PyProp | Lines: 34 | Source: example.py


Example 5: eigFieldValues

  def eigFieldValues(self, type, modename, pts, ptsName = None, saveDir = None):
    if saveDir != None:
      if saveDir[-1] != "/":
        saveDir += "/"

    res = []
    # If the user supplies a name and directory, search for existing data first.
    # Existing data must have the same dimensions as 'pts'.
    if saveDir != None and ptsName != None and havePyTables:
      filepath = saveDir + self._fieldPrefixes[type] + "_" + ptsName + "_eigVecs_" + modename + ".h5"
      if os.path.exists(filepath):
        h5 = tables.openFile(filepath, 'r')
        data = h5.root.data.read()
        h5.close()
        if len(data) == len(pts):
          return data

    for pt in pts:
      res.append(self.eigFieldValue(type, modename, pt))
    res = numpy.asarray(res)

    # save if name and directory given
    if saveDir != None and ptsName != None and havePyTables:
      h5 = tables.openFile(filepath, 'w')
      h5.createArray("/", "data", res)
      h5.close()

    return res
Author: bauerca | Project: maxwell | Lines: 28 | Source: mx.py


Example 6: main

import tables
from os import path

# decimate_waveform() and update_progress() are helpers defined elsewhere in
# the source file.
def main(infile, dec_fs=600, outfile_suffix='dec', force_overwrite=False):
    fh_in = tables.openFile(infile, 'r')
    if fh_in.root._g_getnchildren() == 1:
        print 'Processing {}'.format(infile)
        outfile = infile.replace('raw', outfile_suffix)
        if path.exists(outfile) and not force_overwrite:
            raise IOError, '{} already exists'.format(outfile)
        fh_out = tables.openFile(outfile, 'w')

        output_node = fh_out.root
        input_node = fh_in.root._f_listNodes()[0]

        decimate_waveform(input_node, 
                          output_node,
                          source_fs=input_node._v_attrs['fs'],
                          dec_fs=dec_fs,
                          progress_callback=update_progress)

        # Add some extra metadata to the output node to help us in tracking
        # where the data came from
        output_node._v_attrs['source_file'] = infile
        output_node._v_attrs['source_pathname'] = input_node._v_pathname

        fh_out.close()
        fh_in.close()
    else:
        mesg = "Unable to process {}".format(infile)
        raise ValueError, mesg
Author: bburan | Project: NeuroBehavior | Lines: 28 | Source: decimate.py


Example 7: __init__

    def __init__( self, file_name, file_mode, compression_level=1, compression_lib='zlib' ):
        '''Constructor
                    
        For compatibility it is recommended that the compression values are left at their defaults.

        Arguments:
        file_name -- Path to file
        file_mode -- How the file should be opened, i.e. r, w, a, r+
        compression_level -- Level of compression to use, from 1 to 9
        compression_lib -- Compression library to use; see the PyTables docs for options.
        '''
        compression_filters = Filters( complevel=compression_level, complib=compression_lib )

        if file_mode == "w":
            self._file_handle = openFile( file_name, file_mode, filters=compression_filters )
            self._data_group = self._file_handle.createGroup( "/", "data" )
            self._parameters_group = self._file_handle.createGroup( "/", "parameters" )
            self._priors_group = self._file_handle.createGroup( "/", "priors" )
            
            self._file_handle.setNodeAttr( '/', 'creation_date', time.ctime() )
        else:
            self._file_handle = openFile( file_name, file_mode )
            self._data_group = self._file_handle.root.data
            self._parameters_group = self._file_handle.root.parameters
            self._priors_group = self._file_handle.root.priors

        self._init_entries()

        self._init_chr_tables()
Author: aroth85 | Project: joint-snv-mix.release | Lines: 29 | Source: jmm.py


Example 8: get_node

def get_node(org, mode):
    # Get the parent group node in the h5 file.
    # (H5 is a module-level path constant defined elsewhere in the source.)
    if mode == 'w':
        h5 = tables.openFile(H5, mode='a')
        if org in h5.root:
            action = raw_input(\
               """%s copy counts exist in %s. what to do [d/a/u]?
                   'd': delete them and create new copy-counts
                   'a': abort
                   'u': use the existing copy-counts
                you can use the existing counts if the blast is unchanged."""
                               % (org, H5))[0].lower()
            if action == 'd':
                getattr(h5.root, org)._f_remove(recursive=True)
                h5.flush()
            elif action == 'u':
                h5.close()
                return None, None
            else:
                print('ABORT: %s already exists in %s' % (org, H5))
                h5.close(); sys.exit()
        return h5, h5.createGroup(h5.root, org, org)
    else:
        h5 = tables.openFile(H5, mode='r')
        return h5, getattr(h5.root, org)
Author: gturco | Project: find_cns | Lines: 25 | Source: mask_genome.py


Example 9: populate_R_data

    def populate_R_data(self):
        print "Collecting R Data for "+str(self.driftFNs[0].run)+'...'
        self.r_blue=np.zeros((self.beammap.shape[0],self.beammap.shape[1],len(self.driftFNs)))
        for i in range(len(self.driftFNs)):
            try:
                calFile=tables.openFile(self.driftFNs[i].calSoln(),mode='r')
                cal_row = calFile.root.wavecal.calsoln.cols.pixelrow[:]    
                cal_col = calFile.root.wavecal.calsoln.cols.pixelcol[:]    
                cal_params = calFile.root.wavecal.calsoln.cols.polyfit[:]
                cal_sigma = calFile.root.wavecal.calsoln.cols.sigma[:]
            except:
                print '\tUnable to open: '+self.driftFNs[i].calSoln()
                return
            try:
                driftFile=tables.openFile(self.driftFNs[i].calDriftInfo(),mode='r')
                drift_row = driftFile.root.params_drift.driftparams.cols.pixelrow[:]    
                drift_col = driftFile.root.params_drift.driftparams.cols.pixelcol[:]    
                drift_params = driftFile.root.params_drift.driftparams.cols.gaussparams[:]
            except:
                print '\tUnable to open: '+self.driftFNs[i].calDriftInfo()
                return
            for k in range(len(cal_sigma)):
                if cal_sigma[k]>0:
                    drift_ind = np.where((drift_row==cal_row[k]) * (drift_col==cal_col[k]))[0][0]
                    peak_fit = drift_params[drift_ind]
                    blue_energy = (parabola(cal_params[k],x=np.asarray([peak_fit[1]]),return_models=True))[0][0]
                    self.r_blue[cal_row[k],cal_col[k],i]=blue_energy/(self.params['fwhm2sig']*cal_sigma[k])
            calFile.close()
            driftFile.close()

        print "\tDone."
Author: RupertDodkins | Project: ARCONS-pipeline-1 | Lines: 31 | Source: drift_diagnostics.py


Example 10: merge

import tables

def merge(out, fnames):
    data = tables.openFile(out, mode='a')

    for fname in fnames:
        f = tables.openFile(fname, mode='r')
        raw_targets = f.root.denseFeat
        
        if 'denseFeat' in data.root:
            prev_data = data.root.denseFeat
            targets = data.createCArray(data.root, '_y', atom=tables.Float32Atom(), shape=((raw_targets.shape[0]+prev_data.shape[0],436)))
            targets[:prev_data.shape[0],:] = prev_data[:,:]
            targets[prev_data.shape[0]:,:] = raw_targets[:,:]
            data.flush()
            data.removeNode(data.root, "denseFeat", 1)
        else:
            targets = data.createCArray(data.root, '_y', atom=tables.Float32Atom(), shape=((raw_targets.shape[0],436)))
            targets[:,:] = raw_targets[:,:]
            data.flush()

        data.renameNode(data.root, "denseFeat", "_y")
        data.flush()

        f.close()

    data.close()
Author: AtousaTorabi | Project: HumanActivityRecognition | Lines: 26 | Source: merge_hdf.py


Example 11: __init__

    def __init__(self, path_to_arc):  # spelled `_init__` in the source, which Python would never call; fixed here
        """
        :Parameters:
            path_to_arc : str
                Path to the hdf5 archive in the local file system.
        """

        # get handle to archive
        try:
            self._arc = openFile(path_to_arc, 'r')
        except:
            self._arc = openFile(path_to_arc, 'w')
        self._grp_config = None
        self._grp_ndata = None
        self._grp_scene = None

        # establish main structure
        if '/__TYPE__' not in self._arc:
            self._arc.createArray(self._arc.root, '__TYPE__', 'SCENE_ARCHIVE')
        if self._has_main_grp('CONFIG'):
            self._grp_config = self._get_main_grp('CONFIG')
        else:
            self._grp_config = self._arc.createGroup(self._arc.root, 'CONFIG')
            self._arc.createArray(self._grp_config, '__TYPE__', 'CONFIG')
        if self._has_main_grp('NEURON_DATA'):
            self._grp_ndata = self._get_main_grp('NEURON_DATA')
        else:
            self._grp_ndata = self._arc.createGroup(self._arc.root, 'NEURON_DATA')
            self._arc.createArray(self._grp_ndata, '__TYPE__', 'NEURON_DATA')
        if self._has_main_grp('SCENE'):
            self._grp_scene = self._get_main_grp('SCENE')
        else:
            self._grp_scene = self._arc.createGroup(self._arc.root, 'SCENE')
            self._arc.createArray(self._grp_scene, '__TYPE__', 'SCENE')
Author: mtambos | Project: Neural-Simulation | Lines: 34 | Source: scene_generator.py


Example 12: modeToVtk

def modeToVtk(resolution, modename, pathToH5Utils = ""):
  import tables

  delta = 0.5 / float(resolution)

  ex = numpy.zeros(3 * (resolution, ), dtype = 'd')
  ey = numpy.zeros(3 * (resolution, ), dtype = 'd')
  ez = numpy.zeros(3 * (resolution, ), dtype = 'd')

  pol, n, l, m, phase = strToMode(modename)
  print "Saving " + modename + " field"
  #print "Saving TM%d%d%d field" % (n, l, m)
  fx = "elecField_" + modename + "_x.h5"
  fy = "elecField_" + modename + "_y.h5"
  fz = "elecField_" + modename + "_z.h5"
  fvtk = "elecField_" + modename + ".vtk"
  h5x = tables.openFile(fx, 'w')
  h5y = tables.openFile(fy, 'w')
  h5z = tables.openFile(fz, 'w')

  pts = numpy.mgrid[0:0.5:delta, 0:0.5:delta, 0:0.5:delta]
  pts = numpy.rollaxis(pts, 0, pts.ndim)
  field = efield(pol, n, l, m, phase, pts)

  h5x.createArray("/", "data", field[...,0])
  h5y.createArray("/", "data", field[...,1])
  h5z.createArray("/", "data", field[...,2])
  h5x.close()
  h5y.close()
  h5z.close()
  # now use h5tovtk to get vtk file
  os.system(pathToH5Utils + "h5tovtk -o %s %s %s %s" % (fvtk, fx, fy, fz))
Author: bauerca | Project: maxwell | Lines: 32 | Source: dsphmsph.py


Example 13: __init__

    def __init__(self, output_dir, chrom_list):
        # combined allele-specific read counts
        as_count_filename = "%s/combined_as_count.h5" % output_dir
        self.as_count_h5 = tables.openFile(as_count_filename, "w")
        
        # combined mapped read counts
        read_count_filename = "%s/combined_read_count.h5" % output_dir
        self.read_count_h5 = tables.openFile(read_count_filename, "w")

        # counts of genotypes
        ref_count_filename = "%s/combined_ref_count.h5" % output_dir
        self.ref_count_h5 = tables.openFile(ref_count_filename, "w")
        
        alt_count_filename = "%s/combined_alt_count.h5" % output_dir
        self.alt_count_h5 = tables.openFile(alt_count_filename, "w")
        
        het_count_filename = "%s/combined_het_count.h5" % output_dir
        self.het_count_h5 = tables.openFile(het_count_filename, "w")
        
        self.filenames = [as_count_filename, read_count_filename,
                          ref_count_filename, alt_count_filename,
                          het_count_filename]

        self.h5_files = [self.as_count_h5, self.read_count_h5,
                         self.ref_count_h5, self.alt_count_h5, 
                         self.het_count_h5]

        # initialize all of these files
        atom = tables.UInt16Atom(dflt=0)
        
        for h5f in self.h5_files:
            for chrom in chrom_list:
                self.create_carray(h5f, chrom, atom)
Author: Q-KIM | Project: WASP | Lines: 33 | Source: get_target_regions.py


Example 14: compute_rms

def compute_rms(ext_filename, force_overwrite=False):
    '''
    Add running measurement of RMS noise floor to the extracted spiketimes file.
    This metric is required for many of the spike processing routines; however,
    this is such a slow (possibly inefficient) algorithm that it was broken out
    into a separate function.
    '''
    processing = {}
    with tables.openFile(ext_filename, 'a') as fh:
        raw_filename = ext_filename.replace('extracted', 'raw')

        if 'rms' in fh.root:
            if not force_overwrite:
                raise IOError, 'Already contains RMS data'
            else:
                fh.root.rms._f_remove(recursive=True)

        processing['filter_freq_lp'] = fh.root.filter._v_attrs.fc_lowpass
        processing['filter_freq_hp'] = fh.root.filter._v_attrs.fc_highpass
        processing['filter_order'] = fh.root.filter._v_attrs.filter_order
        processing['filter_btype'] = fh.root.filter._v_attrs.filter_btype
        processing['bad_channels'] = fh.root.filter.bad_channels[:]-1
        processing['diff_mode'] = fh.root.filter._v_attrs.diff_mode
        #channels = fh.root.event_data._v_attrs.extracted_channels[:]-1

        with tables.openFile(raw_filename, 'r') as fh_raw:
            input_node = h5.p_get_node(fh_raw.root, '*')  # 'h5' is a helper module elsewhere in the package
            output_node = fh.createGroup('/', 'rms')
            running_rms(input_node, output_node, 1, 0.25, processing=processing,
                        algorithm='median', progress_callback=update_progress)
Author: bburan | Project: NeuroBehavior | Lines: 30 | Source: add_rms_to_extracted.py


Example 15: main_load_disp

def main_load_disp(app):
    """
    Local replacement for CompareApp::main().

    Load displacement fields.
    """
    
    field = "displacements"
    filename = "results/strikeslip_%s_%04dm.h5" % (shape, res)
    
    projection.open()

    # PyLith ---------------------------------
    app._info.log("Projecting PyLith solution...")
    solnfile = tables.openFile(filename, 'r')
    for tstep in [0]:
        projection.project(solnfile, "pylith_1_0", tstep, field)
    solnfile.close()

    # Analytic -------------------------------
    app._info.log("Copying analytic solution...")
    filename = "analytic/output/%s_%04dm.h5" % (shape, res)
    solnfile = tables.openFile(filename, 'r')
    for tstep in [0]:
        projection.copy_projection(solnfile, "analytic", tstep, field)
    solnfile.close()

    # ----------------------------------------
    projection.close()

    return
Author: geodynamics | Project: pylith_benchmarks | Lines: 31 | Source: compare_solns.py


Example 16: test_write_to_hdf5

# imports inferred from usage in the original test module
import os
import tables
from nose.tools import assert_equal
from pyne import mcnp

def test_write_to_hdf5():
    test_files = ["mcnp_ptrac_i4_little.ptrac",
                  "mcnp_ptrac_i8_little.ptrac"]

    for test_file in test_files:
        p = mcnp.PtracReader(test_file)
        h5file = tables.openFile("mcnp_ptrac_hdf5_file.h5", "w")
        tab = h5file.createTable("/", "t", mcnp.PtracEvent, "test")
        p.write_to_hdf5_table(tab)
        tab.flush()
        h5file.close()
        del h5file
        del tab
        del p

        # now check if the data was correctly written.
        # there should be 5 events of type 1000 (src)
        h5file = tables.openFile("mcnp_ptrac_hdf5_file.h5")
        tab = h5file.getNode("/t")
        selected = [1 for x in tab.iterrows() if x["event_type"] == 1000]
        assert_equal(len(selected), 5)
        h5file.close()
        del tab
        del h5file

        # clean up
        if os.path.exists("mcnp_ptrac_hdf5_file.h5"):
            os.unlink("mcnp_ptrac_hdf5_file.h5")
Author: NukespudWork | Project: pyne | Lines: 28 | Source: test_mcnp.py


Example 17: test_AccumulatorSeriesWrapper_common_case

def test_AccumulatorSeriesWrapper_common_case(h5f=None):
    if not h5f:
        h5f_path = tempfile.NamedTemporaryFile().name
        h5f = tables.openFile(h5f_path, "w")

    validation_error = ErrorSeries(error_name="accumulated_validation_error",
                                table_name="accumulated_validation_error",
                                hdf5_file=h5f,
                                index_names=('epoch','minibatch'),
                                title="Validation error, summed every 3 minibatches, indexed by epoch and minibatch")

    accumulator = AccumulatorSeriesWrapper(base_series=validation_error,
                                    reduce_every=3, reduce_function=numpy.sum)

    # (1,1), (1,2) etc. are (epoch, minibatch) index
    accumulator.append((1,1), 32.0)
    accumulator.append((1,2), 30.0)
    accumulator.append((2,1), 28.0)
    accumulator.append((2,2), 26.0)
    accumulator.append((3,1), 24.0)
    accumulator.append((3,2), 22.0)

    h5f.close()

    h5f = tables.openFile(h5f_path, "r")
    
    table = h5f.getNode('/', 'accumulated_validation_error')

    assert compare_lists(table.cols.epoch[:], [2,3])
    assert compare_lists(table.cols.minibatch[:], [1,2])
    assert compare_lists(table.cols.accumulated_validation_error[:], [90.0,72.0], floats=True)
Author: sauravbiswasiupr | Project: image_transformations | Lines: 31 | Source: test_series.py


Example 18: get_val

def get_val(sessions=None, masked=True, repeats=False):
    """Retrieves training data for given sessions.
    Default: all sessions, mask applied"""

    if sessions is None:
        sessions = range(3)

    if repeats:
        if isinstance(masked, bool) and masked:
            return tables.openFile(val_file_repeats).getNode("/alldata").read()
        elif isinstance(masked, np.ndarray):
            return tables.openFile(val_file_repeats).getNode("/alldata").read().reshape(270, 30, 10, 100, 100).transpose(0, 1, 3, 4, 2)[:, masked, :]
        else:
            raise Exception("Repeats data is masked")
    else:
        if isinstance(masked, bool) and masked:
            #mask = get_mask()
            mask = cortex.get_cortical_mask("MLfs",
                                            "20121210ML_auto1", "thick")
            return np.concatenate(
                [tables.openFile(t).getNode('/data').read()[:, mask]
                 for t in [val_files[i] for i in sessions]])
        elif isinstance(masked, np.ndarray):
            mask = masked
            return np.concatenate(
                [tables.openFile(t).getNode('/data').read()[:, mask]
                 for t in [val_files[i] for i in sessions]])
        else:
            raise NotImplementedError("This will exceed 4G of RAM")
Author: eickenberg | Project: fbg_code | Lines: 29 | Source: data.py


Example 19: test_BasicStatisticsSeries_common_case

def test_BasicStatisticsSeries_common_case(h5f=None):
    if not h5f:
        h5f_path = tempfile.NamedTemporaryFile().name
        h5f = tables.openFile(h5f_path, "w")

    stats_series = BasicStatisticsSeries(table_name="b_vector_statistics",
                                hdf5_file=h5f, index_names=('epoch','minibatch'),
                                title="Basic statistics for b vector indexed by epoch and minibatch")

    # (1,1), (1,2) etc. are (epoch, minibatch) index
    stats_series.append((1,1), [0.15, 0.20, 0.30])
    stats_series.append((1,2), [-0.18, 0.30, 0.58])
    stats_series.append((2,1), [0.18, -0.38, -0.68])
    stats_series.append((2,2), [0.15, 0.02, 1.9])

    h5f.close()

    h5f = tables.openFile(h5f_path, "r")
    
    table = h5f.getNode('/', 'b_vector_statistics')

    assert compare_lists(table.cols.epoch[:], [1,1,2,2])
    assert compare_lists(table.cols.minibatch[:], [1,2,1,2])
    assert compare_lists(table.cols.mean[:], [0.21666667,  0.23333333, -0.29333332,  0.69], floats=True)
    assert compare_lists(table.cols.min[:], [0.15000001, -0.18000001, -0.68000001,  0.02], floats=True)
    assert compare_lists(table.cols.max[:], [0.30, 0.58, 0.18, 1.9], floats=True)
    assert compare_lists(table.cols.std[:], [0.06236095, 0.31382939,  0.35640177, 0.85724366], floats=True)
Author: sauravbiswasiupr | Project: image_transformations | Lines: 27 | Source: test_series.py
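
A closing note on API versions: every example above uses the camelCase API of PyTables 2.x. PyTables 3.x renamed these methods to PEP 8 style (the old names were kept for a while as deprecated aliases before being removed), so running these snippets against a modern PyTables requires roughly the substitutions sketched below. This covers the common renames seen in the examples, not an exhaustive list; 'track.h5' and '/some_track' are hypothetical.

import tables

# PyTables 2.x                 PyTables 3.x
# tables.openFile(...)     ->  tables.open_file(...)
# f.createGroup(...)       ->  f.create_group(...)
# f.createArray(...)       ->  f.create_array(...)
# f.createCArray(...)      ->  f.create_carray(...)
# f.createEArray(...)      ->  f.create_earray(...)
# f.createTable(...)       ->  f.create_table(...)
# f.getNode(...)           ->  f.get_node(...)
# f.iterNodes(...)         ->  f.iter_nodes(...)
# f.removeNode(...)        ->  f.remove_node(...)
# f.renameNode(...)        ->  f.rename_node(...)
# f.setNodeAttr(...)       ->  f.set_node_attr(...)

# For instance, the openFile calls in Example 1 would become:
f = tables.open_file("track.h5", mode="r")   # hypothetical file name
node = f.get_node("/some_track")             # hypothetical node path
f.close()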



Note: The tables.openFile examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not republish without permission.

