
Python matlab.loadmat Function Code Examples


This article collects typical usage examples of the Python scipy.io.matlab.loadmat function. If you have been wondering how to use loadmat, what it is for, or what calling it looks like in practice, the curated examples below should help.



The sections below show 20 code examples of the loadmat function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
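Before the examples, here is a minimal, self-contained sketch of the basic call pattern. The file name data.mat and variable name A are hypothetical placeholders; variable_names, squeeze_me, and struct_as_record are documented keyword arguments of scipy.io.loadmat, of which scipy.io.matlab.loadmat is the same function re-exported.

from scipy.io.matlab import loadmat

# Load every variable in the file into a dict keyed by variable name
# (assumes a hypothetical data.mat containing a matrix named 'A').
mat = loadmat('data.mat')
A = mat['A']  # values come back as numpy arrays

# Load only selected variables and drop singleton dimensions
A = loadmat('data.mat', variable_names=['A'], squeeze_me=True)['A']

# Access MATLAB structs as objects with attributes rather than record arrays
mat = loadmat('data.mat', struct_as_record=False)

Several of the examples below use exactly these keyword arguments: variable_names in Example 3, squeeze_me in Example 18, and struct_as_record in Examples 6 and 17.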

Example 1: test_FindWavKurt

    def test_FindWavKurt(self):
        from scipy.io.matlab import loadmat

        N = 16
        fcut = 0.4
        level_index = 11
        freq_index = 24
        lev = self.level_w[level_index]

        base_path = os.getenv("WAVELOC_PATH")
        matlab_file = os.path.join(base_path, "test_data", "c.mat")
        c_dict = loadmat(matlab_file)
        c_exp = c_dict["c"]

        matlab_file = os.path.join(base_path, "test_data", "S.mat")
        S_dict = loadmat(matlab_file)
        S_exp = S_dict["S"]

        # get bw and frequency (Hz)
        bw_hz, fc_hz, fi, l1 = getBandwidthAndFrequency(
            self.nlevel, self.Fs, self.level_w, self.freq_w, level_index, freq_index
        )

        # get basic filter parameters
        h, g, h1, h2, h3 = get_h_parameters(N, fcut)
        c, s, threshold, Bw, fc = Find_wav_kurt(self.x, h, g, h1, h2, h3, self.nlevel, lev, fi, Fs=self.Fs)
        S = getFTSquaredEnvelope(c)

        # do tests
        self.assertAlmostEqual(Bw * self.Fs, bw_hz)
        self.assertAlmostEqual(fc * self.Fs, fc_hz)
        np.testing.assert_allclose(c.flatten(), c_exp.flatten(), atol=1e-3)
        np.testing.assert_allclose(S.flatten(), S_exp.flatten(), atol=1e-6)
Developer: nlanget, Project: waveloc, Lines: 33, Source: test_kurtogram.py


Example 2: test_FindWavKurt

  def test_FindWavKurt(self):

    N = 16
    fcut = 0.4
    level_index = 11
    freq_index = 24
    lev = self.level_w[level_index]

    c_dict = loadmat("test_data/c.mat")
    c_exp = c_dict['c']

    S_dict = loadmat("test_data/S.mat")
    S_exp = S_dict['S']

    # get bw and frequency (Hz)
    bw_hz, fc_hz, fi = getBandwidthAndFrequency(self.nlevel, self.Fs,
            self.level_w, self.freq_w, level_index, freq_index)

    # get basic filter parameters
    h, g, h1, h2, h3 = get_h_parameters(N, fcut)
    c, s, threshold, Bw, fc = Find_wav_kurt(self.x, h, g, h1, h2, h3,
            self.nlevel, lev, fi, self.Fs)

    S = getFTSquaredEnvelope(c)

    # do tests
    self.assertAlmostEqual(Bw * self.Fs, bw_hz)
    self.assertAlmostEqual(fc * self.Fs, fc_hz)
    np.testing.assert_allclose(c.flatten(), c_exp.flatten(), atol=1e-3)
    np.testing.assert_allclose(S.flatten(), S_exp.flatten(), atol=1e-6)
Developer: amaggi, Project: seismokurt, Lines: 30, Source: test_kurtogram.py


Example 3: __get_excit_wfm

    def __get_excit_wfm(filepath):
        """
        Returns the excitation BE waveform present in the more parms.mat file
        
        Parameters
        ------------
        filepath : String / unicode
            Absolute filepath of the .mat parameter file
        
        Returns
        -----------
        ex_wfm : 1D numpy float array
            Band Excitation waveform

        """
        if not path.exists(filepath):
            warn('BEPSndfTranslator - NO more_parms.mat file found')
            return np.zeros(1000, dtype=np.float32)

        if 'more_parms' in filepath:
            matread = loadmat(filepath, variable_names=['FFT_BE_wave'])
            fft_full = np.complex64(np.squeeze(matread['FFT_BE_wave']))
            bin_inds = None
            fft_full_rev = None
        else:
            matread = loadmat(filepath, variable_names=['FFT_BE_wave', 'FFT_BE_rev_wave', 'BE_bin_ind'])
            bin_inds = np.uint(np.squeeze(matread['BE_bin_ind'])) - 1
            fft_full = np.complex64(np.squeeze(matread['FFT_BE_wave']))
            fft_full_rev = np.complex64(np.squeeze(matread['FFT_BE_rev_wave']))

        return fft_full, fft_full_rev, bin_inds
Developer: pycroscopy, Project: pycroscopy, Lines: 31, Source: beps_ndf.py


Example 4: test_srmr_norm

def test_srmr_norm():
    fs = 16000
    s = loadmat("test/test.mat")["s"][:,0]

    correct_ratios = loadmat("test/correct_ratios.mat")['correct_ratios'][0]
    srmr = SRMR(fs, fast=False, norm=True, max_cf=30)
    out = srmr.predict(s, s, s)
    ratio_norm, avg_energy_norm = out['p']['srmr'], out['avg_energy']
    assert np.allclose(ratio_norm, correct_ratios[3], rtol=1e-6, atol=1e-12)
Developer: achabotl, Project: SRMRpy, Lines: 9, Source: test_srmr.py


Example 5: test_srmr_slow

def test_srmr_slow():
    fs = 16000
    s = loadmat("test/test.mat")["s"][:,0]

    correct_ratios = loadmat("test/correct_ratios.mat")['correct_ratios'][0]
    srmr = SRMR(fs, fast=False)
    out = srmr.predict(s, s, s)
    ratio_slow, avg_energy_slow = out['p']['srmr'], out['avg_energy']
    assert np.allclose(ratio_slow, correct_ratios[0], rtol=1e-6, atol=1e-12)
Developer: achabotl, Project: SRMRpy, Lines: 9, Source: test_srmr.py


Example 6: timeseries_design

def timeseries_design(subject_id,whatParadigm,onsets_dir):
    import scipy.signal
    import scipy.special as sp
    import numpy as np
    import math
    from nipype.interfaces.base import Bunch
    from copy import deepcopy
    from scipy.io.matlab import loadmat
    import glob
    import os
    #from Facematch import onsets_dir
    print "Entered timeseries_design once with arguments SUBID = "+subject_id+", paradigm = "+whatParadigm+", and onsets dir = "+onsets_dir+"."
    output = []
    regressor_names = None
    regressors = None
    onsets_temp = os.path.join(onsets_dir, subject_id+'*onsets.mat')
    onsets_files = sorted(glob.glob(onsets_temp))
    testmat = loadmat(onsets_files[0], struct_as_record=False)
    testnames = testmat['names'][0]
    names_count_vec = np.zeros(len(testnames))

    for r in range(len(onsets_files)):
        mat = loadmat(onsets_files[r], struct_as_record=False)
        ons = mat['onsets'][0]
        nam = mat['names'][0]
        dur = mat['durations'][0]

        names = []
        durations = []
        run_onsets = []
        for condition in range(len(nam)):
            for onset in range(len(ons[condition][0])): 
                names_count_vec[condition] += 1          
                names.append(str(nam[condition][0])+'_%d'%(names_count_vec[condition]))


                run_onsets.append([ons[condition][0][onset]])
                durations.append(dur[condition][0])
  

        print(run_onsets)
        print(names)
        print(durations)
        output.insert(r,
            Bunch(conditions=deepcopy(names),
                onsets=deepcopy(run_onsets),
                durations=deepcopy(durations),
                amplitudes=None,
                tmod=None,
                pmod=None,
                regressor_names=None,
                regressors=regressors)) #here is where we can do linear, quad, etc detrending
        
    return output
Developer: jsalva, Project: gates_analysis, Lines: 54, Source: beta_series_analysis.py


Example 7: main

def main(argv):
	dim = 64
	imidx = 7

	# load unnormalized log-likelihood
	results = loadmat('results/vanhateren/poe/AIS_GibbsTrain_white_studentt_L=064_M=256_B=0100000_learner=PMPFdH1_20120523T112539.mat')
	loglik = -mean(results['E'][:, :10000]) - results['logZ']

	# load importance weights for partition function
	ais_weights = loadmat('results/vanhateren/poe/matlab_up=022150_T=10000000_ais.mat')['logweights']
	ais_weights.shape

	# number of samples to probe
	num_samples = 2**arange(0, ceil(log2(ais_weights.shape[0])) + 1, dtype='int32')
	num_samples[-1] = max([num_samples[-1], ais_weights.shape[0]])
	num_repetitions = ceil(2.**16 / num_samples)
	estimates = []

	print(loadmat('results/vanhateren/poe/matlab_up=022150_T=10000000_ais.mat')['t_range'][:, imidx], 'intermediate distributions')

	logZ = logmeanexp(ais_weights[:, -1])

	for k in arange(len(num_samples)):
		estimates_ = []

		for _ in arange(num_repetitions[k]):
			# pick samples at random
			idx = permutation(ais_weights.shape[0])[:num_samples[k]]

			# estimate log-partf. using num_samples[k] samples
			loglik_ = loglik + (logZ - logmeanexp(ais_weights[idx, imidx]))

			# store estimate of log-likelihood 
			estimates_.append(loglik_)

		estimates.append(mean(estimates_))

	gca().width = 5
	gca().height = 5
#	gca().ymin = 0.85
#	gca().ymax = 1.55
#	ytick([0.9, 1.1, 1.3, 1.5])
	semilogx(num_samples, asarray(estimates) / log(2.) / dim, '.-')  # asarray: a plain list cannot be divided by a float
	xlabel('number of AIS samples')
	ylabel('estimated log-likelihood')
	savefig('results/vanhateren/convergence_poe.tex')
	draw()

	return 0
Developer: lucastheis, Project: isa, Lines: 49, Source: convergence_poe.py


Example 8: load_dataset

def load_dataset(dataset):
    if dataset == 'umls':
        mat = loadmat('../data/%s/uml.mat' % (dataset))
        T = np.array(mat['Rs'], np.float32)
    elif dataset == 'nation':
        mat = loadmat('../data/%s/dnations.mat' % (dataset))
        T = np.array(mat['R'], np.float32)
    elif dataset == 'kinship':
        mat = loadmat('../data/%s/alyawarradata.mat' % (dataset))
        T = np.array(mat['Rs'], np.float32)
    elif dataset == 'wordnet':
        T = pickle.load(open('../data/%s/reduced_wordnet.pkl' % (dataset), 'rb'))

    T[np.isnan(T)] = 0
    return T
Developer: arongdari, Project: almc, Lines: 15, Source: amdc_runner.py


Example 9: get_top_scores

    def get_top_scores(self, i=100, force_num=True):
        fn_scores = os.path.join(self.ds.path, "cpmc", "MySegmentsMat", self.name, "scores.mat")
        sc = ml.loadmat(fn_scores)["scores"]
        scores = list(np.sort(sc.ravel())[-1 : (-1 - i) : -1])
        if len(scores) < i and force_num:
            scores = (list(scores) * 100)[:100]
        return scores
Developer: amiltonwong, Project: pottics, Lines: 7, Source: dataset.py


Example 10: ReadDatasetFile

def ReadDatasetFile(dataset_file_path):
  """Reads dataset file in Revisited Oxford/Paris ".mat" format.

  Args:
    dataset_file_path: Path to dataset file, in .mat format.

  Returns:
    query_list: List of query image names.
    index_list: List of index image names.
    ground_truth: List containing ground-truth information for dataset. Each
      entry is a dict corresponding to the ground-truth information for a query.
      The dict may have keys 'easy', 'hard', 'junk' or 'ok', mapping to a list
      of integers; additionally, it has a key 'bbx' mapping to a list of floats
      with bounding box coordinates.
  """
  with tf.gfile.GFile(dataset_file_path, 'r') as f:
    cfg = matlab.loadmat(f)

  # Parse outputs according to the specificities of the dataset file.
  query_list = [str(im_array[0]) for im_array in np.squeeze(cfg['qimlist'])]
  index_list = [str(im_array[0]) for im_array in np.squeeze(cfg['imlist'])]
  ground_truth_raw = np.squeeze(cfg['gnd'])
  ground_truth = []
  for query_ground_truth_raw in ground_truth_raw:
    query_ground_truth = {}
    for ground_truth_key in _GROUND_TRUTH_KEYS:
      if ground_truth_key in query_ground_truth_raw.dtype.names:
        adjusted_labels = query_ground_truth_raw[ground_truth_key] - 1
        query_ground_truth[ground_truth_key] = adjusted_labels.flatten()

    query_ground_truth['bbx'] = np.squeeze(query_ground_truth_raw['bbx'])
    ground_truth.append(query_ground_truth)

  return query_list, index_list, ground_truth
Developer: rder96, Project: models, Lines: 34, Source: dataset.py


Example 11: show_predictions

def show_predictions(alpha="alpha", symbol="GE", xtn=".PNG"):
    if type(alpha) == str:
        print ("Loading file named " + alpha + ".mat")
        a = mat.loadmat(
            alpha + ".mat", mat_dtype=False
        )  # load a matlab style set of matrices from the file named by the string alpha
        if alpha in a:
            alpha = a.get(alpha).reshape(-1)  # get the variable with the name of the string in alpha
        else:
            alpha = a.get(list(a.keys())[2]).reshape(-1)  # get the first non-hidden key and reshape into a 1-D array
    print ("Loading financial data for stock symbol", symbol)
    r = np.recfromcsv("/home/hobs/Desktop/References/quant/lyle/data/" + symbol + "_yahoo.csv", skiprows=1)
    r.sort()
    r.high = r.high * r.adj_close / r.close  # adjust the high and low prices for stock splits
    r.low = r.low * r.adj_close / r.close  # adjust the high and low prices for stock splits
    daily_returns = r.adj_close[1:] / r.adj_close[0:-1] - 1
    predictions = lfilt(alpha, daily_returns)
    print (
        "Plotting a scatter plot of",
        len(daily_returns),
        "returns vs",
        len(predictions),
        "predictions using a filter of length",
        len(alpha),
    )
    (ax, fig) = plot(predictions, daily_returns[len(alpha) :], s="bo", xtn=".PNG")
    ax.set_xlabel("Predicted Returns")
    ax.set_ylabel("Actual Returns")
    big_mask = np.abs(predictions) > np.std(predictions) * 1.2
    bigs = predictions[big_mask]
    true_bigs = daily_returns[big_mask]
    (ax, fig) = plot(bigs, true_bigs, s="r.", xtn=".PNG")
    fig.show()
    return (predictions, daily_returns, bigs, true_bigs, big_mask)
Developer: hobson, Project: tagim, Lines: 34, Source: finance.py


Example 12: subtract_background_from_stacks

def subtract_background_from_stacks(scanfile, indir, outdir, scannumber=-1):
    """Subtract background from SAXS data in MAT-file stacks.
    """
    scans = read_yaml(scanfile)
    if scannumber > 0:
        scannos = [ scannumber ]
    else:
        scannos = sorted(scans.keys())
    for scanno in scannos:
        print("Scan #%03d" % scanno)
        try:
            bufscan = scans[scanno][0]
        except TypeError:
            print("Scan #%03d is a buffer" % scanno)
            continue
        try:
            conc = scans[scanno][1]
        except TypeError:
            print("No concentration for scan #02d." % scanno)
            conc = 1.0
        print("Using concentration %g g/l." % conc)
        stackname = "s%03d" % scanno
        stack = loadmat(indir+'/'+stackname+'.mat')[stackname]
        subs = np.zeros_like(stack)
        (npos, nrep, _, _) = stack.shape
        for pos in range(npos):
            print(pos)
            buf = get_bg(indir, bufscan, pos)
            for rep in range(nrep):
                subs[pos,rep,...] = errsubtract(stack[pos,rep,...], buf)
                subs[pos,rep,1:3,:] = subs[pos,rep,1:3,:] / conc
        outname = "subs%03d" % scanno
        savemat(outdir+'/'+outname + ".mat", {outname: subs}, do_compression=1,
                oned_as='row')
Developer: tpikonen, Project: solution, Lines: 35, Source: subtraction.py


Example 13: preprocess_dataset

    def preprocess_dataset(self, dataset, n_jobs=-1, verbosity=2):
        """

        :param dataset:
        :param n_jobs:
        :return:
        """
        if self.skip:
            return

        if verbosity > 1: print("   Loading masks from .mat file")
        data = loadmat(self.path)
        masks = data[self.var_name][0]

        if not self.invert:
            masks_probe = masks.take(range(0, masks.size, 2))
            masks_gallery = masks.take(range(1, masks.size, 2))
        else:
            # inverted: probe takes the odd-indexed masks, gallery the even-indexed
            masks_probe = masks.take(range(1, masks.size, 2))
            masks_gallery = masks.take(range(0, masks.size, 2))

        dataset.probe.masks_train = list(masks_probe[dataset.train_indexes])
        dataset.probe.masks_test = list(masks_probe[dataset.test_indexes])
        dataset.gallery.masks_train = list(masks_gallery[dataset.train_indexes])
        dataset.gallery.masks_test = list(masks_gallery[dataset.test_indexes])
Developer: AShedko, Project: PyReID, Lines: 25, Source: preprocessing.py


Example 14: test

def test():
    """
    Test with Kinship dataset
    Use all positive triples and negative triples as a training set
    See how the reconstruction error is reduced during training
    """
    from scipy.io.matlab import loadmat
    mat = loadmat('../data/kinship/alyawarradata.mat')
    T = np.array(mat['Rs'], np.float32)
    T[T == 0] = -1  # set negative value to -1
    E, K = T.shape[0], T.shape[2]
    max_iter = E * E * K * 10

    n_dim = 10

    # p_idx = np.ravel_multi_index((T == 1).nonzero(), T.shape)  # raveled positive index
    # n_idx = np.ravel_multi_index((T == -1).nonzero(), T.shape)  # raveled negative index
    # model.fit(T, p_idx, n_idx, max_iter, e_gap=10000)

    training = np.random.binomial(1., 0.01, T.shape)
    testing = np.random.binomial(1., 0.5, T.shape)
    testing[training == 1] = 0

    model = AMDC(n_dim)
    model.population = True
    model.do_active_learning(T, training, 15000, testing)
Developer: arongdari, Project: almc, Lines: 26, Source: amdc.py


Example 15: read_mat_profile_files

def read_mat_profile_files(
        path,
        loc,
        var,
        dataSetName='test',
        dataSetType='ms'):
    """Reads generic time series from matlab file and converts data
    to python format"""
    varToChar = {'salt': 's', 'elev': 'e', 'temp': 't', 'u': 'u', 'v': 'v'}
    pattern = os.path.join(
        path,
        '.'.join([dataSetName, dataSetType, varToChar[var], loc, 'mat']))
    fList = sorted(glob.glob(pattern))
    if not fList:
        raise Exception('File not found: ' + pattern)
    f = fList[0]
    print('Reading', f)
    d = loadmat(f)
    t = d['t'].flatten()  # (1,nTime)
    z = d['z']  # (nVert,nTime)
    data = d['data']  # (nVert,nTime)
    # convert time from Matlab datenum (in PST) to epoch (UTC)
    time = datenumPSTToEpoch(t)
    # round to nearest minute
    time = np.round(time / 60.) * 60.
    print('  Loaded data range: ', str(timeArray.epochToDatetime(time[0])), ' -> ', str(timeArray.epochToDatetime(time[-1])))
    return time, z, data
Developer: tkarna, Project: crane, Lines: 34, Source: convSurrogateOutputToNC.py


Example 16: test_srmr

def test_srmr():
    fs = 16000
    s = loadmat("test/test.mat")["s"][:,0]

    correct_ratios = loadmat("test/correct_ratios.mat")['correct_ratios'][0]
    ratio, avg_energy = srmr(s, fs)
    assert np.allclose(ratio, correct_ratios[1], rtol=1e-6, atol=1e-12)

    ratio_norm_fast, avg_energy_norm_fast = srmr(s, fs, fast=True, norm=True, max_cf=30)
    assert np.allclose(ratio_norm_fast, correct_ratios[2], rtol=1e-6, atol=1e-12)

    ratio_slow, avg_energy_slow = srmr(s, fs, fast=False)
    assert np.allclose(ratio_slow, correct_ratios[0], rtol=1e-6, atol=1e-12)

    ratio_norm, avg_energy_norm = srmr(s, fs, fast=False, norm=True, max_cf=30)
    assert np.allclose(ratio_norm, correct_ratios[3], rtol=1e-6, atol=1e-12)
Developer: kastnerkyle, Project: SRMRpy, Lines: 16, Source: test_srmr.py


Example 17: convert

def convert(in_filename, out_filename=None, spacings=None):
    A = loadmat(in_filename, struct_as_record=False)

    # struct
    S = A['Save_data'][0,0]
    # volume
    V = S.P

    # output filename
    if out_filename is None:
        out_filename = os.path.splitext(in_filename)[0] + '.nrrd'
        
    logger.debug('Output filename: %s', out_filename)
    logger.debug('Writing NRRD file.')

    # NRRD options
    options = {}
    if spacings is None:
        xs = float((S.xmax - S.xmin) / V.shape[0])
        ys = float((S.ymax - S.ymin) / V.shape[1])
        zs = float((S.zmax - S.zmin) / V.shape[2])
        options['spacings'] = [xs, ys, zs]
    else:
        options['spacings'] = eval(spacings)

    logger.debug('Setting spacings to: %s', options['spacings'])

    nrrd.write(out_filename, V, options)
Developer: davepeake, Project: oscar2nrrd, Lines: 28, Source: oscar2nrrd.py


Example 18: __readOldMatBEvecs

    def __readOldMatBEvecs(file_path):
        """
    Returns information about the excitation BE waveform present in the .mat file
    
    Inputs:
        file_path -- Absolute filepath of the .mat parameter file
    
    Outputs:
        Tuple -- (bin_inds, bin_w, bin_FFT, BE_wave, dc_amp_vec_full)\n
        bin_inds -- Bin indices\n
        bin_w -- Excitation bin Frequencies\n
        bin_FFT -- FFT of the BE waveform for the excited bins\n
        BE_wave -- Band Excitation waveform\n
        dc_amp_vec_full -- spectroscopic waveform. 
        This information will be necessary for fixing the UDVS for AC modulation for example
        """

        matread = loadmat(file_path, squeeze_me=True)
        BE_wave = matread['BE_wave_1']
        bin_inds = matread['bin_ind_s'] - 1  # Python base 0. note also _s, for this case
        bin_w = matread['bin_w']
        dc_amp_vec_full = matread['dc_amp_vec_full']
        FFT_full = np.fft.fftshift(np.fft.fft(BE_wave))
        bin_FFT = np.conjugate(FFT_full[bin_inds])

        return bin_inds, bin_w, bin_FFT, BE_wave, dc_amp_vec_full
Developer: pycroscopy, Project: pycroscopy, Lines: 26, Source: be_odf_relaxation.py


Example 19: read_training_data

def read_training_data():
    """
    Returns a dictionary of features for the training data
    """
    filename=os.path.join('..','data/Piton','TrainingSet_2.mat')
    data_orig=loadmat(filename)
    
    # create a clean dictionary of data,
    # taking logarithms of the features for which
    # the test set also has logarithms (thanks Clement!)

    # for now, only deal with the features that are ok in both datasets
    data={}
    data['KurtoEB']=log(np.array(data_orig['KurtoEB'].flat))
    data['KurtoVT']=log(np.array(data_orig['KurtoVT'].flat))
    data['AsDecVT']=log(np.array(data_orig['AsDecVT'].flat))
    data['AsDecEB']=log(np.array(data_orig['AsDecEB'].flat))
    data['RappMaxMeanEB']=log(np.array(data_orig['RappMaxMeanEB'].flat))
    data['RappMaxMeanVT']=log(np.array(data_orig['RappMaxMeanVT'].flat))
    data['DurVT']=np.abs(np.array(data_orig['DurVT'].flat))
    data['DurEB']=np.abs(np.array(data_orig['DurEB'].flat))
    data['EneEB']=log(np.array(data_orig['EneFFTeB'].flat))
    data['EneVT']=log(np.array(data_orig['EneFFTvT'].flat))

    return data
Developer: amaggi, Project: discrimination, Lines: 25, Source: PdF_io.py


Example 20: test_rdop4_zero_rowscutoff

	def test_rdop4_zero_rowscutoff(self):
		matfile = 'nastran_op4_data/r_c_rc.mat'
		filenames = glob('nastran_op4_data/*.op4')
		o4 = op4.OP4()
		o4._rowsCutoff = 0
		m = matlab.loadmat(matfile)
		for filename in filenames:
			if filename.find('badname') > -1:
				with assert_warns(RuntimeWarning) as cm:
					dct = o4.dctload(filename)
				the_warning = str(cm.warning)
				assert 0 == the_warning.find('Output4 file has matrix '
											 'name: 1mat')
				with assert_warns(RuntimeWarning) as cm:
					names, mats, forms, mtypes = o4.listload(filename)
				the_warning = str(cm.warning)
				assert 0 == the_warning.find('Output4 file has matrix '
											 'name: 1mat')
				with assert_warns(RuntimeWarning) as cm:
					names2, sizes, forms2, mtypes2 = o4.dir(filename,
															verbose=False)
				the_warning = str(cm.warning)
				assert 0 == the_warning.find('Output4 file has matrix '
											 'name: 1mat')
			else:
				dct = o4.dctload(filename)
				names, mats, forms, mtypes = o4.listload(filename)
				names2, sizes, forms2, mtypes2 = o4.dir(filename,
														verbose=False)
			assert sorted(dct.keys()) == sorted(names)
			assert names == names2
			assert forms == forms2
			assert mtypes == mtypes2
			for mat, sz in zip(mats, sizes):
				assert mat.shape == sz
			for nm in dct:
				if nm[-1] == 's':
					matnm = nm[:-1]
				elif nm == '_1mat':
					matnm = 'rmat'
				else:
					matnm = nm
				assert np.allclose(m[matnm], dct[nm][0])
				pos = names.index(nm)
				assert np.allclose(m[matnm], mats[pos])
				assert dct[nm][1] == forms[pos]
				assert dct[nm][2] == mtypes[pos]

			nm2 = nm = 'rcmat'
			if filename.find('single') > -1:
				nm2 = 'rcmats'
			if filename.find('badname') > -1:
				with assert_warns(RuntimeWarning) as cm:
					dct = o4.dctload(filename, nm2)
					name, mat, *_ = o4.listload(filename, [nm2])
			else:
				dct = o4.dctload(filename, [nm2])
				name, mat, *_ = o4.listload(filename, nm2)
			assert np.allclose(m[nm], dct[nm2][0])
			assert np.allclose(m[nm], mat[0])
Developer: EmanueleCannizzaro, Project: pyNastran, Lines: 60, Source: test_op4_nose.py



Note: The scipy.io.matlab.loadmat function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to each project's license before redistributing or using the code. Do not reproduce this article without permission.

