
Python io.loadmat Function Code Examples


This article collects typical usage examples of the Python function scipy.io.loadmat. If you have been wondering what exactly loadmat does, how to call it, or what it looks like in real code, the hand-picked examples below should help.



Twenty loadmat code examples are presented below, ordered by popularity.
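Before the collected examples, here is a minimal, self-contained sketch of the typical loadmat round trip. The file name demo.mat and the variables A and label are invented for illustration; the file is written first with savemat so the snippet runs as-is:

import numpy as np
from scipy.io import loadmat, savemat

# Write a small hypothetical .mat file so the example is self-contained.
savemat('demo.mat', {'A': np.eye(3), 'label': 'test'})

# loadmat returns a dict mapping MATLAB variable names to NumPy arrays;
# file metadata sits under keys that start with '__'.
data = loadmat('demo.mat', squeeze_me=True)  # squeeze_me drops size-1 dimensions
print(sorted(k for k in data if not k.startswith('__')))  # ['A', 'label']
print(data['A'].shape)  # (3, 3)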

Example 1: keypoint_detection

def keypoint_detection():
    try:
        data = sio.loadmat('data.mat')
    except Exception:
        load.csv()  # rebuild data.mat from the raw CSV, then retry
        data = sio.loadmat('data.mat')

    train_x = data['train_x']
    train_y = data['train_y']
    test_x = data['test_x']

    # data normalization
    train_x = train_x / 256.0
    train_y = (train_y - 48) / 48.0
    test_x = test_x / 256.0

    train_x, train_y = sklearn.utils.shuffle(train_x, train_y, random_state=0)  # shuffle returns new arrays; keep them

    train_x, valid_x = train_x[:-400], train_x[-400:]
    train_y, valid_y = train_y[:-400], train_y[-400:]

    model = Model(0.01, 0.9, 0.0005, 100, 10000)

    model.add_layer(layers.FullConnectedLayer(9216, 256, 1, layers.rectify))
    model.add_layer(layers.DropoutLayer(0.5))
    model.add_layer(layers.FullConnectedLayer(256, 100, 1, layers.rectify))
    model.add_layer(layers.DropoutLayer(0.5))
    model.add_layer(layers.FullConnectedLayer(100, 30))
    model.set_loss_function(layers.EuclideanLoss)

    model.build()
    print('build model complete')
    model.train_model(train_x, train_y, valid_x, valid_y)
    model.save_test_result(test_x)
Author: hqythu, Project: kaggle-facial-keypoints-detection, Lines: 34, Source: mlp.py


Example 2: __init__

    def __init__(self, complete_path):

        if complete_path.endswith('.mat.gz'):
            temp_filename = complete_path.split('.gz')[0]
            with open(temp_filename, "wb") as tmp:
                shutil.copyfileobj(gzip.open(complete_path), tmp)
            dict_mr = sio.loadmat(temp_filename)
            os.remove(temp_filename)
        elif complete_path.endswith('.mat'):
            dict_mr = sio.loadmat(complete_path)
        else:
            raise ValueError('Unknown file extension for MountainRange file. '
                             'Should be .mat or .mat.gz')
        self.value = dict_mr['value']
        self.trigger_stamp = dict_mr['triggerStamp']
        self.SC_numb = int(np.squeeze(dict_mr['superCycleNb']))  # np.int was removed from modern NumPy
        self.first_trigger_t_stamp_unix = dict_mr['first_trigger_t_stamp_unix']
        self.sample_interval = float(np.squeeze(dict_mr['sampleInterval']))
        self.first_sample_time = dict_mr['firstSampleTime']
        self.sensitivity = dict_mr['sensitivity']
        self.offset = dict_mr['offset']
        self.SPSuser = dict_mr['SPSuser']
        self.t_stamp_unix = dict_mr['t_stamp_unix']

        self.time_axis = np.arange(self.value.shape[1], dtype=float)*self.sample_interval - self.value.shape[1]*self.sample_interval/2.
Author: PyCOMPLETE, Project: SPSMeasurementTools, Lines: 25, Source: MR.py


Example 3: load_matlab_matrix

def load_matlab_matrix( matfile, matname=None ):
    """
    Wraps scipy.io.loadmat.

    If matname is provided, returns the np.matrix stored under that name.
    Otherwise the matrix is extracted from the full dict returned by
    loadmat via _extract_mat.
    """
    out = spio.loadmat( matfile )
    mat = out[ matname ] if matname else _extract_mat( out )
    # if mat is a sparse matrix, convert it to a dense numpy matrix
    try:
        mat = np.matrix( mat.toarray() )
    except AttributeError:
        mat = np.matrix( mat )
    return mat
Author: caosuomo, Project: rads, Lines: 25, Source: utils.py


Example 4: _loadGEval

    def _loadGEval(self):
        print('Loading densereg GT..')
        prefix = os.path.dirname(__file__) + '/../../DensePoseData/eval_data/'
        print(prefix)
        SMPL_subdiv = loadmat(prefix + 'SMPL_subdiv.mat')
        self.PDIST_transform = loadmat(prefix + 'SMPL_SUBDIV_TRANSFORM.mat')
        self.PDIST_transform = self.PDIST_transform['index'].squeeze()
        UV = np.array([
            SMPL_subdiv['U_subdiv'],
            SMPL_subdiv['V_subdiv']
        ]).squeeze()
        ClosestVertInds = np.arange(UV.shape[1])+1
        self.Part_UVs = []
        self.Part_ClosestVertInds = []
        for i in np.arange(24):
            self.Part_UVs.append(
                UV[:, SMPL_subdiv['Part_ID_subdiv'].squeeze()==(i+1)]
            )
            self.Part_ClosestVertInds.append(
                ClosestVertInds[SMPL_subdiv['Part_ID_subdiv'].squeeze()==(i+1)]
            )

        arrays = {}
        # Pdist_matrix.mat is evidently a MATLAB v7.3 (HDF5) file, so it is read with h5py
        with h5py.File(prefix + 'Pdist_matrix.mat', 'r') as f:
            for k, v in f.items():
                arrays[k] = np.array(v)
        self.Pdist_matrix = arrays['Pdist_matrix']

        print('Loaded')
Author: Mrggggg, Project: DensePose, Lines: 29, Source: densepose_cocoeval.py


Example 5: contrast_session

def contrast_session(session, C_path = os.getcwd()): # takes an integer for session ID, and a path.
    """-\nsession ID, and data path required.\nreturns list of ST_tbc matrices for every image in session."""
    
    session_data = sio.loadmat(os.path.join(C_path, 'goodCh_cont'))['goodCh_cont']

    sName = session_data[session,0][0]
    one_session = sio.loadmat(os.path.join(C_path, sName))
    
    trials = one_session['MUA'] #trials.shape =>  (32 channel, ~900 trial)
    img = np.squeeze(one_session['Cond'])
    img_trialNum = Counter(img)
    
    ST = []
    for p in range(len(img_trialNum)):
        st = np.zeros((img_trialNum[p+1],4500,trials.shape[0])) # ST_tbc
        
        for channel in range(trials.shape[0]):
            img_trials = trials[channel, img == p+1] # list of trials here.
            
            for trial in range(len(img_trials)):  # the index is needed to place each spike
                for ap in range(img_trials[trial].shape[1]):
                    b = int(np.ceil(img_trials[trial][0][ap]*1000))  # convert to ms
                    st[trial, b, channel] = 1   # b stands for bin; `bin` is a Python built-in
        ST.append(st)
    print(sName)
    return ST
Author: partvishegy, Project: csnl_repository, Lines: 26, Source: V1_data_modul.py


Example 6: test_spm_hrf_octave

def test_spm_hrf_octave():
    # Test SPM hrf against output from SPM code running in Octave
    my_path = dirname(__file__)
    hrfs_path = pjoin(my_path, 'spm_hrfs.mat')
    # mat file resulting from make_hrfs.m
    hrfs_mat = sio.loadmat(hrfs_path, squeeze_me=True)
    params = hrfs_mat['params']
    hrfs = hrfs_mat['hrfs']
    for i, pvec in enumerate(params):
        dt, ppk, upk, pdsp, udsp, rat = pvec
        t_vec = np.arange(0, 32.1, dt)
        our_hrf = spm_hrf_compat(t_vec,
                                 peak_delay=ppk,
                                 peak_disp=pdsp,
                                 under_delay=upk,
                                 under_disp=udsp,
                                 p_u_ratio=rat)
        # Normalize integral to match SPM
        assert_almost_equal(our_hrf, hrfs[i])
    # Test basis functions
    # mat file resulting from get_td_dd.m
    bases_path = pjoin(my_path, 'spm_bases.mat')
    bases_mat = sio.loadmat(bases_path, squeeze_me=True)
    dt = bases_mat['dt']
    t_vec = np.arange(0, 32 + dt, dt)
    # SPM function divides by sum of values - revert with dt
    assert_almost_equal(spmt(t_vec), bases_mat['hrf'] / dt, 4)
    assert_almost_equal(dspmt(t_vec), bases_mat['dhrf'] / dt, 4)
    assert_almost_equal(ddspmt(t_vec), bases_mat['ddhrf'] / dt, 4)
Author: Naereen, Project: nipy, Lines: 29, Source: test_hrf.py


Example 7: get_images

 def get_images(self, img_name):
     stp = str(img_name)
     if img_name < 10:
         stp = '0000' + stp
     elif img_name < 100:
         stp = '000' + stp
     elif img_name < 1000:
         stp = '00' + stp
     else:
         stp = '0' + stp
     img_path = 'data/portraitFCN_data/' + stp + '.mat'
     alpha_path = 'data/images_mask/' + stp + '_mask.mat'
     if os.path.exists(img_path) and os.path.exists(alpha_path):
         imat = sio.loadmat(img_path)['img']
         amat = sio.loadmat(alpha_path)['mask']
         nimat = np.array(imat, dtype=float)   # np.float / np.int were removed from modern NumPy
         namat = np.array(amat, dtype=int)
         org_mat = np.zeros(nimat.shape, dtype=int)
         h, w, _ = nimat.shape
         for i in range(h):
             for j in range(w):
                 org_mat[i][j][0] = round(nimat[i][j][2] * 255 + 122.675)
                 org_mat[i][j][1] = round(nimat[i][j][1] * 255 + 116.669)
                 org_mat[i][j][2] = round(nimat[i][j][0] * 255 + 104.008)
         return nimat, namat, org_mat
     return None, None, None
Author: GirishaGarg, Project: AutoPortraitMatting, Lines: 26, Source: portrait.py


Example 8: load_pertub_data_cifar

def load_pertub_data_cifar(dirs='data_imputation/', dataset='cifar10_gcn_var', pertub_type=3, pertub_prob=6):
    # perturb data
    print('Loading perturbed data...')

    if pertub_type==4:
        zz = sio.loadmat(dirs+dataset+'_type_'+str(pertub_type)+'_params_'+str(int(pertub_prob*100))+'_noise_rawdata.mat')
    elif pertub_type==3:
        pertub_prob = int(pertub_prob)
        zz = sio.loadmat(dirs+dataset+'_type_'+str(pertub_type)+'_params_'+str(pertub_prob)+'_noise_rawdata.mat')
    elif pertub_type==5:
        zz = sio.loadmat(dirs+dataset+'_type_'+str(pertub_type)+'_params_noise_rawdata.mat')
    else:
        print('Error in load_pertub_data')
        print(dirs, pertub_type, pertub_prob)
        exit()

    data_train = zz['z_train'].T
    data = zz['z_test_original'].T
    data_perturbed = zz['z_test'].T
    pertub_label = zz['pertub_label'].astype(np.float32).T
    pertub_number = float(np.sum(1-pertub_label))

    print(pertub_number, data_train.shape, data.shape, data_perturbed.shape, pertub_label.shape)

    data_train = theano.shared(np.asarray(data_train, dtype=theano.config.floatX), borrow=True)
    data = theano.shared(np.asarray(data, dtype=theano.config.floatX), borrow=True)
    data_perturbed = theano.shared(np.asarray(data_perturbed, dtype=theano.config.floatX), borrow=True)
    pertub_label = theano.shared(np.asarray(pertub_label, dtype=theano.config.floatX), borrow=True)

    return data_train, data, data_perturbed, pertub_label, pertub_number
Author: codeaudit, Project: mmdgm, Lines: 30, Source: datapy.py


Example 9: main

def main():
    predicted_mat = loadmat(args.predicted_mat)['labels']
    truth_mat = loadmat(args.truth_mat)['GT']
    mode = args.mode
    
    min_shape = np.minimum(predicted_mat.shape, truth_mat.shape)
    error = None
    if mode == 'all':
        error = compare_all_mats(predicted_mat, truth_mat, min_shape)
        print(1 - error/(min_shape[0]*min_shape[1]*min_shape[2]))
    else:
        error = compare_single_mats(predicted_mat, truth_mat, min_shape)
        error = 1 - error/(min_shape[0]*min_shape[1])
        print(error)
    should_graph = args.graph
    if should_graph == 'True':  # args.graph arrives as a string
        y_axis = error
        x_axis = np.arange(len(y_axis))
        fig = plt.figure()
        ax = fig.add_subplot(111)
        print(type(y_axis))
        #ax.plot(x_axis, y_axis)
        ax.scatter(x_axis, y_axis)
        ax.set_xlim([0,len(x_axis)])
        ax.set_ylim([0,1])
        plt.savefig(args.output)
Author: Rulison, Project: background_blog, Lines: 26, Source: evaluate_mats.py


Example 10: loadMAT

def loadMAT(slice_filename,parameters_filename):
    '''
    Created to convert .mat files with the specific configuration used for the
    ECoG data of Newcastle Hospitals, and to create a DataObj.
    If you want to load another .mat file, use scipy.io.loadmat and create_DataObj.
    
    Parameters
    ----------
    slice_filename: str 
        Name of the slice (.mat) file 
    parameters_filename: str 
        Name of the parameters (.mat) file
    
    '''
    mat = sio.loadmat(parameters_filename, struct_as_record=False, squeeze_me=True)
    parameters = mat['parameters']
    ch_l = parameters.channels
    ch_labels = [str(x) for x in ch_l]
    sample_rate = parameters.sr
    f = sio.loadmat(slice_filename, struct_as_record=False, squeeze_me=True)
    Data = f['Data']
    time_vec = Data.time_vec
    signal = Data.raw.T
    amp_unit = r'$\mu V$'  # raw string avoids an invalid escape sequence warning
    Data = DataObj(signal,sample_rate,amp_unit,ch_labels,time_vec,[])
    return Data
Author: britodasilva, Project: pyhfo, Lines: 25, Source: i_functions.py


Example 11: load_simTB_data

def load_simTB_data(source_directory):
    """
    Load simTB data along with simulation info.
    """
    nifti_files = natural_sort(glob(path.join(source_directory, "*_DATA.nii")))
    sim_files = natural_sort(glob(path.join(source_directory, "*_SIM.mat")))
    if len(nifti_files) != len(sim_files):
        raise ValueError("Different number of DATA and SIM files found int %s"
                         % source_directory)
    assert len(nifti_files) > 0

    param_files = glob(path.join(source_directory, "*PARAMS.mat"))
    if len(param_files) != 1:
        raise ValueError("Exactly one param file needed, found %d in %s"
                         % (len(param_files), source_directory))
    params = tuple(io.loadmat(param_files[0])["sP"][0][0])

    sim_dict = {}
    for i, (nifti_file, sim_file) in enumerate(zip(nifti_files, sim_files)):
        assert "%03d" % (i + 1) in nifti_file
        assert "%03d" % (i + 1) in sim_file
        sims = io.loadmat(sim_file)
        tcs = sims["TC"].T
        sms = sims["SM"]
        sim_dict[i] = {"SM": sms, "TC": tcs}
    sim_dict["params"] = params

    data, labels, base = read_niftis(nifti_files)
    return data, labels, sim_dict
Author: ecastrow, Project: pl2mind, Lines: 29, Source: mri_utils.py


Example 12: TenTwentyDownslopeBPF

def TenTwentyDownslopeBPF(FullPath):
    TempClipData = spio.loadmat(FullPath)
    TempDataArray = TempClipData['data']
    TempDataArray = TempDataArray.transpose()

    Fsample = float(TempClipData['freq'])       #Sampling frequency
    dt = 1.0/Fsample                            #Time between samples
    TimeValues = np.arange(0.0, 1.0, dt)        #Construct ndarray of time values
    LastChan = int(TempDataArray.shape[1])      #Last channel number
    Channels = np.arange(0, LastChan,1)         #List of channel numbers
    
    FeatureOutput = np.zeros(LastChan)          #Initialize the output 

    # Read in the digital filter coefficients and place them in ndarrays
    FilterInfo=spio.loadmat('FilterSetTenTwentyDownslopeBPF.mat')

    FilterCoefI = FilterInfo['FilterCoefI'].flatten()
    FilterCoefQ = FilterInfo['FilterCoefQ'].flatten()

    # Calculate the feature values for each channel       
    for i in Channels:
        Iproduct = FilterCoefI*TempDataArray[:,i]
        Isum = np.sum(Iproduct)                 # sum over all samples, matching Qsum below
        
        Qproduct = FilterCoefQ*TempDataArray[:,i]
        Qsum=np.sum(Qproduct)
        
        FeatureOutput[i] = np.log( np.sqrt(Isum*Isum + Qsum*Qsum) )
        FeatureOutput[i] = bender(FeatureOutput[i], 4.0, 4.0)               #Limit to range of 0 to 1.  Second arg is mean, third is span      

    FeatureList = FeatureOutput.tolist()        # Convert ndarray to list.  The returned value will be appended to other values; this would be very inefficient with an ndarray

    #Return feature vector in form of a list
    return(FeatureList)
Author: alex-mc, Project: detection, Lines: 34, Source: FeatureExplorer.py


Example 13: read_dataset

def read_dataset( stimulus_pattern='stimulus_%d.mat', data_file='data.mat'):
    from scipy.io import loadmat
    data = loadmat(data_file)
    data = data['data']    
    spikes = data['spike_rate'][0][0]
    del data['spike_rate']
    data['rgc_ids']        = data['rgc_ids'][0][0][0]    
    data['cone_weights']   = data['cone_weights'][0][0]    
    data['cone_types']     = data['cone_types'][0][0].tolist()    
    data['cone_locations'] = data['cone_locations'][0][0]    
    data['rgc_locations']  = numpy.array([d[0][0] for d in data['rgc_locations'][0][0]])    
    data['rgc_types']      = dict((d[0][0],d[1][0].tolist()) 
                                   for d in filter( lambda d : len( d[0] )>0 , [d[0][0] 
                                   for d in data['cell_types'][0][0][0]] ))
    try:
        i = 0
        N_timebins = 0
        while 1:
            data['stimulus'] = loadmat(stimulus_pattern % i)['cone_input'].T
            data['spikes'] = spikes[N_timebins:N_timebins+data['stimulus'].shape[1]]
            N_timebins += data['stimulus'].shape[1]
            i += 1
            yield data
    except IOError:
        return  # no more stimulus files; returning ends the generator (PEP 479 forbids raising StopIteration)
Author: kolia, Project: subunits, Lines: 25, Source: simulate_retina.py


Example 14: save_crop_images_and_joints

def save_crop_images_and_joints():
    training_indices = loadmat('data/FLIC-full/tr_plus_indices.mat')
    training_indices = training_indices['tr_plus_indices'].flatten()

    examples = loadmat('data/FLIC-full/examples.mat')
    examples = examples['examples'][0]
    joint_ids = ['lsho', 'lelb', 'lwri', 'rsho', 'relb', 'rwri', 'lhip',
                 'lkne', 'lank', 'rhip', 'rkne', 'rank', 'leye', 'reye',
                 'lear', 'rear', 'nose', 'msho', 'mhip', 'mear', 'mtorso',
                 'mluarm', 'mruarm', 'mllarm', 'mrlarm', 'mluleg', 'mruleg',
                 'mllleg', 'mrlleg']

    available = joint_ids[:8]
    available.extend(joint_ids[12:14])
    available.extend([joint_ids[16]])

    target_joints = ['lsho', 'lelb', 'lwri',
                     'leye', 'reye', 'nose',
                     'rsho', 'relb', 'rwri']

    fp_train = open('data/FLIC-full/train_joints.csv', 'w')
    fp_test = open('data/FLIC-full/test_joints.csv', 'w')
    for i, example in enumerate(examples):
        joint = example[2].T
        joint = dict(zip(joint_ids, joint))
        fname = example[3][0]
        joint = get_joint_list(joint)
        msg = '{},{}'.format(fname, ','.join([str(j) for j in joint.tolist()]))
        if i in training_indices:
            print(msg, file=fp_train)
        else:
            print(msg, file=fp_test)
    fp_train.close()
    fp_test.close()
Author: cybermatt, Project: deeppose, Lines: 32, Source: flic_dataset.py


Example 15: loadfile_hfreud

def loadfile_hfreud(filename, alpha, rho, n):

    filename = os.path.join(data_directory, filename)
    try:
        data = loadmat(filename)['data'].flatten()
    except IOError:
        data = np.zeros(0)  # file missing; it will be generated by the MATLAB call below

    if data.size < n+1:
        # Run matlab to generate/populate file

        print("Calling matlab....")
        cwd = os.path.dirname(os.path.abspath(__file__))

        command  = "cd(" + "'" + cwd + "'); cd ..; "

        command += "data = load_fhfreud({:d}, {:.4f}, {:.4f}); ".format(n, alpha, rho)
        command += "data = fidistinv_hfreud_setup({:d}, {:.4f}, {:.4f}, data); ".format(n, alpha, rho)
        command += "save_fhfreud(data, {:.4f}, {:.4f}); ".format(alpha, rho)

        command += "exit"
        print(command)

        subprocess.call([matlab_binary, "-nodisplay", "-r", command])
        print("...finished")

        data = loadmat(filename)['data'].flatten()

    return data
Author: SCIInstitute, Project: FwdInvToolkit, Lines: 29, Source: fidist.py


Example 16: get_data_1

 def get_data_1(self,numero,radical,suffix='_func_data'):
     fbd=loadmat('./CEC05_files/fbias_data.mat')  # the f_bias dictionary
     fb=fbd['f_bias'][0,:]                        # the f_bias array
     fbias=fb[numero-1]                             # the desired f_bias for this function
     od=loadmat('./CEC05_files/'+radical+suffix+'.mat')    # the shift data dictionary
     o=od['o'][0,:]                                           # the shift data array
     return fbias,o
Author: stromatolith, Project: peabox, Lines: 7, Source: peabox_testfuncs.py


Example 17: __init__

    def __init__(self, db_path='', use_extra=True):
        Dataset.__init__(self)
        print("Loading files")
        self.data_dims = [32, 32, 3]
        self.range = [0.0, 1.0]
        self.name = "svhn"
        self.train_file = os.path.join(db_path, "train_32x32.mat")
        self.extra_file = os.path.join(db_path, "extra_32x32.mat")
        self.test_file = os.path.join(db_path, "test_32x32.mat")
        if use_extra:
            self.train_file = self.extra_file

        # Load training images
        if os.path.isfile(self.train_file):
            mat = sio.loadmat(self.train_file)
            self.train_image = mat['X'].astype(np.float32)
            self.train_label = mat['y']
            self.train_image = np.clip(self.train_image / 255.0, a_min=0.0, a_max=1.0)
        else:
            print("SVHN dataset train files not found")
            exit(-1)
        self.train_batch_ptr = 0
        self.train_size = self.train_image.shape[-1]

        if os.path.isfile(self.test_file):
            mat = sio.loadmat(self.test_file)
            self.test_image = mat['X'].astype(np.float32)
            self.test_label = mat['y']
            self.test_image = np.clip(self.test_image / 255.0, a_min=0.0, a_max=1.0)
        else:
            print("SVHN dataset test files not found")
            exit(-1)
        self.test_batch_ptr = 0
        self.test_size = self.test_image.shape[-1]
        print("SVHN loaded into memory")
Author: xiaoyulu2014, Project: Sequential-Variational-Autoencoder, Lines: 35, Source: dataset_svhn.py


Example 18: sentCombMat_add

def sentCombMat_add(w1,w2,w3):
    root = u"I:/数据/word12585relation30/rel_30_ref_TFIDF/ref_800_TFIDF/rel_svd/file_word_lus/word_mat_latent_324/"
    wordList = getWordList()
    w1Mat = sio.loadmat(root+u"l_"+w1)[w1]
    w2Mat = sio.loadmat(root+u"l_"+w2)[w2]
    w3Mat = sio.loadmat(root+u"l_"+w3)[w3]
    return w1Mat+w2Mat+w3Mat
Author: benniaogithub, Project: RBDSM, Lines: 7, Source: RBDSMRow.py


Example 19: unify_newLabel_to_existing

def unify_newLabel_to_existing(matfiles, LabelName, IDName):
    flab     = [] #final labels
    ftrjID   = [] #final trjID

    for matidx in range(len(matfiles)): 
        L1 = loadmat(matfiles[matidx])[LabelName][0]
        # L1 = L1+1 # class label starts from 1 instead of 0
        M1 = loadmat(matfiles[matidx])[IDName][0]

        if len(flab)>0:
            Labelnowmax = max(flab)
            L1 = L1+Labelnowmax+1
            commonidx = np.intersect1d(M1,ftrjID)  #trajectories existing in both 2 trucations

            print('flab : {0}, new labels : {1}, common term : {2}'.format(
                len(np.unique(flab)), len(np.unique(L1)), len(commonidx)))
            for i in commonidx:
                labelnew = L1[M1==i][0]
                labelnow = np.array(flab)[np.array(ftrjID) == i][0]
                idx1  = np.where(L1==labelnew)[0]
                L1[idx1] = labelnow  ## keep the first appearing label

        flab[:]   = flab +list(L1)
        ftrjID[:] = ftrjID + list(M1)
    
    ftrjID, indices= np.unique(ftrjID,return_index=True)
    flab = np.array(flab)[indices] 

    return flab, ftrjID
Author: ChengeLi, Project: VehicleTracking, Lines: 28, Source: unify_label_func.py


Example 20: demo_lab1

def demo_lab1():

    # this part will load the dataset
    one_train=sio.loadmat('one_train.mat')
    seven_train=sio.loadmat('seven_train.mat')
    
    TRAIN_ONES=one_train['one_train']
    TRAIN_SEVENS=seven_train['seven_train']
    TRAIN=np.concatenate((TRAIN_ONES, TRAIN_SEVENS), axis=0)
    
    LABEL_ONES=np.ones((300,1))
    LABEL_SEVENS=np.ones((300,1))*-1
    LABEL=np.concatenate((LABEL_ONES, LABEL_SEVENS), axis=0)
 
    #Trivial Part
    YOURNAME = ...          # eg. 'john_smith' pay attention to the underscore
          
    #Challenging Part
    N_SPLIT =  ...                                 # eg. 5     see 'kcv'
    SPLIT_TYPE = ...                        # eg. 'Sequential' see 'kcv'
    KERNEL = ...                              # eg. 'Linear' see 'KernelMatrix'
    KERNEL_PARAMETER = ...           #fix it manually or by autosigma for example with autosigma(TRAIN,5). see 'KernelMatrix' 'kcv' and 'autosigma'
    TRANGE =  ...                      # eg. np.logspace(-3, 3, 7) or np.linspace(0.1, 10, 10)

    t_kcv_idx, avg_err_kcv = kcv(KERNEL, KERNEL_PARAMETER, 'Reg. Least Squared', TRANGE, TRAIN, LABEL, N_SPLIT, 'Classification', SPLIT_TYPE)
    save_challenge_1(YOURNAME, TRANGE[t_kcv_idx], KERNEL, KERNEL_PARAMETER, avg_err_kcv[0][t_kcv_idx])

    return
Author: sigurdlekve, Project: Summer-Project-Machine-Learning, Lines: 28, Source: demo_lab1.py



Note: The scipy.io.loadmat examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or distribution should follow the corresponding project licenses. Please do not reproduce without permission.

