
Python numpy.in1d Function Code Examples


This article collects typical usage examples of Python's numpy.in1d function. If you have been wondering what exactly in1d does, how to call it, or what real-world usage looks like, the hand-picked examples here should help.



The following presents 20 code examples of the in1d function, sorted by popularity by default.
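
Before diving into the examples, here is a minimal sketch of the basic semantics: np.in1d(a, b) tests whether each element of a also appears in b and returns a boolean mask over a. Note that NumPy 2.0 deprecates np.in1d in favor of np.isin (available since NumPy 1.13), which computes the same mask but also preserves the shape of multi-dimensional input.

import numpy as np

a = np.array([1, 2, 3, 4, 5])
b = np.array([2, 4, 9])

mask = np.in1d(a, b)        # element-wise membership test
print(mask)                 # [False  True False  True False]
print(a[mask])              # [2 4]

# invert=True flips the test: keep the elements NOT found in b.
print(a[np.in1d(a, b, invert=True)])   # [1 3 5]

# np.isin is the modern, shape-preserving equivalent.
print(np.isin(a, b))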

Example 1: get_destination_pathline_data

    def get_destination_pathline_data(self, dest_cells):
        """Get pathline data for set of destination cells.

        Parameters
        ----------
        dest_cells : list or array of tuples
            (k, i, j) of each destination cell (zero-based)

        Returns
        -------
        pthldest : np.recarray
            Slice of pathline data array (e.g. PathlineFile._data)
            containing only pathlines with final k,i,j in dest_cells.
        """
        ra = self._data.view(np.recarray)
        # find the intersection of endpoints and dest_cells
        # convert dest_cells to same dtype for comparison
        raslice = ra[['k', 'i', 'j']]
        dest_cells = np.array(dest_cells, dtype=raslice.dtype)
        inds = np.in1d(raslice, dest_cells)
        epdest = ra[inds].copy().view(np.recarray)

        # use particle ids to get the rest of the paths
        inds = np.in1d(ra.particleid, epdest.particleid)
        pthldest = ra[inds].copy()
        pthldest.sort(order=['particleid', 'time'])
        return pthldest
Developer: emorway-usgs | Project: flopy | Lines: 27 | Source: modpathfile.py
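
A side note on Example 1: the k/i/j match works because np.in1d compares structured arrays record by record, so a multi-field slice such as ra[['k', 'i', 'j']] can be tested against destination cells converted to the same dtype. A minimal standalone sketch of that pattern (the array contents below are made up for illustration):

import numpy as np

# Structured array standing in for the (k, i, j) indices of each endpoint.
cells = np.array([(0, 1, 2), (0, 3, 4), (1, 1, 2)],
                 dtype=[('k', int), ('i', int), ('j', int)])

# Destination cells converted to the same structured dtype, so that
# np.in1d compares whole (k, i, j) records rather than flat integers.
dest = np.array([(0, 1, 2), (1, 1, 2)], dtype=cells.dtype)

print(np.in1d(cells, dest))   # [ True False  True]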


Example 2: test_match_mask

def test_match_mask():
    msk = np.array([ True, False,  True, False, False], dtype=bool)
    idx = np.array([0, 2])
    arr = np.array([1,2,3,4,5]) 
    values = np.array([1,3])
    assert (num.match_mask(arr, values) == msk).all()
    ret = num.match_mask(arr, values, fullout=True)
    assert (ret[0] == msk).all()
    assert (ret[1] == idx).all()
    assert (arr[msk] == np.array([1, 3])).all()
    assert (ret[0] == np.in1d(arr, values)).all()
    
    # handle cases where len(values) > len(arr) and values not contained in arr
    values = np.array([1,3,3,3,7,9,-3,-4,-5])
    ret = num.match_mask(arr, values, fullout=True)
    assert (ret[0] == msk).all()
    assert (ret[1] == idx).all()
    assert (ret[0] == np.in1d(arr, values)).all()

    # float values: use eps
    ret = num.match_mask(arr+0.1, values, fullout=True, eps=0.2)
    assert (ret[0] == msk).all()
    assert (ret[1] == idx).all()
    
    msk = num.match_mask(np.array([1,2]), np.array([3,4])) 
    assert (msk == np.array([False]*2)).all()
Developer: elcorto | Project: pwtools | Lines: 26 | Source: test_match_mask.py


Example 3: generateBatch

def generateBatch(curinds, elements, atomArraysAll, nAtomsDict,
                  atomsIndsReverse, atomArraysAllDerivs):
    """This method generates batches from a large dataset using a set of
    selected indices curinds."""
    # inputs:
    atomArraysFinal = {}
    atomArraysDerivsFinal = {}
    for element in elements:
        validKeys = np.in1d(atomsIndsReverse[element], curinds)
        if len(validKeys) > 0:
            atomArraysFinal[element] = atomArraysAll[element][validKeys]
            if len(atomArraysAllDerivs[element]) > 0:
                atomArraysDerivsFinal[element] = atomArraysAllDerivs[
                    element][validKeys, :, :, :]
            else:
                atomArraysDerivsFinal[element] = []
        else:
            atomArraysFinal[element] = []
            atomArraysDerivsFinal[element] = []

    atomInds = {}
    for element in elements:
        validKeys = np.in1d(atomsIndsReverse[element], curinds)
        if len(validKeys) > 0:
            atomIndsTemp = np.sum(atomsIndsReverse[element][validKeys], 1)
            atomInds[element] = atomIndsTemp * 0.
            for i in range(len(curinds)):
                atomInds[element][atomIndsTemp == curinds[i]] = i
        else:
            atomInds[element] = []

    return atomArraysFinal, atomArraysDerivsFinal, atomInds
Developer: AkshayTharval | Project: Atomistic-Machine-Learning-Potentials | Lines: 32 | Source: tfAmpNN.py


Example 4: Check_Result

    def Check_Result(self, Str_DataName, Int_DataNum, List_PeakIdx):
        Array_MyAnswer = np.array(List_PeakIdx)
        Array_MyAnswer = np.unique(Array_MyAnswer)
        Array_Anno = self.Load_Answer(Str_DataName, Int_DataNum)


        Int_TP = 0
        Int_FP = 0
        Int_FN = 0

        Int_BufferSize = 2
        for myanswer in Array_MyAnswer:
            Array_BufferMyAnswer = range(myanswer-Int_BufferSize, myanswer + Int_BufferSize)
            Array_BufferMyAnswer = np.array(Array_BufferMyAnswer)
            Array_InorNOT = np.in1d(Array_BufferMyAnswer, Array_Anno)
            if True in Array_InorNOT:
                Int_TP += 1
            elif True not in Array_InorNOT:
                Int_FP += 1

        for trueanswer in Array_Anno:
            Array_BufferMyAnswer = range(trueanswer - Int_BufferSize, trueanswer + Int_BufferSize)
            Array_BufferMyAnswer = np.array(Array_BufferMyAnswer)
            Array_InorNOT = np.in1d(Array_BufferMyAnswer, Array_MyAnswer)
            if True not in Array_InorNOT:
                Int_FN += 1

        Flt_Se = float(Int_TP) / float(Int_TP + Int_FN)
        Flt_PP = float(Int_TP) / float(Int_TP + Int_FP)
        return Str_DataName, Int_DataNum, Flt_Se, Flt_PP
Developer: HansJung | Project: PPG_Reexperiment | Lines: 30 | Source: CompeteMethod_SSF.py


Example 5: _do_one_inner_iteration

    def _do_one_inner_iteration(self, inv_val):
        r"""
        Determine which throats are invaded at a given applied capillary
        pressure.

        """
        # Generate a tlist containing boolean values for throat state
        Tinvaded = self['throat.entry_pressure'] <= inv_val
        # Find all pores that can be invaded at specified pressure
        [pclusters, tclusters] = self._net.find_clusters2(mask=Tinvaded,
                                                          t_labels=True)
        if self._AL:
            # Identify clusters connected to invasion sites
            inv_clusters = sp.unique(pclusters[self['pore.inlets']])
        else:
            # All clusters are invasion sites
            inv_clusters = pclusters
        inv_clusters = inv_clusters[inv_clusters >= 0]
        # Find pores on the invading clusters
        pmask = np.in1d(pclusters, inv_clusters)
        # Store current applied pressure in newly invaded pores
        pinds = (self['pore.inv_Pc'] == sp.inf) * (pmask)
        self['pore.inv_Pc'][pinds] = inv_val
        # Find throats on the invading clusters
        tmask = np.in1d(tclusters, inv_clusters)
        # Store current applied pressure in newly invaded throats
        tinds = (self['throat.inv_Pc'] == sp.inf) * (tmask)
        self['throat.inv_Pc'][tinds] = inv_val
        # Store total network saturation
        tsat = sp.sum(self._net['throat.volume'][self['throat.inv_Pc'] <= inv_val])
        psat = sp.sum(self._net['pore.volume'][self['pore.inv_Pc'] <= inv_val])
        total = sp.sum(self._net['throat.volume']) + sp.sum(self._net['pore.volume'])
        self['pore.inv_sat'][pinds] = (tsat + psat)/total
        self['throat.inv_sat'][tinds] = (tsat + psat)/total
Developer: MichaelHoeh | Project: OpenPNM | Lines: 34 | Source: __OrdinaryPercolation__.py


Example 6: Pred_EOF_CCA

    def Pred_EOF_CCA(self):
        '''
        Prediction module. Needs further refinement; many parts require deeper work.
        '''

        I_Year = self.I_Year
        I_YearP = self.I_YearP
        print('I_Year=',I_Year)
        print('I_YearP=',I_YearP)
        #print(self.Field[:,0,0])
        #print(self.FieldP[:,0,0])

        #sys.exit(0)

        Region = self.Region[:,np.in1d(I_Year,I_YearP)]
        print('I_YearR=',I_Year[np.in1d(I_Year,I_YearP)])

        FieldP = self.FieldP[:,self.p_np3]  # equal to the filtered field file
        FieldP = FieldP.T

        FieldP2 = FieldP[:,np.in1d(I_YearP,I_Year)]

        print(FieldP2.shape,np.atleast_2d(FieldP[:,-1]).T.shape)

        print('FieldP.shape = ',FieldP.shape)
        print('FieldP2.shape = ',FieldP2.shape)
        print('Region.shape = ',Region.shape)
        self.X_Pre = dclim.dpre_eof_cca(FieldP2,Region,np.atleast_2d(FieldP[:,-1]).T,4)
        print(self.X_Pre.shape)

        self.out = np.hstack((self.StaLatLon,self.X_Pre))
        
        print('Pred Year is ',I_YearP[-1])
        np.savetxt('out.txt',self.out,fmt='%5d %7.2f %7.2f %7.2f',delimiter=' ')
Developer: bazingaedwaqrd | Project: MODES | Lines: 34 | Source: climdiag.py


Example 7: AM_vector_strength

def AM_vector_strength(spikeTimestamps, eventOnsetTimes, behavData, timeRange):

    currentFreq = behavData['currentFreq']
    possibleFreq = np.unique(currentFreq)

    vs_array=np.array([])
    ral_array=np.array([])
    pval_array = np.array([])
    timeRange = [0, 0.5]
    spikeTimesFromEventOnset, trialIndexForEachSpike, indexLimitsEachTrial = spikesanalysis.eventlocked_spiketimes(
        spikeTimestamps, eventOnsetTimes, timeRange)

    for freq in possibleFreq:

        select = np.flatnonzero(currentFreq==freq)
        selectspikes = spikeTimesFromEventOnset[np.in1d(trialIndexForEachSpike, select)]
        selectinds = trialIndexForEachSpike[np.in1d(trialIndexForEachSpike, select)]
        squeezedinds=np.array([list(np.unique(selectinds)).index(x) for x in selectinds])

        spikesAfterFirstCycle = selectspikes[selectspikes>(1.0/freq)]
        indsAfterFirstCycle = selectinds[selectspikes>(1.0/freq)]

        strength, phase = vectorstrength(spikesAfterFirstCycle, 1.0/freq)
        vs_array=np.concatenate((vs_array, np.array([strength])))

        #Compute the pval for the vector strength
        radsPerSec=freq*2*np.pi
        spikeRads = (spikesAfterFirstCycle*radsPerSec)%(2*np.pi)
        ral_test = circstats.rayleigh_test(spikeRads)
        pval = np.array([ral_test['pvalue']])
        ral =np.array([2*len(spikesAfterFirstCycle)*(strength**2)]) 
        pval_array = np.concatenate((pval_array, pval))
        ral_array = np.concatenate((ral_array, ral))

    return vs_array, pval_array, ral_array
Developer: sjara | Project: jaratoolbox | Lines: 35 | Source: am_funcs.py


Example 8: sim_top_doc

    def sim_top_doc(self, topic_or_topics, weights=[], filter_words=[],
                    print_len=10, as_strings=True, label_fn=_def_label_fn_, 
                    filter_nan=True):
        """
        """
        d_arr = _sim_top_doc_(self.corpus, self.model.doc_top, topic_or_topics, 
                              self.model.context_type, weights=weights, 
                              norms=self._doc_norms, print_len=print_len,
                              as_strings=False, label_fn=label_fn, 
                              filter_nan=filter_nan)
        
        topics = _res_top_type_(topic_or_topics)

        if len(filter_words) > 0:
            white = set()
            for w in filter_words:
                l = self.word_topics(w, as_strings=False)
                d = l['i'][np.in1d(l['value'], topics)]
                white.update(d)
            
            d_arr = d_arr[(np.in1d(d_arr['i'], white))]

        if as_strings:
            md = self.corpus.view_metadata(self.model.context_type)
            docs = label_fn(md)
            d_arr = _map_strarr_(d_arr, docs, k='i', new_k='doc')

        return d_arr
Developer: argdundee | Project: DbyD | Lines: 28 | Source: ldagibbsviewer.py


Example 9: compute_mAP

def compute_mAP(index, good_index, junk_index):
    ap = 0
    cmc = torch.IntTensor(len(index)).zero_()
    if good_index.size==0:   # if empty
        cmc[0] = -1
        return ap,cmc

    # remove junk_index
    mask = np.in1d(index, junk_index, invert=True)
    index = index[mask]

    # find good_index index
    ngood = len(good_index)
    mask = np.in1d(index, good_index)
    rows_good = np.argwhere(mask==True)
    rows_good = rows_good.flatten()
    
    cmc[rows_good[0]:] = 1
    for i in range(ngood):
        d_recall = 1.0/ngood
        precision = (i+1)*1.0/(rows_good[i]+1)
        if rows_good[i]!=0:
            old_precision = i*1.0/rows_good[i]
        else:
            old_precision=1.0
        ap = ap + d_recall*(old_precision + precision)/2

    return ap, cmc
Developer: codes-kzhan | Project: Person_reID_baseline_pytorch | Lines: 28 | Source: evaluate.py


Example 10: find_matches

def find_matches(mock, obs, opts):

    """
    Function to find matching galaxy members between mock haloes
    and observed clusters.
    """
    
    obs = obs[np.in1d(obs.mem_id, mock.m_mem_id, assume_unique = True)]
    
    mock = mock[np.in1d(mock.m_mem_id, obs.mem_id, assume_unique = True)]
    
    merged = np.lib.recfunctions.merge_arrays([obs, mock], flatten = True,
                                              usemask = False)

    clusters = []
    count = 0

    for id_val in np.unique(obs.id):
        clusters.append(Clusterx(count))
         
        for member in merged[obs.id == id_val]:
            clusters[count].add_mem(member)
            
        count += 1
    
    for cluster in clusters:
        cluster.props()
        cluster.halo_count()
        cluster.mass_hist(opts.mass_bin)

    return clusters
Developer: EiffL | Project: python_lib | Lines: 31 | Source: pymemmatch_match.py
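
Example 10 passes assume_unique=True, which deserves a remark: it lets np.in1d skip its internal deduplication step, which is faster, but the result is only reliable when both arrays really contain no duplicates. A small sketch (the ID values are made up):

import numpy as np

mem_id = np.array([10, 20, 30, 40])
m_mem_id = np.array([20, 40, 50])

# Safe here because both ID arrays are unique; with duplicated inputs
# the output of assume_unique=True is undefined.
print(np.in1d(mem_id, m_mem_id, assume_unique=True))   # [False  True False  True]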


Example 11: map_to_external_reference

    def map_to_external_reference(self, roi, refname='HXB2', in_patient=True):
        '''
        return a map of positions in the patient to a reference genomewide
        Args:
            roi  --  region of interest given as a string or a tuple (start, end)
            refname --  reference to compare to
            in_patient -- specifies whether the (start, end) refers to reference or patient coordinates
        returns:
            a (len(roi), 3) array with reference coordinates in first column, 
                                        patient coordinates in second 
                                        roi coordinates in third column
        '''
        from .filenames import get_coordinate_map_filename
        coo_fn = get_coordinate_map_filename(self.name, 'genomewide', refname=refname)
        genomewide_map = np.loadtxt(coo_fn, dtype=int)

        if roi in self.annotation:
            roi_pos = np.array([x for x in self.annotation[roi]], dtype = int)
            ind = np.in1d(genomewide_map[:,1], roi_pos)
            roi_indices = np.in1d(roi_pos, genomewide_map[:,1]).nonzero()[0]
            return np.vstack((genomewide_map[ind].T, [roi_indices])).T

        elif roi == "genomewide":
            return np.vstack((genomewide_map.T, [genomewide_map[:,1]])).T            

        else:
            try:
                start, stop = map(int, roi)
                start_ind = np.searchsorted(genomewide_map[:,in_patient], start)
                stop_ind = np.searchsorted(genomewide_map[:,in_patient], stop)
                return np.vstack((genomewide_map[start_ind:stop_ind].T,
                                  [genomewide_map[start_ind:stop_ind, in_patient] - start])).T
            except:
                raise ValueError("ROI not understood")
Developer: vpuller | Project: HIVEVO_access | Lines: 34 | Source: patients.py


Example 12: _limit_features

    def _limit_features(self, csr_matrix, low=2, high=None, limit=None):
        """
        Lower bound on features, so that > n docs much contain the feature
        """
        
        assert isinstance(csr_matrix, scipy.sparse.csr_matrix) # won't work with other sparse matrices
        # (most can be converted with .tocsr() method)

        indices_to_remove = np.where(np.asarray(csr_matrix.sum(axis=0) < low)[0])[0]
        # csr_matrix.sum(axis=0) < low: returns Boolean matrix where total features nums < low
        # np.asarray: converts np.matrix to np.array
        # [0]: since the array of interest is the first (and only) item in an outer array
        # np.where: to go from True/False to indices of Trues

        
        data_filter = np.in1d(csr_matrix.indices, indices_to_remove)
        # gets boolean array, where the columns of any non-zero values are to be removed
        # (i.e. their index is in the indices_to_remove array)

        # following three lines for info/debugging purposes
        # to show how many unique features are being removed
        num_total_features = len(np.unique(csr_matrix.indices)) 
        num_features_to_remove = np.sum(np.in1d(indices_to_remove, np.unique(csr_matrix.indices)))
        print "%d/%d features will be removed" % (num_features_to_remove, num_total_features)

        csr_matrix.data[data_filter] = 0
        # set the values to be removed to 0 to start with

        csr_matrix.eliminate_zeros()
        # then run the np optimised routine to delete those 0's (and free a little memory)
        # NB zeros are superfluous since a sparse matrix

        return csr_matrix
Developer: ijmarshall | Project: cochrane-nlp | Lines: 33 | Source: modvec2.py


Example 13: check_filter_labels

def check_filter_labels(inverse=False):

    # create a feature set
    fs, _ = make_classification_data(num_examples=1000,
                                     num_features=4,
                                     num_labels=5,
                                     train_test_ratio=1.0)

    # keep just the instances with 0, 1 and 2 labels
    labels_to_filter = [0, 1, 2]

    # do the actual filtering
    fs.filter(labels=labels_to_filter, inverse=inverse)

    # make sure that we removed the right things
    if inverse:
        ids_kept = fs.ids[np.where(np.logical_not(np.in1d(fs.labels,
                                                          labels_to_filter)))]
    else:
        ids_kept = fs.ids[np.where(np.in1d(fs.labels, labels_to_filter))]

    assert_array_equal(fs.ids, np.array(ids_kept))

    # make sure that number of ids, labels and features are the same
    eq_(fs.ids.shape[0], fs.labels.shape[0])
    eq_(fs.labels.shape[0], fs.features.shape[0])
Developer: BK-University | Project: skll | Lines: 26 | Source: test_featureset.py
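
Example 13 negates the membership mask with np.logical_not; np.in1d also accepts invert=True, which computes the same mask in one call and, per the NumPy documentation, is faster than negating afterwards. A quick sketch (label values made up):

import numpy as np

labels = np.array([0, 1, 2, 3, 4, 0, 1])
labels_to_filter = [0, 1, 2]

m1 = np.logical_not(np.in1d(labels, labels_to_filter))
m2 = np.in1d(labels, labels_to_filter, invert=True)
assert (m1 == m2).all()   # identical masks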


Example 14: untie

def untie(a,b):
    """
    
    Parameters
    ----------
    a
    b
    Returns
    -------
    boolean 
    a
    r 

    """
    la    = len(a)
    lb    = len(b)
    u     = np.intersect1d(a,b)
    lu    = len(u)
    #print lu
    #print min(la,lb)/2
    if lu >= min(la,lb)/2:
        # segment of a not shared with b
        aa    = a[~np.in1d(a,u)]
        # segment of b not shared with a
        bb    = b[~np.in1d(b,u)]
        r     = np.hstack((aa,bb))
        if la<lb:
            return(True,a,r)
        else:
            return(True,b,r)
    else:
        return(False,-1,-1)
Developer: houidhek | Project: pylayers | Lines: 32 | Source: pyutil.py


Example 15: filter_effects

    def filter_effects(self):
        """
        Merge effects and data, and flip effect alleles 
        """
        effect_positions=self.effects[["CHR", "POS"]]
        data_positions=self.data.snp[["CHR", "POS"]]

        effect_include=np.in1d(effect_positions, data_positions)
        data_include=np.in1d(data_positions, effect_positions)

        self.data.filter_snps(data_include)
        self.effects=self.effects[effect_include]
        # Just give up and convert to float. I have no idea why int doesn't work here
        # but it's something to do with the fact that you can't have None as a numpy int
        # whereas float gets converted to nan.
        tmp_data=nprec.append_fields(self.data.snp, "GENO", None, dtypes=[(float,self.data.geno.shape[1])],usemask=False)
        tmp_data["GENO"]=self.data.geno
        self.effects=nprec.join_by(["CHR", "POS"], self.effects, tmp_data, usemask=False, jointype="inner")
        flipped=0
        removed=0
        for rec in self.effects:
            if rec["EFFECT"]==rec["REF"] and rec["OTHER"]==rec["ALT"]:
                pass
            elif rec["OTHER"]==rec["REF"] and rec["EFFECT"]==rec["ALT"]:
                flipped+=1
                rec["OTHER"]=rec["ALT"]
                rec["EFFECT"]=rec["REF"]
                rec["BETA"]=-rec["BETA"]
            else:
                removed+=1
                rec["EFFECT"]=rec["OTHER"]="N"

        self.effects=self.effects[self.effects["EFFECT"]!="N"]
        print( "Removed "+str(removed)+" non-matching alleles",file=sys.stderr)
        print( "Flipped "+str(flipped)+" alleles",file=sys.stderr)
Developer: mathii | Project: spindrift | Lines: 35 | Source: predictor.py


Example 16: classifyPerCountry

def classifyPerCountry(T,V,Y,Y_country_hat):
	Y_country = np.floor(Y / 1000)
	print "\nClassifying per Country"
	Y_city = Y 
	country_codes = list(set(Y_country))
	nCountryCodes = len(country_codes)
	Y_hat = np.zeros(len(Y_country_hat))
	for i in xrange(nCountryCodes):
		print '%s\r' % ' '*20,
		print '   ' , i*100/nCountryCodes,
#		clf = MultinomialNB(0.5)
		clf = SVC()
		country_idx = np.in1d(Y_country,country_codes[i])
		country_idx_sparse = country_idx.nonzero()[0]
		T_country = T[country_idx_sparse,:]
		Y_cityPerCountry = Y_city[country_idx]
		unique_Y_cityPerCountry=list(set(Y_cityPerCountry))
		predict_idx = np.in1d(Y_country_hat,country_codes[i])
		predict_idx_sparse = predict_idx.nonzero()[0]
		if len(unique_Y_cityPerCountry)==1 :
			Y_hat[predict_idx] = unique_Y_cityPerCountry
			continue
		clf.fit(T_country,Y_cityPerCountry)
		if sum(predict_idx) > 1:
			Y_cityPerCountry_hat = clf.predict(V[predict_idx_sparse,:])
			Y_hat[predict_idx] = Y_cityPerCountry_hat
	print "\n"
	return Y_hat
Developer: swook | Project: KungFuLearning | Lines: 28 | Source: train.py


Example 17: test_group_shuffle_split

def test_group_shuffle_split():
    for groups_i in test_groups:
        X = y = np.ones(len(groups_i))
        n_splits = 6
        test_size = 1./3
        slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0)

        # Make sure the repr works
        repr(slo)

        # Test that the length is correct
        assert_equal(slo.get_n_splits(X, y, groups=groups_i), n_splits)

        l_unique = np.unique(groups_i)
        l = np.asarray(groups_i)

        for train, test in slo.split(X, y, groups=groups_i):
            # First test: no train group is in the test set and vice versa
            l_train_unique = np.unique(l[train])
            l_test_unique = np.unique(l[test])
            assert_false(np.any(np.in1d(l[train], l_test_unique)))
            assert_false(np.any(np.in1d(l[test], l_train_unique)))

            # Second test: train and test add up to all the data
            assert_equal(l[train].size + l[test].size, l.size)

            # Third test: train and test are disjoint
            assert_array_equal(np.intersect1d(train, test), [])

            # Fourth test:
            # unique train and test groups are correct, +- 1 for rounding error
            assert_true(abs(len(l_test_unique) -
                            round(test_size * len(l_unique))) <= 1)
            assert_true(abs(len(l_train_unique) -
                            round((1.0 - test_size) * len(l_unique))) <= 1)
Developer: alexandercbooth | Project: scikit-learn | Lines: 35 | Source: test_split.py


Example 18: make_lineup

def make_lineup(pa_transitions, non_pa_transitions, by_batting_order = True):
    #If by_batting_order is false, grab by position instead
    constructed_lineup = []
    for lineup_spot in range(1, num_lineup_spots+1):
        final_pa_transitions = []
        final_non_pa_transitions = []
        spot_to_take = lineup_spot if by_batting_order else batting_order_positions_1[lineup_spot - 1]
        print (lineup_spot, spot_to_take)
        current_pa_transitions = pa_transitions[np.in1d(pa_transitions[:, 0], spot_to_take)]
        print(current_pa_transitions)
        current_non_pa_transitions = non_pa_transitions[np.in1d(non_pa_transitions[:, 0], spot_to_take)]
        for start_state in range(0,num_start_states):
            pa_row = [0] * num_end_states
            non_pa_row = [0] * num_end_states
            pa_for_start_state = current_pa_transitions[np.in1d(current_pa_transitions[:, 1], start_state)]
            non_pa_for_start_state = current_non_pa_transitions[np.in1d(current_non_pa_transitions[:, 1], start_state)]
            for row in pa_for_start_state:
                pa_row[row[2]] = row[3]
            final_pa_transitions.append(pa_row)
            for row in non_pa_for_start_state:
                non_pa_row[row[2]] = row[3]
            final_non_pa_transitions.append(non_pa_row)
        #print(final_pa_transitions)
        constructed_lineup.append(LineupSpot(final_pa_transitions, final_non_pa_transitions))
    return constructed_lineup
Developer: crglaser | Project: baseball_markov | Lines: 25 | Source: Simulator.py


Example 19: generate_throats

    def generate_throats(self):
        r"""
        Generate the throats (connections, numbering and types)
        """
        self._logger.info("generate_throats: Define connections between pores")

        img = self._net_img
        [Nx, Ny, Nz] = np.shape(img)
        Np = Nx*Ny*Nz
        ind = np.arange(0,Np)

        #Generate throats based on pattern of the adjacency matrix
        tpore1_1 = ind[(ind%Nx)<(Nx-1)]
        tpore2_1 = tpore1_1 + 1
        tpore1_2 = ind[(ind%(Nx*Ny))<(Nx*(Ny-1))]
        tpore2_2 = tpore1_2 + Nx
        tpore1_3 = ind[(ind%Np)<(Nx*Ny*(Nz-1))]
        tpore2_3 = tpore1_3 + Nx*Ny
        tpore1 = np.hstack((tpore1_1,tpore1_2,tpore1_3))
        tpore2 = np.hstack((tpore2_1,tpore2_2,tpore2_3))
        connections = np.vstack((tpore1,tpore2)).T
        connections = connections[np.lexsort((connections[:, 1], connections[:, 0]))]

        #Remove throats to non-active pores
        img_ind = np.ravel_multi_index(np.nonzero(img), dims=np.shape(img), order='F')
        temp0 = np.in1d(connections[:,0],img_ind)
        temp1 = np.in1d(connections[:,1],img_ind)
        tind = temp0*temp1
        connections = connections[tind]

        self._net.throat_properties['connections'] = self._voxel_to_pore_map[connections]
        self._net.throat_properties['type'] = np.zeros(np.sum(tind))
        self._net.throat_properties['numbering'] = np.arange(0,np.sum(tind))
        self._logger.debug("generate_throats: End of method")
Developer: dgupta599 | Project: OpenPNM | Lines: 34 | Source: __CustomGenerator__.py


Example 20: compute_mAP

def compute_mAP(index, qc, good_index, junk_index):
    ap = 0
    cmc = torch.IntTensor(len(index)).zero_()
    if good_index.size==0:   # if empty
        cmc[0] = -1
        return ap,cmc

    # remove junk_index
    ranked_camera = gallery_cam[index]
    mask = np.in1d(index, junk_index, invert=True)
    #mask2 = np.in1d(index, np.append(good_index,junk_index), invert=True)
    index = index[mask]
    ranked_camera = ranked_camera[mask]
    for i in range(10):
        cam_metric[ qc-1, ranked_camera[i]-1 ] +=1

    # find good_index index
    ngood = len(good_index)
    mask = np.in1d(index, good_index)
    rows_good = np.argwhere(mask==True)
    rows_good = rows_good.flatten()
    
    cmc[rows_good[0]:] = 1
    for i in range(ngood):
        d_recall = 1.0/ngood
        precision = (i+1)*1.0/(rows_good[i]+1)
        if rows_good[i]!=0:
            old_precision = i*1.0/rows_good[i]
        else:
            old_precision=1.0
        ap = ap + d_recall*(old_precision + precision)/2

    return ap, cmc
Developer: XuJiaMing1997 | Project: Person-reID-triplet-loss | Lines: 33 | Source: evaluate_gpu.py



Note: The numpy.in1d function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.

