Python scipy.argmin Function Code Examples


This article collects typical usage examples of the scipy.argmin function in Python. If you are wondering what exactly scipy.argmin does, how to call it, or what it looks like in real code, the curated examples below should help.



Twenty code examples of the argmin function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
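Before the project examples, here is a minimal standalone sketch of what argmin does. The top-level scipy.argmin alias simply forwards to numpy.argmin and has been deprecated in recent SciPy releases, so the sketch calls numpy directly; the array values are made up purely for illustration.

import numpy as np

a = np.array([[4.0, 1.0, 7.0],
              [2.0, 9.0, 0.5]])

print(np.argmin(a))          # 5 -> index of the minimum in the flattened array
print(np.argmin(a, axis=0))  # [1 0 1] -> row index of the per-column minimum
print(np.argmin(a, axis=1))  # [1 2]   -> column index of the per-row minimum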

Example 1: calc_probability_matrix

def calc_probability_matrix(trains_a, trains_b, metric, tau, z):
    """ Calculates the probability matrix that one spike train from stimulus X
    will be classified as spike train from stimulus Y.

    :param list trains_a: Spike trains of stimulus A.
    :param list trains_b: Spike trains of stimulus B.
    :param str metric: Metric to base the classification on. Has to be a key in
        :const:`metrics.metrics`.
    :param tau: Time scale parameter for the metric.
    :type tau: Quantity scalar.
    :param float z: Exponent parameter for the classifier.
    """

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "divide by zero")
        dist_mat = calc_single_metric(trains_a + trains_b, metric, tau) ** z
    dist_mat[sp.diag_indices_from(dist_mat)] = 0.0

    assert len(trains_a) == len(trains_b)
    l = len(trains_a)
    classification_of_a = sp.argmin(sp.vstack((
        sp.sum(dist_mat[:l, :l], axis=0) / (l - 1),
        sp.sum(dist_mat[l:, :l], axis=0) / l)) ** (1.0 / z), axis=0)
    classification_of_b = sp.argmin(sp.vstack((
        sp.sum(dist_mat[:l, l:], axis=0) / l,
        sp.sum(dist_mat[l:, l:], axis=0) / (l - 1))) ** (1.0 / z), axis=0)
    confusion = sp.empty((2, 2))
    confusion[0, 0] = sp.sum(classification_of_a == 0)
    confusion[1, 0] = sp.sum(classification_of_a == 1)
    confusion[0, 1] = sp.sum(classification_of_b == 0)
    confusion[1, 1] = sp.sum(classification_of_b == 1)
    return confusion / 2.0 / l
Developer: jgosmann, Project: spyke-metrics-extra, Lines: 32, Source: section3.2.1.py


Example 2: cosine_coefficient

    def cosine_coefficient(self, target_FV):
        # Dot product of each node with the target feature vector,
        # summed over the feature axis.
        temp = (self.nodes * target_FV).sum(axis=2)
        # Squared norms of the nodes and the norm of the target vector.
        temp_2 = (self.nodes ** 2).sum(axis=2)
        temp_3 = ((target_FV ** 2).sum()) ** 0.5
        temp_4 = temp_2 * temp_3
        temp_f = temp / temp_4
        # Equivalent one-liner:
        # (a*b).sum(axis=2) / (((a**2).sum(axis=2) * (b**2).sum())**0.5)
        return scipy.argmin(temp_f)
Developer: warles34, Project: INTERFACE, Lines: 33, Source: sompy.py


Example 3: kmeans

def kmeans(data,K, clusterType = "MeansPointRepresentative", distancePointToPoint = L2NormDistance, distancePointToSet = AveragePointSetDistance): # could also input tolerance
    clusters = []
    if clusterType == 'MeansPointRepresentative':
        for k in range(K): 
            clusters.append(MeansCluster(distancePointToPoint,data)) #initializes the clusters as MeansPointRepresentative
    elif clusterType == 'SetRepresentative':
        for k in range(K):
            clusters.append(SetCluster(distancePointToPoint,distancePointToSet,data)) #initializes the cluster as set representative
    else:
        print "Unknown type of cluster"
        return None
    hasConverged = False
    iterations = 0
    while not hasConverged: #continues to run until clusters converge
        conv = []
        for d in data:
            distanceFromCluster = scipy.array([c.distanceToPointOrSet(d) for c in clusters])
            indexCluster = scipy.argmin(distanceFromCluster) # i have a weird error here when I use setCluster
            clusters[indexCluster].assign(d)
        for c in clusters:
            c.update()
            conv.append(c.reachedTolerance()) #tests to see if epsilon is below tolerance
        iterations = iterations + 1
        hasConverged = all(conv)
    print "The number of iterations is: ", iterations
    clusterID =[]
    for d in data: #creates cluster ID's
        distanceFromCluster = scipy.array([c.distanceToPointOrSet(d) for c in clusters])
        indexCluster = scipy.argmin(distanceFromCluster)
        clusterID.append(indexCluster)
        
    return [clusterID, clusters]    
Developer: xmunoz, Project: spatialstats, Lines: 32, Source: alephoneHW2forHW3.py


Example 4: __convolveSphinx

    def __convolveSphinx(self,star):
        
        '''
        Convolve the Sphinx output with the SPIRE resolution. The convolution
        is done in wave number (cm^-1).
        
        @param star: The Star() object for which Sphinx profiles are loaded
        @type star: Star()
        
        '''     

        #- Get sphinx model output and merge, for all star models in star_grid
        if not self.resolution: 
            print '* Resolution is undefined. Cannot convolve Sphinx.'
            return
        print '* Reading Sphinx model and merging.'
        sphinx_wav,sphinx_flux = star['LAST_GASTRONOOM_MODEL'] \
                                        and self.mergeSphinx(star) \
                                        or [[],[]]
        if not sphinx_wav: 
            print '* No Sphinx data found.'
            return
        sphinx_wav = 1./array(sphinx_wav)*10**(4)
        sphinx_flux = array(sphinx_flux)
        sphinx_wav = sphinx_wav[::-1]
        sphinx_flux = sphinx_flux[::-1]
        
        #-- eliminate some of the zeroes in the grid to reduce calculation time
        #   (can reduce the array by a factor up to 100!!)
        s = self.sigma
        lcs = array(sorted([1./line.wavelength 
                            for line in star['GAS_LINES']]))
        new_wav, new_flux = [sphinx_wav[0]],[sphinx_flux[0]]
        for w,f in zip(sphinx_wav[1:],sphinx_flux[1:]):
            if f != 0 or (w < 5*s+lcs[argmin(abs(lcs-w))] \
                                and w > lcs[argmin(abs(lcs-w))]-5*s):
                new_wav.append(w)
                new_flux.append(f)
        new_wav, new_flux = array(new_wav), array(new_flux)
        
        #-- convolve the model fluxes with a gaussian and constant sigma(spire)
        print '* Convolving Sphinx model for SPIRE.'
        convolution = Data.convolveArray(new_wav,new_flux,s)
        
        for data_wav,fn in zip(self.data_wave_list,self.data_filenames):
            rebinned = []
            #-- Convert wavelengths to wave number for integration, and reverse
            data_cm = data_wav[::-1]
            data_cm = 1./data_cm*10**4
            rebinned = [trapz(y=convolution[abs(new_wav-wavi)<=self.resolution/self.oversampling],\
                              x=new_wav[abs(new_wav-wavi)<=self.resolution/self.oversampling])\
                            /(self.resolution/self.oversampling)
                        for wavi in data_cm]
            #-- Reverse the rebinned fluxes so they match up with the 
            #   wavelength grid.
            rebinned = array(rebinned)[::-1]
            self.sphinx_convolution[star['LAST_SPIRE_MODEL']][fn] = rebinned
Developer: IvS-KULeuven, Project: ComboCode, Lines: 57, Source: Spire.py


Example 5: getModel

 def getModel(self,teff,logg):
     
     """
     Return the model atmosphere for given effective temperature and log g.
     
     Not yet scaled to the distance!
     
     Units returned are (micron,Jy)
     
     @param teff: the stellar effective temperature
     @type teff: float
     @param logg: the log g value
     @type logg: float
     
     @return: The model spectrum in (micron,Jy)
     @rtype: recarray
     
     """
     
     c = 2.99792458e18          #in angstrom/s
     if self.modelgrid is None:
         self.readModelGrid()
     mg = self.modelgrid
     #- Find the closest temperature in the grid
     teff_prox = mg['TEFF'][argmin(abs(mg['TEFF']-teff))]
     #- Select all models with that temperature
     mgsel = mg[mg['TEFF']==teff_prox]
     #- Select the closest log g in the selection
     logg_prox = mgsel['LOGG'][argmin(abs(mgsel['LOGG']-logg))]
     #- Get the index of the model closest to teff and logg
     imodel = mgsel[mgsel['LOGG']==logg_prox]['INDEX'][0]
     
     self.teff_actual = teff_prox
     self.logg_actual = logg_prox        
     
     wave = self.ff[imodel].data.field('wavelength')
     flux = self.ff[imodel].data.field('flux')
     if self.header['FLXUNIT'] == 'erg/s/cm2/A':
         #- Go to erg/s/cm2/Hz, lFl = nFn, then to Jy (factor 10**(23))
         flux = flux * wave**2 / c * 10**(23)
     else:
         raise Error('Flux unit unknown in atmosphere model fits file.')
     if self.header['WAVUNIT'] == 'angstrom':
         wave = wave * 10**(-4)
     else:
         raise Error('Wavelength unit unknown in atmosphere model fits file.')
     
     model = rec.fromarrays([wave,flux],names=['wave','flux'])        
     return model 
     
     
Developer: FungKu01, Project: ComboCode, Lines: 49, Source: Atmosphere.py


Example 6: decode

def decode(file_name):
    border.rotate(file_name)
    image = Image.open("temp.png")
    q = border.find("temp.png")
    ind = sp.argmin(sp.sum(q, 1), 0)
    up_left = q[ind, 0] + 2
    up_top = q[ind, 1] + 2
    d_right = q[ind+1, 0] - 3
    d_bottom = q[ind-1, 1] - 3

    box = (up_left, up_top, d_right, d_bottom)
    region = image.crop(box)
    h_sum = sp.sum(region, 0)
    m = argrelmax(sp.correlate(h_sum, h_sum, 'same'))
    s = sp.average(sp.diff(m))
    m = int(round(d_right - up_left)/s)
    if m % 3 != 0:
        m += 3 - m % 3
    n = int(round(d_bottom - up_top)/s)
    if n % 4 != 0:
        n += 4 - n % 4
    s = int(round(s))+1

    region = region.resize((s*m, s*n), PIL.Image.ANTIALIAS)
    region.save("0.png")
    pix = region.load()
    matrix = mix.off(rec.matrix(pix, s, m, n))
    str2 = hamming.decode(array_to_str(matrix))

    return hamming.bin_to_str(str2)
Developer: aroundnothing, Project: optar, Lines: 30, Source: picture.py


Example 7: getclosest

    def getclosest(self,coords,timelist=None):
        """This method will get the closest set of parameters in the coordinate space. It will return
        the parameters from all times.
        Input
        coords - A list of x,y and z coordinates.
        Output
        paramout - A Nt x Np array of the closest output parameters.
        sphereout - A Nc length array of the spherical coordinates of the closest point.
        cartout - Cartesian coordinates of the closest point.
        """
        X_vec = self.Cart_Coords[:,0]
        Y_vec = self.Cart_Coords[:,1]
        Z_vec = self.Cart_Coords[:,2]

        xdiff = X_vec -coords[0]
        ydiff = Y_vec -coords[1]
        zdiff = Z_vec -coords[2]
        distall = xdiff**2+ydiff**2+zdiff**2
        minidx = np.argmin(distall)
        paramout = self.Param_List[minidx]
        velout = self.Velocity[minidx]
        datatime = self.Time_Vector
        if sp.ndim(self.Time_Vector)>1:
            datatime = datatime[:,0]
        if timelist is not None:
            timeindx = []
            for itime in timelist:
                timeindx.append(sp.argmin(sp.absolute(itime-datatime)))
            paramout=paramout[timeindx]
            velout=velout[timeindx]
        sphereout = self.Sphere_Coords[minidx]
        cartout = self.Cart_Coords[minidx]
        return (paramout,velout,sphereout,cartout,np.sqrt(distall[minidx]))
Developer: hhuangmeso, Project: RadarDataSim, Lines: 33, Source: IonoContainer.py


Example 8: _init_params

    def _init_params(self, X):
        init = self.init
        n_samples, n_features = X.shape
        n_components = self.n_components

        if (init == 'kmeans'):
            km = Kmeans(n_components)
            clusters, mean, cov = km.cluster(X)
            coef = sp.array([c.shape[0] / n_samples for c in clusters])
            comps = [multivariate_normal(mean[i], cov[i], allow_singular=True)
                     for i in range(n_components)]
        elif (init == 'rand'):
            coef = sp.absolute(sprand.randn(n_components))
            coef = coef / coef.sum()
            means = X[sprand.permutation(n_samples)[0: n_components]]
            clusters = [[] for i in range(n_components)]
            for x in X:
                idx = sp.argmin([spla.norm(x - mean) for mean in means])
                clusters[idx].append(x)

            comps = []
            for k in range(n_components):
                mean = means[k]
                cov = sp.cov(clusters[k], rowvar=0, ddof=0)
                comps.append(multivariate_normal(mean, cov, allow_singular=True))

        self.coef = coef
        self.comps = comps
Developer: Yevgnen, Project: prml, Lines: 28, Source: mixture_model.py


Example 9: fit

    def fit(self, X):
        n_samples, n_features = X.shape
        n_classes = self.n_classes
        max_iter = self.max_iter
        tol = self.tol

        rand_center_idx = sprand.permutation(n_samples)[0:n_classes]
        center = X[rand_center_idx].T
        responsibility = sp.zeros((n_samples, n_classes))

        for iter in range(max_iter):
            # E step
            dist = sp.expand_dims(X, axis=2) - sp.expand_dims(center, axis=0)
            dist = spla.norm(dist, axis=1)**2
            min_idx = sp.argmin(dist, axis=1)
            responsibility.fill(0)
            responsibility[sp.arange(n_samples), min_idx] = 1

            # M step
            center_new = sp.dot(X.T, responsibility) / sp.sum(responsibility, axis=0)
            diff = center_new - center
            print('K-Means: {0:5d} {1:4e}'.format(iter, spla.norm(diff) / spla.norm(center)))
            if (spla.norm(diff) < tol * spla.norm(center)):
                break

            center = center_new

        self.center = center.T
        self.responsibility = responsibility

        return self
Developer: Yevgnen, Project: prml, Lines: 31, Source: mixture_model.py


Example 10: brute_force_2ref

def brute_force_2ref(ref1,ref2,data,res):
    [a,b,c] = data.shape[0],data.shape[1],data.shape[2]
    print a,b,c
    matrix_ref1 = np.copy(data)
    matrix_ref2 = np.copy(data)
    for i in range(c):
        matrix_ref1[:, :, i] = ref1[i]
        matrix_ref2[:, :, i] = ref2[i]
    total = 100/res + 1
    total = int(total)
    factor = (np.linspace(0,1,total))
    fRGB = np.zeros((3,total), dtype=np.float16)
    fRGB[0,:] = factor
    fRGB[1,:] = 1-factor
    sum_sqdata = np.sum(np.square(data),axis=2)
    R_ref = np.empty((a,b,total),dtype=np.float16)
    for i in range(total):
        print i
        matrix_ref_com = fRGB[0,i]*matrix_ref1 + fRGB[1,i]*matrix_ref2
        sqr = np.square(data - matrix_ref_com)
        R_ref[:, :, i] = np.sum(sqr, axis=2) / sum_sqdata

    min_R = np.amin(R_ref, axis=2)
    index = scipy.argmin(R_ref, axis=2)
    return  min_R,  index,  fRGB
Developer: dtyu, Project: xrayanalysisgui, Lines: 25, Source: fitting.py


Example 11: __init__

    def __init__(self, func, pop0, args=(), crossover_rate=0.5, scale=None, strategy=("rand", 2, "bin"), eps=1e-6):
        self.func = func
        self.population = sp.array(pop0)

        # added by Minh-Tri Pham
        for n in xrange(len(self.population)):
            self.refine(self.population[n])

        self.npop, self.ndim = self.population.shape
        self.args = args
        self.crossover_rate = crossover_rate
        self.strategy = strategy
        self.eps = eps

        self.pop_values = [self.func(m, *args) for m in self.population]
        bestidx = sp.argmin(self.pop_values)
        self.best_vector = self.population[bestidx]
        self.best_value = self.pop_values[bestidx]

        if scale is None:
            self.scale = self.calculate_scale()
        else:
            self.scale = scale

        self.generations = 0
        self.best_val_history = []
        self.best_vec_history = []

        self.jump_table = {
            ("rand", 1, "bin"): (self.choose_rand, self.diff1, self.bin_crossover),
            ("rand", 2, "bin"): (self.choose_rand, self.diff2, self.bin_crossover),
            ("best", 1, "bin"): (self.choose_best, self.diff1, self.bin_crossover),
            ("best", 2, "bin"): (self.choose_best, self.diff2, self.bin_crossover),
            ("rand-to-best", 1, "bin"): (self.choose_rand_to_best, self.diff1, self.bin_crossover),
        }
Developer: hksonngan, Project: mytesgnikrow, Lines: 35, Source: de.py


Example 12: fit2D_2ref

def fit2D_2ref(ref1,ref2,data,res):
    [a,b,c] = data.shape[0],data.shape[1],data.shape[2]
    print a,b,c
    matrix_ref1 = np.copy(data)
    matrix_ref2 = np.copy(data)
    for i in range(c):
        matrix_ref1[:, :, i] = ref1[i]
        matrix_ref2[:, :, i] = ref2[i]
    total = 100/res + 1
    total = int(total)
    factor = (np.linspace(0,1,total))
    fRGB = np.zeros((3,total), dtype=np.float16)
    fRGB[0,:] = factor
    fRGB[1,:] = 1-factor
    sum_sqdata = np.sum(np.square(data),axis=2)
    R_ref = np.empty((a,b,total),dtype=np.float16)
    for i in range(total):
        print i
        combination = i
        matrix_ref_com = fRGB[0,i]*matrix_ref1 + fRGB[1,i]*matrix_ref2
        sqr = np.square(data - matrix_ref_com)
        R_ref[:, :, i] = np.sum(sqr, axis=2) / sum_sqdata

    min_R = np.amin(R_ref, axis=2)
    index = scipy.argmin(R_ref, axis=2)
    save_dir = 'D:/Research/BNL_2014_Summer_Intern/xanes_PyQT'
    f = open(save_dir+'/index.txt','w')
    for i in range(a):
        f.write("%14.5f\n"%( index[i,500]))
    f.close()
    return  min_R,  index,  fRGB
Developer: dtyu, Project: xrayanalysisgui, Lines: 31, Source: fitting.py


Example 13: predict_gmm

    def predict_gmm(self, testSamples, tau=0):
        """
            Function that predict the label for testSamples using the learned model
            Inputs:
                testSamples: the samples to be classified
                tau:         regularization parameter
            Outputs:
                predLabels: the class
                scores:     the decision value for each class
        """
        # Get information from the data
        nbTestSpl = testSamples.shape[0] # Number of testing samples

        # Initialization
        scores = sp.empty((nbTestSpl,self.C))

        # Start the prediction for each class
        for c in xrange(self.C):
            testSamples_c = testSamples - self.mean[c,:]

            regvp = self.vp[c,:] + tau

            logdet        = sp.sum(sp.log(regvp))
            cst           = logdet - 2*sp.log(self.prop[c]) # Pre compute the constant term

            # compute ||lambda^{-0.5}q^T(x-mu)||^2 + cst for all samples
            scores[:,c] = sp.sum( sp.square( sp.dot( (self.Q[c,:,:][:,:]/sp.sqrt(regvp)).T, testSamples_c.T ) ), axis=0 ) + cst

            del testSamples_c

        # Assign the label to the minimum value of scores
        predLabels = sp.argmin(scores,1)+1

        return predLabels,scores
Developer: Laadr, Project: FFFS, Lines: 34, Source: npfs.py


Example 14: _set_reach_dist

def _set_reach_dist(setofobjects, point_index, epsilon):

    # Assumes that the query returns ordered (smallest distance first)
    # entries. This is the case for the balltree query...

    dists, indices = setofobjects.query(setofobjects.data[point_index],
                                        setofobjects._nneighbors[point_index])

    # Checks to see if there more than one member in the neighborhood ##
    if sp.iterable(dists):

        # Masking processed values ##
        # n_pr is 'not processed'
        n_pr = indices[(setofobjects._processed[indices] < 1)[0].T]
        rdists = sp.maximum(dists[(setofobjects._processed[indices] < 1)[0].T],
                            setofobjects.core_dists_[point_index])

        new_reach = sp.minimum(setofobjects.reachability_[n_pr], rdists)
        setofobjects.reachability_[n_pr] = new_reach

        # Checks to see if everything is already processed;
        # if so, return control to main loop ##
        if n_pr.size > 0:
            # Define return order based on reachability distance ###
            return n_pr[sp.argmin(setofobjects.reachability_[n_pr])]
        else:
            return point_index
Developer: Broham, Project: scikit-learn, Lines: 27, Source: optics.py


Example 15: __init__

    def __init__(self, func, pop0, args=(), crossover_rate=0.5, scale=None,
            strategy=('rand', 2, 'bin'), eps=1e-6):
        self.func = func
        self.population = sp.array(pop0)
        self.npop, self.ndim = self.population.shape
        self.args = args
        self.crossover_rate = crossover_rate
        self.strategy = strategy
        self.eps = eps

        self.pop_values = [self.func(m, *args) for m in self.population]
        bestidx = sp.argmin(self.pop_values)
        self.best_vector = self.population[bestidx]
        self.best_value = self.pop_values[bestidx]

        if scale is None:
            self.scale = self.calculate_scale()
        else:
            self.scale = scale

        self.generations = 0
        self.best_val_history = []
        self.best_vec_history = []

        self.jump_table = {
            ('rand', 1, 'bin'): (self.choose_rand, self.diff1, self.bin_crossover),
            ('rand', 2, 'bin'): (self.choose_rand, self.diff2, self.bin_crossover),
            ('best', 1, 'bin'): (self.choose_best, self.diff1, self.bin_crossover),
            ('best', 2, 'bin'): (self.choose_best, self.diff2, self.bin_crossover),
            ('rand-to-best', 1, 'bin'):
                (self.choose_rand_to_best, self.diff1, self.bin_crossover),
            }
Developer: mbentz80, Project: jzigbeercp, Lines: 32, Source: diffev.py


Example 16: check_if_click_is_on_an_existing_point

def check_if_click_is_on_an_existing_point(mouse_x_coord,mouse_y_coord):
    # First, figure out how many points we have.
    # Each point is one row in the coords_array,
    # so we count the number of rows, which is dimension-0 for Python
    number_of_points = scipy.shape(coords_array)[0]    
    this_coord = scipy.array([[ mouse_x_coord, mouse_y_coord ]]) 
            # The double square brackets above give the this_coord array 
            # an explicit structure of having rows and also columns
    if number_of_points > 0:  
        # If there are some points, we want to calculate the distance
        # of the new mouse-click location from every existing point.
        # One way to do this is to make an array which is the same size
        # as coords_array, and which contains the mouse x,y-coords on every row.
        # Then we can subtract that xy_coord_matching_matrix from coords_array
        ones_vec = scipy.ones((number_of_points,1))
        xy_coord_matching_matrix = scipy.dot(ones_vec,this_coord)
        distances_from_existing_points = (coords_array - xy_coord_matching_matrix)
        squared_distances_from_existing_points = distances_from_existing_points**2
        sum_sq_dists = scipy.sum(squared_distances_from_existing_points,axis=1) 
                   # The axis=1 means "sum over dimension 1", which is columns for Python          
        euclidean_dists = scipy.sqrt(sum_sq_dists)
        distance_threshold = 0.5
        within_threshold_points = scipy.nonzero(euclidean_dists < distance_threshold )
        num_within_threshold_points = scipy.shape(within_threshold_points)[1]
        if num_within_threshold_points > 0:
            # We only want one matching point.
            # It's possible that more than one might be within threshold.
            # So, we take the unique smallest distance
            point_to_be_deleted = scipy.argmin(euclidean_dists)
            return point_to_be_deleted
        else: # If there are zero points, then we are not deleting any 
            point_to_be_deleted = -1
            return point_to_be_deleted
Developer: eddienko, Project: SamPy, Lines: 33, Source: interactive_correlation_plot.py


Example 17: makepulse

def makepulse(ptype,plen,ts):
    """ This will make the pulse array.
        Inputs
            ptype - The type of pulse used.
            plen - The length of the pulse in seconds.
            ts - The sampling rate of the pulse.
        Output
            pulse - The pulse array that will be used as the window in the data formation.
            plen - The length of the pulse with the sampling time taken into account.
    """
    nsamps = int(sp.round_(plen/ts))

    if ptype.lower()=='long':
        pulse = sp.ones(nsamps)
        plen = nsamps*ts

    elif ptype.lower()=='barker':
        blen = sp.array([1,2, 3, 4, 5, 7, 11,13])
        nsampsarg = sp.argmin(sp.absolute(blen-nsamps))
        nsamps = blen[nsampsarg]
        pulse = GenBarker(nsamps)
        plen = nsamps*ts
#elif ptype.lower()=='ac':
    else:
        raise ValueError('The pulse type %s is not a valid pulse type.' % (ptype))

    return (pulse,plen)
Developer: jswoboda, Project: RadarDataSim, Lines: 27, Source: utilFunctions.py


Example 18: compute_loocv_gmm

def compute_loocv_gmm(variable,model,x,y,ids,K_u,alpha,beta,log_prop_u):
    """ Function that computes the estimation of the loocv for the GMM model with variables ids + variable(i)
        Inputs:
            model : the GMM model
            x,y : the training samples and the corresponding label
            ids : the pool of selected variables
            variable   : the variable to be tested from the set of available variable
            K_u    : the initial prediction values computed with all the samples
            alpha, beta and log_prop_u : constant that are computed outside of the loop to increased speed
        Outputs:
            loocv_temp : the loocv
            
        Used in GMM.forward_selection()
    """
    n = x.shape[0]
    ids.append(variable)      # Iteratively add one of the remaining variables
    Kp = model.predict_gmm(x,ids=ids)[1]# Predict with all the samples with ids
    loocv_temp=0.0;                     # Initialization of the temporary loocv
    for j in range(n):                  # Predict the class with the model ids_t
        Kloo = Kp[j,:] + K_u  # Initialization of the decision rule for sample "j" #--- Change for only not C---#
                   
        c = int(y[j]-1)        # Update of parameter of class c
        m  = (model.ni[c]*model.mean[c,ids] -x[j,ids])*alpha[c]    # Update the mean value
        xb = x[j,ids] - m                                     # x centered
        cov_u =  (model.cov[c,ids,:][:,ids] - sp.outer(xb,xb)*alpha[c])*beta    # Update the covariance matrix 
        logdet,rcond = safe_logdet(cov_u)
        Kloo[c] =   logdet - 2*log_prop_u[c] + sp.vdot(xb,mylstsq(cov_u,xb.T,rcond))    # Compute the new decision rule
        del cov_u,xb,m,c                   
                    
        yloo = sp.argmin(Kloo)+1
        loocv_temp += float(yloo==y[j])                   # Check the correct/incorrect classification rule
    ids.pop()                                                         # Remove the current variable 
    return loocv_temp/n                                           # Compute loocv for variable 
Developer: Sandy4321, Project: FFFS, Lines: 33, Source: npfs.py


Example 19: nearestNeighborDist

def nearestNeighborDist(mySet,dType):
    dMatX=sd.cdist(mySet,mySet,dType)
    minD=[]
    j=0
    for i in range(len(dMatX)):
        arr=dMatX[i]
        ind = sc.argmin(arr)
        if ind == j:
            arr = np.delete(arr,ind)
            myMin,ind = np.min(arr),sc.argmin(arr)
            minD.append(myMin) 
        j+=1
    nnDist = float(np.sum(minD)/len(minD))
    return nnDist        

    #older and slower implementation of nearest-neighbor distance          
    """            
Developer: chetanrrk, Project: GABasedCombinatorialSearches, Lines: 17, Source: DiversityOld.py


Example 20: computeDistances

def computeDistances(data, centroids, f):
    N = centroids.shape[0]
    T = data.shape[0]
    clusterAssignments = sp.zeros(T)
    for i in xrange(T):
        dists = sp.array([f(data[i,:], centroids[j,:]) for j in xrange(N)])
        clusterAssignments[i] = sp.argmin(dists)
    return clusterAssignments
Developer: KathleenF, Project: numerical_computing, Lines: 8, Source: kmeans.py



Note: The scipy.argmin examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. For redistribution and use, please refer to each project's License. Do not reproduce without permission.

