Python scipy.vstack Function Code Examples


This article collects typical usage examples of the scipy.vstack function in Python. If you are wondering what vstack does, how to call it, or what real code that uses it looks like, the curated examples below should help.



Twenty code examples of the vstack function are shown below, sorted by popularity by default. Each one is an excerpt from an open-source project, with the developer, project name, and source file listed after the code.
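Before the project excerpts, a minimal standalone sketch of the basic call may help. Historically scipy.vstack was simply numpy.vstack re-exported at the top of the scipy namespace; in recent SciPy releases that alias is deprecated or removed, so the sketch below uses numpy.vstack directly, which behaves the same way. The arrays here are made up for illustration.

import numpy as np

# Two 1-D arrays of equal length become the rows of a 2-D array.
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
print(np.vstack((a, b)).shape)      # (2, 3)

# 2-D inputs are concatenated along the first axis (row-wise).
x = np.arange(6).reshape(2, 3)
y = np.arange(6, 12).reshape(2, 3)
print(np.vstack([x, y]).shape)      # (4, 3)

# A list of row vectors, as several of the examples below build, also works.
rows = [np.array([i, i + 1]) for i in range(4)]
print(np.vstack(rows).shape)        # (4, 2)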

Example 1: MNEfit

def MNEfit(stim,resp,order):
    # in order for dlogloss to work, we need to know -<g(yt(n),xt)>data
    # == calculate the constrained averages over the data set
    Nsamples = sp.size(stim,0)
    Ndim = sp.size(stim,1)
    psp = sp.mean(sp.mean(resp)) #spike probability (first constraint)
    avg = (1.0*stim.T*resp)/(Nsamples*1.0)
    avgs = sp.vstack((psp,avg))
    if(order > 1):
        avgsqrd = (stim.T*1.0)*(sp.array(sp.tile(resp,(1,Ndim)))*sp.array(stim))/(Nsamples*1.0)
        avgsqrd = sp.reshape(avgsqrd,(Ndim**2,1))
        avgs = sp.vstack((avgs,avgsqrd))
    
    #initialize params:
    pstart = sp.log(1/avgs[0,0] - 1)
    pstart = sp.hstack((pstart,(.001*(2*sp.random.rand(Ndim)-1))))
    if(order > 1):
        temp = .0005*(2*sp.random.rand(Ndim,Ndim)-1)
        pstart = sp.hstack((pstart,sp.reshape(temp+temp.T,(1,Ndim**2))[0]))
    
    #redefine functions with fixed vals:
    def logLoss(p):
        return LLF.log_loss(p, stim, resp, order)
    def dlogLoss(p):
        return LLF.d_log_loss(p, stim, avgs, order)
    #run the function:
    #pfinal = opt.fmin_tnc(logLoss,pstart,fprime=dlogLoss)
    # conjugate-gradient:
    pfinal = opt.fmin_cg(logLoss,pstart,fprime=dlogLoss)
    #pfinal = opt.fmin(logLoss,pstart,fprime=dlogLoss)
    return pfinal
Developer: MarvinT | Project: pyMNE | Lines of code: 31 | Source: MNEfit.py
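Example 1 prepends a scalar constraint (psp) to a column vector of averages with sp.vstack((psp, avg)), relying on vstack promoting the scalar to a (1, 1) row. A minimal sketch of that shape pattern with plain numpy and hypothetical data (the shapes, not the MNE model, are the point):

import numpy as np

Ndim = 5
psp = 0.2                          # scalar constraint (e.g. mean spike probability)
avg = np.random.rand(Ndim, 1)      # column of first-order averages

avgs = np.vstack((psp, avg))       # scalar -> (1, 1), result is (Ndim + 1, 1)

# The order > 1 branch appends a flattened (Ndim, Ndim) block the same way.
avgsqrd = np.random.rand(Ndim, Ndim).reshape(Ndim ** 2, 1)
avgs = np.vstack((avgs, avgsqrd))  # (1 + Ndim + Ndim**2, 1)
print(avgs.shape)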


Example 2: gpmapasrecc

def gpmapasrecc(optstate, **para):
    if para["onlyafter"] > len(optstate.y) or not len(optstate.y) % para["everyn"] == 0:
        return [sp.NaN for i in para["lb"]], {"didnotrun": True}
    logger.info("gpmapas reccomender")
    d = len(para["lb"])

    x = sp.hstack([sp.vstack(optstate.x), sp.vstack([e["xa"] for e in optstate.ev])])

    y = sp.vstack(optstate.y)
    s = sp.vstack([e["s"] for e in optstate.ev])
    dx = [e["d"] for e in optstate.ev]
    MAP = GPdc.searchMAPhyp(x, y, s, dx, para["mprior"], para["sprior"], para["kindex"])
    logger.info("MAPHYP {}".format(MAP))
    G = GPdc.GPcore(x, y, s, dx, GPdc.kernel(para["kindex"], d + 1, MAP))

    def directwrap(xq, y):
        xq.resize([1, d])
        xe = sp.hstack([xq, sp.array([[0.0]])])
        # print xe
        a = G.infer_m(xe, [[sp.NaN]])
        return (a[0, 0], 0)

    [xmin, ymin, ierror] = DIRECT.solve(
        directwrap, para["lb"], para["ub"], user_data=[], algmethod=1, volper=para["volper"], logfilename="/dev/null"
    )
    logger.info("reccsearchresult: {}".format([xmin, ymin, ierror]))
    return [i for i in xmin], {"MAPHYP": MAP, "ymin": ymin}
Developer: markm541374 | Project: GPc | Lines of code: 27 | Source: reccomenders.py


Example 3: _pair_overlap

def _pair_overlap(waves1, waves2, mean1, mean2, cov1, cov2):
    """ Calculate FP/FN estimates for two gaussian clusters
    """
    from sklearn import mixture

    means = sp.vstack([[mean1], [mean2]])
    covars = sp.vstack([[cov1], [cov2]])
    weights = sp.array([waves1.shape[1], waves2.shape[1]], dtype=float)
    weights /= weights.sum()

    # Create mixture of two Gaussians from the existing estimates
    mix = mixture.GMM(n_components=2, covariance_type="full", init_params="")
    mix.covars_ = covars
    mix.weights_ = weights
    mix.means_ = means

    posterior1 = mix.predict_proba(waves1.T)[:, 1]
    posterior2 = mix.predict_proba(waves2.T)[:, 0]

    return (
        posterior1.mean(),
        posterior2.sum() / len(posterior1),
        posterior2.mean(),
        posterior1.sum() / len(posterior2),
    )
Developer: amchagas | Project: spykeutils | Lines of code: 25 | Source: sorting_quality_assesment.py
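Example 3 wraps each array in an extra list before stacking, e.g. sp.vstack([[mean1], [mean2]]), which adds a leading axis so the result has one slot per cluster: (2, d) for the means and (2, d, d) for the covariances, which is what the (since replaced by GaussianMixture) sklearn GMM parameters expect. A small sketch of that idiom with hypothetical values:

import numpy as np

d = 3
mean1, mean2 = np.zeros(d), np.ones(d)
cov1, cov2 = np.eye(d), 2.0 * np.eye(d)

means = np.vstack([[mean1], [mean2]])    # (2, 3): one row per cluster mean
covars = np.vstack([[cov1], [cov2]])     # (2, 3, 3): one covariance per cluster
print(means.shape, covars.shape)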


Example 4: PESvsaq

def PESvsaq(optstate,persist,**para):
    para = copy.deepcopy(para)
    if persist==None:
        persist = {'n':0,'d':len(para['ub'])}
    n = persist['n']
    d = persist['d']
    if n<para['nrandinit']:
        persist['n']+=1
        
        return randomaq(optstate,persist,**para)
    logger.info('PESvsaq')
    #logger.debug(sp.vstack([e[0] for e in optstate.ev]))
    #raise
    x=sp.vstack(optstate.x)
    y=sp.vstack(optstate.y)
    s= sp.vstack([e['s'] for e in optstate.ev])
    dx=[e['d'] for e in optstate.ev]
    
    pesobj = PES.PES(x,y,s,dx,para['lb'],para['ub'],para['kindex'],para['mprior'],para['sprior'],DH_SAMPLES=para['DH_SAMPLES'],DM_SAMPLES=para['DM_SAMPLES'], DM_SUPPORT=para['DM_SUPPORT'],DM_SLICELCBPARA=para['DM_SLICELCBPARA'],mode=para['SUPPORT_MODE'],noS=para['noS'])
    
    
        
    [xmin,ymin,ierror] = pesobj.search_acq(para['cfn'],para['logsl'],para['logsu'],volper=para['volper'])
    
    logger.debug([xmin,ymin,ierror])
    para['ev']['s']=10**xmin[-1]
    xout = [i for i in xmin[:-1]]
    return xout,para['ev'],persist,{'HYPdraws':[k.hyp for k in pesobj.G.kf],'mindraws':pesobj.Z,'DIRECTmessage':ierror,'PESmin':ymin}

    return
Developer: markm541374 | Project: GPc | Lines of code: 30 | Source: acquisitions.py


Example 5: EIMAPaq

def EIMAPaq(optstate,persist,ev=None, ub = None, lb=None, nrandinit=None, mprior=None,sprior=None,kindex = None,directmaxiter=None):
    para = copy.deepcopy(para)
    if persist==None:
        persist = {'n':0,'d':len(ub)}
    n = persist['n']
    d = persist['d']
    if n<nrandinit:
        persist['n']+=1
        return randomaq(optstate,persist,ev=ev,lb=lb,ub=ub)
    logger.info('EIMAPaq')
    #logger.debug(sp.vstack([e[0] for e in optstate.ev]))
    #raise
    x=sp.vstack(optstate.x)
    y=sp.vstack(optstate.y)
    s= sp.vstack([e['s'] for e in optstate.ev])
    dx=[e['d'] for e in optstate.ev]
    MAP = GPdc.searchMAPhyp(x,y,s,dx,mprior,sprior, kindex)
    logger.info('MAPHYP {}'.format(MAP))

    G = GPdc.GPcore(x,y,s,dx,GPdc.kernel(kindex,d,MAP))
    def directwrap(xq,y):
        xq.resize([1,d])
        a = G.infer_lEI(xq,[ev['d']])
        return (-a[0,0],0)
    
    [xmin,ymin,ierror] = DIRECT.solve(directwrap,lb,ub,user_data=[], algmethod=0, maxf = directmaxiter, logfilename='/dev/null')
    #logger.debug([xmin,ymin,ierror])
    persist['n']+=1
    return [i for i in xmin],ev,persist,{'MAPHYP':MAP,'logEImin':ymin,'DIRECTmessage':ierror}
Developer: markm541374 | Project: GPc | Lines of code: 29 | Source: acquisitions.py


Example 6: update

def update():
    global i
    if i == tvec.shape[0]-1:
        i = 0
    else:
        i = i + 1
    
    if show_left:
        poi_left_scatter.setData(pos=sp.expand_dims(poi_left_pos[i],0))
        hand_left_scatter.setData(pos=sp.expand_dims(hand_left_pos[i],0))
        string_left_line.setData(pos=sp.vstack((hand_left_pos[i],poi_left_pos[i])))
#        arm_left.setData(pos=sp.vstack((hand_left_pos[i],[0,-1*shoulder_width/2,0])))
        arm_left.setData(pos=sp.vstack((hand_left_pos[i],[0,0,offset])))
    else:
        poi_left_scatter.hide()
        poi_left_line.hide()
        hand_left_scatter.hide()
        hand_left_line.hide()
        string_left_line.hide()
        arm_left.hide()
    
    if show_right:
        poi_right_scatter.setData(pos=sp.expand_dims(poi_right_pos[i],0))
        hand_right_scatter.setData(pos=sp.expand_dims(hand_right_pos[i],0))
        string_right_line.setData(pos=sp.vstack((hand_right_pos[i],poi_right_pos[i])))
#        arm_right.setData(pos=sp.vstack((hand_right_pos[i],[0,shoulder_width/2,0])))
        arm_right.setData(pos=sp.vstack((hand_right_pos[i],[0,0,offset])))
    else:
        poi_right_scatter.hide()
        poi_right_line.hide()
        hand_right_scatter.hide()
        hand_right_line.hide()
        string_right_line.hide()
        arm_right.hide()
Developer: grg2rsr | Project: Poi_visualization | Lines of code: 34 | Source: poi_code_working.py


Example 7: Ei

    def Ei(self, Pp, i):
        """ Calculate E_i^P

        Parameters
        -------------
        Pp : ndarray, shape (n, k)
             Conditional choice probabilities for provinces
        i : int, 1 to k
            Province 

        Returns
        -----------
        Ei : ndarray, shape (n, )
             Values of :math:`E_i^P(l, a)` in part (b)

        Notes
        ----------
        
        .. math::
                        
           E_i^P(l, s) = \sum_{a=0}^1 P_i[a | l, s] E_i^P(a, l, s)

        """
        E = sp.vstack((self.Ei_ai(Pp, i, a) for a in (0, 1))).T
        W = sp.vstack((Pp[:, _pp(i, a)] for a in (0, 1))).T
        return (E * W).sum(1)
Developer: jrnold | Project: psc585 | Lines of code: 26 | Source: ps4.py


Example 8: calc_probability_matrix

def calc_probability_matrix(trains_a, trains_b, metric, tau, z):
    """ Calculates the probability matrix that one spike train from stimulus X
    will be classified as spike train from stimulus Y.

    :param list trains_a: Spike trains of stimulus A.
    :param list trains_b: Spike trains of stimulus B.
    :param str metric: Metric to base the classification on. Has to be a key in
        :const:`metrics.metrics`.
    :param tau: Time scale parameter for the metric.
    :type tau: Quantity scalar.
    :param float z: Exponent parameter for the classifier.
    """

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "divide by zero")
        dist_mat = calc_single_metric(trains_a + trains_b, metric, tau) ** z
    dist_mat[sp.diag_indices_from(dist_mat)] = 0.0

    assert len(trains_a) == len(trains_b)
    l = len(trains_a)
    classification_of_a = sp.argmin(sp.vstack((
        sp.sum(dist_mat[:l, :l], axis=0) / (l - 1),
        sp.sum(dist_mat[l:, :l], axis=0) / l)) ** (1.0 / z), axis=0)
    classification_of_b = sp.argmin(sp.vstack((
        sp.sum(dist_mat[:l, l:], axis=0) / l,
        sp.sum(dist_mat[l:, l:], axis=0) / (l - 1))) ** (1.0 / z), axis=0)
    confusion = sp.empty((2, 2))
    confusion[0, 0] = sp.sum(classification_of_a == 0)
    confusion[1, 0] = sp.sum(classification_of_a == 1)
    confusion[0, 1] = sp.sum(classification_of_b == 0)
    confusion[1, 1] = sp.sum(classification_of_b == 1)
    return confusion / 2.0 / l
Developer: jgosmann | Project: spyke-metrics-extra | Lines of code: 32 | Source: section3.2.1.py


Example 9: sqcover

def sqcover(A,n):
    edge = sp.sqrt(A) # the length of an edge
    d = edge/n # the distance between two adjacent points
    r = d/2 # the "radius of "
    end = edge - r # end point
    base = sp.linspace(r, end, n)
    first_line = sp.transpose(sp.vstack((base, r*sp.ones(n))))
    increment = sp.transpose(sp.vstack((sp.zeros(n), d*sp.ones(n))))
    pts = first_line
    y_diff = increment
    for i in range(n-1):
        pts = sp.vstack((pts, first_line + y_diff))
        y_diff = y_diff + increment
    
    # Color matter
    colors = []
    for p in pts:
        cval = n*p[0] + p[1] # the x-coord has a higher weight
        cval = colormap.Spectral(cval/((n+1)*end)) # normalize by the max value that cval can take.
        colors.append(cval)

    colors = sp.array(colors)

    cover = (pts, r, colors)
    return cover
Developer: atkm | Project: reed-modeling | Lines of code: 25 | Source: ga_shapes.py


Example 10: my_bh_fdr

def my_bh_fdr(p_val_vec):
    index = scipy.argsort(p_val_vec)
    exp_err = scipy.vstack((float(len(p_val_vec))/scipy.arange(1,len(p_val_vec) + 1)*p_val_vec[index],
                                      scipy.tile(1, [1, len(p_val_vec)]))).min(axis = 0)
    exp_err = scipy.vstack((exp_err,exp_err[scipy.r_[0,scipy.arange(len(exp_err)-1)]])).max(axis=0)
    #scipy.r_[index[0], index[range(len(index)-1)]
    resort_index = scipy.argsort(index)                 
    return exp_err[resort_index]
Developer: RDMelamed | Project: melamed_comorbidity | Lines of code: 8 | Source: mendelian_mutation.py
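Example 10 uses vstack twice to take element-wise minima and maxima over pairs of rows, which appears to be how this Benjamini-Hochberg-style FDR correction caps adjusted p-values at 1 and compares each value with its predecessor. A hedged sketch of just those two vstack patterns, with made-up p-values:

import numpy as np

p = np.array([0.01, 0.40, 0.03, 0.002])
order = np.argsort(p)
n = len(p)

# Cap the raw BH estimates at 1: stack them on a row of ones, take the column-wise min.
raw = n / np.arange(1, n + 1) * p[order]
capped = np.vstack((raw, np.ones(n))).min(axis=0)

# Compare each value with its predecessor (first element paired with itself),
# mirroring the second vstack/max in my_bh_fdr.
shifted = capped[np.r_[0, np.arange(n - 1)]]
adjusted = np.vstack((capped, shifted)).max(axis=0)

# Undo the sort to report values in the original order, as the function does.
print(adjusted[np.argsort(order)])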


Example 11: infer_diag

 def infer_diag(self,X_i,D_i):
     ns=X_i.shape[0]
     D = [0 if sp.isnan(x[0]) else int(sum([8**i for i in x])) for x in D_i]
     R=sp.vstack([sp.empty([2,ns])]*self.size)
     libGP.infer_diag(self.s,cint(self.size), ns,X_i.ctypes.data_as(ctpd),(cint*len(D))(*D),R.ctypes.data_as(ctpd))
     m = sp.vstack([R[i*2,:] for i in xrange(self.size)])
     V = sp.vstack([R[i*2+1,:] for i in xrange(self.size)])
     return [m,V]
Developer: markm541374 | Project: GPc | Lines of code: 8 | Source: GPdc.py


Example 12: test_skip

def test_skip():
    """Test if only keeping every n'th sample works."""
    X = scipy.vstack((scipy.arange(25), scipy.arange(25)))
    X_ = skip(X, 2, 5)
    print X_
    des = scipy.vstack((scipy.array([0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 20, 21, 22, 23 ,24]),
                       scipy.array([0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 20, 21, 22, 23 ,24])))

    assert (X_ == des).all(), 'wrong result'
Developer: ddofer | Project: breze | Lines of code: 9 | Source: test_data.py


Example 13: extract_spikes

def extract_spikes(train, signals, length, align_time):
    """ Extract spikes with waveforms from analog signals using a spike train. 
    Spikes that are too close to the beginning or end of the shortest signal
    to be fully extracted are ignored.

    :type train: :class:`neo.core.SpikeTrain`
    :param train: The spike times.
    :param sequence signals: A sequence of :class:`neo.core.AnalogSignal`
        objects from which the spikes are extracted. The waveforms of
        the returned spikes are extracted from these signals in the
        same order they are given.
    :type length: Quantity scalar
    :param length: The length of the waveform to extract as time scalar.
    :type align_time: Quantity scalar
    :param align_time: The alignment time of the spike times as time scalar.
        This is the time delta from the start of the extracted waveform
        to the exact time of the spike.
    :returns: A list of :class:`neo.core.Spike` objects, one for each
        time point in ``train``. All returned spikes include their
        ``waveform`` property.
    :rtype: list
    """
    if len(set(s.sampling_rate for s in signals)) > 1:
        raise ValueError(
            'All signals for spike extraction need the same sampling rate')

    wave_unit = signals[0].units
    srate = signals[0].sampling_rate
    end = min(s.shape[0] for s in signals)

    aligned_train = train - align_time
    cut_samples = int((length * srate).simplified)

    st = sp.asarray((aligned_train * srate).simplified)

    # Find extraction epochs
    st_ok = (st >= 0) * (st < end - cut_samples)
    epochs = sp.vstack((st[st_ok], st[st_ok] + cut_samples)).T

    nspikes = epochs.shape[0]
    if not nspikes:
        return []

    # Create data
    data = sp.vstack([sp.asarray(s.rescale(wave_unit)) for s in signals])
    nc = len(signals)

    spikes = []
    for s in xrange(nspikes):
        waveform = sp.zeros((cut_samples, nc))
        for c in xrange(nc):
            waveform[:, c] = \
                data[c, epochs[s, 0]:epochs[s, 1]]
        spikes.append(neo.Spike(train[st_ok][s], waveform=waveform * wave_unit))

    return spikes
Developer: mczhu | Project: spykeutils | Lines of code: 56 | Source: tools.py
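Example 13 builds an (n_spikes, 2) array of start/end sample indices by stacking two 1-D index arrays and transposing, then slices the signal data with each row. A minimal sketch of that idiom with hypothetical spike positions:

import numpy as np

st = np.array([12, 40, 95, 130])     # spike sample indices (hypothetical)
cut_samples = 20                     # waveform length in samples
end = 128                            # length of the shortest signal

ok = (st >= 0) & (st < end - cut_samples)
epochs = np.vstack((st[ok], st[ok] + cut_samples)).T
print(epochs)                        # rows of [start, end): (12, 32), (40, 60), (95, 115)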


Example 14: stripe2

def stripe2():
    Y1 = sp.vstack((sp.ones((50,1)), sp.zeros((50,1))))
    Y2 = sp.vstack((sp.zeros((50,1)), sp.ones((50,1))))
    Y = sp.hstack([Y1, Y2])

    X1 = sp.random.multivariate_normal([-2,2], [[1,.8],[.8,1]],size=50)
    X2 = sp.random.multivariate_normal([2,-1], [[1,.8],[.8,1]], size=50)
    X = sp.hstack((sp.ones((100,1)),sp.vstack([X1,X2])))

    return Y, X
Developer: ayr0 | Project: StatLab | Lines of code: 10 | Source: regressBayes.py


Example 15: load_single_player_data

def load_single_player_data(use_existing=False, num_train=0):
    aa=np.load('/Users/amit/Desktop/Dropbox/Markov/IMSPL.npy')
    bb=np.load('/Users/amit/Desktop/Dropbox/Markov/IMSBGD.npy')
    aa=standardize_data(aa)
    bb=standardize_data(bb)


    #ii=np.int32(np.floor(np.random.rand(100)*bb.shape[0]))
    # py.figure(1)
    # for j,i in enumerate(ii):
    #     py.subplot(10,10,j+1)
    #     py.imshow(bb[i,:,:,:])
    #     py.axis('off')
    #     py.axis('equal')
    # py.show()
    if (num_train==0):
        num=aa.shape[0]
    else:
        num=np.minimum(aa.shape[0],num_train)
    if (not use_existing):
         ii=range(num)
         np.random.shuffle(ii)
         np.save('ii.npy',ii)
         aa=aa[ii,]
    else:
        if (os.path.isfile('ii.npy')):
            ii=np.load('ii.npy')
            aa=aa[ii,]
    train_num=np.int32(num/2)
    val_num=np.int32(num/4)
    test_num=np.int32(num/4)
    head=aa[:,0:25,:,:]
    body=aa[:,20:45,:,:]
    legs=aa[:,35:60,:,:]
    bgd=bb[:,20:45,:,:]
    val_start=train_num
    val_end=val_num+val_start
    test_start=val_end
    test_end=test_num+test_start
    X_train=scipy.vstack((head[0:train_num,],body[0:train_num,],legs[0:train_num],bgd[0:train_num,]))
    X_val=scipy.vstack((head[val_start:val_end,],body[val_start:val_end,],
                        legs[val_start:val_end,],bgd[val_start:val_end,]))
    X_test=scipy.vstack((head[test_start:test_end,],
                         body[test_start:test_end,],
                         legs[test_start:test_end,],
                         bgd[test_start:test_end,]))

    X_train=X_train.transpose((0,3,1,2)) #/256.
    X_val=X_val.transpose((0,3,1,2)) #/256.
    X_test=X_test.transpose((0,3,1,2)) #/256.
    y_train=np.repeat(range(4),train_num)
    y_val=np.repeat(range(4),val_num)
    y_test=np.repeat(range(4),test_num)

    return (np.float32(X_train),np.uint8(y_train),np.float32(X_val),np.uint8(y_val),np.float32(X_test),np.uint8(y_test))
Developer: yaliamit | Project: Players | Lines of code: 55 | Source: players.py


Example 16: broyden

def broyden(func, x1, x2, tol=1e-5, maxiter=50):
    """Calculate the zero of a multi-dimensional function using Broyden's method"""
    
    def isscalar(x):
        if isinstance(x, sp.ndarray):
            if x.size == 1:
                return x.flatten()[0]
            else:
                return x
        else:
            return x

    def update_Jacobian(preJac, ch_x, ch_F):
        """Update Jacobian from preX to newX
        preX and newX are assumed to be array objects of the same shape"""
                
        frac = (ch_F-(preJac.dot(ch_x)))/(la.norm(ch_x)**2)

        Jac = preJac+sp.dot(isscalar(frac),ch_x.T)
        return Jac
        
    #truncate list to two tiems and sort
    x1 = sp.vstack(x1.flatten())
    x2 = sp.vstack(x2.flatten())
    
    fx1 = func(x1)
    fx2 = func(x2)
    
    #check our original points for zeros
    if abs(fx1) < tol:
        return x1
    elif abs(fx2) < tol:
        return x2

    #Calculate initial Jacobian matrix
    jac = Jacobian(func)(x1)
    mi = maxiter
    while abs(fx2) > tol and mi > 0:        
        fx1 = func(x1)
        fx2 = func(x2)
        ch_x=x2-x1
        ch_F=fx2-fx1
        
        jac = update_Jacobian(jac, ch_x, ch_F)
        y = la.lstsq(jac, sp.array([-fx2]))[0]
        xnew = y+x2
        x1 = x2
        x2 = xnew
        mi -= 1
    
    if mi==0:
        raise StopIteration("Did not converge in {} iterations".format(maxiter))
    else:
        return x2, maxiter-mi
Developer: byuimpactrevisions | Project: numerical_computing | Lines of code: 54 | Source: broyden.py


Example 17: TCA

def TCA(X_S, X_T, m=40, mu=0.1, kernel_para=1, p=2, random_sample_T=0.01):

    X_S = sp.mat(X_S)
    X_T = sp.mat(X_T)

    n_S = X_S.shape[0]
    n_T = X_T.shape[0]
    if random_sample_T != 1:
        print str(int(n_T * random_sample_T)) + " samples taken from the task domain"
        index_sample = sp.random.choice([i for i in range(n_T)], size=int(n_T * random_sample_T))
        X_T = X_T[index_sample, :]

        n_T = X_T.shape[0]

    n = n_S + n_T

    if m > (n):
        print ("m is larger then n_S+n_T, so it has been changed")
        m = n

    L = sp.zeros(shape=(n, n))
    L_SS = sp.ones(shape=(n_S, n_S)) / (n_S ** 2)
    L_TT = sp.ones(shape=(n_T, n_T)) / (n_T ** 2)
    L_ST = -sp.ones(shape=(n_S, n_T)) / (n_S * n_T)
    L_TS = -sp.ones(shape=(n_T, n_S)) / (n_S * n_T)

    L[0:n_S, 0:n_S] = L_SS
    L[n_S : n_S + n_T, n_S : n_S + n_T] = L_TT
    L[n_S : n_S + n_T, 0:n_S] = L_TS
    L[0:n_S, n_S : n_S + n_T] = L_ST

    R = pdist(sp.vstack([X_S, X_T]), metric="euclidean", p=p, w=None, V=None, VI=None)

    K = Gaussian(R, kernel_para, p)

    Id = sp.zeros(shape=(n, n))
    H = sp.zeros(shape=(n, n))
    sp.fill_diagonal(Id, 1)
    sp.fill_diagonal(H, 1)
    H -= 1.0 / n

    Id = sp.mat(Id)
    H = sp.mat(H)
    K = sp.mat(K)
    L = sp.mat(L)

    matrix = sp.linalg.inv(K * L * K + mu * Id) * sp.mat(K * H * K)

    eigen_values = sp.linalg.eig(matrix)

    eigen_val = eigen_values[0][0:m]
    eigen_vect = eigen_values[1][:, 0:m]
    return (eigen_val, eigen_vect, K, sp.vstack([X_S, X_T]))
Developer: PeterJackNaylor | Project: InternWork2 | Lines of code: 53 | Source: TCA.py


Example 18: radius

    def radius( self, frame ):
        '''
        Bubble radius at one frame.
        Method:
        1. Load the snapshot at frame
        2. Load x, y, z coordinates 
        3. Calculate density grid mesh at grid points
        4. Filter the shell grids with density between low * max density and high * max density
        5. Calculate the average radius
        '''
        start = time.clock()

        self.set_frame( frame )

        # Load x, y, z coordinates
        data = pd.DataFrame( list(self.universe.coord), columns=['x','y','z'])
        x    = data[ 'x' ].values
        y    = data[ 'y' ].values
        z    = data[ 'z' ].values

        # Density grid
        xyz  = scipy.vstack( [ x, y, z ] )
        kde  = scipy.stats.gaussian_kde( xyz )
        xmin, ymin, zmin = x.min(), y.min(), z.min()
        xmax, ymax, zmax = x.max(), y.max(), z.max()
        NI         = complex( imag=self.density_grid_length)
        xi, yi, zi = scipy.mgrid[ xmin:xmax:NI, ymin:ymax:NI, zmin:zmax:NI ]
        coords     = scipy.vstack([item.ravel() for item in [xi, yi, zi]])
        density    = kde(coords).reshape(xi.shape)

        # Filter density grid
        density_max  = density.max()
        density_low  = self.density_low * density_max
        density_high = self.density_high * density_max

        xyzs = []
        N = self.density_grid_length
        for idx, idy, idz in product( xrange(N), xrange(N), xrange(N) ):
            if density_low < density[ idx, idy, idz ] <= density_high:
                xyzs.append( [ xi[ idx, idy, idz ], yi[ idx, idy, idz ], zi[ idx, idy, idz ] ] )
        xyzs = np.array( xyzs )

        # Average radius
        center = xyzs.mean( axis=0 )
        rs = []
        for xyz_ele in xyzs:
            rs.append( np.linalg.norm( center - xyz_ele ) )

        duration = time.clock() - start
        print( "Radius for frame {} calculated in {:.2f} seconds".format( frame, duration ) )

        return center, scipy.mean( rs )
Developer: mikkkee | Project: Bubble | Lines of code: 52 | Source: bubble.py


Example 19: store

def store(old, new):
	old=old.reshape((1,len(old)))
	lold=old.shape[1]
	lnew=new.shape[1]
	if (lold==lnew):
		X=sc.vstack((old,new))
	elif (lold>lnew):
		new =sc.hstack(([0]*(lold-lnew),new))
		X=X=sc.vstack((old,new))
	elif (lnew>lold):
		old =sc.hstack((old,[0]*(lnew-lold)))
		X=X=sc.vstack((old,new))
	return(X)
Developer: OreUxio | Project: Ecosystems | Lines of code: 13 | Source: Multi_Species.py


Example 20: load

    def load(filename, network=None):
        r"""
        Loads data onto the given network from an appropriately formatted
        'mat' file (i.e. MatLAB output).

        Parameters
        ----------
        filename : string (optional)
            The name of the file containing the data to import.  The formatting
            of this file is outlined below.

        network : OpenPNM Network Object
            The Network object onto which the data should be loaded.  If no
            Network is supplied than one will be created and returned.

        Returns
        -------
        If no Network object is supplied then one will be created and returned.

        """
        net = {}

        import scipy.io as _spio
        data = _spio.loadmat(filename)
        # Deal with pore coords and throat conns specially
        if 'throat_conns' in data.keys():
            net.update({'throat.conns': _sp.vstack(data['throat_conns'])})
            Nt = _sp.shape(net['throat.conns'])[0]
            net.update({'throat.all': _sp.ones((Nt,), dtype=bool)})
            del data['throat_conns']
        else:
            logger.warning('\'throat_conns\' not found')
        if 'pore_coords' in data.keys():
            net.update({'pore.coords': _sp.vstack(data['pore_coords'])})
            Np = _sp.shape(net['pore.coords'])[0]
            net.update({'pore.all': _sp.ones((Np,), dtype=bool)})
            del data['pore_coords']
        else:
            logger.warning('\'pore_coords\' not found')

        # Now parse through all the other items
        items = [i for i in data.keys() if '__' not in i]
        for item in items:
            element = item.split('_')[0]
            prop = item.split('_', maxsplit=1)[1]
            net[element+'.'+prop] = _sp.squeeze(data[item].T)

        if network is None:
            network = OpenPNM.Network.GenericNetwork()
        network = _update_network(network=network, net=net)
        return network
Developer: TomTranter | Project: OpenPNM | Lines of code: 51 | Source: IO.py



Note: The scipy.vstack examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are excerpted from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not republish without permission.

