
Python numpy.hstack Function Code Examples


This article collects typical usage examples of the numpy.hstack function in Python. If you have been wondering what exactly hstack does, or how to use it in practice, the curated code examples below may help.



Twenty code examples of the hstack function are shown below, sorted by popularity by default.
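Before the project examples, a quick baseline of what hstack itself does: it concatenates 1-D arrays end to end and joins 2-D arrays along columns (axis 1). A minimal demonstration:

import numpy as np

# 1-D arrays are concatenated end to end
print(np.hstack([np.array([1, 2]), np.array([3, 4])]))   # [1 2 3 4]

# 2-D arrays are joined along columns (axis 1)
a = np.ones((2, 2))
b = np.zeros((2, 1))
print(np.hstack((a, b)).shape)                           # (2, 3)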

Example 1: get_roc_score

def get_roc_score(edges_pos, edges_neg, score_matrix, apply_sigmoid=False):

    # Edge case
    if len(edges_pos) == 0 or len(edges_neg) == 0:
        return (None, None, None)

    # Store positive edge predictions, actual values
    preds_pos = []
    pos = []
    for edge in edges_pos:
        if apply_sigmoid:
            preds_pos.append(sigmoid(score_matrix[edge[0], edge[1]]))
        else:
            preds_pos.append(score_matrix[edge[0], edge[1]])
        pos.append(1) # actual value (1 for positive)
        
    # Store negative edge predictions, actual values
    preds_neg = []
    neg = []
    for edge in edges_neg:
        if apply_sigmoid:
            preds_neg.append(sigmoid(score_matrix[edge[0], edge[1]]))
        else:
            preds_neg.append(score_matrix[edge[0], edge[1]])
        neg.append(0) # actual value (0 for negative)
        
    # Calculate scores
    preds_all = np.hstack([preds_pos, preds_neg])
    labels_all = np.hstack([np.ones(len(preds_pos)), np.zeros(len(preds_neg))])
    roc_score = roc_auc_score(labels_all, preds_all)
    # roc_curve_tuple = roc_curve(labels_all, preds_all)
    ap_score = average_precision_score(labels_all, preds_all)
    
    # return roc_score, roc_curve_tuple, ap_score
    return roc_score, ap_score
Developer: habedi, Project: link-prediction, Lines: 35, Source: link_prediction_scores.py
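A minimal sketch of how this function might be called (my own illustration, not part of the original project): the excerpt assumes import numpy as np, the sklearn.metrics scoring functions, and a project-defined sigmoid; a plain logistic sigmoid is substituted here.

import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score

def sigmoid(x):
    # plain logistic function standing in for the project's own helper
    return 1.0 / (1.0 + np.exp(-x))

# toy 4-node score matrix, e.g. dot products of node embeddings
score_matrix = np.random.rand(4, 4)
edges_pos = [(0, 1), (2, 3)]   # edges known to exist
edges_neg = [(0, 3), (1, 2)]   # sampled non-edges
roc, ap = get_roc_score(edges_pos, edges_neg, score_matrix, apply_sigmoid=True)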


Example 2: gen_coastline

def gen_coastline(lon, lat, bathy, depth=0):
    """
    Given lon, lat, and bathymetry, generate vectors of line segments
    of the coastline. This can be exported to matlab (via savemat) to be
    used with the 'editmask' routine for creating grid masks.

    Input
    -----
    lon : array,
        longitudes of bathymetry locations
    lat : array,
        latitudes of bathymetry locations
    bathy : array,
        bathymetry (negative for ocean, positive for land) values
    depth : float,
        depth to use as the definition of the coast

    Returns
    -------
    lon : ndarray,
        vector of coastlines, separated by nan (matlab-style)
    lat : ndarray,
        vector of coastlines, separated by nan (matlab-style)
    """
    CS = plt.contour(lon, lat, bathy, [depth - 0.25, depth + 0.25])
    lon = list()
    lat = list()
    for col in CS.collections:
        for path in col.get_paths():
            lon.append(path.vertices[:, 0])
            lon.append(np.nan)
            lat.append(path.vertices[:, 1])
            lat.append(np.nan)
    return (np.hstack(lon), np.hstack(lat))
Developer: dalepartridge, Project: seapy, Lines: 34, Source: mapping.py


Example 3: torgerson

def torgerson(distances, n_components=2):
    """
    Perform classical MDS (Torgerson scaling).

    .. note::
        If the distances are euclidean then this is equivalent to projecting
        the original data points to the first `n` principal components.

    """
    distances = np.asarray(distances)
    assert distances.shape[0] == distances.shape[1]
    N = distances.shape[0]
    # squared distances
    D_sq = distances ** 2

    # double center the D_sq
    rsum = np.sum(D_sq, axis=1, keepdims=True)
    csum = np.sum(D_sq, axis=0, keepdims=True)
    total = np.sum(csum)
    D_sq -= rsum / N
    D_sq -= csum / N
    D_sq += total / (N ** 2)
    B = np.multiply(D_sq, -0.5, out=D_sq)

    U, L, _ = np.linalg.svd(B)
    if n_components > N:
        U = np.hstack((U, np.zeros((N, n_components - N))))
        L = np.hstack((L, np.zeros((n_components - N))))
    U = U[:, :n_components]
    L = L[:n_components]
    D = np.diag(np.sqrt(L))
    return np.dot(U, D)
Developer: RachitKansal, Project: orange3, Lines: 32, Source: manifold.py
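A standalone sanity check (my own sketch; the excerpt assumes import numpy as np): feed torgerson a Euclidean distance matrix built from random 2-D points and recover a 2-D embedding.

import numpy as np

points = np.random.rand(10, 2)
# pairwise Euclidean distance matrix
diff = points[:, None, :] - points[None, :, :]
distances = np.sqrt((diff ** 2).sum(axis=-1))

embedding = torgerson(distances, n_components=2)
print(embedding.shape)   # (10, 2)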


Example 4: display_layer

def display_layer(X, filename="../images/layer.png"):
    """
    Produces an image, composed of the given N images, patches or neural network weights,
    stored in the array X. Saves it with the given filename.
    :param X: numpy array of size (NxD) — N images, patches or neural network weights
    :param filename: a string, the name of the produced file
    :return: None
    """
    if not isinstance(X, np.ndarray):
        raise TypeError("'X' must be a numpy array")
    N, D = X.shape
    d = get_reshaped_image_size(D)

    if N == 1:
        return X.reshape(d, d, 3)
    divisors = [n for n in range(1, N) if N % n == 0]
    im_sizes = divisors[int(len(divisors) / 2)], int(N / divisors[int(len(divisors) / 2)])
    for i in range(im_sizes[0]):
        # img_row = np.hstack((img_row, np.zeros((d, 1, 3))))
        img_row = np.hstack((np.zeros((d, 1, 3)), np.array(X[i * im_sizes[0], :].reshape(d, d, 3))))
        img_row = np.hstack((img_row, np.zeros((d, 1, 3))))
        for j in range(1, im_sizes[1]):
            img_row = np.hstack((img_row, X[i * im_sizes[1] + j, :].reshape(d, d, 3)))
            img_row = np.hstack((img_row, np.zeros((d, 1, 3))))
        if i == 0:
            img = img_row
        else:
            img = np.vstack((img, img_row))
        img = np.vstack((img, np.zeros((1, img.shape[1], 3))))
    img = np.vstack((np.zeros((1, img.shape[1], 3)), img))
    imsave(filename, img)
    return img
Developer: izmailovpavel, Project: Practicum, Lines: 32, Source: display_layer.py
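The hstack tiling pattern above (separator column, image, separator, image, and so on) in isolation, as a sketch of my own:

import numpy as np

d = 2
a = np.ones((d, d, 3))          # a white 2x2 RGB patch
b = np.full((d, d, 3), 0.5)     # a grey one
sep = np.zeros((d, 1, 3))       # 1-pixel black separator column
row = np.hstack((sep, a, sep, b, sep))
print(row.shape)                # (2, 7, 3)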


Example 5: __mul__

    def __mul__(self, df):
        """
        Extract and stack the poles and zeros of the two filters.

        TODO: handle simplification
        """

        b1 = self.b
        a1 = self.a
        b2 = df.b
        a2 = df.a

        pb1 = np.poly1d(b1)
        pa1 = np.poly1d(a1)
        pb2 = np.poly1d(b2)
        pa2 = np.poly1d(a2)

        rpb1 = pb1.r
        rpb2 = pb2.r
        rpa1 = pa1.r
        rpa2 = pa2.r

        F = DF()
        F.p = np.hstack((rpa1, rpa2))
        F.z = np.hstack((rpb1, rpb2))

        F.simplify()

        return F
Developer: tattoxcm, Project: pylayers, Lines: 29, Source: DF.py
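The DF class is project-specific, so here is the core pattern on its own (my sketch): np.poly1d exposes a polynomial's roots via the .r attribute, and hstack merges the root sets of the two factors.

import numpy as np

pb1 = np.poly1d([1, 2])    # x + 2, root -2
pb2 = np.poly1d([1, -3])   # x - 3, root  3
zeros = np.hstack((pb1.r, pb2.r))
print(zeros)               # [-2.  3.]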


Example 6: save

    def save(self,filename):
        num_objs = len(self.objects)
        data_size = 0
        
        desc = []
        data = []
        for obj in self.objects:
            if isinstance(obj,self.scalars):
                desc.append(0)
                data.append(obj)
                data_size += 1
            else:
                assert(isinstance(obj,np.ndarray))
                desc.append(len(obj.shape))
                desc.append(obj.shape)
                
                data.append(obj.flatten(order='F'))
                data_size += np.prod(obj.shape)

        desc = np.hstack(desc)
        header_size = 3 + desc.size
        output = np.hstack([num_objs,header_size,data_size]
                           + [desc]
                           + data).astype(np.double)
        assert((header_size + data_size,) == output.shape)
        output.tofile(filename)
Developer: order, Project: lcp-research, Lines: 26, Source: marshal.py


Example 7: getScalars

  def getScalars(self, inputData):
    """
    Returns a numpy array containing the sub-field scalar value(s) for
    each sub-field of the inputData. To get the associated field names for each of
    the scalar values, call getScalarNames().

    For a simple scalar encoder, the scalar value is simply the input unmodified.
    For category encoders, it is the scalar representing the category string
    that is passed in. For the datetime encoder, the scalar value is the
    number of seconds since epoch.

    The intent of the scalar representation of a sub-field is to provide a
    baseline for measuring error differences. You can compare the scalar value
    of the inputData with the scalar value returned from topDownCompute() on a
    top-down representation to evaluate prediction accuracy, for example.

    @param inputData The data from the source. This is typically an object with
                 members
    @returns array of scalar values
    """

    retVals = numpy.array([])

    if self.encoders is not None:
      for (name, encoder, offset) in self.encoders:
        values = encoder.getScalars(self._getInputValue(inputData, name))
        retVals = numpy.hstack((retVals, values))
    else:
      retVals = numpy.hstack((retVals, inputData))

    return retVals
Developer: TKCen, Project: nupic, Lines: 31, Source: base.py


Example 8: load_sdss_data_both_catalogs

def load_sdss_data_both_catalogs(hemi):
    lowz = load_sdss_data('lowz', hemi)
    cmass = load_sdss_data('cmass', hemi)
    ra = np.hstack([lowz['ra'],cmass['ra']])
    dec = np.hstack([lowz['dec'],cmass['dec']])
    z = np.hstack([lowz['z'],cmass['z']])        
    return {'ra':ra, 'dec':dec, 'z':z}
Developer: amanzotti, Project: vksz, Lines: 7, Source: vksz.py


Example 9: offsetPlane

def offsetPlane(plane, x, y):
    """
    Takes a numpy 2D array and returns the same plane offset by x and y,
    adding rows and columns of 0 values
    """
    height, width = plane.shape
    dataType = plane.dtype
    # shift x by cropping, creating a new array of columns and stacking
    # horizontally
    if abs(x) > 0:
        newCols = zeros((height, abs(x)), dataType)
        x1 = max(0, 0 - x)
        x2 = min(width, width - x)
        crop = plane[0:height, x1:x2]
        if x > 0:
            plane = hstack((newCols, crop))
        else:
            plane = hstack((crop, newCols))
    # shift y by cropping, creating a new array of rows and stacking
    # vertically
    if abs(y) > 0:
        newRows = zeros((abs(y), width), dataType)
        y1 = max(0, 0 - y)
        y2 = min(height, height - y)
        crop = plane[y1:y2, 0:width]
        if y > 0:
            plane = vstack((newRows, crop))
        else:
            plane = vstack((crop, newRows))
    return plane
Developer: sbesson, Project: scripts, Lines: 30, Source: Channel_Offsets.py
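A sketch of the shift in action (my own example; the excerpt calls zeros, hstack, and vstack unqualified, so the original module presumably does something like from numpy import zeros, hstack, vstack):

import numpy as np
from numpy import zeros, hstack, vstack   # names offsetPlane uses unqualified

plane = np.arange(6, dtype=float).reshape(2, 3)
print(offsetPlane(plane, 1, 0))
# [[0. 0. 1.]
#  [0. 3. 4.]]   contents shift right by one; a zero column fills the gap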


Example 10: sample_trajectory

def sample_trajectory(M, n_states):
    # Samples trajectories from random nodes
    #  in our domain (M)
    G, W = M.get_graph_inv()
    N = G.shape[0]
    if N >= n_states:
        rand_ind = np.random.permutation(N)
    else:
        rand_ind = np.tile(np.random.permutation(N), (1, 10))
    init_states = rand_ind[0:n_states].flatten()
    goal_s = M.map_ind_to_state(M.targetx, M.targety)
    states = []
    states_xy = []
    states_one_hot = []
    # Get optimal path from graph
    g_dense = W
    g_masked = np.ma.masked_values(g_dense, 0)
    g_sparse = csr_matrix(g_dense)
    d, pred = dijkstra(g_sparse, indices=goal_s, return_predecessors=True)
    for i in range(n_states):
        path = trace_path(pred, goal_s, init_states[i])
        path = np.flip(path, 0)
        states.append(path)
    for state in states:
        L = len(state)
        r, c = M.get_coords(state)
        row_m = np.zeros((L, M.n_row))
        col_m = np.zeros((L, M.n_col))
        for i in range(L):
            row_m[i, r[i]] = 1
            col_m[i, c[i]] = 1
        states_one_hot.append(np.hstack((row_m, col_m)))
        states_xy.append(np.hstack((r, c)))
    return states_xy, states_one_hot
Developer: Kaushalya, Project: pytorch-value-iteration-networks, Lines: 34, Source: gridworld.py


Example 11: phase_step_spike_fq

    def phase_step_spike_fq(self, spikes_time, full_step, nb_block, fs):
        stance_spike_fq=[]
        swing_spike_fq=[]
        for step in full_step:
            stance_block_duration = (step[1]-step[0])/nb_block
            swing_block_duration = (step[2]-step[1])/nb_block
            step_stance_count = []
            step_swing_count = []
            for i in range(nb_block):
                step_stance_count.append(0)
                step_swing_count.append(0)

            for spike_time in spikes_time:
                #if stance phase
                if step[0] < spike_time/fs < step[1]:
                    list_block = np.arange(step[0], step[1], stance_block_duration)
                    list_block = np.hstack((list_block, step[1]))
                    for i in range(nb_block):
                        if list_block[i] < spike_time/fs < list_block[i+1]:
                            step_stance_count[i] += 1
                #if swing phase
                elif step[1] < spike_time/fs < step[2]:
                    list_block = np.arange(step[1], step[2], swing_block_duration)
                    list_block = np.hstack((list_block, step[2]))
                    for i in range(nb_block):
                        if list_block[i] < spike_time/fs < list_block[i+1]:
                            step_swing_count[i] += 1
                # elif spike_time/fs > step[2]:
                #     break
            stance_spike_fq.append(np.array(step_stance_count) / stance_block_duration)
            swing_spike_fq.append(np.array(step_swing_count) / swing_block_duration)

        return stance_spike_fq, swing_spike_fq
Developer: scauglog, Project: brain_record_toolbox, Lines: 33, Source: signal_processing.py


Example 12: test_fuzz

    def test_fuzz(self):
        # try a bunch of crazy inputs
        rfuncs = (
                np.random.uniform,
                np.random.normal,
                np.random.standard_cauchy,
                np.random.exponential)
        ntests = 100
        for i in range(ntests):
            rfunc = random.choice(rfuncs)
            target_norm_1 = random.expovariate(1.0)
            n = random.randrange(2, 16)
            A_original = rfunc(size=(n,n))
            E_original = rfunc(size=(n,n))
            A_original_norm_1 = scipy.linalg.norm(A_original, 1)
            scale = target_norm_1 / A_original_norm_1
            A = scale * A_original
            E = scale * E_original
            M = np.vstack([
                np.hstack([A, E]),
                np.hstack([np.zeros_like(A), A])])
            expected_expm = scipy.linalg.expm(A)
            expected_frechet = scipy.linalg.expm(M)[:n, n:]
            observed_expm, observed_frechet = expm_frechet(A, E)
            assert_allclose(expected_expm, observed_expm)
            assert_allclose(expected_frechet, observed_frechet)
Developer: ymarfoq, Project: outilACVDesagregation, Lines: 26, Source: test_matfuncs.py
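The test leans on a classical block-matrix identity (Van Loan): for M = [[A, E], [0, A]], expm(M) equals [[expm(A), L(A, E)], [0, expm(A)]], where L(A, E) is the Frechet derivative of the matrix exponential at A in direction E. A standalone sketch of my own checking the blocks:

import numpy as np
import scipy.linalg

n = 3
A = np.random.rand(n, n)
E = np.random.rand(n, n)
M = np.vstack([np.hstack([A, E]),
               np.hstack([np.zeros_like(A), A])])
big = scipy.linalg.expm(M)
assert np.allclose(big[:n, :n], scipy.linalg.expm(A))   # top-left block is expm(A)
frechet = big[:n, n:]                                   # top-right block is L(A, E)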


Example 13: run_classify

def run_classify(X_groups_train, y_train, X_groups_validate, y_validate):
    """
    Although this function is given groups, it actually doesn't utilize the groups at all in the criterion
    """

    method_label = "gridsearch_lasso"

    X_validate = np.hstack(X_groups_validate)

    max_power = np.log(50)
    min_power = np.log(1e-4)
    lambda_guesses = np.power(np.e, np.arange(min_power, max_power, (max_power - min_power - 1e-5) / (NUM_LAMBDAS - 1)))
    print(method_label, "lambda_guesses", lambda_guesses)

    X_train = np.hstack(X_groups_train)
    problem_wrapper = LassoClassifyProblemWrapper(X_train, y_train, [])

    best_cost = 1e5
    best_betas = []
    best_regularization = lambda_guesses[0]

    for l1 in reversed(lambda_guesses):
        betas = problem_wrapper.solve([l1])
        current_cost, _ = testerror_logistic_grouped(X_validate, y_validate, betas)
        if best_cost > current_cost:
            best_cost = current_cost
            best_betas = betas
            best_regularization = l1
            print(method_label, "best_cost so far", best_cost, "best_regularization", best_regularization)
            sys.stdout.flush()

    print(method_label, "best_validation_error", best_cost)
    print(method_label, "best lambdas:", best_regularization)

    return best_betas, best_cost
Developer: jjfeng, Project: descent_optimization, Lines: 35, Source: gridsearch_lasso.py


Example 14: _plot_traj

    def _plot_traj(self, z, axes, units):
        """Plots spacecraft trajectory.

        Args:
            - z (``tuple``, ``list``, ``numpy.ndarray``): Decision chromosome.
            - axes (``matplotlib.axes._subplots.Axes3DSubplot``): 3D axes to use for the plot
            - units (``float``, ``int``): Length unit by which to normalise data.

        Examples:
            >>> prob.extract(pykep.trajopt.indirect_or2or).plot_traj(pop.champion_x)
        """

        # times
        t0 = pk.epoch(0)
        tf = pk.epoch(z[0])

        # Mean Anomalies
        M0 = z[1] - self.elem0[1] * np.sin(z[1])
        Mf = z[2] - self.elemf[1] * np.sin(z[2])

        elem0 = np.hstack([self.elem0[:5], [M0]])
        elemf = np.hstack([self.elemf[:5], [Mf]])

        # Keplerian points
        kep0 = pk.planet.keplerian(t0, elem0)
        kepf = pk.planet.keplerian(tf, elemf)

        # planets
        pk.orbit_plots.plot_planet(
            kep0, t0=t0, units=units, ax=axes, color=(0.8, 0.8, 0.8))
        pk.orbit_plots.plot_planet(
            kepf, t0=tf, units=units, ax=axes, color=(0.8, 0.8, 0.8))
Developer: darioizzo, Project: pykep, Lines: 32, Source: _indirect.py


Example 15: _stimcorr_core

    def _stimcorr_core(self, motionfile, intensityfile, designmatrix, cwd=None):
        """
        Core routine for determining stimulus correlation

        """
        if not cwd:
            cwd = os.getcwd()
        # read in motion parameters
        mc_in = np.loadtxt(motionfile)
        g_in = np.loadtxt(intensityfile)
        g_in.shape = g_in.shape[0], 1
        dcol = designmatrix.shape[1]
        mccol = mc_in.shape[1]
        concat_matrix = np.hstack((np.hstack((designmatrix, mc_in)), g_in))
        cm = np.corrcoef(concat_matrix, rowvar=0)
        corrfile = self._get_output_filenames(motionfile, cwd)
        # write output to outputfile
        file = open(corrfile, 'w')
        file.write("Stats for:\n")
        file.write("Stimulus correlated motion:\n%s\n" % motionfile)
        for i in range(dcol):
            file.write("SCM.%d:" % i)
            for v in cm[i, dcol + np.arange(mccol)]:
                file.write(" %.2f" % v)
            file.write('\n')
        file.write("Stimulus correlated intensity:\n%s\n" % intensityfile)
        for i in range(dcol):
            file.write("SCI.%d: %.2f\n" % (i, cm[i, -1]))
        file.close()
Developer: DimitriPapadopoulos, Project: nipype, Lines: 29, Source: rapidart.py


Example 16: dobetterstuff

def dobetterstuff(inpath):
    data_files = [f for f in os.listdir(inpath) if f.endswith('.mel.npy')]
    random.shuffle(data_files)
    artists = set([f[:18] for f in data_files])
    artist_string_to_id = dict([(s,i) for i, s in enumerate(artists)])

    def get_split(datafiles___, splitpercent):
        # gen = filtered_stratified_split(datafiles___,
        #                                 sklearn.cross_validation.StratifiedShuffleSplit,
        #                                 [1] * len(datafiles___), n_iterations=1, test_size=splitpercent)
        gen = sklearn.cross_validation.ShuffleSplit(len(datafiles___), 1, splitpercent)
        for i_trs, i_tes in gen:
            return [datafiles___[i] for i in i_trs],  [datafiles___[i] for i in i_tes]

    training_files, test_files =  get_split(data_files, .2)
    training_files, validation_files = get_split(training_files, .2)

    print(training_files)
    print(test_files)
    print(validation_files)

    train_set_y = np.hstack([[artist_string_to_id[f[:18]]] * 129 for f in training_files])
    train_set_x = np.vstack([np.load(os.path.join(inpath, f)) for f in training_files])
    test_set_y = np.hstack([[artist_string_to_id[f[:18]]] * 129 for f in test_files])
    test_set_x = np.vstack([np.load(os.path.join(inpath, f)) for f in test_files])
    validation_set_y = np.hstack([[artist_string_to_id[f[:18]]] * 129 for f in validation_files])
    validation_set_x = np.vstack([np.load(os.path.join(inpath, f)) for f in validation_files])

    datasets = [(train_set_x, train_set_y), (validation_set_x, validation_set_y), (test_set_x, test_set_y)]
    return datasets
Developer: bmcfee, Project: deep-artists, Lines: 30, Source: make_dataset.py


Example 17: _train

    def _train(self, samples):
        """Perform network training.

        Parameters
        ----------
        samples : array-like
          Used for unsupervised training of the SOM.
        """
        # XXX initialize with clever default, e.g. plain of first two PCA
        # components
        self._K = np.random.standard_normal(tuple(self.kshape) + (samples.shape[1],))

        # units weight vector deltas for batch training
        # (height x width x #features)
        unit_deltas = np.zeros(self._K.shape, dtype='float')

        # precompute distance kernel between elements in the Kohonen layer
        # that will remain constant throughout the training
        # (just compute one quadrant, as the distances are symmetric)
        # XXX maybe do other than squared Euclidean?
        dqd = np.fromfunction(lambda x, y: (x**2 + y**2)**0.5,
                             self.kshape, dtype='float')

        # for all iterations
        for it in range(1, self.niter + 1):
            # compute the neighborhood impact kernel for this iteration
            # has to be recomputed since kernel shrinks over time
            k = self._compute_influence_kernel(it, dqd)

            # for all training vectors
            for s in samples:
                # determine closest unit (as element coordinate)
                b = self._get_bmu(s)
                # train all units at once by unfolding the kernel (from the
                # single quadrant that is precomputed), cutting it to the
                # right shape and simply multiply it to the difference of target
                # and all unit weights....
                infl = np.vstack((
                        np.hstack((
                            # upper left
                            k[b[0]:0:-1, b[1]:0:-1],
                            # upper right
                            k[b[0]:0:-1, :self.kshape[1] - b[1]])),
                        np.hstack((
                            # lower left
                            k[:self.kshape[0] - b[0], b[1]:0:-1],
                            # lower right
                            k[:self.kshape[0] - b[0], :self.kshape[1] - b[1]]))
                               ))
                unit_deltas += infl[:,:,np.newaxis] * (s - self._K)

            # apply cumulative unit deltas
            self._K += unit_deltas

            if __debug__:
                debug("SOM", "Iteration %d/%d done: ||unit_deltas||=%g" %
                      (it, self.niter, np.sqrt(np.sum(unit_deltas **2))))

            # reset unit deltas
            unit_deltas.fill(0.)
Developer: PepGardiola, Project: PyMVPA, Lines: 60, Source: som.py


Example 18: setUp

    def setUp(self):
        db = pysal.open(pysal.examples.get_path("baltim.dbf"),'r')
        self.ds_name = "baltim.dbf"
        self.y_name = "PRICE"
        self.y = np.array(db.by_col(self.y_name)).T
        self.y.shape = (len(self.y),1)
        self.x_names = ["NROOM","AGE","SQFT"]
        self.x = np.array([db.by_col(var) for var in self.x_names]).T
        ww = pysal.open(pysal.examples.get_path("baltim_q.gal"))
        self.w = ww.read()
        ww.close()
        self.w_name = "baltim_q.gal"
        self.w.transform = 'r'
        self.regimes = db.by_col("CITCOU")
        # Artificial:
        n = 256
        self.n2 = n // 2
        self.x_a1 = np.random.uniform(-10,10,(n,1))
        self.x_a2 = np.random.uniform(1,5,(n,1))
        self.q_a = self.x_a2 + np.random.normal(0,1,(n,1))
        self.x_a = np.hstack((self.x_a1,self.x_a2))
        self.y_a = np.dot(np.hstack((np.ones((n,1)),self.x_a)),np.array([[1],[0.5],[2]])) + np.random.normal(0,1,(n,1))
        latt = int(np.sqrt(n))
        self.w_a = pysal.lat2W(latt,latt)
        self.w_a.transform='r'
        self.regi_a = [0]*(n // 2) + [1]*(n // 2)
        self.w_a1 = pysal.lat2W(latt // 2,latt)
        self.w_a1.transform='r'
Developer: nathania, Project: pysal, Lines: 28, Source: test_ml_error_regimes.py


Example 19: evaluate_transformation_matrix

    def evaluate_transformation_matrix(self, dynamic_values, constant_values):
        """Returns the numerical transformation matrices for each time step.

        Parameters
        ----------
        dynamic_values : array_like, shape(m,) or shape(n, m)
            The m state values for each n time step.
        constant_values : array_like, shape(p,)
            The p constant parameter values of the system.

        Returns
        -------
        transform_matrix : numpy.array, shape(n, 4, 4)
            A 4 x 4 transformation matrix for each time step.

        """
        # If dynamic_values is already a numpy array, use it as-is;
        # otherwise convert it to one:

        states = np.array(dynamic_values)
        if len(states.shape) > 1:
            n = states.shape[0]
            new = np.zeros((n, 4, 4))
            for i, time_instance in enumerate(states):
                args = np.hstack((time_instance, constant_values))
                new[i, :, :] = self._numeric_transform(*args)
        else:
            n = 1
            args = np.hstack((states, constant_values))
            new = self._numeric_transform(*args)

        self._visualization_matrix = new.reshape(n, 16)
        return self._visualization_matrix
Developer: chrisdembia, Project: pydy, Lines: 33, Source: visualization_frame.py


Example 20: compute_sketch

    def compute_sketch(self):
        start_time = time.time()
        if self.sketch is not None:
            return self.sketch
        mat_b = np.zeros([self.l + self.b_size, self.m])
        # compute zero valued row list
        zero_rows = np.nonzero([round(s, 7) == 0.0 for s in np.sum(mat_b[:self.l, :], axis = 1)])[0]
        zero_rows = np.hstack((zero_rows, np.arange(self.l, self.l + self.b_size))).tolist()
        # repeat inserting each row of matrix A
        for i in range(0, self.mat.shape[0]):
            # insert a row into matrix B
            mat_b[zero_rows[0], :] = self.mat[i, :]
            # remove zero valued row from the list
            zero_rows.remove(zero_rows[0])
            # if there is no more zero valued row
            if len(zero_rows) == 0:
                # compute SVD of matrix B, we want to find the first l
                self._sketch_func(mat_b)
                # update the zero valued row list
                zero_rows = np.nonzero([round(s, 7) == 0.0 for s in np.sum(mat_b[:self.l, :], axis = 1)])[0]
                zero_rows = np.hstack((zero_rows, np.arange(self.l, self.l + self.b_size))).tolist()
        # why do we need this here?
        # do we need to do a sketch one last time at the end?
        self._sketch_func(mat_b)
        # get rid of extra non-zero rows when we return
        self.sketch = mat_b[:self.l, :]
        self.sketching_time = time.time() - start_time
        return self.sketch
Developer: nithintumma, Project: sketching, Lines: 28, Source: fd_sketch.py



Note: the numpy.hstack examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Please do not reproduce without permission.

