Python extent.create Function Code Examples


This article collects typical usage examples of the Python function spartan.array.extent.create. If you have been wondering exactly how to use the create function, how it is called, or what real-world calls look like, the curated code examples below may help.



The following shows 20 code examples of the create function, sorted by popularity by default.
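
Before diving into the examples, a quick orientation on the call signature. Across the snippets below, extent.create(ul, lr, array_shape) is called with an upper-left corner (inclusive), a lower-right corner (exclusive), and the shape of the enclosing distributed array; the returned extent exposes attributes such as ul, lr, shape and array_shape. A minimal sketch of that calling pattern (the concrete numbers here are hypothetical, chosen only for illustration):

from spartan.array import extent

# A hypothetical 100x50 distributed array; describe the region covering
# rows 10..19 and every column.
array_shape = (100, 50)
ex = extent.create((10, 0), (20, 50), array_shape)

# Judging from the examples below:
#   ex.ul          -> (10, 0)    upper-left corner (inclusive)
#   ex.lr          -> (20, 50)   lower-right corner (exclusive)
#   ex.shape       -> (10, 50)   shape of the region itself
#   ex.array_shape -> (100, 50)  shape of the enclosing array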

Example 1: cholesky

def cholesky(A):
  '''
  Cholesky matrix decomposition.
 
  Args:
    A(Expr): matrix to be decomposed
  '''
 
  A = expr.force(A)
  n = int(math.sqrt(len(A.tiles)))
  tile_size = A.shape[0] // n  # integer tile size (floor division)
  for k in range(n):
    # A[k,k] = DPOTRF(A[k,k])
    diag_ex = get_ex(k, k, tile_size, A.shape)
    A = expr.region_map(A, diag_ex, _cholesky_dpotrf_mapper)
    
    if k == n - 1: break
    
    # A[l,k] = DTRSM(A[k,k], A[l,k]) l -> [k+1,n)
    col_ex = extent.create(((k+1)*tile_size, k*tile_size), (n*tile_size, (k+1)*tile_size), A.shape)
    A = expr.region_map(A, col_ex, _cholesky_dtrsm_mapper, fn_kw=dict(diag_ex=diag_ex))
    
    # A[m,m] = DSYRK(A[m,k], A[m,m]) m -> [k+1,n)
    # A[l,m] = DGEMM(A[l,k], A[m,k], A[l,m]) m -> [k+1,n) l -> [m+1,n)
    col_exs = list([extent.create((m*tile_size, m*tile_size), (n*tile_size, (m+1)*tile_size), A.shape) for m in range(k+1, n)])
    A = expr.region_map(A, col_exs, _cholesky_dsyrk_dgemm_mapper, fn_kw=dict(k=k))
  
  
  # zero out the blocks above the diagonal (the upper-right corner)
  col_exs = list([extent.create((0, m*tile_size), (m*tile_size, (m+1)*tile_size), A.shape) for m in range(1, n)])
  A = expr.region_map(A, col_exs, lambda input, array, ex: np.zeros(input.shape, input.dtype))
  return A
Developer: EasonLiao | Project: spartan | Lines: 32 | Source: cholesky.py
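
To make the block indexing above concrete, here is the extent arithmetic worked out for a hypothetical 8x8 matrix split into n = 4 tiles of tile_size = 2, at iteration k = 1 (the numbers are illustrative only):

# Hypothetical: A.shape = (8, 8), n = 4, tile_size = 2, k = 1.
# DTRSM column extent (rows below the diagonal tile, one tile wide):
#   col_ex = extent.create((4, 2), (8, 4), (8, 8))
# DSYRK/DGEMM column extents, one per m in [k+1, n):
#   m = 2 -> extent.create((4, 4), (8, 6), (8, 8))
#   m = 3 -> extent.create((6, 6), (8, 8), (8, 8))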


Example 2: _svm_mapper

def _svm_mapper(array, ex, labels, alpha, w, m, scale, lambda_n):
  '''
  Local linear SVM solver.

  Args:
    array(DistArray): features of the training data.
    ex(Extent): Region being processed.
    labels(DistArray): labels of the training data.
    alpha(DistArray): the alpha (dual) vector; the parameter optimized by SVM.
    w(DistArray): weight vector from the previous iteration.
    m(int): number of samples to train on (here set to the whole local data set).
    scale(int): number of tiles.
    lambda_n(float): lambda divided by the size of the total training data; the regularization parameter of this SVM model.
  '''
  X = array.fetch(ex)
  Y = labels.fetch(extent.create((ex.ul[0], 0), (ex.lr[0], 1), labels.shape))
  
  tile_id = ex.ul[0] // (ex.lr[0] - ex.ul[0])
  ex_alpha = extent.create((tile_id*m, 0), ((tile_id+1)*m, 1), alpha.shape)
  old_alpha = alpha.fetch(ex_alpha)
  
  old_w = np.zeros((X.shape[1],1)) if w is None else w[:]
  
  new_w, new_alpha = _svm_disdca_train(X, Y, old_alpha, old_w, m, scale, lambda_n)
  
  # update the alpha vector
  alpha.update(ex_alpha, new_alpha)
  
  # reduce the weight vector
  yield extent.create((0, 0), (array.shape[1], 1), (array.shape[1], 1)), new_w
Developer: EasonLiao | Project: spartan | Lines: 30 | Source: disdca_svm.py
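
A note on the tile bookkeeping above: tile_id is recovered from the extent itself, which relies on every tile spanning the same number of rows. Worked out with hypothetical numbers (rows 200..299 of the training data, 100-row tiles, m = 100 local samples):

# ex.ul[0] = 200, ex.lr[0] = 300
# tile_id  = 200 // (300 - 200) = 2
# ex_alpha = extent.create((200, 0), (300, 1), alpha.shape)   # rows [2*m, 3*m)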


Example 3: _solve_U_or_M_mapper

def _solve_U_or_M_mapper(array, ex, U_or_M, la, alpha, implicit_feedback):
  '''
  given A and U (or M), solve M (or U) such that A = U M' 
  using alternating least-squares factorization method
  
  Args:
    array(DistArray): the user-item (or item-user) rating matrix.
    ex(Extent): region being processed.
    U_or_M(DistArray): the matrix U (or M).
    la(float): the regularization parameter (lambda) of ALS.
    alpha(int): confidence parameter used on implicit feedback.
    implicit_feedback(bool): whether using implicit_feedback method for als.
  '''
  rating_matrix = array.fetch(extent.create((ex.ul[0], 0), (ex.lr[0], array.shape[1]), array.shape))
  U_or_M = U_or_M[:]
  
  if implicit_feedback:
    Y = U_or_M
    YT = Y.T
    YTY = np.dot(YT, Y)
 
  result = np.zeros((rating_matrix.shape[0], U_or_M.shape[1]))
  for i in range(rating_matrix.shape[0]):
    if implicit_feedback:
      result[i] = _implicit_feedback_als_solver(rating_matrix[i], la, alpha, Y, YT, YTY)
    else:
      non_zero_idx = rating_matrix[i].nonzero()[0]
      rating_vector = rating_matrix[i, non_zero_idx]
      feature_vectors = U_or_M[non_zero_idx]
      result[i] = _als_solver(feature_vectors, rating_vector, la)
    
  yield extent.create((ex.ul[0], 0), (ex.lr[0], U_or_M.shape[1]), (array.shape[0], U_or_M.shape[1])), result
Developer: xuanhan863 | Project: spartan | Lines: 32 | Source: als.py


Example 4: kmeans_map2_center_mapper

def kmeans_map2_center_mapper(ex, tile, centers=None, m=None):
  X = tile[0]
  weights = tile[1] ** m
  new_centers = np.dot(X.T, weights).T
  # NOTE: this first extent is computed but immediately overwritten below;
  # only the full-centers extent is actually yielded.
  target_ex = extent.create((ex[0].ul[0], ),
                            (ex[0].lr[0], ),
                            (ex[0].array_shape[0], ))
  target_ex = extent.create((0, 0), (centers.shape[0], centers.shape[1]),
                            (centers.shape[0], centers.shape[1]))
  yield target_ex, new_centers
Developer: GabrielWen | Project: spartan | Lines: 10 | Source: fuzzy_kmeans.py


Example 5: _init_label_mapper

def _init_label_mapper(array, ex):
  data = array.fetch(extent.create((ex.ul[0], 0), (ex.lr[0], array.shape[1]), array.shape))
  
  labels = np.zeros((data.shape[0], 1), dtype=np.int64)
  for i in range(data.shape[0]):
    if data[i,0] > data[i,1]:
      labels[i,0] = 1.0
    else:
      labels[i,0] = -1.0
    
  yield extent.create((ex.ul[0], 0), (ex.lr[0], 1), (array.shape[0], 1)), labels
Developer: MaggieQi | Project: spartan | Lines: 11 | Source: test_svm.py


Example 6: _cholesky_dsyrk_dgemm_mapper

def _cholesky_dsyrk_dgemm_mapper(input, array, ex, k):
  
  mk_ex = extent.create((ex.ul[1], k*input.shape[1]), (ex.lr[1], (k+1)*input.shape[1]), array.shape)
  A_mk = array.fetch(mk_ex)
  
  if ex.ul[0] == ex.ul[1] and ex.lr[0] == ex.lr[1]:
    # diag block
    return linalg.blas.dsyrk(-1.0, A_mk, 1.0, input, lower=1)
  else:
    # other block
    lk_ex = extent.create((ex.ul[0], k*input.shape[1]), (ex.lr[0], (k+1)*input.shape[1]), array.shape)
    A_lk = array.fetch(lk_ex)
    return linalg.blas.dgemm(-1.0, A_lk, A_mk.T, 1.0, input)
Developer: EasonLiao | Project: spartan | Lines: 13 | Source: cholesky.py


Example 7: kmeans_outer_dist_mapper

def kmeans_outer_dist_mapper(ex_a, tile_a, ex_b, tile_b):
  points = tile_a
  centers = tile_b
  target_ex = extent.create((ex_a[0].ul[0], ),
                            (ex_a[0].lr[0], ),
                            (ex_a[0].array_shape[0], ))
  yield target_ex, np.argmin(cdist(points, centers), axis=1)
Developer: rgardner | Project: spartan | Lines: 7 | Source: k_means_.py


Example 8: _fuzzy_kmeans_mapper

def _fuzzy_kmeans_mapper(array, ex, old_centers, centers, counts, labels, m):
  '''
  Update the new centers, new counts and labels using fuzzy kmeans method.
  
  Args:
    array(DistArray): the input data points matrix.
    ex(Extent): region being processed.
    old_centers(DistArray): the current centers of each cluster.
    centers(DistArray): the new centers to be updated.
    counts(DistArray): the new counts to be updated.
    labels(DistArray): the new labels for each point to be updated.
    m(float): the parameter of fuzzy kmeans. 
  '''
  points = array.fetch(ex)
  old_centers = old_centers[:]
  
  new_centers = np.zeros_like(old_centers)
  new_counts = np.zeros((old_centers.shape[0], 1))
  new_labels = np.zeros(points.shape[0], dtype=np.int)
  for i in range(points.shape[0]):
    point = points[i]    
    prob = _calc_probability(point, old_centers, m)
    new_labels[i] = np.argmax(prob)
    
    # Use a separate loop variable so the outer point index `i` is not shadowed.
    for j in prob.nonzero()[0]:
      new_counts[j] += prob[j]
      new_centers[j] += prob[j] * point
      
  centers.update(extent.from_shape(centers.shape), new_centers)
  counts.update(extent.from_shape(counts.shape), new_counts)
  labels.update(extent.create((ex.ul[0],), (ex.lr[0],), labels.shape), new_labels)
  return []  
Developer: EasonLiao | Project: spartan | Lines: 32 | Source: fuzzy_kmeans.py


Example 9: _solve_U_or_M_mapper

def _solve_U_or_M_mapper(ex_a, rating_matrix, ex_b, U_or_M, la, alpha, implicit_feedback, shape=None):
  '''
  given A and U (or M), solve M (or U) such that A = U M'
  using alternating least-squares factorization method

  Args:
    rating_matrix: the user-item (or item-user) rating matrix.
    U_or_M: the matrix U (or M).
    la(float): the regularization parameter (lambda) of ALS.
    alpha(int): confidence parameter used on implicit feedback.
    implicit_feedback(bool): whether using implicit_feedback method for als.
  '''
  if implicit_feedback:
    Y = U_or_M
    YT = Y.T
    YTY = np.dot(YT, Y)

  result = np.zeros((rating_matrix.shape[0], U_or_M.shape[1]))
  if implicit_feedback:
    for i in range(rating_matrix.shape[0]):
      result[i] = _implicit_feedback_als_solver(rating_matrix[i], la, alpha, Y, YT, YTY)
  else:
    for i in range(rating_matrix.shape[0]):
      non_zero_idx = rating_matrix[i].nonzero()[0]
      rating_vector = rating_matrix[i, non_zero_idx]
      feature_vectors = U_or_M[non_zero_idx]
      result[i] = _als_solver(feature_vectors, rating_vector, la)

  target_ex = extent.create((ex_a.ul[0], 0), (ex_a.lr[0], U_or_M.shape[1]), shape)
  yield target_ex, result
Developer: GabrielWen | Project: spartan | Lines: 30 | Source: als.py


Example 10: _lda_mapper

def _lda_mapper(ex_a, term_docs_matrix, ex_b, local_topic_term_counts, k_topics, alpha, eta, max_iter_per_doc):
    """
  Using Collapsed Variational Bayes method (Mahout implementation) to train local LDA model.

  Args:
    ex_a(Extent): region of the term/document matrix being processed.
    term_docs_matrix: the count of each term in each document (local tile).
    ex_b(Extent): region of the topic/term counts being processed.
    local_topic_term_counts: the local matrix holding p(topic x | term).
    k_topics: the number of topics we need to find.
    alpha(float): parameter of the LDA model.
    eta(float): parameter of the LDA model.
    max_iter_per_doc(int): the max number of iterations to train each document.
  """
    # term_docs_matrix = array.fetch(extent.create((0, ex.ul[1]), (array.shape[0], ex.lr[1]), array.shape))
    # local_topic_term_counts = topic_term_counts[:]
    local_topic_sums = np.linalg.norm(local_topic_term_counts, 1, axis=1)

    local_topic_term_counts = _lda_train(
        term_docs_matrix, local_topic_term_counts, local_topic_sums, None, k_topics, alpha, eta, max_iter_per_doc
    )

    # yield extent.create((0, 0), (k_topics, array.shape[0]), (k_topics, array.shape[0])), local_topic_term_counts
    yield (
        extent.create((0, 0), (k_topics, ex_a.array_shape[0]), (k_topics, ex_a.array_shape[0])),
        local_topic_term_counts,
    )
Developer: rgardner | Project: spartan | Lines: 26 | Source: lda.py


Example 11: _lda_doc_topic_mapper

def _lda_doc_topic_mapper(
    ex_a, term_docs_matrix, ex_b, local_topic_term_counts, k_topics, alpha, eta, max_iter_per_doc
):
    """
  Last iteration that uses Collapsed Variational Bayes method (Mahout implementation) to calculate the final document/topic inference.

  Args:
    ex_a(Extent): region of the term/document matrix being processed.
    term_docs_matrix: the count of each term in each document (local tile).
    ex_b(Extent): region of the topic/term counts being processed.
    local_topic_term_counts: the local matrix holding p(topic x | term).
    k_topics: the number of topics we need to find.
    alpha(float): parameter of the LDA model.
    eta(float): parameter of the LDA model.
    max_iter_per_doc(int): the max number of iterations to train each document.
  """
    # term_docs_matrix = array.fetch(extent.create((0, ex.ul[1]), (array.shape[0], ex.lr[1]), array.shape))
    # local_topic_term_counts = topic_term_counts[:]
    local_topic_sums = np.linalg.norm(local_topic_term_counts, 1, axis=1)

    doc_topics = np.ones((term_docs_matrix.shape[1], k_topics), dtype=np.float64) / k_topics

    local_topic_term_counts = _lda_train(
        term_docs_matrix, local_topic_term_counts, local_topic_sums, doc_topics, k_topics, alpha, eta, max_iter_per_doc
    )

    # yield extent.create((ex.ul[1], 0), (ex.lr[1], k_topics), (array.shape[1], k_topics)), doc_topics
    yield (extent.create((ex_a.ul[1], 0), (ex_a.lr[1], k_topics), (ex_a.array_shape[1], k_topics)), doc_topics)
Developer: rgardner | Project: spartan | Lines: 27 | Source: lda.py


Example 12: _cluster_mapper

def _cluster_mapper(array, ex, centers):
  '''
  label the cluster id for each data point.

  Args:
    array(DistArray): the input data points matrix.
    ex(Extent): region being processed.
    centers(numpy.array): the center points for each cluster.
  '''
  points = array.fetch(ex)
  labels = np.zeros(points.shape[0], dtype=np.int32)
  for i in range(points.shape[0]):
    point = points[i]
    max_pdf = -1  # renamed from `max` to avoid shadowing the Python builtin
    max_id = -1
    for j in range(centers.shape[0]):
      dist = np.square(centers[j] - point).sum()
      pdf = 1.0 / (1 + dist)
      if max_pdf < pdf:
        max_pdf = pdf
        max_id = j

    labels[i] = max_id

  yield extent.create((ex.ul[0],), (ex.lr[0],), (array.shape[0],)), labels
Developer: GabrielWen | Project: spartan | Lines: 25 | Source: canopy_clustering.py


Example 13: _init_label_mapper

def _init_label_mapper(array, ex):
  data = array.fetch(ex)
  
  labels = np.zeros((data.shape[0], 1), dtype=np.int64)
  for i in range(data.shape[0]):
    labels[i] = np.argmax(data[i])
    
  yield extent.create((ex.ul[0], 0), (ex.lr[0], 1), (array.shape[0], 1)), labels
Developer: EasonLiao | Project: spartan | Lines: 8 | Source: test_naive_bayes.py


Example 14: _sum_instance_by_label_mapper

def _sum_instance_by_label_mapper(array, ex, labels, label_size):
  '''
  For each label, compute the sum of the feature vectors which belong to that label.
  
  Args:
    array(DistArray): tf-idf normalized training data.
    ex(Extent): Region being processed.
    labels(DistArray): labels of the training data.
    label_size: the number of different labels.
  '''
  X = array.fetch(extent.create((ex.ul[0], 0), (ex.lr[0], array.shape[1]), array.shape))
  Y = labels.fetch(extent.create((ex.ul[0], 0), (ex.lr[0], 1), labels.shape))
  
  sum_instance_by_label = np.zeros((label_size, X.shape[1]))
  for i in xrange(Y.shape[0]):
    sum_instance_by_label[Y[i, 0]] += X[i]
    
  yield extent.create((0, 0), (label_size, X.shape[1]), (label_size, X.shape[1])), sum_instance_by_label
Developer: MaggieQi | Project: spartan | Lines: 18 | Source: naive_bayes.py


Example 15: test_unravel

def test_unravel():
  for i in range(100):
    shp = (20, 77)
    ul = (random.randint(0, 19), random.randint(0, 76))
    lr = (random.randint(ul[0] + 1, 20), random.randint(ul[1] + 1, 77))
                         
    a = extent.create(ul, lr, shp)
    ravelled = a.ravelled_pos()
    unravelled = extent.unravelled_pos(ravelled, a.array_shape)
    Assert.eq(a.ul, unravelled)
Developer: EasonLiao | Project: spartan | Lines: 10 | Source: test_extent.py
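
The ravel/unravel round trip exercised here mirrors NumPy's own index flattening. For comparison, a small standalone sketch of the same idea using NumPy (not spartan):

import numpy as np

shape = (20, 77)
ul = (3, 10)

# Flatten the 2-D coordinate to a linear offset and back again.
ravelled = np.ravel_multi_index(ul, shape)     # 3 * 77 + 10 = 241
unravelled = np.unravel_index(ravelled, shape)
assert tuple(unravelled) == ul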


Example 16: kmeans_map2_dist_mapper

def kmeans_map2_dist_mapper(ex, tile, centers=None, m=None):
  points = tile[0]
  target_ex = extent.create((ex[0].ul[0], 0),
                            (ex[0].lr[0], centers.shape[0]),
                            (ex[0].array_shape[0], centers.shape[0]))
  distances = cdist(points, centers)
  distances[distances == 0] = 0.0000000001
  distances **= 1.0 / (m - 1)
  distances /= np.sum(distances, axis=1)[:, np.newaxis]
  yield target_ex, distances
Developer: GabrielWen | Project: spartan | Lines: 10 | Source: fuzzy_kmeans.py
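
As a purely numerical illustration of the transformation above (hypothetical values): with m = 2, a point whose distances to two centers are [1.0, 3.0] is raised to the power 1.0 / (m - 1) = 1, giving [1.0, 3.0], and then row-normalized to [0.25, 0.75], which is the row yielded for that point.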


Example 17: test_ravelled_pos

def test_ravelled_pos():
  a = extent.create((2, 2), (7, 7), (10, 10))
  for i in range(0, 10):
    for j in range(0, 10):
      assert extent.ravelled_pos((i, j), a.array_shape) == 10 * i + j
      
  Assert.eq(a.to_global(0, axis=None), 22)
  Assert.eq(a.to_global(10, axis=None), 42)
  Assert.eq(a.to_global(11, axis=None), 43)
  Assert.eq(a.to_global(20, axis=None), 62)
Developer: EasonLiao | Project: spartan | Lines: 10 | Source: test_extent.py
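
The expected values in this test follow directly from the geometry of the extent: local offsets are laid out row-major inside the 5x5 region that starts at (2, 2) of a 10x10 array (the region is 5 columns wide). Working the test's own numbers out:

# local 0  -> local (0, 0) -> global (2, 2) -> 2 * 10 + 2 = 22
# local 10 -> local (2, 0) -> global (4, 2) -> 4 * 10 + 2 = 42
# local 11 -> local (2, 1) -> global (4, 3) -> 4 * 10 + 3 = 43
# local 20 -> local (4, 0) -> global (6, 2) -> 6 * 10 + 2 = 62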


Example 18: _local_read_sparse_mm

def _local_read_sparse_mm(array, ex, fn, data_begin):
  '''
  1. Note that the Matrix Market format doesn't require (row, col) entries to
     be sorted. If the file is sorted (by either row or col), each worker will
     return only a part of the array. If the file is unsorted, each worker may
     return a very large but sparser sub-array of the original array. In the
     worst case, the sub-array can be as large as the original array, only
     sparser.
  2. We can't know how many lines there are without reading the whole file, so
     we simply decide the region this worker should read based on the file size.
  '''
  data_size = os.path.getsize(fn) - data_begin
  array_size = np.product(array.shape)
  begin = extent.ravelled_pos(ex.ul, array.shape)
  begin = math.ceil(((begin * 1.0) / array_size) * data_size) + data_begin
  end = extent.ravelled_pos([(i - 1) for i in ex.lr], array.shape)
  end = math.floor(((end * 1.0) / array_size) * data_size) + data_begin

  ul = [array.shape[0], array.shape[1]]
  lr = [0, 0]
  rows = []
  cols = []
  data = []
  with open(fn) as fp:
    fp.seek(begin)
    if begin != data_begin:
      fp.seek(begin - 1)
      a = fp.read(1)
      if a != '\n':
        line = fp.readline()

    pos = fp.tell()
    for line in fp:
      if pos > end + 1: # +1 in case end locates on \n
        break
      pos += len(line)
      (_row, _col), val = _extract_mm_coordinate(line)
      _row -= 1
      _col -= 1
      rows.append(_row)
      cols.append(_col)
      data.append(float(val))
      ul[0] = _row if _row < ul[0] else ul[0]
      ul[1] = _col if _col < ul[1] else ul[1]
      lr[0] = _row if _row > lr[0] else lr[0]
      lr[1] = _col if _col > lr[1] else lr[1]

  # Adjust rows and cols based on the ul of this submatrix.
  for i in xrange(len(rows)):
    rows[i] -= ul[0]
    cols[i] -= ul[1]

  new_ex = extent.create(ul, [lr[0] + 1, lr[1] + 1], array.shape)
  new_array = sp.coo_matrix((data, (rows, cols)), new_ex.shape)
  return new_ex, sparse.convert_sparse_array(new_array)
Developer: EasonLiao | Project: spartan | Lines: 55 | Source: write_array.py
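
The byte-range assignment in this reader is proportional: a worker's share of logical array elements is mapped onto the same share of data bytes. With hypothetical numbers, a worker whose extent starts at linear element 250,000 of a 1,000,000-element array, reading a file with 40,000,000 bytes of data after the header, seeks to roughly data_begin + 10,000,000:

# begin_index = 250000, array_size = 1000000, data_size = 40000000
# begin_byte  = ceil(250000 / 1000000.0 * 40000000) + data_begin
#             = 10000000 + data_begin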


Example 19: _transpose_mapper

def _transpose_mapper(array, ex, orig_array):
  '''
  Transpose ``orig_array`` into ``array``.
  
  Args:
    array(DistArray): destination array.
    ex(Extent): region being processed.
    orig_array(DistArray): array to be transposed.
  '''
  orig_ex = extent.create(ex.ul[::-1], ex.lr[::-1], orig_array.shape)
  yield ex, orig_array.fetch(orig_ex).transpose()
Developer: EasonLiao | Project: spartan | Lines: 11 | Source: als.py
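
The extent arithmetic here is just a coordinate swap: a destination region of the transposed array maps back to the mirrored region of the source. For a hypothetical destination extent with ul = (0, 5) and lr = (2, 10) (a 2 x 5 block), the fetched source region is extent.create((5, 0), (10, 2), orig_array.shape) (a 5 x 2 block), and its transpose fills the destination exactly.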


Example 20: test_intersection

def test_intersection():
  a = extent.create((0, 0), (10, 10), None)
  b = extent.create((5, 5), (6, 6), None)
  
  Assert.eq(extent.intersection(a, b),
            extent.create((5,5), (6,6), None))
  Assert.eq(extent.intersection(b, a),
            extent.create((5,5), (6,6), None))
  
  a = extent.create((5, 5), (10, 10), None)
  b = extent.create((4, 6), (6, 8), None)
  Assert.eq(extent.intersection(a, b),
            extent.create((5,6), (6, 8), None))

  a = extent.create((5, 5), (5, 5), None)
  b = extent.create((1, 1), (2, 2), None)
  assert extent.intersection(a, b) is None
Developer: EasonLiao | Project: spartan | Lines: 17 | Source: test_extent.py
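
The behaviour exercised by this test, clipping both rectangles to their overlap and reporting no intersection when the overlap is empty, can be written in a few lines of plain Python. This is only a sketch of the semantics the test implies, not spartan's actual implementation:

def intersect(ul_a, lr_a, ul_b, lr_b):
  '''Return (ul, lr) of the overlap of two half-open boxes, or None.'''
  ul = tuple(max(a, b) for a, b in zip(ul_a, ul_b))
  lr = tuple(min(a, b) for a, b in zip(lr_a, lr_b))
  if any(u >= l for u, l in zip(ul, lr)):
    return None
  return ul, lr

# Matches the expectations in the test above:
#   intersect((0, 0), (10, 10), (5, 5), (6, 6))  -> ((5, 5), (6, 6))
#   intersect((5, 5), (10, 10), (4, 6), (6, 8))  -> ((5, 6), (6, 8))
#   intersect((5, 5), (5, 5),   (1, 1), (2, 2))  -> None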



Note: the spartan.array.extent.create examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use should follow the license of the corresponding project. Do not reproduce without permission.

