
Python networkx.pagerank_scipy Function Code Examples


This article collects typical usage examples of the Python function networkx.pagerank_scipy. If you have been wondering what pagerank_scipy does, how to call it, or what real-world usage looks like, the curated examples below should help.



The following 20 code examples of the pagerank_scipy function are ordered roughly by popularity.
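Before the examples, here is a minimal sketch of our own showing the basic call (note: pagerank_scipy was deprecated in NetworkX 2.6 and removed in 3.0, where nx.pagerank now uses the same SciPy-based implementation):

    import networkx as nx

    # Rank the nodes of a small directed graph with the SciPy-backed solver.
    G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 2)])
    scores = nx.pagerank_scipy(G, alpha=0.85, tol=1e-08)  # node -> score dict
    print(sorted(scores, key=scores.get, reverse=True))   # nodes, best first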

Example 1: test_scipy_pagerank

 def test_scipy_pagerank(self):
     G = self.G
     p = networkx.pagerank_scipy(G, alpha=0.9, tol=1.e-08)
     for n in G:
         assert_almost_equal(p[n], G.pagerank[n], places=4)
     personalize = dict((n, random.random()) for n in G)
     p = networkx.pagerank_scipy(G, alpha=0.9, tol=1.e-08,
                                 personalization=personalize)
Author: jklaise | Project: networkx | Lines: 8 | Source: test_pagerank.py


Example 2: test_scipy_pagerank

    def test_scipy_pagerank(self):
        G = self.G
        try:
            import scipy
        except ImportError:
            raise SkipTest("scipy not available.")
        p = networkx.pagerank_scipy(G, alpha=0.9, tol=1.0e-08)
        for n in G:
            assert_almost_equal(p[n], G.pagerank[n], places=4)
        personalize = dict((n, random.random()) for n in G)
        p = networkx.pagerank_scipy(G, alpha=0.9, tol=1.0e-08, personalization=personalize)

        assert_raises(networkx.NetworkXError, networkx.pagerank_scipy, G, max_iter=0)
Author: steveblackmon | Project: networkx | Lines: 13 | Source: test_pagerank.py


Example 3: test_empty_scipy

 def test_empty_scipy(self):
     try:
         import scipy
     except ImportError:
         raise SkipTest("scipy not available.")
     G = networkx.Graph()
     assert_equal(networkx.pagerank_scipy(G), {})
Author: ciarancourtney | Project: cloudify-trial | Lines: 7 | Source: test_pagerank.py


Example 4: random_walk_word_scoring

    def random_walk_word_scoring(self):
        """Compute a random walk ranking on the words using the power method.

        """
        G = nx.Graph()

        # loop through the sentences to build the graph
        for i, sentence in enumerate(self.sentences):
            nodes = set()
            for words, offset in sentence.candidates:
                for w in words:
                    nodes.add(w)

            # add the missing nodes to the graph
            for node in nodes:
                if node not in G:
                    G.add_node(node)
            
            # add the edges to the graph
            for n1, n2 in combinations(nodes, 2):
                if not G.has_edge(n1, n2):
                    G.add_edge(n1, n2, weight=0)
                G[n1][n2]['weight'] += 1.0

        # return the random walk scores
        return self.normalize(nx.pagerank_scipy(G))
Author: boudinfl | Project: kepy | Lines: 26 | Source: word_based.py


Example 5: ExtractSentence

def ExtractSentence(text, k):
    "Rank the sentences of a text by importance."
    print('Starting sentence importance ranking')

    sent_tokens = nlp.sent_tokenize(text)

    # Possible extension: add filtering constraints, e.g. drop sentences whose
    # entity count falls below a threshold.
    sent_tokens = filter_sent(sent_tokens, 1)

    # Build the graph structure
    text_graph = graph_construct(sent_tokens)

    # networkx ships three PageRank variants: the plain power-iteration
    # pagerank, a NumPy-based one, and the SciPy sparse-matrix one used below.
    print('start to calculate')
    #cal_gr_page_rank = nx.pagerank(text_graph, weight='weight')
    cal_gr_page_rank = nx.pagerank_scipy(text_graph)
    print('ended')

    # Sort by final score and keep the top K; possible extension: cap the
    # selection at sentences totalling no more than 250 words.
    sents = sorted(cal_gr_page_rank, key=cal_gr_page_rank.get, reverse=True)

    kth = get_sum_sents(sents, 250)
    # top K

    str_tmp_list = []
    for sidx in range(kth):
        str_tmp = sents[sidx]
        str_tmp += '[%.4f]' % (cal_gr_page_rank[sents[sidx]])
        str_tmp_list.append(str_tmp)
    print_score(str_tmp_list)

    return ' '.join(sents[:kth])
Author: JunoShen | Project: insummer | Lines: 32 | Source: text_rank.py
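The comment in Example 5 mentions the three PageRank implementations that NetworkX shipped before version 3.0. A small comparison sketch of our own, on a toy random graph (all three should agree up to numerical tolerance):

    import networkx as nx

    G = nx.gnp_random_graph(50, 0.1, seed=42, directed=True)

    pr_power = nx.pagerank(G)        # pure-Python power iteration
    pr_numpy = nx.pagerank_numpy(G)  # dense eigenvector solve, small graphs
    pr_scipy = nx.pagerank_scipy(G)  # sparse power iteration, large graphs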


Example 6: rooted_pagerank

def rooted_pagerank(G, root, alpha=0.85, beta=0, weight='weight'):
    """Return the rooted PageRank of all nodes with respect to node `root`

    Parameters
    ----------

    G : a networkx.(Di)Graph
        network to compute PR on

    root : a node from the network
        the node that will be the starting point of all random walks

    alpha : float
        PageRank probability that we will advance to a neighbour of the
        current node in a random walk

    beta : float or int
        Normally, we return to the root node with probability 1 - alpha.
        With this parameter, we can also advance to a random other node in the
        network with probability beta. Thus, we get back to the root node with
        probability 1 - alpha - beta. This is off (0) by default.

    weight : string or None
        The edge attribute that holds the numerical value used for
        the edge weight.  If None then treat as unweighted.

    """
    personalization = dict.fromkeys(G, beta)
    personalization[root] = 1 - beta

    return networkx.pagerank_scipy(G, alpha, personalization, weight=weight)
Author: royshan | Project: linkpred | Lines: 31 | Source: algorithms.py
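A hypothetical driver for rooted_pagerank above (the toy graph and node names are our own):

    import networkx as nx

    # Score every node by its proximity to root 'a' via personalized PageRank.
    G = nx.Graph([('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a')])
    scores = rooted_pagerank(G, root='a', alpha=0.85)
    print(max(scores, key=scores.get))  # 'a' itself typically ranks highest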


Example 7: compute_centrality

def compute_centrality(star_dict, edge_dict):

    # build up a nx graph
    galaxy = networkx.Graph()
    for v, vertex in star_dict.items():
        galaxy.add_node(v)

    for v, neighbors in edge_dict.items():
        for n in neighbors:
            galaxy.add_edge(v, n)

    print("betweenness")
    betweenness_map = networkx.current_flow_betweenness_centrality(galaxy)
    betweenness_map = normalize(betweenness_map)

    for key, value in betweenness_map.items():
        star_dict[key]['betweenness'] = value

    print("closeness")
    closeness_map = networkx.current_flow_closeness_centrality(galaxy)
    closeness_map = normalize(closeness_map)

    for key, value in closeness_map.items():
        star_dict[key]['closeness'] = value

    print("pagerank")
    pagerank_map = networkx.pagerank_scipy(galaxy)
    pagerank_map = normalize(pagerank_map)

    for key, value in pagerank_map.items():
        star_dict[key]['pagerank'] = value
Author: ejmahler | Project: galaxygen | Lines: 32 | Source: centrality.py
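Example 7 rescales each centrality dict with an external normalize() helper that is not shown (Example 9 below performs the same min-max scaling inline). A minimal version of the kind presumably intended, offered as an assumption rather than the original code:

    def normalize(score_map):
        # Min-max scale a node -> score dict into [0, 1].
        lo, hi = min(score_map.values()), max(score_map.values())
        span = (hi - lo) or 1.0  # guard against constant scores
        return {k: (v - lo) / span for k, v in score_map.items()}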


Example 8: text_summary

def text_summary(doc, sent_count):
    """
    Summarizes given text using word vectors and graph-based ranking.

    Args:
        doc: a spacy.Doc object
        sent_count: number (/ratio) of sentences in the summary
    Returns:
        Text summary
    """
    sents = list(doc.sents)
    sent_graph = networkx.Graph()
    sent_graph.add_nodes_from(idx for idx, sent in enumerate(sents))

    for i, j in it.combinations(sent_graph.nodes_iter(), 2):
        # Calculate cosine similarity of two sentences transformed to the interval [0,1]
        similarity = (sents[i].similarity(sents[j]) + 1) / 2
        if similarity != 0:
            sent_graph.add_edge(i, j, weight=similarity)

    sent_ranks = networkx.pagerank_scipy(sent_graph)

    if 0 < sent_count < 1:
        sent_count = round(sent_count * len(sent_ranks))
    sent_count = int(sent_count)

    top_indices = top_keys(sent_count, sent_ranks)

    # Return the key sentences in chronological order
    top_sents = map(lambda i: sents[i], sorted(top_indices))

    return format_output(doc, list(top_sents))
Author: rkheikkila | Project: summarizer | Lines: 32 | Source: summarizer.py
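A hypothetical driver for text_summary (the model name and input file are assumptions; a model with word vectors such as en_core_web_md is needed because Doc.similarity relies on them):

    import spacy

    nlp = spacy.load("en_core_web_md")   # assumed: a model with word vectors
    with open("article.txt") as f:       # assumed input file
        doc = nlp(f.read())
    print(text_summary(doc, sent_count=3))    # a three-sentence summary
    print(text_summary(doc, sent_count=0.2))  # or roughly 20% of the sentences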


Example 9: main

def main():
    print('- updating pagerank :')

    # DB-CONNECT
    conn = sqlite3.connect(db_path)
    c = conn.cursor()

    # DB-EXECUTE
    # get subgraph as an adjacency dict
    r = c.execute("SELECT blog_name, source_title FROM subgraph")
    graph = {key: value.split() for (key, value) in r}

    G = nx.DiGraph(graph)
    pr = nx.pagerank_scipy(G, alpha=0.85)

    # normalise scores to [0, 1]
    ranks = pr.values()
    rank_min, rank_max = min(ranks), max(ranks)
    for k in pr:
        pr[k] = round((pr[k] - rank_min) / (rank_max - rank_min), 4)

    # update table
    for blog in pr:
        c.execute("UPDATE tumblr_model SET pagerank=? WHERE blog_name=?", [pr[blog], blog])

    # DB-COMMIT AND CLOSE
    conn.commit()
    conn.close()

    # sorting, optional
    pr_sorted = sorted(pr.items(), key=operator.itemgetter(1))
    print("    %s is the most popular domain in the network" % pr_sorted[-1][0])
    print('')
Author: botorge | Project: vscave | Lines: 32 | Source: update_pagerank.py


Example 10: candidate_weighting

    def candidate_weighting(self, window=10, pos=None, normalized=False):
        """ Candidate weight calculation using random walk.

            Args:
                window (int): the window within the sentence for connecting two
                    words in the graph, defaults to 10.
                pos (set): the set of valid pos for words to be considered as
                    nodes in the graph, defaults to (NN, NNS, NNP, NNPS, JJ,
                    JJR, JJS).
                normalized (False): normalize keyphrase score by their length,
                    defaults to False
        """

        # define default pos tags set
        if pos is None:
            pos = set(['NN', 'NNS', 'NNP', 'NNPS', 'JJ', 'JJR', 'JJS'])

        # build the word graph
        self.build_word_graph(window=window, pos=pos)

        # compute the word scores using random walk
        w = nx.pagerank_scipy(self.graph)

        # loop through the candidates
        for k in self.candidates.keys():
            tokens = self.candidates[k].lexical_form
            self.weights[k] = sum([w[t] for t in tokens])
            if normalized:
                self.weights[k] /= len(tokens)
Author: CharleyPeng1 | Project: pke | Lines: 29 | Source: unsupervised.py
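This method comes from pke's SingleRank-style extractors; a hedged sketch of the usual driver (the class name and input file are assumptions inferred from the snippet's signature):

    import pke

    extractor = pke.unsupervised.SingleRank()   # assumed extractor class
    extractor.load_document(input='paper.txt')  # assumed input document
    extractor.candidate_selection()
    extractor.candidate_weighting(window=10)
    for phrase, score in extractor.get_n_best(n=5):
        print(phrase, score)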


Example 11: findBestChilds

    def findBestChilds(self, nodes, k=4):
        n = len(nodes)
        node_list = dict()
        i = 0
        for node in nodes:
            node_list[i] = node
            i += 1

        self.stateGraph = np.zeros(shape=(n, n), dtype=np.byte)

        [self.buildSubGraph(i, n, node_list) for i in range(n)]

        important = []  # stays empty if PageRank fails below
        try:
            self.logger.debug(len(self.stateGraph))
            h = nx.pagerank_scipy(nx.Graph(self.stateGraph), max_iter=100, tol=1e-07)

            res = list(sorted(h, key=h.__getitem__, reverse=True))

            important = res[:k]
        except Exception:
            self.logger.error('Graph is empty')
            self.logger.error(sys.exc_info())

        dereffed_list = set([self.sub(i, node_list) for i in important])
        if len(dereffed_list) > 1:
            dereffed_list.discard(0)
            dereffed_list.discard(1)
        return list(dereffed_list)
Author: muratarslan | Project: eice | Lines: 28 | Source: pathfinder_async.py


Example 12: networkx_algo

def networkx_algo():
    import networkx as nx
    beta = GlobalPara.beta
    edges = LoadEdges()
    G = nx.DiGraph(edges)
    # print(G.edges())
    pagerank_dict = nx.pagerank_scipy(G, alpha=beta)
    print(pagerank_dict[99])
Author: pipilove | Project: MachineLearning | Lines: 8 | Source: PageRank.py


Example 13: graph_stats

    def graph_stats(self, n):
        stats = {}
        stats['Top'] = self.top_nodes(n + 1)
        stats['Pagerank'] = nx.pagerank_scipy(self.G)
        stats['Pagerank'] = sorted(stats['Pagerank'].items(), key=itemgetter(1), reverse=True)[0:n + 1]
        stats['Articulation Points'] = list(nx.articulation_points(self.G.to_undirected()))
        stats['Histogram'] = self.degree_histogram()[1:26]
        return stats
Author: pmcgannon22 | Project: wikigraph | Lines: 8 | Source: graph_utils.py


Example 14: lexrank

def lexrank(sentences, continuous=False, sim_threshold=0.1, alpha=0.9):
    """
    compute centrality score of sentences.

    Args:
      sentences: [u'こんにちは.', u'私の名前は飯沼です.', ... ]
      continuous: if True, apply continuous LexRank. (see reference)
      sim_threshold: if continuous is False and similarity is greater than
        or equal to sim_threshold, link the sentences.
      alpha: the damping factor of PageRank

    Returns: tuple
      (
        {
          # sentence index -> score
          0: 0.003,
          1: 0.002,
          ...
        },
        similarity_matrix
      )
    
    Reference:
      Günes Erkan and Dragomir R. Radev.
      LexRank: graph-based lexical centrality as salience in text
      summarization. (section 3)
      http://www.cs.cmu.edu/afs/cs/project/jair/pub/volume22/erkan04a-html/erkan04a.html
    """
    graph = networkx.DiGraph()

    # sentence -> tf
    sent_tf_list = []
    for sent in sentences:
        words = tools.word_segmenter_ja(sent)
        tf = collections.Counter(words)
        sent_tf_list.append(tf)

    sent_vectorizer = DictVectorizer(sparse=True)
    sent_vecs = sent_vectorizer.fit_transform(sent_tf_list)

    # compute similarities between sentences
    sim_mat = 1 - pairwise_distances(sent_vecs, sent_vecs, metric="cosine")

    if continuous:
        linked_rows, linked_cols = numpy.where(sim_mat > 0)
    else:
        linked_rows, linked_cols = numpy.where(sim_mat >= sim_threshold)

    # create similarity graph
    graph.add_nodes_from(range(sent_vecs.shape[0]))
    for i, j in zip(linked_rows, linked_cols):
        if i == j:
            continue
        weight = sim_mat[i, j] if continuous else 1.0
        graph.add_edge(i, j, weight=weight)

    scores = networkx.pagerank_scipy(graph, alpha=alpha, max_iter=1000)
    return scores, sim_mat
Author: nus | Project: summpy | Lines: 58 | Source: lexrank.py
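A hypothetical driver for lexrank (the sentences are made up; tokenization happens inside via tools.word_segmenter_ja):

    sentences = [u'こんにちは.', u'私の名前は飯沼です.', u'今日はいい天気ですね.']
    scores, sim_mat = lexrank(sentences, continuous=True)
    best = max(scores, key=scores.get)  # index of the most central sentence
    print(sentences[best])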


Example 15: test_scipy_pagerank

 def test_scipy_pagerank(self):
     G = self.G
     try:
         p = networkx.pagerank_scipy(G, alpha=0.9, tol=1.e-08)
         for (a, b) in zip(p, self.G.pagerank):
             assert_almost_equal(a, b)
     except ImportError:
         print("Skipping pagerank_scipy test")
Author: jbjorne | Project: CVSTransferTest | Lines: 9 | Source: test_pagerank.py


Example 16: test_scipy_pagerank

 def test_scipy_pagerank(self):
     G = self.G
     try:
         import scipy
     except ImportError:
         raise SkipTest('scipy not available.')
     p = networkx.pagerank_scipy(G, alpha=0.9, tol=1.e-08)
     for (a, b) in zip(p, self.G.pagerank):
         assert_almost_equal(a, b)
Author: JaneliaSciComp | Project: Neuroptikon | Lines: 9 | Source: test_pagerank.py


Example 17: test_scipy_pagerank

 def test_scipy_pagerank(self):
     G = self.G
     try:
         import scipy
     except ImportError:
         raise SkipTest('scipy not available.')
     p = networkx.pagerank_scipy(G, alpha=0.9, tol=1.e-08)
     for n in G:
         assert_almost_equal(p[n], G.pagerank[n], places=4)
Author: AhmedPho | Project: NetworkX_fork | Lines: 9 | Source: test_pagerank.py


Example 18: compute

    def compute(self, own_public_key):
        """
        Compute the reputation based on the data in the TrustChain database using the Temporal PageRank algorithm.
        """

        nodes = set()
        G = nx.DiGraph()

        for block in self.blocks:
            if block.link_sequence_number == UNKNOWN_SEQ or block.type != 'tx_done' \
                    or 'tx' not in block.transaction:
                continue  # Don't consider half interactions

            pubkey_requester = block.link_public_key
            pubkey_responder = block.public_key

            sequence_number_requester = block.link_sequence_number
            sequence_number_responder = block.sequence_number

            # In our market, we consider the amount of Bitcoin that have been transferred from A -> B.
            # For now, we assume that the value from B -> A is of equal worth.

            value_exchange = block.transaction["tx"]["transferred"]["first"]["amount"]

            G.add_edge((pubkey_requester, sequence_number_requester), (pubkey_requester, sequence_number_requester + 1),
                       contribution=value_exchange)
            G.add_edge((pubkey_requester, sequence_number_requester), (pubkey_responder, sequence_number_responder + 1),
                       contribution=value_exchange)

            G.add_edge((pubkey_responder, sequence_number_responder), (pubkey_responder, sequence_number_responder + 1),
                       contribution=value_exchange)
            G.add_edge((pubkey_responder, sequence_number_responder), (pubkey_requester, sequence_number_requester + 1),
                       contribution=value_exchange)

            nodes.add(pubkey_requester)
            nodes.add(pubkey_responder)

        personal_nodes = [node1 for node1 in G.nodes() if node1[0] == own_public_key]
        number_of_nodes = len(personal_nodes)
        if number_of_nodes == 0:
            return {}
        personalisation = {node_name: 1.0 / number_of_nodes if node_name in personal_nodes else 0
                           for node_name in G.nodes()}

        try:
            result = nx.pagerank_scipy(G, personalization=personalisation, weight='contribution')
        except nx.NetworkXException:
            self._logger.info("Empty Temporal PageRank, returning empty scores")
            return {}

        sums = {}

        for interaction in result.keys():
            sums[interaction[0]] = sums.get(interaction[0], 0) + result[interaction]

        return sums
Author: Tribler | Project: tribler | Lines: 56 | Source: temporal_pagerank_manager.py
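Example 18's personalisation dict restricts random-walk restarts to the caller's own identities. A toy sketch of our own showing the underlying mechanism:

    import networkx as nx

    G = nx.DiGraph([('a', 'b'), ('b', 'c'), ('c', 'a'), ('c', 'd')])
    # Restart the walk only at 'a'; scores then measure proximity to 'a'.
    personalization = {n: 0.0 for n in G}
    personalization['a'] = 1.0
    scores = nx.pagerank_scipy(G, alpha=0.85, personalization=personalization)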


Example 19: test_empty

 def test_empty(self):
     try:
         import numpy
     except ImportError:
         raise SkipTest('numpy not available.')
     G = networkx.Graph()
     assert_equal(networkx.pagerank(G), {})
     assert_equal(networkx.pagerank_numpy(G), {})
     assert_equal(networkx.pagerank_scipy(G), {})
     assert_equal(networkx.google_matrix(G).shape, (0, 0))
Author: 123jefferson | Project: MiniBloq-Sparki | Lines: 10 | Source: test_pagerank.py


Example 20: OrigPagerank

    def OrigPagerank(self):
        """Return a 2d array containing the PageRank of the origin node for all edges."""
        # Equivalent dense construction:
        # probas = np.dot(
        #     np.array(list(nx.pagerank_scipy(self).values()), dtype=float).reshape(-1, 1),
        #     np.ones((1, self.number_of_nodes())))
        try:
            return self.Orig(nx.pagerank_scipy(self))
        except Exception:
            # Fall back to a uniform distribution if PageRank fails.
            return self.Orig(np.ones(self.number_of_nodes(), dtype=float) / self.number_of_nodes())
Author: FourquetDavid | Project: evo | Lines: 11 | Source: Directed_UnweightedGWU.py



Note: the networkx.pagerank_scipy examples above were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any further use should follow the corresponding project's license. Please do not reproduce without permission.

