Python networkx.eigenvector_centrality Function Code Examples


This article collects typical usage examples of the Python function networkx.eigenvector_centrality. If you are wrestling with questions such as: What exactly does eigenvector_centrality do? How is it called? What does real-world eigenvector_centrality usage look like? Then the curated code examples below should help.



Below are 20 code examples of the eigenvector_centrality function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
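As a quick orientation before the examples: nx.eigenvector_centrality takes a graph and returns a dict mapping each node to its centrality score. Here is a minimal, self-contained sketch of the basic call; the toy graph and parameter values are illustrative assumptions, not taken from any of the projects that follow.

import networkx as nx

# A small toy graph, assumed here purely for demonstration.
G = nx.Graph([(0, 1), (1, 2), (1, 3), (3, 4)])

# Returns {node: score}; max_iter and tol control the power-iteration
# solver, and weight= can name an edge attribute for weighted graphs.
centrality = nx.eigenvector_centrality(G, max_iter=1000, tol=1e-6)

# List nodes from most to least central.
for node, score in sorted(centrality.items(), key=lambda kv: kv[1], reverse=True):
    print(node, round(score, 4))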

Example 1: eigenvector

def eigenvector(g, recalculate=False):
    """
    Performs robustness analysis based on eigenvector centrality
    on the network g, using the sequential (recalculate=True)
    or simultaneous (recalculate=False) approach. Returns a list
    with fraction of nodes removed, a list with the corresponding sizes of 
    the largest component of the network, and the overall vulnerability 
    of the network.
    """

    m = networkx.eigenvector_centrality(g, max_iter=5000)
    l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
    x = []
    y = []
    largest_component = max(networkx.connected_components(g), key=len)
    n = len(g.nodes())
    x.append(0)
    y.append(len(largest_component) * 1. / n)
    r = 0.0
    for i in range(1, n - 1):
        g.remove_node(l.pop(0)[0])
        if recalculate:

            try:
                m = networkx.eigenvector_centrality(g, max_iter=5000)
            except networkx.NetworkXError:
                break

            l = sorted(m.items(), key=operator.itemgetter(1),
                       reverse=True)
        largest_component = max(networkx.connected_components(g), key=len)
        x.append(i * 1. / n)
        r += len(largest_component) * 1. / n
        y.append(len(largest_component) * 1. / n)
    return x, y, r / n
Author: computational-center | Project: complexNetworksMeasurements | Lines: 35 | Source: robustness2.py


Example 2: plot_co_x

def plot_co_x(cox, start, end, size = (20,20), title = '', weighted=False, weight_threshold=10):

        """ Plotting function for keyword graphs

        Parameters
        --------------------
        cox: the coword networkx graph; assumes that nodes have attribute 'topic'
        start: start year
        end: end year
        """

        plt.figure(figsize=size)
        plt.title(title +' %s - %s'%(start,end), fontsize=18)
        if weighted:
            elarge=[(u,v) for (u,v,d) in cox.edges(data=True) if d['weight'] >weight_threshold]
            esmall=[(u,v) for (u,v,d) in cox.edges(data=True) if d['weight'] <=weight_threshold]
            pos=nx.graphviz_layout(cox) # positions for all nodes (needs pygraphviz; in networkx>=2.0 use nx.nx_agraph.graphviz_layout)
            nx.draw_networkx_nodes(cox,pos,
                node_color= [s*4500 for s in nx.eigenvector_centrality(cox).values()],
                node_size = [s*6+20  for s in nx.degree(cox).values()],
                alpha=0.7)
            # edges
            nx.draw_networkx_edges(cox,pos,edgelist=elarge,
                                width=1, alpha=0.5, edge_color='black') #, edge_cmap=plt.cm.Blues
            nx.draw_networkx_edges(cox,pos,edgelist=esmall,
                                width=0.3,alpha=0.5,edge_color='yellow',style='dotted')
            # labels
            nx.draw_networkx_labels(cox,pos,font_size=10,font_family='sans-serif')
            plt.axis('off')
        else:
            nx.draw_graphviz(cox, with_labels=True,
                         alpha = 0.8, width=0.1,
                         fontsize=9,
                         node_color = [s*4 for s in nx.eigenvector_centrality(cox).values()],
                         node_size = [s*6+20 for s in nx.degree(cox).values()])
Author: datapractice | Project: machinelearning | Lines: 35 | Source: net_lit_anal.py


Example 3: buildGraphFromTwitterFollowing

	def buildGraphFromTwitterFollowing(self):
		while True:
			twitter_id=self.userq.get()
			#print "======================================"
			twitter_id_dict=json.loads(twitter_id.AsJsonString())
			#print twitter_id_dict["name"]
			#print twitter_id.AsJsonString()
			#pprint.pprint(twitter_id.GetCreatedAt())
			#pprint.pprint(twitter_id.GetGeo())
			#pprint.pprint(twitter_id.GetLocation())
			#pprint.pprint(twitter_id.GetText())
			for f in self.api.GetFollowers(twitter_id):
				try:
					follower_id_dict=json.loads(f.AsJsonString())
					#print follower_id_dict["name"]
					self.tng.add_edge(twitter_id_dict["name"],follower_id_dict["name"])
					self.userq.put(f)	
					self.no_of_vertices+=1
				except:
					pass
			if self.no_of_vertices > 50:
				break
			print "======================================"
		nx.shell_layout(self.tng)
		nx.draw_networkx(self.tng)
		print "==========================================================================================="
		print "Bonacich Power Centrality of the Social Network (Twitter) Crawled - computed using PageRank"
		print "(a degree centrality based on social prestige)"
		print "==========================================================================================="
		print sorted(nx.pagerank(self.tng).items(),key=operator.itemgetter(1),reverse=True)
		print "==========================================================================================="
		print "Eigen Vector Centrality"
		print "==========================================================================================="
		print nx.eigenvector_centrality(self.tng)
		plt.show()
Author: shrinivaasanka | Project: asfer-github-code | Lines: 35 | Source: SocialNetworkAnalysis_Twitter.py


Example 4: eigenvector_component

def eigenvector_component(seed_num, graph_json_filename=None, graph_json_str=None):
  if graph_json_filename is None and graph_json_str is None:
    return []

  G = None
  if graph_json_str is None:
    G = util.load_graph(graph_json_filename=graph_json_filename)
  else:
    G = util.load_graph(graph_json_str=graph_json_str)

  components = list(nx.connected_components(G))
  components = filter(lambda x: len(x) > 0.1 * len(G), components)
  total_size = sum(map(lambda x: len(x), components))
  total_nodes = 0
  rtn = []
  for comp in components[1:]:
    num_nodes = int(float(len(comp)) / total_size * seed_num)
    component = G.subgraph(list(comp))
    clse_cent = nx.eigenvector_centrality(component)
    collector = collections.Counter(clse_cent)
    clse_cent = collector.most_common(num_nodes)
    rtn += map(lambda (x, y): x, clse_cent)
    total_nodes += num_nodes

  num_nodes = seed_num - total_nodes
  component = G.subgraph(list(components[0]))
  clse_cent = nx.eigenvector_centrality(component)
  collector = collections.Counter(clse_cent)
  clse_cent = collector.most_common(num_nodes)
  rtn += map(lambda (x, y): x, clse_cent)
  return rtn
Author: shimmy1996 | Project: Pandemaniac | Lines: 31 | Source: eigenvector_component.py


Example 5: sna_calculations

def sna_calculations(g, play_file):
    """
    :param g: a NetworkX graph object
    :type g: object
    :param play_file: the location of a play in .txt format
    :type play_file: string
    :return: returns a dictionary containing various network related figures
    :rtype: dict
    :note: also writes into results/file_name-snaCalculations.csv and results/allCharacters.csv
    """
    file_name = os.path.splitext(os.path.basename(play_file))[0]
    sna_calculations_list = dict()
    sna_calculations_list['playType'] = file_name[0]
    sna_calculations_list['avDegreeCentrality'] = numpy.mean(numpy.fromiter(iter(nx.degree_centrality(g).values()),
                                                                            dtype=float))
    sna_calculations_list['avDegreeCentralityStd'] = numpy.std(
        numpy.fromiter(iter(nx.degree_centrality(g).values()), dtype=float))
    sna_calculations_list['avInDegreeCentrality'] = numpy.mean(
        numpy.fromiter(iter(nx.in_degree_centrality(g).values()), dtype=float))
    sna_calculations_list['avOutDegreeCentrality'] = numpy.mean(
        numpy.fromiter(iter(nx.out_degree_centrality(g).values()), dtype=float))

    try:
        sna_calculations_list['avShortestPathLength'] = nx.average_shortest_path_length(g)
    except:
        sna_calculations_list['avShortestPathLength'] = 'not connected'

    sna_calculations_list['density'] = nx.density(g)
    sna_calculations_list['avEigenvectorCentrality'] = numpy.mean(
        numpy.fromiter(iter(nx.eigenvector_centrality(g).values()), dtype=float))
    sna_calculations_list['avBetweennessCentrality'] = numpy.mean(
        numpy.fromiter(iter(nx.betweenness_centrality(g).values()), dtype=float))
    sna_calculations_list['DegreeCentrality'] = nx.degree_centrality(g)
    sna_calculations_list['EigenvectorCentrality'] = nx.eigenvector_centrality(g)
    sna_calculations_list['BetweennessCentrality'] = nx.betweenness_centrality(g)

    # sna_calculations.txt file
    sna_calc_file = csv.writer(open('results/' + file_name + '-snaCalculations.csv', 'wb'), quoting=csv.QUOTE_ALL,
                               delimiter=';')
    for key, value in sna_calculations_list.items():
        sna_calc_file.writerow([key, value])

    # all_characters.csv file
    if not os.path.isfile('results/allCharacters.csv'):
        with open('results/allCharacters.csv', 'w') as f:
            f.write(
                'Name;PlayType;play_file;DegreeCentrality;EigenvectorCentrality;BetweennessCentrality;speech_amount;AverageUtteranceLength\n')

    all_characters = open('results/allCharacters.csv', 'a')
    character_speech_amount = speech_amount(play_file)
    for character in sna_calculations_list['DegreeCentrality']:
        all_characters.write(character + ';' + str(sna_calculations_list['playType']) + ';' + file_name + ';' + str(
            sna_calculations_list['DegreeCentrality'][character]) + ';' + str(
            sna_calculations_list['EigenvectorCentrality'][character]) + ';' + str(
            sna_calculations_list['BetweennessCentrality'][character]) + ';' + str(
            character_speech_amount[0][character]) + ';' + str(character_speech_amount[1][character]) + '\n')
    all_characters.close()

    return sna_calculations_list
Author: IngoKl | Project: shakespearesna1406 | Lines: 59 | Source: ShakespeareSnaAnalysis.py


Example 6: eigenvector_centrality

	def eigenvector_centrality(self, iterations, withme=False, node=None, average=False):
		my_dict = nx.eigenvector_centrality(self.mynet,
			max_iter = iterations)

		if node==None:
			if withme:
				my_dict =nx.eigenvector_centrality(self.mynet,
					max_iter = iterations)
				new = {}
				new2={}
				for i in my_dict:
					new[self.id_to_name(i)] = my_dict[i]
					new2[i] = my_dict[i]
				if average:
					print "The average is " + str(round(sum(new.values())/float(len(new.values())),4))
				else:
					for i,j in new.items():
						print i, round(j,4)
					return new2
			else:

				my_dict = nx.eigenvector_centrality(self.no_ego_net,
					max_iter = iterations)

				new = {}
				new2={}
				for i in my_dict:
					new[self.id_to_name(i)] = my_dict[i]
					new2[i] = my_dict[i]
				if average:
					print "The average is " + str(round(sum(new.values())/float(len(new.values())),4))
				else:
					for i,j in new.items():
						print i, round(j,4)
					return new2


		else:
			if withme:
				my_dict = nx.eigenvector_centrality(self.mynet,max_iter = iterations)
				try:
					print "The coefficient for node "+str(node)+" is "+str(round(my_dict[node],4))
				except:
					try:
						return my_dict[self.name_to_id(node)]
					except:
						print "Invalid node name"
			else:
				my_dict = nx.eigenvector_centrality(self.no_ego_net,max_iter = iterations)
				try:
					print "The coefficient for node "+str(node)+" is "+str(round(my_dict[node],4))
				except:
					try:
						print "The coefficient for node "+str(node)+" is "+str(round(my_dict[self.name_to_id(node)],4))
					except:
						print "Invalid node name"
Author: atwel | Project: BigData2015 | Lines: 56 | Source: networks_lab.py


Example 7: centrality_measures

    def centrality_measures(self):

        centrality_measures = []
        txt = ''
        
        # betweenness
        # unweighted
        self.unweighted_betweenness_distribution	= nx.betweenness_centrality(self.G)
        statistics		= self.Stats.get_distribution_info(self.unweighted_betweenness_distribution)
        centrality_measures.extend(statistics[:5])
        centrality_measures.extend(statistics[5])
        txt += ',average betweenness centrality (unweighted)' + self.standard_text_distribution

        # # weighted
        self.weighted_betweenness_distribution		= nx.betweenness_centrality(self.G, weight = self.weight_id)
        # statistics		= self.Stats.get_distribution_info(self.weighted_betweenness_distribution)
        # centrality_measures.extend(statistics[:5])
        # centrality_measures.extend(statistics[5])
        # txt += ',average betweenness centrality (weighted)' + self.standard_text_distribution
        
        # closeness
        # unweighted
        self.unweighted_closeness_distribution	= nx.closeness_centrality(self.G)
        statistics		= self.Stats.get_distribution_info(self.unweighted_closeness_distribution)
        centrality_measures.extend(statistics[:5])
        centrality_measures.extend(statistics[5])
        txt += ',average closeness centrality (unweighted)' + self.standard_text_distribution        
        
        # eigenvector
        # right
        try:
            self.right_eigenvector_distribution	= nx.eigenvector_centrality(self.G)
            statistics	= self.Stats.get_distribution_info(self.right_eigenvector_distribution)
            centrality_measures.extend(statistics[:5])
            centrality_measures.extend(statistics[5])
        except:
            centrality_measures.extend([0,0,0,0,0])
            centrality_measures.extend([0]*len(statistics[5])) 
        txt += ',average right eigenvector' + self.standard_text_distribution
		
        # left
        try:
            G_rev 								= self.G.reverse()
            self.lef_eigenvector_distribution	= nx.eigenvector_centrality(G_rev)
            statistics							= self.Stats.get_distribution_info(self.lef_eigenvector_distribution)
            centrality_measures.extend(statistics[:5])
            centrality_measures.extend(statistics[5])
        except:
            centrality_measures.extend([0,0,0,0,0])
            centrality_measures.extend([0]*len(statistics[5])) 
        txt += ',average left eigenvector' + self.standard_text_distribution

        return [centrality_measures, txt]
Author: andresportocarrero | Project: NetGen | Lines: 53 | Source: network_handler.py


Example 8: eigenvector_apl

def eigenvector_apl(g, recalculate=False):
    """
    Performs robustness analysis based on eigenvector centrality
    on the network g, using the sequential (recalculate=True)
    or simultaneous (recalculate=False) approach. Returns a list
    with the fraction of nodes removed, a list with the corresponding
    average shortest path lengths (relative to the initial value),
    and the overall vulnerability of the network.
    """

    m = networkx.eigenvector_centrality(g)
    l = sorted(m.items(), key=operator.itemgetter(1), reverse=True)
    x = []
    y = []

    average_path_length = 0.0
    number_of_components = 0
    n = len(g.nodes())

    for sg in networkx.connected_component_subgraphs(g):
        average_path_length += networkx.average_shortest_path_length(sg)
        number_of_components += 1

    average_path_length /= number_of_components
    initial_apl = average_path_length

    r = 0.0
    for i in range(1, n - 1):
        g.remove_node(l.pop(0)[0])
        if recalculate:

            try:
                m = networkx.eigenvector_centrality(g, max_iter=5000)
            except networkx.NetworkXError:
                break

            l = sorted(m.items(), key=operator.itemgetter(1),
                       reverse=True)
        average_path_length = 0.0
        number_of_components = 0

        for sg in networkx.connected_component_subgraphs(g):
            if len(sg.nodes()) > 1:
                average_path_length += networkx.average_shortest_path_length(sg)
            number_of_components += 1

        average_path_length = average_path_length / number_of_components

        x.append(i * 1. / initial_apl)
        r += average_path_length * 1. / initial_apl
        y.append(average_path_length * 1. / initial_apl)
    return x, y, r / initial_apl
Author: computational-center | Project: complexNetworksMeasurements | Lines: 52 | Source: robustness2.py


Example 9: eigValue

def eigValue(charList, graphFile, bookNetworksPath):
    # Compute eigenvectors for all characters in the current chapter graph.
    g = nx.read_gexf(graphFile)
    eigCentrality = nx.eigenvector_centrality(g, max_iter=100, tol=1.0e-6, nstart=None, weight="Weight")
    eigValues = eigCentrality.values()

    # NORMALISE eigenvector values
    d = decimal.Decimal
    maxEig = max(eigValues)
    minEig = min(eigValues)
    maxMinusMin = d(maxEig) - d(minEig)

    if not charList:
        # Get top 10 overall characters from overall.gexf graph
        overallGraphFile = bookNetworksPath + "overall.gexf"
        overall_g = nx.read_gexf(overallGraphFile)
        overallEigCent = nx.eigenvector_centrality(overall_g, max_iter=100, tol=1.0e-6, nstart=None, weight="Weight")

        # sortedCentrality = dict(sorted(overallEigCent.iteritems(), key=itemgetter(1), reverse=True)[:10])
        sortedCentrality = dict(sorted(overallEigCent.iteritems(), key=itemgetter(1), reverse=True))
        sortedCentrality = sorted(sortedCentrality.iteritems(), key=itemgetter(1), reverse=True)

        charList = [seq[0] for seq in sortedCentrality]
        return charList

    else:
        charList = [item for item in charList]

        for index, item in enumerate(charList):
            currentChar = None
            for key, value in eigCentrality.iteritems():
                if key == item:
                    # Unnormalised version...
                    charList[index] = (key, str(value))
                    currentChar = key
                # if key == item:
                #     nummerator = d(value)-d(minEig)
                #     if nummerator==0:
                #         charList[index] = (key, str(0))
                #     else:
                #         norm_value = (d(value)-d(minEig))/d(maxMinusMin)
                #         charList[index] = (key, str(norm_value))
                #     currentChar = key
            # If current character is not present in the current chapter assign 0 influence.
            if not currentChar:
                charList[index] = (item, 0)

        return charList
Author: pivots | Project: networkx-sna-fiction | Lines: 48 | Source: snaData.py


Example 10: centralities

 def centralities(self):
     '''
     Get info on centralities of data
     Params:
         None
     Returns:
         dictionary of centrality metrics with keys(centralities supported):
             degree - degree centrality
             betweeness - betweeness centrality
             eigenvector - eigenvector centrality
             hub - hub scores - not implemented
             authority - authority scores - not implemented
             katz - katz centrality with params X Y
             pagerank - pagerank centrality with params X Y
     '''
     output = {}
     output['degree'] = nx.degree_centrality(self.G)
     output['betweeness'] = nx.betweenness_centrality(self.G)
     try:
         output['eigenvector'] = nx.eigenvector_centrality(self.G)
         output['katz'] = nx.katz_centrality(self.G)
     except:
         output['eigenvector'] = 'empty or exception'
         output['katz'] = 'empty or exception'
     # output['hub'] = 'Not implemented'
     # output['authority'] = 'Not implemented'
     # output['pagerank'] = 'Not implemented'
     return output
Author: harrisonhunter | Project: groupcest | Lines: 28 | Source: data_object.py


Example 11: describe

def describe(G, ny_tri, chems):
	'''
	Describe the network: degrees, clustering, and centrality measures
	'''
	global describeNetwork
	# Degree
	# The number of connections a node has to other nodes.
	degrees= nx.degree(G)
	degrees_df = pd.DataFrame(degrees.items(), columns=['Facility', 'Degrees'])
	values = sorted(set(degrees.values())) 
	hist = [degrees.values().count(x) for x in values]
	plt.figure()
	plt.plot(values, hist,'ro-') # degree
	plt.xlabel('Degree')
	plt.ylabel('Number of nodes')
	plt.title('Degree Distribution')
	plt.savefig('output/degree_distribution.png')

	# Clustering coefficients
	# The bipartie clustering coefficient is a measure of local density of connections.
	clust_coefficients = nx.clustering(G)
	clust_coefficients_df = pd.DataFrame(clust_coefficients.items(), columns=['Facility', 'Clustering Coefficient'])
	clust_coefficients_df = clust_coefficients_df.sort('Clustering Coefficient', ascending=False)
	#print clust_coefficients_df

	# Node centrality measures
	FCG=list(nx.connected_component_subgraphs(G, copy=True))[0]
	# Current flow betweenness centrality
	# Current-flow betweenness centrality uses an electrical current model for information spreading 
	# in contrast to betweenness centrality which uses shortest paths.
	betweeness = nx.current_flow_betweenness_centrality(FCG)
	betweeness_df = pd.DataFrame(betweeness.items(), columns=['Facility', 'Betweeness'])
	betweeness_df = betweeness_df.sort('Betweeness', ascending=False)
	# Closeness centrality
	# The closeness of a node is the distance to all other nodes in the graph 
	# or in the case that the graph is not connected to all other nodes in the connected component containing that node.
	closeness = nx.closeness_centrality(FCG)
	closeness_df = pd.DataFrame(closeness.items(), columns=['Facility', 'Closeness'])
	closeness_df = closeness_df.sort('Closeness', ascending=False)
	# Eigenvector centrality
	# Eigenvector centrality computes the centrality for a node based on the centrality of its neighbors.
	# In other words, how connected a node is to other highly connected nodes.
	eigenvector = nx.eigenvector_centrality(FCG)
	eigenvector_df = pd.DataFrame(eigenvector.items(), columns=['Facility', 'Eigenvector'])
	eigenvector_df = eigenvector_df.sort('Eigenvector', ascending=False)

	# Create dataframe of facility info
	fac_info = ny_tri[['tri_facility_id','facility_name', 'primary_naics', 'parent_company_name']].drop_duplicates()
	fac_info.rename(columns={'facility_name':'Facility'}, inplace=True)

	# Merge everything
	describeNetwork = degrees_df.merge(
		clust_coefficients_df,on='Facility').merge(
		betweeness_df,on='Facility').merge(
		closeness_df, on='Facility').merge(
		eigenvector_df, on='Facility').merge(
		fac_info, on='Facility', how='left').merge(
		chems, on='Facility', how='left')
	describeNetwork = describeNetwork.sort('Degrees', ascending=False)
	describeNetwork.to_csv('output/describeNetwork.csv')
Author: stevecarrea | Project: ny_tri_networkAnalysis | Lines: 60 | Source: buildNetwork.py


Example 12: attack_based_max_eigenvector

def attack_based_max_eigenvector(G):
    """ Recalculate eigenvector centrality attack
    """
    n = G.number_of_nodes()
    tot_ND = [0] * (n+1)
    tot_T = [0] * (n+1)

    ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
    tot_ND[0] = ND
    tot_T[0] = 0

    for i in range(1, n+1):
        # calculate all nodes' eigenvector centrality
        allEigenvectorCentrality = nx.eigenvector_centrality(G, max_iter=1000, weight=None)
        # get node with max eigenvector centrality       
        node = max(allEigenvectorCentrality, key=allEigenvectorCentrality.get)
        # remove all the edges adjacent to node
        if not nx.is_directed(G):   # undirected graph
            for key in G[node].keys():
                G.remove_edge(node, key)
        else:   # directed graph
            for x in [v for u, v in G.out_edges_iter(node)]:
                G.remove_edge(node, x)
            for x in [u for u, v in G.in_edges_iter(node)]:
                G.remove_edge(x, node)
        ND, ND_lambda = ECT.get_number_of_driver_nodes(G)
        tot_ND[i] = ND
        tot_T[i]  = i
    return (tot_ND, tot_T)
Author: python27 | Project: NetworkControllability | Lines: 29 | Source: AttackBasedOnNode.py


Example 13: _graph_centrality_measures

    def _graph_centrality_measures(self, df_totals):
        '''
        INPUT: DataFrame
        OUTPUT: dict, dict, dict

        For every participant, calculates degree centrality, Eigenvector centrality, and
        weighted Eigenvector centrality (the last being weighted by the df's 'cnt' column).
        '''
        df = df_totals.copy()
        df = df[df['participantID'] > df['participantID.B']]
        G = nx.from_pandas_dataframe(df, 'participantID', 'participantID.B', 'cnt')
        degree_centrality = nx.degree_centrality(G)
        eigen_centrality = nx.eigenvector_centrality(G)
        eigen_centrality_weighted = nx.eigenvector_centrality(G, weight='cnt')

        return degree_centrality, eigen_centrality, eigen_centrality_weighted
Author: seanmandell | Project: mood-predictor-project | Lines: 16 | Source: feature_engineer.py


Example 14: test_K5

    def test_K5(self):
        """Eigenvector centrality: K5"""
        G = nx.complete_graph(5)
        b = nx.eigenvector_centrality(G)
        v = math.sqrt(1 / 5.0)
        b_answer = dict.fromkeys(G, v)
        for n in sorted(G):
            assert_almost_equal(b[n], b_answer[n])
        nstart = dict([(n, 1) for n in G])
        b = nx.eigenvector_centrality(G, nstart=nstart)
        for n in sorted(G):
            assert_almost_equal(b[n], b_answer[n])

        b = nx.eigenvector_centrality_numpy(G)
        for n in sorted(G):
            assert_almost_equal(b[n], b_answer[n], places=3)
Author: ProgVal | Project: networkx | Lines: 16 | Source: test_eigenvector_centrality.py


Example 15: node_eigenvector_centrality

def node_eigenvector_centrality(X):
    """
    based on networkx function: eigenvector_centrality
    """
    XX = np.zeros((X.shape[0], int(np.sqrt(X.shape[1]))))
    for i, value in enumerate(X):
        adj_mat = value.reshape((int(np.sqrt(len(value))), -1))
        adj_mat = (adj_mat - np.min(adj_mat)) / (np.max(adj_mat) - np.min(adj_mat))
        adj_mat = 1 - adj_mat

#        th = np.mean(adj_mat) - 0.2
#        adj_mat = np.where(adj_mat < th, adj_mat, 0.)

        percent, th, adj_mat, triu = percentage_removed(adj_mat, 0.78)
        print("percent = {0}, threshold position = {1}, threshold = {2}\n".format(percent, th, triu[th]))

        g = nx.from_numpy_matrix(adj_mat)
        print "Graph Nodes = {0}, Graph Edges = {1} ".format(g.number_of_nodes(), g.number_of_edges())
        print "\nEdge kept ratio, {0}".format(float(g.number_of_edges())/((g.number_of_nodes()*(g.number_of_nodes()-1))/2))

        deg_cent = nx.eigenvector_centrality(g, max_iter=10000)
        node_cent = np.zeros(g.number_of_nodes())

        for k in deg_cent:
            node_cent[k] = deg_cent[k]
        XX[i] = node_cent
        print "graph {0} => mean {1}, min {2}, max {3}".format(i, np.mean(XX[i]), np.min(XX[i]), np.max(XX[i]))
#    XX = XX*100
    ss = StandardScaler()
    XX = ss.fit_transform(XX.T).T

    return XX
Author: kirk86 | Project: Task-1 | Lines: 32 | Source: code.py


Example 16: most_central

 def most_central(self,F=1,cent_type='betweenness'):
     if cent_type == 'betweenness':
         ranking = nx.betweenness_centrality(self.G).items()
     elif cent_type == 'closeness':
         ranking = nx.closeness_centrality(self.G).items()
     elif cent_type == 'eigenvector':
         ranking = nx.eigenvector_centrality(self.G).items()
     elif cent_type == 'harmonic':
         ranking = nx.harmonic_centrality(self.G).items()
     elif cent_type == 'katz':
         ranking = nx.katz_centrality(self.G).items()
     elif cent_type == 'load':
         ranking = nx.load_centrality(self.G).items()
     elif cent_type == 'degree':
         ranking = nx.degree_centrality(self.G).items()
     ranks = [r for n,r in ranking]
     cent_dict = dict([(self.lab[n],r) for n,r in ranking])
     m_centrality = sum(ranks)
     if len(ranks) > 0:
         m_centrality = m_centrality/len(ranks)
     #Create a graph with the nodes above the cutoff centrality- remove the low centrality nodes
     thresh = F*m_centrality
     lab = {}
     for k in self.lab:
         lab[k] = self.lab[k]
     g = Graph(self.adj.copy(),self.char_list)
     for n,r in ranking:
         if r < thresh:
             g.G.remove_node(n)
             del g.lab[n]
     return (cent_dict,thresh,g)
Author: PCJohn | Project: Script-Analyzer | Lines: 31 | Source: graph.py


Example 17: eigenvectorcentralitynx

def eigenvectorcentralitynx(mutualinformation, startingvector):
    """
    Identical to eigenvectorcentralitynx0, but requires an additional argument,
    startingvector, which provides an initial guess for the eigenvector
    centrality of every node. startingvector must be a Python dictionary:
    key = node, value = eigenvector centrality estimate.
    """
    G = nx.Graph(mutualinformation)
    eigvcent = nx.eigenvector_centrality(G, weight='weight', max_iter=2000, nstart=startingvector)
    return eigvcent
Author: lhillber | Project: qgl_exact | Lines: 7 | Source: networkmeasures.py


Example 18: main

def main():
    # n = get_node_list('Output.txt')
    # save_mapper_file(n, 'Mapper.txt')
    # anonymize_names("Output.txt", 'AnOutput.txt')
    an = get_node_list('AnOutput.txt')

    G = create_graph('AnOutput.txt', True)
    #in_deg_res, out_deg_res = get_degree_counts(G, an)
    # print(in_deg_res, out_deg_res)
    G1 = create_graph_for_snap(an, 'AnOutput.txt')

    # snap_traids = snap.GetTriads(G1)
    # triads = nx.transitivity(G)

    pagerank = nx.pagerank(G)
    max_pagerank = key_with_max_val(pagerank)
    import operator
    a = sorted(pagerank.items(), key=operator.itemgetter(1), reverse=True)
    print max_pagerank

    centrality = nx.in_degree_centrality(G)
    a = sorted(centrality.items(), key=operator.itemgetter(1), reverse=True)

    eigen_vector_centrality = nx.eigenvector_centrality(G)
    a = sorted(eigen_vector_centrality.items(), key=operator.itemgetter(1), reverse=True)

    # snap_dia = snap.GetBfsFullDiam(G1, 10)
    # dia = nx.diameter(G)
    avg_local_clustering_coeff = nx.average_clustering(G)
    print avg_local_clustering_coeff
    #global_clustering_coeff = snap.GetClustCf(G1, -1)
    #print global_clustering_coeff
    #plot_data = diameter_phase_transition()
    pass
Author: ttchakra | Project: YelpCrawler_GraphModels | Lines: 34 | Source: part3.py


Example 19: print_most_often_optimal

 def print_most_often_optimal(self, bests):
   ''' Print those cities which are most often in optimal layouts. '''
   print("The centrality measure previously discussed is a good judge of " +
     "how good building a research center in a city is, but let's check our " +
     "work by counting how many times each city appears in the optimal " +
     "placements.")
   occurences = [item for sublist in bests for subsublist in
     sublist for item in subsublist]
   cities = list(self)
   for city in sorted(cities, key=occurences.count):
     if occurences.count(city):
       print(city, occurences.count(city))
   lone = []
   ltwo = []
   centrality = nx.eigenvector_centrality(self, max_iter=1000)
   for city in cities:
     lone.append(occurences.count(city))
     ltwo.append(centrality[city])
   (corr, pvalue) = stats.pearsonr(lone, ltwo)
   print ("The correlation is " + str((corr, pvalue)))
   lone = []
   ltwo = []
   for city in cities:
     if city != "Atlanta":
       lone.append(occurences.count(city))
       ltwo.append(centrality[city])
   (corr, pvalue) = stats.pearsonr(lone, ltwo)
   print ("The correlation without Atlanta is " + str((corr, pvalue)))
Author: gracegallis | Project: Pandemonium | Lines: 28 | Source: pandemic.py


Example 20: all_users_popular_nodes

    def all_users_popular_nodes(self):

        Gall = self._graph_from_cursor('graph3')

        slots = []
        for i in range(1,7):
            G = self._graph_from_cursor('all_posts_s%d' % i)
            slots.append(G)

        degree = nx.degree_centrality(G).items()
        eigen = nx.eigenvector_centrality(G).items()
        betweeness = nx.betweenness_centrality(G, k=20).items()

        topDegree = sorted(degree, key=lambda (n,x): x, reverse=True)[:10]
        topEigen = sorted(eigen, key=lambda (n,x): x, reverse=True)[:10]
        topBetweeness = sorted(betweeness, key=lambda (n,x): x, reverse=True)[:10]

        topDegreeIds = map(lambda (n,x): n, topDegree)
        topEigenIds = map(lambda (n,x): n, topEigen)
        topBetweenessIds = map(lambda (n,x): n, topBetweeness)

        inter = list(set(topDegreeIds).intersection(topEigenIds).intersection(topBetweenessIds))
        union = list(set(topDegreeIds).union(topEigenIds).union(topBetweenessIds))

        out = StringIO.StringIO()
        writer = csv.writer(out, delimiter='|', quoting=csv.QUOTE_NONE)
Author: dgawlik | Project: ed | Lines: 26 | Source: sna.py



Note: the networkx.eigenvector_centrality examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use must follow each project's license. Please do not repost without permission.

