Python networkx.average_clustering Function Code Examples


This article collects typical usage examples of Python's networkx.average_clustering function. If you are wondering what average_clustering does, how to call it, or what it looks like in real code, the curated examples below should help.



Twenty code examples of the average_clustering function are shown below, sorted by popularity by default.
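Before the collected examples, here is a minimal usage sketch (illustrative only, not taken from any of the projects below): build or load an undirected graph and pass it to nx.average_clustering, which returns the mean of the per-node clustering coefficients.

import networkx as nx

G = nx.karate_club_graph()           # any nx.Graph works here
print(nx.average_clustering(G))      # mean of the per-node clustering coefficients

# Several examples below also pass weight='weight' for weighted graphs,
# or evaluate subgraphs, e.g. nx.average_clustering(G.subgraph([0, 1, 2, 3])).

If you need the full distribution rather than the mean, nx.clustering(G) returns the per-node values.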

Example 1: main

def main():
    tempo_dir = "../corpus-local/tempo-txt"
    file_regex = ".*\.txt"

    G = build_graph(tempo_dir, file_regex)
    """
  ccs = nx.clustering(G)
  avg_clust = sum(ccs.values()) / len(ccs)
  """
    print tempo_dir
    print "\tAda " + str(len(G.nodes())) + " node."
    print "\tAda " + str(len(G.edges())) + " edge."
    print "\tClustering coefficient      : " + str(nx.average_clustering(G))
    print "\tAverage shortest path length"
    for g in nx.connected_component_subgraphs(G):
        print "\t\t" + str(nx.average_shortest_path_length(g))

    kompas_dir = "../corpus-local/kompas-txt"
    G = build_graph(kompas_dir, file_regex)
    print kompas_dir
    print "\tAda " + str(len(G.nodes())) + " node."
    print "\tAda " + str(len(G.edges())) + " edge."
    print "\tClustering coefficient      : " + str(nx.average_clustering(G))
    print "\tAverage shortest path length"
    for g in nx.connected_component_subgraphs(G):
        print "\t\t" + str(nx.average_shortest_path_length(g))
Developer: barliant, Project: krextown, Lines: 26, Source: graftempo.py


Example 2: check_and_merge_clusters

def check_and_merge_clusters(index):
    global clusters
    global G
        
    given_cluster = []
    total_clusters = len(clusters)
    cluster_coeff_all = [0]*total_clusters
    cluster_coeff_temp = [0]*total_clusters
    for string in clusters[index]:
        given_cluster.append(int(string))
    given_graph = G.subgraph(given_cluster)
    clustering_coeff_given   = nx.average_clustering(given_graph)
    
    temp_index = 0
    while temp_index < total_clusters:
        temp_cluster = []
        for string in clusters[temp_index]:
            temp_cluster.append(int(string))
        temp_graph = G.subgraph(temp_cluster)
        temp_graph_all = G.subgraph(temp_cluster + given_cluster)

        clustering_coeff_all = nx.average_clustering(temp_graph_all)
        clustering_coeff_temp = nx.average_clustering(temp_graph)
        cluster_coeff_all[temp_index] = clustering_coeff_all
        cluster_coeff_temp[temp_index] = clustering_coeff_temp        
        temp_index = temp_index + 1

    # Find the index with highest coefficient and combine them
    max_index = cluster_coeff_all.index(max(cluster_coeff_all))
    if clustering_coeff_given > .94:
        clustering_coeff_given = 0.94
    if cluster_coeff_temp[max_index] > .94:
        cluster_coeff_temp[max_index] =0.94
    if (cluster_coeff_all[max_index] >= .95*clustering_coeff_given) and (cluster_coeff_all[max_index] >= .95*cluster_coeff_temp[max_index]):
        combine_cluster(index, max_index)
Developer: vijkp, Project: graph-bench, Lines: 35, Source: old.bfs_clustering.py


Example 3: can_combine_cluster

def can_combine_cluster(cl1, cl2):
    global G
    cl1_int = []
    cl2_int = []
    for string in cl1:
        cl1_int.append(int(string))
    for string in cl2:
        cl2_int.append(int(string))

    temp_graph1 = G.subgraph(cl1_int)
    temp_graph2 = G.subgraph(cl2_int)
    temp_graph_all = G.subgraph(cl1_int + cl2_int)

    clustering_coeff_1 = nx.average_clustering(temp_graph1)
    clustering_coeff_2 = nx.average_clustering(temp_graph2)
    clustering_coeff_all = nx.average_clustering(temp_graph_all)
    # print (str)(clustering_coeff_1) + " " + (str)(clustering_coeff_2) +" "+ (str)(clustering_coeff_all)

    if clustering_coeff_1 == 1:
        clustering_coeff_1 = 0.96

    if clustering_coeff_2 == 1:
        clustering_coeff_2 = 0.96

    if (clustering_coeff_1 == 0) and (clustering_coeff_2 == 0):
        return False

    fraction = 0.95
    if (clustering_coeff_all > fraction * clustering_coeff_1) and (
        clustering_coeff_all > fraction * clustering_coeff_2
    ):
        # print "combine"
        return True
    return False
Developer: vijkp, Project: graph-bench, Lines: 34, Source: cluster_neo4j.py


Example 4: MvsD

def MvsD(A, Au, M, D):
    """docstring for MvsD"""
    #Calculate the number of nodes
    print("Number of nodes in A  : " + str(len(A.nodes())))
    print("Number of nodes in Au : " + str(len(Au.nodes())))
    #Calculate the number of links
    print("Number of links in A  : " + str(len(A.edges())))
    print("Number of links in Au : " + str(len(Au.edges())))
    t = nx.average_clustering(Au)
    print("network clustering coefficient for Au : " + str(t))
    print("")
    #Calculate the number of nodes
    print("Number of nodes in M  : " + str(len(M.nodes())))
    print("Number of nodes in D : " + str(len(D.nodes())))
    t = nx.average_clustering(M)
    print("network clustering coefficient for M  : " + str(t))
    t = nx.average_clustering(D)
    print("network clustering coefficient for D : " + str(t))
    
    
    MavgD = float(sum(M.degree().values()))/float(len(M.nodes()))
    print("Connectivity M : " + str(MavgD))
    DavgD = float(sum(D.degree().values()))/float(len(D.nodes()))
    print("Connectivity D : " + str(DavgD))
    
    pass
Developer: WingYn, Project: DtuJobBot, Lines: 26, Source: Analyze.py


Example 5: t_t_cc

def t_t_cc(path=r"d:\data\9.txt"):
    rstr = ''
    g = nx.Graph()
    g = read_file_txt(g, path)
    w = [14,13,12,6]
    print nx.average_clustering(g)
    for each in w:
        R=gRa(g,each)
        pg=r_perturbR(g, R)
        rstr=rstr+'{0:8},{1:10.4}'.format(each,nx.average_clustering(pg))
        rstr=rstr+'\n'

    try:
        path=path.replace('9','9_cc')
        f=open(path, 'w')
    except:
        print "int Create File error"

    p = np.array(w)/14.0
    for each in p:
        pg=r_perturbS(g, each)
        rstr=rstr+'{0:8},{1:10.4}'.format(each,nx.average_clustering(pg))
        rstr=rstr+'\n'

    f.write(rstr)
    f.close()
Developer: liupenggl, Project: dpr, Lines: 26, Source: grandom.py


Example 6: t_Gnutella_cc

def t_Gnutella_cc(path=r"d:\data\p2p-Gnutella08.txt"):
    rstr = ''
    g = nx.Graph()
    g = read_file_txt(g, path)
    w = [20777,18700,17995,17023]
    for each in w:
        R=gRa(g,each)
        pg=r_perturbR(g, R)
        rstr=rstr+'{0:8},{1:10.4}'.format(each,nx.average_clustering(pg))
        rstr=rstr+'\n'

    try:
        path=path.replace('p2p-Gnutella','GrQcp2p-Gnutella_cc')
        f=open(path, 'w')
    except:
        print "int Create File error"

    p = np.array(w)/20777.0
    for each in p:
        pg=r_perturbS(g, each)
        rstr=rstr+'{0:8},{1:10.4}'.format(each,nx.average_clustering(pg))
        rstr=rstr+'\n'

    f.write(rstr)
    f.close()
Developer: liupenggl, Project: dpr, Lines: 25, Source: grandom.py


Example 7: t_facebook_cc

def t_facebook_cc(path=r"d:\data\facebook1.txt"):
    rstr = ''
    g = nx.Graph()
    g = read_file_txt(g, path)
    w = [1945, 1294, 860, 643]
    for each in w:
        R=gRa(g,each)
        pg=r_perturbR(g, R)
        rstr=rstr+'{0:8},{1:10.4}'.format(each,nx.average_clustering(pg))
        rstr=rstr+'\n'

    try:
        path=path.replace('book1','book1_cc')
        f=open(path, 'w')
    except:
        print "int readFileTxt open error"

    p = np.array(w)/4813.0
    for each in p:
        pg=r_perturbS(g, each)
        rstr=rstr+'{0:8},{1:10.4}'.format(each,nx.average_clustering(pg))
        rstr=rstr+'\n'

    f.write(rstr)
    f.close()
Developer: liupenggl, Project: dpr, Lines: 25, Source: grandom.py


Example 8: compare_graphs

def compare_graphs(graph):
    n = nx.number_of_nodes(graph)
    m = nx.number_of_edges(graph)
    k = np.mean(list(nx.degree(graph).values()))
    erdos = nx.erdos_renyi_graph(n, p=m/float(n*(n-1)/2))
    barabasi = nx.barabasi_albert_graph(n, m=int(k)-7)
    small_world = nx.watts_strogatz_graph(n, int(k), p=0.04)
    print(' ')
    print('Compare the number of edges')
    print(' ')
    print('My network: ' + str(nx.number_of_edges(graph)))
    print('Erdos: ' + str(nx.number_of_edges(erdos)))
    print('Barabasi: ' + str(nx.number_of_edges(barabasi)))
    print('SW: ' + str(nx.number_of_edges(small_world)))
    print(' ')
    print('Compare average clustering coefficients')
    print(' ')
    print('My network: ' + str(nx.average_clustering(graph)))
    print('Erdos: ' + str(nx.average_clustering(erdos)))
    print('Barabasi: ' + str(nx.average_clustering(barabasi)))
    print('SW: ' + str(nx.average_clustering(small_world)))
    print(' ')
    print('Compare average path length')
    print(' ')
    print('My network: ' + str(nx.average_shortest_path_length(graph)))
    print('Erdos: ' + str(nx.average_shortest_path_length(erdos)))
    print('Barabasi: ' + str(nx.average_shortest_path_length(barabasi)))
    print('SW: ' + str(nx.average_shortest_path_length(small_world)))
    print(' ')
    print('Compare graph diameter')
    print(' ')
    print('My network: ' + str(nx.diameter(graph)))
    print('Erdos: ' + str(nx.diameter(erdos)))
    print('Barabasi: ' + str(nx.diameter(barabasi)))
    print('SW: ' + str(nx.diameter(small_world)))
Developer: feygina, Project: social-network-VK-analysis, Lines: 35, Source: functions_for_vk_users.py


Example 9: Type2AlmostCompleteGraph

def Type2AlmostCompleteGraph(n, m):
    if (BinomialCoefficient(n - 2, 2) + 4 <= m) and (m <= BinomialCoefficient(n - 1, 2) + 1):
        first_candidate = nx.complete_graph(n - 2)
        remaining_edges = m - BinomialCoefficient(n - 2, 2)
        first_candidate.add_edge(n - 2, 0)
        first_candidate.add_edge(n - 2, 1)
        for vertex_index in range(remaining_edges - 2):
            first_candidate.add_edge(n - 1, vertex_index)
        first_coefficient = nx.average_clustering(first_candidate)

        second_candidate = nx.complete_graph(n - 2)
        second_candidate.add_edge(n - 2, n - 1)
        remaining_edges = m - BinomialCoefficient(n - 2, 2) - 1
        number_of_common_neighbors = remaining_edges / 2
        for vertex_index in range(number_of_common_neighbors):
            second_candidate.add_edge(vertex_index, n - 2)
            second_candidate.add_edge(vertex_index, n - 1)
        if (remaining_edges - 2 * number_of_common_neighbors) == 1:
            second_candidate.add_edge(vertex_index + 1, n - 2)
        second_coefficient = nx.average_clustering(second_candidate)

        if first_coefficient > second_coefficient:
            G = first_candidate.copy()
        else:
            G = second_candidate.copy()
        return G
Developer: rakeen, Project: NetworkStuff, Lines: 26, Source: FastClustering.py


Example 10: can_combine_cluster2

def can_combine_cluster2(cl1, cl2):
    combine = False
    temp_graph1 = G.subgraph(cl1)
    temp_graph2 = G.subgraph(cl2)
    temp_graph_all = G.subgraph(cl1 + cl2)
    
    if len(cl1) >= len(cl2):
        common_elements = list(set(cl1).intersection(set(cl2)))
        if len(common_elements) > 0.8*len(cl2):
            combine = True
        #print common_elements
    else:
        common_elements = list(set(cl2).intersection(set(cl1)))
        if len(common_elements) > 0.8*len(cl1): 
            combine = True


    clustering_coeff_1   = nx.average_clustering(temp_graph1)
    clustering_coeff_2   = nx.average_clustering(temp_graph2)
    clustering_coeff_all = nx.average_clustering(temp_graph_all)
    #print cl1
    #print cl2
    #print (str)(clustering_coeff_1) + " " + (str)(clustering_coeff_2) +" "+ (str)(clustering_coeff_all)
    #print " "
    
    if combine:
        if (clustering_coeff_all >= .8*clustering_coeff_1) and (clustering_coeff_all >= 0.8*clustering_coeff_2):
            return True
    else:
        if (clustering_coeff_all >= clustering_coeff_1) and (clustering_coeff_all >= clustering_coeff_2):
            return True
    return False 
Developer: vijkp, Project: graph-bench, Lines: 32, Source: cluster-small.py


Example 11: getCoherenceMeasure

def getCoherenceMeasure(essay):
	graph = makeWordGraph(essay)
	# obtain clustering coefficient			
	clustCoeffList=nx.clustering(graph)
	#####
	print getScore(clustCoeffList,graph)
	print nx.average_clustering(graph)
Developer: apoorvaraob, Project: Automatic-Essay-Grader, Lines: 7, Source: Coherence.py


Example 12: algorithm

def algorithm(w1,w2,w3,w4,G1,G2,G3,G4):
	try:
		cc=np.array([nx.average_clustering(G1,weight='weight'),nx.average_clustering(G2,weight='weight'),nx.average_clustering(G3,weight='weight'),nx.average_clustering(G4,weight='weight')])
		spl=np.array([nx.average_shortest_path_length(G1,weight='weight'),nx.average_shortest_path_length(G2,weight='weight'),nx.average_shortest_path_length(G3,weight='weight'),nx.average_shortest_path_length(G4,weight='weight')])
		nds=np.array([nx.number_of_nodes(G1),nx.number_of_nodes(G2),nx.number_of_nodes(G3),nx.number_of_nodes(G4)])
		edgs= np.array([nx.number_of_edges(G1),nx.number_of_edges(G2),nx.number_of_edges(G3),nx.number_of_edges(G4)])
		if valid(cc):
			cc=stats.zscore(cc)
		else:
			cc=np.array([.1,.1,.1,.1])
		cc= cc-min(cc)+.1
		if valid(spl):
			spl=stats.zscore(spl)
		else:
			spl=np.array([.1,.1,.1,.1])
		spl= spl-min(spl)+.1
		if valid(nds):
			nds=stats.zscore(nds)
		else:
			nds=np.array([.1,.1,.1,.1])
		nds = nds-min(nds)+.1
		if valid(edgs):
			edgs=stats.zscore(edgs)
		else:
			edgs=np.array([.1,.1,.1,.1])
		edgs=edgs-min(edgs)+.1
		r1=(w1*cc[0]+w2*spl[0]+w3*nds[0]+w4*edgs[0])*1000
		r2=(w1*cc[1]+w2*spl[1]+w3*nds[1]+w4*edgs[1])*1000
		r3=(w1*cc[2]+w2*spl[2]+w3*nds[2]+w4*edgs[2])*1000
		r4=(w1*cc[3]+w2*spl[3]+w3*nds[3]+w4*edgs[3])*1000
		d={'Player 1:': r1, 'Player 2:': r2,'Player 3:': r3, 'Player 4:': r4}
		rank = sorted(d.items(), key=lambda x: x[1], reverse=True)
		return ["USAU RANKINGS",str(rank[0][0])+ " " + str(int(rank[0][1])),str(rank[1][0])+" "+ str(int(rank[1][1])),str(rank[2][0])+" "+ str(int(rank[2][1])),str(rank[3][0])+" "+str(int(rank[3][1]))]
	except:
		return ["Unable to compute rankings!  Need data","Player 1","Player 2","Player 3","Player 4"]
Developer: dagley11, Project: Garuda_Game, Lines: 35, Source: Graph.py


Example 13: t_GrQc_cc

def t_GrQc_cc(path=r"d:\data\CA-GrQc.txt"):
    rstr = ''
    g = nx.Graph()
    g = read_file_txt(g, path)
    w = [14496,13454,12394,9782]
    for each in w:
        R=gRa(g,each)
        pg=r_perturbR(g, R)
        rstr=rstr+'{0:8},{1:10.4}'.format(each,nx.average_clustering(pg))
        rstr=rstr+'\n'

    try:
        path=path.replace('GrQc','GrQc_cc')
        f=open(path, 'w')
    except:
        print "int readFileTxt open error"

    p = np.array(w)/14496.0
    for each in p:
        pg=r_perturbS(g, each)
        rstr=rstr+'{0:8},{1:10.4}'.format(each,nx.average_clustering(pg))
        rstr=rstr+'\n'

    f.write(rstr)
    f.close()
Developer: liupenggl, Project: dpr, Lines: 25, Source: grandom.py


Example 14: test_clustering

def test_clustering(size):
    print("Barabasi-Albert:")
    ba = networkx.barabasi_albert_graph(1000, 4)
    print("Clustering: ", networkx.average_clustering(ba))
    print("Average length: ", networkx.average_shortest_path_length(ba))
    print("Watts-Strogatz:")
    ws = networkx.watts_strogatz_graph(size, 4, 0.001)
    print("Clustering: ", networkx.average_clustering(ws))
    print("Average length: ", networkx.average_shortest_path_length(ws))
Developer: onesandzeroes, Project: Complexity, Lines: 9, Source: scale_free_net.py


Example 15: gen_graph_stats

def gen_graph_stats (graph):
	G = nx.read_graphml(graph)
	stats = {}

	edges, nodes = 0,0
	for e in G.edges_iter(): edges += 1
	for n in G.nodes_iter(): nodes += 1
	stats['Edges'] = (edges,'The number of edges within the Graph')
	stats['Nodes'] = (nodes, 'The number of nodes within the Graph')
	print "%i edges, %i nodes" % (edges, nodes)


	# Accessing the highest degree node
	center, degree = sorted(G.degree().items(), key=itemgetter(1), reverse=True)[0]
	stats['Center Node'] = ('%s: %0.5f' % (center,degree),'The center most node in the graph. Which has the highest degree')


	hairball = nx.subgraph(G, [x for x in nx.connected_components(G)][0])
	print "Average shortest path: %0.4f" % nx.average_shortest_path_length(hairball)
	stats['Average Shortest Path Length'] = (nx.average_shortest_path_length(hairball), '')
	# print "Center: %s" % G[center]

	# print "Shortest Path to Center: %s" % p


	print "Degree: %0.5f" % degree
	stats['Degree'] = (degree,'The node degree is the number of edges adjacent to that node.')

	print "Order: %i" % G.number_of_nodes()
	stats['Order'] = (G.number_of_nodes(),'The number of nodes in the graph.')

	print "Size: %i" % G.number_of_edges()
	stats['Size'] = (G.number_of_edges(),'The number of edges in the graph.')

	print "Clustering: %0.5f" % nx.average_clustering(G)
	stats['Average Clustering'] = (nx.average_clustering(G),'The average clustering coefficient for the graph.')

	print "Transitivity: %0.5f" % nx.transitivity(G)
	stats['Transitivity'] = (nx.transitivity(G),'The fraction of all possible triangles present in the graph.')

	part = community.best_partition(G)
	# values = [part.get(node) for node in G.nodes()]

	# nx.draw_spring(G, cmap = plt.get_cmap('jet'), node_color = values, node_size=30, with_labels=False)
	# plt.show()

	mod = community.modularity(part,G)
	print "modularity: %0.5f" % mod
	stats['Modularity'] = (mod,'The modularity of a partition of a graph.')

	knn = nx.k_nearest_neighbors(G)
	print knn
	stats['K Nearest Neighbors'] = (knn,'the average degree connectivity of graph.\nThe average degree connectivity is the average nearest neighbor degree of nodes with degree k. For weighted graphs, an analogous measure can be computed using the weighted average neighbors degre')


	return G, stats
Developer: neviim, Project: Georgetown-Capstone, Lines: 56, Source: Graph_stats.py


Example 16: get_average_cluster_coefficient

def get_average_cluster_coefficient(filename):
  import networkx as nx
  threshold = 0
  f = open(filename[:-4]+'_average_cc.dat','w')
  for i in range(0,101):
    threshold = float(i)/100
    G = get_threshold_matrix(filename, threshold)
    print 'threshold: %f, average cluster coefficient: %f' %(threshold, nx.average_clustering(G))
    f.write("%f\t%f\n" % (threshold, nx.average_clustering(G)))
  f.close()
Developer: sheyma, Project: lab_rot_berlin, Lines: 10, Source: threshold_matrix.py


Example 17: get_small_worldness

def get_small_worldness(filename):
  import networkx as nx
  threshold = 0
  f = open(filename[:-4]+'_small_worldness.dat','w')
  for i in range(0,101):
    threshold = float(i)/100
    G = get_threshold_matrix(filename, threshold)
    ER_graph = nx.erdos_renyi_graph(nx.number_of_nodes(G), nx.density(G))

    cluster = nx.average_clustering(G)
    ER_cluster = nx.average_clustering(ER_graph)
    
    transi = nx.transitivity(G)
    ER_transi = nx.transitivity(ER_graph)

    print 'threshold: %f, average cluster coefficient: %f, random nw: %f, transitivity: %f, random nw: %f' %(threshold, cluster, ER_cluster, transi, ER_transi)

    f.write("%f\t%f\t%f" % (threshold, cluster, ER_cluster))
    components = nx.connected_component_subgraphs(G)
    ER_components = nx.connected_component_subgraphs(ER_graph)

    values = []
    ER_values = []
    for i in range(len(components)):
      if nx.number_of_nodes(components[i]) > 1:
        values.append(nx.average_shortest_path_length(components[i]))
    for i in range(len(ER_components)):
      if nx.number_of_nodes(ER_components[i]) > 1:
        ER_values.append(nx.average_shortest_path_length(ER_components[i]))
    if len(values) == 0:
      f.write("\t0.")
    else:
      f.write("\t%f" % (sum(values)/len(values)))

    if len(ER_values) == 0:
      f.write("\t0.")
    else:
      f.write("\t%f" % (sum(ER_values)/len(ER_values)))
    
    f.write("\t%f\t%f" % (transi, ER_transi))  
    
    if (ER_cluster*sum(values)*len(values)*sum(ER_values)*len(ER_values)) >0 :
      S_WS = (cluster/ER_cluster) / ((sum(values)/len(values)) / (sum(ER_values)/len(ER_values)))
    else:
      S_WS = 0.
    if (ER_transi*sum(values)*len(values)*sum(ER_values)*len(ER_values)) >0 :
      S_Delta = (transi/ER_transi) / ((sum(values)/len(values)) / (sum(ER_values)/len(ER_values)))
    else:
      S_Delta = 0.
    
    f.write("\t%f\t%f" % (S_WS, S_Delta))  
    f.write("\n")
    
  f.close()  
  print "1:threshold 2:cluster-coefficient 3:random-cluster-coefficient 4:shortest-pathlength 5:random-shortest-pathlength 6:transitivity 7:random-transitivity 8:S-Watts-Strogatz 9:S-transitivity" 
Developer: sheyma, Project: lab_rot_berlin, Lines: 55, Source: threshold_matrix.py


Example 18: test_random_reference

def test_random_reference():
    G = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng)
    Gr = random_reference(G, niter=1, seed=rng)
    C = nx.average_clustering(G)
    Cr = nx.average_clustering(Gr)
    assert_true(C > Cr)

    assert_raises(nx.NetworkXError, random_reference, nx.Graph())
    assert_raises(nx.NetworkXNotImplemented, random_reference, nx.DiGraph())

    H = nx.Graph(((0, 1), (2, 3)))
    Hl = random_reference(H, niter=1, seed=rng)
Developer: jianantian, Project: networkx, Lines: 12, Source: test_smallworld.py


Example 19: get_small_worldness

def get_small_worldness(G, thr):
	f = open(out_prfx + 'small_worldness.dat', 'a')
	g = open(out_prfx + 'cc_trans_ER.dat', 'a')
	#g.write('r(thre.)\t\cc_A\tcc_ER\ttran_A\ttran_ER\n')
	ER_graph = nx.erdos_renyi_graph(nx.number_of_nodes(G), nx.density(G))
	# erdos-renyi, binomial random graph generator ...(N,D:density)	
	cluster = nx.average_clustering(G)   # clustering coef. of whole network
	ER_cluster = nx.average_clustering(ER_graph)	#cc of random graph
	
	transi = nx.transitivity(G)
	ER_transi = nx.transitivity(ER_graph)

	g.write("%f\t%f\t%f\t%f\t%f\n" % (thr, cluster,ER_cluster,transi,ER_transi ))
	
	f.write("%f\t%f\t%f" % (thr, cluster, ER_cluster))
	components = nx.connected_component_subgraphs(G)
	ER_components = nx.connected_component_subgraphs(ER_graph)

	values = []
	ER_values = []
	for i in range(len(components)):
		if nx.number_of_nodes(components[i]) > 1:
			values.append(nx.average_shortest_path_length(components[i]))
	for i in range(len(ER_components)):
		if nx.number_of_nodes(ER_components[i]) > 1:
			ER_values.append(nx.average_shortest_path_length(ER_components[i]))
	if len(values) == 0:
		f.write("\t0.")
	else:
		f.write("\t%f" % (sum(values)/len(values))) # pathlenght

	if len(ER_values) == 0:
		f.write("\t0.")
	else:
		f.write("\t%f" % (sum(ER_values)/len(ER_values)))

	f.write("\t%f\t%f" % (transi, ER_transi))  

	if (ER_cluster*sum(values)*len(values)*sum(ER_values)*len(ER_values)) >0 :
		S_WS = (cluster/ER_cluster) / ((sum(values)/len(values)) / (sum(ER_values)/len(ER_values)))  
	else:
		S_WS = 0.
	if (ER_transi*sum(values)*len(values)*sum(ER_values)*len(ER_values)) >0 :
		S_Delta = (transi/ER_transi) / ((sum(values)/len(values)) / (sum(ER_values)/len(ER_values)))
	else:
		S_Delta = 0.

	f.write("\t%f\t%f" % (S_WS, S_Delta)) # S_WS ~ small worldness 
	f.write("\n")

	f.close() 
	g.close()	 
Developer: rudimeier, Project: MSc_Thesis, Lines: 52, Source: sb_randomization.py


Example 20: run

def run(G, cut_pct, iterations=10):
  print nx.average_clustering(G)
  nodes = G.nodes()
  edges = G.edges()
  for i in range(iterations):
    np.random.shuffle(nodes)
    cut_count = int(cut_pct*len(nodes))
    selected_nodes = nodes[0:-cut_count]
    not_selected_nodes = set(nodes)-set(selected_nodes)
    not_selected_edges = G.subgraph(not_selected_nodes).edges()
    H = G.subgraph(nodes)
    H.remove_edges_from(not_selected_edges)
    H.remove_nodes_from(list(set(not_selected_nodes)&set(nx.isolates(H))))
    print nx.average_clustering(H)
Developer: DGaffney, Project: missing_data_network_analysis, Lines: 14, Source: compare_clustering_coefficient.py



Note: The networkx.average_clustering examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution or use should follow each project's License. Do not republish without permission.

