• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python statistics.mean函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中statistics.mean函数的典型用法代码示例。如果您正苦于以下问题:Python mean函数的具体用法?Python mean怎么用?Python mean使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了mean函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: show_result2

def show_result2():
    """Render result2.html with mean alcohol-consumption stats per school.

    Splits Price_History rows into GP / MS school groups and charts the
    mean workday (DALC) and weekend (WALC) alcohol consumption of each.
    """
    fd_list = db.session.query(Price_History).all()

    # Bucket consumption values by school code.
    gp_dalc, gp_walc, ms_dalc, ms_walc = [], [], [], []
    for el in fd_list:
        if el.SCHOOL == 'GP':
            gp_walc.append(el.WALC)
            gp_dalc.append(el.DALC)
        elif el.SCHOOL == 'MS':
            ms_walc.append(el.WALC)
            ms_dalc.append(el.DALC)
        else:
            print("School error")

    # Guard against empty groups so statistics.mean cannot raise
    # StatisticsError on an empty table (same policy as show_result).
    def _safe_mean(values):
        return statistics.mean(values) if values else 0

    mean_GPWALC = _safe_mean(gp_walc)
    mean_GPDALC = _safe_mean(gp_dalc)
    mean_MSWALC = _safe_mean(ms_walc)
    mean_MSDALC = _safe_mean(ms_dalc)

    # Prepare data for google charts
    data = [['GP School Workday Alcohol Consumption', mean_GPDALC], ['GP School Weekend  Alcohol Consumption', mean_GPWALC],
            ['MS School Workday Alcohol Consumption', mean_MSDALC], ['MS School Weekend Alcohol Consumption', mean_MSWALC]]
    return render_template('result2.html', data=data)
开发者ID:Imielin,项目名称:git,代码行数:28,代码来源:dataeng.py


示例2: sample

    def sample(self, borrowers, threshold, n_iterations=1000, eps=0.0001, target=None):
        """Monte-Carlo estimate of the mean simulated loss.

        :param borrowers: list of borrower (and information about them)
        :type borrowers: list[Borrower]

        :param threshold: big losses threshold
        :type threshold: float

        :param n_iterations: number of simulations
        :type n_iterations: int

        :param eps: convergence tolerance for the early-stop checks
        :param target: reference value; when given, stop once the running
            mean is within *eps* of it

        :return: mean of the simulated losses
        """
        w_matrix, w_indep, losses, vitality = self.get_parameters(borrowers)
        samples = []
        iteration = 0
        for iteration in range(n_iterations):
            samples.append(self.one_loss(w_matrix, w_indep, losses, vitality, threshold))
            # Convergence checks kick in only after a burn-in of 100 draws.
            if iteration <= 100:
                continue
            near_target = target is not None and abs(target - mean(samples)) < eps
            spread_small = (max(samples) - min(samples)) / (iteration ** 0.5) < eps
            if near_target or spread_small:
                break
        print("TwoStepSampler break after {} iterations".format(iteration))

        return mean(samples)
开发者ID:DaryaPopova,项目名称:diplom,代码行数:26,代码来源:two_step_sampler.py


示例3: process_result

	def process_result(self, t_frame, r_frame):
		"""Mean frame-transmission ratio and throughput with confidence bounds.

		t_frame / r_frame hold per-run transmitted / received frame counts.
		Uses a Student-t critical value of 2.776 (95% CI at 4 degrees of
		freedom, i.e. presumably int(self.T[0]) == 5 runs -- TODO confirm).
		Returns (avg2, lower2, upper2, avg3, lower3, upper3); on any
		division by zero returns (inf, inf, inf, 0, 0, 0).
		"""
		print(t_frame, r_frame)

		try:
			t_critical = float(2.776)
			n_runs = int(self.T[0])

			transmissions = []  # frames sent per frame received
			throughputs = []
			for run in range(n_runs):
				transmissions.append(t_frame[run] / r_frame[run])
				throughputs.append(self.F * r_frame[run] / self.R)

			def confidence(values):
				# mean +/- t * s / sqrt(n)
				avg = statistics.mean(values)
				spread = statistics.stdev(values) / math.sqrt(n_runs) * t_critical
				return avg, avg - spread, avg + spread

			avg_res2, lower_bound2, upper_bound2 = confidence(transmissions)
			avg_res3, lower_bound3, upper_bound3 = confidence(throughputs)

		except ZeroDivisionError:
			return float("inf"), float("inf"), float("inf"), 0, 0, 0

		return avg_res2, lower_bound2, upper_bound2, avg_res3, lower_bound3, upper_bound3
开发者ID:Hank-TNguyen,项目名称:W16,代码行数:31,代码来源:Simulation.py


示例4: mean_dev

def mean_dev(training_set):
    '''
    Calculates and returns the mean and standard deviation to the classes yes and no of a given training set
    '''
    mean_yes, mean_no = {}, {}
    dev_yes, dev_no = {}, {}
    prob_yes = prob_no = None  # filled in when the 'DiabetesClass' key is reached
    for key in training_set[0]:
        # Partition this attribute's values by diagnosis class.
        yes_values = [record[key] for record in training_set
                      if record['DiabetesClass'] == 'yes']
        no_values = [record[key] for record in training_set
                     if record['DiabetesClass'] != 'yes']
        if key == 'DiabetesClass':
            # For the label column itself, compute the class priors.
            prob_yes = float(len(yes_values) / len(training_set))
            prob_no = float(len(no_values) / len(training_set))
        else:
            mean_yes[key] = statistics.mean(yes_values)
            mean_no[key] = statistics.mean(no_values)
            dev_yes[key] = statistics.stdev(yes_values)
            dev_no[key] = statistics.stdev(no_values)
    return mean_yes, mean_no, dev_yes, dev_no, prob_yes, prob_no
开发者ID:pedrotst,项目名称:trab1-ai,代码行数:27,代码来源:old_main.py


示例5: get_parts_closeness

def get_parts_closeness(part1, part2) -> float:
    """Return the absolute difference between the mean distances of two parts."""
    avg_first = statistics.mean(part1.distances)
    avg_second = statistics.mean(part2.distances)
    return abs(avg_first - avg_second)
开发者ID:notnami,项目名称:signify,代码行数:7,代码来源:substring_parser.py


示例6: main

def main(graph, nbk, delta_max, mu, max_eval, iter, move_operator, tabuSize, logsPath):
    """Run the tabu-search benchmark *iter* times and log aggregate stats.

    Each run's best score, evaluation count and wall time are collected;
    their min/mean/stdev are written to <logsPath>/tabusearch.log.
    """
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    file_handler = logging.FileHandler(logsPath + "/tabusearch.log")
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(logging.Formatter('%(message)s'))
    log.addHandler(file_handler)

    all_num_evaluations = []
    all_best_score = []
    all_time = []
    log.info("-------RUNNING TABU SEARCH-------")
    for _ in range(iter):
        start = timeit.default_timer()
        num_evaluations, best_score, best = test_file_tabusearch(
            graph, nbk, delta_max, mu, max_eval, move_operator, tabuSize)
        elapsed = timeit.default_timer() - start
        log.debug('time : %f' % elapsed)
        all_num_evaluations.append(num_evaluations)
        all_best_score.append(best_score)
        all_time.append(elapsed)
    log.info("nbS = %d; nbK = %d; delta_max = %d; mu = %r; move_operator= %s; tabu_maxsize = %d" % (graph.get_nbVertices(), nbk, delta_max, mu, move_operator.__name__, tabuSize))
    log.info("for %d iteration with %d max_evaluations each, "
             "\n best score found is %d,"
             "\n total time in sec : %r"
             "\n mean time in sec : %r,"
             "\n mean best_score : %r, EcT : %r"
             "\n mean num_eval : %r"
             % (iter,
                max_eval,
                min(all_best_score),
                sum(all_time),
                statistics.mean(all_time),
                statistics.mean(all_best_score), statistics.stdev(all_best_score),
                statistics.mean(all_num_evaluations)))
开发者ID:BaaptM,项目名称:project_metaheuristics,代码行数:34,代码来源:test_tabusearch.py


示例7: calculate_latencies

def calculate_latencies(version_dates):
    """Export mean +/- stdev update latencies for each tracked project.

    linux uses the OS-to-kernel mapping; openssl and bouncycastle use
    their OS-to-project mappings.  Each result is published via
    set_latex_value as '<project>MeanUpdateLatency'.
    """
    # (project name, mapping of OS versions to upstream versions).
    # Refactored from three copy-pasted statements into one loop.
    sources = [
        ('linux', avo.os_to_kernel),
        ('openssl', avo.os_to_project['openssl']),
        ('bouncycastle', avo.os_to_project['bouncycastle']),
    ]
    for name, mapping in sources:
        latencies = latency(version_dates[name], OrderedDict(mapping))
        values = list(latencies.values())
        set_latex_value('%sMeanUpdateLatency' % name,
                        ufloat(statistics.mean(values), statistics.stdev(values)))
开发者ID:ucam-cl-dtg,项目名称:paper-da-securityupdates,代码行数:7,代码来源:versions.py


示例8: nutritionfacts

    def nutritionfacts(self):
        """Draw the per-series legend ("nutrition facts") panel as SVG.

        For every labelled point series this renders three dots in the
        series colour, the series label, the sample count n and — when
        the series is non-empty — the per-axis mean and population
        standard deviation.  The finished SVG fragment is handed to
        self._frostbyte().
        """
        # Legend anchor, placed to the right of the plot area.
        svgdata = ""
        frame_x = self.width * self.bins + 100 - 90
        frame_y = (self.graphheight + 700) // 2 + 25 - self.graphheight
        # Only series with a label (element [2]) get a legend entry.
        for i, s in enumerate([l for l in self.points if l[2]]):
            # Em-dash placeholders shown when the series has no points.
            mu = "μ = —"
            sigma = "σ = —"
            if len(s[0]) != 0:
                # s[0] holds the (x, y) points of this series.
                xmean = stat.mean([t[0] for t in s[0]])
                xsigma = stat.pstdev([t[0] for t in s[0]], xmean)

                ymean = stat.mean([t[1] for t in s[0]])
                ysigma = stat.pstdev([t[1] for t in s[0]], ymean)

                mu = "μ = (" + str(round(xmean, 4)) + ", " + str(round(ymean, 4)) + ")"
                sigma = "σ = (" + str(round(xsigma, 4)) + ", " + str(round(ysigma, 4)) + ")"

            # Each legend entry occupies a 65px-tall row.
            line_y = frame_y + i * 65
            # Three small dots drawn in the series colour s[1].
            svgdata += circle(frame_x - 4, line_y + 3, 2, s[1])
            svgdata += circle(frame_x + 4, line_y + 4, 2, s[1])
            svgdata += circle(frame_x - 1, line_y + 10, 2, s[1])

            svgdata += text(frame_x + 20, line_y + 10, s[2], align=-1, color=s[1], font="Neue Frutiger 65")
            svgdata += text(frame_x + 28, line_y + 25, "n = " + str(len(s[0])), align=-1, color=s[1])
            svgdata += text(frame_x + 28, line_y + 40, mu, align=-1, color=s[1])

            svgdata += text(frame_x + 28, line_y + 55, sigma, align=-1, color=s[1])
        self._frostbyte(svgdata)
开发者ID:kelvin13,项目名称:svgplot,代码行数:30,代码来源:graph.py


示例9: get_stats_window

def get_stats_window(depth_iterator, length, window_size):
    """Calculate min/max/mean and min/max windowed mean.

    Assumes the depth_iterator will fill in all the implicit zero
    entries which ``samtools depth`` may omit!

    Assumes window_size < number of values in iterator!
    """
    assert 1 <= window_size <= length

    win_vals = deque()
    cov_sum = 0
    cov_min = None
    cov_max = 0.0
    expected_pos = 0

    # Prime the first window, pulling one value at a time.
    while len(win_vals) < window_size:
        try:
            ref, pos, depth = next(depth_iterator)
        except NoCoverage:
            return 0, 0, 0.0, 0.0, 0.0
        except StopIteration:
            outstr = "Not enough depth values to fill %i window" % window_size
            logger.info(outstr)
            raise ValueError("%s" % outstr)
        expected_pos += 1
        assert pos == expected_pos, "Discontinuity in cov vals for %s position %i" % (ref,
                                                                                      pos)
        cov_sum += depth
        cov_min = depth if cov_min is None else min(cov_min, depth)
        cov_max = max(cov_max, depth)
        win_vals.append(depth)

    assert len(win_vals) == window_size
    win_min = win_max = mean(win_vals)

    # Slide the window across the remaining positions.
    for ref, pos, depth in depth_iterator:
        expected_pos += 1
        assert pos == expected_pos, "Discontinuity in cov val for %s position %i" % (ref,
                                                                                     pos)
        cov_sum += depth
        cov_min = min(cov_min, depth)
        cov_max = max(cov_max, depth)
        win_vals.popleft()
        win_vals.append(depth)
        assert len(win_vals) == window_size
        window_mean = mean(win_vals)
        win_min = min(win_min, window_mean)
        win_max = max(win_max, window_mean)

    mean_cov = cov_sum / float(length)

    # Final sanity checks on the accumulated invariants.
    assert expected_pos == length, "Missing final coverage?"
    assert len(win_vals) == window_size
    assert cov_min <= mean_cov <= cov_max
    assert cov_min <= win_min <= win_max <= cov_max

    return cov_min, cov_max, mean_cov, win_min, win_max
开发者ID:Grindell,项目名称:public_scripts,代码行数:60,代码来源:Fix_five_prime_CDS.py


示例10: features_present1

 def features_present1(self, othertmpft):
     """Score how closely the detected features of *othertmpft* match self.outline.

     Trains a fresh FeatureFinder on the signal, drops the first and last
     detections, then pairs each Feature in self.outline with its nearest
     detection.  Each pair is scored by the mean of a distance penalty and
     self.t() of the level difference; the scores go to self.find_outliar().

     NOTE(review): relies on Feature exposing .location, .loc and .lo —
     presumably position/level aliases; verify against Feature's definition.
     """
     a=FeatureFinder()
     a.train(othertmpft)
     j=a.scan_data(othertmpft)
     features=list()
     # dict_process turns the raw signal into an indexable series.
     dre=self.dict_process(othertmpft)
     sendback=list()
     final_list=list()
     # Drop the first and last detections (edge artifacts).
     del j[0]
     del j[len(j)-1]
     for i in j:
         #print(i.location)
         # Attach a local mean of the signal around each detection.
         # NOTE(review): for location 0 the slice start is -1, which
         # indexes from the *end* of dre — looks unintended; confirm.
         if i.location<2:
             final_list.append(Feature(i.location, statistics.mean(dre[i.location-1:i.location+3])))
         else:
             final_list.append(Feature(i.location, statistics.mean(dre[i.location-2:i.location+2])))
     # Collect the Feature entries of this object's outline.
     for i in self.outline:
        if type(i)==Feature:features.append(i)
     for i in features:
         # Nearest detection by location; bail out with zeros when
         # nothing at all was detected.
         if len(final_list)>0:l=min(final_list, key=lambda x: abs(i.loc-x.loc))
         else:return [0]*len(self.outline)
         # Distance penalty in [0, 1]: closer pairs score higher.
         dis=len(othertmpft)-abs(i.loc-l.loc)
         penalize_by=dis/len(othertmpft)
         #print(penalize_by)
         sendback.append(statistics.mean([penalize_by, self.t(abs(i.lo-l.lo))]))
   #  print(sendback)
     #print("I am features1")
     return self.find_outliar(sendback)
开发者ID:GGGG1020,项目名称:KineticEEG,代码行数:28,代码来源:SLICERZ.py


示例11: scan

 def scan(self):
     """Build self.outline: Slope entries interleaved with Feature entries.

     Slopes come from self.__process over self.listy; Features are the
     entries of self.download (minus its first and last elements) with
     a local mean of the processed signal attached.  Mutates
     self.download in place.
     """
     dre=list()
     final=list()
     # Processed signal, indexable by location.
     dre=self.dict_process(self.data)
     pol=[]
     oo=list()
     for d in self.listy:
         r=self.__process(d)
         # NOTE(review): both non-empty branches do the same append; the
         # net effect is simply "append mean(r[1]) unless r[1] is empty".
         if len(r[1])<2 and not len(r[1])==0:pol.append(statistics.mean(r[1]))
         elif len(r[1])==0:pass
         else:pol.append(statistics.mean(r[1]))
       #  print(pol)
     # One Slope per computed mean, anchored at the download locations.
     for i in range(len(pol)):
         final.append(Slope(self.download[i].location, pol[i]))
        ## print(final)
     # Drop the edge detections before interleaving Features.
     del self.download[0]
     del self.download[-1]
     last=1
     for i in range(len(self.download)):
         try:
             # Insert each Feature between the Slopes, carrying the mean
             # of the signal in a 4-wide window around its location.
             final.insert(i+last, Feature(self.download[i].location, statistics.mean(dre[self.download[i].location-2:self.download[i].location+2])))
         except statistics.StatisticsError:
             #del  final[i-1]
             pass
         last+=1
       #  print(final)
     self.outline=final
开发者ID:GGGG1020,项目名称:KineticEEG,代码行数:27,代码来源:SLICERZ.py


示例12: insertNormalizedModelInDB

def insertNormalizedModelInDB(idUser, idString, keystroke, isTest = False):
	"""Insert a normalized keystroke record for (idUser, idString) and fill
	in its press/latency mean and population standard deviation.

	:param idUser: user id
	:param idString: id of the typed string
	:param keystroke: raw keystroke timings fed to KeystrokeDimensionsExtractor
	:param isTest: routes the SQL to the test table via replaceIfIsTest
	"""
	insertNormalizedRecord = replaceIfIsTest("INSERT INTO `mdl_user#isTest_keystroke_normalized`(`id_user`, `id_string`) VALUES (%s, %s)", isTest);
	updateNormalizedRecord = replaceIfIsTest("UPDATE `mdl_user#isTest_keystroke_normalized` ", isTest);
	
		
	# Create the row first; the measurements are filled in by the UPDATE below.
	executeSqlInDB(insertNormalizedRecord, (idUser, idString));
	
	keyDimensionsExtractor = KeystrokeDimensionsExtractor(keystroke);
	
	# Per-key hold times ...
	timePressed = keyDimensionsExtractor.getTimePressed();
	# ... and their mean / population standard deviation.
	timePressedAverage = statistics.mean(timePressed);
	timePressedstandardDeviation = statistics.pstdev(timePressed);
	
	# Same statistics for the inter-key latencies.
	latencies = keyDimensionsExtractor.getLatencies();
	latenciesAverage = statistics.mean(latencies);
	latenciesStandardDeviation = statistics.pstdev(latencies);
	
	# Parameter dict for the UPDATE (the 'avarage' spelling matches the DB columns).
	dbModel = {
		'id_user': idUser,
		'id_string': idString,
		'press_average': timePressedAverage,
		'latency_avarage': latenciesAverage,
		'press_standard_deviation': timePressedstandardDeviation,
		'latency_standard_deviation': latenciesStandardDeviation,
	}
	
	# Update the row created above with the computed statistics.
	updateNormalizedRecord = updateNormalizedRecord + (" SET `press_average`= %(press_average)s,`latency_avarage`= %(latency_avarage)s, `press_standard_deviation`= %(press_standard_deviation)s,`latency_standard_deviation`= %(latency_standard_deviation)s " 
		" WHERE `id_user`= %(id_user)s AND `id_string`= %(id_string)s");
	executeSqlInDB(updateNormalizedRecord, dbModel);
开发者ID:MarcoASCruz,项目名称:LearningPython,代码行数:32,代码来源:validate.py


示例13: csv_dict_reader

def csv_dict_reader(file_obj):
    """
    Read a CSV file using csv.DictReader and append summary stats to svm_dataset.csv.

    For the num_likes, first_page_comment and comments_beyond_pageone
    columns this computes the mean, standard deviation and coefficient of
    variation (stdev / mean) and appends them as one quoted row.

    Raises statistics.StatisticsError for fewer than two input rows and
    ZeroDivisionError when a column mean is zero.
    """
    reader = csv.DictReader(file_obj, delimiter=',')
    num_likes = []
    num_comments = []
    num_shares = []
    for line in reader:
        num_likes.append(int(line["num_likes"]))
        num_comments.append(int(line["first_page_comment"]))
        num_shares.append(int(line["comments_beyond_pageone"]))

    mean_num_likes = statistics.mean(num_likes)
    stdev_num_likes = statistics.stdev(num_likes)
    mean_num_comments = statistics.mean(num_comments)
    stdev_num_comments = statistics.stdev(num_comments)
    mean_num_shares = statistics.mean(num_shares)
    stdev_num_shares = statistics.stdev(num_shares)
    # NOTE: these are coefficients of variation (stdev / mean), not
    # covariances, despite the variable names kept for compatibility.
    covariance_likes = stdev_num_likes / mean_num_likes
    covariance_comments = stdev_num_comments / mean_num_comments
    covariance_shares = stdev_num_shares / mean_num_shares

    # Fix: the original opened the output file inline and never closed it,
    # leaking the handle and risking unflushed data.  newline="" is the
    # documented way to open files for the csv module.
    with open("svm_dataset.csv", "a", newline="") as out:
        w = csv.writer(out, delimiter=',', quoting=csv.QUOTE_ALL)
        w.writerow([mean_num_likes, stdev_num_likes, covariance_likes,
                    mean_num_comments, stdev_num_comments, covariance_comments,
                    mean_num_shares, stdev_num_shares, covariance_shares])
开发者ID:envious777,项目名称:Brand-Valuation-using-Social-Media-Data,代码行数:27,代码来源:save.py


示例14: show_result3

def show_result3():
    """Render result3.html with mean alcohol-consumption stats by sex.

    Splits Price_History rows into male / female groups and charts the
    mean workday (DALC) and weekend (WALC) alcohol consumption of each.
    """
    fd_list = db.session.query(Price_History).all()

    # Bucket consumption values by sex code.
    m_dalc, m_walc, f_dalc, f_walc = [], [], [], []
    for el in fd_list:
        if el.SEX == 'M':
            m_walc.append(el.WALC)
            m_dalc.append(el.DALC)
        elif el.SEX == 'F':
            f_walc.append(el.WALC)
            f_dalc.append(el.DALC)
        else:
            print("Sex error")

    # Guard against empty groups so statistics.mean cannot raise
    # StatisticsError on an empty table (same policy as show_result).
    def _safe_mean(values):
        return statistics.mean(values) if values else 0

    mean_MWALC = _safe_mean(m_walc)
    mean_MDALC = _safe_mean(m_dalc)
    mean_FWALC = _safe_mean(f_walc)
    mean_FDALC = _safe_mean(f_dalc)

    # Prepare data for google charts
    data = [['Female Workday Alcohol Consumption', mean_FDALC], ['Female Weekend  Alcohol Consumption', mean_FWALC],
            ['Male Workday Alcohol Consumption', mean_MDALC], ['Male Weekend Alcohol Consumption', mean_MWALC]]
    return render_template('result3.html', data=data)
开发者ID:Imielin,项目名称:git,代码行数:28,代码来源:dataeng.py


示例15: threshold

def threshold(imageArray):
    """Binarise *imageArray* against its global brightness average.

    Pixels whose RGB mean exceeds the image-wide mean of pixel means
    become white (255, 255, 255); all others become black (0, 0, 0).
    NOTE: newAr aliases imageArray, so the caller's array is modified
    in place; the same (mutated) structure is returned.
    """
    newAr = imageArray

    # Image-wide brightness: mean of each pixel's RGB mean.
    pixel_means = [mean(pixel[:3]) for row in imageArray for pixel in row]
    balance = mean(pixel_means)

    for row in newAr:
        for pixel in row:
            # Strictly brighter than the balance -> white, else black.
            shade = 255 if mean(pixel[:3]) > balance else 0
            pixel[0] = shade
            pixel[1] = shade
            pixel[2] = shade
    return newAr
开发者ID:adamwe1,项目名称:AdamWendlerProj3,代码行数:25,代码来源:SVCMethod.py


示例16: show_result

def show_result():
    """Render result.html with mean satisfaction / skill scores."""
    fd_list = db.session.query(Formdata).all()

    # Collect the sample answers as integers.
    satisfaction = [int(el.satisfaction) for el in fd_list]
    q1 = [int(el.q1) for el in fd_list]
    q2 = [int(el.q2) for el in fd_list]

    # An empty survey yields 0 instead of a StatisticsError.
    mean_satisfaction = statistics.mean(satisfaction) if satisfaction else 0
    mean_q1 = statistics.mean(q1) if q1 else 0
    mean_q2 = statistics.mean(q2) if q2 else 0

    # Prepare data for google charts
    data = [['Satisfaction', mean_satisfaction], ['Python skill', mean_q1], ['Flask skill', mean_q2]]

    return render_template('result.html', data=data)
开发者ID:mrevening,项目名称:FlaskAconda,代码行数:31,代码来源:unit5_webapp.py


示例17: survey

    def survey(filtered=NO_FILTER):
        """Aggregate per-logic usage statistics for plotting.

        For every headword in Logicon.common_logics, collects (count,
        latency, mean interval, mean permanence) across the logic entries
        accepted by *filtered*, falling back to two dummy rows when
        nothing matched at all.

        :param filtered: filter whose .filtered() selects logic entries
            and whose .claz labels the series
        :return: (stat columns, axis labels (pt-BR), x ticks, series
            label, chart title)
        """
        from statistics import mean

        # for lex_user in Lexicon.lex_users:
        #     for lex in Lexicon.lex[lex_user]:
        #         if lex.headword in Lexicon.common_vocabulary:
        #             lex["stats"][lex.headword].update(lex.timestamp, lex.start_time)
        # One list per headword: the (count, latency, interval, permanence)
        # tuples of the accepted entries, transposed into columns by zip(*...).
        stats_by_lex = [
            list(zip(*[(logic_entry["stats"][headword].count, logic_entry["stats"][headword].latency,
                        mean(logic_entry["stats"][headword].interval) if logic_entry["stats"][headword].interval else 0,
                        mean(logic_entry["stats"][headword].permanence) if logic_entry["stats"][
                            headword].permanence else 0)
                       for logic_entry in Logicon.logic.values() if filtered.filtered(logic_entry["logic"][0])]))
            # for idiom_entry in Idiomaton.idiom.values() if idiom_entry["logic"][0].clazz == "E"]))
            for headword in Logicon.common_logics]
        print("Logicon.logic.", [(l, Logicon.logic[l]) for l in Logicon.logic])
        print("stats_by_lex", stats_by_lex)
        # Dummy rows keep downstream plotting alive when the filter
        # matched nothing.  NOTE(review): the 4-way unpack assumes every
        # headword matched at least one entry; a mix of empty and
        # non-empty headwords would raise ValueError — confirm intended.
        stat_props = [
            (count, latency, interval,
             permanence)
            for count, latency, interval, permanence in stats_by_lex] if any(stats_by_lex) else \
            [(0, 2.5e3, 0, 0), (1, 1.5e3, 1, 1)]
        ticks = [Logicon.logicon[burst]["sequence"] for burst in Logicon.common_logics]
        return list(zip(*stat_props)), \
            ["%s da Lógica" % stat for stat in "Contagem Latência Intervalo Permanência".split()], \
            ticks, filtered.claz, "Índices das lógicas EICA"
开发者ID:labase,项目名称:eica,代码行数:26,代码来源:language.py


示例18: analyzeData

def analyzeData(purchases, times):
    """Aggregate purchases into a cumulative per-time-unit series.

    Purchases more than 1.5 standard deviations above the mean are set
    aside as outliers; the remaining amounts are summed per 1-based time
    unit, turned into a running total, and each outlier amount is then
    added to every point of the series.

    :param purchases: purchase amounts
    :param times: 1-based time unit of each purchase (parallel to purchases)
    :return: cumulative totals, one entry per time unit
    """
    # Hoisted: the original recomputed statistics.mean inside the loop.
    mean_purchases = statistics.mean(purchases)
    std = statistics.stdev(purchases, mean_purchases)
    cutoff = mean_purchases + std * 1.5

    # Fix: these were undefined module-level names in the original, which
    # crashes on first call (NameError) — or silently accumulates state
    # across calls if globals happened to exist.  They are locals now.
    outliers = []
    trimmed_data = []
    trimmed_times = []
    for amount, when in zip(purchases, times):
        if amount > cutoff:
            outliers.append(amount)
        else:
            trimmed_data.append(amount)
            trimmed_times.append(when)

    # Sum the kept purchases per 1-based time slot.
    data = [0] * max(trimmed_times)
    for amount, when in zip(trimmed_data, trimmed_times):
        data[when - 1] += amount

    # Convert per-slot totals into a running (cumulative) total.
    for w in range(max(trimmed_times) - 1):
        data[w + 1] += data[w]

    # Add every outlier amount to each point of the series.
    for amount in outliers:
        for w in range(len(data)):
            data[w] += amount

    return data
开发者ID:qazwsxcde125,项目名称:DaemonDashFall2015-1,代码行数:29,代码来源:dataAnalysis.py


示例19: main

def main(total_rolls=20000):
    """Simulate die rolls and report statistics for two stopping sums.

    Rolls a six-sided die *total_rolls* times, slices the sequence into
    runs whose running total reaches M (for M = 20 and M = 10000), then
    prints mean/stdev of the overshoot (sum - M) and of the run lengths,
    followed by a per-run breakdown table.
    """
    rolls_list = rolls(total_rolls, 1, 6)
    # Runs reaching a sum of 20; `sums(..., -20)` presumably yields the
    # overshoot (sum - 20) per run and `lens` the rolls used — the
    # helpers are defined elsewhere in this module.
    sliced_sum20 = sliced_sums(20, rolls_list)
    sums20 = sums(sliced_sum20, -20)
    roll_count20 = lens(sliced_sum20)
    # Same breakdown for runs reaching a sum of 10000.
    sliced_sum10k = sliced_sums(10000, rolls_list)
    sums10k = sums(sliced_sum10k, -10000)
    roll_count10k = lens(sliced_sum10k)
    paired_sums = [(20, sums20), (10000, sums10k)]
    paired_rolls = [(20, roll_count20), (10000, roll_count10k)]

    # Each answers() call formats one report line per (M, data) pair.
    answers("Mean of the sum - {0} when M is {0}:",
            paired_sums, lambda s: statistics.mean(s))
    answers("Mean of the number of rolls when M is {0}:",
            paired_rolls, lambda s: statistics.mean(s))
    answers("Standard deviation of the sum - {0} when M is {0}:",
            paired_sums, lambda s: statistics.stdev(s))
    answers("Standard deviation of the number of rolls when M is {0}:",
            paired_rolls, lambda s: statistics.stdev(s))
    # Tabular dump of every run: its length, its sum, and the rolls.
    answers("\nView of the rolls summing to {0}\n" +
            format("Count", ">7") + " " + format("Sum", ">7") + " Rolls\n",
            [(20, sliced_sum20), (10000, sliced_sum10k)],
            lambda ss: ''.join(
                format(len(s[1]), ">7") + " " + format(s[0], ">7") + " " +
                format(s[1]) + "\n" for s in ss)
            , sep=''
            )
开发者ID:dlamblin,项目名称:NYC-taxi-Data-Incubator-challenge,代码行数:27,代码来源:rolls.py


示例20: run_simulation

def run_simulation(init_duration, init_stake, samples, player):
    """Run a roulette simulation for *player* and print duration and
    maxima statistics to stdout."""
    wheel = create_wheel()
    game = RouletteGame(wheel, Table(wheel))
    simulator = Simulator(game, player,
                          init_duration=init_duration, samples=samples,
                          init_stake=init_stake)
    simulator.gather()

    def report(label, values):
        # One min/max/mean/stdev block, matching the original layout.
        print(label)
        print("  min :", min(values))
        print("  max :", max(values))
        print("  mean: %.2f" % statistics.mean(values))
        print("  dev : %.2f" % statistics.stdev(values))

    print(player)
    print()
    report("Durations", simulator.durations)
    report("Maxima", simulator.maxima)



注:本文中的statistics.mean函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python statistics.median函数代码示例发布时间:2022-05-27
下一篇:
Python eveapihandler.EveAPIHandler类代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap