This article collects typical usage examples of the simulate.simulate function in Python. If you have been wondering what exactly simulate does, how to call it, and what it looks like in real code, the hand-picked examples below may help.
The following shows 20 code examples of the simulate function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
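Note that the examples below come from unrelated projects, each with its own simulate function. The simulate module itself is not reproduced on this page; the following is a minimal sketch consistent with how the trace-analysis examples (Examples 1-7 and 9) call it — a list of task runtimes plus an optional maximum concurrency, returning the simulated total runtime and a list of per-task (start, finish) times. It is an illustration of that calling convention, not the original implementation.

import heapq

def simulate(task_runtimes, concurrency=1):
    """Greedy simulation: run tasks in list order on `concurrency` slots.

    Returns (total_runtime, start_finish_times), mirroring the way the
    trace-analysis examples below unpack the result of simulate.simulate().
    """
    slots = [0.0] * max(1, concurrency)  # time at which each slot becomes free
    heapq.heapify(slots)
    start_finish_times = []
    for runtime in task_runtimes:
        start = heapq.heappop(slots)     # earliest available slot
        finish = start + runtime
        start_finish_times.append((start, finish))
        heapq.heappush(slots, finish)
    total_runtime = max(slots) if start_finish_times else 0.0
    return total_runtime, start_finish_times

# For example, three tasks on two slots finish after 4 time units:
# simulate([3, 2, 2], concurrency=2) -> (4.0, [(0.0, 3.0), (0.0, 2.0), (2.0, 4.0)])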
Example 1: get_simulated_runtime
def get_simulated_runtime(self, waterfall_prefix=""):
""" Returns the simulated runtime for the job.
This should be approximately the same as the original runtime of the job, except
that it doesn't include scheduler delay.
If a non-empty waterfall_prefix is passed in, makes a waterfall plot based on the simulated
runtimes.
"""
total_runtime = 0
tasks_for_combined_stages = []
all_start_finish_times = []
for id, stage in self.stages.iteritems():
if id in self.stages_to_combine:
tasks_for_combined_stages.extend(stage.tasks)
else:
tasks = sorted(stage.tasks, key = lambda task: task.start_time)
simulated_runtime, start_finish_times = simulate.simulate(
[t.runtime() for t in tasks], concurrency.get_max_concurrency(tasks))
start_finish_times_adjusted = [
(start + total_runtime, finish + total_runtime) for start, finish in start_finish_times]
all_start_finish_times.append(start_finish_times_adjusted)
total_runtime += simulated_runtime
if len(tasks_for_combined_stages) > 0:
tasks = sorted(tasks_for_combined_stages, key = lambda task: task.start_time)
simulated_runtime, start_finish_times = simulate.simulate(
[task.runtime() for task in tasks], self.combined_stages_concurrency)
start_finish_times_adjusted = [
(start - simulated_runtime, finish - simulated_runtime) for start, finish in start_finish_times]
all_start_finish_times.append(start_finish_times_adjusted)
total_runtime += simulated_runtime
if waterfall_prefix:
self.write_simulated_waterfall(all_start_finish_times, "%s_simulated" % waterfall_prefix)
return total_runtime
Author: pxgao, Project: trace-analysis, Lines of code: 35, Source file: parse_logs.py
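A hypothetical call site for the method above (the class that owns it is not shown on this page; `analyzer` stands in for an instance of it):

# Hypothetical usage: compute the simulated runtime, once without and once
# with a waterfall plot; the prefix only names the plot output files.
runtime_only = analyzer.get_simulated_runtime()
runtime_with_plot = analyzer.get_simulated_runtime(waterfall_prefix="my_job")
print("Simulated runtime without scheduler delay: %s" % runtime_only)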
Example 2: median_progress_rate_speedup
def median_progress_rate_speedup(self, prefix):
""" Returns how fast the job would have run if all tasks had the median progress rate. """
total_median_progress_rate_runtime = 0
runtimes_for_combined_stages = []
all_start_finish_times = []
for id, stage in self.stages.iteritems():
median_rate_runtimes = stage.task_runtimes_with_median_progress_rate()
if id in self.stages_to_combine:
runtimes_for_combined_stages.extend(median_rate_runtimes)
else:
no_stragglers_runtime, start_finish_times = simulate.simulate(
median_rate_runtimes, concurrency.get_max_concurrency(stage.tasks))
start_finish_times_adjusted = [
(start + total_median_progress_rate_runtime, finish + total_median_progress_rate_runtime) \
for start, finish in start_finish_times]
total_median_progress_rate_runtime += no_stragglers_runtime
all_start_finish_times.append(start_finish_times_adjusted)
if len(runtimes_for_combined_stages) > 0:
no_stragglers_runtime, start_finish_times = simulate.simulate(
runtimes_for_combined_stages, self.combined_stages_concurrency)
start_finish_times_adjusted = [
(start + total_median_progress_rate_runtime, finish + total_median_progress_rate_runtime) \
for start, finish in start_finish_times]
total_median_progress_rate_runtime += no_stragglers_runtime
all_start_finish_times.append(start_finish_times_adjusted)
self.write_simulated_waterfall(all_start_finish_times, "%s_sim_median_progress_rate" % prefix)
return total_median_progress_rate_runtime * 1.0 / self.get_simulated_runtime()
Author: pxgao, Project: trace-analysis, Lines of code: 29, Source file: parse_logs.py
Example 3: replace_95_stragglers_with_median_speedup
def replace_95_stragglers_with_median_speedup(self):
""" Returns how much faster the job would have run if there were no stragglers.
Removes stragglers by replacing the longest 5% of tasks with the median runtime
for tasks in the stage.
"""
total_no_stragglers_runtime = 0
runtimes_for_combined_stages = []
for id, stage in self.stages.iteritems():
runtimes = [task.runtime() for task in stage.tasks]
runtimes.sort()
median_runtime = get_percentile(runtimes, 0.5)
threshold_runtime = get_percentile(runtimes, 0.95)
no_straggler_runtimes = []
for runtime in runtimes:
if runtime >= threshold_runtime:
no_straggler_runtimes.append(median_runtime)
else:
no_straggler_runtimes.append(runtime)
if id in self.stages_to_combine:
runtimes_for_combined_stages.extend(no_straggler_runtimes)
else:
no_stragglers_runtime = simulate.simulate(no_straggler_runtimes)[0]
total_no_stragglers_runtime += no_stragglers_runtime
original_runtime = simulate.simulate([task.runtime() for task in stage.tasks])[0]
print "%s: Orig: %s, no stragg: %s" % (id, original_runtime, no_stragglers_runtime)
if len(runtimes_for_combined_stages) > 0:
total_no_stragglers_runtime += simulate.simulate(runtimes_for_combined_stages)[0]
return total_no_stragglers_runtime * 1.0 / self.get_simulated_runtime()
Author: jegonzal, Project: trace-analysis, Lines of code: 29, Source file: parse_logs.py
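Example 3 calls a get_percentile helper that is defined elsewhere in parse_logs.py and is not reproduced here. A minimal stand-in, assuming the input list is already sorted ascending (the example sorts runtimes before calling it), could look like this; the real implementation may interpolate differently:

def get_percentile(sorted_values, fraction):
    # Index-based percentile on a pre-sorted list (stand-in for the helper above).
    if not sorted_values:
        return 0
    index = min(int(fraction * len(sorted_values)), len(sorted_values) - 1)
    return sorted_values[index]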
Example 4: replace_all_tasks_with_average_speedup
def replace_all_tasks_with_average_speedup(self, prefix):
""" Returns how much faster the job would have run if there were no stragglers.
Eliminates stragglers by replacing each task's runtime with the average runtime
for tasks in the job.
"""
self.print_heading("Computing speedup by averaging out stragglers")
total_no_stragglers_runtime = 0
averaged_runtimes_for_combined_stages = []
all_start_finish_times = []
for id, stage in self.stages.iteritems():
averaged_runtimes = [stage.average_task_runtime()] * len(stage.tasks)
if id in self.stages_to_combine:
averaged_runtimes_for_combined_stages.extend(averaged_runtimes)
else:
no_stragglers_runtime, start_finish_times = simulate.simulate(averaged_runtimes)
# Adjust the start and finish times based on when the stage started.
start_finish_times_adjusted = [
(start + total_no_stragglers_runtime, finish + total_no_stragglers_runtime) \
for start, finish in start_finish_times]
total_no_stragglers_runtime += no_stragglers_runtime
all_start_finish_times.append(start_finish_times_adjusted)
if len(averaged_runtimes_for_combined_stages) > 0:
no_stragglers_runtime, start_finish_times = simulate.simulate(
averaged_runtimes_for_combined_stages)
# Adjust the start and finish times based on when the stage started.
# The subtraction is a hack to put the combined stages at the beginning, which
# is when they usually occur.
start_finish_times_adjusted = [
(start - no_stragglers_runtime, finish - no_stragglers_runtime) for start, finish in start_finish_times]
total_no_stragglers_runtime += no_stragglers_runtime
all_start_finish_times.append(start_finish_times_adjusted)
self.write_simulated_waterfall(all_start_finish_times, "%s_sim_no_stragglers" % prefix)
return total_no_stragglers_runtime * 1.0 / self.get_simulated_runtime()
Author: shivaram, Project: trace-analysis, Lines of code: 35, Source file: parse_logs.py
Example 5: replace_stragglers_with_median_speedup
def replace_stragglers_with_median_speedup(self, threshold_fn):
""" Returns how much faster the job would have run if there were no stragglers.
For each stage, passes the list of task runtimes into threshold_fn, which should
return a threshold runtime. Then, replaces all task runtimes greater than the given
threshold with the median runtime.
For example, to replace the tasks with the longest 5% of runtimes with the median:
self.replace_stragglers_with_median_speedup(lambda runtimes: numpy.percentile(runtimes, 95))
"""
self.print_heading("Computing speedup from replacing straggler tasks with median")
total_no_stragglers_runtime = 0
start_and_runtimes_for_combined_stages = []
original_start_and_runtimes_for_combined_stages = []
num_stragglers_combined_stages = 0
for id, stage in self.stages.iteritems():
runtimes = [task.runtime() for task in stage.tasks]
median_runtime = numpy.percentile(runtimes, 50)
threshold_runtime = threshold_fn(runtimes)
no_straggler_start_and_runtimes = []
num_stragglers = 0
sorted_stage_tasks = sorted(stage.tasks, key = lambda t: t.runtime())
for task in sorted_stage_tasks:
if task.runtime() >= threshold_runtime:
assert(median_runtime <= task.runtime())
no_straggler_start_and_runtimes.append((task.start_time, median_runtime))
num_stragglers += 1
else:
no_straggler_start_and_runtimes.append((task.start_time, task.runtime()))
if id in self.stages_to_combine:
start_and_runtimes_for_combined_stages.extend(no_straggler_start_and_runtimes)
original_start_and_runtimes_for_combined_stages.extend(
[(t.start_time, t.runtime()) for t in stage.tasks])
num_stragglers_combined_stages += num_stragglers
else:
max_concurrency = concurrency.get_max_concurrency(stage.tasks)
no_stragglers_runtime = simulate.simulate(
[x[1] for x in no_straggler_start_and_runtimes], max_concurrency)[0]
total_no_stragglers_runtime += no_stragglers_runtime
original_runtime = simulate.simulate(
[task.runtime() for task in sorted_stage_tasks], max_concurrency)[0]
print ("%s: Original: %s, Orig (sim): %s, no stragg: %s (%s stragglers)" %
(id, stage.finish_time() - stage.start_time, original_runtime, no_stragglers_runtime,
num_stragglers))
if len(start_and_runtimes_for_combined_stages) > 0:
original_start_time = min([x[0] for x in start_and_runtimes_for_combined_stages])
original_finish_time = max([x[0] + x[1] for x in start_and_runtimes_for_combined_stages])
start_and_runtimes_for_combined_stages.sort()
runtimes_for_combined_stages = [x[1] for x in start_and_runtimes_for_combined_stages]
new_runtime = simulate.simulate(
runtimes_for_combined_stages, self.combined_stages_concurrency)[0]
original_runtime = simulate.simulate(
[x[1] for x in sorted(original_start_and_runtimes_for_combined_stages)],
self.combined_stages_concurrency)[0]
print ("Combined: Original: %s, Orig (sim): %s, no stragg: %s (%s stragglers)" %
(original_finish_time - original_start_time, original_runtime, new_runtime,
num_stragglers_combined_stages))
total_no_stragglers_runtime += new_runtime
return total_no_stragglers_runtime * 1.0 / self.get_simulated_runtime()
Author: pxgao, Project: trace-analysis, Lines of code: 59, Source file: parse_logs.py
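Following the docstring, the threshold function is supplied by the caller. A hypothetical pair of calls (again with `analyzer` standing in for an instance of the owning class, which is not shown here):

import numpy

# Treat the slowest 5% (or 1%) of tasks as stragglers.
speedup_p95 = analyzer.replace_stragglers_with_median_speedup(
    lambda runtimes: numpy.percentile(runtimes, 95))
speedup_p99 = analyzer.replace_stragglers_with_median_speedup(
    lambda runtimes: numpy.percentile(runtimes, 99))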
Example 6: median_progress_rate_speedup
def median_progress_rate_speedup(self):
""" Returns how fast the job would have run if all tasks had the median progress rate. """
total_median_progress_rate_runtime = 0
runtimes_for_combined_stages = []
for id, stage in self.stages.iteritems():
median_rate_runtimes = stage.task_runtimes_with_median_progress_rate()
if id in self.stages_to_combine:
runtimes_for_combined_stages.extend(median_rate_runtimes)
else:
total_median_progress_rate_runtime += simulate.simulate(median_rate_runtimes)[0]
if len(runtimes_for_combined_stages) > 0:
total_median_progress_rate_runtime += simulate.simulate(runtimes_for_combined_stages)[0]
return total_median_progress_rate_runtime * 1.0 / self.get_simulated_runtime()
Author: jegonzal, Project: trace-analysis, Lines of code: 13, Source file: parse_logs.py
Example 7: add_tasks_to_totals
def add_tasks_to_totals(unsorted_tasks):
# Sort the tasks by the start time, not the finish time -- otherwise the longest tasks
# end up getting run last, which can artificially inflate job completion time.
tasks = sorted(unsorted_tasks, key=lambda task: task.start_time)
# Get the runtime for the stage
task_runtimes = [compute_base_runtime(task) for task in tasks]
base_runtime = simulate.simulate(task_runtimes)[0]
total_time[0] += base_runtime
faster_runtimes = [compute_faster_runtime(task) for task in tasks]
faster_runtime = simulate.simulate(faster_runtimes)[0]
total_faster_time[0] += faster_runtime
print "Base: %s, faster: %s" % (base_runtime, faster_runtime)
Author: jegonzal, Project: trace-analysis, Lines of code: 14, Source file: parse_logs.py
Example 8: test_overshoot
def test_overshoot(self):
'''Test if the overshoot is less than 1.0e-2.
'''
T, X = simulate.simulate(t0=0.0, t1=2.0, dt=1.0e-2)
# Make sure that the overshoot of X stays below 1.0e-2.
self.assertLess(max(X), 1.0e-2)
return
Author: emecercelik, Project: python-unittest, Lines of code: 7, Source file: tests.py
Example 9: replace_stragglers_with_median_speedup
def replace_stragglers_with_median_speedup(self):
""" Returns how much faster the job would have run if there were no stragglers.
Removes stragglers by replacing all task runtimes with the median runtime for tasks in the
stage.
"""
total_no_stragglers_runtime = 0
runtimes_for_combined_stages = []
for id, stage in self.stages.iteritems():
runtimes = [task.runtime() for task in stage.tasks]
median_runtime = numpy.median(runtimes)
no_straggler_runtimes = [median_runtime] * len(stage.tasks)
if id in self.stages_to_combine:
runtimes_for_combined_stages.extend(no_straggler_runtimes)
else:
total_no_stragglers_runtime += simulate.simulate(no_straggler_runtimes)[0]
if len(runtimes_for_combined_stages) > 0:
total_no_stragglers_runtime += simulate.simulate(runtimes_for_combined_stages)[0]
return total_no_stragglers_runtime * 1.0 / self.get_simulated_runtime()
Author: jegonzal, Project: trace-analysis, Lines of code: 19, Source file: parse_logs.py
Example 10: runTest
def runTest(self):
""" Test if simulation algorithm works correctly """
results, _ = readFromFile('inputs/testSimulation.dat')
seed(1)
events, stats = simulate(results, False, True)
for i, event in enumerate(events):
self.failUnless(event == testEvents[i], 'Simulation does not match: %s' % event)
for i, stat in enumerate(stats):
self.failUnless(stat == testStats[i], 'Statistics do not match: %s' % stat)
Author: Dirzys, Project: ComputerScienceLargePractical, Lines of code: 13, Source file: unitTest.py
Example 11: process
def process():
parser = ArgumentParser(
description="Simulate the flocking behaviour of a number of birds. When no configuration file is given the simulation will run with default parameters and an example configuration file (containing the default parameters) will be saved in the current directory.")
parser.add_argument("--file", "-f",
help="The configuration file, in yaml format")
arguments = parser.parse_args()
if arguments.file:
params = yaml.load(open(arguments.file))
else:
params = yaml.load(
open(
os.path.join(
os.path.dirname(__file__),
'params.yaml')))
with open('example_config.yaml', "w") as f:
f.write(yaml.dump(params))
flock_params = params["flock_params"]
boid_params = params["boid_params"]
anim_params = params["anim_params"]
flock = Flock(flock_params, boid_params)
simulate(anim_params, flock)
Author: MikeVasmer, Project: bad-boids, Lines of code: 24, Source file: command.py
Example 12: attempt
def attempt(memory, input_string, ht):
collisions = 0
output = simulate.simulate(memory, str(input_string))
#We've seen this output before
if output in ht:
#We have NOT seen this input string before
if input_string not in ht[output]:
#We have a collision
ht[output][input_string] = True
collisions += 1
#Haven't seen the output before. Just set up the dictionary.
else:
ht[output] = {}
return collisions
Author: mallyvai, Project: hashbash, Lines of code: 17, Source file: fitness.py
Example 13: run_simulation
def run_simulation(symbol):
query_params = request.args
print query_params.get('startdate')
print query_params.get('enddate')
trim_start = query_params.get('startdate') or '2015-11-01'
trim_end = query_params.get('enddate') or '2016-11-01'
prices = get_prices([symbol], trim_start=trim_start, trim_end=trim_end)
prices = prices[symbol]
signal_crosses, simulation, earnings = simulate(prices)
dailies = prices
for timestamp in dailies.keys():
dailies[timestamp] = {
'price': prices[timestamp],
'signal': signal_crosses[timestamp],
'shares': simulation[timestamp]['shares'],
'cash_on_hand': simulation[timestamp]['cash_on_hand']
}
dailies = SortedDict(dailies)
return json.dumps({'earnings': earnings, 'dailies': dailies})
Author: jeremyjs, Project: stockoverflow, Lines of code: 19, Source file: app.py
Example 14: get_badness
def get_badness(self, p, method, debug):
# this is the value that will accumulate the deviation from the measurements
badness_total = 0.0
badnesses = []
# for all 48x4 possible setups
for typeid in range(self.strain_count):
badness = 0.0
# only simulate the cloneable arrangements
if self.valids[typeid] == True:
for iptgatc in range(4):
# get the parameters for the simulation via the ruleset
params = self.apply_ruleset(p, typeid, iptgatc)
# get the simulated yfp levels
yfps = np.array(simulate(params))
# get the actual measurements for comparison
measurements = self.data[typeid][iptgatc]
# compute the quadratic difference and add it to the badness
#print(yfps)
#print(measurements)
if method == 0:
badness += np.sum((yfps-measurements)**2)
elif method == 1:
yfps = np.maximum(yfps, np.add(np.zeros(4), 0.000001))
badness += np.sum(np.abs(np.log10(yfps) - np.log10(measurements)))
elif method == 2:
badness += np.sum(abs(yfps-measurements))
elif method == 3:
yfps = np.maximum(yfps, np.add(np.zeros(4), 0.000001))
badness += np.sum(np.exp(np.abs(np.log10(yfps) - np.log10(measurements))))
badness_total += badness
if debug >= 2:
print("%s: %f" % (self.types[typeid], badness))
if debug >= 3:
badnesses.append(badness)
if debug >= 3:
return badness_total, badnesses
return badness_total
Author: geoo89, Project: genetic-network, Lines of code: 38, Source file: optimize.py
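The four scoring modes in get_badness compare the simulated YFP levels against the measurements in different ways. The following self-contained toy computation applies the same formulas to made-up numbers (the two arrays are stand-ins for the simulate() output and the stored measurements):

import numpy as np

yfps = np.array([0.02, 1.3, 0.5, 0.9])          # stand-in simulated YFP levels
measurements = np.array([0.05, 1.0, 0.4, 1.2])  # stand-in measured levels

quadratic = np.sum((yfps - measurements) ** 2)                              # method 0
clipped = np.maximum(yfps, 1e-6)                                            # avoid log10(0)
log_distance = np.sum(np.abs(np.log10(clipped) - np.log10(measurements)))   # method 1
absolute = np.sum(np.abs(yfps - measurements))                              # method 2
exp_log = np.sum(np.exp(np.abs(np.log10(clipped) - np.log10(measurements))))  # method 3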
Example 15: range
import random
shiftrange = range(1, 20)
ws = access.wsmake(mem=range(10000), rand=random, size=5)
alist = []
for i in range(500000):
alist.append(access.access(range(10000), ws, random, 0.95))
if not i % 10:
access.wsmove(range(10000), ws, random)
if not i % 1000:
print "alist: " + str(i)
ratios = []
for i in shiftrange:
mms = algorithms.Aging(6, bits=4, shift=i) # Instantiate.
ratios.append((i, simulate.simulate(mms, alist)))
print "Shifting frequency: " + str(i)
g = Gnuplot.Gnuplot()
g('set data style points')
g('set yrange[0:]')
g('set terminal epslatex monochrome')
g('set output "diagram2.eps"')
g('set xlabel "shifting frequency in read-instructions per shift"')
g('set ylabel "ratio of page faults to accesses"')
g.plot(Gnuplot.Data(ratios, inline=1))
print "\nNow move diagram2.* to ../"
Author: david78k, Project: pagereplacement, Lines of code: 30, Source file: makediagram2.py
Example 16: range
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
N=500
T=100
Gamma,Y,Shocks,y = {},{},{},{}
Gamma[0] = np.zeros((N,3)) #initialize N agents at m = 1 for testing purposes
Gamma[0][:,0] = np.zeros(N)
v = simulate.v
v.execute('import calibrate_begs_id_nu as Para')
v.execute('import approximate_begs as approximate')
v.execute('approximate.calibrate(Para)')
simulate.simulate(Para,Gamma,Y,Shocks,y,T)
indx_y,indx_Y,indx_Gamma=Para.indx_y,Para.indx_Y,Para.indx_Gamma
mu,output,g,assets,bar_g,simulation_data={},{},{},{},{},{}
for t in range(T-1):
if np.shape(y[t])[1]<10:
y[t]=np.hstack((y[t][:,0:1],np.atleast_2d(Shocks[0]).reshape(N,1),y[t][:,1:]))
output[t]=np.atleast_2d(y[t][:,indx_y['l']]*np.exp(y[t][:,indx_y['e']])).reshape(N,1)
mu[t]=np.exp(y[t][:,indx_y['logm']])*(y[t][:,indx_y['muhat']])
g[t]=(1-mu[t]*(1+Para.gamma))*((y[t][:,indx_y['c']])**(-Para.sigma))
bar_g[t]=np.mean(g[t])
g[t]=g[t]/bar_g[t]
g[t]=np.atleast_2d(g[t]).reshape(N,1)
if t==0:
Author: dgevans, Project: IdioApprox, Lines of code: 31, Source file: main_atlanta.py
Example 17: print
clockStart = time.time()
# Generate simulated data
print('Generating simulated data with 2 classes')
eventNb = 5
keyframeNb = 3
sampleNb = 1000
classNb = 2
dataFolder = './gen_data/'
trainFile = 'train.txt'
trainLabels = 'trainLabels.txt'
testFile = 'test.txt'
testLabels = 'testLabels.txt'
if not os.path.isdir(dataFolder) :
os.makedirs(dataFolder)
(targetSeq1Tr, targetSeq2Tr) = simulate(eventNb, keyframeNb, sampleNb, dataFolder + trainFile, dataFolder + trainLabels, classNb)
(targetSeq1Te, targetSeq2Te) = simulate(eventNb, keyframeNb, sampleNb, dataFolder + testFile, dataFolder + testLabels, classNb + 1, (targetSeq1Tr, targetSeq2Tr))
trainTargets = [targetSeq1Tr, targetSeq2Tr]
print('Target sequences of')
print('class 1 = ' + str(trainTargets[0]))
print('class 2 = ' + str(trainTargets[1]))
print('\nBuilding and Training the 2 graphs (2 classes example)')
# Build graphs for each class with random edges
vertexNatures = {'unlinkedLeaf' : -1, 'while' : 0, 'before' : 1, 'whileNot' : 2, 'leaf' : 3}
commands = {'stop' : -1, 'swap' : 0, 'pivot' : 1}
argsCard = {commands['swap'] : 4, commands['pivot'] : 3, commands['stop'] : 0}
targetSeq = []
valuesPerClass = []
graphs = []
Author: CamilleEscher, Project: VideoRetrieval, Lines of code: 31, Source file: main.py
Example 18: range
return [5] + range(10, x + 10, 10)
FIVE_TO_FORTY = five_to_x(40)
FIVE_TO_SIXTY = five_to_x(60)
PARAMETER_GROUPS = [
# First group: both e1 & e2 are 5, 10 to 40
(FIVE_TO_FORTY, FIVE_TO_FORTY),
# Second group: e1 = 1 to 5, e2 = 5, 10 to 60
# (range(1, 6), FIVE_TO_SIXTY)
]
if __name__ == '__main__':
if len(argv) == 1:
print('No output path specified')
exit(1)
output_path = argv[1]
prepare_chromosomes(BLOCKS_PER_GENOME)
random.seed()
for i, group in enumerate(PARAMETER_GROUPS):
e1s, e2s = group
for e1 in e1s:
for e2 in e2s:
if not path.exists(output_path):
makedirs(output_path)
config = '_'.join([str(i), str(e1), str(e2)])
group_e1_e2_folder_path = path.join(output_path, config)
simulate(e1, e2, BLOCKS_PER_GENOME, CHROMOSOME_NUMBER, CircularChromosome, SIMULATIONS_PER_SETUP,
group_e1_e2_folder_path)
print('Finished {0}'.format(config))
Author: nkartashov, Project: 4genome_simulator, Lines of code: 31, Source file: generate_gasts_datasets.py
Example 19: simulate
elif index == Tstep_i:
Tstep = testlst[test]
elif index == delay_i:
delay = testlst[test]
elif index == beta_i:
beta = testlst[test]
elif index == PIinner0_i:
PIinner[0] = testlst[test]
elif index == PIinner1_i:
PIinner[1] = testlst[test]
elif index == PIouter0_i:
PIouter[0] = testlst[test]
elif index == PIouter1_i:
PIouter[1] = testlst[test]
#Do the simulation
data,state_in_time,tlst = simulate(ti,tlst,Tstep,mode,state,data,state_in_time,tmax,zmax,zland,covset,vwind,delay,beta,Kstep,PIinner,PIouter,emax,f_window,noise)
#Append K and x on instability point
if mode == 3:
Kcritlst.append(data[iKx][-int(delay/Tstep)])
elif mode == 4:
Kcritlst.append(data[iKy][-int(delay/Tstep)])
xcritlst.append(state_in_time[-int(delay/Tstep)][ix])
#Make model
slope,intercept,r,pvalue,std = sst.linregress(Kcritlst,xcritlst)
#Add the models and corresponding test
#[testparameter, value of testparameter, slope, intercept, std]
output = [index, testlst[test], slope, intercept, std]
outputs = np.vstack([outputs,output])
print "Done: index = ",index,"test = ",testlst[test],"Percent:",100.*(len(testlst)*index+test+1)/(len(testpars)*len(testlst)),"%"
#Results of running with mode = 3 [covset,m, Kstep,Tstep,delay,beta]
Author: martinjanssens, Project: stabdist, Lines of code: 31, Source file: distance.py
Example 20: range
ratios = []
for j in range(1, 5):
#for j in range(1, 61):
mms = i(physicalpages) # Instantiate with the number of totalpages of physical memory.
mms.shift = 200 # for Aging and NRU algorithms
mms.firstbit = 1 << 7 # for Aging algorithm
# create a working set from virtual page numbers ranging 0 to 9999
ws = access.wsmake(mem=range(virtualpages), rand=random, size=j)
# create a access list
mms.alist = access.makealist(range(virtualpages), ws, random, 0.95, accesscount)
# simulate accessing the physical memory to read pages in the access list
# and append the ratio of page faults
ratios.append((j, simulate.simulate(mms, mms.alist)))
print i.__name__ + ": " + str(j)
d.append(Gnuplot.Data(ratios, title=i.__name__, inline=1))
g = Gnuplot.Gnuplot()
#g('set data style lines')
#g('set style data lines')
g('set style data linespoints')
g('set yrange [0:]')
g('set terminal epslatex monochrome')
g('set output "diagram1.eps"')
g('set xlabel "working set size in pages"')
g('set ylabel "ratio of page faults to accesses"')
g.plot(*d)
Author: david78k, Project: pagereplacement, Lines of code: 31, Source file: diagram1.py
Note: The simulate.simulate examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by many developers, and the copyright of each snippet remains with its original author. Before redistributing or reusing the code, please consult the license of the corresponding project; do not reproduce this compilation without permission.