This article collects typical usage examples of the nose.tools.assert_less function in Python. If you are wondering exactly how assert_less is used, how it is called in practice, or what real-world examples look like, the hand-picked code examples below may help.
A total of 20 code examples of the assert_less function are shown below, sorted by popularity by default.
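Before the project-specific examples, here is a minimal sketch of the basic call. assert_less mirrors unittest's assertLess: assert_less(first, second, msg=None) passes when first < second and raises AssertionError otherwise. The test name and values below are hypothetical, chosen only for illustration; they are not taken from the examples that follow.

from nose.tools import assert_less

def test_latency_budget():
    # Passes because 0.12 < 0.5; otherwise raises AssertionError with the given message.
    elapsed_seconds = 0.12  # hypothetical measured value, for illustration only
    assert_less(elapsed_seconds, 0.5, msg="operation exceeded the latency budget")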
Example 1: test_read_write_project
def test_read_write_project():
    sample_song_compressed = os.path.join(
        SCRIPT_DIR, "test_data", "sample_song_compressed.json")
    with open(sample_song_compressed, "r") as fp:
        song_data_compressed = json.load(fp)
    song_data = filepack.decompress(song_data_compressed)
    song_name = "UNTOLDST"
    song_version = 23
    # 0xbadf00d for size in blocks is synthetic, since we don't really care
    # about its size for the purposes of this test
    bogus_size_blks = 0xbadf00d
    proj = Project(
        song_name, song_version, bogus_size_blks, song_data)
    assert_equal(proj.name, song_name)
    assert_equal(proj.version, song_version)
    raw_data = proj.get_raw_data()
    recompressed = filepack.compress(raw_data)
    assert_less(math.fabs(len(recompressed) - len(song_data_compressed)), 512)
    # Do comparison based on parsed object, since the actual input data can
    # contain noise
    proj_from_raw_data = Project(
        song_name, song_version, bogus_size_blks, raw_data)
    assert_equal(proj_from_raw_data._song_data, proj._song_data)
Developer: alexras, Project: pylsdj, Lines: 33, Source: test_project.py
Example 2: test_score_none
def test_score_none(root, encoding, **unused):
    with loom.query.get_server(root, debug=True) as server:
        preql = loom.preql.PreQL(server, encoding)
        fnames = preql.feature_names
        assert_less(
            abs(server.score([None for _ in fnames])),
            SCORE_TOLERANCE)
Developer: fritzo, Project: loom, Lines: 7, Source: test_query_math.py
Example 3: test_info_serialliar
def test_info_serialliar():
    fib_tracker = [0]
    inspector.info(SerialLiar(fib_tracker))
    # Nested attribute access should be cut off at 100 levels deep to avoid
    # infinite loops: https://github.com/ipython/ipython/issues/9122
    nt.assert_less(fib_tracker[0], 9000)
Developer: PKpacheco, Project: monitor-dollar-value-galicia, Lines: 7, Source: test_oinspect.py
Example 4: test_simple_stochastic_synapse
def test_simple_stochastic_synapse(sim, plot_figure=False):
    # in this test we connect
    sim.setup(min_delay=0.5)
    t_stop = 1000.0
    spike_times = np.arange(2.5, t_stop, 5.0)
    source = sim.Population(1, sim.SpikeSourceArray(spike_times=spike_times))
    neurons = sim.Population(4, sim.IF_cond_exp(tau_syn_E=1.0))
    synapse_type = sim.SimpleStochasticSynapse(weight=0.5,
                                               p=np.array([[0.0, 0.5, 0.5, 1.0]]))
    connections = sim.Projection(source, neurons, sim.AllToAllConnector(),
                                 synapse_type=synapse_type)
    source.record('spikes')
    neurons.record('gsyn_exc')
    sim.run(t_stop)
    data = neurons.get_data().segments[0]
    gsyn = data.analogsignals[0].rescale('uS')
    if plot_figure:
        import matplotlib.pyplot as plt
        for i in range(neurons.size):
            plt.subplot(neurons.size, 1, i+1)
            plt.plot(gsyn.times, gsyn[:, i])
        plt.savefig("test_simple_stochastic_synapse_%s.png" % sim.__name__)
    print(data.analogsignals[0].units)
    crossings = []
    for i in range(neurons.size):
        crossings.append(
            gsyn.times[:-1][np.logical_and(gsyn.magnitude[:-1, i] < 0.4, 0.4 < gsyn.magnitude[1:, i])])
    assert_equal(crossings[0].size, 0)
    assert_less(crossings[1].size, 0.6*spike_times.size)
    assert_greater(crossings[1].size, 0.4*spike_times.size)
    assert_equal(crossings[3].size, spike_times.size)
    assert_not_equal(crossings[1], crossings[2])
    print(crossings[1].size / spike_times.size)
    return data
Developer: antolikjan, Project: PyNN, Lines: 35, Source: test_synapse_types.py
Example 5: test_anim_concordance
def test_anim_concordance(self):
    """ANIm results concordant with JSpecies."""
    # Perform ANIm on the input directory contents
    # We have to separate nucmer/delta-filter command generation
    # because Travis-CI doesn't play nicely with changes we made
    # for local SGE/OGE integration.
    # This might be avoidable with a scheduler flag passed to
    # jobgroup generation in the anim.py module. That's a TODO.
    ncmds, fcmds = anim.generate_nucmer_commands(self.infiles, self.outdir)
    run_mp.multiprocessing_run(ncmds)
    # delta-filter commands need to be treated with care for
    # Travis-CI. Our cluster won't take redirection or semicolon
    # separation in individual commands, but the wrapper we wrote
    # for this (delta_filter_wrapper.py) can't be called under
    # Travis-CI. So we must deconstruct the commands below
    dfcmds = [
        " > ".join([" ".join(fcmd.split()[1:-1]), fcmd.split()[-1]])
        for fcmd in fcmds
    ]
    run_mp.multiprocessing_run(dfcmds)
    results = anim.process_deltadir(self.deltadir, self.orglengths)
    result_pid = results.percentage_identity
    result_pid.to_csv(os.path.join(self.outdir, "pyani_anim.tab"), sep="\t")
    # Compare JSpecies output to results
    result_pid = result_pid.sort_index(axis=0).sort_index(axis=1) * 100.0
    diffmat = result_pid.values - self.target["ANIm"].values
    anim_diff = pd.DataFrame(
        diffmat, index=result_pid.index, columns=result_pid.columns
    )
    anim_diff.to_csv(os.path.join(self.outdir, "pyani_anim_diff.tab"), sep="\t")
    assert_less(anim_diff.abs().values.max(), self.tolerance["ANIm"])
Developer: HuttonICS, Project: pyani, Lines: 34, Source: test_concordance.py
Example 6: check_updates_node
def check_updates_node(constructor):
    np.random.seed(42)
    x = np.random.randn(5).astype(fX)
    w = np.random.randn(5) * 20
    b = np.random.randn() + 35
    y = (np.dot(x, w) + b).astype(fX)[np.newaxis]
    graph = hth.RootGraph(
        [hth.AddSequential("model",
                           nodes=[hth.InputNode("x", shape=(5,)),
                                  hth.AffineNode("pred", num_units=1)]),
         hth.AddOne("target",
                    node=hth.InputNode("y", shape=(1,))),
         hth.AddOne("cost",
                    node=hth.SquaredErrorCostNode("mse"),
                    inputs={"target": ("target", "y"),
                            "pred": ("model", "pred")}),
         hth.AddOne("updates",
                    node=constructor("node"),
                    inputs={"cost": ("cost", "mse")}),
         hth.AddOne("fn",
                    node=hth.TheanoDictFunctionDSLNode(
                        "dsl",
                        inputs={"x": ("model", "x"),
                                "y": ("target", "y")},
                        outputs={"cost": ("cost", "mse")},
                        updates_path=("updates", "node"))),
         hth.UseDatumCallFunction("call", node_path=("fn", "dsl"))])
    prev_cost = graph(in_dict={"x": x, "y": y})["cost"]
    for _ in range(5):
        cost = graph(in_dict={"x": x, "y": y})["cost"]
        print prev_cost, cost
        nt.assert_less(cost, prev_cost)
        prev_cost = cost
Developer: diogo149, Project: hooky, Lines: 35, Source: test_utils.py
Example 7: _get_example_window
def _get_example_window(self, row, col):
    '''
    Returns a model-input-sized subwindow of self.examples_pixels.

    Parameters
    ----------
    row, col: int
        The grid row and column of the subwindow to extract.
    '''
    # input_format = self.model.input_node.output_format
    # input_shape = numpy.asarray(
    #     [input_format.shape[input_format.axes.index(a)]
    #      for a in ('0', '1')])
    input_shape = self.original_image_size
    gutter = 10  # space between images in self.status_pixels
    row_col = numpy.asarray([row, col])
    min_corner = gutter + (input_shape + gutter) * row_col
    max_corner = min_corner + input_shape
    assert_less(max_corner[0], self.examples_pixels.shape[0])
    assert_less(max_corner[1], self.examples_pixels.shape[1])
    return self.examples_pixels[min_corner[0]:max_corner[0],
                                min_corner[1]:max_corner[1],
                                :]
Developer: SuperElectric, Project: poselearn, Lines: 25, Source: video_demo_elev_azim.py
Example 8: test_center
def test_center():
    assert_true(not RE._run_is_open)
    det = SynGauss('det', motor, 'motor', 0, 1000, 1, 'poisson', True)
    d = {}
    cen = Center([det], 'det', motor, 0.1, 1.1, 0.01, d)
    RE(cen)
    assert_less(abs(d['center']), 0.1)
Developer: klauer, Project: bluesky, Lines: 7, Source: test_scans.py
Example 9: test_agent_exit
def test_agent_exit(db, fname, backend):
    r = root_metrics.agent_exit(db=db)
    obs = r()
    if obs is None:
        return
    assert_less(0, len(obs))
    assert_equal('AgentExit', r.name)
Developer: cyclus, Project: cymetric, Lines: 7, Source: test_root_metrics.py
Example 10: test_decom_schedule
def test_decom_schedule(db, fname, backend):
    r = root_metrics.decom_schedule(db=db)
    obs = r()
    if obs is None:
        return
    assert_less(0, len(obs))
    assert_equal('DecomSchedule', r.name)
Developer: cyclus, Project: cymetric, Lines: 7, Source: test_root_metrics.py
Example 11: test_products
def test_products(db, fname, backend):
    r = root_metrics.products(db=db)
    obs = r()
    if obs is None:
        return
    assert_less(0, len(obs))
    assert_equal('Products', r.name)
Developer: cyclus, Project: cymetric, Lines: 7, Source: test_root_metrics.py
Example 12: test_compressed_encrypt_then_decrypt_string
def test_compressed_encrypt_then_decrypt_string():
    plaintext = "X" * 4096
    key = "this is my key"
    ciphertext = crypto.encrypt_string(plaintext, key, compress=True)
    assert_less(len(ciphertext), len(plaintext) / 10)
    plaintext_after = crypto.decrypt_string(ciphertext, key)
    assert_equal(plaintext, plaintext_after)
Developer: asimihsan, Project: crypto_example, Lines: 7, Source: test_crypto.py
Example 13: test_within_avg
def test_within_avg():
    args = Namespace(**args_dict)
    avg_test_val = 3
    args.v_avg = avg_test_val
    roster = run(NFL, args)
    for player in roster.players:
        ntools.assert_less(abs(player.v_avg), avg_test_val)
Developer: imdone, Project: draft-kings-fun, Lines: 7, Source: test_command_line.py
Example 14: check_amici
def check_amici(brief, expectation):
    def standardize(nonstandard: str) -> str:
        return re.sub(r'[ .]', '', nonstandard.strip().lower())
    observation = list(f.amici(brief))
    for expected_amicus in expectation:
        for observed_amicus in observation:
            if standardize(expected_amicus) in standardize(observed_amicus):
                break
        else:
            msg = '''The expected amicus "%s" should be found in the following brief.
%s
But it is not among the following values output from the amici function.
%s'''
            raise AssertionError(msg % (expected_amicus, brief, pformat(observation)))
    lengths = (len(observation), len(expectation))
    if len(observation) < len(expectation):  # - 1: # Failures at non-Oxford comma are okay.
        msg = 'The amici were not broken up enough; only %d amici were reported, but there are supposed to be %d:\n' + '\n* '.join(observation)
        raise AssertionError(msg % lengths)
    if len(observation) > len(expectation) + 1:
        msg = 'The amici were too broken up; %d amici were reported, but there are only supposed to be %d:\n' + '\n* '.join(observation)
        raise AssertionError(msg % lengths)
    for observed_amicus in observation:
        n.assert_less(len(observed_amicus), 80, msg=observation)
Developer: tlevine, Project: friendly_brief, Lines: 28, Source: test_read.py
Example 15: test_gmlan
def test_gmlan():
    p = connect_wo_esp()
    if p.legacy:
        return
    # enable output mode
    p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
    # enable CAN loopback mode
    p.set_can_loopback(True)
    p.set_can_speed_kbps(1, SPEED_NORMAL)
    p.set_can_speed_kbps(2, SPEED_NORMAL)
    p.set_can_speed_kbps(3, SPEED_GMLAN)
    # set gmlan on CAN2
    for bus in [Panda.GMLAN_CAN2, Panda.GMLAN_CAN3, Panda.GMLAN_CAN2, Panda.GMLAN_CAN3]:
        p.set_gmlan(bus)
        comp_kbps_gmlan = time_many_sends(p, 3)
        assert_greater(comp_kbps_gmlan, 0.8 * SPEED_GMLAN)
        assert_less(comp_kbps_gmlan, 1.0 * SPEED_GMLAN)
        p.set_gmlan(None)
        comp_kbps_normal = time_many_sends(p, bus)
        assert_greater(comp_kbps_normal, 0.8 * SPEED_NORMAL)
        assert_less(comp_kbps_normal, 1.0 * SPEED_NORMAL)
        print("%d: %.2f kbps vs %.2f kbps" % (bus, comp_kbps_gmlan, comp_kbps_normal))
Developer: n2aws, Project: panda, Lines: 29, Source: 2_usb_to_can.py
Example 16: test_score_counts_is_normalized
def test_score_counts_is_normalized(Model, EXAMPLE, sample_count):
    for sample_size in iter_valid_sizes(EXAMPLE, max_size=10):
        model = Model()
        model.load(EXAMPLE)
        if Model.__name__ == 'LowEntropy' and sample_size < model.dataset_size:
            print 'WARNING LowEntropy.score_counts normalization is imprecise'
            print '  when sample_size < dataset_size'
            tol = 0.5
        else:
            tol = 0.01
        probs_dict = {}
        for _ in xrange(sample_count):
            value = model.sample_assignments(sample_size)
            sample = canonicalize(value)
            if sample not in probs_dict:
                assignments = dict(enumerate(value))
                counts = count_assignments(assignments)
                prob = math.exp(model.score_counts(counts))
                probs_dict[sample] = prob
        total = sum(probs_dict.values())
        assert_less(abs(total - 1), tol, 'not normalized: {}'.format(total))
Developer: datamicroscopes, Project: distributions, Lines: 25, Source: test_clustering.py
Example 17: test_gmlan_bad_toggle
def test_gmlan_bad_toggle():
    p = connect_wo_esp()
    if p.legacy:
        return
    # enable output mode
    p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
    # enable CAN loopback mode
    p.set_can_loopback(True)
    # GMLAN_CAN2
    for bus in [Panda.GMLAN_CAN2, Panda.GMLAN_CAN3]:
        p.set_gmlan(bus)
        comp_kbps_gmlan = time_many_sends(p, 3)
        assert_greater(comp_kbps_gmlan, 0.6 * SPEED_GMLAN)
        assert_less(comp_kbps_gmlan, 1.0 * SPEED_GMLAN)
    # normal
    for bus in [Panda.GMLAN_CAN2, Panda.GMLAN_CAN3]:
        p.set_gmlan(None)
        comp_kbps_normal = time_many_sends(p, bus)
        assert_greater(comp_kbps_normal, 0.6 * SPEED_NORMAL)
        assert_less(comp_kbps_normal, 1.0 * SPEED_NORMAL)
Developer: n2aws, Project: panda, Lines: 25, Source: 2_usb_to_can.py
Example 18: test_aniblastall_concordance
def test_aniblastall_concordance(self):
    """ANIblastall results concordant with JSpecies."""
    # Perform ANIblastall on the input directory contents
    outdir = os.path.join(self.outdir, "blastall")
    os.makedirs(outdir, exist_ok=True)
    fragfiles, fraglengths = anib.fragment_fasta_files(
        self.infiles, outdir, self.fragsize
    )
    jobgraph = anib.make_job_graph(
        self.infiles, fragfiles, anib.make_blastcmd_builder("ANIblastall", outdir)
    )
    assert_equal(0, run_mp.run_dependency_graph(jobgraph))
    results = anib.process_blast(
        outdir, self.orglengths, fraglengths, mode="ANIblastall"
    )
    result_pid = results.percentage_identity
    result_pid.to_csv(os.path.join(self.outdir, "pyani_aniblastall.tab"), sep="\t")
    # Compare JSpecies output to results
    result_pid = result_pid.sort_index(axis=0).sort_index(axis=1) * 100.0
    diffmat = result_pid.values - self.target["ANIb"].values
    aniblastall_diff = pd.DataFrame(
        diffmat, index=result_pid.index, columns=result_pid.columns
    )
    aniblastall_diff.to_csv(
        os.path.join(self.outdir, "pyani_aniblastall_diff.tab"), sep="\t"
    )
    assert_less(aniblastall_diff.abs().values.max(), self.tolerance["ANIblastall"])
Developer: HuttonICS, Project: pyani, Lines: 28, Source: test_concordance.py
Example 19: test
def test():
    year = 2013
    reform = landais_piketty_saez.build_reform(base.tax_benefit_system)
    scenario = reform.new_scenario().init_single_entity(
        axes = [
            dict(
                count = 10,
                max = 30000,
                min = 0,
                name = 'sali',
                ),
            ],
        period = periods.period('year', year),
        parent1 = dict(birth = datetime.date(year - 40, 1, 1)),
        parent2 = dict(birth = datetime.date(year - 40, 1, 1)),
        enfants = [
            dict(birth = datetime.date(year - 9, 1, 1)),
            dict(birth = datetime.date(year - 9, 1, 1)),
            ],
        )
    reference_simulation = scenario.new_simulation(debug = True, reference = True)
    error_margin = 0.01
    af = reference_simulation.calculate('af')
    expected_af = [1532.16] * 10
    assert_less(max(abs(expected_af - af)), error_margin)
    revdisp = reference_simulation.calculate('revdisp')
    reform_simulation = scenario.new_simulation(debug = True)
    reform_af = reform_simulation.calculate('af')
    assert_less(max(abs(expected_af - reform_af)), error_margin)
    reform_revdisp = reform_simulation.calculate('revdisp')
Developer: clbe, Project: openfisca-france, Lines: 33, Source: test_landais_piketty_saez.py
Example 20: __init__
def __init__(self):
    self.mp = get_shared_mailpile()
    self.mp.set("sys.postinglist_kb=%s" % postinglist_kb)
    self.mp.set("prefs.num_results=50")
    self.mp.set("prefs.default_order=rev-date")
    results = self.mp.search(*query)
    assert_less(float(results.as_dict()["elapsed"]), 0.2)
Developer: Akhilan, Project: Mailpile, Lines: 7, Source: test_performance.py
Note: The nose.tools.assert_less examples in this article were compiled from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution and use are subject to the corresponding projects' licenses. Do not republish without permission.