本文整理汇总了Python中nose.tools.assert_true函数的典型用法代码示例。如果您正苦于以下问题:Python assert_true函数的具体用法?Python assert_true怎么用?Python assert_true使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了assert_true函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: test_make_dig_points
def test_make_dig_points():
    """Test application of Polhemus HSP to info"""
    # HSP file alone: points are stored as generic digitization entries.
    hsp_points = _read_dig_points(hsp_fname)
    info = create_info(ch_names=['Test Ch'], sfreq=1000., ch_types=None)
    assert_false(info['dig'])
    info['dig'] = _make_dig_points(dig_points=hsp_points)
    assert_true(info['dig'])
    assert_array_equal(info['dig'][0]['r'], [-106.93, 99.80, 68.81])

    # ELP file: the first three points are the fiducials (nasion, LPA, RPA).
    elp_points = _read_dig_points(elp_fname)
    nasion, lpa, rpa = elp_points[:3]
    info = create_info(ch_names=['Test Ch'], sfreq=1000., ch_types=None)
    assert_false(info['dig'])
    info['dig'] = _make_dig_points(nasion, lpa, rpa, elp_points[3:], None)
    assert_true(info['dig'])
    nasion_idx = [d['ident'] for d in info['dig']].index(FIFF.FIFFV_POINT_NASION)
    assert_array_equal(info['dig'][nasion_idx]['r'],
                       np.array([1.3930, 13.1613, -4.6967]))

    # Points of the wrong dimensionality must raise ValueError, whichever
    # positional slot (nasion, lpa, rpa, hsp, extra) they are passed in.
    for bad_args in ((nasion[:2],),
                     (None, lpa[:2]),
                     (None, None, rpa[:2]),
                     (None, None, None, elp_points[:, :2]),
                     (None, None, None, None, elp_points[:, :2])):
        assert_raises(ValueError, _make_dig_points, *bad_args)
开发者ID:esdalmaijer,项目名称:mne-python,代码行数:27,代码来源:test_meas_info.py
示例2: test_valid_signature
def test_valid_signature(self):
    """Check RSA-SHA1 signing and verification against the known examples."""
    for case in self._examples:
        secret = case["private_key"]
        certificate = case["certificate"]
        pub_key = case["public_key"]
        target_url = case["url"]
        http_method = case["method"]
        params = case["oauth_params"]
        expected = case["oauth_signature"]
        # Signing with the RSA private key must reproduce the recorded value.
        actual = generate_rsa_sha1_signature(secret,
                                             method=http_method,
                                             url=target_url,
                                             oauth_params=params)
        assert_equal(expected, actual)
        # Verification must succeed with the X.509 certificate and,
        # equivalently, with the bare RSA public key.
        for verification_key in (certificate, pub_key):
            assert_true(verify_rsa_sha1_signature(
                verification_key, expected, http_method, target_url, params))
开发者ID:davidlehn,项目名称:pyoauth,代码行数:25,代码来源:test_pyoauth_protocol.py
示例3: test_em_gmm_cv
def test_em_gmm_cv():
    # Comparison of different GMMs using cross-validation
    # generate some data: two Gaussian clusters in `dim` dimensions,
    # the second shifted by 3 and scaled by 2
    dim = 2
    xtrain = np.concatenate((nr.randn(100, dim), 3 + 2 * nr.randn(100, dim)))
    xtest = np.concatenate((nr.randn(1000, dim), 3 + 2 * nr.randn(1000, dim)))
    #estimate different GMMs for xtrain, and test it on xtest
    prec_type = 'full'
    k, maxiter, delta = 2, 300, 1.e-4
    ll = []  # mean test log-likelihood of each candidate model, in order
    # model 1: k=2, full precision
    lgmm = GMM(k,dim,prec_type)
    lgmm.initialize(xtrain)
    bic = lgmm.estimate(xtrain,maxiter, delta)
    ll.append(lgmm.test(xtest).mean())
    # model 2: k=2, diagonal precision
    prec_type = 'diag'
    lgmm = GMM(k, dim, prec_type)
    lgmm.initialize(xtrain)
    bic = lgmm.estimate(xtrain, maxiter, delta)
    ll.append(lgmm.test(xtest).mean())
    # models 3-5: diagonal precision with k = 1, 3, 10
    # NOTE(review): this loop never calls lgmm.estimate() before scoring, so
    # these models are only initialized.  The source page reports more lines
    # than are shown here, so the estimate() call may have been lost in
    # excerpting -- TODO confirm against the original test_gmm.py.
    for k in [1, 3, 10]:
        lgmm = GMM(k,dim,prec_type)
        lgmm.initialize(xtrain)
        ll.append(lgmm.test(xtest).mean())
    # ll[4] (k=10 model) should score below ll[1] (the fitted k=2 diag model)
    assert_true(ll[4] < ll[1])
开发者ID:FNNDSC,项目名称:nipy,代码行数:32,代码来源:test_gmm.py
示例4: test_check_threshold
def test_check_threshold():
    """check_threshold must reject malformed inputs and score percentiles."""
    matrix = np.array([[1., 2.],
                       [2., 1.]])
    label = 'edge_threshold'
    strategy = 'fast_abs_percentile'
    # Strings that are not "<number>%" must raise a descriptive ValueError.
    for bad_value in ('0.1', '10', '10.2.3%', 'asdf%'):
        assert_raises_regex(ValueError,
                            '{0}.+should be a number followed by '
                            'the percent sign'.format(label),
                            check_threshold,
                            bad_value, matrix, strategy, label)
    # A threshold that is neither number nor string must raise TypeError.
    assert_raises_regex(TypeError,
                        '{0}.+should be either a number or a string'.format(label),
                        check_threshold,
                        object(), matrix, strategy, label)
    # The 50% percentile of |[1, 2, 2, 1]| lies strictly between 1 and 2.
    score = check_threshold("50%", matrix,
                            percentile_calculate=fast_abs_percentile,
                            name='threshold')
    assert_true(1. < score <= 2.)
开发者ID:carlosf,项目名称:nilearn,代码行数:26,代码来源:test_displays.py
示例5: _compare
def _compare(a, b):
    """Compare two python objects.

    Recursively asserts equality of dict/Info mappings, lists, sparse CSR
    matrices, ndarrays and scalars.  Keys listed in ``skip_types`` are
    ignored.  The module-level ``last_keys`` keeps a fixed-length ring of the
    most recently visited dict keys, printed on failure to show where the
    comparison diverged.
    """
    global last_keys
    # Keys whose values are derived/environment-specific and not compared.
    skip_types = ['whitener', 'proj', 'reginv', 'noisenorm', 'nchan',
                  'command_line', 'working_dir', 'mri_file', 'mri_id']
    try:
        if isinstance(a, (dict, Info)):
            assert_true(isinstance(b, (dict, Info)))
            for k, v in six.iteritems(a):
                # Every non-skipped key of `a` must also exist in `b`.
                if k not in b and k not in skip_types:
                    raise ValueError('First one had one second one didn\'t:\n'
                                     '%s not in %s' % (k, b.keys()))
                if k not in skip_types:
                    # Rotate the key ring: drop the oldest, prepend current.
                    last_keys.pop()
                    last_keys = [k] + last_keys
                    _compare(v, b[k])
            # Symmetric check: every non-skipped key of `b` must be in `a`.
            for k, v in six.iteritems(b):
                if k not in a and k not in skip_types:
                    raise ValueError('Second one had one first one didn\'t:\n'
                                     '%s not in %s' % (k, a.keys()))
        elif isinstance(a, list):
            assert_true(len(a) == len(b))
            for i, j in zip(a, b):
                _compare(i, j)
        elif isinstance(a, sparse.csr.csr_matrix):
            # Compare the CSR internals: values, column indices, row pointers.
            assert_array_almost_equal(a.data, b.data)
            assert_equal(a.indices, b.indices)
            assert_equal(a.indptr, b.indptr)
        elif isinstance(a, np.ndarray):
            assert_array_almost_equal(a, b)
        else:
            assert_equal(a, b)
    except Exception:
        # Show the key path that led to the failing comparison, then re-raise.
        print(last_keys)
        raise
开发者ID:claire-braboszcz,项目名称:mne-python,代码行数:35,代码来源:test_inverse.py
示例6: test_default_diverging_vlims
def test_default_diverging_vlims(self):
    """Default kwargs should center the color limits symmetrically about 0."""
    mapper = mat._HeatMapper(self.df_norm, **self.default_kws)
    extreme = max(abs(self.x_norm.min()), abs(self.x_norm.max()))
    nt.assert_equal(mapper.vmin, -extreme)
    nt.assert_equal(mapper.vmax, extreme)
    nt.assert_true(mapper.divergent)
开发者ID:petebachant,项目名称:seaborn,代码行数:7,代码来源:test_matrix.py
示例7: test_WilsonLT_Defaults_attrs1
def test_WilsonLT_Defaults_attrs1():
    '''Confirm default geo_all equivalence in derived classes with base.

    Both the base-class and sub-class ``geos_all`` attributes must contain at
    least the geometry strings below.  Supersets are accepted so that
    ``BaseDefaults().geo_inputs`` can be extended without breaking this test.
    '''
    geos_all = [
        '0-0-2000',
        '0-0-1000',
        '1000-0-0',
        '600-0-800',
        '600-0-400S',
        '500-500-0',
        '400-[200]-0',
        '400-200-800',
        '400-[200]-800',
        '400-200-400S',
        '400-[100,100]-0',
        '500-[250,250]-0',
        '400-[100,100]-800',
        '400-[100,100]-400S',
        '400-[100,100,100]-800',
        '500-[50,50,50,50]-0',
        '400-[100,100,100,100]-800',
        '400-[100,100,100,100,100]-800'
    ]
    default_attr1 = bdft.geos_all  # Base attribute
    default_attr2 = dft.geos_all   # Sub-class attribute
    expected = geos_all
    # Superset check allows extension in BaseDefaults().geo_inputs.
    actual1 = (set(default_attr1) >= set(expected))
    actual2 = (set(default_attr2) >= set(expected))
    # The original called nt.assert_true(actual1, expected): the second
    # positional argument of assert_true is the failure *message*, so the
    # expected list was silently used as a message (hence the old TODO).
    # Pass an explicit, meaningful msg instead.
    nt.assert_true(actual1,
                   msg='base geos_all is missing expected geometry strings')
    nt.assert_true(actual2,
                   msg='sub-class geos_all is missing expected geometry strings')
开发者ID:par2,项目名称:lamana,代码行数:34,代码来源:test_Wilson_LT.py
示例8: given_a_created_product_with_name_group1
def given_a_created_product_with_name_group1(step, product_id):
    """Lettuce step: create a product named `product_id` via the SDC API.

    Stores the created product's name in ``world.product_id`` for later steps.
    """
    body = dict_to_xml(default_product(name=product_id))
    response = api_utils.add_new_product(headers=world.headers, body=body)
    # Fail fast, showing the server's payload, if the creation request failed.
    assert_true(response.ok, response.content)
    # Parenthesized single-argument print behaves identically under
    # Python 2 (statement) and Python 3 (function) -- the original used
    # the Python-2-only bare `print` statement.
    print(response.content)
    world.product_id = response.json()[PRODUCT_NAME]
开发者ID:alberts-tid,项目名称:fiware-sdc,代码行数:7,代码来源:add_product_release.py
示例9: given_a_created_product_with_attributes_and_name_group1
def given_a_created_product_with_attributes_and_name_group1(step, product_id):
    """Lettuce step: create a product with two default metadata entries."""
    metadatas = create_default_metadata_or_attributes_list(2)
    payload = dict_to_xml(default_product(name=product_id, metadata=metadatas))
    creation = api_utils.add_new_product(headers=world.headers, body=payload)
    # Surface the server's payload when the creation request fails.
    assert_true(creation.ok, creation.content)
    world.product_id = creation.json()[PRODUCT_NAME]
开发者ID:alberts-tid,项目名称:fiware-sdc,代码行数:7,代码来源:add_product_release.py
示例10: step_impl
def step_impl(context):
    """Select from past the title's first letter back toward the label center.

    Records the expected selection text, its parent element and the caret's
    screen position on `context` for later verification steps.
    """
    driver = context.driver
    util = context.util
    element, parent, parent_text = get_element_parent_and_parent_text(
        driver, ".__start_label._title_label")
    # This is where our selection will end
    end = util.element_screen_center(element)
    end["left"] += 2  # Move it off-center for this test
    element.click()
    wedutil.wait_for_caret_to_be_in(util, parent)
    # From the label to before the first letter and then past the
    # first letter.
    ActionChains(driver)\
        .send_keys(*[Keys.ARROW_RIGHT] * 2)\
        .perform()
    # We need to get the location of the caret.
    start = wedutil.caret_selection_pos(driver)
    select_text(context, start, end)
    assert_true(util.is_something_selected(), "something must be selected")
    # The selection should cover exactly the first character of the title.
    context.expected_selection = parent_text[0:1]
    context.selection_parent = parent
    context.caret_screen_position = wedutil.caret_screen_pos(driver)
开发者ID:bennettbuchanan,项目名称:wed,代码行数:30,代码来源:caret.py
示例11: test_registered_classes_can_be_set_as_attrs
def test_registered_classes_can_be_set_as_attrs(self):
    """Assigning a dict to a registered key must wrap it in its container."""
    app_registry.register('dummy', DummyAppDataContainer)
    article = Article()
    article.app_data.dummy = {'answer': 42}
    # The raw dict comes back wrapped in the registered container class...
    tools.assert_true(isinstance(article.app_data.dummy, DummyAppDataContainer))
    tools.assert_equals(DummyAppDataContainer(article, {'answer': 42}),
                        article.app_data.dummy)
    # ...while the app_data mapping still compares equal to the plain dict.
    tools.assert_equals({'dummy': {'answer': 42}}, article.app_data)
开发者ID:divio,项目名称:django-appdata,代码行数:7,代码来源:test_fields.py
示例12: test_decimate
def test_decimate():
    """Test decimation of digitizer headshapes with too many points."""
    # Build a dense spherical headshape (ico-5 surface), in mm and in m.
    points_mm = _get_ico_surface(5)['rr'] * 100
    points_m = points_mm / 1000.
    # Write the mm version to a temporary file for read_raw_kit to consume.
    tempdir = _TempDir()
    hsp_file = op.join(tempdir, 'test_sphere.txt')
    np.savetxt(hsp_file, points_mm)
    # Reading must warn that the headshape has too many points.
    with warnings.catch_warnings(record=True) as w:
        raw = read_raw_kit(sqd_path, mrk_path, elp_txt_path, hsp_file)
    assert_true(any('more than' in str(ww.message) for ww in w))
    # Digitizer entries past the first 8 (fiducials/HPI) are the headshape,
    # now expressed in metres.
    decimated = np.array([dig['r'] for dig in raw.info['dig']])[8:]
    # With 10242 input points and a 5 mm decimation resolution, a bit over
    # 5000 points should survive; otherwise decimation changed or broke.
    assert_true(len(decimated) > 5000)
    # The mean distance from the centroid (sphere radius) must be preserved.
    radius_full = np.mean(np.sqrt(np.sum(
        (points_m - np.mean(points_m, axis=0)) ** 2, axis=1)))
    radius_dec = np.mean(np.sqrt(np.sum(
        (decimated - np.mean(decimated, axis=0)) ** 2, axis=1)))
    assert_almost_equal(radius_full, radius_dec, places=3)
开发者ID:HSMin,项目名称:mne-python,代码行数:29,代码来源:test_kit.py
示例13: test_validate_name
def test_validate_name(self):
    """Directory names containing '/' must be rejected by the mkdir API."""
    bad_name = '/invalid'
    response = self.client.post(
        '/desktop/api2/doc/mkdir',
        {'parent_uuid': json.dumps(self.home_dir.uuid),
         'name': json.dumps(bad_name)})
    payload = json.loads(response.content)
    # status -1 signals failure; include the payload in the assertion message.
    assert_equal(-1, payload['status'], payload)
    assert_true('invalid character' in payload['message'])
开发者ID:san21886,项目名称:hue,代码行数:7,代码来源:tests_doc2.py
示例14: test_ica_rank_reduction
def test_ica_rank_reduction():
    """Test recovery of full data when no source is rejected"""
    # Most basic recovery
    raw = Raw(raw_fname).crop(0.5, stop, False)
    raw.load_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]
    n_components = 5
    max_pca_components = len(picks)
    # Try keeping fewer / all PCA components and check the rank bookkeeping.
    for n_pca_components in [6, 10]:
        with warnings.catch_warnings(record=True):  # non-convergence
            warnings.simplefilter('always')
            ica = ICA(n_components=n_components,
                      max_pca_components=max_pca_components,
                      n_pca_components=n_pca_components,
                      method='fastica', max_iter=1).fit(raw, picks=picks)
        # Before cleaning, the data rank equals the number of channels.
        rank_before = raw.estimate_rank(picks=picks)
        assert_equal(rank_before, len(picks))
        raw_clean = ica.apply(raw, copy=True)
        rank_after = raw_clean.estimate_rank(picks=picks)
        # interaction between ICA rejection and PCA components difficult
        # to predict; rank_after often seems to be 1 higher than
        # n_pca_components
        assert_true(n_components < n_pca_components <= rank_after <=
                    rank_before)
开发者ID:mdclarke,项目名称:mne-python,代码行数:26,代码来源:test_ica.py
示例15: test_unicode_decode_error
def test_unicode_decode_error():
    """A wrong input encoding must surface as a UnicodeDecodeError."""
    # decode_error defaults to strict, so this should fail.
    # First, encode (as bytes) a unicode string...
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
    text_bytes = text.encode('utf-8')
    # ...then ask each analyzer to decode it as ascii, which must fail
    # because that is the wrong encoding.
    word_analyze = CountVectorizer(ngram_range=(1, 2),
                                   encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, word_analyze, text_bytes)
    char_analyze = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                                   encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, char_analyze, text_bytes)
    # The old `charset` alias must behave the same and emit exactly one
    # DeprecationWarning mentioning "charset".
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        char_analyze = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                                       charset='ascii').build_analyzer()
        assert_raises(UnicodeDecodeError, char_analyze, text_bytes)
        assert_equal(len(w), 1)
        assert_true(issubclass(w[0].category, DeprecationWarning))
        assert_true("charset" in str(w[0].message).lower())
开发者ID:BloodD,项目名称:scikit-learn,代码行数:26,代码来源:test_text.py
示例16: test_ica_reset
def test_ica_reset():
    """Test ICA resetting"""
    raw = Raw(raw_fname).crop(0.5, stop, False)
    raw.load_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]
    # Attributes that only exist on a fitted ICA instance.
    fitted_attrs = ('_pre_whitener', 'unmixing_matrix_', 'mixing_matrix_',
                    'n_components_', 'n_samples_', 'pca_components_',
                    'pca_explained_variance_', 'pca_mean_')
    with warnings.catch_warnings(record=True):
        ica = ICA(n_components=3, max_pca_components=3, n_pca_components=3,
                  method='fastica', max_iter=1).fit(raw, picks=picks)
    # Fitting populates every run-time attribute; _reset() removes them all.
    assert_true(all(hasattr(ica, name) for name in fitted_attrs))
    ica._reset()
    assert_true(not any(hasattr(ica, name) for name in fitted_attrs))
开发者ID:mdclarke,项目名称:mne-python,代码行数:25,代码来源:test_ica.py
示例17: test_similarity_lookup
def test_similarity_lookup(self):
    """Test Similarity Lookup.

    Tests that a similarity lookup for a kindle returns 10 results.
    """
    results = self.amazon.similarity_lookup(ItemId="B0051QVF7A")
    # The API does not guarantee exactly 10; require comfortably more than 5.
    assert_true(len(results) > 5)
开发者ID:Mondego,项目名称:pyreco,代码行数:7,代码来源:allPythonContent.py
示例18: testR
def testR(d=None, size=500):
    """Cross-check a design's formula terms against R's lm() coefficients.

    Parameters
    ----------
    d : design object, optional
        Design whose `Rstr`/`formula.terms` are checked; a fresh `simple()`
        is built when omitted.  (The original default `d=simple()` was
        evaluated once at definition time and shared across every call.)
    size : int
        Number of random observations to generate.

    Returns
    -------
    tuple of (d, X, nR, nF) for further inspection.
    """
    if d is None:
        d = simple()
    X = random_from_categorical_formula(d, size)
    X = ML.rec_append_fields(X, 'response', np.random.standard_normal(size))
    # NOTE(review): tempfile.mktemp is race-prone; kept for parity with the
    # original, but tempfile.mkstemp would be safer.
    fname = tempfile.mktemp()
    ML.rec2csv(X, fname)
    Rstr = '''
data = read.table("%s", sep=',', header=T)
cur.lm = lm(response ~ %s, data)
COEF = coef(cur.lm)
''' % (fname, d.Rstr)
    try:
        rpy2.robjects.r(Rstr)
    finally:
        # Remove the temp file even if the R evaluation raises.
        remove(fname)
    nR = list(np.array(rpy2.robjects.r("names(COEF)")))
    nt.assert_true('(Intercept)' in nR)
    nR.remove("(Intercept)")
    # Normalize formula terms to R's naming (no underscores, ':' interaction).
    nF = [str(t).replace("_","").replace("*",":") for t in d.formula.terms]
    # Compare term sets order-independently, including interaction factors.
    nR = sorted([sorted(n.split(":")) for n in nR])
    nt.assert_true('1' in nF)
    nF.remove('1')
    nF = sorted([sorted(n.split(":")) for n in nF])
    nt.assert_equal(nR, nF)
    return d, X, nR, nF
开发者ID:fperez,项目名称:formula,代码行数:29,代码来源:random_design.py
示例19: test_io_inverse_operator
def test_io_inverse_operator():
    """Test IO of inverse_operator
    """
    tempdir = _TempDir()
    inverse_operator = read_inverse_operator(fname_inv)
    x = repr(inverse_operator)
    assert_true(x)  # repr must be non-empty
    assert_true(isinstance(inverse_operator['noise_cov'], Covariance))
    # just do one example for .gz, as it should generalize
    _compare_io(inverse_operator, '.gz')
    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        inv_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        write_inverse_operator(inv_badname, inverse_operator)
        read_inverse_operator(inv_badname)
    assert_naming(w, 'test_inverse.py', 2)
    # make sure we can write and read
    inv_fname = op.join(tempdir, 'test-inv.fif')
    args = (10, 1. / 9., 'dSPM')  # presumably (nave, lambda2, method) -- TODO confirm
    inv_prep = prepare_inverse_operator(inverse_operator, *args)
    write_inverse_operator(inv_fname, inv_prep)
    inv_read = read_inverse_operator(inv_fname)
    _compare(inverse_operator, inv_read)
    # preparing the read-back operator must match the prepared original
    inv_read_prep = prepare_inverse_operator(inv_read, *args)
    _compare(inv_prep, inv_read_prep)
    # preparing an already-prepared operator must be idempotent
    inv_prep_prep = prepare_inverse_operator(inv_prep, *args)
    _compare(inv_prep, inv_prep_prep)
开发者ID:claire-braboszcz,项目名称:mne-python,代码行数:30,代码来源:test_inverse.py
示例20: test_incremental
def test_incremental(self):
    """A blank line must terminate an open cell-magic block."""
    splitter = self.sp
    splitter.push('%%cellm line2\n')
    # The cell magic is still open, so more input is accepted.
    nt.assert_true(splitter.push_accepts_more())  # 1
    splitter.push('\n')
    # In this case, a blank line should end the cell magic
    nt.assert_false(splitter.push_accepts_more())  # 2
开发者ID:marcosptf,项目名称:fedora,代码行数:7,代码来源:test_inputsplitter.py
注:本文中的nose.tools.assert_true函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论