本文整理汇总了Python中numpy.invert函数的典型用法代码示例。如果您正苦于以下问题:Python invert函数的具体用法?Python invert怎么用?Python invert使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了invert函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: ScoreSimilarity
def ScoreSimilarity(ideal, pattern):
    """Count positions where *pattern* agrees with *ideal*.

    Agreement is counted both where bits are set in both masks ("white")
    and where bits are clear in both masks ("black").
    NOTE(review): the original author doubted this scoring was correct —
    verify against the intended similarity metric.
    """
    agree_set = np.sum(np.bitwise_and(ideal, pattern))
    agree_clear = np.sum(np.bitwise_and(np.invert(ideal), np.invert(pattern)))
    return agree_set + agree_clear
开发者ID:ebensh,项目名称:fractal_evolution,代码行数:7,代码来源:main.py
示例2: three_dim_pos_bundle
def three_dim_pos_bundle(table, key1, key2, key3,
                         return_complement=False, **kwargs):
    """
    Method returns 3d positions of particles in
    the standard form of the inputs used by many of the
    functions in the `~halotools.mock_observables`.

    Parameters
    ----------
    table : data table
        `~astropy.table.Table` object
    key1, key2, key3: strings
        Keys used to access the relevant columns of the data table.
    mask : array, optional
        array used to apply a mask over the input ``table``. Default is None.
    return_complement : bool, optional
        If set to True, method will also return the table subset given by the inverse mask.
        Default is False.

    Returns
    -------
    ndarray of shape (n, 3), or a tuple of two such arrays when a mask is
    given and ``return_complement`` is True.
    """
    if 'mask' in kwargs:  # idiomatic membership test (no .keys())
        mask = kwargs['mask']
        x, y, z = table[key1][mask], table[key2][mask], table[key3][mask]
        if return_complement:
            # Compute the complementary mask once instead of three times.
            inverse_mask = np.invert(mask)
            x2 = table[key1][inverse_mask]
            y2 = table[key2][inverse_mask]
            z2 = table[key3][inverse_mask]
            return np.vstack((x, y, z)).T, np.vstack((x2, y2, z2)).T
        return np.vstack((x, y, z)).T
    x, y, z = table[key1], table[key2], table[key3]
    return np.vstack((x, y, z)).T
开发者ID:bsipocz,项目名称:halotools,代码行数:34,代码来源:mock_helpers.py
示例3: _make_image_mask
def _make_image_mask(outlines, pos, res):
    """Aux function

    Build a boolean (res, res) image mask covering the area enclosed by the
    'mask_pos' contour in *outlines*, shrinking the positions *pos* in place
    until all of them fall inside the contour.

    Parameters
    ----------
    outlines : dict
        Must contain 'mask_pos', a sequence of 2d contour points.
    pos : ndarray, shape (n_points, 2)
        Sensor positions; scaled in place (``pos *= 0.99`` per iteration)
        while any point lies outside the contour.
    res : int
        Side length (in grid points) of the output mask.

    Returns
    -------
    image_mask : ndarray of bool, shape (res, res)
    pos : ndarray
        The (possibly shrunken) positions.
    """
    mask_ = np.c_[outlines['mask_pos']]
    # Bounding box of the contour; the +/-inf sentinels keep min/max
    # well-defined even for an empty contour array.
    xmin, xmax = (np.min(np.r_[np.inf, mask_[:, 0]]),
                  np.max(np.r_[-np.inf, mask_[:, 0]]))
    ymin, ymax = (np.min(np.r_[np.inf, mask_[:, 1]]),
                  np.max(np.r_[-np.inf, mask_[:, 1]]))
    inside = _inside_contour(pos, mask_)
    outside = np.invert(inside)
    outlier_points = pos[outside]
    while np.any(outlier_points):  # auto shrink
        # Scale every position towards the origin and re-test until no
        # point remains outside the contour.
        pos *= 0.99
        inside = _inside_contour(pos, mask_)
        outside = np.invert(inside)
        outlier_points = pos[outside]
    image_mask = np.zeros((res, res), dtype=bool)
    # Rasterize the contour: mark every grid point of the res x res lattice
    # (spanning the bounding box) that falls inside the contour.
    xi_mask = np.linspace(xmin, xmax, res)
    yi_mask = np.linspace(ymin, ymax, res)
    Xi_mask, Yi_mask = np.meshgrid(xi_mask, yi_mask)
    pos_ = np.c_[Xi_mask.flatten(), Yi_mask.flatten()]
    inds = _inside_contour(pos_, mask_)
    image_mask[inds.reshape(image_mask.shape)] = True
    return image_mask, pos
开发者ID:BushraR,项目名称:mne-python,代码行数:28,代码来源:topomap.py
示例4: get_combined_calibration
def get_combined_calibration(self, nbc_disc, nbc_bulge, split_half=2, names=["m", "c1", "c2"]):
    # Merge per-object calibration columns from a disc fit (nbc_disc) and a
    # bulge fit (nbc_bulge) into this object's results table: rows flagged
    # is_bulge take their values from nbc_bulge, all others from nbc_disc.
    # NOTE(review): Python 2 code (print statements, exec of a code string).
    # NOTE(review): `names` is a mutable default argument shared across calls.
    print "Will combine bulge and disc calibration fits."
    if split_half==0:
        for bias in names:
            # Add an empty column for this bias parameter, then fill it
            # from the bulge/disc fits.
            self.res = arr.add_col(self.res, bias, np.zeros_like(self.res['e1']))
            bulge = self.res["is_bulge"].astype(bool)
            print "column : %s, bulge : %d/%d, disc : %d/%d"%(bias, self.res[bulge].size, self.res.size, self.res[np.invert(bulge)].size, self.res.size)
            try:
                self.res[bias][bulge] = nbc_bulge.res[bias][bulge]
            except:
                # NOTE(review): bare except dropping into the debugger --
                # presumably left in from development.
                import pdb ; pdb.set_trace()
            self.res[bias][np.invert(bulge)] = nbc_disc.res[bias][np.invert(bulge)]
    else:
        # Same merge, but applied to the attribute res1/res2 selected by
        # split_half: every occurrence of the substring "res" in the code
        # template (including nbc_bulge.res/nbc_disc.res) is rewritten to
        # "res%d" before exec -- TODO confirm that rewrite is intended for
        # the nbc_* attributes as well.
        com ="""
for i, bias in enumerate(names):
    bulge = self.res['is_bulge'].astype(bool)
    if i==0: print 'bulge :', self.res[bulge].size, 'disc : ', self.res[np.invert(bulge)].size, 'total : ', self.res.size
    self.res = arr.add_col(self.res, bias, np.zeros_like(self.res['e1']))
    print 'column : ', bias
    self.res[bias][bulge] = nbc_bulge.res[bias][bulge]
    self.res[bias][np.invert(bulge)] = nbc_disc.res[bias][np.invert(bulge)]""".replace("res", "res%d"%split_half)
        exec(com)
    print "done"
开发者ID:ssamuroff,项目名称:cosmology_code,代码行数:27,代码来源:nbc.py
示例5: archive_human_masks
def archive_human_masks(human_directory, new_directory, work_directory):
    '''
    For a directory of hand-drawn masks, mask out everything in the
    accompanying bright-field file except for the worm itself and a
    100-pixel surrounding area to save disk space. Also, re-compress all
    images to maximize compression and space efficiency.

    Parameters
    ----------
    human_directory : str
        Root directory containing subdirectories of hand-drawn masks.
    new_directory : str
        Destination root for the masked, recompressed copies.
    work_directory : str
        Fallback root searched for the bright-field image when it is not
        found under ``human_directory``.
    '''
    # Idiom fix: build paths with os.path.join instead of repeated
    # `dir + os.path.sep + name` string concatenation.
    for a_subdir in os.listdir(human_directory):
        subdir_path = os.path.join(human_directory, a_subdir)
        if os.path.isdir(subdir_path):
            new_subdir_path = os.path.join(new_directory, a_subdir)
            folderStuff.ensure_folder(new_subdir_path)
            for a_file in os.listdir(subdir_path):
                if a_file.split(' ')[-1] == 'hmask.png':
                    # Skip files already archived.
                    if not os.path.isfile(os.path.join(new_subdir_path, a_file)):
                        print('Up to ' + a_subdir + ' ' + a_file + '.')
                        my_stem = a_file.split(' ')[0]
                        my_mask = freeimage.read(os.path.join(subdir_path, my_stem + ' hmask.png'))
                        bf_path = os.path.join(subdir_path, my_stem + ' bf.png')
                        if os.path.isfile(bf_path):
                            my_image = freeimage.read(bf_path)
                        else:
                            # Fall back to the working copy of the bright-field image.
                            my_image = freeimage.read(bf_path.replace(human_directory, work_directory))
                        # Keep the worm plus a 100-pixel halo around it;
                        # blank everything else to improve compressibility.
                        area_mask = my_mask.copy().astype('bool')
                        distance_from_mask = scipy.ndimage.morphology.distance_transform_edt(np.invert(area_mask)).astype('uint16')
                        area_mask[distance_from_mask > 0] = True
                        area_mask[distance_from_mask > 100] = False
                        my_image[np.invert(area_mask)] = False
                        freeimage.write(my_image, os.path.join(new_subdir_path, my_stem + ' bf.png'), flags = freeimage.IO_FLAGS.PNG_Z_BEST_COMPRESSION)
                        freeimage.write(my_mask, os.path.join(new_subdir_path, my_stem + ' hmask.png'), flags = freeimage.IO_FLAGS.PNG_Z_BEST_COMPRESSION)
                elif a_file.split('.')[-1] == 'json':
                    # Metadata files are copied through unchanged.
                    shutil.copyfile(os.path.join(subdir_path, a_file), os.path.join(new_subdir_path, a_file))
    return
开发者ID:zhang-wb,项目名称:wormPhysiology,代码行数:28,代码来源:imageOperations.py
示例6: baseline_recovery_test
def baseline_recovery_test(self, model):
    """Check that an assembly-biased model perturbs its baseline method in
    the direction set by the assembias parameter while (approximately)
    conserving the baseline mean.

    Parameters
    ----------
    model : object
        Model exposing ``_method_name_to_decorate``, the decorated method,
        its ``baseline_``-prefixed counterpart, ``_split_ordinates``,
        ``param_dict`` and ``percentile_splitting_function``.
    """
    baseline_method = getattr(model, 'baseline_'+model._method_name_to_decorate)
    baseline_result = baseline_method(halo_table = self.toy_halo_table2)
    method = getattr(model, model._method_name_to_decorate)
    result = method(halo_table = self.toy_halo_table2)
    # Split halos into "old" (upper formation-time percentile) and "young".
    mask = self.toy_halo_table2['halo_zform_percentile'] >= model._split_ordinates[0]
    oldmean = result[mask].mean()
    youngmean = result[np.invert(mask)].mean()
    baseline_mean = baseline_result.mean()
    # The decorated method must actually differ between the two populations
    # and from the baseline.
    assert oldmean != youngmean
    assert oldmean != baseline_mean
    assert youngmean != baseline_mean
    # The sign of the assembias parameter determines which population is
    # boosted relative to the other.
    param_key = model._method_name_to_decorate + '_assembias_param1'
    param = model.param_dict[param_key]
    if param > 0:
        assert oldmean > youngmean
    elif param < 0:
        assert oldmean < youngmean
    else:
        assert oldmean == youngmean
    # Reconstruct the baseline mean as the split-weighted combination of the
    # old and young means; it must agree with the true baseline mean.
    split = model.percentile_splitting_function(halo_table = self.toy_halo_table2)
    split = np.where(mask, split, 1-split)
    derived_result = split*oldmean
    derived_result[np.invert(mask)] = split[np.invert(mask)]*youngmean
    derived_mean = derived_result[mask].mean() + derived_result[np.invert(mask)].mean()
    baseline_mean = baseline_result.mean()
    np.testing.assert_allclose(baseline_mean, derived_mean, rtol=1e-3)
开发者ID:surhudm,项目名称:halotools,代码行数:32,代码来源:test_assembias.py
示例7: data_checker_mask
def data_checker_mask(loader):
    # Visual sanity check for a patch loader: tile the first 100 patches of
    # one batch back onto a 600x600 canvas and save an image in which the
    # binary mask selects the red channel (masked pixels keep only red,
    # background keeps only green and blue).
    X, Y = next(loader.get())
    X, Y = X[:100].squeeze(), Y[:100]
    # assumes X and Y hold 100 patches of 60x60 each, so they tile the
    # 600x600 grid exactly -- TODO confirm against the loader's config.
    X, Y = X.transpose([1, 2, 0]), Y.transpose([1, 2, 0])
    stride = 60
    pos_x = np.arange(0, 600, stride)
    pos_y = np.arange(0, 600, stride)
    vx, vy = np.meshgrid(pos_x, pos_y)
    # Center coordinates of every tile in the grid.
    pos = np.stack([vx, vy]).reshape((2, -1)).transpose([1,0])+stride//2
    real, binary = np.zeros((600, 600)), np.zeros((600, 600))
    # Scatter the image patches and the mask patches back onto the canvas.
    real[patch_interface(pos[:,0], pos[:,1], stride//2)] = X
    binary[patch_interface(pos[:,0], pos[:,1], stride//2)] = Y
    binary = binary.astype('bool')
    # print(real.max(), binary.max(), binary.sum(), binary.size)
    # Compose an RGB image; the +500 offset shifts intensities before the
    # uint8 cast below -- presumably to brighten dark data, verify.
    img = np.stack([real]*3, axis=2)+500
    img[:,:, 0] = img[:,:, 0]*binary
    img[:,:, 1] = img[:,:, 1]*np.invert(binary)
    img[:,:, 2] = img[:,:, 2]*np.invert(binary)
    imwrite(rescale_intensity(img.astype('uint8')), "./save/data_check.png")
开发者ID:overshiki,项目名称:datasets,代码行数:27,代码来源:loader.py
示例8: uniform
def uniform():
    # Uniform crossover: each bit of a child comes from either parent with
    # equal probability, chosen by a random bit mask per pairing.
    bits_to_get = fast_random_bool((self.psize, self.numbit))
    pop_part = self.pop_part_rec # if fitness function is too hard, then it could be faster to take best children only for some part of population
    # When pop_part = 1.0 it is slower, but based on few tests it's better to leave the children with max fit. Maybe with some probability?
    bound = int(self.psize * pop_part)
    buff1 = np.empty((1, self.numbit), dtype=int)
    buff2 = np.empty((1, self.numbit), dtype=int)
    for p_index in range(bound):
        # `np.invert(m) + 2` maps 0 -> 1 and 1 -> 0, i.e. it complements the
        # mask -- assumes the mask is a 0/1 integer array; confirm the dtype
        # produced by fast_random_bool.
        # Child 1: bits from parent A where the mask is set, parent B elsewhere.
        buff1[0] = (self.data[self.parents[2 * p_index]] & bits_to_get[p_index]) + (
            self.data[self.parents[2 * p_index + 1]] & (np.invert(bits_to_get[p_index]) + 2))
        # Child 2: the complementary selection with the same mask.
        buff2[0] = (self.data[self.parents[2 * p_index + 1]] & bits_to_get[p_index]) + (
            self.data[self.parents[2 * p_index]] & (np.invert(bits_to_get[p_index]) + 2))
        # Keep whichever child scores higher.
        if self.fitness_function(buff1[0]) > self.fitness_function(buff2[0]):
            self.children[p_index] = buff1[0]
        else:
            self.children[p_index] = buff2[0]
    if bound != self.psize:
        # choose just first child, not necessarily the best
        self.children[bound:self.psize] = (self.data[self.parents[2 * bound::2]] & bits_to_get[
            bound:self.psize]) + (
            self.data[self.parents[2 * bound + 1::2]] & (
                np.invert(bits_to_get[bound:self.psize]) + 2))
    del buff1
    del buff2
    return
开发者ID:cog-isa,项目名称:aqjsm,代码行数:25,代码来源:population.py
示例9: distance_combinatorics
def distance_combinatorics(Dorig,FDR,resolution,n,th,tl,as_str=True,mode=0,res_diff=1.):
    # For every pair of scales (low, high), combine their distance rows into
    # a pairwise score, keep the elementwise maximum over all scale pairs,
    # and return the result as a symmetric adjacency list keyed by node.
    D = np.copy(Dorig)
    # Neutralize distances inside (tl, th) and entries failing the FDR test
    # (a value of 1 contributes zero to the scores below).
    D[((D > tl) & (D < th))] = 1
    D[(FDR == 0)] = 1
    Dmerged = defaultdict(list)
    Dmax = np.zeros(D.shape[1])
    for low,high in itertools.combinations(range(D.shape[0]),2):
        if resolution[low]/resolution[high] < res_diff:
            # Scales too close in resolution contribute nothing.
            Dcurrent = np.zeros(D.shape[1])
        elif mode == 1:
            # positive low, negative high
            # zeros where one or both signs incorrect
            Dcurrent = (1./(high - low))/(1./4 + 1./(high - low))*((D[high] - 1)*(1 - D[low]))**.5
            Dcurrent[np.invert((1 - D[high] < 0)*(1 - D[low] > 0))] = 0
        elif mode == 2:
            # positive low, positive high
            Dcurrent = (1./4 + 1./(high - low))/(1./(high - low))*((1 - D[high])*(1 - D[low]))**.5
            Dcurrent[np.invert((1 - D[high] > 0)*(1 - D[low] > 0))] = 0
        # NOTE(review): if mode is neither 1 nor 2 and the resolution guard
        # above did not fire, Dcurrent is undefined here and the next line
        # raises NameError -- confirm mode is always 1 or 2 at call sites.
        Dmax = np.array([Dmax,Dcurrent]).max(0)
        #Dmax[np.isnan(Dmax)] = 0
        del Dcurrent
    # Emit both directions of every scoring pair (Python 2 xrange).
    for i,idx in enumerate(itertools.combinations(xrange(n),2)):
        d = Dmax[i]
        if d > .0:
            if as_str:
                Dmerged[idx[0]].append('%d:%f' % (idx[1],d))
                Dmerged[idx[1]].append('%d:%f' % (idx[0],d))
            else:
                Dmerged[idx[0]].append((idx[1],d))
                Dmerged[idx[1]].append((idx[0],d))
    return Dmerged
开发者ID:brian-cleary,项目名称:WaveletCombinatorics,代码行数:31,代码来源:create_wavelet_clusters.py
示例10: derivative_G
def derivative_G(propensities,V,X,w,deter_vector,stoc_positions, positions, valid):
    """Assemble the derivative contribution for the deterministic state
    components; stochastic components are passed through unchanged.

    NOTE(review): relies on Python 2 `map` returning a list that numpy can
    broadcast against arrays; under Python 3 this needs list(map(...)).
    """
    # just the deterministics
    X_d = X[deter_vector,:].copy()
    temp_eta = np.zeros((np.sum(deter_vector),X.shape[1]))
    # j indexes the columns of `valid`/`positions`, advancing only for
    # reactions flagged in stoc_positions.
    j = 0
    for i in range(len(stoc_positions)):
        ##pdb.set_trace()
        # If x-\nu_i is non zero
        if stoc_positions[i] == True:
            if np.sum(valid[:,j]) != 0:
                #print(" X shape: " + str(X.shape))
                #print(" w shape: " + str(w.shape))
                #print("test :" + str(map(propensities[i],*X[:,positions[valid[:,j]][:,j]])))
                # Shifted-state difference plus the stoichiometric change,
                # weighted by the propensity evaluated at the shifted states
                # and by the weights w.
                temp_eta[:,valid[:,j]] += (X_d[:,positions[valid[:,j]][:,j]]
                                           - X_d[:,valid[:,j]] +
                                           V[i][deter_vector][:,np.newaxis]
                                           )*map(propensities[i],* X[:,positions[valid[:,j]][:,j]])*w[positions[valid[:,j]][:,j]]
            j += 1
        else:
            # Non-shifted reaction: stoichiometry times propensity, weighted.
            temp_eta[:,:] += (V[i][deter_vector][:,np.newaxis])*map(propensities[i],* X)*w
    return_X = np.zeros(X.shape)
    return_X[deter_vector,:] = temp_eta
    # Copy stochastic components through unchanged.
    return_X[np.invert(deter_vector),:] = X[np.invert(deter_vector),:].copy()
    return return_X
开发者ID:SysSynBio,项目名称:PyME,代码行数:29,代码来源:util.py
示例11: pcols
def pcols(self, pheno):
    '''
    Partition the frame's columns into phenotype and experiment columns.

    Parameters
    ----------
    pheno : list or None
        Column names to treat as phenotype columns. ``None`` marks every
        column as an experiment column.

    Raises
    ------
    TypeError
        If ``pheno`` is neither a list nor None.
    '''
    expt_cols = []
    pheno_cols = []
    if pheno is None:
        self._experimentcolumns = self.columns
        self._phenocolumns = None
        return
    if not isinstance(pheno, list):
        raise TypeError("A list is required for setting pheno columns")
    # BUG FIX: the original built a dict from self.columns and iterated the
    # dict to compute the mask. That collapses duplicate column names and,
    # on Python 2, iterates in arbitrary order, so the boolean mask could be
    # misaligned with self.columns. Iterate the columns directly instead;
    # a set gives O(1) membership tests.
    pheno_set = set(pheno)
    is_pheno = array([c in pheno_set for c in self.columns])
    is_expt = invert(is_pheno)
    num_pheno = sum(is_pheno)
    num_expt = sum(is_expt)
    # Sanity check! (With a directly-built complement mask this can no
    # longer fire; kept for parity with the original contract.)
    if num_pheno + num_expt != len(self.columns):
        raise ValueError("Not all phenotype columns could be found in \
the GenomeFrame.")
    # Assign values
    if num_pheno > 0:
        pheno_cols = self.columns[is_pheno].tolist()
    if num_expt > 0:
        # Reuse the precomputed complement instead of invert(is_pheno) again.
        expt_cols = self.columns[is_expt].tolist()
    self._phenocolumns = pheno_cols
    self._experimentcolumns = expt_cols
开发者ID:JasonR055,项目名称:arama,代码行数:29,代码来源:genomeframe140613.py
示例12: query_by_bagging
def query_by_bagging(X, y, current_model, batch_size, rng, base_model=SVC(C=1, kernel='linear'), n_bags=5, method="KL", D=None):
    """
    Query-by-bagging active-learning strategy ("Settles page 17" per the
    inline reference): train a bagged committee on the labeled pool and
    rank unlabeled examples by committee disagreement.

    :param base_model: Model that will be **fitted every iteration**
    :param n_bags: Number of bags on which train n_bags models
    :param method: 'entropy' or 'KL'
    :return: (ids of the selected unlabeled examples, fitness normalized by its max)

    NOTE(review): the SVC default argument is constructed once at function
    definition time and shared across calls (mutable default).
    """
    assert method == 'entropy' or method == 'KL'
    eps = 0.0000001
    if method == 'KL':
        assert hasattr(base_model, 'predict_proba'), "Model with probability prediction needs to be passed to this strategy!"
    clfs = BaggingClassifier(base_model, n_estimators=n_bags, random_state=rng)
    clfs.fit(X[y.known], y[y.known])
    # Committee-averaged class probabilities on the unlabeled pool.
    pc = clfs.predict_proba(X[np.invert(y.known)])
    # Settles page 17
    if method == 'entropy':
        # eps avoids log(0); fitness is the negative entropy, so the most
        # uncertain examples sort first.
        pc += eps
        fitness = np.sum(pc * np.log(pc), axis=1)
        ids = np.argsort(fitness)[:batch_size]
    elif method == 'KL':
        # Mean KL divergence of each committee member from the consensus;
        # the largest disagreements sort last, so take from the tail.
        p = np.array([clf.predict_proba(X[np.invert(y.known)]) for clf in clfs.estimators_])
        fitness = np.mean(np.sum(p * np.log(p / pc), axis=2), axis=0)
        ids = np.argsort(fitness)[-batch_size:]
    return y.unknown_ids[ids], fitness/np.max(fitness)
开发者ID:gmum,项目名称:mlls2015,代码行数:25,代码来源:strategy.py
示例13: predict
def predict(self, data, modes):
    """predict whether a list of position follows a train route by detecting
    the nearest train stops. Input is the pandas data frame of
    measurements and an array of current mode predictions. Returns
    an array of predicted modes of the same size as the input data
    frame has rows.
    """
    # extract lat/lon from data frame
    lat = data['WLATITUDE'].values
    lon = data['WLONGITUDE'].values
    # chunk is a tuple (start_idx, end_idx, mode); only re-examine
    # contiguous segments currently labeled car, bus or train.
    for start_idx, end_idx, _ in ifilter(lambda chunk: chunk[2] in [MODE_CAR, MODE_BUS, MODE_TRAIN],
                                         chunks(modes, include_values=True)):
        # test for distance first
        lat_seg = lat[start_idx:end_idx]
        lon_seg = lon[start_idx:end_idx]
        # Drop NaN fixes before the location test.
        # NOTE(review): lat and lon are filtered independently; if their NaN
        # patterns ever differ the two arrays desynchronize -- confirm
        # upstream guarantees they are NaN together.
        valid_lat_seg = lat_seg[np.where(np.invert(np.isnan(lat_seg)))[0]]
        valid_lon_seg = lon_seg[np.where(np.invert(np.isnan(lon_seg)))[0]]
        if len(valid_lon_seg) == 0:
            continue
        # TODO: parameters have to be tuned carefully
        is_train = predict_mode_by_location(valid_lat_seg,
                                            valid_lon_seg,
                                            self.train_location_tree,
                                            self.train_location_dict,
                                            self.train_route_dict,
                                            dist_thre = 400,
                                            dist_pass_thres = 7,
                                            num_stops_thre = 3,
                                            dist_pass_thres_perc = 0.7)
        # check entry point distance
        # -1 = unknown (no valid fix), 0 = not near a station, 1 = near one.
        entry_pt_near = -1
        exit_pt_near = -1
        if start_idx-1>=0:
            if not np.isnan(lat[start_idx-1]):
                nearest_station = find_nearest_station(lat[start_idx-1], lon[start_idx-1], self.train_location_tree, self.dist_thres_entry_exit)
                if len(nearest_station)!=0:
                    entry_pt_near = 1
                else:
                    entry_pt_near = 0
        if end_idx < len(modes):
            if not np.isnan(lat[end_idx]):
                nearest_station = find_nearest_station(lat[end_idx],lon[end_idx],
                                                       self.train_location_tree,
                                                       self.dist_thres_entry_exit)
                if len(nearest_station)!=0:
                    exit_pt_near = 1
                else:
                    exit_pt_near = 0
        # Relabel the whole segment: train if the route test fires or both
        # endpoints are near stations; otherwise fall back to car.
        if is_train or entry_pt_near + exit_pt_near == 2:
            modes[start_idx:end_idx] = MODE_TRAIN
        else:
            modes[start_idx:end_idx] = MODE_CAR
    return modes
开发者ID:SUTDMEC,项目名称:NSE_Validation,代码行数:60,代码来源:TransitHeuristic.py
示例14: extract_coordinates
def extract_coordinates():
    # Build strand-aware promoter windows around each TSS, attach the
    # ER / non-ER labels from the pre-indexed promoter files, and write the
    # two annotated coordinate files back out.
    data = np.loadtxt(config_variables.name_of_time_series_promoter_file_for_TSS_start, dtype = str, delimiter = '\t')
    plus_strand = data[:, 4] == '+'
    minus_strand = np.invert(plus_strand)
    # Columns: [chrom, window_start, window_end, label].
    promoter_data = np.zeros_like(data).astype(int)[:,:4]
    # '+' strand genes: window around the start coordinate (column 1);
    # '-' strand genes: mirrored window around the end coordinate (column 2).
    promoter_data[plus_strand, 1] = data[plus_strand, 1].astype(int) - upstream_validation
    promoter_data[plus_strand, 2] = data[plus_strand, 1].astype(int) + downstream_validation
    promoter_data[minus_strand, 2] = data[minus_strand, 2].astype(int) + upstream_validation
    promoter_data[minus_strand, 1] = data[minus_strand, 2].astype(int) - downstream_validation
    promoter_data = promoter_data.astype(str)
    promoter_data[:, 0] = data[:, 0]
    #--------------------
    ER_promoters = np.loadtxt("{0}ER_controled_promoters_pindexed.txt".format(temp_output), dtype = str, delimiter = '\t')
    Non_ER_promoters = np.loadtxt("{0}Non_ER_controled_promoters_pindexed.txt".format(temp_output), dtype = str, delimiter = '\t')
    # Pull the integer index out of strings like 'p123'.
    # NOTE(review): Python 2 idiom -- map() returning a list.
    def un_string(array_to_clean): return np.array(map(lambda x: int(re.findall('\d+', x)[0]), array_to_clean))
    ER_promoters_indexes = un_string(ER_promoters[:, 3])
    ER_promoters_indexes_mask = np.zeros(len(data), bool)
    ER_promoters_indexes_mask[ER_promoters_indexes] = True
    # Copy each group's label column into the matching rows.
    promoter_data[np.invert(ER_promoters_indexes_mask), 3] = Non_ER_promoters[:,-1]
    promoter_data[ER_promoters_indexes_mask, 3] = ER_promoters[:,-1]
    np.savetxt("{0}ER_controled_promoters_pindexed_2.txt".format(temp_output), promoter_data[ER_promoters_indexes_mask], fmt = "%s", delimiter = "\t")
    np.savetxt("{0}Non_ER_controled_promoters_pindexed_2.txt".format(temp_output), promoter_data[np.invert(ER_promoters_indexes_mask)], fmt = "%s", delimiter = "\t")
开发者ID:ManchesterBioinference,项目名称:EP_Bayes,代码行数:30,代码来源:interaction_finder_wrapper.py
示例15: enrichment_apply_fn
def enrichment_apply_fn(row, timepoints):
    """
    :py:meth:`pandas.DataFrame.apply` apply function for calculating
    enrichment scores and r-squared values.

    *row* holds per-timepoint ratios (NaN where unmeasured); *timepoints*
    is the matching array of time values. Returns a Series with keys
    ``score`` and ``r_sq``; both are NaN when the variant is absent from
    the input library, and ``r_sq`` is NaN when fewer than three timepoints
    are available.
    """
    if math.isnan(row[0]):
        # not present in input library
        score = float("NaN")
        r_sq = float("NaN")
    else:
        values = row.values
        # Compute the NaN mask once and reuse it for both arrays
        # (the original evaluated np.isnan(row) twice).
        observed = np.invert(np.isnan(values))
        ratios = values[observed]
        times = timepoints[observed]
        if len(ratios) == 1:
            # only present in input library
            score = float("NaN")
            r_sq = float("NaN")
        elif len(ratios) == 2:
            # rise over run; no meaningful r-squared from two points
            score = (ratios[1] - ratios[0]) / (times[1] - times[0])
            r_sq = float("NaN")
        else:
            score, _, r, _, _ = stats.linregress(times, ratios)
            r_sq = r ** 2
    return pd.Series({'score' : score, 'r_sq' : r_sq})
开发者ID:mulescent,项目名称:Enrich,代码行数:26,代码来源:selection.py
示例16: rank_S2_predictions
def rank_S2_predictions(self, forcefields, indices):
    """Plot the mean S2 deviation from experiment for each simulation.

    For every simulation in ``self.S2`` the norm, mean and standard
    deviation of (S2 - S2exp) are computed over non-NaN residues; the means
    are then plotted grouped by force field, one point per (forcefield,
    index) simulation.
    """
    norms = {}
    means = {}
    SDs = {}
    for simName in self.S2.keys():
        s2 = self.S2[simName]
        diff = s2 - self.S2exp
        # Compute the NaN mask once (the original evaluated it three times).
        valid = np.invert(np.isnan(diff))
        norms[simName] = np.linalg.norm(diff[valid])
        means[simName] = diff[valid].mean()
        SDs[simName] = diff[valid].std()
    data = means
    plt.figure()
    i = 0
    for ff in forcefields:
        x = []; y = []
        for index in indices:
            simName = "{:s}_2IL6_{:d}".format(ff, index)
            x.append(i)
            y.append(data[simName])
            i += 1
        # BUG FIX: matplotlib does not accept MATLAB-style positional
        # ('MarkerSize', 2) property pairs -- that raises a format-string
        # error. Use the markersize keyword instead.
        plt.plot(x, y, 'o', markersize=2)
    plt.show()
开发者ID:schilli,项目名称:md2nmr,代码行数:29,代码来源:compare_ff.py
示例17: multiple_auc
def multiple_auc(Y_actual, Y_pred, return_individual=False):
    """
    Calculates the averaged ROC for each class
    @params:
        Y_actual: true values of the labels shape:(n , 1)
        Y_pred: predicted values of the labels shape:(n , 1)
        return_individual: if True, also return the per-class AUC dict
    @returns:
        [0] roc_auc for each class (dict)
        [1] averaged_roc amongst classes (float)
    """
    uniques = np.unique(Y_actual)
    # print uniques
    roc_aucs = {}
    for label in uniques:
        # print label
        # Binarize both vectors one-vs-rest for this label. -1 is used as a
        # temporary marker so that zeroing the non-matches cannot clobber
        # the matches when the label value itself is 0 or 1.
        Y_a = Y_actual.copy()
        Y_p = Y_pred.copy()
        matches = Y_a == label
        Y_a[matches] = -1
        Y_a[np.invert(matches)] = 0
        Y_a[Y_a == -1] = 1
        matches = Y_p == label
        Y_p[matches] = -1
        Y_p[np.invert(matches)] = 0
        Y_p[Y_p == -1] = 1
        roc_aucs[label] = roc_auc_score(Y_a, Y_p)
    # NOTE(review): np.mean over dict .values() works on Python 2 (a list)
    # but not Python 3 (a view); there it needs list(roc_aucs.values()).
    averaged_roc = np.mean(roc_aucs.values())
    if return_individual:
        return roc_aucs, averaged_roc
    else:
        return averaged_roc
开发者ID:zafarali,项目名称:MindReader,代码行数:33,代码来源:custom.py
示例18: segment
def segment(self, src):
    """Watershed-segment the image in *src* and return the label image
    (optionally inverted)."""
    image = src.ndarray[:]
    if self.use_adaptive_threshold:
        # Adaptive threshold produces the seed markers directly.
        block_size = 25
        markers = threshold_adaptive(image, block_size) * 255
        markers = invert(markers)
    else:
        # Fixed thresholds: dark pixels seed label 1, bright pixels seed
        # label 255, everything in between stays unlabeled (0).
        markers = zeros_like(image)
        markers[image < self.threshold_low] = 1
        markers[image > self.threshold_high] = 255
    # Sobel edge magnitude as the elevation map for the watershed.
    elmap = sobel(image, mask=image)
    wsrc = watershed(elmap, markers, mask=image)
    # Alternative (disabled) seeding via distance transform + local maxima:
    # elmap = ndimage.distance_transform_edt(image)
    # local_maxi = is_local_maximum(elmap, image,
    #                               ones((3, 3))
    #                               )
    # markers = ndimage.label(local_maxi)[0]
    # wsrc = watershed(-elmap, markers, mask=image)
    # fwsrc = ndimage.binary_fill_holes(out)
    # return wsrc
    if self.use_inverted_image:
        out = invert(wsrc)
    else:
        out = wsrc
    # time.sleep(1)
    # do_later(lambda:self.show_image(image, -elmap, out))
    return out
开发者ID:softtrainee,项目名称:arlab,代码行数:31,代码来源:region.py
示例19: extract_local_sparse_matrix
def extract_local_sparse_matrix(self, target_rank):
    """Partition this distributed sparse matrix's entries relative to
    *target_rank*.

    Returns a dict holding the purely-local entries (both endpoints on the
    target rank), the entries that rank must send elsewhere, and the
    entries it will receive from other ranks.
    """
    logger.debug("Extract local sparse matrix for rank{}".format(target_rank))
    t_rank = target_rank
    dsts = self.dsts
    srcs = self.srcs
    wgts = self.wgts
    rank_dsts = self.rank_dsts
    rank_srcs = self.rank_srcs
    # Boolean masks: does each entry's destination / source live on t_rank?
    dst_on_target = rank_dsts == t_rank
    src_on_target = rank_srcs == t_rank
    # Multiplying boolean arrays acts as an elementwise AND.
    local_idxs = np.where(dst_on_target * src_on_target)[0]
    send_idxs = np.where(np.invert(dst_on_target) * src_on_target)[0]
    recv_idxs = np.where(dst_on_target * np.invert(src_on_target))[0]
    return {
        "spmat_size": self.spmat_size,
        "local_dsts": dsts[local_idxs],
        "local_srcs": srcs[local_idxs],
        "local_wgts": wgts[local_idxs],
        "send_ranks": rank_dsts[send_idxs],
        "send_dsts": dsts[send_idxs],
        "send_srcs": srcs[send_idxs],
        "send_wgts": wgts[send_idxs],
        "recv_ranks": rank_srcs[recv_idxs],
        "recv_dsts": dsts[recv_idxs],
    }
开发者ID:wbkifun,项目名称:my_stuff,代码行数:33,代码来源:cube_mpi.py
示例20: measure_autofluorescence
def measure_autofluorescence(fluorescent_image, worm_mask, time_series):
    '''
    Measure fluorescence characteristics of a worm at the time corresponding to the information given.
    '''
    worm_pixels = fluorescent_image[worm_mask].copy()
    # Intensity percentiles over the worm area.
    (intensity_50, intensity_60, intensity_70, intensity_80, intensity_90,
     intensity_95, intensity_100) = np.percentile(
         worm_pixels, np.array([50, 60, 70, 80, 90, 95, 100]).astype('float64'))
    # Total fluorescence above each percentile threshold (and overall).
    integrated_50 = np.sum(worm_pixels[worm_pixels > intensity_50])
    integrated_60 = np.sum(worm_pixels[worm_pixels > intensity_60])
    integrated_70 = np.sum(worm_pixels[worm_pixels > intensity_70])
    integrated_80 = np.sum(worm_pixels[worm_pixels > intensity_80])
    integrated_90 = np.sum(worm_pixels[worm_pixels > intensity_90])
    integrated_95 = np.sum(worm_pixels[worm_pixels > intensity_95])
    integrated_0 = np.sum(worm_pixels)
    time_series.loc[['intensity_50', 'intensity_60', 'intensity_70', 'intensity_80', 'intensity_90', 'intensity_95', 'intensity_100', 'integrated_50', 'integrated_60', 'integrated_70', 'integrated_80', 'integrated_90', 'integrated_95', 'integrated_0']] = (intensity_50, intensity_60, intensity_70, intensity_80, intensity_90, intensity_95, intensity_100, integrated_50, integrated_60, integrated_70, integrated_80, integrated_90, integrated_95, integrated_0)

    def _above(threshold):
        # Boolean image of worm pixels brighter than `threshold`.
        bright = np.zeros(worm_mask.shape).astype('bool')
        bright[fluorescent_image > threshold] = True
        bright[np.invert(worm_mask)] = False
        return bright

    over_0_mask = np.zeros(worm_mask.shape).astype('bool')
    over_0_mask[worm_mask] = True
    over_50_mask = _above(intensity_50)
    over_60_mask = _above(intensity_60)
    over_70_mask = _above(intensity_70)
    over_80_mask = _above(intensity_80)
    over_90_mask = _above(intensity_90)
    colored_areas = color_features([over_0_mask, over_50_mask, over_60_mask, over_70_mask, over_80_mask, over_90_mask])
    return (time_series, colored_areas)
开发者ID:zhang-wb,项目名称:wormPhysiology,代码行数:34,代码来源:extractFeatures.py
注:本文中的numpy.invert函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论