This article collects and summarizes typical usage examples of the Python function numpy.expand_dims. If you have been wondering what exactly expand_dims does and how to use it, the curated code examples below should help.
Below are 20 code examples of expand_dims, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
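Before the examples, here is a minimal standalone sketch (not from any of the projects below) of what expand_dims actually does: it inserts a new length-1 axis at the given position, which is the usual way to set arrays up for broadcasting or concatenation.

import numpy as np

a = np.array([1.0, 2.0, 3.0])       # shape (3,)
row = np.expand_dims(a, axis=0)     # shape (1, 3), a row vector
col = np.expand_dims(a, axis=1)     # shape (3, 1), a column vector

# length-1 axes broadcast: (3, 1) * (1, 3) -> (3, 3) outer product
print((col * row).shape)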
Example 1: grad_EVzxVzxT_by_hyper_exact

def grad_EVzxVzxT_by_hyper_exact(self, EVzxVzxT_list_this, Z, A, B, hyperno):
    P = Z.shape[0]
    R = Z.shape[1]
    N = A.shape[0]

    if hyperno != 0:
        return EVzxVzxT_list_this * 0

    alpha = self.length_scale * self.length_scale

    I = np.identity(R)
    S = np.diag(B[0, :] * B[0, :])
    Sinv = np.diag(1 / (B[0, :] * B[0, :]))  # parenthesized so this is diag(B**2)**-1, the inverse of S
    C = I * alpha
    Cinv = I * (1 / alpha)
    CinvSinv = 2 * Cinv + Sinv
    CinvSinv_inv = np.diag(1 / CinvSinv.diagonal())

    dC = self.length_scale * I
    dCinv = -Cinv.dot(dC).dot(Cinv)
    dCinvSinv = 2 * dCinv
    dCinvSinv_inv = -CinvSinv_inv.dot(dCinvSinv).dot(CinvSinv_inv)

    S1 = (
        dCinv
        - dCinv.dot(CinvSinv_inv).dot(Cinv)
        - Cinv.dot(dCinvSinv_inv).dot(Cinv)
        - Cinv.dot(CinvSinv_inv).dot(dCinv)
    )
    S2 = -Sinv.dot(dCinvSinv_inv).dot(Sinv)
    S3 = Sinv.dot(dCinvSinv_inv).dot(Cinv) + Sinv.dot(CinvSinv_inv).dot(dCinv)
    S4 = dCinv.dot(CinvSinv_inv).dot(Cinv) + Cinv.dot(dCinvSinv_inv).dot(Cinv) + Cinv.dot(CinvSinv_inv).dot(dCinv)

    T1s = np.tile(Z.dot(S1).dot(Z.T).diagonal(), [P, 1])
    T1 = np.tile(T1s, [N, 1, 1])
    T2s = T1s.T
    T2 = np.tile(T2s, [N, 1, 1])
    T3 = np.tile(Z.dot(S4).dot(Z.T), [N, 1, 1])
    T4 = np.tile(A.dot(S2).dot(A.T).diagonal(), [P, 1]).T
    T4 = np.expand_dims(T4, axis=2)
    T4 = np.repeat(T4, P, axis=2)
    T5 = A.dot(S3).dot(Z.T)
    T5 = np.expand_dims(T5, axis=2)
    T5 = np.repeat(T5, P, axis=2)
    T6 = np.swapaxes(T5, 1, 2)

    SCinvI = 2 * Cinv.dot(S) + I
    SCinvI_inv = np.diag(1 / SCinvI.diagonal())
    (temp, logDetSCinvI) = np.linalg.slogdet(SCinvI)
    detSCinvI = np.exp(logDetSCinvI)
    dDetSCinvI = -0.5 * np.power(detSCinvI, -0.5) * SCinvI_inv.dot(2 * dCinv).dot(S).trace()

    expTerm = EVzxVzxT_list_this / np.power(detSCinvI, -0.5)

    res = EVzxVzxT_list_this * (-0.5 * T1 - 0.5 * T2 + T3 - 0.5 * T4 + T5 + T6) + dDetSCinvI * expTerm
    res = np.sum(res, axis=0)
    return res

Developer: LinZhineng, Project: atldgp, Lines: 60, Source: RBFKernel.py
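The T4/T5 manipulations above use expand_dims plus repeat to blow an (N, P) matrix up to (N, P, P) so it can be combined elementwise with the tiled terms. A shape-only sketch of that pattern, with made-up sizes standing in for A.dot(S3).dot(Z.T):

import numpy as np

N, P = 4, 3
T5 = np.arange(N * P, dtype=float).reshape(N, P)  # stand-in for A.dot(S3).dot(Z.T)
T5 = np.expand_dims(T5, axis=2)                   # (N, P, 1)
T5 = np.repeat(T5, P, axis=2)                     # (N, P, P): each entry copied along the new axis
T6 = np.swapaxes(T5, 1, 2)                        # (N, P, P): the transposed counterpart
print(T5.shape, T6.shape)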
Example 2: velocity

def velocity(self, l=None, t=None, squeeze=True):
    """
    Return velocities at a given position l, or a given time t.
    (Since this is just a ballistic model, the output does not
    depend on l or t, but it could in a non-ballistic model.)
    """
    # convert the l or t input to a t input:
    if t is None and l is None:
        raise IOError('you must input l or t.')
    if t is None:
        l = np.array(l)
        if len(l.shape) == 0:
            l = np.array([l])
        t = self.time(l=l, squeeze=squeeze)
    else:
        t = np.array(t)
        if len(t.shape) == 0:
            t = np.array([t])
    # formatting to make sure I can perform the array operations
    g = np.expand_dims(
        np.expand_dims(self.params.GRAVITY, 0), 0)
    t = np.expand_dims(t - self.t, 2)
    x = np.expand_dims(self.x, 0)
    v = np.expand_dims(self.v, 0)
    vel = v + g * t
    if squeeze:
        vel = np.squeeze(vel, axis=0)
    return vel

Developer: b-r-oleary, Project: acme, Lines: 31, Source: beam_source.py
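Here expand_dims aligns three differently-shaped arrays so that v + g * t broadcasts to one velocity per (time, particle) pair. The shapes below are assumptions for illustration (GRAVITY, t and v are stand-ins for the class's attributes), not taken from the source:

import numpy as np

GRAVITY = np.array([0.0, 0.0, -9.81])   # assumed 3-vector, stand-in for self.params.GRAVITY
t = np.random.rand(5, 2)                # assumed (times, particles), stand-in for t - self.t
v = np.random.rand(2, 3)                # assumed per-particle velocities, stand-in for self.v

g = np.expand_dims(np.expand_dims(GRAVITY, 0), 0)  # (1, 1, 3)
t3 = np.expand_dims(t, 2)                          # (5, 2, 1)
v3 = np.expand_dims(v, 0)                          # (1, 2, 3)
print((v3 + g * t3).shape)                         # (5, 2, 3)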
Example 3: compute_overlap

def compute_overlap(a, b):
    """
    Parameters
    ----------
    a: (N, 4) ndarray of float
    b: (K, 4) ndarray of float

    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    area = (b[:, 2] - b[:, 0] + 1) * (b[:, 3] - b[:, 1] + 1)
    iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0]) + 1
    ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1]) + 1
    iw = np.maximum(iw, 0)
    ih = np.maximum(ih, 0)
    ua = np.expand_dims((a[:, 2] - a[:, 0] + 1) * (a[:, 3] - a[:, 1] + 1), axis=1) + area - iw * ih
    ua = np.maximum(ua, np.finfo(float).eps)
    intersection = iw * ih
    return intersection / ua

Developer: jodiexyx, Project: rob599, Lines: 25, Source: anchors.py
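A quick usage sketch (assuming the function above and numpy imported as np): the expand_dims(..., axis=1) calls give a's columns and areas shape (N, 1), so the minima/maxima against b's (K,) columns broadcast to the full (N, K) IoU grid.

a = np.array([[0, 0, 9, 9],
              [5, 5, 14, 14]], dtype=float)  # N = 2 boxes, (x1, y1, x2, y2)
b = np.array([[0, 0, 9, 9]], dtype=float)    # K = 1 query box
print(compute_overlap(a, b))                 # approx [[1.0], [0.143]]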
Example 4: cloud_callback

def cloud_callback(self, cloud):
    points = point_cloud2.read_points(cloud)
    points_list = np.asarray(list(points))
    points_arr = np.asarray(points_list)

    # Unpack RGB color info
    _float2rgb_vectorized = np.vectorize(_float2rgb)
    r, g, b = _float2rgb_vectorized(points_arr[:, 3])

    # Concatenate and reshape
    r = np.expand_dims(r, 1)  # insert blank 2nd axis (for concatenation)
    g = np.expand_dims(g, 1)
    b = np.expand_dims(b, 1)
    points_rgb = np.concatenate((points_arr[:, 0:3], r, g, b), axis=1)
    image_rgb = points_rgb.reshape(cloud.height, cloud.width, 6)
    z = copy.deepcopy(image_rgb[:, :, 2])  # get depth values
    image_np = copy.deepcopy(image_rgb[:, :, 3:].astype('uint8'))
    #code.interact(local=locals())

    # TWO-METER DISTANCE FILTER
    z[np.isnan(z)] = 0.0
    mask = np.logical_or(z > 2, z == 0)
    for i in range(image_np.shape[2]):
        image_np[:, :, i][mask] = 0

    # Convert to Image msg
    image_cv = cv.fromarray(image_np)
    image_msg = self.bridge.cv_to_imgmsg(image_cv, encoding='bgr8')
    self.pub.publish(image_msg)

Developer: OSUrobotics, Project: rgbd_numpy, Lines: 29, Source: python_pointcloud2.py
Example 5: pad

def pad(total_boxes, w, h):
    # compute the padding coordinates (pad the bounding boxes to square)
    tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32)
    tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32)
    numbox = total_boxes.shape[0]

    dx = np.ones((numbox), dtype=np.int32)
    dy = np.ones((numbox), dtype=np.int32)
    edx = tmpw.copy().astype(np.int32)
    edy = tmph.copy().astype(np.int32)

    x = total_boxes[:, 0].copy().astype(np.int32)
    y = total_boxes[:, 1].copy().astype(np.int32)
    ex = total_boxes[:, 2].copy().astype(np.int32)
    ey = total_boxes[:, 3].copy().astype(np.int32)

    tmp = np.where(ex > w)
    edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1)
    ex[tmp] = w

    tmp = np.where(ey > h)
    edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1)
    ey[tmp] = h

    tmp = np.where(x < 1)
    dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1)
    x[tmp] = 1

    tmp = np.where(y < 1)
    dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1)
    y[tmp] = 1

    return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph

Developer: nxp-gf, Project: flask-facep-reg-v3, Lines: 33, Source: mtcnn_detect.py
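The .flat[tmp] assignments write the column vectors produced by expand_dims back into the 1-D arrays through a flat view (np.where returns a tuple of index arrays, which flat accepts, and the right-hand side is raveled on assignment). A toy sketch of the right-edge clamp alone:

import numpy as np

w = 10
ex = np.array([4, 12, 15], dtype=np.int32)    # box right edges, some past the image
tmpw = np.array([5, 6, 8], dtype=np.int32)    # box widths
edx = tmpw.copy()
tmp = np.where(ex > w)
edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1)  # shrink the copy extent
ex[tmp] = w
print(edx, ex)   # [5 4 3] [ 4 10 10]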
Example 6: twopoint_spidx_bootstrap

def twopoint_spidx_bootstrap(freq, flux, flux_err, niter=10000):
    """
    Quick bootstrap for spectral index calculation
    freq: 2-element array
    flux: 2 or 2xN array
    flux_err: 2 or 2xN array
    N is the number of sources
    """
    # calculate spidx assuming [iter, source, freq_point] shapes
    def spidx(freq, flux):
        return np.log10(flux[:, :, 0] / flux[:, :, 1]) / np.log10(freq[:, :, 0] / freq[:, :, 1])

    freq = np.array(freq).astype(float)
    flux = np.array(flux).astype(float)
    flux_err = np.array(flux_err).astype(float)
    # if only 1 source, add degenerate axis
    if flux.shape == (2,): flux = np.expand_dims(flux, axis=1)
    if flux_err.shape == (2,): flux_err = np.expand_dims(flux_err, axis=1)
    flux = flux.T
    flux_err = flux_err.T
    nsource = flux.shape[0]

    results = np.zeros(shape=(niter, nsource))
    random_flux = np.resize(flux, (niter, nsource, 2)) + np.resize(flux_err, (niter, nsource, 2)) * np.random.randn(niter, nsource, 2)
    random_flux[random_flux <= 0] = np.nan  # drop non-positive draws; note this introduces a bias
    freq = np.resize(freq, (niter, nsource, 2))
    results = spidx(freq, random_flux)

    mean = np.nanmean(results, axis=0)
    err = np.nanstd(results, axis=0)
    return mean, err

Developer: revoltek, Project: scripts, Lines: 31, Source: lib_linearfit.py
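A hedged usage sketch (inputs are illustrative, not from the source): a source twice as bright at 100 MHz as at 200 MHz has a two-point index of log10(2) / log10(0.5) = -1, and the bootstrap error should track the fractional flux errors.

import numpy as np

freq = [100e6, 200e6]      # Hz
flux = [2.0, 1.0]          # single source
flux_err = [0.1, 0.05]
mean, err = twopoint_spidx_bootstrap(freq, flux, flux_err, niter=10000)
print(mean, err)           # mean close to -1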
Example 7: _perform_clip

def _perform_clip(self, _filtered_data, axis=None):
    """
    Perform sigma clip by comparing the data to the minimum and
    maximum values (median + sig * standard deviation). Use
    sigma_lower and sigma_upper to get the correct limits. Data
    values less or greater than the minimum / maximum values
    will have True set in the mask array.
    """
    if _filtered_data.size == 0:
        return _filtered_data

    max_value = self.cenfunc(_filtered_data, axis=axis)
    std = self.stdfunc(_filtered_data, axis=axis)
    min_value = max_value - std * self.sigma_lower
    max_value += std * self.sigma_upper

    if axis is not None:
        if axis != 0:
            min_value = np.expand_dims(min_value, axis=axis)
            max_value = np.expand_dims(max_value, axis=axis)

    if max_value is np.ma.masked:
        max_value = np.ma.MaskedArray(np.nan, mask=True)
        min_value = np.ma.MaskedArray(np.nan, mask=True)

    _filtered_data.mask |= _filtered_data > max_value
    _filtered_data.mask |= _filtered_data < min_value
    return _filtered_data

Developer: Juanlu001, Project: astropy, Lines: 29, Source: sigma_clipping.py
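The expand_dims calls are what let the reduced statistics broadcast back against the unreduced data: reducing a (3, 5) array along axis=1 yields shape (3,), and re-inserting the axis gives (3, 1), which compares elementwise against all five columns. A minimal sketch outside the class:

import numpy as np

data = np.arange(15, dtype=float).reshape(3, 5)
med = np.median(data, axis=1)        # shape (3,)
med = np.expand_dims(med, axis=1)    # shape (3, 1)
print((data > med).shape)            # (3, 5): row-wise comparison broadcasts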
Example 8: extract_imfeats

def extract_imfeats(hdf5name, network):
    # Image files
    hdf5file = h5py.File(hdf5name)

    # Final output of neural network
    imfeatures = np.zeros((0, 4096))

    # Loop through all the images in the HDF5 file
    for imname in hdf5file.keys():
        img = 1.0 - hdf5file[imname].value / 255.0
        shards = np.zeros((0, 1, 56, 56))

        # Collect the inputs for the image
        for shard in StepShingler(img, hstep=30, vstep=30, shingle_size=(56, 56)):
            shard = np.expand_dims(np.expand_dims(shard, 0), 0)
            shards = np.concatenate((shards, shard))
        print "Loaded %d shards in and predicting on image %s" % (len(shards), imname)
        sys.stdout.flush()

        # Run the network and append the mean of the features to imfeatures
        features = network.predict(shards, verbose=1)
        imfeatures = np.concatenate((imfeatures, np.expand_dims(features.mean(axis=0), 0)))

    return imfeatures

Developer: lgensinger, Project: d-script, Lines: 25, Source: fielutil.py
Example 9: compute_overlap (a variant of Example 3 without the +1 pixel-area convention)

def compute_overlap(a, b):
    """
    Code originally from https://github.com/rbgirshick/py-faster-rcnn.

    Parameters
    ----------
    a: (N, 4) ndarray of float
    b: (K, 4) ndarray of float

    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0])
    ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1])
    iw = np.maximum(iw, 0)
    ih = np.maximum(ih, 0)
    ua = np.expand_dims((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), axis=1) + area - iw * ih
    ua = np.maximum(ua, np.finfo(float).eps)
    intersection = iw * ih
    return intersection / ua

Developer: souvikb1812, Project: keras-yolo2, Lines: 26, Source: utils.py
Example 10: get_inception_score

def get_inception_score(images, splits=10):
    assert(type(images) == list)
    assert(type(images[0]) == np.ndarray)
    assert(len(images[0].shape) == 3)
    assert(np.max(images[0]) > 10)
    assert(np.min(images[0]) >= 0.0)
    inps = []
    for img in images:
        img = img.astype(np.float32)
        inps.append(np.expand_dims(img, 0))
    bs = 100
    with tf.Session() as sess:
        preds = []
        n_batches = int(math.ceil(float(len(inps)) / float(bs)))
        for i in range(n_batches):
            # sys.stdout.write(".")
            # sys.stdout.flush()
            inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
            inp = np.concatenate(inp, 0)
            pred = sess.run(softmax, {'ExpandDims:0': inp})
            preds.append(pred)
        preds = np.concatenate(preds, 0)
        scores = []
        for i in range(splits):
            part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
            kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
            kl = np.mean(np.sum(kl, 1))
            scores.append(np.exp(kl))
        return np.mean(scores), np.std(scores)

Developer: codealphago, Project: PassGAN, Lines: 29, Source: inception_score.py
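In the KL term, np.mean(part, 0) collapses the batch into the marginal class distribution of shape (C,), and expand_dims makes it (1, C) so the log-ratio broadcasts against every row of part. A small numeric sketch of just that step, with toy probabilities:

import numpy as np

part = np.array([[0.9, 0.1],
                 [0.2, 0.8]])                    # per-image class probabilities
marginal = np.expand_dims(np.mean(part, 0), 0)   # (1, 2): p(y) for this split
kl = part * (np.log(part) - np.log(marginal))    # broadcasts to (2, 2)
print(np.exp(np.mean(np.sum(kl, 1))))            # inception score of the toy split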
Example 11: tiff_sino_to_image_slice

def tiff_sino_to_image_slice(tiffdir, slice_ind):
    """
    Convert TIFF of sinograms and process to horizontal slices of sinograms.
    Assume structure of files from Octopus, and that files are numbered in
    uniform order.
    tiffdir must be the path to the directory of files, ending with "/".
    slice_ind is a 2-array for the first and last slice (as a fraction of the
    whole list) to be reconstructed. slice_ind[0] == slice_ind[1] is permitted.
    """
    import glob
    from PIL import Image
    files = glob.glob(tiffdir + '*.tif')

    # Read in data
    index = (np.round(slice_ind * len(files))).astype('int')
    if slice_ind[0] == slice_ind[1]:
        files = [files[index[0]]]  # keep files as a one-element list
    else:
        files = files[index[0]:index[1]]
    sinos = np.expand_dims(np.array(Image.open(files[0])), 2)
    if len(files) > 1:
        for i in range(len(files) - 1):
            sinos = np.concatenate((sinos, np.expand_dims(np.array(
                Image.open(files[i + 1])), 2)), 2)  # advance through the remaining files
    sinos = np.transpose(sinos, (1, 0, 2))
    return sinos

Developer: ornlneutronimaging, Project: bregman_tomo, Lines: 26, Source: TV_Split_utilities.py
Example 12: np_matrix_to_tf_sparse

def np_matrix_to_tf_sparse(np_matrix, row_slices=None,
                           col_slices=None, transpose=False,
                           shuffle=False):
    """Simple util to slice non-zero np matrix elements as tf.SparseTensor."""
    indices = np.nonzero(np_matrix)

    # Only allow slices of whole rows or whole columns.
    assert not (row_slices is not None and col_slices is not None)

    if row_slices is not None:
        selected_ind = np.concatenate(
            [np.where(indices[0] == r)[0] for r in row_slices], 0)
        indices = (indices[0][selected_ind], indices[1][selected_ind])

    if col_slices is not None:
        selected_ind = np.concatenate(
            [np.where(indices[1] == c)[0] for c in col_slices], 0)
        indices = (indices[0][selected_ind], indices[1][selected_ind])

    if shuffle:
        shuffled_ind = [x for x in range(len(indices[0]))]
        random.shuffle(shuffled_ind)
        indices = (indices[0][shuffled_ind], indices[1][shuffled_ind])

    ind = (np.concatenate(
        (np.expand_dims(indices[1], 1),
         np.expand_dims(indices[0], 1)), 1).astype(np.int64) if transpose else
           np.concatenate((np.expand_dims(indices[0], 1),
                           np.expand_dims(indices[1], 1)), 1).astype(np.int64))
    val = np_matrix[indices].astype(np.float32)
    shape = (np.array(
        [max(indices[1]) + 1, max(indices[0]) + 1]).astype(np.int64) if transpose
             else np.array(
        [max(indices[0]) + 1, max(indices[1]) + 1]).astype(np.int64))
    return tf.SparseTensor(ind, val, shape)

Developer: 2020zyc, Project: tensorflow, Lines: 35, Source: factorization_ops_test.py
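The index-building step needs no TensorFlow to demonstrate: np.nonzero returns one 1-D array per axis, and expand_dims plus concatenate stitches them into the (nnz, 2) row/column index matrix a SparseTensor expects. A numpy-only sketch:

import numpy as np

m = np.array([[0., 2., 0.],
              [1., 0., 3.]])
indices = np.nonzero(m)   # (row_indices, col_indices), each shape (nnz,)
ind = np.concatenate((np.expand_dims(indices[0], 1),
                      np.expand_dims(indices[1], 1)), 1).astype(np.int64)
print(ind)                # [[0 1] [1 0] [1 2]]
print(m[indices])         # [2. 1. 3.]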
Example 13: ReadSegmentFlow

def ReadSegmentFlow(path, offsets, new_height, new_width, new_length, is_color, name_pattern):
    if is_color:
        cv_read_flag = cv2.IMREAD_COLOR      # > 0
    else:
        cv_read_flag = cv2.IMREAD_GRAYSCALE  # = 0
    interpolation = cv2.INTER_LINEAR

    sampled_list = []
    for offset_id in range(len(offsets)):
        offset = offsets[offset_id]
        for length_id in range(1, new_length + 1):
            frame_name_x = name_pattern % ("x", length_id + offset)
            frame_path_x = path + "/" + frame_name_x
            cv_img_origin_x = cv2.imread(frame_path_x, cv_read_flag)
            frame_name_y = name_pattern % ("y", length_id + offset)
            frame_path_y = path + "/" + frame_name_y
            cv_img_origin_y = cv2.imread(frame_path_y, cv_read_flag)
            if cv_img_origin_x is None or cv_img_origin_y is None:
                print("Could not load file %s or %s" % (frame_path_x, frame_path_y))
                sys.exit()
                # TODO: error handling here
            if new_width > 0 and new_height > 0:
                cv_img_x = cv2.resize(cv_img_origin_x, (new_width, new_height), interpolation)
                cv_img_y = cv2.resize(cv_img_origin_y, (new_width, new_height), interpolation)
            else:
                cv_img_x = cv_img_origin_x
                cv_img_y = cv_img_origin_y
            sampled_list.append(np.expand_dims(cv_img_x, 2))
            sampled_list.append(np.expand_dims(cv_img_y, 2))

    clip_input = np.concatenate(sampled_list, axis=2)
    return clip_input

Developer: Alawaka, Project: two-stream-pytorch, Lines: 32, Source: ucf101.py
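The per-frame expand_dims(..., 2) gives each grayscale flow image an explicit channel axis, so the x/y frames concatenate into an (H, W, 2 * new_length * len(offsets)) stack. A synthetic sketch without OpenCV, using random arrays in place of loaded frames:

import numpy as np

h, w = 4, 6
sampled_list = []
for _ in range(3):                 # three (x, y) flow pairs
    flow_x = np.random.rand(h, w)  # stand-in for a loaded grayscale frame
    flow_y = np.random.rand(h, w)
    sampled_list.append(np.expand_dims(flow_x, 2))  # (h, w, 1)
    sampled_list.append(np.expand_dims(flow_y, 2))
clip_input = np.concatenate(sampled_list, axis=2)
print(clip_input.shape)            # (4, 6, 6)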
Example 14: prepare_submission

def prepare_submission():
    mypath = '/Users/philipppushnyakov/data/test/'
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    s = open('submission.csv', 'wt')
    s.write('Id,label' + '\n')
    for f in onlyfiles:
        im_name = mypath + f
        try:
            im = cv2.resize(cv2.imread(im_name), (64, 64))
            #im = cv2.cvtColor(cv2.resize(cv2.imread(im_name), (64, 64)).astype(np.float32), cv2.COLOR_BGR2GRAY)
        except KeyboardInterrupt:
            raise
        except:
            print im_name
            continue
        #print im.shape
        #im = im.transpose((2,0,1))
        im = np.expand_dims(im, axis=0)
        im = np.expand_dims(im, axis=0)
        #for j in range(1):
        #    im[:,j,:,:] -= np.mean(im[:,j,:,:])
        #    im[:,j,:,:] /= np.std(im[:,j,:,:])
        #im -= np.mean(im)
        #im /= np.std(im)
        out = model.predict(im)
        #print out
        s.write(f.split('.')[0] + ',' + str(np.argmax(out) + 1) + '\n')

Developer: pushnyakov, Project: DSG16, Lines: 26, Source: classifier_from_little_data_script_2.py
Example 15: prob

def prob(samples, data, rho_D_M, d_distr_samples, d_Tree=None):
    r"""
    Calculates :math:`P_{\Lambda}(\mathcal{V}_{\lambda_{samples}})`, the
    probability associated with a set of Voronoi cells defined by the model
    solves at :math:`(\lambda_{samples})`, where the volumes of these Voronoi
    cells are assumed to be equal under the MC assumption.

    :param samples: The samples in parameter space for which the model was run.
    :type samples: :class:`~numpy.ndarray` of shape (num_samples, ndim)
    :param data: The data from running the model given the samples.
    :type data: :class:`~numpy.ndarray` of size (num_samples, mdim)
    :param rho_D_M: The simple function approximation of rho_D
    :type rho_D_M: :class:`~numpy.ndarray` of shape (M,)
    :param d_distr_samples: The samples in the data space that define a
        partition of D for the simple function approximation
    :type d_distr_samples: :class:`~numpy.ndarray` of shape (M, mdim)
    :param d_Tree: :class:`~scipy.spatial.KDTree` for d_distr_samples
    :rtype: tuple of :class:`~numpy.ndarray` of sizes (num_samples,),
        (num_samples,), (ndim, num_l_emulate), (num_samples,), (num_l_emulate,)
    :returns: (P, lam_vol, io_ptr) where P is the probability associated
        with samples, lam_vol the volumes associated with the samples, and
        io_ptr a pointer from data to M bins.
    """
    if len(samples.shape) == 1:
        samples = np.expand_dims(samples, axis=1)
    if len(data.shape) == 1:
        data = np.expand_dims(data, axis=1)
    if len(d_distr_samples.shape) == 1:
        d_distr_samples = np.expand_dims(d_distr_samples, axis=1)
    if d_Tree is None:
        d_Tree = spatial.KDTree(d_distr_samples)

    # Set up local arrays for parallelism
    local_index = range(0 + comm.rank, samples.shape[0], comm.size)
    samples_local = samples[local_index, :]
    data_local = data[local_index, :]
    local_array = np.array(local_index, dtype='int64')

    # Determine which inputs go to which M bins using the QoI
    (_, io_ptr) = d_Tree.query(data_local)

    # Apply the standard MC approximation and calculate probabilities
    P_local = np.zeros((samples_local.shape[0],))
    for i in range(rho_D_M.shape[0]):
        Itemp = np.equal(io_ptr, i)
        Itemp_sum = np.sum(Itemp)
        Itemp_sum = comm.allreduce(Itemp_sum, op=MPI.SUM)
        if Itemp_sum > 0:
            P_local[Itemp] = rho_D_M[i] / Itemp_sum
    P_global = util.get_global_values(P_local)
    global_index = util.get_global_values(local_array)
    P = np.zeros(P_global.shape)
    P[global_index] = P_global[:]

    lam_vol = (1.0 / float(samples.shape[0])) * np.ones((samples.shape[0],))
    return (P, lam_vol, io_ptr)

Developer: npandachg, Project: BET, Lines: 60, Source: calculateP.py
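The opening expand_dims guards are a common normalization idiom: callers may pass 1-D arrays, and promoting them to single-column 2-D arrays lets the rest of the function index uniformly with [:, :]. The guard in isolation:

import numpy as np

samples = np.linspace(0.0, 1.0, 5)   # 1-D input, shape (5,)
if len(samples.shape) == 1:
    samples = np.expand_dims(samples, axis=1)   # (5, 1): one column per dimension
print(samples.shape, samples[2, :])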
Example 16: testVarianceAndCovarianceMatrix

def testVarianceAndCovarianceMatrix(self):
    amp = np.float64(.5)
    len_scale = np.float64(.2)
    jitter = np.float64(1e-4)
    observation_noise_variance = np.float64(3e-3)

    kernel = psd_kernels.ExponentiatedQuadratic(amp, len_scale)

    index_points = np.expand_dims(np.random.uniform(-1., 1., 10), -1)

    gp = tfd.GaussianProcess(
        kernel,
        index_points,
        observation_noise_variance=observation_noise_variance,
        jitter=jitter)

    def _kernel_fn(x, y):
        return amp * np.exp(-.5 * (np.squeeze((x - y)**2)) / (len_scale**2))

    expected_covariance = (
        _kernel_fn(np.expand_dims(index_points, 0),
                   np.expand_dims(index_points, 1)) +
        (observation_noise_variance + jitter) * np.eye(10))

    self.assertAllClose(expected_covariance,
                        self.evaluate(gp.covariance()))
    self.assertAllClose(np.diag(expected_covariance),
                        self.evaluate(gp.variance()))

Developer: lewisKit, Project: probability, Lines: 28, Source: gaussian_process_test.py
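The expected covariance is built with a classic pairwise-broadcast trick: expanding the (10, 1) index points to (1, 10, 1) and (10, 1, 1) makes x - y produce all 10 x 10 differences at once. A numpy-only sketch of the same evaluation, using the test's own kernel form:

import numpy as np

amp, len_scale = 0.5, 0.2
x = np.expand_dims(np.random.uniform(-1., 1., 10), -1)  # (10, 1) index points
diff = np.expand_dims(x, 0) - np.expand_dims(x, 1)      # (10, 10, 1) pairwise differences
K = amp * np.exp(-.5 * np.squeeze(diff**2, -1) / len_scale**2)
print(K.shape, np.allclose(np.diag(K), amp))            # (10, 10) True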
Example 17: testParams

def testParams(self):
    """Tests that the params work as intended."""
    num_classes = 2
    with self.test_session() as sess:
        # Experiment 1. Update weights only.
        data = constant_op.constant(self.data, dtype=dtypes.float32)
        gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
                                        [[3.0, 3.0], [0.0, 0.0]], 'w')
        training_ops = gmm_tool.training_ops()
        variables.global_variables_initializer().run()
        sess.run(gmm_tool.init_ops())
        for _ in xrange(self.iterations):
            sess.run(training_ops)
        # Only the probability to each class is updated.
        alphas = sess.run(gmm_tool.alphas())
        self.assertGreater(alphas[1], 0.6)
        means = sess.run(gmm_tool.clusters())
        np.testing.assert_almost_equal(
            np.expand_dims([[3.0, 3.0], [0.0, 0.0]], 1), means)
        covs = sess.run(gmm_tool.covariances())
        np.testing.assert_almost_equal(covs[0], covs[1])

        # Experiment 2. Update means and covariances.
        gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
                                        [[3.0, 3.0], [0.0, 0.0]], 'mc')
        training_ops = gmm_tool.training_ops()
        variables.global_variables_initializer().run()
        sess.run(gmm_tool.init_ops())
        for _ in xrange(self.iterations):
            sess.run(training_ops)
        alphas = sess.run(gmm_tool.alphas())
        self.assertAlmostEqual(alphas[0], alphas[1])
        means = sess.run(gmm_tool.clusters())
        np.testing.assert_almost_equal(
            np.expand_dims([[2.0, 2.0], [-1.0, -1.0]], 1), means, decimal=1)
        covs = sess.run(gmm_tool.covariances())
        np.testing.assert_almost_equal(
            [[0.371111, -0.0050774], [-0.0050774, 0.8651744]], covs[0], decimal=4)
        np.testing.assert_almost_equal(
            [[0.146976, 0.0259463], [0.0259463, 0.2543971]], covs[1], decimal=4)

        # Experiment 3. Update covariances only.
        gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
                                        [[-1.0, -1.0], [1.0, 1.0]], 'c')
        training_ops = gmm_tool.training_ops()
        variables.global_variables_initializer().run()
        sess.run(gmm_tool.init_ops())
        for _ in xrange(self.iterations):
            sess.run(training_ops)
        alphas = sess.run(gmm_tool.alphas())
        self.assertAlmostEqual(alphas[0], alphas[1])
        means = sess.run(gmm_tool.clusters())
        np.testing.assert_almost_equal(
            np.expand_dims([[-1.0, -1.0], [1.0, 1.0]], 1), means)
        covs = sess.run(gmm_tool.covariances())
        np.testing.assert_almost_equal(
            [[0.1299582, 0.0435872], [0.0435872, 0.2558578]], covs[0], decimal=5)
        np.testing.assert_almost_equal(
            [[3.195385, 2.6989155], [2.6989155, 3.3881593]], covs[1], decimal=5)

Developer: AndrewTwinz, Project: tensorflow, Lines: 60, Source: gmm_ops_test.py
Example 18: CreateCrossCorrelationTable

def CreateCrossCorrelationTable(maindir, file_names, outpath):
    """
    Takes a directory and list of numpy files and horizontally concatenates
    them all and saves the output at outpath. Labels are also added.
    """
    for number, file_name in enumerate(file_names):
        database_brain = np.load(maindir + os.sep + file_name)  # load the correlation column
        if number == 0:
            concatenate_data = database_brain
        else:
            concatenate_data = np.concatenate((concatenate_data,
                                               database_brain), axis=1)

    # Add concept indices:
    processed_fn = [string.replace('.nii.gz.npy', '') for string in file_names]
    processed_fn = [string.replace('_main', '') for string in processed_fn]
    horz_labels = np.array(processed_fn)
    horz_labels = np.expand_dims(horz_labels, axis=0)  # necessary for swapping and concatenating
    vert_labels = np.swapaxes(horz_labels, 0, 1)
    horz_labels = np.insert(horz_labels, 0, 0)
    horz_labels = np.expand_dims(horz_labels, axis=0)  # expand again: np.insert without an axis returns a flattened array
    concatenate_data = np.char.mod('%10.3f', concatenate_data)
    concatenate_data = np.concatenate((vert_labels, concatenate_data), axis=1)
    concatenate_data = np.concatenate((horz_labels, concatenate_data), axis=0)
    np.save(outpath, concatenate_data)
    np.savetxt(outpath, concatenate_data, fmt='%s', delimiter=',')

Developer: law826, Project: Neurosynth_SNA, Lines: 33, Source: Neurosynth_SNA.py
Example 19: update_statistics

def update_statistics(Y, P, beta=0.9):
    """
    Args
    ----
    Y:
        2d array whose columns encode the activity of the output units
    P:
        2d array encoding the pairwise average activity of the output units

    Returns
    -------
    The updated average activities
    """
    (n, d) = Y.shape
    A = np.expand_dims(Y, axis=1) * np.expand_dims(Y, axis=0)
    assert(A.shape == (n, n, d))
    Q = np.mean(A, axis=2)
    Q[np.where(Q == 0.)] = 0.000001
    assert(P.shape == Q.shape)
    return beta * P + (1 - beta) * Q

Developer: dubing12, Project: htmresearch, Lines: 27, Source: utils.py
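The line A = expand_dims(Y, 1) * expand_dims(Y, 0) computes, for every one of the d observations, the outer product of that column with itself in a single broadcast: A[i, j, k] = Y[i, k] * Y[j, k]. A small check:

import numpy as np

Y = np.array([[1., 0.],
              [2., 3.]])   # n = 2 units, d = 2 observations
A = np.expand_dims(Y, axis=1) * np.expand_dims(Y, axis=0)  # (2, 1, 2) * (1, 2, 2) -> (2, 2, 2)
print(A[:, :, 0])          # outer product of column 0: [[1. 2.] [2. 4.]]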
Example 20: load_mask_labels

def load_mask_labels():
    '''Load both target and style masks.
    A mask image (nr x nc) with m labels/colors will be loaded
    as a 4D boolean tensor: (1, m, nr, nc) for 'th' or (1, nr, nc, m) for 'tf'.
    '''
    target_mask_img = load_img(target_mask_path,
                               target_size=(img_nrows, img_ncols))
    target_mask_img = img_to_array(target_mask_img)
    style_mask_img = load_img(style_mask_path,
                              target_size=(img_nrows, img_ncols))
    style_mask_img = img_to_array(style_mask_img)
    if K.image_dim_ordering() == 'th':
        mask_vecs = np.vstack([style_mask_img.reshape((3, -1)).T,
                               target_mask_img.reshape((3, -1)).T])
    else:
        mask_vecs = np.vstack([style_mask_img.reshape((-1, 3)),
                               target_mask_img.reshape((-1, 3))])
    labels = kmeans(mask_vecs, nb_labels)
    style_mask_label = labels[:img_nrows *
                              img_ncols].reshape((img_nrows, img_ncols))
    target_mask_label = labels[img_nrows *
                               img_ncols:].reshape((img_nrows, img_ncols))
    stack_axis = 0 if K.image_dim_ordering() == 'th' else -1
    style_mask = np.stack([style_mask_label == r for r in xrange(nb_labels)],
                          axis=stack_axis)
    target_mask = np.stack([target_mask_label == r for r in xrange(nb_labels)],
                           axis=stack_axis)
    return (np.expand_dims(style_mask, axis=0),
            np.expand_dims(target_mask, axis=0))

Developer: AnishShah, Project: keras, Lines: 32, Source: neural_doodle.py
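The final expand_dims(..., axis=0) is the usual way to add a leading batch axis before handing a single mask to the backend. A toy illustration with an assumed (m, nr, nc) mask in 'th' ordering:

import numpy as np

mask = np.random.rand(3, 128, 128) > 0.5   # hypothetical (m, nr, nc) boolean mask
print(np.expand_dims(mask, axis=0).shape)  # (1, 3, 128, 128)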
Note: the numpy.expand_dims examples in this article were compiled by 纯净天空 (VimSky) from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.