This article collects typical usage examples of Python's numpy.zeros function. If you are wondering how to use the zeros function, what it does, or what it looks like in real code, the curated examples here may help.
Below, 20 code examples of the zeros function are presented, sorted by popularity by default.
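Before the excerpts, a minimal self-contained sketch of numpy.zeros itself: it takes a shape (an int or a tuple) and an optional dtype, and returns an array of that shape filled with zeros.

import numpy as np

# 1-D vector of 4 zeros; float64 is the default dtype
v = np.zeros(4)
print(v)          # [0. 0. 0. 0.]

# 2-D array with an explicit shape tuple and dtype
m = np.zeros((3, 2), dtype=np.int32)
print(m.shape)    # (3, 2)
print(m.dtype)    # int32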
Example 1: __init__
def __init__(self, input, n_in, n_out):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared(value=numpy.zeros((n_in, n_out),
dtype=theano.config.floatX),
name='W', borrow=True)
# initialize the biases b as a vector of n_out 0s
self.b = theano.shared(value=numpy.zeros((n_out,),
dtype=theano.config.floatX),
name='b', borrow=True)
# compute vector of class-membership probabilities in symbolic form
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# compute prediction as class whose probability is maximal in
# symbolic form
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# parameters of the model
self.params = [self.W, self.b]
Author: vivanac | Project: DeepLearningTutorials | Lines: 35 | Source: logistic_sgd.py
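As a hedged usage sketch (assuming the enclosing class is named LogisticRegression, as in the Theano tutorial file logistic_sgd.py this snippet comes from), the layer would be wired up roughly like this:

import theano.tensor as T

x = T.matrix('x')  # symbolic minibatch of input vectors
classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)
# classifier.W.get_value() is a (784, 10) matrix of zeros and
# classifier.b.get_value() a length-10 zero vector, until training updates them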
Example 2: calculate_zernikes
def calculate_zernikes(self, workspace):
zernike_indexes = cpmz.get_zernike_indexes(self.zernike_degree.value + 1)
meas = workspace.measurements
for o in self.objects:
object_name = o.object_name.value
objects = workspace.object_set.get_objects(object_name)
#
# First, get a table of centers and radii of minimum enclosing
# circles per object
#
ij = np.zeros((objects.count + 1, 2))
r = np.zeros(objects.count + 1)
for labels, indexes in objects.get_labels():
ij_, r_ = minimum_enclosing_circle(labels, indexes)
ij[indexes] = ij_
r[indexes] = r_
#
# Then compute x and y, the position of each labeled pixel
# within a unit circle around the object
#
ijv = objects.ijv
l = ijv[:, 2]
yx = (ijv[:, :2] - ij[l, :]) / r[l, np.newaxis]
z = cpmz.construct_zernike_polynomials(
yx[:, 1], yx[:, 0], zernike_indexes)
for image_group in self.images:
image_name = image_group.image_name.value
image = workspace.image_set.get_image(
image_name, must_be_grayscale=True)
pixels = image.pixel_data
mask = (ijv[:, 0] < pixels.shape[0]) & \
(ijv[:, 1] < pixels.shape[1])
mask[mask] = image.mask[ijv[mask, 0], ijv[mask, 1]]
yx_ = yx[mask, :]
l_ = l[mask]
z_ = z[mask, :]
if len(l_) == 0:
for i, (n, m) in enumerate(zernike_indexes):
ftr = self.get_zernike_magnitude_name(image_name, n, m)
meas[object_name, ftr] = np.zeros(0)
if self.wants_zernikes == Z_MAGNITUDES_AND_PHASE:
ftr = self.get_zernike_phase_name(image_name, n, m)
meas[object_name, ftr] = np.zeros(0)
continue
areas = scind.sum(
np.ones(l_.shape, int), labels=l_, index=objects.indices)
for i, (n, m) in enumerate(zernike_indexes):
vr = scind.sum(
pixels[ijv[mask, 0], ijv[mask, 1]] * z_[:, i].real,
labels=l_, index=objects.indices)
vi = scind.sum(
pixels[ijv[mask, 0], ijv[mask, 1]] * z_[:, i].imag,
labels=l_, index=objects.indices)
magnitude = np.sqrt(vr * vr + vi * vi) / areas
ftr = self.get_zernike_magnitude_name(image_name, n, m)
meas[object_name, ftr] = magnitude
if self.wants_zernikes == Z_MAGNITUDES_AND_PHASE:
phase = np.arctan2(vr, vi)
ftr = self.get_zernike_phase_name(image_name, n, m)
meas[object_name, ftr] = phase
Author: dinglyosu | Project: CellProfiler | Lines: 60 | Source: measureobjectintensitydistribution.py
Example 3: __fen2tensor
def __fen2tensor(self, fen):
frdpos = np.zeros((9, 10, 16), dtype=OUT_TYPE)
frdmove = np.zeros((9, 10, 16), dtype=OUT_TYPE)
emypos = np.zeros((9, 10, 16), dtype=OUT_TYPE)
emymove = np.zeros((9, 10, 16), dtype=OUT_TYPE)
movelabel = np.zeros((9, 10, 16), dtype=OUT_TYPE)
fenlist = fen.split('\t')
frdpos, emypos = self.__f2tpos(fenlist[0], frdpos, emypos)
frdmove = self.__f2tfrdmove(fenlist[1], frdmove, frdpos)
label = fenlist[2].strip().split('-')
layer = np.argmax(frdpos[self.__loca2i(label[0][0])][self.__loca2i(label[0][1])])
movelabel[self.__loca2i(label[1][0])][self.__loca2i(label[1][1])][layer] = 1
if fenlist[0].split()[1] == 'b':
self.__switch_round(frdpos)
self.__switch_round(frdmove)
self.__switch_round(emypos)
self.__switch_round(movelabel)
# shuffle randomly
self.__shuffle([frdpos, frdmove, movelabel], self.__shuffle_args())
self.__shuffle([emypos], self.__shuffle_args())
return frdpos, frdmove, emypos, movelabel
Author: milkpku | Project: BetaElephant | Lines: 27 | Source: dataset.py
Example 4: makeHist
def makeHist(self, normalize = True, doPMF = True):
if self.isDataPickled:
return
if not self.Dim == 1:
raise TypeError('Variable # mismatch')
z = self.z
Nframes = len(z)
bin_min = 0.98 * z.min(); bin_max = 1.02*z.max()
delta = (bin_max - bin_min)/float(self.nbins)
bin_centers = np.zeros(self.nbins)
bin_vals = np.zeros(self.nbins)
pmf = np.zeros(self.nbins)
for i in range(self.nbins):
bin_centers[i] = bin_min + (i+0.5) * delta
frameStatus = pb(Text = 'Binning frame by frame', Steps = Nframes)
for i in range(Nframes):
assignment = int((z[i] - bin_min)/delta)
bin_vals[assignment] += 1.0
frameStatus.Update(i)
if normalize:
#bin_vals /= (np.sum(bin_vals) * delta)
bin_vals /= np.trapz(bin_vals, bin_centers, dx = delta)
if doPMF:
pmf = - np.log(bin_vals)
hist = {'bin_centers': bin_centers, 'bin_vals': bin_vals, 'pmf' : pmf}
pickle.dump(hist, open(self.data, 'w'))
self.isDataPickled = True
Author: tanmoy7989 | Project: c25ld | Lines: 35 | Source: Utils.py
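For reference, the frame-by-frame binning loop above can be collapsed into a single numpy.histogram call; a minimal sketch of the equivalent computation, assuming z is a 1-D array of samples:

import numpy as np

z = np.random.randn(10000)   # stand-in for the loaded trajectory data
nbins = 50
bin_min, bin_max = 0.98 * z.min(), 1.02 * z.max()
bin_vals, edges = np.histogram(z, bins=nbins, range=(bin_min, bin_max))
bin_centers = 0.5 * (edges[:-1] + edges[1:])
bin_vals = bin_vals / np.trapz(bin_vals, bin_centers)  # normalize to unit area
pmf = -np.log(bin_vals)  # diverges for empty bins, as in the original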
Example 5: __init__
def __init__(self):
"""
Setup tri33 cell.
"""
vertices = numpy.array([[-1.0, -1.0],
[+1.0, -1.0],
[-1.0, +1.0]])
quadPts = vertices[:]
quadWts = numpy.array( [2.0/3.0, 2.0/3.0, 2.0/3.0])
# Compute basis fns and derivatives at quadrature points
basis = numpy.zeros( (3, 3), dtype=numpy.float64)
basisDeriv = numpy.zeros( (3, 3, 2), dtype=numpy.float64)
iQuad = 0
for q in quadPts:
basis[iQuad] = numpy.array([self.N0(q), self.N1(q), self.N2(q)],
dtype=numpy.float64).reshape( (3,) )
deriv = numpy.array([[self.N0p(q), self.N0q(q)],
[self.N1p(q), self.N1q(q)],
[self.N2p(q), self.N2q(q)]])
basisDeriv[iQuad] = deriv.reshape((3, 2))
iQuad += 1
self.cellDim = 2
self.numCorners = len(vertices)
self.numQuadPts = len(quadPts)
self.vertices = vertices
self.quadPts = quadPts
self.quadWts = quadWts
self.basis = basis
self.basisDeriv = basisDeriv
return
Author: panzhengyang | Project: pylith | Lines: 32 | Source: TestFIATSimplex.py
Example 6: backprop
def backprop(self, x, y):
activation = x
activations = [x]
zs = []
for weight, bias in zip(self.weights, self.biases):
z = np.dot(activation, weight)+bias
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
delta = (activation-y)*sigmoid_prime(zs[-1])
nabla_weights = [np.zeros(w.shape) for w in self.weights]
nabla_biases = [np.zeros(b.shape) for b in self.biases]
nabla_weights[-1] = np.dot(activations[-2].transpose(), delta)
nabla_biases[-1] = delta
for l in xrange(2, len(self.layers)):
delta = np.dot(delta, self.weights[-l+1].transpose())*sigmoid_prime(zs[-l])
nabla_weights[-l] = np.dot(activations[-l-1].transpose(), delta)
nabla_biases[-l] = delta
return (nabla_weights, nabla_biases)
Author: Enhuiz | Project: text-classification-with-bp-network | Lines: 25 | Source: network.py
Example 7: divide_arrays
def divide_arrays(self, num_array, num_array_error, den_array, den_array_error):
'''
This function calculates the element-wise ratio of two arrays and the corresponding error values
'''
nbr_elements = np.shape(num_array)[0]
# calculate the ratio array
ratio_array = np.zeros(nbr_elements)
for i in range(nbr_elements):
if den_array[i] == 0:
_tmp_ratio = 0
else:
_tmp_ratio = num_array[i] / den_array[i]
ratio_array[i] = _tmp_ratio
# calculate the error of the ratio array
ratio_error_array = np.zeros(nbr_elements)
for i in range(nbr_elements):
if (num_array[i] == 0) or (den_array[i] == 0):
ratio_error_array[i] = 0
else:
tmp1 = pow(num_array_error[i] / num_array[i],2)
tmp2 = pow(den_array_error[i] / den_array[i],2)
ratio_error_array[i] = math.sqrt(tmp1+tmp2)*(num_array[i]/den_array[i])
return [ratio_array, ratio_error_array]
Author: JeanBilheux | Project: RefRed | Lines: 28 | Source: reduction_quicknxs.py
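The two loops can also be written as vectorized numpy expressions; a sketch of the same ratio-and-error computation (a rewritten equivalent, not the project's own API), with np.where guarding the zero denominators:

import numpy as np

def divide_arrays_vectorized(num, num_err, den, den_err):
    num, den = np.asarray(num, float), np.asarray(den, float)
    num_err, den_err = np.asarray(num_err, float), np.asarray(den_err, float)
    safe_den = np.where(den == 0, 1.0, den)   # dummy values, masked out below
    safe_num = np.where(num == 0, 1.0, num)
    ratio = np.where(den == 0, 0.0, num / safe_den)
    rel_sq = (num_err / safe_num) ** 2 + (den_err / safe_den) ** 2
    ratio_err = np.where((num == 0) | (den == 0), 0.0, np.sqrt(rel_sq) * ratio)
    return ratio, ratio_err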
Example 8: test_reset_data_shape
def test_reset_data_shape(self):
shape1 = 10, 10, 10
shape3 = 10, 10, 10, 3
# Init data (explicit shape)
data = np.zeros((10, 10, 10, 1), dtype=np.uint8)
T = Texture3D(data=data)
assert T.shape == (10, 10, 10, 1)
assert T._format == gl.GL_LUMINANCE
# Set data to rgb
T.set_data(np.zeros(shape3, np.uint8))
assert T.shape == (10, 10, 10, 3)
assert T._format == gl.GL_RGB
# Set data to grayscale
T.set_data(np.zeros(shape1, np.uint8))
assert T.shape == (10, 10, 10, 1)
assert T._format == gl.GL_LUMINANCE
# Set size to rgb
T.resize(shape3)
assert T.shape == (10, 10, 10, 3)
assert T._format == gl.GL_RGB
# Set size to grayscale
T.resize(shape1)
assert T.shape == (10, 10, 10, 1)
assert T._format == gl.GL_LUMINANCE
Author: gbaty | Project: vispy | Lines: 29 | Source: test_texture.py
Example 9: torgerson
def torgerson(distances, n_components=2):
"""
Perform classical mds (Torgerson scaling).
.. note::
If the distances are euclidean then this is equivalent to projecting
the original data points to the first `n` principal components.
"""
distances = np.asarray(distances)
assert distances.shape[0] == distances.shape[1]
N = distances.shape[0]
# O ^ 2
D_sq = distances ** 2
# double center the D_sq
rsum = np.sum(D_sq, axis=1, keepdims=True)
csum = np.sum(D_sq, axis=0, keepdims=True)
total = np.sum(csum)
D_sq -= rsum / N
D_sq -= csum / N
D_sq += total / (N ** 2)
B = np.multiply(D_sq, -0.5, out=D_sq)
U, L, _ = np.linalg.svd(B)
if n_components > N:
U = np.hstack((U, np.zeros((N, n_components - N))))
L = np.hstack((L, np.zeros((n_components - N))))
U = U[:, :n_components]
L = L[:n_components]
D = np.diag(np.sqrt(L))
return np.dot(U, D)
Author: RachitKansal | Project: orange3 | Lines: 32 | Source: manifold.py
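A hedged usage sketch: feeding torgerson the pairwise Euclidean distances of known 2-D points should recover the configuration up to rotation, reflection, and translation:

import numpy as np
from scipy.spatial.distance import pdist, squareform

pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0], [3.0, 1.0]])
D = squareform(pdist(pts))           # (4, 4) symmetric distance matrix
emb = torgerson(D, n_components=2)   # (4, 2) embedding
# pairwise distances of emb match those of pts to numerical precision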
Example 10: sort_assemblies
def sort_assemblies(self, pattern, assemblies) :
""" Sort the assemblies by reactivity.
"""
# TODO(robertsj): Consider a cleaner approach for this sorting.
# We build a 2-d array of [index,kinf] pairs. Sorting this gives
# permuted index in the first entry. The location of each
# original index will become the new pattern. (Note that kinf
# is negated so we get descending order of reactivity. It seems
# argsort has no option for ascend/descend.)
pattern_length = len(pattern)
index = np.zeros((pattern_length,2))
for i in range(0, pattern_length) :
index[i][0] = i
index[i][1] = -assemblies[i].kinf()
index=index[index[:,1].argsort(),0]
# Define the sorted pattern and assemblies using the permuted
# indices. Note that each pattern element will be unique, even
# if a small number of unique assemblies defined the pattern
# initially.
sorted_pattern = np.zeros(len(pattern),dtype='i')
sorted_assemblies = []
for i in range(0, pattern_length) :
sorted_pattern[i] = (np.where(index == i))[0][0]
sorted_assemblies.append(assemblies[int(index[i])])
return sorted_pattern, sorted_assemblies
Author: archphy | Project: poropy | Lines: 28 | Source: reactor.py
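The permutation bookkeeping above can be reproduced with two argsort calls; a minimal sketch with hypothetical kinf values (negation gives the descending order the comment mentions):

import numpy as np

kinf = np.array([1.02, 1.15, 0.98, 1.10])  # hypothetical assembly reactivities
order = np.argsort(-kinf)                  # indices in descending kinf: [1 3 0 2]
sorted_pattern = np.argsort(order)         # new position of each original index: [2 0 3 1]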
Example 11: test_setitem_all_no_store
def test_setitem_all_no_store(self):
data = np.zeros((10, 10), dtype=np.uint8)
T = Texture(data=data, store=False)
T[...] = np.ones((10, 10), np.uint8)
assert len(T._pending_data) == 1
assert np.allclose(data, np.zeros((10, 10)))
Author: gbaty | Project: vispy | Lines: 7 | Source: test_texture.py
Example 12: _create_collision_coefficient_matrix
def _create_collision_coefficient_matrix(self):
self.C_ul_interpolator = {}
self.delta_E_matrices = {}
self.g_ratio_matrices = {}
collision_group = self.atom_data.collision_data.groupby(level=['atomic_number', 'ion_number'])
for species in self.nlte_species:
no_of_levels = self.atom_data.levels.ix[species].energy.count()
C_ul_matrix = np.zeros(
(
no_of_levels,
no_of_levels,
len(self.atom_data.collision_data_temperatures))
)
delta_E_matrix = np.zeros((no_of_levels, no_of_levels))
g_ratio_matrix = np.zeros((no_of_levels, no_of_levels))
for (
atomic_number,
ion_number,
level_number_lower,
level_number_upper), line in (
collision_group.get_group(species).iterrows()):
# line.columns : delta_e, g_ratio, temperatures ...
C_ul_matrix[level_number_lower, level_number_upper, :] = line.values[2:]
delta_E_matrix[level_number_lower, level_number_upper] = line['delta_e']
# TODO TARDISATOMIC fix: change the g_ratio to be the other way round; I flip them here for now.
g_ratio_matrix[level_number_lower, level_number_upper] = line['g_ratio']
self.C_ul_interpolator[species] = interpolate.interp1d(
self.atom_data.collision_data_temperatures,
C_ul_matrix)
self.delta_E_matrices[species] = delta_E_matrix
self.g_ratio_matrices[species] = g_ratio_matrix
Author: rcthomas | Project: tardis | Lines: 33 | Source: atomic.py
Example 13: conv_backward_naive
def conv_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a convolutional layer.
Inputs:
- dout: Upstream derivatives.
- cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive
Returns a tuple of:
- dx: Gradient with respect to x
- dw: Gradient with respect to w
- db: Gradient with respect to b
"""
dx, dw, db = None, None, None
x, w, b, conv_param = cache
stride = conv_param['stride']
pad = conv_param['pad']
N, C, H, W = x.shape
F, _, HH, WW = w.shape
Hp = 1 + (H + 2 * pad - HH) / stride
Wp = 1 + (W + 2 * pad - WW) / stride
dx = np.zeros(x.shape)
dw = np.zeros(w.shape)
db = np.zeros(b.shape)
for i in xrange(N):
# for j in xrange(F):
data = x[i]
data = np.pad(data, ((0, 0), (pad, pad), (pad, pad)), 'constant')
paded_dxi = np.pad(dx[i], ((0, 0), (pad, pad), (pad, pad)), 'constant')
filter_vert_indices = 0
filter_hori_indices = 0
for s in xrange(Hp):
filter_hori_indices = 0
for p in xrange(Wp):
data_fragment = data[:, filter_vert_indices:filter_vert_indices+HH,
filter_hori_indices:filter_hori_indices+WW]
dw += np.einsum('i, jkl->ijkl', dout[i, :, s, p], data_fragment)
# paded_dxi[:, filter_vert_indices:filter_vert_indices+HH,
# filter_hori_indices:filter_hori_indices+WW] = \
# np.einsum('ijkl,i->jkl', w, dout[i, :, s, p])
# paded_dxi[:, filter_vert_indices:filter_vert_indices+HH,
# filter_hori_indices:filter_hori_indices+WW] = \
# np.tensordot(w, dout[i, :, s, p], axes = ([0], [0]))
for f in xrange(F):
paded_dxi[:, filter_vert_indices:filter_vert_indices+HH,
filter_hori_indices:filter_hori_indices+WW] \
+= w[f] * dout[i, f, s, p]
filter_hori_indices += stride
filter_vert_indices += stride
dx[i] = paded_dxi[:, pad:-pad, pad:-pad]
db = np.einsum('ijkl->j', dout)
# print(dx)
return dx, dw, db
Author: Zardinality | Project: cs231n_project | Lines: 60 | Source: layers.py
Example 14: conv3d_oneToMany
def conv3d_oneToMany(x, xShape, w, wShape, strideT, strideY, strideX, inName):
[ntp, nyp, nxp, nifp, nofp] = wShape
[nb, nt, ny, nx, nf] = xShape
# weight and input dimensions must each be divisible by the stride
assert ntp % strideT == 0
assert nyp % strideY == 0
assert nxp % strideX == 0
assert nt % strideT == 0
assert ny % strideY == 0
assert nx % strideX == 0
assert nifp == nf
print "Building weight indices for conv3d"
# Build gather indices for weights
# Must be in shape of target output weights
weightIdxs = np.zeros(
(int(ntp / strideT), int(nyp / strideY), int(nxp / strideX), nifp, nofp * strideT * strideX * strideY, 5)
).astype(np.int32)
# Adding kernel number to end of features
for itp in range(ntp):
for iyp in range(nyp):
for ixp in range(nxp):
for iifp in range(nifp):
for iofp in range(nofp):
# Calculate output indices given input indices
# Must reverse, as we're using conv3d as a transposed conv3d
otp = int((ntp - itp - 1) / strideT)
oyp = int((nyp - iyp - 1) / strideY)
oxp = int((nxp - ixp - 1) / strideX)
oifp = iifp # Input features stay the same
# oofp uses iofp as offset, plus an nf stride based on which kernel it belongs to
kernelIdx = (itp % strideT) * strideY * strideX + (iyp % strideY) * strideX + (ixp % strideX)
oofp = iofp + nofp * kernelIdx
weightIdxs[otp, oyp, oxp, oifp, oofp, :] = [itp, iyp, ixp, iifp, iofp]
print "Building output indices for conv3d"
# Build gather indices for output
# Must be in shape of target output data
dataIdxs = np.zeros((nb, nt * strideT, ny * strideY, nx * strideX, nofp, 5)).astype(np.int32)
for oob in range(nb):
for oot in range(nt * strideT):
for ooy in range(ny * strideY):
for oox in range(nx * strideX):
for oof in range(nofp):
# Calculate input indices given output indices
iib = oob
iit = oot / strideT
iiy = ooy / strideY
iix = oox / strideX
kernelIdx = (oot % strideT) * strideY * strideX + (ooy % strideY) * strideX + (oox % strideX)
iif = oof + nofp * kernelIdx
dataIdxs[oob, oot, ooy, oox, oof, :] = [iib, iit, iiy, iix, iif]
# Build convolution structure
w_reshape = tf.gather_nd(w, weightIdxs)
o_reshape = tf.nn.conv3d(x, w_reshape, strides=[1, 1, 1, 1, 1], padding="SAME", name=inName)
o = tf.gather_nd(o_reshape, dataIdxs)
return o
Author: slundqui | Project: TFSparseCode | Lines: 60 | Source: utils.py
Example 15: fix_labels
def fix_labels(mnist_label, add_num):
"""
Args:
mnist_label: [[int]] array, class labels
add_num: int, number of rows to append
Returns:
[[int]] array
"""
c_num = len(mnist_label[0])
# add one dimension
fixed_label = np.c_[mnist_label, np.zeros(len(mnist_label))]
assert len(fixed_label[0]) == c_num + 1
# generate new class label
new_label = np.zeros(c_num + 1)
new_label[c_num] = 1
new_label = np.array([new_label for i in range(add_num)])
# add new class label
fixed_label = np.r_[fixed_label, new_label]
assert len(fixed_label) == len(mnist_label) + add_num
return fixed_label
Author: masaponto | Project: dentaku | Lines: 27 | Source: data_processer.py
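A quick usage sketch with a toy 3-class one-hot array (hypothetical data), showing the appended zero column and the add_num new rows that mark the extra class:

import numpy as np

mnist_label = np.array([[1, 0, 0],
                        [0, 1, 0]])      # two 3-class one-hot labels
fixed = fix_labels(mnist_label, 2)
# fixed.shape == (4, 4): the old labels gain a trailing zero column,
# and two rows [0. 0. 0. 1.] are appended for the new class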
Example 16: all_patches
def all_patches(padded_brain,i,predict_patchsize,obs_patchsize,num_channels):
image = padded_brain[i]
ishape_h , ishape_w = padded_brain.shape[1:3]
#ipdb.set_trace()
#ipdb.set_trace()
half_obs_patchsize = obs_patchsize/2
half_predict_patchsize = predict_patchsize/2
extended_image = np.zeros((ishape_h+obs_patchsize-predict_patchsize,ishape_w+obs_patchsize-predict_patchsize,num_channels))
extended_image[half_obs_patchsize - half_predict_patchsize : -(half_obs_patchsize - half_predict_patchsize),half_obs_patchsize - half_predict_patchsize : -(half_obs_patchsize - half_predict_patchsize)]= image
num_patches_rows = ishape_h // predict_patchsize
num_patches_cols = ishape_w // predict_patchsize
list_patches = np.zeros((num_patches_cols*num_patches_rows, obs_patchsize, obs_patchsize, num_channels))
index = 0
h_range = np.arange(obs_patchsize/2,ishape_h+obs_patchsize/2,predict_patchsize)
#h_range = h_range[:-1]
v_range = np.arange(obs_patchsize/2,ishape_w+obs_patchsize/2,predict_patchsize)
#v_range = v_range[:-1]
#ipdb.set_trace()
for index_h in h_range:
for index_w in v_range:
patch_brian = extended_image[index_h-obs_patchsize/2: index_h+obs_patchsize/2 ,index_w-obs_patchsize/2: index_w+obs_patchsize/2,:]
#if patch_brian.shape == (38,29,4):
# ipdb.set_trace()
list_patches[index,:,:,:] = patch_brian
index += 1
#ipdb.set_trace()
assert index == num_patches_rows*num_patches_cols
return list_patches
Author: havaeimo | Project: PL2_added_layers | Lines: 31 | Source: generate_prediction_patch.py
Example 17: photoz
def photoz(s1100,e1100=0.,s14=0.,e14=0.,ntry=50000):
'''
Determine the photometric redshift of a galaxy given the
measured 1.4 cm and 1100 micron flux and uncertainty
'''
z = np.arange(0,10,.05)
ngal = 44
if s14 == 0:
ratioin = -1
ratiosig = -1
else:
ratioin = s1100/s14
ratiosig = (e1100/s1100**2+e14/s14**2)**.5
a = idlsave.read('fluxratio1100.sav')
dat = a.get('data')
zs = a.get('redshift')
averatio = np.zeros(200)
sigma = np.zeros(200)
array = np.random.randn(ntry)
array1 = np.random.randn(ntry)
if s14 <= 0.:
ydarts = (s1100+array*e1100)/(np.abs(array1*e14))
else:
ydarts = array*ratiosig+ratioin
xdarts = np.zeros(ntry)
for i in range(ntry):
jrangal = np.floor(ngal*np.random.rand(1))[0]
testtrack = dat[:,jrangal]
yval = ydarts[i]
xdarts[i] = np.interp(yval,testtrack,z)
return xdarts,ydarts
Author: tconklin | Project: coprops | Lines: 31 | Source: getz.py
Example 18: backprop
def backprop(self, x, y):
"""Return a tuple ``(nabla_b, nabla_w)`` representing the
gradient for the cost function C_x. ``nabla_b`` and
``nabla_w`` are layer-by-layer lists of numpy arrays, similar
to ``self.biases`` and ``self.weights``."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
# feedforward
activation = x
activations = [x] # list to store all the activations, layer by layer
zs = [] # list to store all the z vectors, layer by layer
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation)+b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
# backward pass
delta = self.cost_derivative(activations[-1], y) * \
sigmoid_prime(zs[-1])
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
# Note that the variable l in the loop below is used a little
# differently to the notation in Chapter 2 of the book. Here,
# l = 1 means the last layer of neurons, l = 2 is the
# second-last layer, and so on. It's a renumbering of the
# scheme in the book, used here to take advantage of the fact
# that Python can use negative indices in lists.
for l in xrange(2, self.num_layers):
z = zs[-l]
sp = sigmoid_prime(z)
delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return (nabla_b, nabla_w)
Author: ztq09290929 | Project: AnnTest | Lines: 34 | Source: network.py
Example 19: train_set_loss_vars_for_cur_batches
def train_set_loss_vars_for_cur_batches(self):
"""
Called via Engine.SeqTrainParallelControl.
"""
assert self.train_have_loss_for_cur_batches()
# See EngineUtil.assign_dev_data for reference.
from Dataset import Dataset
n_time, n_batch = Dataset.index_shape_for_batches(self.train_batches)
n_output_dim = self.output_layer.attrs['n_out']
output_loss = numpy.zeros((n_batch,), "float32")
output_hat_y = numpy.zeros((n_time, n_batch, n_output_dim), "float32")
offset_slice = 0
for batch in self.train_batches:
for seq in batch.seqs:
o = seq.batch_frame_offset
q = seq.batch_slice + offset_slice
l = seq.frame_length
# input-data, input-index will also be set in this loop. That is data-key "data".
for k in [self.output_target]:
if l[k] == 0: continue
loss, hat_y = self.get_loss_and_hat_y(seq.seq_idx)
assert seq.seq_start_frame[k] < hat_y.shape[0]
assert seq.seq_end_frame[k] <= hat_y.shape[0]
output_loss[q] += loss * float(l[k]) / hat_y.shape[0]
output_hat_y[o[k]:o[k] + l[k], q] = hat_y[seq.seq_start_frame[k]:seq.seq_end_frame[k]]
self.output_var_loss.set_value(output_loss)
self.output_var_hat_y.set_value(output_hat_y)
Author: atuxhe | Project: returnn | Lines: 27 | Source: SprintErrorSignals.py
Example 20: value_of_policy
def value_of_policy(sigma):
"Computes the value of following policy sigma."
# Set up the stochastic kernel p_sigma as a 2D array:
N = len(S)
p_sigma = zeros((N, N))
for x in S:
for y in S:
p_sigma[x, y] = phi(y - sigma[x])
# Create the right Markov operator M_sigma:
M_sigma = lambda h: dot(p_sigma, h)
# Set up the function r_sigma as an array:
r_sigma = array([U(x - sigma[x]) for x in S])
# Reshape r_sigma into a column vector:
r_sigma = r_sigma.reshape((N, 1))
# Initialize v_sigma to zero:
v_sigma = zeros((N,1))
# Initialize the discount factor to 1:
discount = 1
for i in range(50):
v_sigma = v_sigma + discount * r_sigma
r_sigma = M_sigma(r_sigma)
discount = discount * rho
return v_sigma
Author: 1simon | Project: edtc-code | Lines: 29 | Source: kurtzvsigma.py
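For reference, the loop above evaluates the first 50 terms of the Neumann series for the policy value. With P_sigma the stochastic kernel built above, and assuming rho < 1 so the series converges:

v_\sigma = \sum_{t=0}^{49} \rho^t P_\sigma^t \, r_\sigma \approx (I - \rho P_\sigma)^{-1} r_\sigma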
Note: The numpy.zeros examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. For distribution and use, refer to the corresponding project's license; do not repost without permission.