This article collects typical usage examples of Python's numpy.tile function. If you have been wondering how exactly to use the tile function, how it behaves, or what it looks like in real code, the hand-picked examples below should help.
The following shows 20 code examples of the tile function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
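Before the examples, a minimal standalone sketch of what np.tile does (illustrative values only):

import numpy as np

a = np.array([1, 2, 3])
print(np.tile(a, 2))                # [1 2 3 1 2 3]      -- repeat along the last axis
print(np.tile(a, (2, 1)))           # [[1 2 3] [1 2 3]]  -- stack 2 copies as rows
print(np.tile(a, (2, 1, 1)).shape)  # (2, 1, 3)          -- extra reps dimensions add leading axes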
Example 1: grad_EVzxVzxT_by_hyper_exact

def grad_EVzxVzxT_by_hyper_exact(self, EVzxVzxT_list_this, Z, A, B, hyperno):
    P = Z.shape[0]
    R = Z.shape[1]
    N = A.shape[0]

    if hyperno != 0:
        return EVzxVzxT_list_this * 0

    alpha = self.length_scale * self.length_scale

    I = np.identity(R)
    S = np.diag(B[0, :] * B[0, :])
    Sinv = np.diag(1 / (B[0, :] * B[0, :]))  # parenthesized: elementwise inverse of S's diagonal
    C = I * alpha
    Cinv = I * (1 / alpha)
    CinvSinv = 2 * Cinv + Sinv
    CinvSinv_inv = np.diag(1 / CinvSinv.diagonal())

    dC = self.length_scale * I
    dCinv = -Cinv.dot(dC).dot(Cinv)
    dCinvSinv = 2 * dCinv
    dCinvSinv_inv = -CinvSinv_inv.dot(dCinvSinv).dot(CinvSinv_inv)

    S1 = (
        dCinv
        - dCinv.dot(CinvSinv_inv).dot(Cinv)
        - Cinv.dot(dCinvSinv_inv).dot(Cinv)
        - Cinv.dot(CinvSinv_inv).dot(dCinv)
    )
    S2 = -Sinv.dot(dCinvSinv_inv).dot(Sinv)
    S3 = Sinv.dot(dCinvSinv_inv).dot(Cinv) + Sinv.dot(CinvSinv_inv).dot(dCinv)
    S4 = dCinv.dot(CinvSinv_inv).dot(Cinv) + Cinv.dot(dCinvSinv_inv).dot(Cinv) + Cinv.dot(CinvSinv_inv).dot(dCinv)

    T1s = np.tile(Z.dot(S1).dot(Z.T).diagonal(), [P, 1])
    T1 = np.tile(T1s, [N, 1, 1])
    T2s = T1s.T
    T2 = np.tile(T2s, [N, 1, 1])
    T3 = np.tile(Z.dot(S4).dot(Z.T), [N, 1, 1])
    T4 = np.tile(A.dot(S2).dot(A.T).diagonal(), [P, 1]).T
    T4 = np.expand_dims(T4, axis=2)
    T4 = np.repeat(T4, P, axis=2)
    T5 = A.dot(S3).dot(Z.T)
    T5 = np.expand_dims(T5, axis=2)
    T5 = np.repeat(T5, P, axis=2)
    T6 = np.swapaxes(T5, 1, 2)

    SCinvI = 2 * Cinv.dot(S) + I
    SCinvI_inv = np.diag(1 / SCinvI.diagonal())
    (temp, logDetSCinvI) = np.linalg.slogdet(SCinvI)
    detSCinvI = np.exp(logDetSCinvI)
    dDetSCinvI = -0.5 * np.power(detSCinvI, -0.5) * SCinvI_inv.dot(2 * dCinv).dot(S).trace()

    expTerm = EVzxVzxT_list_this / np.power(detSCinvI, -0.5)

    res = EVzxVzxT_list_this * (-0.5 * T1 - 0.5 * T2 + T3 - 0.5 * T4 + T5 + T6) + dDetSCinvI * expTerm
    res = np.sum(res, axis=0)
    return res

Author: LinZhineng, Project: atldgp, Lines: 60, Source: RBFKernel.py
Example 2: testSoftmaxMNIST

def testSoftmaxMNIST():
    x_, y_ = getData("training_images.gz", "training_labels.gz")
    N = 600
    x = x_[0:N].reshape(N, 784).T/255.0
    y = np.zeros((10, N))
    for i in xrange(N):
        y[y_[i][0]][i] = 1

    #nn1 = SimpleNN(784, 800, 10, 100, 0.15, 0.4, False)
    #nn2 = SimpleNN(784, 800, 10, 1, 0.15, 0.4, False)
    nn3 = Softmax(784, 800, 1, 10, 0.15, 0, False)
    nn4 = Softmax(784, 800, 10, 10, 0.35, 0, False)

    #nn1.Train(x, y)
    #nn2.Train(x, y)
    nn3.Train(x, y)
    nn4.Train(x, y)

    N = 10000
    x_, y_ = getData("test_images.gz", "test_labels.gz")
    x = x_.reshape(N, 784).T/255.0
    y = y_.T
    correct = np.zeros((4, 1))

    print "Testing"
    startTime = time()
    for i in xrange(N):
        #h1 = nn1.Evaluate(np.tile(x.T[i].T, (1, 1)).T)
        #h2 = nn2.Evaluate(np.tile(x.T[i].T, (1, 1)).T)
        h3 = nn3.Evaluate(np.tile(x.T[i].T, (1, 1)).T)
        h4 = nn4.Evaluate(np.tile(x.T[i].T, (1, 1)).T)

        #if h1[y[0][i]][0] > 0.8:
        #    correct[0][0] += 1
        #if h2[y[0][i]][0] > 0.8:
        #    correct[1][0] += 1
        if h3[y[0][i]][0] > 0.8:
            correct[2][0] += 1
        if h4[y[0][i]][0] > 0.8:
            correct[3][0] += 1

        if(i > 0):
            stdout.write("Testing %d/%d image. Time Elapsed: %ds. \r" % (i, N, time() - startTime))
            stdout.flush()
    stdout.write("\n")

    #print "Accuracy 1: ", correct[0][0]/10000.0 * 100, "%"
    #print "Accuracy 2: ", correct[1][0]/10000.0 * 100, "%"
    print "Accuracy 3: ", correct[2][0]/10000.0 * 100, "%"
    print "Accuracy 4: ", correct[3][0]/10000.0 * 100, "%"

Author: devjeetr, Project: ufldl-exercises, Lines: 60, Source: test.py
Example 3: all_shar_trials

def all_shar_trials(nblocks=5, ntargets=8, distance=10):
    '''
    Generates a sequence of 2D (x and z) target pairs with the first target
    always at the origin and a second field indicating the extractor type (always shared)
    '''
    # Make blocks of 80 trials:
    theta = []
    for i in range(10):
        temp = np.arange(0, 2*np.pi, 2*np.pi/ntargets)
        np.random.shuffle(temp)
        theta = theta + [temp]
    theta = np.hstack(theta)

    # Each target has correct % of private and correct % of shared targets
    trial_type = np.empty(len(theta), dtype='S10')
    trial_type[:] = 'shared'

    # Make target set:
    x = distance*np.cos(theta)
    y = np.zeros(len(theta))
    z = distance*np.sin(theta)

    pairs = np.zeros([len(theta), 2, 3])
    pairs[:, 1, :] = np.vstack([x, y, z]).T

    Pairs = np.tile(pairs, [nblocks, 1, 1])
    Trial_type = np.tile(trial_type, [nblocks])

    # Will yield a tuple where target location is in next_trial[0], trial_type is in next_trial[1]
    return zip(Pairs, Trial_type)

Author: pkhanna104, Project: fa_analysis, Lines: 30, Source: factor_analysis_tasks.py
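For a sense of the shapes produced above, a minimal sketch (it assumes the snippet's Python 2 semantics, where zip returns a list):

seq = all_shar_trials(nblocks=2, ntargets=8, distance=10)
print(len(seq))              # 160 trials: 2 blocks x (10 shuffles x 8 targets)
targets, ttype = seq[0]
print(targets.shape, ttype)  # (2, 3) 'shared' -- origin plus one target on a circle of radius 10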
Example 4: __update_b_vec

def __update_b_vec(self, cur_obs):
    # convert measurement vector into emission probabilities
    # repeat the observation in columns
    cur_obs_mat = np.tile(cur_obs, (self.V_mat.shape[1], 1)).T
    masked_mat = cur_obs_mat == self.V_mat

    # Extract the probability of the observation on each link for each state
    p_obs_given_off_link = np.sum(self.off_links*masked_mat, axis=1)
    p_obs_given_on_link = np.sum(self.on_links*masked_mat, axis=1)

    # replicate the probability of each measurement on each link for each state
    p_obs_mat_off = np.tile(p_obs_given_off_link, (self.num_states, 1)).T
    p_obs_mat_on = np.tile(p_obs_given_on_link, (self.num_states, 1)).T

    # Compute emission probabilities
    tmp1 = self.codewords*p_obs_mat_on
    tmp2 = np.logical_not(self.codewords)*p_obs_mat_off
    tmp3 = tmp1 + tmp2

    # divide tmp3 into groups of 4. Multiply and normalize
    prev = np.ones(self.num_states)
    start_mark = 0
    end_mark = 4
    group = end_mark
    while start_mark < self.num_links:
        current = np.product(tmp3[start_mark:np.minimum(self.num_links, end_mark), :], axis=0)
        current = current/np.sum(current)
        prev = (prev*current)/np.sum(prev*current)
        end_mark += group
        start_mark += group

    # add emission probabilities to the circular buffer
    self.C.add_observation(prev)

Author: peterhillyard, Project: double_border, Lines: 33, Source: hmm_border_class_v1.py
Example 5: grad_EVzxVzxT_by_Z

def grad_EVzxVzxT_by_Z(self, EVzxVzxT_list_this, Z, A, B, p, r):
    P = Z.shape[0]
    R = Z.shape[1]
    N = A.shape[0]

    ainv = 1 / (self.length_scale * self.length_scale)
    siginv = 1 / (B[0, 0] * B[0, 0])

    dZthis = np.zeros([1, R])
    dZthis[0, r] = 1

    res1 = -0.5 * (dZthis.dot(Z[p, :]) + Z[p, :].dot(dZthis.T)) * (ainv - ainv * (1 / (siginv + 2 * ainv)) * ainv)
    res2 = np.tile(dZthis.dot(A.T) * (ainv * (1 / (siginv + 2 * ainv)) * siginv), [P, 1])
    res3 = np.tile(dZthis.dot(Z.T) * (ainv * (1 / (siginv + 2 * ainv)) * ainv), [N, 1])

    dZ = np.zeros([N, P, P])
    dZ[:, p, :] += np.float64(res1) + res2.T + res3
    dZ[:, :, p] += np.float64(res1) + res2.T + res3

    # set the diagonal
    # dZ[:,p,p] = dZ[:,p,p]/2.

    res = np.sum(EVzxVzxT_list_this * dZ, axis=0)
    return res

Author: LinZhineng, Project: atldgp, Lines: 30, Source: RBFKernel.py
Example 6: add_bbox_regression_targets

def add_bbox_regression_targets(roidb):
    num_images = len(roidb)
    num_classes = roidb[0]['gt_overlaps'].shape[1]
    for idx in xrange(num_images):
        rois = roidb[idx]['boxes']
        max_overlaps = roidb[idx]['max_overlaps']
        max_classes = roidb[idx]['max_classes']
        roidb[idx]['bbox_targets'] = _compute_targets(rois, max_overlaps, max_classes)

    means = np.tile(
        np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS), (num_classes, 1))
    stds = np.tile(
        np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS), (num_classes, 1))

    print 'means'
    print means
    print 'stds'
    print stds

    print 'Normalizing targets'
    for idx in xrange(num_images):
        targets = roidb[idx]['bbox_targets']
        for cls in xrange(1, num_classes):
            cls_inds = np.where(targets[:, 0] == cls)[0]
            # normalize only the rows belonging to this class; column 0 holds the class label
            roidb[idx]['bbox_targets'][cls_inds, 1:] -= means[cls, :]
            roidb[idx]['bbox_targets'][cls_inds, 1:] /= stds[cls, :]

    return means.ravel(), stds.ravel()

Author: abhishekambastha, Project: pedestrian-rcnn, Lines: 28, Source: bbox_targets.py
Example 7: test_mean_std_12bit

def test_mean_std_12bit(self):
    # Input 12-bit, with an 8-bit color target
    input_scene = np.tile(np.arange(4096)[:, None, None], (1, 1, 3))
    color_target = np.tile(np.arange(256)[:, None, None], (1, 1, 3))
    luts = hm.mean_std_luts(input_scene.astype(np.uint16),
                            color_target.astype(np.uint8))

    np.testing.assert_array_equal(luts[0], luts[1])
    np.testing.assert_array_equal(luts[1], luts[2])

    lut = luts[0]
    assert np.all(lut[:8] == 0)
    assert np.all(lut[-8:] == 4096)
    assert np.diff(lut[8:-8]).min() == 1
    assert np.diff(lut[8:-8]).max() == 2

    # Input 12-bit, with a 12-bit color target
    input_scene = np.tile(np.arange(4096)[:, None, None], (1, 1, 3))
    color_target = np.tile(np.arange(4096)[:, None, None], (1, 1, 3))
    luts = hm.mean_std_luts(input_scene.astype(np.uint16),
                            color_target.astype(np.uint16))

    # Should be a 1 to 1 look-up-table...
    np.testing.assert_array_equal(luts[0], np.arange(4097))

Author: huleg, Project: color_balance, Lines: 26, Source: histogram_match_tests.py
Example 8: test_001_t

def test_001_t(self):
    num_frames = 5
    total_subcarriers = 8
    used_subcarriers = 4
    channel_map = ft.get_channel_map(used_subcarriers, total_subcarriers)
    payload_symbols = 8
    overlap = 4
    num_preamble_symbols = 4

    payload = ft.get_payload(payload_symbols, used_subcarriers)
    frame = ft.get_frame(payload, total_subcarriers, channel_map, payload_symbols, overlap)

    frame = np.tile(frame, num_frames).flatten()
    payload = np.tile(payload, num_frames).flatten()

    # set up fg
    src = blocks.vector_source_c(frame, repeat=False, vlen=total_subcarriers)
    deframer = fbmc.deframer_vcb(used_subcarriers, total_subcarriers, num_preamble_symbols, payload_symbols, overlap, channel_map)
    snk = blocks.vector_sink_b(1)
    self.tb.connect(src, deframer, snk)
    self.tb.run()

    # check data
    res = np.array(snk.data())
    print res
    print payload
    self.assertTupleEqual(tuple(payload), tuple(res))

Author: kit-cel, Project: gr-fbmc, Lines: 27, Source: qa_deframer_vcb.py
Example 9: boxfilter

def boxfilter(I, r):
    """Fast box filter implementation.

    Parameters
    ----------
    I: a single channel/gray image data normalized to [0.0, 1.0]
    r: window radius

    Return
    -----------
    The filtered image data.
    """
    M, N = I.shape
    dest = np.zeros((M, N))

    # cumulative sum over Y axis
    sumY = np.cumsum(I, axis=0)
    # difference over Y axis
    dest[:r + 1] = sumY[r: 2 * r + 1]
    dest[r + 1:M - r] = sumY[2 * r + 1:] - sumY[:M - 2 * r - 1]
    dest[-r:] = np.tile(sumY[-1], (r, 1)) - sumY[M - 2 * r - 1:M - r - 1]

    # cumulative sum over X axis
    sumX = np.cumsum(dest, axis=1)
    # difference over X axis
    dest[:, :r + 1] = sumX[:, r:2 * r + 1]
    dest[:, r + 1:N - r] = sumX[:, 2 * r + 1:] - sumX[:, :N - 2 * r - 1]
    dest[:, -r:] = np.tile(sumX[:, -1][:, None], (1, r)) - \
        sumX[:, N - 2 * r - 1:N - r - 1]

    return dest

Author: guanlongzhao, Project: dehaze, Lines: 31, Source: guidedfilter.py
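A small usage sketch for the box filter above (hypothetical array sizes; boxfilter is assumed importable from the guidedfilter module listed as the source):

import numpy as np

I = np.random.rand(100, 120)             # gray image normalized to [0.0, 1.0]
r = 8                                    # window radius
box_sum = boxfilter(I, r)                # sum of I over a (2r+1)x(2r+1) window, truncated at the borders
box_cnt = boxfilter(np.ones_like(I), r)  # number of pixels actually inside each window
local_mean = box_sum / box_cnt           # local mean, the usual normalization step in the guided filter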
Example 10: reconstr_freq

def reconstr_freq(center_freq, pts, sweep_up=True, bdwth=1.):
    ''' Reconstruct frequency array.

    Arguments:
        center_freq -- center frequency of each sweep. float or np.array
        pts         -- dimension of the frequency array. int
        **sweep_up  -- first sweep frequency increases. default True. boolean
        **bdwth     -- sweep bandwidth (MHz), default 1. float

    Returns:
        freq -- frequency array, np.array 1D/2D
    '''
    if sweep_up:
        single_band = bdwth * (np.arange(pts)/(pts-1) - 0.5)
    else:
        single_band = bdwth * (0.5 - np.arange(pts)/(pts-1))

    if isinstance(center_freq, np.ndarray):
        freq = np.tile(single_band, (len(center_freq), 1)).transpose() + \
               np.tile(center_freq, (pts, 1))
    else:
        freq = single_band + center_freq

    return freq

Author: luyaozou, Project: SweepPulse, Lines: 25, Source: sweep.py
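A quick usage sketch for reconstr_freq (hypothetical values; the point spacing assumes true division in np.arange(pts)/(pts-1)):

import numpy as np

f1 = reconstr_freq(100.0, 5)                     # 1D: 5 points spanning 1 MHz centered at 100.0
f2 = reconstr_freq(np.array([100.0, 110.0]), 5)  # 2D: one column of 5 points per center frequency
print(f1.shape, f2.shape)                        # (5,) (5, 2)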
Example 11: trans_param_to_current_array

def trans_param_to_current_array(self, quantity_dict, trans_param,
                                 model='LIF', mcnc_grouping=None,
                                 std=None):
    quantity_array = quantity_dict['quantity_array']
    quantity_rate_array = np.abs(np.gradient(quantity_array)) / DT

    if model == 'LIF':
        current_array = trans_param[0] * quantity_array +\
            trans_param[1] * quantity_rate_array + trans_param[2]
        if std is not None:
            std = 0 if std < 0 else std
            current_array += np.random.normal(
                loc=0., scale=std, size=quantity_array.shape)

    if model == 'Lesniak':
        trans_param = np.tile(trans_param, (4, 1))
        trans_param[:, :2] = np.multiply(
            trans_param[:, :2].T, mcnc_grouping).T
        quantity_array = np.tile(quantity_array, (mcnc_grouping.size, 1)).T
        quantity_rate_array = np.tile(
            quantity_rate_array, (mcnc_grouping.size, 1)).T
        current_array = np.multiply(quantity_array, trans_param[:, 0]) +\
            np.multiply(quantity_rate_array, trans_param[:, 1]) +\
            np.multiply(np.ones_like(quantity_array), trans_param[:, 2])
        if std is not None:
            std = 0 if std < 0 else std
            current_array += np.random.normal(loc=0., scale=std,
                                              size=quantity_array.shape)

    return current_array

Author: yw5aj, Project: YoshiRecordingData, Lines: 27, Source: fitlif.py
Example 12: write_frames

def write_frames(self, length=10, change_frequency=6.0, checker_size=48):
    """Write video frames to file.

    Parameters
    ----------
    length : float
        Length in seconds of the written frames
    change_frequency : float
        Frequency of change in the stimulus in Hz
    checker_size : int
        Number of pixels for each checker field
    """
    # Prepare image
    checkerboard = np.tile(
        np.kron(np.array([[0, 1], [1, 0]]),
                np.ones((checker_size, checker_size))),
        (checker_size, checker_size))
    checkerboard = checkerboard[:self._frame_size[1], :self._frame_size[0]]
    image = np.tile(checkerboard[:, :, np.newaxis] * 255, (1, 1, 3))

    frame_change = self._fps // change_frequency
    assert frame_change == int(frame_change)

    # Write frames
    for frame_num in range(int(length * self._fps)):
        if frame_num % frame_change == 0:
            image = 255 - image
        self._video_writer.write(image)

Author: fnielsen, Project: brede, Lines: 29, Source: video.py
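The np.kron/np.tile combination above is the interesting part; a minimal standalone sketch with a tiny checker_size shows what it builds (no video writer needed):

import numpy as np

checker_size = 2
patch = np.kron(np.array([[0, 1], [1, 0]]),
                np.ones((checker_size, checker_size)))   # one 2x2-checker patch, shape (4, 4)
board = np.tile(patch, (checker_size, checker_size))      # repeat the patch, shape (8, 8)
rgb = np.tile(board[:, :, np.newaxis] * 255, (1, 1, 3))   # replicate into 3 color channels, shape (8, 8, 3)
print(patch.shape, board.shape, rgb.shape)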
Example 13: sample_predictive_parameters

def sample_predictive_parameters(self):
    Lext = \
        np.vstack((self.L, np.sqrt(self.eta) * np.random.randn(1, self.dim)))

    # Compute mean and covariance over extended space
    D = ((Lext[:, None, :] - Lext[None, :, :])**2).sum(2)
    Mu = -D + self.b
    Mu_row = np.tile(Mu[-1, :][:, None], (1, self.B))
    Mu_row[-1] = self._self_gaussian.mu
    Mu_col = Mu_row.copy()

    # Mu = np.tile(Mu[:,:,None], (1,1,self.B))
    # for n in xrange(self.N+1):
    #     Mu[n,n,:] = self._self_gaussian.mu

    L = np.linalg.cholesky(self.cov.sigma)
    L_row = np.tile(L[None, :, :], (self.N+1, 1, 1))
    L_row[-1] = np.linalg.cholesky(self._self_gaussian.sigma)
    L_col = L_row.copy()

    # L = np.tile(L[None,None,:,:], (self.N+1, self.N+1, 1, 1))
    # for n in xrange(self.N+1):
    #     L[n,n,:,:] = np.linalg.cholesky(self._self_gaussian.sigma)

    # Mu_row, Mu_col = Mu[-1,:,:], Mu[:,-1,:]
    # L_row, L_col = L[-1,:,:,:], L[:,-1,:,:]

    return Mu_row, Mu_col, L_row, L_col

Author: slinderman, Project: graphistician, Lines: 27, Source: weights.py
Example 14: deframesignal

def deframesignal(frames, signal_length, frame_length, frame_step, winfunc=lambda x: numpy.ones((x,))):
    '''Transform each frame back into the original signal (overlap-add reconstruction),
    presumably to undo the correlation introduced by framing.

    Parameters:
        frames:        frame matrix returned by the audio2frame function
        signal_length: length of the signal
        frame_length:  length of one frame
        frame_step:    frame step (hop size)
        winfunc:       window function applied to each frame; by default no window is applied
    '''
    # Round the parameters to integers
    signal_length = round(signal_length)  # length of the signal
    frame_length = round(frame_length)    # length of one frame
    frames_num = numpy.shape(frames)[0]   # total number of frames
    assert numpy.shape(frames)[1] == frame_length, '"frames" matrix has the wrong size: its number of columns must equal the frame length'  # check the dimensions of frames

    indices = numpy.tile(numpy.arange(0, frame_length), (frames_num, 1)) + numpy.tile(numpy.arange(0, frames_num*frame_step, frame_step), (frame_length, 1)).T  # sample positions covered by every frame, a frames_num*frame_length matrix
    indices = numpy.array(indices, dtype=numpy.int32)
    pad_length = (frames_num-1)*frame_step + frame_length  # length of the flattened signal
    if signal_length <= 0:
        signal_length = pad_length

    recalc_signal = numpy.zeros((pad_length,))      # reconstructed signal
    window_correction = numpy.zeros((pad_length,))  # window correction (overlap weight), same shape as the signal
    win = winfunc(frame_length)
    for i in range(0, frames_num):
        window_correction[indices[i, :]] = window_correction[indices[i, :]] + win + 1e-15  # how much the frames overlap at each sample
        recalc_signal[indices[i, :]] = recalc_signal[indices[i, :]] + frames[i, :]  # accumulate the overlapping frames into the reconstructed signal
    recalc_signal = recalc_signal/window_correction  # divide by the overlap at each sample to get the adjusted signal
    return recalc_signal[0:signal_length]  # return the reconstructed signal

Author: yinheyi, Project: machinelearning, Lines: 27, Source: sigprocess.py
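The indices matrix built with np.tile above is easiest to see with tiny numbers (a standalone sketch, independent of the rest of the module):

import numpy as np

frame_length, frame_step, frames_num = 4, 2, 3
indices = np.tile(np.arange(0, frame_length), (frames_num, 1)) + \
          np.tile(np.arange(0, frames_num * frame_step, frame_step), (frame_length, 1)).T
print(indices)
# [[0 1 2 3]
#  [2 3 4 5]
#  [4 5 6 7]]   -> row i holds the sample positions covered by frame i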
Example 15: getPointsForInterpolation

def getPointsForInterpolation(self, EndOfPrdvP, aNrmNow):
    '''
    Find endogenous interpolation points for each asset point and each
    discrete preference shock.

    Parameters
    ----------
    EndOfPrdvP : np.array
        Array of end-of-period marginal values.
    aNrmNow : np.array
        Array of end-of-period asset values that yield the marginal values
        in EndOfPrdvP.

    Returns
    -------
    c_for_interpolation : np.array
        Consumption points for interpolation.
    m_for_interpolation : np.array
        Corresponding market resource points for interpolation.
    '''
    c_base = self.uPinv(EndOfPrdvP)
    PrefShkCount = self.PrefShkVals.size
    PrefShk_temp = np.tile(np.reshape(self.PrefShkVals**(1.0/self.CRRA), (PrefShkCount, 1)),
                           (1, c_base.size))
    self.cNrmNow = np.tile(c_base, (PrefShkCount, 1))*PrefShk_temp
    self.mNrmNow = self.cNrmNow + np.tile(aNrmNow, (PrefShkCount, 1))

    # Add the bottom point to the c and m arrays
    m_for_interpolation = np.concatenate((self.BoroCnstNat*np.ones((PrefShkCount, 1)),
                                          self.mNrmNow), axis=1)
    c_for_interpolation = np.concatenate((np.zeros((PrefShkCount, 1)), self.cNrmNow), axis=1)
    return c_for_interpolation, m_for_interpolation

Author: albop, Project: HARK, Lines: 32, Source: ConsPrefShockModel.py
Example 16: linSVM

def linSVM(new_dset, validF):
    print "loading dataset"
    new_dset = L.RhythmDataset('/Users/Tlacael/NYU/RhythmData/lmd_scalars1x64.pkl', "/Users/Tlacael/NYU/RhythmData/"+new_dset, valid=validF, test=(validF+1) % 10, dim=[64, 1])

    # get training set
    print "loading training set"
    xAll = [new_dset.get(i[0])[0] for i in new_dset.split_idx['train']]
    xAll = np.concatenate(xAll)
    xAll = xAll.reshape(xAll.shape[0], xAll.shape[2])

    # get classes for training set
    print "loading validation set"
    classAll = [np.tile(new_dset.get(i[0])[1], (new_dset.get(i[0])[0].shape[0],)) for i in new_dset.split_idx['train']]
    target = np.concatenate(classAll)

    # get validation set
    xVerify = [new_dset.get(i[0])[0] for i in new_dset.split_idx['valid']]
    xVerify = np.concatenate(xVerify)
    xVerify = xVerify.reshape(xVerify.shape[0], xVerify.shape[2])

    classVer = [np.tile(new_dset.get(i[0])[1], (new_dset.get(i[0])[0].shape[0],)) for i in new_dset.split_idx['valid']]
    targetVer = np.concatenate(classVer)

    print "building model"
    svc = svm.SVC(kernel='linear', verbose=True)

    print "fit data"
    svc.fit(xAll, target)

    scre = svc.score(xVerify, targetVer)
    print "score: ", scre
    return scre

Author: tlacael, Project: RhythmData, Lines: 32, Source: linSVM_clf.py
Example 17: _csd_array

def _csd_array(x, sfreq, window_fun, eigvals, freq_mask, freq_mask_mt, n_fft,
               mode, mt_adaptive):
    """Calculate Fourier transform using multitaper module.

    The arguments correspond to the values in `compute_csd_epochs` and
    `csd_array`.
    """
    x_mt, _ = _mt_spectra(x, window_fun, sfreq, n_fft)

    if mt_adaptive:
        # Compute adaptive weights
        _, weights = _psd_from_mt_adaptive(x_mt, eigvals, freq_mask,
                                           return_weights=True)
        # Tiling weights so that we can easily use _csd_from_mt()
        weights = weights[:, np.newaxis, :, :]
        weights = np.tile(weights, [1, x_mt.shape[0], 1, 1])
    else:
        # Do not use adaptive weights
        if mode == 'multitaper':
            weights = np.sqrt(eigvals)[np.newaxis, np.newaxis, :, np.newaxis]
        else:
            # Hack so we can sum over axis=-2
            weights = np.array([1.])[:, np.newaxis, np.newaxis, np.newaxis]

    x_mt = x_mt[:, :, freq_mask_mt]

    # Calculating CSD
    # Tiling x_mt so that we can easily use _csd_from_mt()
    x_mt = x_mt[:, np.newaxis, :, :]
    x_mt = np.tile(x_mt, [1, x_mt.shape[0], 1, 1])
    y_mt = np.transpose(x_mt, axes=[1, 0, 2, 3])
    weights_y = np.transpose(weights, axes=[1, 0, 2, 3])
    csds = _csd_from_mt(x_mt, y_mt, weights, weights_y)

    return csds

Author: hoechenberger, Project: mne-python, Lines: 35, Source: csd.py
Example 18: test_003_block_pinching

def test_003_block_pinching(self):
    n_reps = 1
    n_subcarriers = 8
    n_timeslots = 8
    block_len = n_subcarriers * n_timeslots
    cp_len = 8
    ramp_len = 4
    cs_len = ramp_len * 2
    window_len = get_window_len(cp_len, n_timeslots, n_subcarriers, cs_len)
    window_taps = get_raised_cosine_ramp(ramp_len, window_len)
    data = np.arange(block_len, dtype=np.complex) + 1
    ref = add_cyclic_starfix(data, cp_len, cs_len)
    ref = pinch_block(ref, window_taps)

    data = np.tile(data, n_reps)
    ref = np.tile(ref, n_reps)
    print "input is: ", len(data), " -> ", len(ref)

    # short_window = np.concatenate((window_taps[0:ramp_len], window_taps[-ramp_len:]))
    prefixer = gfdm.cyclic_prefixer_cc(block_len, cp_len, cs_len, ramp_len, window_taps)
    src = blocks.vector_source_c(data)
    dst = blocks.vector_sink_c()
    self.tb.connect(src, prefixer, dst)
    self.tb.run()
    res = np.array(dst.data())
    print ref[-10:]
    print res[-10:]
    self.assertComplexTuplesAlmostEqual(res, ref, 4)

Author: jdemel, Project: gr-gfdm, Lines: 28, Source: qa_cyclic_prefixer_cc.py
Example 19: __init__

def __init__(self, nrows, ncols):
    self.nrows = nrows
    self.ncols = ncols
    self.num_elements = nrows * ncols

    self.X = np.tile(np.arange(self.ncols, dtype=np.double).reshape((1, self.ncols))*np.sqrt(3),
                     (self.nrows, 1))
    if (self.ncols % 2 == 0):
        self.Y = np.tile(np.arange(2*self.nrows, dtype=np.double).reshape((self.nrows, 2)),
                         (1, self.ncols//2))
    else:
        self.Y = np.tile(np.arange(2*self.nrows, dtype=np.double).reshape((self.nrows, 2)),
                         (1, self.ncols//2+1))
        self.Y = self.Y[:, 0:-1]

    self.col = np.tile(np.arange(self.ncols, dtype=np.int32).reshape((1, self.ncols)),
                       (self.nrows, 1))
    self.row = np.tile(np.arange(self.nrows, dtype=np.int32).reshape((self.nrows, 1)),
                       (1, self.ncols))
    #self.Y = self.Y + np.tile(np.asarray([0, 1]),
    #                          (self.nrows, self.ncols/2))

    self.col = self.col.reshape(-1)
    self.row = self.row.reshape(-1)
    self.num = np.arange(self.num_elements, dtype=np.int32).reshape(nrows, ncols)

Author: neurokernel, Project: sensory_int, Lines: 25, Source: vision_configuration.py
Example 20: _verifySolveBatch

def _verifySolveBatch(self, x, y):
    # Since numpy.linalg.lstsq does not support batch solves, as opposed
    # to numpy.linalg.solve, we just perform this test for a fixed batch size
    # of 2x3.
    for np_type in [np.float32, np.float64]:
        a = np.tile(x.astype(np_type), [2, 3, 1, 1])
        b = np.tile(y.astype(np_type), [2, 3, 1, 1])
        np_ans = np.empty([2, 3, a.shape[-1], b.shape[-1]])
        for dim1 in range(2):
            for dim2 in range(3):
                np_ans[dim1, dim2, :, :], _, _, _ = np.linalg.lstsq(
                    a[dim1, dim2, :, :], b[dim1, dim2, :, :])
        for fast in [True, False]:
            with self.test_session():
                tf_ans = tf.batch_matrix_solve_ls(a, b, fast=fast).eval()
            self.assertEqual(np_ans.shape, tf_ans.shape)
            # Check residual norm.
            tf_r = b - BatchMatMul(a, tf_ans)
            tf_r_norm = np.sum(tf_r * tf_r)
            np_r = b - BatchMatMul(a, np_ans)
            np_r_norm = np.sum(np_r * np_r)
            self.assertAllClose(np_r_norm, tf_r_norm)
            # Check solution.
            if fast or a.shape[-2] >= a.shape[-1]:
                # We skip this test for the underdetermined case when using the
                # slow path, because Eigen does not return a minimum norm solution.
                # TODO(rmlarsen): Enable this check for all paths if/when we fix
                # Eigen's solver.
                self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)

Author: AboorvaDevarajan, Project: tensorflow, Lines: 29, Source: matrix_solve_ls_op_test.py
Note: the numpy.tile examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and redistribution or use should follow the license of the corresponding project. Do not reproduce without permission.