This article collects typical usage examples of the Python function scipy.delete. If you have been wondering what scipy.delete does or how to use it, the curated code samples below should help.
Shown below are 20 code examples of the delete function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
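Before the examples, a minimal sketch of the function itself (assuming numpy is installed; scipy historically re-exported numpy.delete at its top level, and newer SciPy releases deprecate and remove that alias, so numpy.delete is the safer spelling in new code):

import numpy as np

a = np.arange(12).reshape(3, 4)
np.delete(a, 1, axis=0)       # drop the second row   -> shape (2, 4)
np.delete(a, [0, 2], axis=1)  # drop columns 0 and 2  -> shape (3, 2)
np.delete(a, -1)              # no axis: flattens first, then drops the last element

Note that delete never modifies its input; it always returns a new array with the given entries removed.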
Example 1: scanSound

def scanSound(self, source, minnotel):
    binarized = source
    # convert the minimum note length into a sample count at the given tempo
    scale = 60. / self.wavetempo * (binarized[0].size / self.duration)
    noise_length = scale * minnotel
    antinoised = sp.zeros_like(binarized)
    for i in range(sp.shape(binarized)[0]):
        new_line = binarized[i, :].copy()
        diffed = sp.diff(new_line)
        ones_keys = sp.where(diffed == 1)[0]    # rising edges
        minus_keys = sp.where(diffed == -1)[0]  # falling edges
        if ones_keys.size != 0 and minus_keys.size != 0:
            # a falling edge before the first rising edge: leading segment
            if ones_keys[0] > minus_keys[0]:
                new_line = self.cutNoise(
                    (0, minus_keys[0]), noise_length, new_line)
                minus_keys = sp.delete(minus_keys, 0)
            # a rising edge after the last falling edge: trailing segment
            if ones_keys[-1] > minus_keys[-1]:
                new_line = self.cutNoise(
                    (ones_keys[-1], new_line.size - 1), noise_length, new_line)
                ones_keys = sp.delete(ones_keys, -1)
            # drop every remaining segment shorter than noise_length
            for j in range(sp.size(ones_keys)):
                new_line = self.cutNoise(
                    (ones_keys[j], minus_keys[j]), noise_length, new_line)
        antinoised[i, :] = new_line
    return antinoised

Developer: mackee, Project: utakata, Lines: 31, Source: utakata_time_freq.py
Example 2: generateNodesAdaptive

def generateNodesAdaptive(self):
    innerDomainSize = self.innerDomainSize
    innerMeshSize = self.innerMeshSize
    numberElementsInnerDomain = innerDomainSize / innerMeshSize
    assert(numberElementsInnerDomain < self.numberElements)
    domainCenter = (self.domainStart + self.domainEnd) / 2
    # uniform nodes inside the inner domain; drop the shared endpoint
    nodes0 = np.linspace(domainCenter, innerDomainSize/2.0, (numberElementsInnerDomain/2.0)+1.0)
    nodes0 = np.delete(nodes0, -1)
    numberOuterIntervalsFromDomainCenter = (self.numberElements - numberElementsInnerDomain) / 2.0
    # outer nodes grow geometrically as powers of sqrt(2)
    const = np.log2(innerDomainSize/2.0) / 0.5
    exp = np.linspace(const, np.log2(self.domainEnd*self.domainEnd), numberOuterIntervalsFromDomainCenter+1)
    nodes1 = np.power(np.sqrt(2), exp)
    nodesp = np.concatenate((nodes0, nodes1))
    # mirror the positive half-axis; drop the duplicated center node
    nodesn = -nodesp[::-1]
    nodesn = np.delete(nodesn, -1)
    linNodalCoordinates = np.concatenate((nodesn, nodesp))
    nodalCoordinates = 0
    # Introduce higher order nodes
    if self.elementType == "quadratic" or self.elementType == "cubic":
        if self.elementType == "quadratic":
            numberNodesPerElement = 3
        elif self.elementType == "cubic":
            numberNodesPerElement = 4
        for i in range(0, len(linNodalCoordinates)-1):
            newnodes = np.linspace(linNodalCoordinates[i], linNodalCoordinates[i+1], numberNodesPerElement)
            # drop the last node so shared element endpoints are not duplicated
            nodalCoordinates = np.delete(nodalCoordinates, -1)
            nodalCoordinates = np.concatenate((nodalCoordinates, newnodes))
    else:
        nodalCoordinates = linNodalCoordinates
    return nodalCoordinates

Developer: mrinaliyer, Project: tuckerDFT, Lines: 34, Source: FEM.py
Example 3: _csv2m

def _csv2m(self, csv_link):
    '''
    Import the csv as an array, clipping out strings for bars
    ...

    Arguments
    ---------
    csv_link : str
               Path to csv file to be converted into a map

    Returns
    -------
    m        : array
               Array of floats to be plotted as map
    rows     : list
               List of tuples (row, color) to locate horizontal bars
    cols     : list
               List of tuples (col, color) to locate vertical bars
    '''
    csv = [line.strip('\n').strip('\r').split(',')
           for line in open(csv_link).readlines()]
    a = np.array(csv)
    rows, cols = [], []
    for i, row in enumerate(a):
        color = [row[0], row[-1]]
        if 'w' in color or 'b' in color:
            rows.append((i, color[0]))
    for i, col in enumerate(a.T):
        color = [col[0], col[-1]]
        if 'w' in color or 'b' in color:
            cols.append((i, color[0]))
    # strip the flagged bar rows/columns before the float conversion
    m = scipy.delete(a, [i[0] for i in rows], 0)
    m = scipy.delete(m, [i[0] for i in cols], 1)
    return np.array(m, dtype=float), rows, cols

Developer: darribas, Project: simVizMap, Lines: 33, Source: simVizMap.py
Example 4: gstamp

def gstamp(self, ports_v, time=0, reduced=True):
    """Returns the differential (trans)conductance wrt the port specified by port_index
    when the element has the voltages specified in ports_v across its ports,
    at (simulation) time.

    ports_v: a list in the form: [voltage_across_port0, voltage_across_port1, ...]
    port_index: an integer, 0 <= port_index < len(self.get_ports())
    time: the simulation time at which the evaluation is performed. Set it to
    None during DC analysis.
    """
    indices = ([self.n1 - 1]*2 + [self.n2 - 1]*2,
               [self.n1 - 1, self.n2 - 1]*2)
    gm = self.model.get_gm(self.model, 0, utilities.tuplinator(ports_v), 0, self.device)
    if gm == 0:
        gm = options.gmin*2
    stamp = np.array(((gm, -gm),
                      (-gm, gm)), dtype=np.float64)
    if reduced:
        # remove the rows/columns that correspond to the reference node (-1)
        zap_rc = [pos for pos, i in enumerate(indices[1][:2]) if i == -1]
        stamp = np.delete(stamp, zap_rc, axis=0)
        stamp = np.delete(stamp, zap_rc, axis=1)
        indices = tuple(zip(*[(i, y) for i, y in zip(*indices) if (i != -1 and y != -1)]))
        # fold duplicate (row, col) entries by summing their stamp values
        stamp_flat = stamp.reshape(-1)
        stamp_folded = []
        indices_folded = []
        for ix, it in enumerate([(i, y) for i, y in zip(*indices)]):
            if it not in indices_folded:
                indices_folded.append(it)
                stamp_folded.append(stamp_flat[ix])
            else:
                w = indices_folded.index(it)
                stamp_folded[w] += stamp_flat[ix]
        indices = tuple(zip(*indices_folded))
        stamp = np.array(stamp_folded)
    return indices, stamp

Developer: ahkab, Project: ahkab, Lines: 35, Source: TunnelJunction.py
Example 5: calc_coh

def calc_coh(subject, conditions, task, meg_electordes_names, meg_electrodes_data,
             tmin=0, tmax=2.5, sfreq=1000, fmin=55, fmax=110, bw=15, n_jobs=6):
    input_file = op.join(ELECTRODES_DIR, subject, task, 'electrodes_data_trials.mat')
    output_file = op.join(ELECTRODES_DIR, subject, task, 'electrodes_coh.npy')
    d = sio.loadmat(input_file)
    # Remove and sort the electrodes according to meg_electordes_names
    electrodes = get_electrodes_names(subject, task)
    electrodes_to_remove = set(electrodes) - set(meg_electordes_names)
    indices_to_remove = [electrodes.index(e) for e in electrodes_to_remove]
    electrodes = scipy.delete(electrodes, indices_to_remove).tolist()
    electrodes_indices = np.array([electrodes.index(e) for e in meg_electordes_names])
    electrodes = np.array(electrodes)[electrodes_indices].tolist()
    assert(np.all(electrodes == meg_electordes_names))

    for cond, data in enumerate([d[conditions[0]], d[conditions[1]]]):
        # drop the same electrodes from the data (axis 1) and reorder to match
        data = scipy.delete(data, indices_to_remove, 1)
        data = data[:, electrodes_indices, :]
        data = downsample_data(data)
        data = data[:, :, :meg_electrodes_data.shape[2]]
        if cond == 0:
            coh_mat = np.zeros((data.shape[1], data.shape[1], 2))

        con_cnd, _, _, _, _ = spectral_connectivity(
            data, method='coh', mode='multitaper', sfreq=sfreq,
            fmin=fmin, fmax=fmax, mt_adaptive=True, n_jobs=n_jobs,
            mt_bandwidth=bw, mt_low_bias=True, tmin=tmin, tmax=tmax)
        con_cnd = np.mean(con_cnd, axis=2)
        coh_mat[:, :, cond] = con_cnd

    np.save(output_file[:-4], coh_mat)
    return con_cnd

Developer: ofek-schechner, Project: mmvt, Lines: 29, Source: meg_electrodes.py
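A side note on the pattern above: scipy.delete accepts a plain Python list (such as the electrode-name list) and converts it to an ndarray internally, which is why the result is passed through .tolist() to get a list back. A minimal sketch with hypothetical labels:

import numpy as np

electrodes = ['LAT1', 'LAT2', 'LMT1']       # hypothetical electrode names
kept = np.delete(electrodes, [1]).tolist()  # -> ['LAT1', 'LMT1']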
Example 6: main

def main(filename, metric, opts):
    reader = csv.reader(open(filename, 'r'), delimiter=',')
    reader.next()  # ignore first line
    header = reader.next()
    origModels = header[1:]
    students = origModels[:-4]
    if opts.useHC:
        models = list(origModels)
    else:
        models = list(students)
    results = numpy.zeros([len(students), len(models)])
    for i, row in enumerate(reader):
        if len(origModels) != len(row) - 1:
            print >>sys.stderr, 'Bad Size:', len(origModels), len(row)-1
            sys.exit(2)
        for j, v in enumerate(row[1:len(models)+1]):
            results[i, j] = float(v)
    # get the arguments we want to call
    args = []
    args.append((results, models, opts.numStudentsToChoose, metric))
    for i, student in enumerate(students):
        # leave-one-out: remove student i's row and column from the matrix
        tempResults = scipy.delete(results, i, 0)
        tempResults = scipy.delete(tempResults, i, 1)
        tempModels = list(models)
        del tempModels[i]
        args.append((tempResults, tempModels, opts.numStudentsToChoose, metric))
    if opts.multi:
        pool = Pool()
        res = pool.map(calcBestWrapper, args)
    else:
        res = map(calcBestWrapper, args)
    for student, (bestVal, bestInds, bestModels) in zip(['Overall'] + students, res):
        print '%s,%s' % (student, ','.join(bestModels))

Developer: goodchong, Project: rl_pursuit, Lines: 35, Source: analyzeMatrix.py
Example 7: marginalize

def marginalize(dist_vars, marg_vars):
    # Initialize marginal dict, same for all dists
    margdist_vars = {}
    margdist_vars['dist'] = dist_vars['dist']

    # Gaussian
    if dist_vars['dist'] == 'gaussian':
        N_k = len(dist_vars['w'])      # Number of gaussians
        N_D = len(dist_vars['mu'][0])  # Dim of original parameter space
        # Initialize remaining components of marg dict, before any marginalization
        margdist_vars['mu'] = dist_vars['mu'][:]
        margdist_vars['cov'] = dist_vars['cov'][:]
        margdist_vars['w'] = dist_vars['w'][:]
        margdist_vars['vars'] = dist_vars['vars'][:]

        for marg_var in marg_vars:
            # Get index of the marginalized var in the current gaussian
            i_m = margdist_vars['vars'].index(marg_var)
            # Create list of current indices and remove the index of marg_var
            i_old = list(range(N_D))
            i_old.remove(i_m)
            # Remove marg_var from the list of vars
            margdist_vars['vars'].remove(marg_var)
            # Drop the marginalized component from every mean vector
            margdist_vars['mu'] = [sp.delete(margdist_vars['mu'][i], i_m, 0)
                                   for i in range(len(margdist_vars['w']))]

            # Loop over components in the mixture:
            #   marg cov:    T_M = L_m - T_m
            #   marg weight: w_m = sp.sqrt(2*pi/L_mm)
            for i in range(N_k):
                # invert original covariance matrix
                Lambda = inv(sp.matrix(margdist_vars['cov'][i]))
                # Store marginalized component of Lambda
                L_mm = Lambda[i_m, i_m]
                # Remove marginal component from Lambda
                L_m = sp.delete(sp.delete(Lambda, i_m, 0), i_m, 1)
                # Construct skew matrix
                l_m = sp.matrix(Lambda[i_m, i_old] + Lambda[i_old, i_m])
                T_m = l_m.T * l_m / (4 * L_mm)
                # Construct marginalized covariance matrix
                margdist_vars['cov'][i] = sp.asarray(inv(L_m - T_m))
                # Scale weight
                margdist_vars['w'][i] = sp.sqrt(2 * sp.pi / L_mm) * dist_vars['w'][i]

            # Update dimension of the marginalized parameter space
            N_D = N_D - 1

    return margdist_vars

Developer: JanLindroos, Project: SUSYScanner, Lines: 56, Source: dist_lib.py
Example 8: lab_reduce

def lab_reduce(y_true, y_score):
    empty_indices = scan_empty(y_true)
    i = 0
    for k in empty_indices:
        # k-i compensates for the columns already removed
        y_true = scipy.delete(y_true, k - i, 1)
        y_score = scipy.delete(y_score, k - i, 1)
        i += 1
    return y_true, y_score

Developer: deepnadevkar, Project: topbox, Lines: 8, Source: topbox.py
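The k-i bookkeeping above compensates for the leftward shift of the remaining columns after each single-column deletion. Passing the whole index list to one delete call sidesteps that entirely; a minimal sketch (assuming numpy as np and hypothetical indices):

import numpy as np

y_true = np.arange(12).reshape(3, 4)
empty_indices = [1, 3]                              # hypothetical column indices
reduced = np.delete(y_true, empty_indices, axis=1)  # drops both columns in one call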
Example 9: load_structural

def load_structural(fname):
    data = np.genfromtxt(fname, delimiter=',')
    # removing first column and first row, because they're headers
    data = scipy.delete(data, 0, 1)
    data = scipy.delete(data, 0, 0)
    # format it to be subjects x variables
    data = data.T
    return data

Developer: gsudre, Project: research_code, Lines: 8, Source: permute_correlation.py
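Deleting the header row and column one call at a time works, but plain slicing expresses the same trim in one step and avoids the intermediate copies; a minimal sketch under the same assumption (first row and first column are headers, hypothetical file name):

import numpy as np

data = np.genfromtxt('structural.csv', delimiter=',')
data = data[1:, 1:].T  # drop headers, then transpose to subjects x variables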
Example 10: main

def main():
    '''
    Breast Cancer data set
    '''
    # Get the breast cancer data
    cancer_data = np.loadtxt("breast-cancer-wisconsin.data", delimiter=',', dtype=str)
    # All the missing values are substituted with 0.0
    cancer_data[cancer_data == "?"] = 0.0
    # Extract the cancer ids from the given input
    cancer_id = cancer_data[:, :1]
    # Extract the features from the given input
    input_matrix = cancer_data[:, 1:-1]
    # Extract the output labels
    labels = cancer_data[:, -1]
    # Instantiation of Logistic Regression
    # Regularization to avoid overfitting
    logistic_classifier = LogisticRegression(C=0.5, max_iter=900)
    # Splitting the data into training and testing
    # Could have split into training, test and cross-validation to avoid overfitting.
    train_set, test_set, train_class_label, test_class_label = train_test_split(
        input_matrix, labels, train_size=0.5, test_size=0.5, random_state=10)
    # For convenience, all the values are converted to float
    train_set = np.array(train_set, dtype=float)
    test_set = np.array(test_set, dtype=float)
    train_class_label = np.array(train_class_label, dtype=float)
    test_class_label = np.array(test_class_label, dtype=float)
    '''Train a machine learning model with the given training set'''
    logistic_classifier.fit(train_set, train_class_label)

    '''
    Titanic Data set
    '''
    titanic_data = np.loadtxt("train.csv", delimiter=',', dtype=str)
    titanic_data[titanic_data == "?"] = 0.0
    titanic_data[titanic_data == ""] = 0.0
    labels = titanic_data[1:, 1]
    # To simplify, all the string columns are removed so that the logistic
    # regression model can be built easily.
    # Columns removed are: Passenger Id, Name, Pclass, Embarkment, Sex, Cabin
    titanic_data = titanic_data[1:, 2:-1]
    titanic_data = scipy.delete(titanic_data, [1, 2, 3, 7, 9], 1)
    titanic_data = np.array(titanic_data, dtype=float)
    titanic_logistic_classifier = LogisticRegression(C=0.5, max_iter=900)
    titanic_logistic_classifier.fit(titanic_data, labels)
    # Test set of the titanic data set
    titanic_test_set = np.loadtxt("test.csv", delimiter=',', dtype=str)
    titanic_test_set[titanic_test_set == "?"] = 0.0
    titanic_test_set[titanic_test_set == ""] = 0.0
    # Slice the features from the input; the same string columns are removed
    # as above. Traveller info contains the passenger's name, id and sex.
    traveller_info = titanic_test_set[1:, :5]
    titanic_test_set = titanic_test_set[1:, 1:]
    titanic_test_set = scipy.delete(titanic_test_set, [1, 2, 3, 7, 9], 1)
    titanic_test_set = np.array(titanic_test_set, dtype=float)
    # Correlate the two data sets
    correlate_data_sets(test_set, logistic_classifier, titanic_test_set,
                        titanic_logistic_classifier, traveller_info, cancer_id)

Developer: yagamiram, Project: Hart_Coding_Challenge, Lines: 58, Source: logistic_regression.py
Example 11: __init__

def __init__(self, opts):
    self.train_file = opts["train_file"]
    self.test_file = opts["test_file"]
    self.out_file = opts["out_file"]
    self.learning_rate = opts["learning_rate"]
    self.decay_rate = opts["decay_rate"]
    self.batch_size = opts["batch_size"]
    self.n_iter = opts["n_iter"]
    self.shuffle = opts["shuffle"]
    self.holdout_size = opts["holdout_size"]
    self.l2 = opts["l2"]
    self.standardization = opts["standardize"]
    self.loss_method = opts["loss"]
    self.use_adagrad = opts["adagrad"]
    self.use_rmsprop = opts["rmsprop"]
    self.hash_trick_mod = opts["hash"]
    print opts

    train_data = read_data(self.train_file)
    test_data = read_data(self.test_file)

    # prepend a bias column of ones to the test inputs
    self.test_input = np.ones((test_data.shape[0], test_data.shape[1] + 1), dtype=np.float)
    self.test_input[:, 1:] = test_data[:, :]
    self.test_initial = np.array(self.test_input)
    self.test_output = np.zeros(test_data.shape[0])

    # last training column is the target; the rest are features (plus bias)
    self.input = np.ones(train_data.shape, dtype=np.float)
    self.input[:, 1:] = train_data[:, :-1]
    self.output = train_data[:, -1:].transpose(1, 0)[0]

    self.validation_input = np.array([])
    self.validation_output = np.array([])
    if self.holdout_size:
        # move a random subset of rows into the validation set
        holdout_part = int(self.holdout_size * self.input.shape[0])
        random_rows = random.sample(range(self.input.shape[0]), holdout_part)
        self.validation_input = self.input[random_rows, :]
        self.validation_output = self.output[random_rows]
        self.input = scipy.delete(self.input, random_rows, 0)
        self.output = scipy.delete(self.output, random_rows)

    self.learning_input = np.array(self.input)
    self.learning_output = np.array(self.output)

    if self.hash_trick_mod != 0:
        self.learning_input = hash_trick(self.learning_input, self.hash_trick_mod)
        self.validation_input = hash_trick(self.validation_input, self.hash_trick_mod)
        self.test_input = hash_trick(self.test_input, self.hash_trick_mod)

    if self.standardization:
        standardize(self.learning_input)
        standardize(self.validation_input)
        standardize(self.test_input)

    self.w = np.zeros(self.learning_input.shape[1], dtype=np.float)
    self.adagrad_cache = np.zeros(len(self.w))
    self.rmsprop_cache = np.zeros(len(self.w))

Developer: epawlowska, Project: machineLearning, Lines: 58, Source: regression.py
Example 12: condenseMatrix

def condenseMatrix(self, H):
    # applyBoundaryConditions on Hx Hy Hz:
    # drop the first and last row, then the first and last column
    H = np.delete(H, 0, 0)
    H = np.delete(H, -1, 0)
    H = np.delete(H, 0, 1)
    H = np.delete(H, -1, 1)
    return H

Developer: mrinaliyer, Project: tuckerDFT, Lines: 9, Source: FunctionalRayleighQuotientSeparable.py
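The four deletes strip the boundary rows and columns one at a time, each producing a fresh copy. Index slicing yields the same condensed matrix in one step (a sketch, not the project's code):

import numpy as np

def condense(H):
    # interior block: drop first/last row and first/last column
    return H[1:-1, 1:-1]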
Example 13: delete_invalid_data

def delete_invalid_data(self, value=0.0):
    r"""
    .. todo:: The explicit dependency on cathode current needs to be removed
    """
    rows = sp.where(self._data[self._objectives[0]] == value)
    self._logger.warning('Deleting invalid data rows: ' + str(rows))
    self._data = sp.delete(self._data, rows, axis=0)
    for key in self._datadict.keys():
        self._datadict[key] = sp.delete(self._datadict[key], rows, axis=0)

Developer: OpenFCST, Project: OpenFCST_v0.2, Lines: 9, Source: parsers.py
Example 14: rankUsingPCA

def rankUsingPCA(fileName):
    fp = open(fileName)
    line = fp.readline()
    firstLine = line.strip().split(',')
    fp.close()
    names = numpy.array(firstLine[1:])
    print names
    dataMat = loadDataSet(fileName)
    meanVals = mean(dataMat, axis=0)
    meanRemoved = dataMat - meanVals
    covMat = cov(meanRemoved, rowvar=0)
    eigVals, eigVects = linalg.eig(mat(covMat))
    eigValInd = argsort(eigVals)
    eigValInd = eigValInd[:-(999999+1):-1]
    redEigVects = eigVects[:, eigValInd]
    lowDDataMat = meanRemoved * redEigVects
    T = redEigVects.getA()
    print T
    # calculate the variance covered by each component in PCA
    percentagePCA = calculateFractionOfVarianceExplainedByPCA(lowDDataMat)
    # weight each loading column by its explained variance
    for d in range(T.shape[0]):
        T[:, d] = T[:, d] * percentagePCA[d]
    rankMatrix = {}
    rank = 0
    while T.shape[0] > 1 and T.shape[1] > 1:
        # find the row holding the largest weighted loading
        rowMax = -99999
        index = 0
        maxIndex = -1
        for r in T:
            valMax = numpy.amax(r)
            if valMax > rowMax:
                rowMax = valMax
                maxIndex = index
            index = index + 1
        print names[maxIndex]
        rankMatrix[names[maxIndex]] = rank
        rank = rank + 1
        # remove the ranked feature's row and its name, then repeat
        T = scipy.delete(T, maxIndex, 0)
        names = scipy.delete(names, maxIndex, 0)
    print names[0]
    rankMatrix[names[0]] = rank
    return rankMatrix

Developer: Sandy4321, Project: feature_selection, Lines: 54, Source: pca.py
Example 15: remove_from_hierarchy

def remove_from_hierarchy(obj, remove_half_orphans=True):
    """ Removes a Neo object from the hierarchy it is embedded in. Mostly
    downward links are removed (except for possible links in
    :class:`neo.core.Spike` or :class:`neo.core.SpikeTrain` objects).

    For example, when ``obj`` is a :class:`neo.core.Segment`, the link from
    its parent :class:`neo.core.Block` will be severed. Also, all links to
    the segment from its spikes and spike trains will be severed.

    :param obj: The object to be removed.
    :type obj: Neo object
    :param bool remove_half_orphans: When True, :class:`neo.core.Spike`
        and :class:`neo.core.SpikeTrain` objects belonging to a
        :class:`neo.core.Segment` or :class:`neo.core.Unit` removed by
        this function will be removed from the hierarchy as well, even
        if they are still linked from a :class:`neo.core.Unit` or
        :class:`neo.core.Segment`, respectively. In this case, their
        links to the hierarchy defined by ``obj`` will be kept intact.
    """
    classname = type(obj).__name__

    # Parent for arbitrary object
    if classname in neo.description.many_to_one_relationship:
        for n in neo.description.many_to_one_relationship[classname]:
            p = getattr(obj, n.lower())
            if p is None:
                continue
            l = getattr(p, classname.lower() + 's', ())
            try:
                l.remove(obj)
            except ValueError:
                pass

    # Many-to-many relationships
    if isinstance(obj, neo.RecordingChannel):
        for rcg in obj.recordingchannelgroups:
            try:
                idx = rcg.recordingchannels.index(obj)
                # keep the parallel index/name arrays in sync with the list
                if rcg.channel_indexes.shape[0] == len(rcg.recordingchannels):
                    rcg.channel_indexes = sp.delete(rcg.channel_indexes, idx)
                if rcg.channel_names.shape[0] == len(rcg.recordingchannels):
                    rcg.channel_names = sp.delete(rcg.channel_names, idx)
                rcg.recordingchannels.remove(obj)
            except ValueError:
                pass

    if isinstance(obj, neo.RecordingChannelGroup):
        for rc in obj.recordingchannels:
            try:
                rc.recordingchannelgroups.remove(obj)
            except ValueError:
                pass

    _handle_orphans(obj, remove_half_orphans)

Developer: NeuroArchive, Project: spykeutils, Lines: 53, Source: tools.py
Example 16: rem_borders

def rem_borders(img, u, d, l, r):
    # delete u rows from the top
    for i in range(u):
        img = sc.delete(img, 0, 0)
    # flip vertically so the bottom border becomes row 0, delete d rows, flip back
    img = np.flipud(img)
    for i in range(d):
        img = sc.delete(img, 0, 0)
    img = np.flipud(img)
    # delete l columns from the left
    for i in range(l):
        img = sc.delete(img, 0, 1)
    # flip horizontally so the right border becomes column 0, delete r columns, flip back
    img = np.fliplr(img)
    for i in range(r):
        img = sc.delete(img, 0, 1)
    img = np.fliplr(img)
    return img

Developer: lohmataja, Project: fourier, Lines: 14, Source: penmen3.py
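Each loop iteration above copies the whole image, and the flips exist only so every delete can target index 0. Slicing removes the same borders in one step; a minimal sketch assuming u, d, l, r are non-negative border widths:

import numpy as np

def rem_borders_sliced(img, u, d, l, r):
    h, w = img.shape[:2]
    return img[u:h - d, l:w - r]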
Example 17: test_tfm2

def test_tfm2(n, addnoise=False):
    Tms = make_obs()
    Tsm = np.linalg.inv(Tms)
    I = np.eye(4)
    M_final = np.empty((0, 16))
    for i in range(n):
        del_Ts = make_obs(0.05, 0.05)
        if addnoise:
            noise = make_obs(0.01, 0.01)
            del_Tm = Tms.dot(del_Ts.dot(Tsm)).dot(noise)
        else:
            del_Tm = Tms.dot(del_Ts.dot(Tsm))
        print "Observation %i" % (i+1)
        print "Delta Ts:"
        print del_Ts
        print "Delta Tm:"
        print del_Tm, '\n'
        # stack one block of the linear system per observation
        M = np.kron(I, del_Tm) - np.kron(del_Ts.T, I)
        M_final = np.r_[M_final, M]
    # move the known entry X[3,3] = 1 to the right-hand side and
    # drop the columns for the fixed bottom row (0, 0, 0, 1)
    L_final = -1 * M_final[:, 15]
    M_final = scp.delete(M_final, (3, 7, 11, 15), 1)
    X = np.linalg.lstsq(M_final, L_final)[0]
    Tfm = np.reshape(X, (3, 4), order='F')
    print Tfm.shape
    Tfm = np.r_[Tfm, np.array([[0, 0, 0, 1]])]
    np.set_printoptions(precision=5)
    print Tfm
    print Tms
    R = Tfm[0:3, 0:3]
    print R.T.dot(R)
    X2 = scp.delete(np.reshape(Tms, 16, order="F"), (3, 7, 11, 15), 0)
    print X2
    if not addnoise:
        assert np.allclose(M_final.dot(X2), L_final, atol=0.001)
        assert np.allclose(Tfm, Tms, atol=0.001)

Developer: rishabh-battulwar, Project: human_demos, Lines: 49, Source: test_tfm.py
Example 18: solve_sylvester2

def solve_sylvester2(tfms1, tfms2):
    """
    Solves the system of Sylvester's equations to find the calibration transform.
    Returns the calibration transform from sensor 1 (corresponding to tfms1) to sensor 2.
    This function forces the bottom row to be 0,0,0,1 by neglecting columns of M and changing L.
    """
    assert len(tfms1) == len(tfms2) and len(tfms1) >= 2
    I = np.eye(4)
    I_0 = np.copy(I)
    I_0[3, 3] = 0
    M_final = np.empty((0, 16))

    s1_t0_inv = np.linalg.inv(tfms1[0])
    s2_t0_inv = np.linalg.inv(tfms2[0])

    print "\n CONSTRUCTING M: \n"
    for i in range(1, len(tfms1)):
        del1 = np.linalg.inv(tfms1[i]).dot(tfms1[0])
        del2 = np.linalg.inv(tfms2[i]).dot(tfms2[0])
        print "\n del1:"
        print del1
        print del1.dot(I_0).dot(del1.T)
        print "\n del2:"
        print del2, '\n'
        print del2.dot(I_0).dot(del2.T)
        # one block of the linear system per observation pair
        M = np.kron(I, del1) - np.kron(del2.T, I)
        M_final = np.r_[M_final, M]

    # fix the bottom row to (0, 0, 0, 1): drop its columns, move the 1 to the RHS
    L_final = -1 * np.copy(M_final[:, 15])
    M_final = scp.delete(M_final, (3, 7, 11, 15), 1)
    X = np.linalg.lstsq(M_final, L_final)[0]
    print M_final.dot(X) - L_final

    X2 = np.reshape(scp.delete(np.eye(4), 3, 0), 12, order="F")
    print M_final.dot(X2) - L_final

    tt = np.reshape(X, (3, 4), order='F')
    tt = np.r_[tt, np.array([[0, 0, 0, 1]])]
    print tt.T.dot(tt)
    return tt

Developer: rishabh-battulwar, Project: human_demos, Lines: 48, Source: calib_hydra_pr2.py
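Both of the last two examples build M from the identity vec(AXB) = (Bᵀ ⊗ A) vec(X) with column-major vec, so the constraint del1·X = X·del2 becomes a linear system in vec(X); deleting columns 3, 7, 11, 15 then pins the bottom row of the recovered 4x4 transform to (0, 0, 0, 1). A quick numerical check of the identity, with hypothetical random matrices:

import numpy as np

A, B, X = (np.random.rand(4, 4) for _ in range(3))
vec = lambda M: M.reshape(-1, order='F')  # column-major vectorization
assert np.allclose(np.kron(B.T, A).dot(vec(X)), vec(A.dot(X).dot(B)))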
Example 19: trim_fftconvolve

def trim_fftconvolve(image):
    """
    Removes invalid rows and columns from a convolved image after
    fftconvolve with the "same" option.

    Arguments:
    - `image`: input image for trimming
    """
    # remove invalid edge: first/last row and first/last column
    image = delete(image, 0, 0)
    image = delete(image, 0, 1)
    image = delete(image, image.shape[0] - 1, 0)
    image = delete(image, image.shape[1] - 1, 1)
    return image

Developer: rsuhada, Project: code, Lines: 16, Source: test_2d_im.py
Example 20: bipolarize_data

def bipolarize_data(data, labels):
    bipolar_electrodes = []
    if isinstance(data, dict):
        single_trials = True
        bipolar_data = {}
        for key in data.keys():
            bipolar_data[key] = np.zeros(data[key].shape)
    else:
        single_trials = False
        bipolar_electrodes_num = calc_bipolar_electrodes_number(labels)
        bipolar_data = np.zeros((bipolar_electrodes_num, data.shape[1], data.shape[2]))

    bipolar_data_index = 0
    for index in range(len(labels) - 1):
        elc1_name = labels[index].strip()
        elc2_name = labels[index + 1].strip()
        elc_group1, _ = utils.elec_group_number(elc1_name)
        elc_group2, _ = utils.elec_group_number(elc2_name)
        # only pair adjacent electrodes from the same group
        if elc_group1 == elc_group2:
            elec_name = '{}-{}'.format(elc2_name, elc1_name)
            bipolar_electrodes.append(elec_name)
            if single_trials:
                for key in data.keys():
                    bipolar_data[key][:, bipolar_data_index, :] = \
                        (data[key][:, index, :] + data[key][:, index + 1, :]) / 2.
            else:
                bipolar_data[bipolar_data_index, :, :] = \
                    (data[index, :, :] + data[index + 1, :, :]) / 2.
            bipolar_data_index += 1

    if single_trials:
        # trim the pre-allocated but unused electrode slots (axis 1)
        for key in data.keys():
            bipolar_data[key] = scipy.delete(
                bipolar_data[key], range(bipolar_data_index, len(labels)), 1)
    return bipolar_data, bipolar_electrodes

Developer: pelednoam, Project: mmvt, Lines: 30, Source: electrodes.py
Note: the scipy.delete examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects; copyright remains with the original authors. Consult each project's License before redistributing or reusing the code; do not reproduce without permission.