This article collects and organizes typical usage examples of the numpy.insert function in Python. If you have been wondering what exactly the insert function does, how to call it, or what real-world uses look like, the curated examples here should help.
Twenty code examples of the insert function are shown below, sorted by popularity by default. You can upvote the ones you like or find useful; your feedback helps the system recommend better Python code examples.
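Before working through the examples, here is a minimal self-contained sketch of numpy.insert itself (the arrays and values are purely illustrative): a single value inserted at one position, several insertions in one call, and an insertion along an axis of a 2-D array.

import numpy as np

a = np.array([10, 20, 30, 40])
print(np.insert(a, 1, 99))          # [10 99 20 30 40] -- one value inserted before index 1
print(np.insert(a, [1, 3], 0))      # [10  0 20 30  0 40] -- indices refer to the original array
m = np.arange(6).reshape(2, 3)
print(np.insert(m, 0, -1, axis=1))  # prepends a column of -1 to every row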
Example 1: correct_missing_doms
def correct_missing_doms(self, scalerarray, no_channels):
"""
Backup method in case geometry is not given.
Very back-of-the-envelope.
Not used at the moment.
    Corrects an artifact of storing variable-length arrays in a table.
    Changes to the SNDAQ geometry removed certain DOMs from the snall
    data array, so this method puts them back into the array at the right locations.
    The last 7 or 8 dummy entries produced when reading data from file
    are removed, and zeros are inserted at the appropriate places in the array.
:param scalerarray: Scaler array missing DOMs shifted to end
:param no_channels: Number of active channels assumed for the file
:returns: Scaler array with the correct location mapping
"""
if no_channels == 5153:
return np.insert(scalerarray[:-7],
[45, 403, 1308, 1925, 2278, 3594, 4061], 0)
elif no_channels == 5152:
return np.insert(scalerarray[:-8],
[45, 403, 1308, 1925, 2278, 3594, 4061, 5069], 0)
else:
        raise RuntimeError("No. of channels (= %d) is not supported" % no_channels)
Developer: briedel, Project: sandbox_icecube, Lines: 25, Source: sndaq_root_hdf5_converter_v2.py
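A toy illustration of the padding trick used above, with made-up indices rather than real DOM positions: passing a list of indices to np.insert places a zero before each of those positions of the original array, shifting later entries to the right.

import numpy as np

scalers = np.array([7, 7, 7, 7, 7, 7])
missing = [2, 4]                       # positions (in the original array) where entries were removed
padded = np.insert(scalers, missing, 0)
print(padded)                          # [7 7 0 7 7 0 7 7] -- zeros land before original indices 2 and 4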
Example 2: execEnd
def execEnd(self,eventIdx):
# execute an end-breaking or depolymerization event.
    oligoEndBreak = self.ald['end'][eventIdx // 2]  # integer division: two events (left/right) per oligo end
    leftRight = eventIdx % 2 * 2 - 1                # -1 or +1
    lr = -(leftRight + 1) // 2                      # 0 or -1, used to pick one of the two ends
    unitMoving = oligoEndBreak.ends[lr]
oligo_vanish,form_oligo,self.event_code=oligoEndBreak.end_break(leftRight,self.units)
if form_oligo:
# not empty
mono=form_oligo['monomer']
if mono:
# monomer + monomer (mergeOligo)
idx=np.where([x in [mono,unitMoving] for x in self.monomers])[0]
self.monomers=np.delete(self.monomers,idx)
self.oligos=np.insert(self.oligos,0,form_oligo['oligo'])
else:
# monomer + multimer (mergeOligo)
idx=np.where([unitMoving is x for x in self.monomers])[0]
self.monomers=np.delete(self.monomers,idx)
else:
#empty, add the end to monomers
self.monomers=np.insert(self.monomers,0,unitMoving)
unitMoving.energize()
if oligo_vanish:
idx=np.where([oligoEndBreak is x for x in self.oligos])[0]
self.oligos=np.delete(self.oligos,idx)
idx=np.where([unitMoving is not x for x in oligoEndBreak.subunits])[0]
nonmoving_unit=oligoEndBreak.subunits[idx[0]]
self.monomers=np.insert(self.monomers,0,nonmoving_unit)
nonmoving_unit.energize()
Developer: chemaoxfz, Project: proteinInteractionSim, Lines: 32, Source: actinTreadmill_sim.py
Example 3: cells
def cells(self, cells, grid):
from lxml import etree as ET
if len(cells) == 1:
meshio_type = list(cells.keys())[0]
num_cells = len(cells[meshio_type])
xdmf_type = meshio_to_xdmf_type[meshio_type][0]
topo = ET.SubElement(
grid,
"Topology",
TopologyType=xdmf_type,
NumberOfElements=str(num_cells),
)
dt, prec = numpy_to_xdmf_dtype[cells[meshio_type].dtype.name]
dim = "{} {}".format(*cells[meshio_type].shape)
data_item = ET.SubElement(
topo,
"DataItem",
DataType=dt,
Dimensions=dim,
Format=self.data_format,
Precision=prec,
)
data_item.text = self.numpy_to_xml_string(cells[meshio_type])
elif len(cells) > 1:
total_num_cells = sum(c.shape[0] for c in cells.values())
topo = ET.SubElement(
grid,
"Topology",
TopologyType="Mixed",
NumberOfElements=str(total_num_cells),
)
total_num_cell_items = sum(numpy.prod(c.shape) for c in cells.values())
dim = total_num_cell_items + total_num_cells
# Lines translate to Polylines, and one needs to specify the exact
# number of nodes. Hence, prepend 2.
if "line" in cells:
cells["line"] = numpy.insert(cells["line"], 0, 2, axis=1)
dim += len(cells["line"])
dim = str(dim)
cd = numpy.concatenate(
[
# prepend column with xdmf type index
numpy.insert(
value, 0, meshio_type_to_xdmf_index[key], axis=1
).flatten()
for key, value in cells.items()
]
)
dt, prec = numpy_to_xdmf_dtype[cd.dtype.name]
data_item = ET.SubElement(
topo,
"DataItem",
DataType=dt,
Dimensions=dim,
Format=self.data_format,
Precision=prec,
)
data_item.text = self.numpy_to_xml_string(cd)
return
Developer: gdmcbain, Project: meshio, Lines: 60, Source: time_series.py
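A small sketch of the column-prepend idiom used twice above (the node count 2 for polylines, and a cell-type index for the mixed topology); the connectivity arrays and the type code 4 are made up for illustration. The key point is that a scalar passed with axis=1 broadcasts down a new first column.

import numpy as np

lines = np.array([[0, 1], [1, 2], [2, 3]])       # hypothetical line connectivity
print(np.insert(lines, 0, 2, axis=1))            # [[2 0 1] [2 1 2] [2 2 3]] -- node count prepended

tris = np.array([[0, 1, 2], [2, 3, 0]])          # hypothetical triangle connectivity
type_index = 4                                   # illustrative type code, not taken from the XDMF spec
print(np.insert(tris, 0, type_index, axis=1).flatten())   # [4 0 1 2 4 2 3 0]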
Example 4: calculate
def calculate(self):
ephem_location = ephem.Observer()
ephem_location.lat = self.location.latitude.to(u.rad) / u.rad
ephem_location.lon = self.location.longitude.to(u.rad) / u.rad
ephem_location.elevation = self.location.height / u.meter
ephem_location.date = ephem.Date(self.time.datetime)
if self.data is None:
self.alt = Latitude([], unit=u.deg)
self.az = Longitude([], unit=u.deg)
        self.names = Column([], dtype=str)  # np.str is removed in modern NumPy; plain str works
self.vmag = Column([])
else:
ra = Longitude(self.data["ra"], u.h)
dec = Latitude(self.data["dec"], u.deg)
c = SkyCoord(ra, dec, frame='icrs')
altaz = c.transform_to(AltAz(obstime=self.time, location=self.location))
self.alt = altaz.alt
self.az = altaz.az
self.names = self.data['name']
self.vmag = self.data['mag']
for ephemeris in self.ephemerides:
ephemeris.compute(ephem_location)
self.vmag = np.insert(self.vmag, [0], ephemeris.mag)
self.alt = np.insert(self.alt, [0], (ephemeris.alt.znorm * u.rad).to(u.deg))
self.az = np.insert(self.az, [0], (ephemeris.az * u.rad).to(u.deg))
self.names = np.insert(self.names, [0], ephemeris.name)
return self.names, self.vmag, self.alt, self.az
Developer: wschoenell, Project: pynephoscope, Lines: 31, Source: sky.py
Example 5: value_counts
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of unique values.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN, even if NaN is in sp_values.
Returns
-------
counts : Series
"""
keys, counts = algos._value_counts_arraylike(self.sp_values,
dropna=dropna)
fcounts = self.sp_index.ngaps
if fcounts > 0:
if self._null_fill_value and dropna:
pass
else:
if self._null_fill_value:
mask = pd.isnull(keys)
else:
mask = keys == self.fill_value
if mask.any():
counts[mask] += fcounts
else:
keys = np.insert(keys, 0, self.fill_value)
counts = np.insert(counts, 0, fcounts)
if not isinstance(keys, pd.Index):
keys = pd.Index(keys)
result = pd.Series(counts, index=keys)
return result
Developer: Casyfill, Project: Capstone_dashboard, Lines: 35, Source: array.py
Example 6: forwardPropPredict
def forwardPropPredict(nn_params, input_layer_size, hidden_layer_size, num_labels, X):
length1 = (input_layer_size+1)*(hidden_layer_size)
nn1 = nn_params[:length1]
T1 = nn1.reshape((hidden_layer_size, input_layer_size+1))
nn2 = nn_params[length1:]
T2 = nn2.reshape((num_labels, 1+ hidden_layer_size))
m = X.shape[0] # number of training examples, useful for calculations
max_pred = 0
predictions = []
# for each training example
train_ex = -1 # training example number we're on (ie. which row of input matrix)
for x in X:
train_ex += 1
# forward propagation
a1 = x
a1 = np.insert(a1, 0, 1, axis=0)
z2 = np.dot(T1, a1)
a2 = sigmoid(z2)
a2 = np.insert(a2, 0 , 1, axis=0)
z3 = np.dot(T2, a2)
a3 = sigmoid(z3)
predictions.append(int(np.argmax(a3)))
return predictions
Developer: jkroening, Project: machine-learning, Lines: 29, Source: NeuralNet_.py
Example 7: trainNN
def trainNN(self, imagesTrainSet, labelsTrainSet, etha):
self.reset_weights()
trainingSetSize = labelsTrainSet.shape[0];
j = 0
while j < 30:
i = 0
# print("Round: " + str(j + 1))
while i < trainingSetSize :
x = imagesTrainSet[i].ravel() # Convert 28x28 pixel image into a (784,) vector
x = np.array([ 0 if val == 0 else 1 for val in x ])
x_a = np.insert(x, 0, values=1, axis=0) # Augmented Feature vector
net_hidd = np.dot(self.w1, x_a)
y = self.signum(net_hidd)
y_a = np.insert(y, 0, values=1, axis=0) # Augmented Feature vector
net_out = np.dot(self.w2, y_a)
z = self.signum(net_out)
lab = np.array([ 1 if k == self.labels[i] else 0 for k in range(10) ])
            J = z - lab
            J = np.sum(0.5 * J * J)
            if J < 1 and self.enableWeightDecay:
                break
out_sensitivity = (lab - z) * self.signum_prime(net_out)
net_hidd_prime = self.signum_prime(net_hidd)
hid_sensitivity = np.dot(self.w2.T, out_sensitivity) * np.insert(net_hidd_prime, 0, 1)
grad_hidd_out = etha * np.outer(out_sensitivity, y_a.T)
grad_in_hidd = etha * np.outer(hid_sensitivity[1:] , x_a.T)
self.update_weights_bias(grad_in_hidd, grad_hidd_out)
i += 1
j += 1
return self.w1, self.w2
Developer: prabhakar9885, Project: Statistical-Methods-in-AI, Lines: 35, Source: AlphabetRecognization.py
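Both neural-network examples above rely on np.insert(x, 0, 1) to prepend a bias term to a feature vector before multiplying by a weight matrix. A minimal standalone sketch with made-up shapes and weights:

import numpy as np

x = np.array([0.2, 0.8, 0.5])        # hypothetical feature vector
x_a = np.insert(x, 0, 1.0)           # augmented vector [1.0, 0.2, 0.8, 0.5]
W = np.ones((2, 4)) * 0.1            # hypothetical weights: 2 units, 3 inputs + bias
print(W.dot(x_a))                    # net input of each unit, bias weight included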
Example 8: eta2direct
def eta2direct(self, x):
"""eta2direct(x)
Args:
- x (``array-like``): a chromosome encoding an MGA trajectory in the eta encoding
Returns:
``numpy.array``: a chromosome encoding the MGA trajectory using the direct encoding
Raises:
- ValueError: when the tof_encoding is not 'eta'
"""
    if self.tof_encoding != 'eta':
raise ValueError(
"cannot call this method if the tof_encoding is not 'eta'")
# decision vector is [t0, n1, n2, n3, ... ]
n = len(x) - 1
dt = self.tof
T = [0] * n
T[0] = dt * x[1]
for i in range(1, len(T)):
T[i] = (dt - sum(T[:i])) * x[i + 1]
    np.insert(T, 0, [0])  # note: np.insert returns a new array; as written the result is discarded
return T
Developer: darioizzo, Project: pykep, Lines: 25, Source: _mga.py
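One detail worth stressing, since it also affects the snippet above: np.insert never modifies its argument in place, it always returns a new array, so the result has to be assigned. A tiny demonstration:

import numpy as np

T = [10.0, 20.0, 30.0]
np.insert(T, 0, 0.0)          # returns a new array; T itself is unchanged
print(T)                      # [10.0, 20.0, 30.0]
T = np.insert(T, 0, 0.0)      # assign the result to actually keep the inserted value
print(T)                      # [ 0. 10. 20. 30.]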
Example 9: get_affine_inliers_RANSAC
def get_affine_inliers_RANSAC(num_m, xy1_m, xy2_m,\
acd1_m, acd2_m, xy_thresh_sqrd, sigma_thresh_sqrd=None):
'''Computes initial inliers by iteratively computing affine transformations
between matched keypoints'''
aff_inliers = []
# Enumerate All Hypothesis (Match transformations)
for mx in xrange(num_m):
xy1 = xy1_m[:,mx].reshape(2,1) # XY Positions
xy2 = xy2_m[:,mx].reshape(2,1)
A1 = matrix(insert(acd1_m[:,mx], [1.], 0.)).reshape(2,2)
A2 = matrix(insert(acd2_m[:,mx], [1.], 0.)).reshape(2,2)
# Compute Affine Tranform
# from img1 to img2 = (E2\E1)
Aff = linalg.inv(A2).dot(A1)
#
# Transform XY-Positions
xy1_mAt = xy2 + Aff.dot( (xy1_m - xy1) )
xy_err_sqrd = sum( power(xy1_mAt - xy2_m, 2) , 0)
_inliers = find(xy_err_sqrd < xy_thresh_sqrd)
#
# Transform Ellipse Geometry (solved on paper)
if not sigma_thresh_sqrd is None:
scale1_mAt = (acd1_m[0]*Aff[0,0]) *\
(acd1_m[1]*Aff[1,0]+acd1_m[2]*Aff[1,1])
scale2_m = acd2_m[0] * acd2_m[2]
scale_err = np.abs(scale1_mAt - scale2_m)
_inliers_scale = find(scale_err < sigma_thresh_sqrd)
_inliers = np.bitwise_and(_inliers, _inliers_scale)
#If this hypothesis transformation is better than the ones we have
#previously seen then set it as the best
if len(_inliers) > len(aff_inliers):
aff_inliers = _inliers
#bst_xy_err = xy_err_sqrd
return aff_inliers
Developer: Erotemic, Project: hotspotter, Lines: 34, Source: spatial_functions.py
Example 10: hawkesfeat
def hawkesfeat(timeseries,args):
'''
Generate hawkes feature: positive rate/negtive rate
args['params']: 1X8 ndarray containing the params of hawkes process
'''
#Assign parameters
params = args['params'] if 'params' in args.keys() else np.array([0.2,0.2, 0.2, 0.7, 0.7, 0.2, 1.0, 1.0])
#Utilize the rate calculation function in the hawkes simulator
sim = simulator(theta = params)
sim.sethistory(timeseries)
rate = sim.historydata[:,2]/sim.historydata[:,3]
rate = np.insert(rate,0,params[0]/params[1]).reshape(-1,1)
time = np.insert(sim.historydata[:,0],0,0.0).reshape(-1,1)
time = np.cumsum(time,axis=0)
value = np.hstack((time,rate))
value = value.astype(object,copy=False)
value[:,0] = Vsecond2delta(value[:,0])
anchor = timeseries.values[0]
anchor[1] = 0.0
value = value + anchor
rateseries = pd.DataFrame(value,columns=['time','quantity'])
rateseries.index = rateseries['time']
rateseries = rateseries.reindex(timeseries.index,method = 'ffill')
return rateseries
Developer: B-Rich, Project: VA_PYTHON, Lines: 32, Source: hawkes.py
Example 11: balance_workload
def balance_workload(nproc, popsize, *index, **kwds):
"""divide popsize elements on 'nproc' chunks
nproc: int number of nodes
popsize: int number of jobs
index: int rank of node(s) to calculate for (using slice notation)
skip: int rank of node upon which to not calculate (i.e. the master)
returns (begin, end) index vectors"""
_skip = False
skip = kwds.get('skip', None)
if skip is not None and skip < nproc:
nproc = nproc - 1
_skip = True
    count = popsize // nproc                    # integer jobs per node before distributing the remainder
    counts = count * np.ones(nproc, dtype=int)
diff = popsize - count*nproc
counts[:diff] += 1
begin = np.concatenate(([0], np.cumsum(counts)[:-1]))
#return counts, index #XXX: (#jobs, begin index) for all elements
if _skip:
if skip == nproc: # remember: nproc has been reduced
begin = np.append(begin, begin[-1]+counts[-1])
counts = np.append(counts, 0)
else:
begin = np.insert(begin, skip, begin[skip])
counts = np.insert(counts, skip, 0)
if not index:
return begin, begin+counts #XXX: (begin, end) index for all elements
#if len(index) > 1:
# return lookup((begin, begin+counts), *index) # index a slice
return lookup((begin, begin+counts), *index) # index a single element
Developer: hpparvi, Project: pyina, Lines: 32, Source: tools.py
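The skip branch above inserts a zero-length chunk at the master's rank so that every rank still gets a (begin, end) pair. A small sketch of just that step, with made-up counts:

import numpy as np

begin = np.array([0, 3, 6])           # hypothetical chunk start indices for 3 workers
counts = np.array([3, 3, 2])          # hypothetical chunk sizes
skip = 1                              # rank that should receive no work
begin = np.insert(begin, skip, begin[skip])
counts = np.insert(counts, skip, 0)
print(begin, counts)                  # [0 3 3 6] [3 0 3 2] -- rank 1 now gets an empty slice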
Example 12: data_concatenate
def data_concatenate(list_data_neuro):
"""
Tool function for blk_align_to_evt, make sure they contains the same number of signals
:param list_data_neuro: a list of data_neuro
:return: concatenated data_neuro
"""
data_neuro_all = {}
for i, data_neuro in enumerate(list_data_neuro):
if i == 0: # if the first block, copy it
data_neuro_all = data_neuro
else: # for next incoming blocks
if len(data_neuro['ts']) == len(data_neuro_all['ts']): # check if ts length matches, otherwise raise error
# check if signals match, if not match, fill the missing signal with all zeros
if not np.array_equal(data_neuro['signal_info'], data_neuro_all['signal_info']):
for indx_signal_new, signal_new in enumerate(data_neuro['signal_info']): # if emerging signal
if signal_new not in data_neuro_all['signal_info']:
data_neuro_all['signal_info'] = np.insert(data_neuro_all['signal_info'], indx_signal_new,
signal_new)
data_neuro_all['data'] = np.insert(data_neuro_all['data'], indx_signal_new, 0.0, axis=2)
                    for indx_signal_old, signal_old in enumerate(data_neuro_all['signal_info']): # if missing signal
if signal_old not in data_neuro['signal_info']:
data_neuro['signal_info'] = np.insert(data_neuro['signal_info'], indx_signal_old,
signal_old)
data_neuro['data'] = np.insert(data_neuro['data'], indx_signal_old, 0.0, axis=2)
# concatenate
data_neuro_all['data'] = np.concatenate((data_neuro_all['data'], data_neuro['data']), axis=0)
else:
print('function data_concatenate can not work with data of different "ts" length')
warnings.warn('function data_concatenate can not work with data of different "ts" length')
return data_neuro_all
Developer: SummitKwan, Project: PyNeuroSG, Lines: 33, Source: PyNeuroData.py
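The concatenation helper above fills a missing signal by inserting a plane of zeros along the channel axis. A minimal sketch with a made-up (trials, time, channels) block:

import numpy as np

data = np.ones((2, 4, 3))               # hypothetical (trials, time, channels) block
data = np.insert(data, 1, 0.0, axis=2)  # add an all-zero channel at position 1
print(data.shape)                       # (2, 4, 4)
print(data[0, 0])                       # [1. 0. 1. 1.]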
Example 13: chans
def chans(self, invert=False):
""" Method to convert the bit mask into a string of channel ranges in CASA format. e.g.
[3,10],[25,50] => "3~10;25~50"
Parameters
----------
None
Returns
-------
string containing the formatted channel ranges
"""
output = ""
if invert:
basechan = np.append(1-self._chans, 0)
shiftchan = np.insert(1-self._chans, 0, 0)
else:
basechan = np.append(self._chans, 0)
shiftchan = np.insert(self._chans, 0, 0)
diff = basechan - shiftchan
st = np.where(diff == 1)[0]
en = np.where(diff == -1)[0]
first = True
for seg in zip(st, en):
if not first:
output += ";"
else:
first = False
output += str(seg[0] + self._startchan) + "~" + str(seg[1] - 1 + self._startchan)
return output
Developer: teuben, Project: admit, Lines: 31, Source: Segments.py
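The append/insert pair above is a standard trick for locating runs of 1s in a bit mask: shifting the mask by one element and differencing marks the transitions. A standalone sketch with a made-up mask:

import numpy as np

mask = np.array([0, 1, 1, 1, 0, 0, 1, 1])   # hypothetical channel mask
basechan = np.append(mask, 0)               # mask followed by a trailing 0
shiftchan = np.insert(mask, 0, 0)           # mask preceded by a leading 0
diff = basechan - shiftchan
starts = np.where(diff == 1)[0]             # [1 6] -- indices where runs of 1s begin
ends = np.where(diff == -1)[0]              # [4 8] -- one past the last index of each run
print(["%d~%d" % (s, e - 1) for s, e in zip(starts, ends)])   # ['1~3', '6~7']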
Example 14: set_params
def set_params(self):
r"""
Internally, scipy.signal works with systems of the form
.. math::
ar_{poly}(L) X_t = ma_{poly}(L) \epsilon_t
where L is the lag operator. To match this, we set
.. math::
ar_{poly} = (1, -\phi_1, -\phi_2,..., -\phi_p)
ma_{poly} = (1, \theta_1, \theta_2,..., \theta_q)
In addition, ar_poly must be at least as long as ma_poly.
This can be achieved by padding it out with zeros when required.
"""
# === set up ma_poly === #
ma_poly = np.asarray(self._theta)
self.ma_poly = np.insert(ma_poly, 0, 1) # The array (1, theta)
# === set up ar_poly === #
if np.isscalar(self._phi):
ar_poly = np.array(-self._phi)
else:
ar_poly = -np.asarray(self._phi)
self.ar_poly = np.insert(ar_poly, 0, 1) # The array (1, -phi)
# === pad ar_poly with zeros if required === #
if len(self.ar_poly) < len(self.ma_poly):
temp = np.zeros(len(self.ma_poly) - len(self.ar_poly))
self.ar_poly = np.hstack((self.ar_poly, temp))
Developer: GaussHuo, Project: quant-econ, Lines: 35, Source: linproc.py
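A compact sketch of the polynomial construction the docstring describes, using illustrative coefficients (an AR(1) phi of 0.5 and MA theta of (0.3, 0.1)), including the zero-padding step:

import numpy as np

phi = 0.5                                        # hypothetical AR coefficient
theta = np.array([0.3, 0.1])                     # hypothetical MA coefficients
ma_poly = np.insert(theta, 0, 1)                 # (1, theta_1, theta_2) -> [1.  0.3 0.1]
ar_poly = np.insert(-np.atleast_1d(phi), 0, 1)   # (1, -phi_1)           -> [ 1.  -0.5]
if len(ar_poly) < len(ma_poly):                  # pad ar_poly with zeros, as in set_params above
    ar_poly = np.hstack((ar_poly, np.zeros(len(ma_poly) - len(ar_poly))))
print(ar_poly)                                   # [ 1.  -0.5  0. ]
print(ma_poly)                                   # [1.  0.3 0.1]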
Example 15: next
def next(self):
totim,dt,kper,kstp,swrstp,success = self.read_header()
if success == False:
# print 'SWR_Stage.next() object reached end of file'
return 0.0,0.0,0,0,0,False,self.null_record
else:
if self.type > 0:
#r = numpy.zeros((self.items+1))
r = numpy.zeros((self.items+2))
for rec in range(0,self.nrecord):
#nlay = self.read_integer()
nlay = self.reachlayers[rec]
for lay in range(0,nlay):
this_lay = self.read_integer()
this_items = self.read_items()
this_r = numpy.insert(this_items,[0],this_lay)
this_r = numpy.insert(this_r,[0],rec+1)
#print totim,this_lay,numpy.shape(r),numpy.shape(this_r)
r = numpy.vstack((r,this_r))
r = numpy.delete(r,0,axis=0)
return totim,dt,kper,kstp,swrstp,True,r
else:
r = self.read_record()
# print 'SWR data read for time step ',kstp,',stress period \
# ',kper,'and swr step ',swrstp
return totim,dt,kper,kstp,swrstp,True,r
Developer: jdhughes, Project: MF2005-SWR1, Lines: 26, Source: MFBinaryClass.py
Example 16: transform
def transform(self, pos=(0,0), angle=0, scale=1):
'''In-plane transformation function. Update the 3D transform based on the 2D changes'''
center = self.shape * self.spacing / 2. + (self.shape + 1) % 2 * self.spacing / 2.
inv = self.xfm.transform.homogeneous_inverse
wpos = self.handle.center.representation.world_position
wpos -= center
if not isinstance(scale, (tuple, list, np.ndarray)):
scale = [scale, scale]
if self.axis == 1:
trans = np.insert(pos[:2][::-1], self.axis, 0)
wpos = np.insert(wpos[:2][::-1], self.axis, self.ipw_3d.ipw.slice_position)
#angle = -angle
else:
trans = np.insert(pos[:2], self.axis, 0)
wpos = np.insert(wpos[:2], self.axis, self.ipw_3d.ipw.slice_position)
scale = np.insert(scale, self.axis, 1)
self.parent._undolist.append(self.xfm.transform.matrix.to_array())
self.xfm.transform.post_multiply()
self.xfm.transform.translate(-wpos)
self.xfm.transform.rotate_wxyz(np.degrees(angle), *self.ipw_3d.ipw.normal)
self.xfm.transform.scale(scale)
self.xfm.transform.translate(wpos)
self.xfm.transform.translate(trans)
self.xfm.transform.pre_multiply()
self.xfm.widget.set_transform(self.xfm.filter.transform)
self.xfm.update_pipeline()
self.parent.update_slabs()
np.save("/tmp/last_xfm.npy", self.parent.get_xfm())
Developer: QihongL, Project: pycortex, Lines: 34, Source: mayavi_aligner.py
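The np.insert calls above turn an in-plane 2-vector into a full 3-vector by inserting the slice position at the index of the plane's normal axis. A small sketch with made-up numbers (it shows only this indexing step, not the full transform logic):

import numpy as np

pos2d = np.array([3.0, 5.0])          # hypothetical in-plane translation
axis = 1                              # hypothetical normal axis of the image plane
slice_position = 12.0                 # hypothetical slice position along that axis
trans3d = np.insert(pos2d, axis, slice_position)
print(trans3d)                        # [ 3. 12.  5.] -- the plane coordinate slots in at index 1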
Example 17: fit
def fit(self, X, sample_weight=None, **kwargs):
# Checks
X = check_array(X)
if sample_weight is not None and len(sample_weight) != len(X):
raise ValueError
# Compute histogram and edges
    h, e = np.histogramdd(X, bins=self.bins, range=self.range,
                          weights=sample_weight, density=True)
# Add empty bins for out of bound samples
for j in range(X.shape[1]):
h = np.insert(h, 0, 0., axis=j)
h = np.insert(h, h.shape[j], 0., axis=j)
e[j] = np.insert(e[j], 0, -np.inf)
e[j] = np.insert(e[j], len(e[j]), np.inf)
if X.shape[1] == 1 and self.interpolation:
inputs = e[0][2:-1] - (e[0][2] - e[0][1]) / 2.
inputs[0] = e[0][1]
inputs[-1] = e[0][-2]
outputs = h[1:-1]
self.interpolation_ = interp1d(inputs, outputs,
kind=self.interpolation,
bounds_error=False,
fill_value=0.)
self.histogram_ = h
self.edges_ = e
self.ndim_ = X.shape[1]
return self
Developer: glouppe, Project: carl, Lines: 33, Source: histogram.py
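A minimal 1-D version of the padding step above, with made-up data: an empty bin is added on each side of the histogram and the matching edges are extended to plus/minus infinity so out-of-range samples land in the new bins.

import numpy as np

X = np.array([0.1, 0.4, 0.45, 0.9])
h, edges = np.histogram(X, bins=4, range=(0.0, 1.0), density=True)
h = np.insert(h, 0, 0.0)              # empty bin below the range
h = np.insert(h, len(h), 0.0)         # empty bin above the range
edges = np.insert(edges, 0, -np.inf)
edges = np.insert(edges, len(edges), np.inf)
print(h)                              # [0. 1. 2. 0. 1. 0.]
print(edges)                          # [-inf 0. 0.25 0.5 0.75 1. inf]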
Example 18: polyadd
def polyadd(p1,p2):
s1 = np.size(p1)
s2 = np.size(p2)
length = max(s1,s2)
    p1 = np.insert(p1, np.zeros(length - s1, dtype=int), 0)  # pad the front of the shorter array with zeros
    p2 = np.insert(p2, np.zeros(length - s2, dtype=int), 0)
return p1+p2
Developer: pytutor, Project: python-tutor, Lines: 7, Source: polyadd.py
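A quick usage check of polyadd as defined above (it assumes the integer-index form of the padding shown there), with the usual NumPy convention of highest-degree coefficient first:

import numpy as np

# p1 = x^2 + 2x + 3, p2 = 4x + 5 (highest degree first)
print(polyadd(np.array([1, 2, 3]), np.array([4, 5])))   # [1 6 8] -> x^2 + 6x + 8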
Example 19: calcEarthParams
def calcEarthParams(layerThickness, layerResistivity):
""""""
nLayers = len(layerResistivity["min"]) # or 'max'
thicknessParam = np.empty((nLayers,))
resistivityParam = np.empty((nLayers,))
# Iterate through the layers, applying the p formula to both
# thickness and resistivity
for i in range(nLayers):
# Generate a random number to control where in the range of
# possible values the true value of p could lie. This precedes the
# MC iteration, so take one p value with a grain of salt, but many
# with a salt shaker
randomNumber = np.random.random_sample()
if i < (nLayers - 1): # Skip last depth (infinite)
thicknessP = (layerThickness["max"][i] - layerThickness["min"][i]) * randomNumber + layerThickness["min"][i]
thicknessParam = np.insert(thicknessParam, i, thicknessP)
del thicknessP
resistivityP = (layerResistivity["max"][i] - layerResistivity["min"][i]) * randomNumber + layerResistivity[
"min"
][i]
resistivityParam = np.insert(resistivityParam, i, resistivityP)
del resistivityP
return (thicknessParam[: nLayers - 1], resistivityParam[:nLayers])
Developer: vitale232, Project: ves, Lines: 26, Source: inversion_analysis.py
Example 20: insert
def insert(array, obj, values):
"""Insert values along the given axis before the given indices.
Parameters:
-----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which values is inserted.
values : array_like
Values to insert into arr. If the type of values is different from that of arr, values is converted to the type of arr.
axis : int, optional
Axis along which to insert values. If axis is None then arr is flattened first.
Returns:
--------
out : ndarray
A copy of arr with values inserted. Note that insert does not occur in-place: a new array is returned. If axis is None, out is a flattened array.
"""
if isphysicalquantity(array):
return np.insert(array.value, obj, values.value) * q[array.unit]
else:
return np.insert(array, obj, values)
Developer: juhasch, Project: PhysicalQuantities, Lines: 27, Source: numpywrapper.py
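A small usage sketch exercising the np.insert parameters the docstring lists (obj as an int, a slice, and with an explicit axis), calling np.insert directly since the PhysicalQuantities branch of the wrapper needs that package:

import numpy as np

arr = np.array([[1, 1], [2, 2], [3, 3]])
print(np.insert(arr, 1, 5))                    # axis=None: flattened first -> [1 5 1 2 2 3 3]
print(np.insert(arr, 1, 5, axis=1))            # a column of 5s before column 1
print(np.insert(arr, slice(0, 2), 9, axis=0))  # obj may also be a slice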
Note: the numpy.insert examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and redistribution and use should follow the license of the corresponding project. Do not reproduce without permission.