
Python numpy.resize Function Code Examples


This article collects typical usage examples of the numpy.resize function in Python. If you have been wondering what resize does, how to call it, or what real code that uses it looks like, the curated examples below should help.



The following sections present 20 code examples of the resize function, drawn from open-source projects and ordered by popularity.
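
Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of the behavior they all rely on: numpy.resize returns a new array of the requested shape, repeating the flattened input when the new size is larger and truncating it when the new size is smaller.

import numpy as np

a = np.array([1, 2, 3, 4])
print(np.resize(a, (2, 2)))   # [[1 2] [3 4]]  -- same data, new shape
print(np.resize(a, (3, 3)))   # the data is repeated to fill all 9 elements
print(np.resize(a, 2))        # [1 2]          -- the data is truncated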

Example 1: genImg

    def genImg(self,xml,compressionLevel):
        xml = re.split('\n',xml)
        img_data = re.split('"',xml[1])

        type = img_data[1]
        size = [int(x) for x in img_data[3].split(',')]
        compressed = img_data[5]
        pixels = xml[2]

        pixels = base64.b64decode(pixels)       # decode base 64 encoding

        if compressed == 'True':
            pixels = zlib.decompress(pixels)    # if data was compressed, decompress it

        pixels = list(pixels)                   # converting byte data into a list, which will give us the actual numbers of ndarray(i.e. the image)

        if type == 'Gray':                      # Based on image type, reconstruct numpy.ndarray
            return numpy.resize(pixels,tuple(size))

        else:
            r = pixels[:size[0]*size[1]]
            g = pixels[size[0]*size[1]:2*size[0]*size[1]]
            b = pixels[2*size[0]*size[1]:3*size[0]*size[1]]
            image = []
            for i in range(size[0] * size[1]):
                image.append(r[i])
                image.append(g[i])
                image.append(b[i])

            size.append(3)
            return numpy.resize(image,tuple(size))
Author: jain98 | Project: PurdueProgramming | Lines: 31 | Source: Steganography.py
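
In this example numpy.resize acts purely as a reshape of a flat pixel list. A minimal sketch of the color branch, with invented two-pixel data for illustration (not part of the Steganography project):

import numpy

r, g, b = [10, 11], [20, 21], [30, 31]              # per-channel values for two pixels
image = [v for rgb in zip(r, g, b) for v in rgb]    # interleave to r0, g0, b0, r1, g1, b1
print(numpy.resize(image, (1, 2, 3)))               # a 1x2 RGB image array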


Example 2: batchsd

def batchsd(trace, batches=5):
    """
    Calculates the simulation standard error, accounting for non-independent
    samples. The trace is divided into batches, and the standard deviation of
    the batch means is calculated.
    """

    if len(np.shape(trace)) > 1:

        dims = np.shape(trace)
        # ttrace = np.transpose(np.reshape(trace, (dims[0], sum(dims[1:]))))
        ttrace = np.transpose([t.ravel() for t in trace])

        return np.reshape([batchsd(t, batches) for t in ttrace], dims[1:])

    else:
        if batches == 1:
            return np.std(trace) / np.sqrt(len(trace))

        try:
            batched_traces = np.resize(trace, (batches, len(trace) // batches))  # floor division keeps the shape integral
        except ValueError:
            # If batches do not divide evenly, trim excess samples
            resid = len(trace) % batches
            batched_traces = np.resize(trace[:-resid],
                (batches, len(trace[:-resid]) // batches))

        means = np.mean(batched_traces, 1)

        return np.std(means) / np.sqrt(batches)
Author: AsymmetricHuang | Project: pymc | Lines: 30 | Source: base.py
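
A short standalone sketch of the batch-means idea (not part of the pymc sources): the 1-D trace is folded into a (batches, len(trace)//batches) matrix with np.resize, and the spread of the row means estimates the simulation error.

import numpy as np

trace = np.cumsum(np.random.randn(1000))                      # a correlated chain, for illustration
batches = 5
batched = np.resize(trace, (batches, len(trace) // batches))
print(np.std(np.mean(batched, axis=1)) / np.sqrt(batches))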


Example 3: aabut

 def aabut (source, *args):
    """
Like the |Stat abut command.  It concatenates two arrays column-wise
and returns the result.  CAUTION:  If one array is shorter, it will be
repeated until it is as long as the other.

Usage:   aabut (source, args)    where args=any # of arrays
Returns: an array as long as the LONGEST array passed, source appearing on the
         'left', arrays in <args> attached on the 'right'.
"""
    if len(source.shape)==1:
        width = 1
        source = N.resize(source,[source.shape[0],width])
    else:
        width = source.shape[1]
    for addon in args:
        if len(addon.shape)==1:
            width = 1
            addon = N.resize(addon,[source.shape[0],width])
        else:
            width = source.shape[1]
        if len(addon) < len(source):
            addon = N.resize(addon,[source.shape[0],addon.shape[1]])
        elif len(source) < len(addon):
            source = N.resize(source,[addon.shape[0],source.shape[1]])
        source = N.concatenate((source,addon),1)
    return source
Author: phamnhuson | Project: VDNABARCODE | Lines: 27 | Source: pstat.py
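
A hypothetical call (array values invented, aabut as defined above) showing the recycling behavior the docstring warns about: the shorter column is repeated by N.resize until it matches the longer one.

import numpy as N

a = N.arange(6)        # length 6
b = N.arange(2)        # length 2, will be recycled
print(aabut(a, b))     # column 0 is a; column 1 cycles 0, 1, 0, 1, 0, 1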


Example 4: mc_error

def mc_error(x, batches=5):
    """
    Calculates the simulation standard error, accounting for non-independent
    samples. The trace is divided into batches, and the standard deviation of
    the batch means is calculated.

    :Arguments:
      x : Numpy array
          An array containing MCMC samples
      batches : integer
          Number of batches
    """

    if x.ndim > 1:

        dims = np.shape(x)
        #ttrace = np.transpose(np.reshape(trace, (dims[0], sum(dims[1:]))))
        trace = np.transpose([t.ravel() for t in x])

        return np.reshape([mc_error(t, batches) for t in trace], dims[1:])

    else:
        if batches == 1: return np.std(x)/np.sqrt(len(x))

        try:
            batched_traces = np.resize(x, (batches, len(x) // batches))  # floor division keeps the shape integral
        except ValueError:
            # If batches do not divide evenly, trim excess samples
            resid = len(x) % batches
            batched_traces = np.resize(x[:-resid], (batches, len(x) // batches))

        means = np.mean(batched_traces, 1)

        return np.std(means)/np.sqrt(batches)
Author: bkanuka | Project: pymc | Lines: 34 | Source: stats.py


Example 5: draw_lnm_samples

def draw_lnm_samples(**kwargs):
    ''' Draw samples for uniform-in-log model

        Parameters
        ----------
        **kwargs: string
           Keyword arguments as model parameters and number of samples

        Returns
        -------
        array
           The first mass
        array
           The second mass
    '''

    # PDF doesn't match the sampler
    nsamples = kwargs.get('nsamples', 1)
    min_mass = kwargs.get('min_mass', 5.)
    max_mass = kwargs.get('max_mass', 95.)
    max_mtotal = min_mass + max_mass
    lnmmin = log(min_mass)
    lnmmax = log(max_mass)

    k = nsamples * int(1.5 + log(1 + 100./nsamples))
    aa = np.exp(np.random.uniform(lnmmin, lnmmax, k))
    bb = np.exp(np.random.uniform(lnmmin, lnmmax, k))

    idx = np.where(aa + bb < max_mtotal)
    m1, m2 = (np.maximum(aa, bb))[idx], (np.minimum(aa, bb))[idx]

    return np.resize(m1, nsamples), np.resize(m2, nsamples)
Author: bhooshan-gadre | Project: pycbc | Lines: 32 | Source: rates_functions.py
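
A hedged usage sketch (keyword names follow the docstring; this is not part of the pycbc test suite). The final np.resize guarantees exactly nsamples values even after the m1 + m2 < max_mtotal cut, recycling the accepted draws if needed.

import numpy as np

m1, m2 = draw_lnm_samples(nsamples=1000, min_mass=5., max_mass=95.)
print(m1.shape, m2.shape)   # (1000,) (1000,)
print(np.all(m1 >= m2))     # True: m1 is defined as the larger of each pair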


Example 6: test

def test(npoints):
    xx = numpy.arange(npoints)
    xx=numpy.resize(xx,(npoints,1))
    #yy = 1000.0 * exp (- 0.5 * (xx * xx) /15)+ 2.0 * xx + 10.5
    yy = gauss([10.5,2,1000.0,20.,15],xx)
    yy=numpy.resize(yy,(npoints,1))
    sy = numpy.sqrt(abs(yy))
    sy=numpy.resize(sy,(npoints,1))
    data = numpy.concatenate((xx, yy, sy),1)
    parameters = [0.0,1.0,900.0, 25., 10]
    stime = time.time()
    if 0:
        #old fashion
        fittedpar, chisq, sigmapar = LeastSquaresFit(gauss,parameters,data)
    else:
        #easier to handle
        fittedpar, chisq, sigmapar = LeastSquaresFit(gauss,parameters,
                                                     xdata=xx.reshape((-1,)),
                                                     ydata=yy.reshape((-1,)),
                                                     sigmadata=sy.reshape((-1,)))
    etime = time.time()
    print("Took ",etime - stime, "seconds")
    print("chi square  = ",chisq)
    print("Fitted pars = ",fittedpar)
    print("Sigma pars  = ",sigmapar)
Author: PiRK | Project: pymca | Lines: 25 | Source: Gefit.py


Example 7: __init__

    def __init__(self, name, geometry, order, init_context=True):
        LagrangeSimplexPolySpace.__init__(self, name, geometry, order,
                                          init_context=False)

        nodes, nts, node_coors = self.nodes, self.nts, self.node_coors

        shape = [nts.shape[0] + 1, 2]
        nts = nm.resize(nts, shape)
        nts[-1,:] = [3, 0]

        shape = [nodes.shape[0] + 1, nodes.shape[1]]
        nodes = nm.resize(nodes, shape)
        # Make a 'hypercubic' (cubic in 2D) node.
        nodes[-1,:] = 1

        n_v = self.geometry.n_vertex
        tmp = nm.ones((n_v,), nm.int32)

        node_coors = nm.vstack((node_coors,
                                nm.dot(tmp, self.geometry.coors) / n_v))

        self.nodes, self.nts = nodes, nts
        self.node_coors = nm.ascontiguousarray(node_coors)

        self.bnode = nodes[-1:,:]

        self.n_nod = self.nodes.shape[0]

        if init_context:
            self.eval_ctx = self.create_context(None, 0, 1e-15, 100, 1e-8,
                                                tdim=n_v - 1)

        else:
            self.eval_ctx = None
Author: clazaro | Project: sfepy | Lines: 34 | Source: poly_spaces.py


Example 8: test_QuaternionClass

 def test_QuaternionClass(self):        
     v1 = np.array([0.2, 0.2, 0.4])
     v2 = np.array([1, 0, 0])         
     q1 = Quaternion.q_exp(v1)
     q2 = Quaternion.q_exp(v2)
     v=np.array([1, 2, 3])
     # Testing Mult and rotate
     np.testing.assert_almost_equal(Quaternion.q_rotate(Quaternion.q_mult(q1,q2),v), Quaternion.q_rotate(q1,Quaternion.q_rotate(q2,v)), decimal=7)
     np.testing.assert_almost_equal(Quaternion.q_rotate(q1,v2), np.resize(Quaternion.q_toRotMat(q1),(3,3)).dot(v2), decimal=7)
     # Testing Boxplus, Boxminus, Log and Exp
     np.testing.assert_almost_equal(Quaternion.q_boxPlus(q1,Quaternion.q_boxMinus(q2,q1)), q2, decimal=7)
     np.testing.assert_almost_equal(Quaternion.q_log(q1), v1, decimal=7)
     # Testing Lmat and Rmat
     np.testing.assert_almost_equal(Quaternion.q_mult(q1,q2), Quaternion.q_Lmat(q1).dot(q2), decimal=7)
     np.testing.assert_almost_equal(Quaternion.q_mult(q1,q2), Quaternion.q_Rmat(q2).dot(q1), decimal=7)
     # Testing ypr and quat
     roll = 0.2
     pitch = -0.5
     yaw = 2.5
     q_test = Quaternion.q_mult(np.array([np.cos(0.5*pitch), 0, np.sin(0.5*pitch), 0]),np.array([np.cos(0.5*yaw), 0, 0, np.sin(0.5*yaw)]))
     q_test = Quaternion.q_mult(np.array([np.cos(0.5*roll), np.sin(0.5*roll), 0, 0]),q_test)
     np.testing.assert_almost_equal(Quaternion.q_toYpr(q_test), np.array([roll, pitch, yaw]), decimal=7)
     # Testing Jacobian of Ypr
     for i in np.arange(0,3):
         dv1 = np.array([0.0, 0.0, 0.0])
         dv1[i] = 1.0
         epsilon = 1e-6
         ypr1 = Quaternion.q_toYpr(q1)
         ypr1_dist = Quaternion.q_toYpr(Quaternion.q_boxPlus(q1,dv1*epsilon))
         dypr1_1 = (ypr1_dist-ypr1)/epsilon
         J = np.resize(Quaternion.q_toYprJac(q1),(3,3))
         dypr1_2 = J.dot(dv1)
         np.testing.assert_almost_equal(dypr1_1,dypr1_2, decimal=5)
Author: Yvaine | Project: trajectory_toolkit | Lines: 33 | Source: Tests.py


Example 9: explicitmidpoint

def explicitmidpoint(ode, vardict, soln, h, relerr):
    """
    Implementation of the Explicit Midpoint method.
    """
    eqnum = len(ode)
    dim = [eqnum, 2]
    dim.extend(soln[0][0].shape)
    dim = tuple(dim)
    if numpy.iscomplexobj(soln[0]):
        aux = numpy.resize([0. + 0j], dim)
    else:
        aux = numpy.resize([0.], dim)
    dim = soln[0][0].shape
    for vari in range(eqnum):
        vardict.update({'y_{}'.format(vari): soln[vari][-1]})
    for vari in range(eqnum):
        aux[vari][0] = numpy.resize([seval(ode[vari], **vardict) * h[0] + soln[vari][-1]], dim)
    for vari in range(eqnum):
        vardict.update({"y_{}".format(vari): aux[vari][0]})
    vardict.update({'t': vardict['t'] + 0.5 * h[0]})
    for vari in range(eqnum):
        aux[vari][0] = numpy.resize([seval(ode[vari], **vardict)], dim)
    for vari in range(eqnum):
        vardict.update({"y_{}".format(vari): numpy.array(soln[vari][-1] + h[0] * aux[vari][0])})
        pt = soln[vari]
        kt = numpy.array([vardict['y_{}'.format(vari)]])
        soln[vari] = numpy.concatenate((pt, kt))
    vardict.update({'t': vardict['t'] + 0.5 * h[0]})
Author: Microno95 | Project: DESolver | Lines: 28 | Source: differentialsystem.py
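
The numpy.resize calls here mainly serve as quick allocation: resizing a one-element list to the target shape fills the whole array with that value, which is equivalent to numpy.zeros with a matching dtype. A standalone check (not DESolver code):

import numpy

dim = (3, 2, 4)
aux = numpy.resize([0. + 0j], dim)
print(aux.shape, aux.dtype)                                     # (3, 2, 4) complex128
print(numpy.array_equal(aux, numpy.zeros(dim, dtype=complex)))  # True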


Example 10: append

 def append(self, value):
     # convert the appended object to an array if it starts as something else
     if type(value) is not np.ndarray:
         value = np.array(value)
     # add the data
     if value.ndim == 1:
         # adding a single row of data
         n = self.__n
         if n + 1 > self.__N:
             # need to allocate more memory
             self.__N += self.__n_grow
             self.__data = np.resize(self.__data, (self.__N, self.__cols))
         self.__data[n] = value
         self.__n = n + 1
     elif value.ndim == 2:
         # adding multiple rows of data
         # avoid loops for appending large arrays
         n = self.__n
         L = value.shape[0]
         N_needed = n + L - self.__N
         if N_needed > 0:
             # need to allocate more memory
             self.__N += (N_needed // self.__n_grow + 1) * self.__n_grow  # floor division keeps __N an integer
             self.__data = np.resize(self.__data, (self.__N, self.__cols))
         self.__data[n:n+L] = value
         self.__n += L
Author: avelo | Project: Pythics | Lines: 26 | Source: lib.py


Example 11: initialize

def initialize(dx, x_shore, eta_shore, eta_toe, S_d, S_b, S, Q_w, B0):
    """Initialize the variables for the simulation.

    Args:

    Returns: 
        eta_b : array of z-coordinate of the basement at every node. Has the
                same size as dx. [ L ] 

    Comments: 
    """
    # First thing we should do is specify our computational domain.
    N, dx_shore = nodes_in_domain(x_shore, dx)
    N_old = N.copy()
    dx = init_domain(N, dx_shore, dx)
    x_shore = x(dx)[-1]
    # Then we compute the basement elevation and the location of the delta toe.
    eta_b = np.resize(eta_toe, N)
    eta_b, x_toe = init_basement(dx, eta_shore, eta_toe, eta_b, S_d, S_b)
    # Next, we compute the initial fluvial profile
    eta, S = init_flumen(dx, eta_shore, S)
    # Instantiate a water-depth array for the domain.
    H = np.zeros_like(dx)
    # Compute unit flow rate
    qw = unit_flowrate(Q_w, B0) # [ L**2 / T ]
    # Compute critical depth for the section.
    Hc = critical_flow(qw)
    # Redefine B0 as a vector, to have that information on every node.    
    B0 = np.resize(B0, N)
    # Instantiate a sediment transport capacity array for the domain
    qt = np.zeros_like(dx)
    return (N, N_old, dx_shore, dx, x_shore, eta_b, x_toe, eta, S, H, Hc, qw,
            B0, qt)
Author: hernanrr | Project: autoretreat | Lines: 33 | Source: autoretreat.py


Example 12: _wrap

def _wrap(vector, pad_tuple, iaxis, kwargs):
    '''
    Private function to calculate the before/after vectors for pad_wrap.

    Parameters
    ----------
    vector : ndarray
        Input vector that already includes empty padded values.
    pad_tuple : tuple
        This tuple represents the (before, after) width of the padding
        along this particular iaxis.
    iaxis : int
        The axis currently being looped across.  Not used in _wrap.
    kwargs : keyword arguments
        Keyword arguments. Not used in _wrap.

    Return
    ------
    _wrap : ndarray
        Padded vector
    '''
    if pad_tuple[1] == 0:
        after_vector = vector[pad_tuple[0]:None]
    else:
        after_vector = vector[pad_tuple[0]:-pad_tuple[1]]

    before_vector = np.resize(after_vector[::-1], pad_tuple[0])[::-1]
    after_vector = np.resize(after_vector, pad_tuple[1])

    return _create_vector(vector, pad_tuple, before_vector, after_vector)
Author: WeatherGod | Project: numpy | Lines: 30 | Source: arraypad.py
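
_wrap is a private helper behind numpy.pad's 'wrap' mode, so the behavior it implements can be exercised through the public API (a quick check, not part of arraypad.py):

import numpy as np

v = np.array([1, 2, 3])
print(np.pad(v, (2, 3), mode='wrap'))   # [2 3 1 2 3 1 2 3]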


Example 13: cube

def cube():
    """
    Build vertices for a colored cube.

    V  is the vertices
    I1 is the indices for a filled cube (use with GL_TRIANGLES)
    I2 is the indices for an outline cube (use with GL_LINES)
    """
    vtype = [('a_position', np.float32, 3),
             ('a_normal'  , np.float32, 3),
             ('a_color',    np.float32, 4)] 
    # Vertices positions
    v = [ [ 1, 1, 1],  [-1, 1, 1],  [-1,-1, 1], [ 1,-1, 1],
          [ 1,-1,-1],  [ 1, 1,-1],  [-1, 1,-1], [-1,-1,-1] ]
    # Face Normals
    n = [ [ 0, 0, 1],  [ 1, 0, 0],  [ 0, 1, 0] ,
          [-1, 0, 1],  [ 0,-1, 0],  [ 0, 0,-1] ]
    # Vertex colors
    c = [ [0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 1, 0, 1],
          [1, 1, 0, 1], [1, 1, 1, 1], [1, 0, 1, 1], [1, 0, 0, 1] ];

    V =  np.array([(v[0],n[0],c[0]), (v[1],n[0],c[1]), (v[2],n[0],c[2]), (v[3],n[0],c[3]),
                   (v[0],n[1],c[0]), (v[3],n[1],c[3]), (v[4],n[1],c[4]), (v[5],n[1],c[5]),
                   (v[0],n[2],c[0]), (v[5],n[2],c[5]), (v[6],n[2],c[6]), (v[1],n[2],c[1]),
                   (v[1],n[3],c[1]), (v[6],n[3],c[6]), (v[7],n[3],c[7]), (v[2],n[3],c[2]),
                   (v[7],n[4],c[7]), (v[4],n[4],c[4]), (v[3],n[4],c[3]), (v[2],n[4],c[2]),
                   (v[4],n[5],c[4]), (v[7],n[5],c[7]), (v[6],n[5],c[6]), (v[5],n[5],c[5]) ],
                  dtype = vtype)
    I1 = np.resize( np.array([0,1,2,0,2,3], dtype=np.uint32), 6*(2*3))
    I1 += np.repeat( 4*np.arange(2*3), 6)

    I2 = np.resize( np.array([0,1,1,2,2,3,3,0], dtype=np.uint32), 6*(2*4))
    I2 += np.repeat( 4*np.arange(6), 8)

    return V, I1, I2
Author: tobyhijzen | Project: vispy | Lines: 35 | Source: rotate-cube.py
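
The two np.resize calls tile a per-face index pattern across all six faces, and np.repeat then shifts each copy to that face's first vertex. A small standalone check of the filled-cube indices (same construction as I1 above, with an explicit uint32 cast for the offsets):

import numpy as np

pattern = np.array([0, 1, 2, 0, 2, 3], dtype=np.uint32)    # two triangles of one face
I1 = np.resize(pattern, 6 * len(pattern))                  # tile the pattern over 6 faces
I1 += np.repeat(4 * np.arange(6), 6).astype(np.uint32)     # offset each face by its 4 vertices
print(I1.reshape(6, 6))                                    # row k holds the indices of face k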


Example 14: plot_image

    def plot_image(self, image, nb_repeat=40, show_plot=True):
        """Plot augmented variations of an image.

        This method takes an image and plots it by default in 40 differently
        augmented versions.

        This method is intended to visualize the strength of your chosen
        augmentations (so for debugging).

        Args:
            image: The image to plot.
            nb_repeat: How often to plot the image. Each time it is plotted,
                the chosen augmentation will be different. (Default: 40).
            show_plot: Whether to show the plot. False makes sense if you
                don't have a graphical user interface on the machine.
                (Default: True)

        Returns:
            The figure of the plot.
            Use figure.savefig() to save the image.
        """
        if len(image.shape) == 2:
            images = np.resize(image, (nb_repeat, image.shape[0], image.shape[1]))
        else:
            images = np.resize(image, (nb_repeat, image.shape[0], image.shape[1],
                               image.shape[2]))
        return self.plot_images(images, True, show_plot=show_plot)
Author: jonsonlion | Project: face-comparer | Lines: 27 | Source: ImageAugmenter.py
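
Because np.resize repeats its input, resizing an image to (nb_repeat, height, width[, channels]) yields nb_repeat identical copies stacked along a new leading axis, which plot_images then presumably augments independently. A tiny check (not part of ImageAugmenter):

import numpy as np

img = np.arange(6).reshape(2, 3)
stack = np.resize(img, (4, 2, 3))   # 4 identical copies of img
print(np.all(stack == img))         # True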


Example 15: _assignInitialPoints

def _assignInitialPoints(cvmat,S):
    h,w,c = cvmat.shape
    
    # Compute the max grid assignment
    nx = w // S    # floor division: integer grid dimensions
    ny = h // S

    # Compute the super pixel x,y grid
    xgrid = np.arange(nx).reshape(1,nx)*np.ones(ny,dtype=np.int).reshape(ny,1)
    ygrid = np.arange(ny).reshape(ny,1)*np.ones(nx,dtype=np.int).reshape(1,nx)
    
    # compute an x,y lookup to a label look up
    label_map = nx*ygrid + xgrid    
    
    # Compute the x groups in pixel space
    tmp = np.arange(nx)
    tmp = np.resize(tmp,(w,))
    xgroups = tmp[tmp.argsort()]

    # Compute the y groups in pixel space
    tmp = np.arange(ny)
    tmp = np.resize(tmp,(h,))
    ygroups = tmp[tmp.argsort()]

    labels = np.zeros((h,w),dtype=np.int)
    
    for x in range(w):
        for y in range(h):
            labels[y,x] = label_map[ygroups[y],xgroups[x]]

    return label_map,xgroups,ygroups,labels
Author: bolme | Project: pyvision | Lines: 31 | Source: superpixel.py
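
The xgroups/ygroups construction is worth unpacking: resizing arange(nx) to length w repeats the column labels cyclically, and sorting them (tmp[tmp.argsort()] is simply np.sort(tmp)) yields contiguous, nearly equal-sized bins that map pixels to superpixel columns. A small check with invented sizes:

import numpy as np

nx, w = 3, 8
tmp = np.resize(np.arange(nx), (w,))
print(tmp)             # [0 1 2 0 1 2 0 1]
print(np.sort(tmp))    # [0 0 0 1 1 1 2 2]  -> contiguous pixel-to-column assignment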


Example 16: eulertrap

def eulertrap(ode, vardict, soln, h, relerr):
    """
    Implementation of the Euler-Trapezoidal method.
    """
    eqnum = len(ode)
    dim = [eqnum, 3]
    dim.extend(soln[0][0].shape)
    dim = tuple(dim)
    if numpy.iscomplexobj(soln[0]):
        aux = numpy.resize([0. + 0j], dim)
    else:
        aux = numpy.resize([0.], dim)
    dim = soln[0][0].shape
    for vari in range(eqnum):
        vardict.update({'y_{}'.format(vari): soln[vari][-1]})
    for vari in range(eqnum):
        aux[vari][0] = numpy.resize(seval(ode[vari], **vardict), dim)
    for vari in range(eqnum):
        aux[vari][1] = numpy.resize(seval(ode[vari], **vardict) * h[0] + soln[vari][-1], dim)
    for vari in range(eqnum):
        vardict.update({'y_{}'.format(vari): aux[vari][1]})
    vardict.update({'t': vardict['t'] + h[0]})
    for vari in range(eqnum):
        aux[vari][2] = numpy.resize(seval(ode[vari], **vardict), dim)
    for vari in range(eqnum):
        vardict.update({"y_{}".format(vari): soln[vari][-1] + h[0] * (aux[vari][0] + aux[vari][2])})
        pt = soln[vari]
        kt = numpy.array([vardict['y_{}'.format(vari)]])
        soln[vari] = numpy.concatenate((pt, kt))
Author: Microno95 | Project: DESolver | Lines: 29 | Source: differentialsystem.py


Example 17: build_data_dict

def build_data_dict(data1, data2, match):

    """Build a dictionary of zeros like the union of the two data
    dictionaries, but with 'length' equal to the shorter dict.
    """

    data = {}
    keys1 = [key for key in data1.keys() if key != match]
    keys2 = [key for key in data2.keys() if key != match]
    nfinal = min(np.shape(data1[match])[0],
                 np.shape(data2[match])[0])

    if nfinal == np.shape(data1[match])[0]:
        data[match] = np.zeros_like(data1[match])
    else:
        data[match] = np.zeros_like(data2[match])
    for k in keys1:
        data[k] = np.zeros_like(data1[k])
        shp = list(np.shape(data1[k]))
        shp[0] = nfinal
        data[k] = np.resize(data[k], tuple(shp))
    for k in keys2:
        data[k] = np.zeros_like(data2[k])
        shp = list(np.shape(data2[k]))
        shp[0] = nfinal
        data[k] = np.resize(data[k], tuple(shp))

    return data, keys1, keys2
Author: gnperdue | Project: hdf5_manipulator | Lines: 28 | Source: combine.py


Example 18: sympforeuler

def sympforeuler(ode, vardict, soln, h, relerr):
    """
    Implementation of the Symplectic Euler method.
    """
    eqnum = len(ode)
    dim = [eqnum, 1]
    dim.extend(soln[0][0].shape)
    dim = tuple(dim)
    if numpy.iscomplexobj(soln[0]):
        aux = numpy.resize([0. + 0j], dim)
    else:
        aux = numpy.resize([0.], dim)
    dim = soln[0][0].shape
    for vari in range(eqnum):
        vardict.update({'y_{}'.format(vari): soln[vari][-1]})
    for vari in range(eqnum):
        aux[vari][0] = numpy.resize((seval(ode[vari], **vardict) * h[0] + soln[vari][-1]), dim)
        if vari % 2 == 0:
            vardict.update({"y_{}".format(vari): aux[vari][0]})
    for vari in range(eqnum):
        vardict.update({"y_{}".format(vari): aux[vari][0]})
        pt = soln[vari]
        kt = numpy.array([vardict['y_{}'.format(vari)]])
        soln[vari] = numpy.concatenate((pt, kt))
    vardict.update({'t': vardict['t'] + h[0]})
Author: Microno95 | Project: DESolver | Lines: 25 | Source: differentialsystem.py


Example 19: _evaluatePart

    def _evaluatePart(self, expr, part):
        """Evaluate expression expr for part part.

        Returns True if succeeded
        """
        # replace dataset names with calls
        newexpr = substituteDatasets(self.document.data, expr, part)[0]

        comp = self.document.evaluate.compileCheckedExpression(
            newexpr, origexpr=expr)
        if comp is None:
            return False

        # set up environment to evaluate expressions in
        environment = self.document.evaluate.context.copy()

        # create dataset using parametric expression
        if self.parametric:
            p = self.parametric
            if p[2] >= 2:
                deltat = (p[1]-p[0]) / (p[2]-1)
                t = N.arange(p[2])*deltat + p[0]
            else:
                t = N.array([p[0]])
            environment['t'] = t

        # this fn gets called to return the value of a dataset
        environment['_DS_'] = self.evaluateDataset

        # actually evaluate the expression
        try:
            result = eval(comp, environment)
            evalout = N.array(result, N.float64)

            if len(evalout.shape) > 1:
                raise RuntimeError("Number of dimensions is not 1")
        except Exception as ex:
            self.document.log(
                _("Error evaluating expression: %s\n"
                  "Error: %s") % (self.expr[part], cstr(ex)) )
            return False

        # make evaluated error expression have same shape as data
        if part != 'data':
            data = self.evaluated['data']
            if evalout.shape == ():
                # zero dimensional - expand to data shape
                evalout = N.resize(evalout, data.shape)
            else:
                # 1-dimensional - make it right size and trim
                oldsize = evalout.shape[0]
                evalout = N.resize(evalout, data.shape)
                evalout[oldsize:] = N.nan
        else:
            if evalout.shape == ():
                # zero dimensional - make a single point
                evalout = N.resize(evalout, 1)

        self.evaluated[part] = evalout
        return True
Author: tainstr | Project: veusz | Lines: 60 | Source: expression.py
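
Two behaviors of N.resize carry this logic: a zero-dimensional result is expanded by repetition to the data shape, and a too-short 1-D result is first tiled to the right length and then masked with NaN beyond its original size. A small standalone illustration (not Veusz code):

import numpy as N

print(N.resize(N.array(2.5), (4,)))          # [2.5 2.5 2.5 2.5] -- a scalar result expanded
err = N.resize(N.array([0.1, 0.2]), (5,))    # tiled to length 5: [0.1 0.2 0.1 0.2 0.1]
err[2:] = N.nan                              # values beyond the original 2 entries become NaN
print(err)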


Example 20: __init__

    def __init__(self, name, geometry, order):
        LagrangeSimplexPolySpace.__init__(self, name, geometry, order)

        nodes, nts, node_coors = self.nodes, self.nts, self.node_coors

        shape = [nts.shape[0] + 1, 2]
        nts = nm.resize(nts, shape)
        nts[-1,:] = [3, 0]

        shape = [nodes.shape[0] + 1, nodes.shape[1]]
        nodes = nm.resize(nodes, shape)
        # Make a 'hypercubic' (cubic in 2D) node.
        nodes[-1,:] = 1

        n_v = self.geometry.n_vertex
        tmp = nm.ones((n_v,), nm.int32)

        node_coors = nm.vstack((node_coors,
                                nm.dot(tmp, self.geometry.coors) / n_v))

        self.nodes, self.nts = nodes, nts
        self.node_coors = nm.ascontiguousarray(node_coors)

        self.bnode = nodes[-1:,:]

        self.n_nod = self.nodes.shape[0]
Author: mikegraham | Project: sfepy | Lines: 26 | Source: poly_spaces.py



Note: the numpy.resize examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the code remains with the original authors, and any further distribution or use should follow the corresponding project's license. Please do not republish without permission.

