
Python interpolate.interp1d Function Code Examples


This article collects typical usage examples of the scipy.interpolate.interpolate.interp1d function in Python. If you are wondering how the interp1d function works in practice, how to call it, or what real-world interp1d code looks like, the curated examples below may help.



A total of 20 code examples of the interp1d function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
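Before looking at the project examples, here is a minimal, self-contained usage sketch of interp1d; the sample data and variable names are purely illustrative and are not taken from any of the projects below.

import numpy as np
from scipy.interpolate import interp1d

# Known sample points and values
x = np.linspace(0, 10, 11)
y = np.sin(x)

# Build interpolators; kind defaults to 'linear'
f_lin = interp1d(x, y)
f_cub = interp1d(x, y, kind='cubic')

# Evaluate anywhere inside [x[0], x[-1]]
print(f_lin(5.5), f_cub(5.5))

# Queries outside the data range raise ValueError unless bounds_error=False;
# fill_value then supplies the out-of-range result.
f_safe = interp1d(x, y, bounds_error=False, fill_value=np.nan)
print(f_safe(12.0))  # nan instead of an exception

Several of the examples below rely on exactly these options, for instance bounds_error=False with a constant fill_value in Examples 10, 12 and 17.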

Example 1: __init__

 def __init__(self, redshift, absnap, hubble = 0.71, fbar=0.17, units=None, sf_neutral=True):
     if units is not None:
         self.units = units
     else:
         self.units = unitsystem.UnitSystem()
     self.absnap = absnap
     self.f_bar = fbar
     self.redshift = redshift
     self.sf_neutral = sf_neutral
     #Interpolate for opacity and gamma_UVB
     #Opacities for the FG09 UVB from Rahmati 2012.
     #IMPORTANT: The values given for z > 5 are calculated by fitting a power law and extrapolating.
     #Gray power law was: -1.12e-19*(zz-3.5)+2.1e-18 fit to z > 2.
     #gamma_UVB was: -8.66e-14*(zz-3.5)+4.84e-13
     #This is clearly wrong, but this model is equally a poor choice at these redshifts anyway.
     gray_opac = [2.59e-18,2.37e-18,2.27e-18, 2.15e-18, 2.02e-18, 1.94e-18, 1.82e-18, 1.71e-18, 1.60e-18]
     gamma_UVB = [3.99e-14, 3.03e-13, 6e-13, 5.53e-13, 4.31e-13, 3.52e-13, 2.678e-13,  1.81e-13, 9.43e-14]
     zz = [0, 1, 2, 3, 4, 5, 6, 7,8]
     self.redshift_coverage = True
     if redshift > zz[-1]:
         self.redshift_coverage = False
         print("Warning: no self-shielding at z=",redshift)
     else:
         gamma_inter = intp.interp1d(zz,gamma_UVB)
         gray_inter = intp.interp1d(zz,gray_opac)
         self.gray_opac = gray_inter(redshift)
         self.gamma_UVB = gamma_inter(redshift)
     #self.hy_mass = 0.76 # Hydrogen massfrac
     self.gamma=5./3
     #Boltzmann constant (cgs)
     self.boltzmann=1.38066e-16
     self.hubble = hubble
     #Physical density threshold for star formation in H atoms / cm^3
     self.PhysDensThresh = self._get_rho_thresh(hubble)
Developer: sbird, Project: fake_spectra, Lines of code: 34, Source file: gas_properties.py


Example 2: sanitize_data

 def sanitize_data(self):
     """Fill the series via interpolation"""
     validx = None; validy = None
     countx = None; county = None  
     if self.x is not None:
         validx = np.sum(np.isfinite(self.x))
         countx = float(self.x.size)
     else: 
         raise ValueError("The x-axis is not populated, calculate values before you interpolate.")
     if self.y is not None:
         validy = np.sum(np.isfinite(self.y))
         county = float(self.y.size)
     else: 
         raise ValueError("The y-axis is not populated, calculate values before you interpolate.")
     
     if min([validx/countx,validy/county]) < self.VALID_REQ: 
         warnings.warn(
           "Poor data quality, there are not enough valid entries for x ({0:f}/{1:f}) or y ({2:f}/{3:f}).".format(validx,countx,validy,county),
           UserWarning)
     # TODO: use filter and cubic splines!
     #filter = np.logical_and(np.isfinite(self.x),np.isfinite(self.y))
     if validy > validx:
         y = self.y[np.isfinite(self.y)]
         self.x = interp1d(self.y, self.x, kind='linear')(y)
         self.y = y
     else:
         x = self.x[np.isfinite(self.x)] 
         self.y = interp1d(self.x, self.y, kind='linear')(x)
         self.x = x
Developer: TimHarvey2, Project: CoolProp, Lines of code: 29, Source file: Common.py


Example 3: __init__

	def __init__(self, a_sound, t_age):
		'''Initialize physical parameters and create interpolation objects'''
		from numpy import array
		from scipy.interpolate import interpolate as interp
		from math import log10

		# Init sound speed and collapse age
		self.a_sound = float(a_sound)
		self.t_age = float(t_age)

		# Dimensionless parameters given in table 2 of the paper.
		# These describe the "expansion-wave collapse solution" where A = 2+
		# (see section II-c for details)
		table2_x = array([0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1])
		table2_alpha = array([71.5, 27.8, 16.4, 11.5, 8.76, 7.09, 5.95, 5.14, 4.52, 4.04, 3.66, 3.35, 3.08, 2.86, 2.67, 2.5, 2.35, 2.22, 2.1, 2])
		table2_negv = array([5.44, 3.47, 2.58, 2.05, 1.68, 1.4, 1.18, 1.01, 0.861, 0.735, 0.625, 0.528, 0.442, 0.363, 0.291, 0.225, 0.163, 0.106, 0.051, 0])

		# Set interpolation limits
		self.xmin = table2_x[0]
		self.xmax = table2_x[-1]

		# Init interpolation objects:
		#   alpha is best interpolated linearly in log space
		log10_x = [log10(x) for x in table2_x]
		log10_alpha = [log10(alpha) for alpha in table2_alpha]
		self.alpha_interp = interp.interp1d(log10_x, log10_alpha, kind='linear')

		#   neg_v is best interpolated cubically
		self.negv_interp = interp.interp1d(table2_x, table2_negv, kind='cubic')
Developer: itahsieh, Project: sparx-alpha, Lines of code: 29, Source file: models.py


Example 4: __init__

    def __init__(self, redshift,hubble = 0.71, fbar=0.17, molec = True, UnitLength_in_cm=3.085678e21, UnitMass_in_g=1.989e43):
        self.f_bar = fbar
        self.redshift = redshift
        self.molec = molec
        #Some constants and unit systems
        #Internal gadget mass unit: 1e10 M_sun/h in g/h
        #UnitMass_in_g=1.989e43
        #Internal gadget length unit: 1 kpc/h in cm/h
        self.UnitLength_in_cm=UnitLength_in_cm
        self.UnitDensity_in_cgs = UnitMass_in_g/self.UnitLength_in_cm**3
        #Internal velocity unit : 1 km/s in cm/s
        self.UnitVelocity_in_cm_per_s=1e5
        #proton mass in g
        self.protonmass=1.67262178e-24
        #self.hy_mass = 0.76 # Hydrogen massfrac
        self.gamma=5./3
        #Boltzmann constant (cgs)
        self.boltzmann=1.38066e-16

        self.hubble = hubble

        self.star = StarFormation(hubble)
        #Physical density threshold for star formation in H atoms / cm^3
        self.PhysDensThresh = self.star.get_rho_thresh()
        if redshift > zz[-1]:
            self.redshift_coverage = False
            print("Warning: no self-shielding at z=",redshift)
        #Interpolate for opacity and gamma_UVB
        gamma_inter = intp.interp1d(zz,gamma_UVB)
        gray_inter = intp.interp1d(zz,gray_opac)
        self.gray_opac = gray_inter(redshift)
        self.gamma_UVB = gamma_inter(redshift)
Developer: sbird, Project: spb_common, Lines of code: 32, Source file: cold_gas.py


Example 5: nllk

 def nllk(mts):
     'current ests:'
     tau = exp(mts[1:2])
     mu_sigma =  [mts[0], exp(mts[2])];
     
     'parametrize solver'
     lSolver =  generateDefaultAdjointSolver(tau, mu_sigma,  Tf=Tf);
     lSolver.refine(0.01, 0.5);
     
     'interpolate control:'
     alphas_for_f = alphaF(lSolver._ts);
     
     'compute hitting time distn:'
     gs = lSolver.solve_hittime_distn_per_parameter(tau,
                                                    mu_sigma,
                                                    alphas_for_f,
                                                    force_positive=True)
     if visualize_gs:
         figure();    plot(lSolver._ts, gs, 'r') ;
     
     'Form likelihood'
     gs_interp = interp1d( lSolver._ts, gs)
     
     'Form negative log likelihood'
     nllk = -sum(log( gs_interp(hts) ) )
     
     'diagnose:'
     print 'mts: %.3f,%.3f,%.3f,%.0f '%(mu_sigma[0], tau, mu_sigma[1], nllk);
     
     return  nllk; 
Developer: aviolov, Project: OptEstimatePython, Lines of code: 30, Source file: Adjoint_MuTauSigma_Estimator.py


Example 6: linke_interp

def linke_interp(day,turb_array):
    # put monthly data here
    # Angelo area LT from helios satellite data (http://www.soda-is.com/linke/linke_helioserve.html)
    # ltm1 and angelo-1 are identical, kept for backwards compatibility.  They are helios - 1
    if turb_array == 'helios':
        linke_data = numpy.array ([3.2,3.2,3.2,3.4,3.7,3.8,3.7,3.8,3.5,3.4,3.1,2.9])
    elif turb_array == 'angelo80':
        linke_data = numpy.array ([2.56,2.56,2.56,2.72,2.96,3.04,2.96,3.04,2.80,2.72,2.48,2.32])
    elif turb_array == 'angelo70':
        linke_data = numpy.array ([2.3,2.3,2.3,2.5,2.7,2.8,2.7,2.8,2.6,2.5,2.3,2.1])
    elif turb_array == 'angelo-1':
        linke_data = numpy.array ([2.2,2.2,2.2,2.4,2.7,2.8,2.7,2.8,2.5,2.4,2.1,1.9])
    elif turb_array == 'ltm1':
        linke_data = numpy.array ([2.2,2.2,2.2,2.4,2.7,2.8,2.7,2.8,2.5,2.4,2.1,1.9])
    else:
        linke_data = numpy.array ([1.5,1.6,1.8,1.9,2.0,2.3,2.3,2.3,2.1,1.8,1.6,1.5])
    
    linke_data_wrap = numpy.concatenate((linke_data[9:12],linke_data, linke_data[0:3]))
    monthDays = numpy.array ([0,31,28,31,30,31,30,31,31,30,31,30,31])
    midmonth_day = numpy.array ([0,0,0,0,0,0,0,0,0,0,0,0]) # create empty array to fill
    for i in range(1, 12+1):
        midmonth_day[i-1] = 15 + sum(monthDays[0:i])
    midmonth_day_wrap = numpy.concatenate((midmonth_day[9:12]-365, midmonth_day,midmonth_day[0:3]+365))
    linke = interpolate.interp1d(midmonth_day_wrap, linke_data_wrap, kind='cubic')
    lt = linke(day)
    return lt
Developer: cbode, Project: ssr, Lines of code: 26, Source file: ssr_rsun.py


Example 7: numerov

def numerov(f, start, stop, dx, first, second):
    x = np.linspace(start, stop, abs(start - stop)/dx )
    psi = np.empty(len(x))
    dx2 = dx**2
    f1 = f(start)
    psi[0], psi[1] = first, second
    q0, q1 = psi[0]/(1 - dx2*f1/12), psi[1]/(1 - dx2*f1/12)
    for (i, ix) in enumerate(x):
        if i == 0: 
            continue
        f1 = f(ix)
        q2 = 2*q1 - q0 + dx2*f1*psi[i-1]
        q0, q1 = q1, q2
        psi[i] = q1/(1 - dx2*f1/12)

    return interpolate.interp1d(x, psi)
       
# def V(r):
#     return - 1. / r
#        
# sol = numerov(lambda r: -1, 1e-10, 100, 0.01, 0, 1)
# 
# x = np.arange(1e-10,100,0.01)
# y = sol(x)
# 
# plt.plot(x,y)
# plt.show()
Developer: Embreus, Project: NHQM, Lines of code: 27, Source file: numerov.py


Example 8: __cmap_discretise

    def __cmap_discretise(self, cmap, N):
        """ Returns a discrete colormap from a continuous colormap 
        
            cmap: colormap instance, e.g. cm.jet
            N: Number of colors
            
            http://www.scipy.org/Cookbook/Matplotlib/ColormapTransformations
        """
        cdict = cmap._segmentdata.copy()
        # N colors
        colors_i = np.linspace(0, 1.0, N)
        # N + 1 indices
        indices = np.linspace(0, 1.0, N + 1)

        for key in ("red", "green", "blue"):
            # Find the N colors
            D = np.array(cdict[key])
            I = interpolate.interp1d(D[:, 0], D[:, 1])
            colors = I(colors_i)
            # Place those colors at the correct indices
            A = np.zeros((N + 1, 3), float)
            A[:, 0] = indices
            A[1:, 1] = colors
            A[:-1, 2] = colors
            # Create a tuple for the dictionary
            L = []
            for l in A:
                L.append(tuple(l))
            cdict[key] = tuple(L)

        return mpl.colors.LinearSegmentedColormap("colormap", cdict, 1024)
Developer: vasanthperiyasamy, Project: bom-mapping, Lines of code: 31, Source file: plot_type.py


Example 9: _inline_label

 def _inline_label(self,xv,yv,x=None,y=None):
     """
     This will give the coordinates and rotation required to align a label with
     a line on a plot in SI units.
     """
     if y is None and x is not None:
         trash=0
         (xv,yv)=self._to_pixel_coords(xv,yv)
         #x is provided but y isn't
         (x,trash)=self._to_pixel_coords(x,trash)
 
         #Get the rotation angle and y-value
         x,y,dy_dx = BasePlot.get_x_y_dydx(xv,yv,x)
         rot = np.arctan(dy_dx)/np.pi*180.
 
     elif x is None and y is not None:
         #y is provided, but x isn't
         _xv = xv[::-1]
         _yv = yv[::-1]
         #Find x by interpolation
         x = interp1d(yv, xv)(y)
         trash=0
         (xv,yv)=self._to_pixel_coords(xv,yv)
         (x,trash)=self._to_pixel_coords(x,trash)
 
         #Get the rotation angle and y-value
         x,y,dy_dx = BasePlot.get_x_y_dydx(xv,yv,x)
         rot = np.arctan(dy_dx)/np.pi*180.
     (x,y)=self._to_data_coords(x,y)
     return (x,y,rot)
Developer: TimHarvey2, Project: CoolProp, Lines of code: 30, Source file: Common.py


Example 10: calculateTs_Kolmogorov_BVP

def calculateTs_Kolmogorov_BVP(alpha, beta, tauchar = 1.0, xth= 1.0, xmin = -1.0):
    D = beta*beta /2.
    
    def dS(S,x):
        return  -( (alpha-x/tauchar)/D ) * S -1.0/D;
    
    S_0 = .0;
    
    xs = linspace(xmin, xth, 1000); dx = (xs[1]-xs[0])
    Ss = odeint(dS, S_0, xs);
    if max(Ss) > .0:
        raise RuntimeError('Ss should be  negative')
    
    T1s = -cumsum(Ss[-1::-1]) * dx
    T1s = T1s[-1::-1]
    
    T1_interpolant = interp1d(xs, T1s, bounds_error=False, fill_value = T1s[-1]);
    def dS2(S,x):
        T1 = T1_interpolant(x)
        return  -( (alpha-x/tauchar)/D ) * S -2.0*T1/D;
    
    Ss = odeint(dS2, S_0, xs);
    if max(Ss) > .0:
        raise RuntimeError('Ss should be  negative')
    T2s = -cumsum(Ss[-1::-1]) * dx
    T2s = T2s[-1::-1]
    
    return xs, T1s, T2s  
Developer: aviolov, Project: OptSpikePython, Lines of code: 28, Source file: HJB_TerminalConditioner.py


Example 11: interpolate_values_1d

def interpolate_values_1d(x,y,x_points=None,kind='linear'):
    try:
        from scipy.interpolate.interpolate import interp1d
        if x_points is None:
            return interp1d(x, y, kind=kind)(x[np.isfinite(x)])
        else:
            return interp1d(x, y, kind=kind)(x_points)
    except ImportError:
        if kind != 'linear':
            warnings.warn(
              "You requested a non-linear interpolation, but SciPy is not available. Falling back to linear interpolation.",
              UserWarning)
        if x_points is None:
            return np.interp((x[np.isfinite(x)]), x, y)
        else:
            return np.interp(x_points, x, y)
Developer: spinnau, Project: coolprop, Lines of code: 16, Source file: Common.py


Example 12: RebaseTime

 def RebaseTime(cls, trace, newTimebase, **kwargs):
     bounds_error = False
     
     interpolator = interp1d(trace._time, trace.rawdata, "linear", bounds_error=bounds_error)
     newData = interpolator(newTimebase)
     rebasedTrace = Trace(newTimebase, newData, timeUnit=trace.timeUnit, dataUnit=trace.dataUnit, **kwargs)
     return rebasedTrace
Developer: mpelko, Project: neurovivo, Lines of code: 7, Source file: tracesplicer.py


Example 13: __getitem__

    def __getitem__(self, time):
        from scipy.interpolate import interp1d
        from morphforge.traces.tracetypes.tracefixeddt import TraceFixedDT

        if isinstance(time, tuple):
            assert len(time) == 2
            start = unit(time[0])
            stop = unit(time[1])

            if start < self._time[0]:
                assert False, 'Time out of bounds'
            if stop > self._time[-1]:
                assert False, 'Time out of bounds'

            mask = np.logical_and(start < self.time_pts, self._time < stop)

            if len(np.nonzero(mask)[0]) < 2:
                assert False
            return TraceFixedDT(time=self._time[np.nonzero(mask)[0]],
                                data=self.data_pts[np.nonzero(mask)[0]])


        assert isinstance(time, pq.quantity.Quantity), "Times should be quantity. Found: %s %s"%(time, type(time))
        # Rebase the Time:
        time.rescale(self._time.units)
        interpolator = interp1d(self.time_pts_np,
                                self.data_pts_np)
        d_mag = interpolator(time.magnitude)
        return d_mag * self.data_unit
Developer: bmerrison, Project: morphforge, Lines of code: 29, Source file: tracepointbased.py


Example 14: calibrate

 def calibrate(self,data_files, plot=1, fig = None):
     print 'calibrating'
     # data structure: [m1,m2,focus,x,y,z]
     
     # if not a list, make it a list
     if type(data_files) is not list:
         print 'not a list, making it a list'
         data_files = [data_files]
 
     # load data
     self.data = None
     for data_file in data_files:
         print data_file
         if self.data is None:
             self.data = np.loadtxt(  data_file, delimiter=',' )
         else:
             tmp = np.loadtxt(  data_file,delimiter=',')
             self.data = np.vstack((self.data,tmp))
     
     self.calc_distc()
     focus = self.data[:,2]
     
     # fit a/(x) + c
     print 'fitting using: ', self.interpolation
     
     if self.interpolation == 'xinv':
         self.coeffs = np.polyfit(self.distc**(-1), focus, 1)
         ## now try fmin fit using coeffs as seed ##
         seed = np.zeros(3)
         seed[0] = self.coeffs[0]
         seed[2] = self.coeffs[1]
         
         tmp = scipy.optimize.fmin( self.fmin_func, seed, full_output = 1, disp=0)
         self.coeffs = tmp[0]
     
     if self.interpolation == 'polyfit2':
         self.coeffs = np.polyfit(self.distc, focus, 2)
         print 'linear coeffs: ', self.coeffs
     
     if self.interpolation == 'interp1d':
         print 'interpolating using interp1d'
         self.interp = interpolate.interp1d(self.distc, focus, kind = 'linear', fill_value = 0)
  
         
     if plot == 1:
         if fig is None:    
             fig = plt.figure(1)
         plt.scatter(self.distc,focus)
         xi = np.linspace(min(self.distc),max(self.distc),50)
         yi = [self.get_focus_distc(x) for x in xi]
         
         plt.title('Calibration data for Pan Tilt Focus')
         plt.xlabel('distance to camera center, m')
         plt.ylabel('focus motor setting, radians')
         plt.plot(xi,yi)
         fig.show()
             
     
     
     return 1
Developer: florisvb, Project: hydra, Lines of code: 60, Source file: multiaxis_script.py


Example 15: cmap_discretize

def cmap_discretize(cmap, N):
    """Return a discrete colormap from the continuous colormap cmap.
    
    cmap: colormap instance, eg. cm.jet. 
    N: Number of colors.
    
    Example
    x = resize(arange(100), (5,100))
    djet = cmap_discretize(cm.jet, 5)
    imshow(x, cmap=djet)
    
    from http://www.scipy.org/Cookbook/Matplotlib/ColormapTransformations
    """
    cdict = cmap._segmentdata.copy()
    # N colors
    colors_i = np.linspace(0,1.,N)
    # N+1 indices
    indices = np.linspace(0,1.,N+1)
    for key in ('red','green','blue'):
                # Find the N colors
        D = np.array(cdict[key])
        I = interpolate.interp1d(D[:,0], D[:,1])
        colors = I(colors_i)
        # Place these colors at the correct indices.
        A = np.zeros((N+1,3), float)
        A[:,0] = indices
        A[1:,1] = colors
        A[:-1,2] = colors
        # Create a tuple for the dictionary.
        L = []
        for l in A:
            L.append(tuple(l))
        cdict[key] = tuple(L)
    # Return colormap object.
    return mpl.colors.LinearSegmentedColormap('colormap',cdict,1024)
Developer: acharleses, Project: contour-wms, Lines of code: 35, Source file: prototype_paste.py


Example 16: train

    def train(self, images):
        r"""
        Train a standard intensity space and an associated transformation model.
        
        Note that the passed images should be masked to contain only the foreground.
        
        Parameters
        ----------
        images : sequence of array_likes
            A number of images.
        
        Returns
        -------
        IntensityRangeStandardization : IntensityRangeStandardization
            This instance of IntensityRangeStandardization
        """
        self.__stdrange = self.__compute_stdrange(images)
        
        lim = []
        for idx, i in enumerate(images):
            ci = numpy.array(numpy.percentile(i, self.__cutoffp))
            li = numpy.array(numpy.percentile(i, self.__landmarkp))
            ipf = interp1d(ci, self.__stdrange)
            lim.append(ipf(li))

            # treat single intensity accumulation error            
            if not len(numpy.unique(numpy.concatenate((ci, li)))) == len(ci) + len(li):
                raise SingleIntensityAccumulationError('Image no.{} shows an unusual single-intensity accumulation that leads to a situation where two percentile values are equal. This situation is usually caused, when the background has not been removed from the image. Another possibility would be to reduce the number of landmark percentiles landmarkp or to change their distribution.'.format(idx))
            
        self.__model = [self.__stdrange[0]] + list(numpy.mean(lim, 0)) + [self.__stdrange[1]]
        self.__sc_umins = [self.__stdrange[0]] + list(numpy.min(lim, 0)) + [self.__stdrange[1]]
        self.__sc_umaxs = [self.__stdrange[0]] + list(numpy.max(lim, 0)) + [self.__stdrange[1]]
            
        return self
Developer: AlfiyaZi, Project: medpy, Lines of code: 34, Source file: IntensityRangeStandardization.py


Example 17: estimateHarness

def estimateHarness(simPs, fbkSoln,
                     Nblocks, Nhits, 
                     refine_factor = 2.,
                     fig_name=None, reestimate = True):
    
    #Load:
    simPaths = SimulationPaths.load([Nblocks, Nhits, simPs.mu, simPs.tau_char, simPs.sigma, ''])
    
    if reestimate:
        'Target Data:'
        thits_Blocks = simPaths.thits_blocks;
        
        'FP Solver:'
        lSolver = deepcopy(fbkSoln._Solver);
        
        ts = simPaths.sim_ts;
        alphasMap = simPaths.alphasDict;
        Nalphas = len(alphasMap);
        print 'Loaded %d Blocks of %d Hitting times for %d controls:'%(thits_Blocks.shape[1], thits_Blocks.shape[2], thits_Blocks.shape[0])
        
        betaEsts = empty((Nalphas, Nblocks))
            
        alpha_max = fbkSoln._alpha_bounds[-1]; 
        
        
        'refine the solver:'
        lSolver.rediscretize(lSolver._dx/refine_factor,   lSolver._dt/refine_factor,
                             lSolver.getTf(), lSolver._xs[0])
        
        for adx, (alpha_tag, alphas) in enumerate( alphasMap.iteritems() ):
            
            print adx, alpha_tag 
            
            alphaF =  interp1d(ts, alphas , bounds_error = False, fill_value = alpha_max)
            
            for bdx in xrange(Nblocks):
    #        for bdx in [0]:
                hts = squeeze(thits_Blocks[adx, bdx,:]);
                
    #            subplot(3, 1, adx)
    #            hist(hts, bins = 100)
    #            title('%s %d'%(alpha_tag, len(hts) ))
                
                betaEsts[adx, bdx] = calcBetaEstimate(lSolver,
                                                      hts,
                                                      alphaF,
                                                      [simPaths.simParams.mu,
                                                        simPaths.simParams.sigma] );
                print '\n', betaEsts[adx, bdx]
        
        'Append estimates to the paths:' 
        simPaths.betaEsts = betaEsts;
        
        'resave:'
        simPaths.save();
    
    ''' Post Analysis: visualize and latexify results'''
    postAnalysis( Nblocks = Nblocks, 
                  Nhits = Nhits, 
                  simPs = simPs) 
Developer: aviolov, Project: OptEstimatePython, Lines of code: 60, Source file: Adjoint_TauChar_Estimator.py


Example 18: transform

 def transform(self, image, surpress_mapping_check = False):
     r"""
     Transform an images intensity values to the learned standard intensity space.
     
     Note that the passed image should be masked to contain only the foreground.
     
     The transformation is guaranteed to be lossless i.e. a one-to-one mapping between
     old and new intensity values exists. In cases where this does not hold, an error
     is thrown. This can be suppressed by setting ``surpress_mapping_check`` to 'True'.
     Do this only if you know what you are doing.
     
     Parameters
     ----------
     image : array_like
         The image to transform.
     surpress_mapping_check : bool
         Whether to ensure a lossless transformation or not.
     
     Returns
     -------
     image : ndarray
         The transformed image
     
     Raises
     -------
     InformationLossException 
         If a lossless transformation can not be ensured
     Exception
         If no model has been trained before
     """
     if None == self.__model:
         raise UntrainedException('Model not trained. Call train() first.')
     
     image = numpy.asarray(image)
     
     # determine image intensity values at cut-off percentiles & landmark percentiles
     li = numpy.percentile(image, [self.__cutoffp[0]] + self.__landmarkp + [self.__cutoffp[1]])
     
     # treat single intensity accumulation error            
     if not len(numpy.unique(li)) == len(li):
         raise SingleIntensityAccumulationError('The image shows an unusual single-intensity accumulation that leads to a situation where two percentile values are equal. This situation is usually caused, when the background has not been removed from the image. The only other possibility would be to re-train the model with a reduced number of landmark percentiles landmarkp or a changed distribution.')
     
     # create linear mapping models for the percentile segments to the learned standard intensity space  
     ipf = interp1d(li, self.__model, bounds_error = False)
     
     # transform the input image intensity values
     output = ipf(image)
     
     # treat image intensity values outside of the cut-off percentiles range separately
     llm = IntensityRangeStandardization.linear_model(li[:2], self.__model[:2])
     rlm = IntensityRangeStandardization.linear_model(li[-2:], self.__model[-2:])
     
     output[image < li[0]] = llm(image[image < li[0]])
     output[image > li[-1]] = rlm(image[image > li[-1]])
     
     if not surpress_mapping_check and not self.__check_mapping(li):
         raise InformationLossException('Image can not be transformed to the learned standard intensity space without loss of information. Please re-train.')
     
     return output
Developer: AlfiyaZi, Project: medpy, Lines of code: 59, Source file: IntensityRangeStandardization.py


Example 19: get_values

 def get_values(self, time_array):
     from scipy.interpolate.interpolate import interp1d
     time_units = self._time.units
     data_units = self._data.units
     interpolator = interp1d(self._time.magnitude,
                             self._data.magnitude)
     return interpolator(time_array.rescale(time_units).magnitude) \
         * data_units
Developer: bmerrison, Project: morphforge, Lines of code: 8, Source file: tracepointbased.py


Example 20: _mtf

 def _mtf(self, x=50):
     norm = max(roi.mtf for roi in self.hc_rois)
     ys = [roi.mtf / norm for roi in self.hc_rois]
     xs = np.arange(len(ys))
     f = interp1d(ys, xs)
     try:
         mtf = f(x / 100)
     except ValueError:
         mtf = min(ys)
     return float(mtf)
Developer: jrkerns, Project: pylinac, Lines of code: 10, Source file: planar_imaging.py



Note: The scipy.interpolate.interpolate.interp1d function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; please consult each project's License before distributing or reusing the code. Do not reproduce without permission.

