
Python numpy.ptp Function Code Examples


This article collects typical usage examples of the numpy.ptp function in Python. If you have been wondering what exactly Python's ptp function does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.



Twenty code examples of the ptp function are shown below, sorted by popularity.
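
Before the excerpts, a quick sketch of what the function computes: np.ptp ("peak to peak") returns the difference between an array's maximum and minimum, either over the whole array or along a given axis.

import numpy as np

a = np.array([[3, 6, 9],
              [4, 10, 5],
              [8, 3, 2]])

print(np.ptp(a))          # 8 -> global max (10) minus global min (2)
print(np.ptp(a, axis=0))  # [5 7 7] -> range of each column
print(np.ptp(a, axis=1))  # [6 6 6] -> range of each row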

Example 1: gearth_fig

def gearth_fig(llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat, pixels=1024):
    """
    Return a Matplotlib `fig` and `ax` handles for a Google-Earth Image.
    TJL - Obtained from 
    http://ocefpaf.github.io/python4oceanographers/blog/2014/03/10/gearth/
    
    """
    aspect = np.cos(np.mean([llcrnrlat, urcrnrlat]) * np.pi/180.0)
    xsize = np.ptp([urcrnrlon, llcrnrlon]) * aspect
    ysize = np.ptp([urcrnrlat, llcrnrlat])
    aspect = ysize / xsize
    
    if aspect > 1.0:
        figsize = (10.0 / aspect, 10.0)
    else:
        figsize = (10.0, 10.0 * aspect)
    
    if False:  # Make `True` to prevent the KML components from popping up.
        plt.ioff()
    fig = plt.figure(figsize=figsize, frameon=False, dpi=pixels//10)
    # KML friendly image.  If using basemap try: `fix_aspect=False`.
    ax = fig.add_axes([0, 0, 1, 1])
    ax.set_xlim(llcrnrlon, urcrnrlon)
    ax.set_ylim(llcrnrlat, urcrnrlat)
    return fig, ax
Developer: DomoCat | Project: PyAMPR | Lines: 25 | Source: google_earth_tools.py


Example 2: zap_minmax

    def zap_minmax(self,windowsize=20,threshold=4):
        '''
        Run the NANOGrav median-zapping algorithm, per subintegration.
        windowsize: sliding window length, in frequency bins (default 20)
        threshold: zap a channel when its peak-to-peak exceeds this many
                   times the window median (default 4)
        '''
        if not self.can_mitigate():
            return


        nsubint = self.archive.getNsubint()
        nchan = self.archive.getNchan()

        # Prepare data
        data = self.archive.getData(squeeze=False)
        spavg = self.archive.spavg #SinglePulse average profile, no need to invoke creating more SinglePulse instances
        opw = spavg.opw
        
        if nchan <= windowsize:
            for i in xrange(nsubint):
                for j in xrange(nchan):
                    subdata = data[i,0,:,opw] 
                    compptp = np.ptp(data[i,0,j,opw])
                    ptps = np.zeros(windowsize)
                    for k in xrange(windowsize):
                        ptps[k] = np.ptp(subdata[k,:])
                

                    med = np.median(ptps)
                    if compptp > threshold*med:
                        self.zap(f=j)
            return

        
        for i in xrange(nsubint):
            for j in xrange(nchan):
                low = j - windowsize//2
                high = j + windowsize//2

                if low < 0:
                    high = abs(low)
                    low = 0
                elif high > nchan:
                    diff = high - nchan
                    high -= diff
                    low -= diff

                subdata = data[i,0,low:high,opw] 
                compptp = np.ptp(data[i,0,j,opw])
                ptps = np.zeros(windowsize)
                for k in xrange(windowsize):
                    ptps[k] = np.ptp(subdata[k,:])
                    
                #ptps = np.array(map(lambda subdata: np.ptp(subdata),data[i,0,low:high,opw]))

                med = np.median(ptps)
                if compptp > threshold*med:
                    self.zap(f=j)
                
        return
Developer: mtlam | Project: PyPulse | Lines: 60 | Source: rfimitigator.py


Example 3: get_matching_mask

def get_matching_mask(f_real, Ibox):
    """
    Find the best matching region per level in the feature pyramid    
    """
    maskers = []
    sizers = []
    
    import numpy.ma as mask
    from scipy.misc import imresize
    
    for i in range(len(f_real)):
        feature_goods = mask.array(np.sum(np.square(f_real[i]), 2), dtype=np.bool_)
        Ibox_resize = imresize(Ibox, (f_real[i].shape[0], f_real[i].shape[1]))
        Ibox_resize = Ibox_resize.astype(np.float64) / 255.0
        Ibox_goods = Ibox_resize > 0.1
        
        masker = np.logical_and(feature_goods, Ibox_goods)
        
        max_indice = np.unravel_index(Ibox_resize.argmax(), Ibox_resize.shape)
        
        if np.where(masker == True)[0].size == 0:
            masker[max_indice[0], max_indice[1]] = True
            
        indices = np.where(masker == True)
        masker[np.amin(indices[0]):np.amax(indices[0]),
               np.amin(indices[1]):np.amax(indices[1])] = True
        sizer=[np.ptp(indices[0])+1, np.ptp(indices[1])+1]   
        maskers.append(masker)
        sizers.append(sizer)
        
    return(maskers, sizers)
Developer: stomachacheGE | Project: esvm-python | Lines: 31 | Source: utils.py


Example 4: vehicle_fit

def vehicle_fit(surface, up_direction, fore_direction, transport_dimensions):
    """
    Determines if the vehicle can fit into specified dimensions for transport.
    """
    ## Get the vehicle extent in the 3 cartesian directions
    ## determine orientation of the vehicle given the direction up and forward
    coord_array = np.vstack((surface['x'], surface['y'], surface['z'])).T
    side_direction = np.cross(up_direction, fore_direction)

    width = np.ptp(np.dot(coord_array, side_direction))
    height = np.ptp(np.dot(coord_array, up_direction))
    length = np.ptp(np.dot(coord_array, fore_direction))
    #print width, height, length

    ## Store the calculated vehicle dimensions for info only (not an output metric)
    results = {"_vehicle_calculated_dimension": {"vehicle_length[m]": length,
                                                 "vehicle_width[m]": width,
                                                 "vehicle_height[m]": height}}

    ## Check each transport option in turn and write True for any that can fit the vehicle
    trans_compat = results["Transportation_Compatibility"] = {}
    for transport, size in transport_dimensions.items():
        if size["max_length"] < length or size["max_width"] < width or size["max_height"] < height:
            trans_compat[transport] = False
        else:
            trans_compat[transport] = True

    return results
Developer: cephdon | Project: meta-core | Lines: 28 | Source: transportability.py


Example 5: plot_checkpoint

    def plot_checkpoint(self,e):
        filename = "/data/sample_"+str(e)+".png"

        noise = self.sample_latent_space(16)
        images = self.generator.Generator.predict(noise)
        
        plt.figure(figsize=(10,10))
        for i in range(images.shape[0]):
            plt.subplot(4, 4, i+1)
            if self.C==1:
                image = images[i, :, :]
                image = np.reshape(image, [self.H,self.W])
                image = (255*(image - np.min(image))/np.ptp(image)).astype(int)
                plt.imshow(image,cmap='gray')
            elif self.C==3:
                image = images[i, :, :, :]
                image = np.reshape(image, [self.H,self.W,self.C])
                image = (255*(image - np.min(image))/np.ptp(image)).astype(int)
                plt.imshow(image)
            
            plt.axis('off')
        plt.tight_layout()
        plt.savefig(filename)
        plt.close('all')
        return
Developer: fileung | Project: Generative-Adversarial-Networks-Cookbook | Lines: 25 | Source: train.py
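
One caveat about the normalization pattern above: dividing by np.ptp(image) fails for a constant image, whose range is zero. A guarded variant (a sketch; the zero-range fallback is my addition, not part of the original repository):

import numpy as np

def to_uint8(image):
    # Min-max scale to [0, 255]. A constant image has np.ptp(image) == 0,
    # which would otherwise cause a division by zero.
    rng = np.ptp(image)
    if rng == 0:
        return np.zeros_like(image, dtype=np.uint8)
    return (255 * (image - np.min(image)) / rng).astype(np.uint8)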


Example 6: _load_edflib

def _load_edflib(filename):
    """load a multi-channel Timeseries from an EDF (European Data Format) file
    or EDF+ file, using edflib.

    Args:
      filename: EDF+ file

    Returns:
      Timeseries
    """
    import edflib
    e = edflib.EdfReader(filename, annotations_mode='all')
    if np.ptp(e.get_samples_per_signal()) != 0:
        raise Error('channels have differing numbers of samples')
    if np.ptp(e.get_signal_freqs()) != 0:
        raise Error('channels have differing sample rates')
    n = e.samples_in_file(0)
    m = e.signals_in_file
    channelnames = e.get_signal_text_labels()
    dt = 1.0/e.samplefrequency(0)
    # EDF files hold <=16 bits of information for each sample. Representing as
    # double precision (64bit) is unnecessary use of memory. use 32 bit float:
    ar = np.zeros((n, m), dtype=np.float32)
    # edflib requires input buffer of float64s
    buf = np.zeros((n,), dtype=np.float64)
    for i in range(m):
        e.read_phys_signal(i, 0, n, buf)
        ar[:,i] = buf
    tspan = np.arange(0, (n - 1 + 0.5) * dt, dt, dtype=np.float32)
    return Timeseries(ar, tspan, labels=[None, channelnames])
Developer: mattja | Project: nsim | Lines: 30 | Source: readfile.py
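
The two range checks above rely on a compact idiom: np.ptp(x) == 0 holds exactly when every element of x is equal, so one call asserts that all channels share the same sample count and the same sample rate. In isolation (array values made up for illustration):

import numpy as np

freqs = np.array([256.0, 256.0, 512.0])  # per-channel sample rates
if np.ptp(freqs) != 0:                   # nonzero range -> not all equal
    raise ValueError('channels have differing sample rates')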


Example 7: check

    def check(dms_a, dms_b):
        """Check quadratic energy model between two dms."""
        ham.reset(*dms_a)
        energy_a_0 = ham.compute_energy()
        focks_a = [np.zeros(dm_a.shape) for dm_a in dms_a]
        ham.compute_fock(*focks_a)

        delta_dms = []
        for idm in xrange(ham.ndm):
            delta_dms.append(dms_b[idm] - dms_a[idm])
        ham.reset_delta(*delta_dms)
        dots_a = [np.zeros(dm_a.shape) for dm_a in dms_a]
        ham.compute_dot_hessian(*dots_a)

        energy_a_1 = 0.0
        energy_a_2 = 0.0
        for idm in xrange(ham.ndm):
            energy_a_1 += np.einsum('ab,ba', focks_a[idm], delta_dms[idm])*ham.deriv_scale
            energy_a_2 += np.einsum('ab,ba', dots_a[idm], delta_dms[idm])*ham.deriv_scale**2

        # print 'energy_a_0', energy_a_0
        # print 'energy_a_1', energy_a_1
        # print 'energy_a_2', energy_a_2

        # Compute interpolation and compare
        energies_x = np.zeros(npoint)
        energies_2nd_order = np.zeros(npoint)
        derivs_x = np.zeros(npoint)
        derivs_2nd_order = np.zeros(npoint)
        for ipoint in xrange(npoint):
            x = xs[ipoint]
            dms_x = []
            for idm in xrange(ham.ndm):
                dm_x = dms_a[idm]*(1-x) + dms_b[idm]*x
                dms_x.append(dm_x)
            ham.reset(*dms_x)
            energies_x[ipoint] = ham.compute_energy()
            ham.compute_fock(*focks_a)
            for idm in xrange(ham.ndm):
                derivs_x[ipoint] += np.einsum('ab,ba', focks_a[idm], delta_dms[idm]) * \
                                    ham.deriv_scale

            energies_2nd_order[ipoint] = energy_a_0 + x*energy_a_1 + 0.5*x*x*energy_a_2
            derivs_2nd_order[ipoint] = energy_a_1 + x*energy_a_2
            # print '%5.2f %15.8f %15.8f' % (x, energies_x[ipoint], energies_2nd_order[ipoint])

        if do_plot:  # pragma: no cover
            import matplotlib.pyplot as pt
            pt.clf()
            pt.plot(xs, energies_x, 'ro')
            pt.plot(xs, energies_2nd_order, 'k-')
            pt.savefig('test_energies.png')
            pt.clf()
            pt.plot(xs, derivs_x, 'ro')
            pt.plot(xs, derivs_2nd_order, 'k-')
            pt.savefig('test_derivs.png')

        assert abs(energies_x - energies_2nd_order).max()/np.ptp(energies_x) < threshold
        assert abs(derivs_x - derivs_2nd_order).max()/np.ptp(derivs_x) < threshold
        return energy_a_0, energy_a_1, energy_a_2
Developer: QuantumElephant | Project: horton | Lines: 60 | Source: common.py
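
The final assertions use np.ptp to turn an absolute deviation into a relative one: the maximum pointwise error is divided by the range of the reference curve, making the threshold scale-independent. The idiom in isolation (names are illustrative):

import numpy as np

def max_relative_error(reference, approximation):
    # Largest pointwise deviation, as a fraction of the reference's range.
    return np.abs(reference - approximation).max() / np.ptp(reference)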


Example 8: plot_histogram

def plot_histogram( ax ):
    """Here we take the data from ROI (between aN and bN). A 100 ms window (size
    = 10) slides over it. At each step, we get min and max of window, store
    these values in a list. 

    We plot histogram of the list
    """
    global newtime, time
    roiData = sensor[aN:bN]
    baselineData = np.concatenate( (sensor[:aN], sensor[bN:]) )
    windowSize = 10
    histdataRoi = []
    for i in range( len(roiData) ):
        window = roiData[i:i+windowSize]
        histdataRoi.append( np.ptp( window ) ) # peak to peak

    histdataBaseline = []
    for i in range( len(baselineData) ):
        window = baselineData[i:i+windowSize]
        histdataBaseline.append( np.ptp( window ) )

    plt.hist( histdataBaseline
            , bins = np.arange( min(histdataBaseline), max(histdataBaseline), 5)
            , normed = True, label = 'baseline (peak to peak)'
            , alpha = 0.7
            )
    plt.hist( histdataRoi
            , bins = np.arange( min(histdataRoi), max(histdataRoi), 5)
            , normed = True , label = 'ROI (peak to peak)'
            , alpha = 0.7
            )
    # plt.title('Histogram of sensor readout')
    plt.legend(loc='best', framealpha=0.4)
Developer: ananthamurthy | Project: eyeBlinkBehaviour | Lines: 33 | Source: analyze_trial.py
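
The loops above evaluate np.ptp over a sliding window one index at a time (windows near the end of the array are shorter than windowSize). On NumPy 1.20+, the full-length windows can be handled without a Python loop; a sketch, not exactly equivalent at the trailing edge:

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

data = np.random.randn(1000)
windows = sliding_window_view(data, window_shape=10)  # shape (991, 10)
ptp_per_window = np.ptp(windows, axis=1)              # one value per window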


Example 9: draw_group

    def draw_group(data, panel_params, coord, ax, **params):
        data = coord.transform(data, panel_params)
        fill = to_rgba(data['fill'], data['alpha'])
        color = to_rgba(data['color'], data['alpha'])
        ranges = coord.range(panel_params)

        # For perfect circles the width/height of the circle(ellipse)
        # should factor in the dimensions of axes
        bbox = ax.get_window_extent().transformed(
            ax.figure.dpi_scale_trans.inverted())
        ax_width, ax_height = bbox.width, bbox.height

        factor = ((ax_width/ax_height) *
                  np.ptp(ranges.y)/np.ptp(ranges.x))
        size = data.loc[0, 'binwidth'] * params['dotsize']
        offsets = data['stackpos'] * params['stackratio']

        if params['binaxis'] == 'x':
            width, height = size, size*factor
            xpos, ypos = data['x'], data['y'] + height*offsets
        elif params['binaxis'] == 'y':
            width, height = size/factor, size
            xpos, ypos = data['x'] + width*offsets, data['y']

        circles = []
        for xy in zip(xpos, ypos):
            patch = mpatches.Ellipse(xy, width=width, height=height)
            circles.append(patch)

        coll = mcoll.PatchCollection(circles,
                                     edgecolors=color,
                                     facecolors=fill)
        ax.add_collection(coll)
Developer: jwhendy | Project: plotnine | Lines: 33 | Source: geom_dotplot.py


Example 10: process_raw_all

def process_raw_all(field = 'AEGIS'):
    #### Reprocess *all* of the FLTs with variable backgrounds that 
    #### weren't already refit above
    import glob
    import os
    
    import numpy as np
    
    import unicorn
    import threedhst
    from threedhst import catIO
    
       
    files = glob.glob('/3DHST/Spectra/Work/BACKGROUND/%s/*G141_orbit.dat'%(field))
    redo_list = []
    for file in files:
        bg = catIO.Readfile(file, save_fits=False, force_lowercase=True)
        var_bg = np.ptp(bg.bg[1:]) > 0.15
        no_skip = True
        if os.path.exists('%sq_flt.fits' %(os.path.split(file)[-1].split('j_')[0])): 
            im2flt_key = threedhst.utils.gethead('%sq_flt.fits' %(os.path.split(file)[-1].split('j_')[0]), keys=['IMA2FLT'])
            if im2flt_key[0] == '': 
                no_skip = True
            else: 
                no_skip = False
        rawfile='%sq_raw.fits'%(os.path.split(file)[-1].split('j_')[0])
        print rawfile, np.ptp(bg.bg[1:]), var_bg, no_skip, var_bg & no_skip
        #   
        if var_bg & no_skip:
            redo_list.append(rawfile)
            if not os.path.exists(rawfile):
                print '%s does not exist!'%(rawfile)
                continue
            #
            unicorn.prepare.make_IMA_FLT(raw=rawfile, pop_reads=[])
Developer: gbrammer | Project: unicorn | Lines: 35 | Source: prepare_fixed_flt.py


Example 11: main

def main(clp, center_stddev, **kwargs):
    """
    Obtain Gaussian filtered 2D x,y histograms and the maximum values in them
    as centers.
    """

    # Standard deviation values for the Gaussian filter.
    st_dev_lst = (center_stddev * .5, center_stddev, center_stddev * 2.)

    # Obtain center coordinates using Gaussian filters with different
    # standard deviation values, applied on the 2D (x,y) histogram.
    cents_xy, hist_2d_g, cents_bin_2d = center_xy(
        clp['hist_2d'], clp['xedges'], clp['yedges'], st_dev_lst)

    # Raise a flag if the standard deviation for either coordinate is larger
    # than 10% of that axis range. Use the full x,y positions list to
    # calculate the STDDEV.
    flag_center_std = False
    stddev = np.std(zip(*cents_xy[:3]), 1)
    if stddev[0] > 0.1 * np.ptp(clp['xedges']) or \
            stddev[1] > 0.1 * np.ptp(clp['yedges']):
        flag_center_std = True

    clp['flag_center_std'], clp['cents_xy'], clp['hist_2d_g'],\
        clp['cents_bin_2d'], clp['st_dev_lst'] = flag_center_std, cents_xy,\
        hist_2d_g, cents_bin_2d, st_dev_lst

    return clp
Developer: asteca | Project: ASteCA | Lines: 28 | Source: xy_density.py


Example 12: _test

    def _test( self, deltas ):
        # "Passing" behavior is more like the original (slower, more energy).
        # "Failing" behavior is more optimized (faster, less energy).

        fitness = np.array( self.get_fitness( deltas ) )
        if len( fitness ) == 0:
            return self.UNRESOLVED
        if np.any( fitness == 0 ):
            return self.UNRESOLVED
        m = np.mean( fitness, axis = 0 )
        s = np.std( fitness, axis = 0 )
        sqrtn = np.sqrt( fitness.shape[ 0 ] )
        for i in range( fitness.shape[ 1 ] ):
            infomsg( "   ", m[ i ], "+/-", 1.96 * s[ i ] / sqrtn )
        for i in range( fitness.shape[ 1 ] ):
            if np.ptp( self.optimized[ ::, i ] ) == 0 and \
                    np.ptp( fitness[ ::, i ] ) == 0 and \
                    self.optimized[ 0, i ] == fitness[ 0, i ]:
                # Optimized and fitness are all the same value, likely because
                # we are comparing the optimized variant to itself. This counts
                # as a fail, since they are clearly drawn from the same distro.
                continue
            pval = mannwhitneyu( self.optimized[ ::, i ], fitness[ ::, i ] )[ 1 ]
            if pval < options.alpha and m[ i ] < self.mean[ i ]:
                return self.PASS
        return self.FAIL
Developer: dornja | Project: goa2 | Lines: 26 | Source: minimize.py


Example 13: _get_domain_area

 def _get_domain_area(self, inlets=None, outlets=None):
     logger.warning('Attempting to estimate inlet area...will be low')
     network = self.project.network
     # Abort if network is not 3D
     if np.sum(np.ptp(network['pore.coords'], axis=0) == 0) > 0:
         raise Exception('The network is not 3D, specify area manually')
     if inlets is None:
         inlets = self._get_inlets()
     if outlets is None:
         outlets = self._get_outlets()
     inlets = network['pore.coords'][inlets]
     outlets = network['pore.coords'][outlets]
     if not iscoplanar(inlets):
         logger.error('Detected inlet pores are not coplanar')
     if not iscoplanar(outlets):
         logger.error('Detected outlet pores are not coplanar')
     Nin = np.ptp(inlets, axis=0) > 0
     if Nin.all():
         logger.warning('Detected inlets are not oriented along a '
                        + 'principal axis')
     Nout = np.ptp(outlets, axis=0) > 0
     if Nout.all():
         logger.warning('Detected outlets are not oriented along a '
                        + 'principal axis')
     hull_in = ConvexHull(points=inlets[:, Nin])
     hull_out = ConvexHull(points=outlets[:, Nout])
     if hull_in.volume != hull_out.volume:
         logger.error('Inlet and outlet faces are different area')
     area = hull_in.volume  # In 2D volume=area, area=perimeter
     return area
Developer: PMEAL | Project: OpenPNM | Lines: 30 | Source: GenericTransport.py


Example 14: preprocess

def preprocess():
    numberTrain = 50000
    numberAttribute = 784
    with open('AI_quick_draw.pickle', 'rb') as open_ai_quick:
        train_data1 = pickle.load(open_ai_quick)
        train_label1 = pickle.load(open_ai_quick)
        test_data = pickle.load(open_ai_quick)
        test_label = pickle.load(open_ai_quick)
    train_data1 = train_data1.astype(np.float64) / 255.0
    test_data = test_data.astype(np.float64) / 255.0
    permutation = np.random.permutation(range(train_data1.shape[0]))
    validation_data = train_data1[permutation[numberTrain:], :]
    validation_label = train_label1[permutation[numberTrain:]]
    train_data = train_data1[permutation[0:numberTrain], :]
    train_label = train_label1[permutation[0:numberTrain]]
    toRemove = []
    for i in range(numberAttribute):
        if np.ptp(train_data[:, i]) == 0.0 and \
                        np.ptp(validation_data[:, i]) == 0.0:
            toRemove.append(i)
    train_data = np.delete(train_data, toRemove, axis=1)
    test_data = np.delete(test_data, toRemove, axis=1)
    validation_data = np.delete(validation_data, toRemove, axis=1)
    print("Preprocessing Done!")
    return train_data, train_label, validation_data, validation_label, test_data, test_label
Developer: VikramGaru | Project: ClassProjects | Lines: 25 | Source: nnScriptAIData.py


Example 15: resample

def resample(old_dispersion, new_dispersion):
    """
    Resample a spectrum to a new dispersion map while conserving total flux.

    :param old_dispersion:
        The original dispersion array.

    :type old_dispersion:
        :class:`numpy.array`

    :param new_dispersion:
        The new dispersion array to resample onto.

    :type new_dispersion:
        :class:`numpy.array`
    """

    data = []
    old_px_indices = []
    new_px_indices = []
    for i, new_wl_i in enumerate(new_dispersion):

        # These indices should span just over the new wavelength pixel.
        indices = np.unique(np.clip(
            old_dispersion.searchsorted(new_dispersion[i:i + 2], side="left") \
                + [-1, +1], 0, old_dispersion.size - 1))
        N = np.ptp(indices)

        if N == 0:
            # 'Fake' pixel.
            data.append(np.nan)
            new_px_indices.append(i)
            old_px_indices.extend(indices)
            continue

        # Sanity checks.
        assert (old_dispersion[indices[0]] <= new_wl_i \
            or indices[0] == 0)
        assert (new_wl_i <= old_dispersion[indices[1]] \
            or indices[1] == old_dispersion.size - 1)

        fractions = np.ones(N)

        # Edges are handled as fractions between rebinned pixels.
        _ = np.clip(i + 1, 0, new_dispersion.size - 1)
        lhs = old_dispersion[indices[0]:indices[0] + 2]
        rhs = old_dispersion[indices[-1] - 1:indices[-1] + 1]
        fractions[0]  = (lhs[1] - new_dispersion[i])/np.ptp(lhs)
        fractions[-1] = (new_dispersion[_] - rhs[0])/np.ptp(rhs)

        # Being binned to a single pixel. Prevent overflow from fringe cases.
        fractions = np.clip(fractions, 0, 1)
        fractions /= fractions.sum()

        data.extend(fractions) 
        new_px_indices.extend([i] * N) # Mark the new pixel indices affected.
        old_px_indices.extend(np.arange(*indices)) # And the old pixel indices.

    return scipy.sparse.csc_matrix((data, (old_px_indices, new_px_indices)),
        shape=(old_dispersion.size, new_dispersion.size))
Developer: andycasey | Project: sick | Lines: 60 | Source: specutils.py


Example 16: test_basic

 def test_basic(self):
     a = [3, 4, 5, 10, -3, -5, 6.0]
     assert_equal(np.ptp(a, axis=0), 15.0)
     b = [[3, 6.0, 9.0],
          [4, 10.0, 5.0],
          [8, 3.0, 2.0]]
     assert_equal(np.ptp(b, axis=0), [5.0, 7.0, 7.0])
     assert_equal(np.ptp(b, axis= -1), [6.0, 6.0, 6.0])
Developer: ericsuh | Project: numpy | Lines: 8 | Source: test_function_base.py


Example 17: main

def main():
    """Do the things"""

    # Check if we have already loaded the data
    global benchmarks, node_data, stellar_parameters, node_results_filenames

    try: benchmarks
    except NameError:
    	logger.info("Loading data..")
        node_results_filenames = glob("data/iDR2.1/GES_iDR2_WG11_*.fits")
        remove_nodes = ("Recommended", )
        node_results_filenames = [filename for filename in node_results_filenames \
            if "_".join(os.path.basename(filename).split("_")[3:]).rstrip(".fits") not in remove_nodes]

        # Load the data
        stellar_parameters = ("TEFF", "LOGG", "MH")
        benchmarks, node_data = prepare_data("data/benchmarks.txt", node_results_filenames,
            stellar_parameters)
    else:
    	logger.info("Using pre-loaded data")

    # Calculate weights based on minimal Euclidean distance
    stellar_parameters = ("TEFF", "LOGG", "MH")

    num_nodes = node_data.shape[2]
    recommended_measurements = np.zeros(map(len, [stellar_parameters, benchmarks]))
    weights = get_weights(benchmarks, node_data, stellar_parameters,
        scales={
            "TEFF": 1./np.ptp(benchmarks["TEFF"]),
            "LOGG": 1./np.ptp(benchmarks["LOGG"]),
            "MH": 1./np.ptp(benchmarks["MH"])
        })

    for j, stellar_parameter in enumerate(stellar_parameters):
        for i, benchmark in enumerate(benchmarks):

            node_measurements = node_data[2*j, i, :]
            isfinite = np.isfinite(node_measurements)

            # Normalise the weights
            normalised_weights = weights[isfinite]/sum(weights[isfinite])
            
            m_euclidean = np.sum((normalised_weights * node_measurements[isfinite]))
            recommended_measurements[j, i] = m_euclidean
            
    # Visualise the differences
    labels = ("$\Delta{}T_{\\rm eff}$ (K)", "$\Delta{}\log{g}$ (dex)", "$\Delta{}$[Fe/H] (dex)")
    figs = boxplots(benchmarks, node_data[::2, :, :], stellar_parameters,
        labels=labels, recommended_values=recommended_measurements)
    [fig.savefig("euclidean-benchmarks-{0}.png".format(stellar_parameter.lower())) \
        for fig, stellar_parameter in zip(figs, stellar_parameters)]

    # Compare individual node dispersions to the recommended values
    repr_node = lambda filename: "_".join(os.path.basename(filename).split("_")[3:]).rstrip(".fits")
    fig = histograms(benchmarks, node_data[::2, :, :], stellar_parameters,
        parameter_labels=labels, recommended_values=recommended_measurements,
        node_labels=map(repr_node, node_results_filenames))
    fig.savefig("euclidean-distributions.png")
Developer: andycasey | Project: ges | Lines: 58 | Source: euclidean.py


Example 18: svdClean

def svdClean():
    #-- read file
    t0=time.time(); a,tx,ty=red(opt.FILE)
    #print "File read in",round(time.time()-t0,1),'s'    
    ntx,nbx=npy.shape(tx);nty,nby=npy.shape(ty)
    print '[H, V] bpms: [',nbx, nby,']'
    print '[H, V] turns: [',ntx, nty,']'

    #--- peak-2-peak cut, for LHC convert to microns
    print "Peak to peak cut:",opt.PK2PK, "mm"        
    pkx=npy.nonzero(npy.ptp(tx,axis=0)>float(opt.PK2PK))[0]
    pky=npy.nonzero(npy.ptp(ty,axis=0)>float(opt.PK2PK))[0]
    tx=npy.take(tx,pkx,1);ty=npy.take(ty,pky,1)
    print '[H,V] BPMs after P2P cut:',len(pkx),len(pky)
    
    #--- svd cut
    #t0=time.time()
    #gdx,gdy=foreach(rBPM,[tx,ty],threads=2,return_=True)
    gdx=rBPM(tx);gdy=rBPM(ty); #-- gdx->rdx for corr index 
    rdx=[pkx[j] for j in gdx]; rdy=[pky[j] for j in gdy]
    tx=npy.take(tx,(gdx),1);ty=npy.take(ty,(gdy),1)
    #print "Applied SVD cut in",round(time.time()-t0,1),'s'

    #--- svd clean
    if int(opt.SVALS)<nbx and int(opt.SVALS)<nby:
        t0=time.time();
        tx,ty=foreach(clean,[tx,ty],threads=2,return_=True)
        print "Cleaned using SVD in",round(time.time()-t0,1),'s'
    else: print "All singulars values retained, no svd clean applied"

    #--- bad bpms to file
    f=open(opt.FILE+'.bad','w')
    print >> f, "@  FILE %s ",opt.FILE
    print >> f, "*  NAME    S    PLANE"
    print >> f, "$   %s     %le   %s "
    for j in range(len(a.H)):
        if j not in rdx:
            print >> f, a.H[j].name, a.H[j].location, "H"
    for j in range(len(a.V)):
        if j not in rdy:
            print >> f, a.V[j].name, a.V[j].location, "V"
    f.close()
    
    #--- good data to file #t0=time.time()
    f = open(opt.FILE+'.new','w')
    f.write('# '+opt.FILE+'\n')
    for j in range(len(gdx)):
        f.write('0 '+a.H[rdx[j]].name+' '+str(a.H[rdx[j]].location)+' ')
        #f.write('%s\n' % ' '.join(['%5.5f' % val for val in \
        #                           a.H[rdx[j]].data]))
        f.write('%s\n' % ' '.join(['%5.5f' % val for val in tx[:,j]]))
    for j in range(len(gdy)):
        f.write('1 '+a.V[rdy[j]].name+' '+str(a.V[rdy[j]].location)+' ')
        #f.write('%s\n' % ' '.join(['%5.5f' % val for val in \
        #                           a.V[rdy[j]].data]))
        f.write('%s\n' % ' '.join(['%5.5f' % val for val in ty[:,j]]))
    f.close();#print "File written in",round(time.time()-t0,1),'s'
    print "Total",round(time.time()-t0,1),'s'
Developer: vimaier | Project: Beta-Beat.src | Lines: 58 | Source: svd_clean_v0.1.py


Example 19: loadsweeptimes

def loadsweeptimes(path):
    '''loads sweep timing corrected for computer - monitor delay'''
    datapath = path + ".dat"
    metapath = path + ".meta"
    
    channels,_ = loadmeta(metapath)
#    m = open(metapath)
#    meta = m.readlines()
#    
#    channels = int(meta[7].split(' = ')[1])
    #samplerate = int(meta[10].split(' = ')[1])
    #duration = len(data)/channels/samplerate
    
    data = loadbinary(datapath, channels=channels)
    sweeptrace = np.array(data[:, (channels-5)])
    vsynctrace = np.array(data[:, (channels-4)])
    diodetrace = np.array(data[:, (channels-3)])    
    d = open(datapath)
#    data = np.fromfile(d,np.int16)    
#    datareshaped = np.transpose(np.reshape(data,(len(data)/channels,channels)))
#    del data
#    
#    sweeptrace = datareshaped[(channels-3),:]
#    vsynctrace = datareshaped[(channels-2),:]
#    diodetrace = datareshaped[(channels-1),:]
#    del datareshaped    
    
    #sweep start and end times
    sthr = np.ptp(sweeptrace)/4
    sweepup = findlevels(sweeptrace, sthr, 40000, 'up')
    sweepdown = findlevels(sweeptrace, (-1*sthr), 40000, 'down')
    if sweepdown[0] > sweepup[0]:
        if len(sweepup) > len(sweepdown):
            sweep = np.column_stack([sweepup[:-1], sweepdown])
        else:
            sweep = np.column_stack([sweepup, sweepdown])
    elif sweepdown[0] <= sweepup[0]:
        sweep = np.column_stack([sweepup, sweepdown[1:]])
    
    vthr = -1*(np.ptp(vsynctrace)/5)
    vsync = findlevels(vsynctrace, vthr, 300, 'up')
    
    dthr = np.ptp(diodetrace)/4    
    diode = findlevels(diodetrace, dthr, 200, 'both')
    #diode = np.reshape(diode, (len(diode)/2, 2))
    diode = np.delete(diode,[0,1],0)
    
    #corrects for delay between computer and monitor
    delay = vsync[0] - diode[0] + 0.0
    print "***Monitor lag:", (delay/20000)
    if delay > 0:
        print "ERROR: diode before vsync"
        sys.exit('diode error')
    sweep -= delay
    #converts to time in seconds
    sweeptiming = sweep + 0.0
    sweeptiming /= 20000    
    return sweeptiming
Developer: neuromind81 | Project: aibs | Lines: 58 | Source: loadlog.py


Example 20: plot_hrd

def plot_hrd(data, members):

    x, y, c = data["TEFF"], data["LOGG"], data["FEH"]
    x_err, y_err = data["E_TEFF"], data["E_LOGG"]

    member_scatter_kwds = {
        "edgecolor": "#000000",
        "linewidths": 2,
        "s": 50,
        "zorder": 2
    }

    uves = np.array(["U580" in _ for _ in data["SETUP"]])
    giraffe = ~uves

    fig, ax = plt.subplots()
    scat = ax.scatter(x[members * uves], y[members * uves], c=c[members * uves],
        marker="s", label="UVES", **member_scatter_kwds)
    
    scat = ax.scatter(x[members * giraffe], y[members * giraffe],
        c=c[members * giraffe], marker="o", label="GIRAFFE",
        **member_scatter_kwds)
    
    ax.errorbar(x[members], y[members],
        xerr=x_err[members], yerr=y_err[members],
        fmt=None, ecolor="#000000", zorder=-1, elinewidth=1.5)

    #ax.legend(loc="upper left", frameon=False)

    cbar = plt.colorbar(scat)
    cbar.set_label(r"$[{\rm Fe/H}]$")

    ax.set_xlim(ax.get_xlim()[::-1])
    ax.set_ylim(ax.get_ylim()[::-1])

    ax.set_xlabel(r"$T_{\rm eff}$ $(K)$")
    ax.set_ylabel(r"$\log{g}$")

    ax.xaxis.set_major_locator(MaxNLocator(4))
    ax.yaxis.set_major_locator(MaxNLocator(4))

    ax.set(adjustable='box-forced',
        aspect=np.ptp(ax.get_xlim())/np.ptp(ax.get_ylim()))

    fig.tight_layout()

    # Load isochrone?
    """
    isochrone = Table.read("basti.isochrone", format="ascii",
        names=(
            "(M/Mo)in", "(M/Mo)", "log(L/Lo)", "logTe", "Mv", 
            "(U-B)", "(B-V)", "(V-I)", "(V-R)", "(V-J)", 
            "(V-K)", "(V-L)", "(H-K)"))
    """

    return fig
Developer: andycasey | Project: ngc2808 | Lines: 56 | Source: plot_hrd.py



Note: the numpy.ptp examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are excerpted from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution and use should follow each project's license. Do not reproduce without permission.

