
Python ma.mask_or function code examples


This article collects typical usage examples of the numpy.ma.mask_or function in Python. If you have been struggling with questions such as what exactly mask_or does, how to call it, or what example code looks like, then congratulations: the hand-picked code examples below may help.



The following shows 18 code examples of the mask_or function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
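
Before the project excerpts, here is a minimal, self-contained sketch of the basic behaviour of ma.mask_or (illustrative code written for this article, not taken from any of the projects below; the array values are invented): it returns the element-wise logical OR of two masks, treats ma.nomask as "nothing masked", and with shrink=True (the default) collapses an all-False result back to ma.nomask.

import numpy as np
import numpy.ma as ma

m1 = np.array([True, False, False, True])
m2 = np.array([False, False, True, True])

print(ma.mask_or(m1, m2))                  # [ True False  True  True]
print(ma.mask_or(ma.nomask, m2))           # nomask on one side simply yields the other mask
print(ma.mask_or(np.zeros(4, bool), np.zeros(4, bool)))   # all-False result shrinks to ma.nomask (False)

# Typical use: build one common mask for two data arrays
a = ma.array([1.0, 2.0, 3.0, 4.0], mask=m1)
b = ma.array([5.0, 6.0, 7.0, 8.0], mask=m2)
common = ma.mask_or(ma.getmask(a), ma.getmask(b))
a = ma.array(a, mask=common)
b = ma.array(b, mask=common)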

Example 1: __setslice__

 def __setslice__(self, i, j, value):
     "Sets the slice described by [i,j] to `value`."
     _localdict = self.__dict__
     d = self._data
     m = _localdict['_fieldmask']
     names = self.dtype.names
     if value is masked:
         for n in names:
             m[i:j][n] = True
     elif not self._hardmask:
         fval = filled(value)
         mval = getmaskarray(value)
         for n in names:
             d[n][i:j] = fval
             m[n][i:j] = mval
     else:
         mindx = getmaskarray(self)[i:j]
         dval = np.asarray(value)
         valmask = getmask(value)
         if valmask is nomask:
             for n in names:
                 mval = mask_or(m[n][i:j], valmask)
                 d[n][i:j][~mval] = value
         elif valmask.size > 1:
             for n in names:
                 mval = mask_or(m[n][i:j], valmask)
                 d[n][i:j][~mval] = dval[~mval]
                 m[n][i:j] = mask_or(m[n][i:j], mval)
     self._fieldmask = m
Developer ID: mbentz80, Project: jzigbeercp, Lines of code: 29, Source file: mrecords.py


Example 2: apply_intersection_mask_to_two_arrays

def apply_intersection_mask_to_two_arrays(array1, array2):
    """
    Ensure two (optionally) masked arrays have the same mask.
    If both arrays are masked the intersection of the masks is used.
    If one array is masked and the other is not, the mask from the masked array is applied to the unmasked array.
    If neither array is masked then both arrays are returned as masked arrays with an empty mask.

    :param array1: An (optionally masked) array
    :param array2: Another (optionally masked) array
    :return: Two masked arrays with a common mask
    """

    import numpy.ma as ma
    if isinstance(array1, ma.MaskedArray):
        if isinstance(array2, ma.MaskedArray):
            intersection_mask = ma.mask_or(array1.mask, array2.mask)
        else:
            intersection_mask = array1.mask
    else:
        if isinstance(array2, ma.MaskedArray):
            intersection_mask = array2.mask
        else:
            intersection_mask = False

    array1 = ma.array(array1, mask=intersection_mask)
    array2 = ma.array(array2, mask=intersection_mask)

    return array1, array2
Developer ID: cpaulik, Project: cis, Lines of code: 28, Source file: utils.py
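
A quick usage sketch of the helper above, assuming the function definition from Example 2 is in scope (the input arrays are invented for illustration): every element masked in either input ends up masked in both outputs, so only cells that are valid in both arrays survive.

import numpy as np
import numpy.ma as ma

a = ma.masked_values([1.0, -999.0, 3.0, 4.0], -999.0)   # mask a fill value
b = ma.masked_invalid([10.0, 20.0, np.nan, 40.0])       # mask NaN/Inf

a2, b2 = apply_intersection_mask_to_two_arrays(a, b)
print(a2.mask)   # [False  True  True False] -- OR of both input masks
print(b2.mask)   # the same mask is applied to the second array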


Example 3: MakeFrameMask

def MakeFrameMask(data,frame):
    pixelSize = data['pixelSize']
    scalex = pixelSize[0]/1000.
    scaley = pixelSize[1]/1000.
    blkSize = 512
    Nx,Ny = data['size']
    nXBlks = (Nx-1)/blkSize+1
    nYBlks = (Ny-1)/blkSize+1
    tam = ma.make_mask_none(data['size'])
    for iBlk in range(nXBlks):
        iBeg = iBlk*blkSize
        iFin = min(iBeg+blkSize,Nx)
        for jBlk in range(nYBlks):
            jBeg = jBlk*blkSize
            jFin = min(jBeg+blkSize,Ny)                
            nI = iFin-iBeg
            nJ = jFin-jBeg
            tax,tay = np.mgrid[iBeg+0.5:iFin+.5,jBeg+.5:jFin+.5]         #bin centers not corners
            tax = np.asfarray(tax*scalex,dtype=np.float32)
            tay = np.asfarray(tay*scaley,dtype=np.float32)
            tamp = ma.make_mask_none((1024*1024))
            tamp = ma.make_mask(pm.polymask(nI*nJ,tax.flatten(),
                tay.flatten(),len(frame),frame,tamp)[:nI*nJ])-True  #switch to exclude around frame
            if tamp.shape:
                tamp = np.reshape(tamp[:nI*nJ],(nI,nJ))
                tam[iBeg:iFin,jBeg:jFin] = ma.mask_or(tamp[0:nI,0:nJ],tam[iBeg:iFin,jBeg:jFin])
            else:
                tam[iBeg:iFin,jBeg:jFin] = True
    return tam.T
Developer ID: svaksha, Project: pyGSAS, Lines of code: 29, Source file: GSASIIimage.py


Example 4: set_UVC

 def set_UVC(self, U, V, C=None):
     U = ma.masked_invalid(U, copy=False).ravel()
     V = ma.masked_invalid(V, copy=False).ravel()
     mask = ma.mask_or(U.mask, V.mask, copy=False, shrink=True)
     if C is not None:
         C = ma.masked_invalid(C, copy=False).ravel()
         mask = ma.mask_or(mask, C.mask, copy=False, shrink=True)
         if mask is ma.nomask:
             C = C.filled()
         else:
             C = ma.array(C, mask=mask, copy=False)
     self.U = U.filled(1)
     self.V = V.filled(1)
     self.Umask = mask
     if C is not None:
         self.set_array(C)
     self._new_UV = True
Developer ID: AmitAronovitch, Project: matplotlib, Lines of code: 17, Source file: quiver.py


Example 5: __init__

 def __init__(self, x, y):
     x = masked_array(x, copy=False, subok=True, dtype=float_, order="F").ravel()
     y = masked_array(y, copy=False, subok=True, dtype=float_, order="F").ravel()
     if x.size != y.size:
         msg = "Incompatible size between observations (%s) and response (%s)!"
         raise ValueError(msg % (x.size, y.size))
     idx = x.argsort()
     self._x = x[idx]
     self._y = y[idx]
     self._mask = mask_or(self._x._mask, self._y._mask, copy=False)
Developer ID: ruananswer, Project: pyloess, Lines of code: 10, Source file: mpyloess.py


Example 6: set_UVC

 def set_UVC(self, U, V, C=None):
     # We need to ensure we have a copy, not a reference
     # to an array that might change before draw().
     U = ma.masked_invalid(U, copy=True).ravel()
     V = ma.masked_invalid(V, copy=True).ravel()
     mask = ma.mask_or(U.mask, V.mask, copy=False, shrink=True)
     if C is not None:
         C = ma.masked_invalid(C, copy=True).ravel()
         mask = ma.mask_or(mask, C.mask, copy=False, shrink=True)
         if mask is ma.nomask:
             C = C.filled()
         else:
             C = ma.array(C, mask=mask, copy=False)
     self.U = U.filled(1)
     self.V = V.filled(1)
     self.Umask = mask
     if C is not None:
         self.set_array(C)
     self._new_UV = True
Developer ID: 8BitTRex, Project: matplotlib, Lines of code: 19, Source file: quiver.py
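
Examples 4 and 6 rely on the same idiom: masked_invalid flags NaN/Inf values, and mask_or(..., shrink=True) collapses the combined mask to ma.nomask when nothing is actually masked, which is what makes the later `mask is ma.nomask` test work. The following is a small illustrative sketch with invented values, independent of matplotlib:

import numpy as np
import numpy.ma as ma

U = ma.masked_invalid([1.0, np.nan, 3.0], copy=True).ravel()
V = ma.masked_invalid([4.0, 5.0, np.inf], copy=True).ravel()
mask = ma.mask_or(U.mask, V.mask, copy=False, shrink=True)
print(mask)                                   # [False  True  True]

# With clean inputs the combined mask shrinks back to nomask:
U2 = ma.masked_invalid([1.0, 2.0], copy=True)
V2 = ma.masked_invalid([3.0, 4.0], copy=True)
print(ma.mask_or(U2.mask, V2.mask, shrink=True) is ma.nomask)   # True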


Example 7: Make2ThetaAzimuthMap

def Make2ThetaAzimuthMap(data,masks,iLim,jLim,times): #most expensive part of integration!
    'Needs a doc string'
    #transforms 2D image from x,y space to 2-theta,azimuth space based on detector orientation
    pixelSize = data['pixelSize']
    scalex = pixelSize[0]/1000.
    scaley = pixelSize[1]/1000.
    
    tay,tax = np.mgrid[iLim[0]+0.5:iLim[1]+.5,jLim[0]+.5:jLim[1]+.5]         #bin centers not corners
    tax = np.asfarray(tax*scalex,dtype=np.float32)
    tay = np.asfarray(tay*scaley,dtype=np.float32)
    nI = iLim[1]-iLim[0]
    nJ = jLim[1]-jLim[0]
    t0 = time.time()
    #make position masks here
    frame = masks['Frames']
    tam = ma.make_mask_none((nI,nJ))
    if frame:
        tamp = ma.make_mask_none((1024*1024))
        tamp = ma.make_mask(pm.polymask(nI*nJ,tax.flatten(),
            tay.flatten(),len(frame),frame,tamp)[:nI*nJ])-True  #switch to exclude around frame
        tam = ma.mask_or(tam.flatten(),tamp)
    polygons = masks['Polygons']
    for polygon in polygons:
        if polygon:
            tamp = ma.make_mask_none((1024*1024))
            tamp = ma.make_mask(pm.polymask(nI*nJ,tax.flatten(),
                tay.flatten(),len(polygon),polygon,tamp)[:nI*nJ])
            tam = ma.mask_or(tam.flatten(),tamp)
    if tam.shape: tam = np.reshape(tam,(nI,nJ))
    spots = masks['Points']
    for X,Y,diam in spots:
        tamp = ma.getmask(ma.masked_less((tax-X)**2+(tay-Y)**2,(diam/2.)**2))
        tam = ma.mask_or(tam,tamp)
    times[0] += time.time()-t0
    t0 = time.time()
    TA = np.array(GetTthAzmG(tax,tay,data))     #includes geom. corr. as dist**2/d0**2 - most expensive step
    times[1] += time.time()-t0
    TA[1] = np.where(TA[1]<0,TA[1]+360,TA[1])
    return np.array(TA),tam           #2-theta, azimuth & geom. corr. arrays & position mask
Developer ID: svaksha, Project: pyGSAS, Lines of code: 39, Source file: GSASIIimage.py


Example 8: Fill2ThetaAzimuthMap

def Fill2ThetaAzimuthMap(masks,TA,tam,image):
    'Needs a doc string'
    Zlim = masks['Thresholds'][1]
    rings = masks['Rings']
    arcs = masks['Arcs']
    TA = np.dstack((ma.getdata(TA[1]),ma.getdata(TA[0]),ma.getdata(TA[2])))    #azimuth, 2-theta, dist
    tax,tay,tad = np.dsplit(TA,3)    #azimuth, 2-theta, dist**2/d0**2
    for tth,thick in rings:
        tam = ma.mask_or(tam.flatten(),ma.getmask(ma.masked_inside(tay.flatten(),max(0.01,tth-thick/2.),tth+thick/2.)))
    for tth,azm,thick in arcs:
        tamt = ma.getmask(ma.masked_inside(tay.flatten(),max(0.01,tth-thick/2.),tth+thick/2.))
        tama = ma.getmask(ma.masked_inside(tax.flatten(),azm[0],azm[1]))
        tam = ma.mask_or(tam.flatten(),tamt*tama)
    taz = ma.masked_outside(image.flatten(),int(Zlim[0]),Zlim[1])
    tabs = np.ones_like(taz)
    tam = ma.mask_or(tam.flatten(),ma.getmask(taz))
    tax = ma.compressed(ma.array(tax.flatten(),mask=tam))   #azimuth
    tay = ma.compressed(ma.array(tay.flatten(),mask=tam))   #2-theta
    taz = ma.compressed(ma.array(taz.flatten(),mask=tam))   #intensity
    tad = ma.compressed(ma.array(tad.flatten(),mask=tam))   #dist**2/d0**2
    tabs = ma.compressed(ma.array(tabs.flatten(),mask=tam)) #ones - later used for absorption corr.
    return tax,tay,taz,tad,tabs
Developer ID: svaksha, Project: pyGSAS, Lines of code: 22, Source file: GSASIIimage.py


Example 9: __init__

    def __init__(self, bare_dir, dem_dir, outdir):
        
        #loads the GDAL derived DEMs to produce the crop height model
        
        if not os.path.exists(outdir):
            os.mkdir(outdir)
        
        os.path.join(dem_dir,bare_dir)
        
        bare = None
        crop_model = None
        
        for bare_dem in os.listdir(bare_dir):
            os.chdir(bare_dir)
            bare = LoadImage(bare_dem)
            
        bare_filtered = filters.median_filter(bare.stacked,size=(9,9))
        bare_spatial = bare.spatial
        
        for survey in os.listdir(dem_dir):
            im_path = os.path.join(dem_dir, survey)
            os.chdir(im_path)
            for image in os.listdir(im_path):
                crop = LoadImage(image)
                crop_filtered = filters.median_filter(crop.stacked,size=(3,3))
                
                crop_model = crop_filtered-bare_filtered

                
                
                bare_mask = ma.masked_where(bare_filtered==-999, bare_filtered)
                crop_mask = ma.masked_where(crop_filtered==-999, crop_filtered)
                #combine these into a single mask so that anything masked in either dataset
                #will be masked in the combined output
                combined_mask = ma.mask_or(bare_mask.mask, crop_mask.mask)
                #use this mask to mask the crop model
                crop_model = ma.array(crop_model, mask=combined_mask)
                
                #convert back to a bog-standard numpy array and fill the masked values
                crop_model = crop_model.filled(-999)
                
                
                name = survey+'_CropHeightModel.tif'
                
                self.writeimage(outdir,
                                name,
                                crop_model.shape[0],
                                crop_model.shape[1],
                                crop_model,
                                bare_spatial)
Developer ID: AntArch, Project: 2014_Lidar_Paper, Lines of code: 50, Source file: lidar_csm.py
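
The masking pattern in Example 9 is common for rasters that use a sentinel no-data value: mask the sentinel in each input, OR the masks together, apply the combined mask to the derived product, and fill masked cells back with the sentinel. Below is a minimal sketch with invented 2x2 rasters (variable names are ours, not from the project):

import numpy as np
import numpy.ma as ma

NODATA = -999.0
bare = np.array([[1.0, NODATA], [3.0, 4.0]])
crop = np.array([[2.0, 5.0], [NODATA, 6.0]])

bare_masked = ma.masked_where(bare == NODATA, bare)
crop_masked = ma.masked_where(crop == NODATA, crop)
combined_mask = ma.mask_or(bare_masked.mask, crop_masked.mask)

# Anything masked in either raster is masked in the result, then filled with NODATA
crop_model = ma.array(crop - bare, mask=combined_mask).filled(NODATA)
print(crop_model)   # [[   1. -999.]  [-999.    2.]]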


Example 10: __setattr__

    def __setattr__(self, attr, val):
        "Sets the attribute attr to the value val."
#        newattr = attr not in self.__dict__
        try:
            # Is attr a generic attribute ?
            ret = object.__setattr__(self, attr, val)
        except:
            # Not a generic attribute: exit if it's not a valid field
            fielddict = self.dtype.names or {}
            if attr not in fielddict:
                exctype, value = sys.exc_info()[:2]
                raise exctype, value
        else:
            if attr in ['_mask','fieldmask']:
                self.__setmask__(val)
                return
            # Get the list of names ......
            _names = self.dtype.names
            if _names is None:
                _names = []
            else:
                _names = list(_names)
            # Check the attribute
            self_dict = self.__dict__
            if attr not in _names+list(self_dict):
                return ret
            if attr not in self_dict:         # We just added this one
                try:            #  or this setattr worked on an internal
                                #  attribute.
                    object.__delattr__(self, attr)
                except:
                    return ret
        # Case #1.: Basic field ............
        base_fmask = self._fieldmask
        _names = self.dtype.names or []
        if attr in _names:
            if val is masked:
                fval = self.fill_value[attr]
                mval = True
            else:
                fval = filled(val)
                mval = getmaskarray(val)
            if self._hardmask:
                mval = mask_or(mval, base_fmask.__getattr__(attr))
            self._data.__setattr__(attr, fval)
            base_fmask.__setattr__(attr, mval)
            return
Developer ID: mbentz80, Project: jzigbeercp, Lines of code: 47, Source file: mrecords.py


Example 11: approx

def approx (a, b, fill_value=True, rtol=1.e-5, atol=1.e-8):
    """Returns true if all components of a and b are equal subject to given tolerances.

If fill_value is True, masked values are considered equal. Otherwise, masked values
are considered unequal.
The relative error rtol should be positive and << 1.0
The absolute error atol comes into play for those elements of b that are very
small or zero; it says how small a must be also.
    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return np.equal(d1,d2).ravel()
    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
    d = np.less_equal(umath.absolute(x-y), atol + rtol * umath.absolute(y))
    return d.ravel()
Developer ID: catawbasam, Project: FlightDataUtilities, Lines of code: 18, Source file: masked_array_testutils.py


Example 12: __init__

    def __init__(self,vals,vals_dmin,vals_dmax,mask=ma.nomask):
        super(UncertContainer, self).__init__()
        
        # If input data already masked arrays extract unmasked data
        if ma.isMaskedArray(vals): 
            vals = vals.data
        if ma.isMaskedArray(vals_dmin):
            vals_dmin = vals_dmin.data
        if ma.isMaskedArray(vals_dmax):
            vals_dmax = vals_dmax.data
        
        # Adjust negative values
        ineg = np.where(vals_dmin <= 0.0)
        vals_dmin[ineg] = TOL*vals[ineg]

        # Calculate weight based on fractional uncertainty 
        diff = vals_dmax - vals_dmin
        diff_m = ma.masked_where(vals_dmax == vals_dmin,diff)        

        self.vals = ma.masked_where(vals == 0.0,vals)

        self.wt = (self.vals/diff_m)**2
        self.uncert = diff_m/self.vals

        self.wt.fill_value = np.inf
        self.uncert.fill_value = np.inf

        assert np.all(self.wt.mask == self.uncert.mask)
        
        # Mask data if uncertainty is not finite or if any of the inputs were
        # already masked

        mm = ma.mask_or(self.wt.mask,mask)
        
        self.vals.mask = mm
        self.wt.mask = mm
        self.uncert.mask = mm
        self.dmin = ma.array(vals_dmin,mask=mm,fill_value=np.inf)
        self.dmax = ma.array(vals_dmax,mask=mm,fill_value=np.inf)

        self.mask = ma.getmaskarray(self.vals)
Developer ID: nrego, Project: westpa, Lines of code: 41, Source file: UncertMath.py


Example 13: getTimeseries

def getTimeseries(productcode, subproductcode, version, mapsetcode, wkt, start_date, end_date):
    #    Extract timeseries from a list of files and return as JSON object
    #    It applies to a single dataset (prod/sprod/version/mapset) and between 2 dates
    ogr.UseExceptions()
    theGeomWkt = ' '.join(wkt.strip().split())
    geom = Geometry(wkt=str(theGeomWkt), srs=4326)

    # Get Product Info
    product_info = querydb.get_product_out_info(productcode=productcode,
                                                subproductcode=subproductcode,
                                                version=version)
    if product_info.__len__() > 0:
        scale_factor = 0
        scale_offset = 0
        nodata = 0
        date_format = ''
        for row in product_info:
            scale_factor = row.scale_factor
            scale_offset = row.scale_offset
            nodata = row.nodata
            unit = row.unit
            date_format = row.date_format

        [list_files, dates_list] = getFilesList(productcode, subproductcode, version, mapsetcode, date_format, start_date, end_date)

        # Build a dictionary with filenames/dates
        dates_to_files_dict = dict(zip(dates_list, list_files))

        # Generate unique list of files
        unique_list = set(list_files)
        uniqueFilesValues = []

        for infile in unique_list:
            if os.path.isfile(infile):
                try:
                    mx = []
                    single_result = {'filename': '', 'meanvalue_noscaling': nodata, 'meanvalue': nodata}
                    with Raster(infile) as img:
                        # Assign nodata from prod_info
                        img._nodata = nodata
                        with img.clip(geom) as clipped:
                            # Save clipped image (for debug only)
                            # clipped.save(dataset.fullpath+'clipped_'+productfilename)
                            mx = clipped.array()

                    nodata_array_masked = ma.masked_equal(mx, nodata)
                    merged_mask = ma.mask_or(ma.getmask(mx), ma.getmask(nodata_array_masked))
                    mxnodata = ma.masked_array(ma.getdata(mx), merged_mask)

                    if mxnodata.count() == 0:
                        meanResult = 0.0
                    else:
                        meanResult = mxnodata.mean()

                    single_result['filename'] = infile
                    single_result['meanvalue_noscaling'] = meanResult
                    # Scale to physical value
                    finalvalue = (meanResult*scale_factor+scale_offset)
                    single_result['meanvalue'] = finalvalue

                    uniqueFilesValues.append(single_result)

                except Exception, e:
                    logger.debug('ERROR: clipping - %s' % (e))
                    # sys.exit (1)
            else:
                logger.debug('ERROR: raster file does not exist - %s' % infile)
                # sys.exit (1)

        # Define a dictionary to associate filenames/values
        files_to_values_dict = dict((x['filename'], x['meanvalue']) for x in uniqueFilesValues)

        # Prepare array for result
        resultDatesValues = []

        # Returns a list of 'filenames', 'dates', 'values'
        for mydate in dates_list:
            # my_result = {'date': datetime.date.today(), 'filename':'', 'meanvalue':nodata}
            my_result = {'date': datetime.date.today(), 'meanvalue':nodata}

            # Assign the date
            my_result['date'] = mydate
            # Assign the filename
            my_filename = dates_to_files_dict[mydate]
            # my_result['filename'] = my_filename
            # Map from array of Values
            my_result['meanvalue'] = files_to_values_dict[my_filename]

            # Map from array of dates
            resultDatesValues.append(my_result)

        return resultDatesValues
Developer ID: eStation2, Project: eStation2, Lines of code: 92, Source file: getTimeseries.py


Example 14: ImageRecalibrate

def ImageRecalibrate(self,data,masks):
    'Needs a doc string'
    import ImageCalibrants as calFile
    print 'Image recalibration:'
    time0 = time.time()
    pixelSize = data['pixelSize']
    scalex = 1000./pixelSize[0]
    scaley = 1000./pixelSize[1]
    pixLimit = data['pixLimit']
    cutoff = data['cutoff']
    data['rings'] = []
    data['ellipses'] = []
    if not data['calibrant']:
        print 'no calibration material selected'
        return True    
    skip = data['calibskip']
    dmin = data['calibdmin']
    Bravais,SGs,Cells = calFile.Calibrants[data['calibrant']][:3]
    HKL = []
    for bravais,sg,cell in zip(Bravais,SGs,Cells):
        A = G2lat.cell2A(cell)
        if sg:
            SGData = G2spc.SpcGroup(sg)[1]
            hkl = G2pwd.getHKLpeak(dmin,SGData,A)
            HKL += hkl
        else:
            hkl = G2lat.GenHBravais(dmin,bravais,A)
            HKL += hkl
    HKL = G2lat.sortHKLd(HKL,True,False)
    varyList = [item for item in data['varyList'] if data['varyList'][item]]
    parmDict = {'dist':data['distance'],'det-X':data['center'][0],'det-Y':data['center'][1],
        'tilt':data['tilt'],'phi':data['rotation'],'wave':data['wavelength'],'dep':data['DetDepth']}
    Found = False
    wave = data['wavelength']
    frame = masks['Frames']
    tam = ma.make_mask_none(self.ImageZ.shape)
    if frame:
        tam = ma.mask_or(tam,MakeFrameMask(data,frame))
    for iH,H in enumerate(HKL):
        if debug:   print H 
        dsp = H[3]
        tth = 2.0*asind(wave/(2.*dsp))
        if tth+abs(data['tilt']) > 90.:
            print 'next line is a hyperbola - search stopped'
            break
        ellipse = GetEllipse(dsp,data)
        Ring = makeRing(dsp,ellipse,pixLimit,cutoff,scalex,scaley,ma.array(self.ImageZ,mask=tam))
        if Ring:
            if iH >= skip:
                data['rings'].append(np.array(Ring))
            data['ellipses'].append(copy.deepcopy(ellipse+('r',)))
            Found = True
        elif not Found:         #skipping inner rings, keep looking until ring found 
            continue
        else:                   #no more rings beyond edge of detector
            data['ellipses'].append([])
            continue
#            break
    rings = np.concatenate((data['rings']),axis=0)
    chisq = FitDetector(rings,varyList,parmDict)
    data['wavelength'] = parmDict['wave']
    data['distance'] = parmDict['dist']
    data['center'] = [parmDict['det-X'],parmDict['det-Y']]
    data['rotation'] = np.mod(parmDict['phi'],360.0)
    data['tilt'] = parmDict['tilt']
    data['DetDepth'] = parmDict['dep']
    data['chisq'] = chisq
    N = len(data['ellipses'])
    data['ellipses'] = []           #clear away individual ellipse fits
    for H in HKL[:N]:
        ellipse = GetEllipse(H[3],data)
        data['ellipses'].append(copy.deepcopy(ellipse+('b',)))    
    print 'calibration time = ',time.time()-time0
    G2plt.PlotImage(self,newImage=True)        
    return True
Developer ID: svaksha, Project: pyGSAS, Lines of code: 75, Source file: GSASIIimage.py


Example 15: make_cube_movie

def make_cube_movie(source_key, colorbar_title, frame_dir,
                    filetag_suffix="", saveslice=None, saveslice_file=None,
                    outputdir="./", sigmarange=6., ignore=None, multiplier=1.,
                    transverse=False, title=None, sigmacut=None,
                    logscale=False, physical=False, convolve=False, tag=None):
    """Make a stack of spatial slice maps and animate them
    transverse plots along RA and freq and image plane is in Dec
    First mask any points that exceed `sigmacut`, and then report the extent of
    `sigmarange` away from the mean
    """
    datapath_db = data_paths.DataPath()
    cosmology = Cosmology()
    littleh = (cosmology.H0 / 100.0)

    beam_data = np.array([0.316148488246, 0.306805630985, 0.293729620792,
                          0.281176247549, 0.270856788455, 0.26745856078,
                          0.258910010848, 0.249188429031])

    freq_data = np.array([695, 725, 755, 785, 815, 845, 875, 905],
                             dtype=float)

    beam_fwhm = interp1d(freq_data, beam_data)
    freq_data_Hz = freq_data * 1.0e6

    if tag is None:
        tag = '_'.join(source_key.split(";"))
        tag = '-'.join(tag.split(":"))

        # for a given path
        #tag = ".".join(source_key.split(".")[:-1])  # extract root name
        #tag = tag.split("/")[-1]

    if title is None:
        title = "%(tag)s (i = %(index)d, "
        title += "freq = %(freq)3.1f MHz, z = %(redshift)3.3f, "
        title += "Dc=%(distance)3.0f cMpc)"

    print tag
    fileprefix = frame_dir + tag
    nlevels = 500

    if transverse:
        orientation = "_freqRA"
    else:
        orientation = "_RADec"

    # prepare the data
    #cube = algebra.make_vect(algebra.load(source_key)) * multiplier
    cube =  datapath_db.fetch_multi(source_key) * multiplier
    if logscale:
        cube[cube < 0] = np.min(np.abs(cube))
        cube = np.log10(cube)

    isnan = np.isnan(cube)
    isinf = np.isinf(cube)
    maskarray = ma.mask_or(isnan, isinf)

    if ignore is not None:
        maskarray = ma.mask_or(maskarray, (cube == ignore))

    convolved = ""
    if convolve:
        convolved = "_convolved"

        beamobj = beam.GaussianBeam(beam_data, freq_data_Hz)
        cube = beamobj.apply(cube)

    if sigmacut:
        #np.set_printoptions(threshold=np.nan, precision=4)
        deviation = np.abs((cube - np.mean(cube)) / np.std(cube))
        extend_maskarray = (cube > (sigmacut * deviation))
        maskarray = ma.mask_or(extend_maskarray, maskarray)

    mcube = ma.masked_array(cube, mask=maskarray)

    try:
        whmaskarray = np.where(maskarray)[0]
        mask_fraction = float(len(whmaskarray)) / float(cube.size)
    except:
        mask_fraction = 0.

    print "fraction of map clipped: %f" % mask_fraction
    (cube_mean, cube_std) = (mcube.mean(), mcube.std())
    print "cube mean=%g std=%g" % (cube_mean, cube_std)

    try:
        len(sigmarange)
        color_axis = np.linspace(sigmarange[0], sigmarange[1],
                                nlevels, endpoint=True)
    except TypeError:
        if (sigmarange > 0.):
            color_axis = np.linspace(cube_mean - sigmarange * cube_std,
                                    cube_mean + sigmarange * cube_std,
                                    nlevels, endpoint=True)
        else:
            if saveslice is not None:
                color_axis = np.linspace(ma.min(mcube[saveslice, :, :]),
                                         ma.max(mcube[saveslice, :, :]),
                                         nlevels, endpoint=True)
            else:
#......... (remaining code omitted) .........
Developer ID: OMGitsHongyu, Project: analysis_IM, Lines of code: 101, Source file: plot_cube.py
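
Example 15 builds its clipping mask incrementally. Reduced to its core, the pattern is to start from NaN/Inf masks and OR in further conditions (for instance an "ignore" value) before constructing the masked cube; the sketch below uses invented data and is not part of the original script:

import numpy as np
import numpy.ma as ma

cube = np.array([1.0, np.nan, np.inf, -1.0, 7.0])
maskarray = ma.mask_or(np.isnan(cube), np.isinf(cube))
maskarray = ma.mask_or(maskarray, cube == -1.0)   # treat -1.0 as an "ignore" value
mcube = ma.masked_array(cube, mask=maskarray)
print(mcube.mean(), mcube.std())   # statistics over the unmasked cells only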


Example 16: read_avhrrgac

def read_avhrrgac(f, a, tim, cha, tsm_corr=None):

    # get angle and geolocation
    sza, szanam = read_var(a, 'image1')
    lat, latnam = read_var(f, 'lat')
    lon, lonnam = read_var(f, 'lon')

    # get measurement
    # channel 1
    tar1, tarname1 = read_var(f, 'image1')
    tar1[:] = tar1 / 100.
    # channel 2
    tar2, tarname2 = read_var(f, 'image2')
    tar2[:] = tar2 / 100.
    # channel 3b
    tar3, tarname3 = read_var(f, 'image3')
    # channel 4
    tar4, tarname4 = read_var(f, 'image4')
    # channel 5
    tar5, tarname5 = read_var(f, 'image5')
    # channel 3a
    tar6, tarname6 = read_var(f, 'image6')
    tar6[:] = tar6 / 100.

    # --- START temporary scan motor issue correction
    if tsm_corr:
        # absolute difference because ch1 is very similar to ch2
        abs_d12 = abs(tar1 - tar2)
        # relative difference because ch4 and ch5 differ
        rel_d45 = 100.0*(tar4 - tar5)/tar5

        # standard deviation of abs_d12 and rel_d45
        box_size = 3
        fill_value = -9999.0
        std_d12 = gridbox_std(abs_d12, box_size, fill_value)
        std_d45 = gridbox_std(rel_d45, box_size, fill_value)

        # using ch1, ch2, ch4, ch5 in combination
        # all channels seems to be affected throughout the whole orbit,
        # independent of VIS and NIR or day and night
        ind1 = np.where( (std_d12 > 0.02) & (std_d45 > 2.00) )
        tar1[ind1] = -999.0
        tar2[ind1] = -999.0
        tar3[ind1] = -999.0
        tar4[ind1] = -999.0
        tar5[ind1] = -999.0
        tar6[ind1] = -999.0
    # --- END temporary scan motor issue correction

    if cha == 'ch1':
        tar = tar1
        tarname = tarname1 
    elif cha == 'ch2':
        tar = tar2
        tarname = tarname2
    elif cha == 'ch3b':
        tar = tar3
        tarname = tarname3
    elif cha == 'ch4':
        tar = tar4
        tarname = tarname4
    elif cha == 'ch5':
        tar = tar5
        tarname = tarname5
    elif cha == 'ch3a':
        tar = tar6
        tarname = tarname6

    # some lat/lon fields are not fill_value although they should be
    # lat/lon min/max outside realistic values
    # fixed here in read_var
    # but then tar and lat/lon do not have the same masked elements
    # thus:
    all_masks = [lat < -90., lat > 90., lon < -180., lon > 180.]
    total_mask = reduce(np.logical_or, all_masks)
    lat = ma.masked_where(total_mask, lat)
    lon = ma.masked_where(total_mask, lon)
    tar = ma.masked_where(total_mask, tar)
    sza = ma.masked_where(total_mask, sza)

    # select time
    if tim == 'day_90sza':
        # consider only daytime, i.e. sza < 90
        lon = ma.masked_where(sza >= 90., lon)
        lat = ma.masked_where(sza >= 90., lat)
        tar = ma.masked_where(sza >= 90., tar)

    if tim == 'day':
        # consider only daytime, i.e. sza < 80
        lon = ma.masked_where(sza >= 80., lon)
        lat = ma.masked_where(sza >= 80., lat)
        tar = ma.masked_where(sza >= 80., tar)

    if tim == 'twilight':
        # consider only twilight, i.e. 80 <= sza < 90
        # mask everything outside the current sza range
        omask = ma.mask_or(sza < 80, sza >= 90)
        lon = ma.masked_where(omask, lon)
        lat = ma.masked_where(omask, lat)
        tar = ma.masked_where(omask, tar)
#......... (remaining code omitted) .........
Developer ID: cschlund, Project: pytAVHRRGACl1c, Lines of code: 101, Source file: read_avhrrgac_h5.py


Example 17: interp


#......... (beginning of the code omitted) .........

    .. note::
     If datain is a masked array and order=1 (bilinear interpolation) is
     used, elements of dataout will be masked if any of the four surrounding
     points in datain are masked.  To avoid this, do the interpolation in two
     passes, first with order=1 (producing dataout1), then with order=0
     (producing dataout2).  Then replace all the masked values in dataout1
     with the corresponding elements in dataout2 (using numpy.where).
     This effectively uses nearest neighbor interpolation if any of the
     four surrounding points in datain are masked, and bilinear interpolation
     otherwise.

    Returns ``dataout``, the interpolated data on the grid ``xout, yout``.
    """    
    # xin and yin must be monotonically increasing.
    if xin[-1]-xin[0] < 0 or yin[-1]-yin[0] < 0:
        raise ValueError, 'xin and yin must be increasing!'
    if xout.shape != yout.shape:
        raise ValueError, 'xout and yout must have same shape!'
    # check that xout,yout are
    # within region defined by xin,yin.
    if checkbounds:
        if xout.min() < xin.min() or \
           xout.max() > xin.max() or \
           yout.min() < yin.min() or \
           yout.max() > yin.max():
            raise ValueError, 'yout or xout outside range of yin or xin'
    # compute grid coordinates of output grid.
    delx = xin[1:]-xin[0:-1]
    dely = yin[1:]-yin[0:-1]
    if max(delx)-min(delx) < 1.e-4 and max(dely)-min(dely) < 1.e-4:
        # regular input grid.
        xcoords = (len(xin)-1)*(xout-xin[0])/(xin[-1]-xin[0])
        ycoords = (len(yin)-1)*(yout-yin[0])/(yin[-1]-yin[0])
    else:
        # irregular (but still rectilinear) input grid.
        xoutflat = xout.flatten(); youtflat = yout.flatten()
        ix = (np.searchsorted(xin,xoutflat)-1).tolist()
        iy = (np.searchsorted(yin,youtflat)-1).tolist()
        xoutflat = xoutflat.tolist(); xin = xin.tolist()
        youtflat = youtflat.tolist(); yin = yin.tolist()
        xcoords = []; ycoords = []
        for n,i in enumerate(ix):
            if i < 0:
                xcoords.append(-1) # outside of range on xin (lower end)
            elif i >= len(xin)-1:
                xcoords.append(len(xin)) # outside range on upper end.
            else:
                xcoords.append(float(i)+(xoutflat[n]-xin[i])/(xin[i+1]-xin[i]))
        for m,j in enumerate(iy):
            if j < 0:
                ycoords.append(-1) # outside of range of yin (on lower end)
            elif j >= len(yin)-1:
                ycoords.append(len(yin)) # outside range on upper end
            else:
                ycoords.append(float(j)+(youtflat[m]-yin[j])/(yin[j+1]-yin[j]))
        xcoords = np.reshape(xcoords,xout.shape)
        ycoords = np.reshape(ycoords,yout.shape)
    # data outside range xin,yin will be clipped to
    # values on boundary.
    if masked:
        xmask = np.logical_or(np.less(xcoords,0),np.greater(xcoords,len(xin)-1))
        ymask = np.logical_or(np.less(ycoords,0),np.greater(ycoords,len(yin)-1))
        xymask = np.logical_or(xmask,ymask)
    xcoords = np.clip(xcoords,0,len(xin)-1)
    ycoords = np.clip(ycoords,0,len(yin)-1)
    # interpolate to output grid using bilinear interpolation.
    if order == 1:
        xi = xcoords.astype(np.int32)
        yi = ycoords.astype(np.int32)
        xip1 = xi+1
        yip1 = yi+1
        xip1 = np.clip(xip1,0,len(xin)-1)
        yip1 = np.clip(yip1,0,len(yin)-1)
        delx = xcoords-xi.astype(np.float32)
        dely = ycoords-yi.astype(np.float32)
        dataout = (1.-delx)*(1.-dely)*datain[yi,xi] + \
                  delx*dely*datain[yip1,xip1] + \
                  (1.-delx)*dely*datain[yip1,xi] + \
                  delx*(1.-dely)*datain[yi,xip1]
    elif order == 0:
        xcoordsi = np.around(xcoords).astype(np.int32)
        ycoordsi = np.around(ycoords).astype(np.int32)
        dataout = datain[ycoordsi,xcoordsi]
    elif order == 3:
        try:
            from scipy.ndimage import map_coordinates
        except ImportError:
            raise ValueError('scipy.ndimage must be installed if order=3')
        coords = [ycoords,xcoords]
        dataout = map_coordinates(datain,coords,order=3,mode='nearest')
    else:
        raise ValueError,'order keyword must be 0, 1 or 3'
    if masked and isinstance(masked,bool):
        dataout = ma.masked_array(dataout)
        newmask = ma.mask_or(ma.getmask(dataout), xymask)
        dataout = ma.masked_array(dataout,mask=newmask)
    elif masked and is_scalar(masked):
        dataout = np.where(xymask,masked,dataout)
    return dataout
Developer ID: Monte-Carlo, Project: pysenorge-1, Lines of code: 101, Source file: grid.py


Example 18: expand_value_coverage

def expand_value_coverage(input_raster, expand_raster, output_raster,
                          union=False, compress='DEFLATE', verbose=False,
                          logger=None):
    """ Expand a raster based on occurrence of informative cells in another.

    Argument "intersect" can be used to define if only the mask of the
    expand raster should be used, or an union between masks of input and
    expand raster.

    :param input_raster: String path to input raster.
    :param expand_raster: String path to mask raster.
    :param output_raster: String path to output raster.
    :param union: Boolean should masks of input_raster and expand_raster
                  be unioned.
    :param compress: String compression level used for the output raster.
    :param verbose: Boolean indicating how much information is printed out.
    :param logger: logger object to be used.
    :return Boolean success.
    """
    # 1. Setup  --------------------------------------------------------------

    all_start = timer()

    if not logger:
        logging.basicConfig()
        llogger = logging.getLogger('maskvalue_coverage')
        llogger.setLevel(logging.DEBUG if verbose else logging.INFO)
    else:
        llogger = logger

    # 2. Read and process raster  ---------------------------------------------

    # First, get the mask and dtype from the mask raster
    expand_raster = rasterio.open(expand_raster)
    expand_raster_src = expand_raster.read(1, masked=True)
    expand_mask = expand_raster_src.mask

    # Read raster bands directly to Numpy arrays.
    with rasterio.open(input_raster) as raster:
        llogger.info("Reading and processing raster {}".format(input_raster))
        input_nodata = raster.nodata

        # Read in the data
        src = raster.read(1, masked=True)
        src_dtype = src.dtype
        src_mask = src.mask

        # Perform a union on the masks if needed
        if union:
            llogger.info("[NOTE] Using union of masks")
            expand_mask = ma.mask_or(expand_mask, src_mask)

        llogger.debug("Number of informative cells in the data: {}".format(np.sum(~src_mask)))
        llogger.debug("Number of informative cells in the expand mask: {}".format(np.sum(~expand_mask)))

        # Change the mask and the underlying values
        src.mask = expand_mask
        src.data[src.mask] = input_nodata
        # There might be some NoData values lurking around, replace them with
        # zero.
        src.data[src == input_nodata] = 0.0
        
        profile = raster.profile
        profile.update(dtype=src_dtype, count=1, compress=compress,
                       nodata=input_nodata)

        with rasterio.open(output_raster, 'w', **profile) as dst:
            llogger.info("Writing output raster {}".format(output_raster))
            #import pdb; pdb.set_trace()
            dst.write(src.astype(src_dtype), 1)

    all_end = timer()
    all_elapsed = round(all_end - all_start, 2)
    llogger.info(" [TIME] Masking took {} sec".format(all_elapsed))
Developer ID: VUEG, Project: priocomp, Lines of code: 74, Source file: data_coverage.py



Note: The numpy.ma.mask_or examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and redistribution and use should follow the corresponding projects' licenses. Please do not reproduce without permission.

