Python shapefile.Reader Class Code Examples


This article collects typical usage examples of the shapefile.Reader class in Python. If you have been wondering what exactly the Reader class does, how to use it, or what working code that uses it looks like, the curated class examples below should help.



Twenty code examples of the Reader class are shown below, ordered by popularity.
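
Nearly all of the examples follow the same pyshp pattern: open a Reader, look up column positions in its fields list (whose first entry is the DBF deletion-flag placeholder), then iterate over the paired geometry and attribute records. As a quick orientation, here is a minimal sketch of that pattern; the shapefile base name 'points' is a hypothetical path:

from shapefile import Reader

sf = Reader('points')                    # hypothetical shapefile base name
print(sf.fields)                         # first entry is the DeletionFlag placeholder

for shprec in sf.shapeRecords():         # paired geometry + attributes
    shape, record = shprec.shape, shprec.record
    print(shape.points[:3], list(record))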

Example 1: get_wdb_boundaries

def get_wdb_boundaries(resolution, level, rivers=False):
    polymeta = []
    polybounds = []
    if rivers:
        filename = "WDBII_shp/%s/WDBII_river_%s_L%02i" % (resolution, resolution, level)
    else:
        filename = "WDBII_shp/%s/WDBII_border_%s_L%s" % (resolution, resolution, level)
    print(filename)
    shf = Reader(filename)
    fields = shf.fields
    for shprec in shf.shapeRecords():
        shp = shprec.shape
        rec = shprec.record
        parts = shp.parts.tolist()
        if parts != [0]:
            print "multipart polygon"
            raise SystemExit
        verts = shp.points
        lons, lats = list(zip(*verts))
        north = max(lats)
        south = min(lats)
        attdict = {}
        for r, key in zip(rec, fields[1:]):
            attdict[key[0]] = r
        area = -1
        id = attdict["id"]
        polymeta.append([-1, -1, south, north, len(lons), id])
        b = np.empty((len(lons), 2), np.float32)
        b[:, 0] = lons
        b[:, 1] = lats
        if lsd is not None:
            b = quantize(b, lsd)
        polybounds.append(b)
    return polybounds, polymeta
Developer ID: jdkloe, Project: basemap, Lines of code: 34, Source: readboundaries_shp.py
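
Example 1 builds attdict by zipping each record against fields[1:]; the slice skips pyshp's leading ('DeletionFlag', 'C', 1, 0) entry so that field names line up with record values. The same idea as a dict comprehension (a sketch, not from the basemap source):

attdict = {field[0]: value for field, value in zip(shf.fields[1:], rec)}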


Example 2: get_coast_polygons

def get_coast_polygons(resolution):
    polymeta = []; polybounds = []
    for level in [1,2,3,5]:
        filename = os.path.join(GSHHS_DIR, 'GSHHS_shp/', resolution,
                                'GSHHS_{}_L{}'.format(resolution, level))
        print(filename)
        shf = Reader(filename)
        fields = shf.fields
        try:
            shf.shapeRecords()
        except Exception:
            continue
        for shprec in shf.shapeRecords():
            shp = shprec.shape; rec = shprec.record
            parts = shp.parts.tolist()
            if parts != [0]:
                print('multipart polygon')
                raise SystemExit
            verts = shp.points
            lons, lats = list(zip(*verts))
            north = max(lats); south = min(lats)
            attdict={}
            for r,key in zip(rec,fields[1:]):
                attdict[key[0]]=r
            area = attdict['area']
            id = attdict['id']
            polymeta.append([level,area,south,north,len(lons),id])
            b = np.empty((len(lons),2),np.float32)
            b[:,0] = lons; b[:,1] = lats
            if lsd is not None:
                b = quantize(b,lsd)
            polybounds.append(b)

        # Manual fix for incorrect Antarctica polygons at full resolution
        # This issue is only present in the shapefile version and may be fixed
        # in future versions of GSHHS!
        if resolution == 'f' and level == 5:
            i = [item[-1] for item in polymeta].index('4-E')
            coords = polybounds[i][2:-1, :]
            coords = np.vstack([coords,
                                [180.0, -90.0],
                                [0.0, -90.0]]).astype(np.float32)
            polybounds[i] = coords
            polymeta[i][-2] = len(coords)

            j = [item[-1] for item in polymeta].index('4-W')
            coords = polybounds[j][3:, :]
            np.savetxt('coordinates.txt', coords)
            coords = np.vstack([coords,
                                [0.0, coords[-1][1]],
                                [0.0, -90.0],
                                [-180.0, -90.0],
                                coords[0]]).astype(np.float32)

            polybounds[j] = coords
            polymeta[j][-2] = len(coords)

    return polybounds, polymeta
Developer ID: jenshnielsen, Project: basemap, Lines of code: 58, Source: readboundaries_shp.py
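
Each polymeta entry packs [level, area, south, north, point count, id]. A hedged sketch of consuming the two returned lists together, assuming the GSHHS shapefiles are on disk and using 'c' (crude) as the resolution:

polybounds, polymeta = get_coast_polygons('c')
for b, (level, area, south, north, npts, poly_id) in zip(polybounds, polymeta):
    print(poly_id, level, npts, south, north)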


Example 3: extract_flowlines

    def extract_flowlines(self, source, destination, HUC8, verbose = True):
        """Extracts flowlines from the source datafile to the destination using
        the HUC8 for the query."""

        # open the flowline file
    
        if verbose: print('reading the flowline file\n')
    
        shapefile = Reader(source, shapeType = 3)
        records   = shapefile.records()
    
        # figure out which field codes are the Reach code and comid
    
        reach_index = shapefile.fields.index(['REACHCODE', 'C', 14, 0]) - 1
    
        # go through the reach indices, and add them to the list of flowlines
        # if in the watershed; also make a list of the corresponding comids
    
        if verbose: print('searching for flowlines in the watershed\n')
    
        indices = []
       
        i = 0
        for record in records:
            if record[reach_index][:8] == HUC8: indices.append(i)
            i+=1

        if len(indices) == 0:
            if verbose: print('error: query returned no values')
            raise ValueError('query returned no values')
    
        # write the data from the HUC8 to a new shapefile
    
        w = Writer(shapeType = 3)
    
        for field in shapefile.fields:  w.field(*field)
    
        for i in indices:
            shape = shapefile.shape(i)
            w.poly(shapeType = 3, parts = [shape.points])
    
            record = records[i]
    
            # little work around for blank GNIS_ID and GNIS_NAME values
    
            if isinstance(record[3], bytes):
                record[3] = record[3].decode('utf-8')
            if isinstance(record[4], bytes):
                record[4] = record[4].decode('utf-8')
    
            w.record(*record)
    
        w.save(destination)
    
        if verbose: 
            l = len(indices)
            print('queried {} flowlines from original shapefile\n'.format(l))
Developer ID: geclark330, Project: PyHSPF, Lines of code: 57, Source: nhdplusextractor.py
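
The query works because an NHDPlus reach code embeds the 8-digit hydrologic unit code as its first eight characters, so record[reach_index][:8] == HUC8 selects exactly the flowlines inside the watershed. A standalone sketch of the same filter, with a hypothetical file name and HUC code:

from shapefile import Reader

sf = Reader('flowlines')                 # hypothetical path
reach_index = [f[0] for f in sf.fields].index('REACHCODE') - 1
indices = [i for i, r in enumerate(sf.records())
           if r[reach_index][:8] == '02060006']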


Example 4: extract_catchments

    def extract_catchments(self, 
                           source, 
                           destination, 
                           flowlinefile, 
                           verbose = True,
                           ):
        """
        Extracts the catchments from the source data file to the destination
        using the list of comids for the query.
        """

        # make a list of the comids

        comids = self.get_comids(flowlinefile)

        # open the catchment shapefile
    
        if verbose: print('reading the catchment shapefile\n')
    
        shapefile = Reader(source)
    
        # get the index of the feature id, which links to the flowline comid
    
        featureid_index = shapefile.fields.index(['FEATUREID', 'N', 9, 0]) - 1
    
        # go through the comids from the flowlines and add the corresponding 
        # catchment to the catchment list
    
        if verbose: print('searching the catchments in the watershed\n')
    
        records = shapefile.records()
        indices = []
    
        i = 0
        for record in records:
            if record[featureid_index] in comids: indices.append(i)
            i+=1
    
        if len(indices) == 0:
            print('query returned no values, returning\n')
            raise ValueError('query returned no values')

        # create the new shapefile
    
        if verbose: print('writing the new catchment shapefile\n')
        
        w = Writer()
    
        for field in shapefile.fields:  w.field(*field)
    
        for i in indices:
            shape = shapefile.shape(i)
            w.poly(shapeType = 5, parts = [shape.points])
            w.record(*records[i])
    
        w.save(destination)
Developer ID: djibi2, Project: PyHSPF, Lines of code: 56, Source: nhdplusextractor.py


Example 5: set_metadata

    def set_metadata(self, 
                     gagefile,
                     ):
        """
        Opens the gage file with the station metadata.
        """

        # metadata for stations

        self.gages  = []
        self.day1s  = []
        self.dayns  = []
        self.drains = []
        self.states = []
        self.sites  = []
        self.nwiss  = []
        self.aves   = []
        self.names  = []

        gagereader = Reader(gagefile, shapeType = 1)

        # get the fields with pertinent info

        day1_index  = gagereader.fields.index(['DAY1',       'N', 19, 0]) - 1
        dayn_index  = gagereader.fields.index(['DAYN',       'N', 19, 0]) - 1
        drain_index = gagereader.fields.index(['DA_SQ_MILE', 'N', 19, 2]) - 1
        HUC8_index  = gagereader.fields.index(['HUC',        'C',  8, 0]) - 1
        state_index = gagereader.fields.index(['STATE',      'C',  2, 0]) - 1
        site_index  = gagereader.fields.index(['SITE_NO',    'C', 15, 0]) - 1
        nwis_index  = gagereader.fields.index(['NWISWEB',    'C', 75, 0]) - 1
        ave_index   = gagereader.fields.index(['AVE',        'N', 19, 3]) - 1
        name_index  = gagereader.fields.index(['STATION_NM', 'C', 60, 0]) - 1

        # iterate through the records

        for r in gagereader.records():
            
            gage  = r[site_index] 
            day1  = r[day1_index] 
            dayn  = r[dayn_index] 
            drain = r[drain_index]
            state = r[state_index]
            nwis  = r[nwis_index]
            ave   = r[ave_index]  
            name  = r[name_index]
            site  = r[site_index]

            self.gages.append(gage)
            self.day1s.append(day1)
            self.dayns.append(dayn)
            self.drains.append(drain)
            self.states.append(state)
            self.sites.append(site)
            self.nwiss.append(nwis)
            self.aves.append(ave)
            self.names.append(name)
Developer ID: djibi2, Project: PyHSPF, Lines of code: 56, Source: nwisextractor.py


Example 6: merge_shapes

def merge_shapes(inputfile, 
                 outputfile = None, 
                 overwrite = False, 
                 verbose = True, 
                 vverbose = False,
                 ):
    """
    Merges all the shapes in a shapefile into a single shape.
    """

    if outputfile is None: outputfile = '{}/merged'.format(os.getcwd())

    if os.path.isfile(outputfile + '.shp') and not overwrite:
        if verbose: 
            print('combined watershed shapefile {} exists'.format(outputfile))
        return
   
    if verbose: print('combining shapes from {}\n'.format(inputfile) + 
                      'this may take a while...\n')

    # start by copying the projection files

    shutil.copy(inputfile + '.prj', outputfile + '.prj')

    # load the catchment and flowline shapefiles

    r = Reader(inputfile, shapeType = 5)

    try: 

        combined = combine_shapes(r.shapes(), verbose = vverbose)

    except:

        print('error: unable to combine shapes')
        raise

    # create the new file with the merged shapes

    w = Writer(shapeType = 5)

    w.poly(shapeType = 5, parts = [combined])

    # copy the fields from the original and then the first record; note this
    # can be adapted as needed

    for field in r.fields: w.field(*field)
    w.record(*r.record(0))

    w.save(outputfile)

    if verbose: 

        its = inputfile, outputfile
        print('successfully combined shapes from {} to {}\n'.format(*its))
Developer ID: djibi2, Project: PyHSPF, Lines of code: 55, Source: vectorutils.py
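
A hedged usage sketch for merge_shapes, with hypothetical file paths; because the function copies the .prj file alongside the merged shapes, the input projection carries over unchanged:

merge_shapes('catchments', outputfile = 'merged', overwrite = True)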


Example 7: extract_raw

def extract_raw(source, destination, HUC8, plot = True, save = True, 
                verbose = True):
    """Extracts the grid data for the HUC8."""

    # make a new directory for the HUC8

    d = '{}/{}/NRCM'.format(destination, HUC8)

    if not os.path.isdir(d): os.mkdir(d)

    # make a "raw directory" for the unaltered info

    raw = '{}/raw'.format(d)

    if not os.path.isdir(raw): 
        os.mkdir(raw)
        if verbose: print('extracting NRCM predictions...\n')

    # use the boundary file to find the bounding box for the grid points

    boundaryfile = '{0}/{1}/{1}boundaries'.format(destination, HUC8)
    subbasinfile = '{0}/{1}/{1}subbasins'.format(destination, HUC8)
    space        = 0.1

    sf = Reader(boundaryfile)

    bbox = get_boundaries(sf.shapes(), space = space)

    xmin, ymin, xmax, ymax = bbox

    if verbose and not os.path.isdir(raw): 
        print('bounding box =', xmin, ymin, xmax, ymax, '\n')

    lats, lons = [], []
    for f in os.listdir(source):
        i = f.index('_')
        lon = float(f[:i])
        lat = float(f[i+1:])

        if inside_box([xmin, ymin], [xmax, ymax], [lon, lat]):
            lats.append(lat)
            lons.append(lon)

            if not os.path.isfile('{}/{}'.format(raw, f)):
                shutil.copy('{}/{}'.format(source, f), '{}/{}'.format(raw, f))
            
    if plot: 
        if save: output = '{}/gridpoints'.format(d)
        else:    output = None
        if output is None or not os.path.isfile(output):
            plot_NRCM(lons, lats, bfile = boundaryfile, sfile = subbasinfile,
                      output = output, show = False)
Developer ID: djibi2, Project: PyHSPF, Lines of code: 52, Source: extract_timeseries.py


Example 8: get_comids

    def get_comids(self, flowlinefile):
        """Finds the comids from the flowline file."""

        # open the file

        shapefile = Reader(flowlinefile)

        # find the index of the comids

        comid_index = shapefile.fields.index(['COMID', 'N', 9,  0]) - 1

        # make a list of the comids

        comids = [r[comid_index] for r in shapefile.records()]

        return comids
Developer ID: geclark330, Project: PyHSPF, Lines of code: 16, Source: nhdplusextractor.py
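
get_comids locates the COMID column by matching the full pyshp field spec ['COMID', 'N', 9, 0]. Matching on the name alone, as the later examples do, is less brittle if the declared width or precision ever differs (a sketch, not from the PyHSPF source):

comid_index = [f[0] for f in shapefile.fields].index('COMID') - 1
comids = [r[comid_index] for r in shapefile.records()]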


Example 9: get_wdb_boundaries

def get_wdb_boundaries(resolution,level,rivers=False):
    polymeta = []; polybounds = []
    if rivers:
        filename = os.path.join(GSHHS_DIR, 'WDBII_shp', resolution,
                            'WDBII_river_{}_L{:02}'.format(resolution, level))
    else:
        filename = os.path.join(GSHHS_DIR, 'WDBII_shp', resolution,
                            'WDBII_border_{}_L{}'.format(resolution, level))
    print(filename)
    shf = Reader(filename)
    fields = shf.fields
    for shprec in shf.shapeRecords():
        shp = shprec.shape; rec = shprec.record
        parts = shp.parts.tolist()
        if parts != [0]:
            print('multipart polygon')
            raise SystemExit

        verts = shp.points
        # Detect degenerate lines that are actually points...
        if len(verts) == 2 and np.allclose(verts[0], verts[1]):
            print('Skipping degenerate line...')
            continue

        lons, lats = list(zip(*verts))
        north = max(lats); south = min(lats)
        attdict={}
        for r,key in zip(rec,fields[1:]):
            attdict[key[0]]=r
        area = -1
        poly_id = attdict['id']
        b = np.empty((len(lons),2),np.float32)
        b[:,0] = lons; b[:,1] = lats

        if not rivers:
            b = interpolate_long_segments(b, resolution)

        if lsd is not None:
            b = quantize(b,lsd)

        polymeta.append([-1,-1,south,north,len(b),poly_id])
        polybounds.append(b)

    return polybounds, polymeta
Developer ID: jenshnielsen, Project: basemap, Lines of code: 44, Source: readboundaries_shp.py


Example 10: _convert_csv_to_dbf

def _convert_csv_to_dbf(input_file, output_file, mapping_file=None,
                        mapping_from=None, mapping_to=None, print_data_dict=False):
    if output_file is None:
        name = new_file_ending(input_file.name, '.dbf')
        output_file = open(name, 'w')

    if mapping_file:
        # Read this file and map it
        try:
            from shapefile import Reader
        except ImportError:
            print("pyshp required for mapping feature")
            raise
        dbfr = Reader(dbf=mapping_file)
        # find the field that has the mapping_from name;
        # use -1 because pyshp adds a column for flagging deleted fields
        name_i = _find_field_index_dbf(dbfr.fields, mapping_from) - 1
        map_values = [rec[name_i] for rec in dbfr.iterRecords()]

    # Parse the csv.
    parser = csv_parser(handle=input_file)
    header, fieldspecs, records = parser.parse()
    if mapping_file:
        csv_name_i = header.index(mapping_to)
        # be conservative and make sure they match
        if len(records) != len(map_values):
            raise Exception('mapping records lengths must match')
        # reorder the records so they match the original;
        # this will raise an error if something does not map
        mapped_records = [None]*len(map_values)
        for i in range(len(map_values)):
            mv = map_values[i]
            try:
                old_i = collect(records, csv_name_i).index(mv)
            except ValueError:
                raise ValueError('Could not find record name %s in csv' % mv)
            mapped_records[i] = records[old_i]
        records = mapped_records

    # Write to dbf.
    dbfwriter(output_file, header, fieldspecs, records)

    if print_data_dict:
        parser.write_dd(input_file.name, output_file)
Developer ID: charleslaw, Project: census2dbf, Lines of code: 44, Source: census2dbf.py
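
Example 10 opens only the DBF component by passing dbf= to Reader, so no .shp or .shx file is needed; pyshp documents passing an open file object this way. A minimal sketch with a hypothetical table name:

from shapefile import Reader

dbfr = Reader(dbf=open('attributes.dbf', 'rb'))   # hypothetical DBF-only table
for rec in dbfr.iterRecords():                    # stream records one at a time
    print(list(rec))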


Example 11: get_coast_polygons

def get_coast_polygons(resolution):
    polymeta = []
    polybounds = []
    for level in [1, 2, 3, 4]:
        filename = "GSHHS_shp/%s/GSHHS_%s_L%s" % (resolution, resolution, level)
        # filename = 'WDBII_shp/%s/WDBII_border_%s_L%s' % (resolution, resolution, level)
        print(filename)
        shf = Reader(filename)
        fields = shf.fields
        try:
            shf.shapeRecords()
        except Exception:
            continue
        for shprec in shf.shapeRecords():
            shp = shprec.shape
            rec = shprec.record
            parts = shp.parts.tolist()
            if parts != [0]:
                print "multipart polygon"
                raise SystemExit
            verts = shp.points
            lons, lats = list(zip(*verts))
            north = max(lats)
            south = min(lats)
            attdict = {}
            for r, key in zip(rec, fields[1:]):
                attdict[key[0]] = r
            area = attdict["area"]
            id = attdict["id"]
            polymeta.append([level, area, south, north, len(lons), id])
            b = np.empty((len(lons), 2), np.float32)
            b[:, 0] = lons
            b[:, 1] = lats
            if lsd is not None:
                b = quantize(b, lsd)
            polybounds.append(b)
    return polybounds, polymeta
Developer ID: jdkloe, Project: basemap, Lines of code: 37, Source: readboundaries_shp.py


Example 12: plot_NRCM

def plot_NRCM(lons, lats, bfile = None, sfile = None, space = 0.05,
              show = False, output = None):

    fig = pyplot.figure()

    sub = fig.add_subplot(111, aspect = 'equal')
    sub.set_title('Nested Regional Climate Model Grid Points')
    sub.scatter(lons, lats, marker = '+', c = 'r', s = 40)

    if bfile is not None:
        
        sf = Reader(bfile)
        boundary = sf.shape(0).points
        sub.add_patch(make_patch(boundary, (1, 0, 0, 0), width = 1.2))

    if sfile is not None:
        
        sf = Reader(sfile)

        for s in sf.shapes():
            boundary = s.points
            sub.add_patch(make_patch(boundary, (1, 0, 0, 0), width = 0.2))

    sub.set_xlabel('Longitude, Decimal Degrees', size = 13)
    sub.set_ylabel('Latitude, Decimal Degrees', size = 13)

    xmin, ymin, xmax, ymax = get_boundaries(sf.shapes(), space = space)

    pyplot.xlim([xmin, xmax])
    pyplot.ylim([ymin, ymax])

    if output is not None: pyplot.savefig(output)

    if show: pyplot.show()

    pyplot.clf()
    pyplot.close()
Developer ID: djibi2, Project: PyHSPF, Lines of code: 37, Source: extract_timeseries.py


Example 13: extract_HUC8

    def extract_HUC8(self, HUC8, output, gagefile = 'gagestations', 
                     verbose = True):
        """Extracts the USGS gage stations for a watershed from the gage 
        station shapefile into a shapefile for the 8-digit hydrologic unit 
        code of interest. 
        """

        # make sure the metadata exist locally

        self.download_metadata()

        # make sure the output destination exists

        if not os.path.isdir(output): os.mkdir(output)

        sfile = '{}/{}'.format(output, gagefile)
        if not os.path.isfile(sfile + '.shp'):

            # copy the projection

            shutil.copy(self.NWIS + '.prj', sfile + '.prj')

            # read the file

            gagereader  = Reader(self.NWIS, shapeType = 1)
            gagerecords = gagereader.records()

            # pull out the HUC8 record to parse the dataset

            HUC8_index  = gagereader.fields.index(['HUC',  'C', 8, 0]) - 1

            # iterate through the field and find gages in the watershed

            its = HUC8, sfile
            print('extracting gage stations in {} to {}\n'.format(*its))

            gage_indices = []

            i = 0
            for record in gagerecords:
                if record[HUC8_index] == HUC8: gage_indices.append(i)
                i+=1

            # write the data from the HUC8 to a new shapefile

            w = Writer(shapeType = 1)

            for field in gagereader.fields:  w.field(*field)

            for i in gage_indices:
                point = gagereader.shape(i).points[0]
                w.point(*point)
                w.record(*gagerecords[i])

            w.save(sfile)

            if verbose: 
                print('successfully extracted NWIS gage stations\n')

        elif verbose: 

            print('gage station file {} exists\n'.format(sfile))

        self.set_metadata(sfile)
Developer ID: kbrannan, Project: PyHSPF, Lines of code: 64, Source: nwisextractor.py
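
A hedged usage sketch, with a hypothetical HUC8 code and output directory; note that the method finishes by calling set_metadata on the new file, wiring up the station lists shown in Example 5:

extractor.extract_HUC8('02060006', 'HSPF_data')   # hypothetical inputs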


Example 14: average

# few more stations

space = 0.5

# download/set the location of the data using the "download_shapefile" method

processor.download_shapefile(filename, start, end, output,
                             datasets = ['precip3240'], space = 0.5)

# the ClimateProcessor's aggregate method can be used with inverse-distance
# weighted average (IDWA) to interpolate between the stations at a given point
# using the "method," "latitude," and "longitude" keyword arguments. the
# result is the same as the previous example. as before, the subbasin_catchments
# shapefile will be used that contains the centroid for each aggregation.

sf = Reader(filename)

# index of the comid, latitude, and longitude records

comid_index = [f[0] for f in sf.fields].index('ComID') - 1
lon_index   = [f[0] for f in sf.fields].index('CenX')  - 1
lat_index   = [f[0] for f in sf.fields].index('CenY')  - 1

# iterate through the shapefile records and aggregate the timeseries

for i in range(len(sf.records())):

    record = sf.record(i)
    comid  = record[comid_index]
    lon    = record[lon_index]
    lat    = record[lat_index]
Developer ID: djlampert, Project: PyHSPF, Lines of code: 31, Source: climateprocessor08.py
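
The loop body is truncated in this listing; following the call pattern shown in Example 16 below, each centroid would be passed to the aggregate method with the IDWA keywords, roughly as in this sketch (assuming processor, start, and end from earlier in the script):

    precip = processor.aggregate('precip3240', 'precip', start, end,
                                 method = 'IDWA',
                                 longitude = lon, latitude = lat)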


Example 15: extract_bbox

    def extract_bbox(self, bbox, output, verbose = True):
        """Extracts the NID dam locations for a watershed from the dam 
        shapefile and the 8-digit hydrologic unit code of interest. 
        """

        self.download_compressed()

        xmin, ymin, xmax, ymax = bbox

        # copy the projection files

        if verbose: print('copying the projections from the NID source\n')

        projection = self.source + '.prj'

        shutil.copy(projection, output + '.prj')

        # get the dams within the watershed

        if verbose: print('reading the dam file\n')

        sf = Reader(self.source, shapeType = 1)

        # work around for issues with pyshp

        damrecords   = []
        for i in range(len(sf.shapes())):
            try: damrecords.append(sf.record(i))
            except Exception: damrecords.append([-100 for i in range(len(sf.fields))])

        name_index  = sf.fields.index(['DAM_NAME',   'C', 65,   0]) - 1
        nid_index   = sf.fields.index(['NIDID',      'C', 7,    0]) - 1
        long_index  = sf.fields.index(['LONGITUDE',  'N', 19,  11]) - 1
        lat_index   = sf.fields.index(['LATITUDE',   'N', 19,  11]) - 1
        river_index = sf.fields.index(['RIVER',      'C', 65,   0]) - 1
        owner_index = sf.fields.index(['OWN_NAME',   'C', 65,   0]) - 1
        type_index  = sf.fields.index(['DAM_TYPE',   'C', 10,   0]) - 1
        purp_index  = sf.fields.index(['PURPOSES',   'C', 254,  0]) - 1
        year_index  = sf.fields.index(['YR_COMPL',   'C', 10,   0]) - 1
        high_index  = sf.fields.index(['NID_HEIGHT', 'N', 19,  11]) - 1
        mstor_index = sf.fields.index(['MAX_STOR',   'N', 19,  11]) - 1
        nstor_index = sf.fields.index(['NORMAL_STO', 'N', 19,  11]) - 1
        area_index  = sf.fields.index(['SURF_AREA',  'N', 19,  11]) - 1

        # iterate through the fields and determine which points are in the box

        if verbose: print('extracting dams into new file\n')

        dam_indices = []

        i = 0
        for record in damrecords:

            lat = record[lat_index]
            lon = record[long_index]

            if self.inside_box([xmin, ymin], [xmax, ymax], [lon, lat]):
                dam_indices.append(i)
            i+=1

        # write the data from the bbox to a new shapefile

        w = Writer(shapeType = 1)

        for field in sf.fields:  w.field(*field)

        for i in dam_indices:
            point = sf.shape(i).points[0]
            w.point(*point)

            values = damrecords[i]

            rs = []

            for value in values:

                if isinstance(value, bytes): value = value.decode('utf-8')
                rs.append(value)

            w.record(*rs)

        w.save(output)

        if verbose: 

            print('successfully extracted NID dam locations to new file\n')
Developer ID: eotp, Project: PyHSPF, Lines of code: 86, Source: nidextractor.py
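
A hedged usage sketch for extract_bbox; the bounding box and output base name are hypothetical, and the extractor is an NIDExtractor instance per the source file cited above:

extractor.extract_bbox((-94.2, 41.3, -93.3, 42.1), 'dams')   # xmin, ymin, xmax, ymax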


Example 16: climate


#......... some code omitted here .........
                data = station.make_timeseries('evaporation', s, e)

                # ignore datasets with no observations during the period

                observations = [v for v in data if v is not None]

                if len(observations) > 0: evapstations.append(k)

        # aggregate the hourly NSRDB metstat data

        hsolar = '{}/solar'.format(hourly)
        if not os.path.isfile(hsolar):
            ts = s, 60, climateprocessor.aggregate('NSRDB', 'metstat', s, e)
            with open(hsolar, 'wb') as f: pickle.dump(ts, f)
            
        # aggregate the hourly solar to daily

        dsolar = '{}/solar'.format(daily)
        if not os.path.isfile(dsolar):

            with open(hsolar, 'rb') as f: t, tstep, data = pickle.load(f)
            ts = s, 1440, [sum(data[i:i+24]) / 24 
                           for i in range(0, 24 * (e-s).days, 24)]

            with open(dsolar, 'wb') as f: pickle.dump(ts, f)

        # aggregate the hourly precipitation for each subbasin using IDWA

        precip = '{}/hourlyprecipitation'.format(climatedata)
        if not os.path.isdir(precip): os.mkdir(precip)

        # use the subbasin shapefile to get the location of the centroids

        sf = Reader(subbasinfile)

        # index of the comid, latitude, and longitude records

        comid_index = [f[0] for f in sf.fields].index('ComID') - 1
        lon_index   = [f[0] for f in sf.fields].index('CenX')  - 1
        lat_index   = [f[0] for f in sf.fields].index('CenY')  - 1
        elev_index  = [f[0] for f in sf.fields].index('AvgElevM') - 1
        area_index  = [f[0] for f in sf.fields].index('AreaSqKm') - 1

        # iterate through the shapefile records and aggregate the timeseries

        for i in range(len(sf.records())):

            record = sf.record(i)
            comid  = record[comid_index]
            lon    = record[lon_index]
            lat    = record[lat_index]

            # check if the aggregated time series exists or calculate it

            subbasinprecip = '{}/{}'.format(precip, comid)
            if not os.path.isfile(subbasinprecip):

                if verbose:
                    i = comid, lon, lat
                    print('aggregating timeseries for comid ' +
                          '{} at {}, {}\n'.format(*i))

                p = climateprocessor.aggregate('precip3240', 'precip', s, e,
                                               method = 'IDWA', 
                                               longitude = lon,
                                               latitude = lat)
Developer ID: eotp, Project: PyHSPF, Lines of code: 67, Source: preprocessor.py


Example 17: build_watershed

    def build_watershed(self,
                        subbasinfile, 
                        flowfile, 
                        outletfile, 
                        damfile, 
                        gagefile,
                        landfiles, 
                        VAAfile, 
                        years, 
                        HUC8, 
                        filename,
                        plotname = None,
                        ):

        # create a dictionary to store subbasin data

        subbasins = {}

        # create a dictionary to keep track of subbasin inlets

        inlets = {}

        # read in the flow plane data into an instance of the FlowPlane class

        sf = Reader(subbasinfile, shapeType = 5)

        comid_index = sf.fields.index(['ComID',      'N',  9, 0]) - 1
        len_index   = sf.fields.index(['PlaneLenM',  'N',  8, 2]) - 1
        slope_index = sf.fields.index(['PlaneSlope', 'N',  9, 6]) - 1
        area_index  = sf.fields.index(['AreaSqKm',   'N', 10, 2]) - 1
        cx_index    = sf.fields.index(['CenX',       'N', 12, 6]) - 1
        cy_index    = sf.fields.index(['CenY',       'N', 12, 6]) - 1
        elev_index  = sf.fields.index(['AvgElevM',   'N',  8, 2]) - 1

        for record in sf.records():
            comid     = '{}'.format(record[comid_index])
            length    = record[len_index]
            slope     = record[slope_index]
            tot_area  = record[area_index]
            centroid  = [record[cx_index], record[cy_index]]
            elevation = record[elev_index]

            subbasin  = Subbasin(comid)
            subbasin.add_flowplane(length, slope, centroid, elevation)

            subbasins[comid] = subbasin

        # read in the flowline data to an instance of the Reach class

        sf = Reader(flowfile)

        outcomid_index   = sf.fields.index(['OutComID',   'N',  9, 0]) - 1
        gnis_index       = sf.fields.index(['GNIS_NAME',  'C', 65, 0]) - 1
        reach_index      = sf.fields.index(['REACHCODE',  'C',  8, 0]) - 1
        incomid_index    = sf.fields.index(['InletComID', 'N',  9, 0]) - 1
        maxelev_index    = sf.fields.index(['MaxElev',    'N',  9, 2]) - 1
        minelev_index    = sf.fields.index(['MinElev',    'N',  9, 2]) - 1
        slopelen_index   = sf.fields.index(['SlopeLenKM', 'N',  6, 2]) - 1
        slope_index      = sf.fields.index(['Slope',      'N',  8, 5]) - 1
        inflow_index     = sf.fields.index(['InFlowCFS',  'N',  8, 3]) - 1
        outflow_index    = sf.fields.index(['OutFlowCFS', 'N',  8, 3]) - 1
        velocity_index   = sf.fields.index(['VelFPS',     'N',  7, 4]) - 1
        traveltime_index = sf.fields.index(['TravTimeHR', 'N',  8, 2]) - 1

        for record in sf.records():

            outcomid   = '{}'.format(record[outcomid_index])
            gnis       = record[gnis_index]
            reach      = record[reach_index]
            incomid    = '{}'.format(record[incomid_index])
            maxelev    = record[maxelev_index] / 100
            minelev    = record[minelev_index] / 100
            slopelen   = record[slopelen_index]
            slope      = record[slope_index]
            inflow     = record[inflow_index]
            outflow    = record[outflow_index]
            velocity   = record[velocity_index]
            traveltime = record[traveltime_index]

            if isinstance(gnis, bytes): gnis = ''

            subbasin = subbasins[outcomid]

            flow = (inflow + outflow) / 2
            subbasin.add_reach(gnis, maxelev, minelev, slopelen, flow = flow, 
                               velocity = velocity, traveltime = traveltime)
            inlets[outcomid] = incomid

        # open up the outlet file and see if the subbasin has a gage or dam

        sf = Reader(outletfile)

        records = sf.records()

        comid_index = sf.fields.index(['COMID',   'N',  9, 0]) - 1
        nid_index   = sf.fields.index(['NIDID',   'C',  7, 0]) - 1
        nwis_index  = sf.fields.index(['SITE_NO', 'C', 15, 0]) - 1

        nids = {'{}'.format(r[comid_index]):r[nid_index] for r in records 
                if isinstance(r[nid_index], str)}
#......... some code omitted here .........
Developer ID: eotp, Project: PyHSPF, Lines of code: 101, Source: preprocessor.py


Example 18: plot_climate

def plot_climate(HUC8, sfile, bfile, pfile = None, efile = None, tfile = None, 
                 snowfile = None, centroids = True, radius = None, 
                 patchcolor = None, solarfile = None, windfile = None,
                 output = None, show = False, verbose = True):
    """Makes a plot of all the hourly precipitation stations of a watershed
    defined by "bfile" with subbasin defined by "sfile" from the source 
    precipitation shapefile "pfile"."""


    if verbose: 
        print('generating plot of watershed %s NCDC stations\n' % HUC8)

    fig = pyplot.figure()
    subplot = fig.add_subplot(111, aspect = 'equal')
    subplot.tick_params(axis = 'both', which = 'major', labelsize = 10)

    # add the title

    description = 'Climate Data Stations'
    title = 'Cataloging Unit %s\n%s' % (HUC8, description)
    subplot.set_title(title, fontsize = 14)

    # open up and show the catchments

    if patchcolor is None: facecolor = (1,0,0,0.)
    else:                  facecolor = patchcolor

    b = Reader(bfile, shapeType = 5)

    points = np.array(b.shape(0).points)
    subplot.add_patch(make_patch(points, facecolor = facecolor, width = 1.))

    extent = get_boundaries(b.shapes(), space = 0.02)

    xmin, ymin, xmax, ymax = extent

    # add the subbasin file

    s = Reader(sfile, shapeType = 5)

    # make patches of the subbasins

    for i in range(len(s.records())):
        shape = s.shape(i)
        points = np.array(shape.points)
        subplot.add_patch(make_patch(points, facecolor, width = 0.15))

    plots = [] # keep track of the scatterplots
    names = [] # keep track of names for the legend
        
    # add the subbasin centroids

    if centroids:

        cx_index = s.fields.index(['CenX', 'N', 12, 6]) - 1
        cy_index = s.fields.index(['CenY', 'N', 12, 6]) - 1

        centroids = [[r[cx_index], r[cy_index]] for r in s.records()]
        xs, ys = zip(*centroids)
        cplot = subplot.scatter(xs, ys, marker = '+', c = 'pink', s = 15)
        plots.append(cplot)
        names.append('Centroids')

    # add a circle around subbasin "radius" showing the gages within
    # the radius for that subbasin

    if radius is not None:

        comid_index = s.fields.index(['ComID',    'N',  9, 0]) - 1
        cx_index    = s.fields.index(['CenX',     'N', 12, 6]) - 1
        cy_index    = s.fields.index(['CenY',     'N', 12, 6]) - 1
        area_index  = s.fields.index(['AreaSqKm', 'N', 10, 2]) - 1

        comids = ['{}'.format(r[comid_index]) for r in s.records()]
        cxs    = [r[cx_index]    for r in s.records()]
        cys    = [r[cy_index]    for r in s.records()]
        areas  = [r[area_index]  for r in s.records()]

        try: i = comids.index(radius)
        except ValueError: i = 0
        
        c = [cxs[i], cys[i]]

        radii = [math.sqrt(a / math.pi) for a in areas]

        # scale kms to degrees

        km  = get_distance([xmin, ymin], [xmax, ymax])
        deg = math.sqrt((xmin - xmax)**2 + (ymax - ymin)**2)

        r = sum(radii) / len(radii) * deg / km * 5

        circle = pyplot.Circle(c, radius = r, edgecolor = 'black', 
                               facecolor = 'yellow', alpha = 0.5) 
        subplot.add_patch(circle)
        subplot.scatter(c[0], c[1], marker = '+', c = 'black')

    # add the precipitation gage points

    if pfile is not None:
#......... some code omitted here .........
Developer ID: djibi2, Project: PyHSPF, Lines of code: 101, Source: gisplots.py


Example 19: cos

#......... some code omitted here .........
    cfpu = f.variables["fpu"][:]
    for c in ["maize", "wheat", "soy", "rice"]:
        careas[c] = f.variables["area_" + c][:]

# find valid fpus
tarea = 100 * (111.2 / 2) ** 2 * cos(pi * lats / 180)
tarea = resize(tarea, (nlons, nlats)).T
validfpus = []
for i in range(nfpu):
    hareafpu = harea[fpumap == fpu[i]].sum()
    tareafpu = tarea[fpumap == fpu[i]].sum()
    if hareafpu / tareafpu > percent / 100.0:
        validfpus.append(fpu[i])

# load shape file
r = Reader(shapefile)
shapes = r.shapes()
records = r.records()

models = ["epic", "gepic", "lpj-guess", "lpjml", "pdssat", "pegasus"]  # exclude image
gcms = ["gfdl-esm2m", "hadgem2-es", "ipsl-cm5a-lr", "miroc-esm-chem", "noresm1-m"]
crops = ["maize", "wheat", "soy", "rice"] if crop == "all" else [crop]
co2s = ["co2", "noco2"]

hadgemidx = gcms.index("hadgem2-es")

nm, ng, ncr, nco2 = len(models), len(gcms), len(crops), len(co2s)

# variables
sh = (nm, ng, ncr, 3, nfpu, nco2)
dy26arr = masked_array(zeros(sh), mask=ones(sh))
Developer ID: RDCEP, Project: ggcmi, Lines of code: 31, Source: blmap.isi1.py


Example 20: merge_shapes

def merge_shapes(inputfile, outputfile = None, overwrite = False, 
                 verbose = True, vverbose = False):
    """Merges all the shapes in a shapefile into a single shape."""

    if outputfile is None: outputfile = '{}/merged'.format(os.getcwd())

    if os.path.isfile(outputfile + '.shp') and not overwrite:
        if verbose: print('combined watershed shapefile %s exists' % outputfile)
        return
   
    if verbose: print('combining shapes from {}\n'.format(inputfile) + 
                      'this may take a while...\n')

    # start by copying the projection files

    shutil.copy(inputfile + '.prj', outputfile + '.prj')

    # load the catchment and flowline shapefiles

    r = Reader(inputfile, shapeType = 5)
    n = len(r.records())

    try: 
        shapes  = []
        records = [] 
        bboxes  = []

        for i in range(n):
            shape = r.shape(i)
            record = r.record(i)

            shape_list = format_shape(shape.points)

            for sh in shape_list:
#......... some code omitted here .........