• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python utils.write函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中utils.write函数的典型用法代码示例。如果您正苦于以下问题:Python write函数的具体用法?Python write怎么用?Python write使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了write函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: trade_reciprocity

def trade_reciprocity(years,resource):
  """Measure import/export reciprocity of a traded resource per year.

  For each year, builds the trade graph, pairs every directed edge's
  weight with the weight of its reverse edge, computes the Pearson
  correlation between the two weight series, writes the yearly means to
  the results directory and saves a line plot. Returns 0.
  """
  corrmeans = []
  for year in years:
    G = get_graph(year,resource)
    corrcoeffs = []
    [xs,ys] = [[],[]]
    for country in G.nodes():
      for e in G.edges(country):
        try:
          # Pair the edge data with its reverse edge's data; a missing
          # reverse edge raises KeyError and the pair is skipped.
          [x1,y1] = [G[e[0]][e[1]],G[e[1]][e[0]]]
          #print [x1,y1]
          xs.append(x1['weight'])
          ys.append(y1['weight'])
        except KeyError:
          # NOTE(review): this bare string is a no-op -- unreciprocated
          # edges are silently ignored (apparently on purpose).
          'whoops'
    # NOTE(review): corrcoeffs collects at most one coefficient per
    # year (it is filled after the edge loop), so np.mean() below just
    # unwraps it, or yields nan when fewer than two pairs were found.
    if len(xs)>1:
      cc = np.corrcoef([xs,ys])
      corrcoeffs.append(cc[0][1])
    #print corrcoeffs
    corrmeans.append(np.mean(corrcoeffs))
    print [year,np.mean(corrcoeffs)]
  write({'means':corrmeans, 'years':years},get_results_directory(resource),'meanReciprocityCorrelation')
  plt.clf()
  plt.plot(years,corrmeans)
  plt.title('Mean Correlation of Import/Export By Year')
  plt.xlabel('Year')
  plt.ylabel('Mean Correlation of Import/Export')
  directory = get_images_directory(resource)
  plt.savefig(directory+'meanReciprocityCorrelation.png')
  plt.clf()
  return 0
开发者ID:hoqqanen,项目名称:itn,代码行数:31,代码来源:networkExplorer.py


示例2: write_bill_catoxml

def write_bill_catoxml(bill_version_id, options):
  """Fetch the deepbills JSON for a bill version and save the CatoXML
  document embedded in it to the bill version's document directory."""
  bill_json = fetch_single_bill_json(bill_version_id)
  xml_body = extract_xml_from_json(bill_json)
  destination = document_filename_for(bill_version_id, "catoxml.xml")
  utils.write(xml_body, destination)

  return {'ok': True, 'saved': True}
开发者ID:JT5D,项目名称:congress,代码行数:7,代码来源:deepbills.py


示例3: panel

    def panel(self) :
        """
        Draw the side panel: one entry per trap (icon, name, price and
        wrapped description), then the level/money labels and the
        quit/start buttons, and finally refresh the dynamic parts.
        """

        self.disp.blit(IMG_SIDEPANEL_BG, (16*SQUARE, 16, 450, 496))
        self.btn_traps = []
        left = 16*SQUARE+65
        top = 20
        for trap in TRAPS :
            # Trap icon.
            self.disp.blit(trap[0], (left, top))

            # Rendered name, price and wrapped description lines.
            name = utils.write(trap[1], BLACK)
            price = utils.write(str(trap[3]), GRAY)
            lines = utils.formattext(trap[2], 35, BLACK, 15)

            self.disp.blit(name, (left+40, top+2))
            self.disp.blit(price, (left+275, top+2))

            offset = 20
            for line in lines :
                self.disp.blit(line, (left+40, top+offset))
                offset += 15

            # Clickable region covering this trap entry.
            self.btn_traps.append( (left, top, 330, offset+5) )

            top += 75

        self.disp.blit(IMG_LEVEL, RECT_LEVEL)
        self.disp.blit(IMG_MONEY, RECT_MONEY)
        self.disp.blit(IMG_LAB_QUIT, BTN_LAB_QUIT)
        self.disp.blit(IMG_LAB_START, BTN_LAB_START)

        self.updatepanel()
开发者ID:megyland,项目名称:projetisn,代码行数:34,代码来源:Lab.py


示例4: update_bill_version_list

def update_bill_version_list(only_congress):
    """Scan the cached FDsys BILLS sitemaps and write, for each bill, a
    text-versions.json file mapping every text-version code to its URL
    and last-modified date.

    only_congress: if truthy, restrict the scan to that congress's
    sitemap years and skip bills from other congresses.
    """
    bill_versions = {}

    # Which sitemap years should we look at?
    if not only_congress:
        sitemap_files = glob.glob(utils.cache_dir() + "/fdsys/sitemap/*/BILLS.xml")
    else:
        # If --congress=X is specified, only look at the relevant years.
        sitemap_files = [
            utils.cache_dir() + "/fdsys/sitemap/" + str(year) + "/BILLS.xml"
            for year in utils.get_congress_years(only_congress)
        ]
        sitemap_files = [f for f in sitemap_files if os.path.exists(f)]

    # For each year-by-year BILLS sitemap...
    for year_sitemap in sitemap_files:
        dom = etree.parse(year_sitemap).getroot()
        if dom.tag != "{http://www.sitemaps.org/schemas/sitemap/0.9}urlset":
            raise Exception("Mismatched sitemap type.")

        # Loop through each bill text version...
        # (`ns` is a module-level sitemap-namespace map.)
        for file_node in dom.xpath("x:url", namespaces=ns):
            # get URL and last modified date
            url = str(file_node.xpath("string(x:loc)", namespaces=ns))
            lastmod = str(file_node.xpath("string(x:lastmod)", namespaces=ns))

            # extract bill congress, type, number, and version from the URL
            m = re.match(r"http://www.gpo.gov/fdsys/pkg/BILLS-(\d+)([a-z]+)(\d+)(\D.*)/content-detail.html", url)
            if not m:
                raise Exception("Unmatched bill document URL: " + url)
            congress, bill_type, bill_number, version_code = m.groups()
            congress = int(congress)
            if bill_type not in utils.thomas_types:
                raise Exception("Invalid bill type: " + url)

            # If --congress=XXX is specified, only look at those bills.
            if only_congress and congress != only_congress:
                continue

            # Track the documents by congress, bill type, etc.
            bill_versions.setdefault(congress, {}).setdefault(bill_type, {}).setdefault(bill_number, {})[
                version_code
            ] = {"url": url, "lastmod": lastmod}

    # Output the bill version info. We can't do this until the end because we need to get
    # the complete list of versions for a bill before we write the file, and the versions
    # may be split across multiple sitemap files.

    for congress in bill_versions:
        for bill_type in bill_versions[congress]:
            for bill_number in bill_versions[congress][bill_type]:
                utils.write(
                    json.dumps(
                        bill_versions[congress][bill_type][bill_number],
                        sort_keys=True,
                        indent=2,
                        default=utils.format_datetime,
                    ),
                    output_for_bill(congress, bill_type, bill_number, "text-versions.json"),
                )
开发者ID:milimetric,项目名称:congress,代码行数:60,代码来源:fdsys.py


示例5: split_signatures

def split_signatures(pid, signatures=None):
    """Split one petition's signatures into per-day JSON files plus a
    stats.json summary (total count, per-day counts, latest signature).

    pid: petition id. signatures: optional pre-loaded list; when falsy,
    the list is loaded from data/api/signatures/<pid>.json.
    """
    if not signatures:
        source = os.getcwd() + "/data/api/signatures/" + pid + ".json"
        signatures = json.load(open(source, "r"))

    for sig in signatures:
        created = sig['created']
        sig['date'] = datetime.datetime.fromtimestamp(created).strftime("%y-%m-%d")
        sig['time'] = datetime.datetime.fromtimestamp(created).strftime("%H:%M:%S")
        # Drop the redundant type marker on ordinary signatures.
        if sig['type'] == "signature":
            sig.pop("type")

    dates = sorted({sig['date'] for sig in signatures})
    mostrecent = max(sig['created'] for sig in signatures)

    stats = {
        'total': len(signatures),
        'dates': [],
        'last': datetime.datetime.fromtimestamp(mostrecent).strftime("%y-%m-%d"),
        'laststamp': mostrecent
    }

    for day in dates:
        sigs = [sig for sig in signatures if sig['date'] == day]
        stats['dates'].append((day, len(sigs)))
        write(json.dumps(sigs), "api/signatures/" + pid + "/" + day + ".json")

    write(json.dumps(stats, indent=2), "api/signatures/" + pid + "/stats.json")
开发者ID:imclab,项目名称:petitions,代码行数:27,代码来源:whitehouse.py


示例6: write_report

def write_report(report):
  """Serialize an inspector-general report to JSON under the data
  directory and return its path relative to that directory."""
  data_path = "%s/%s/%s/report.json" % (report['inspector'], report['year'], report['report_id'])
  destination = "%s/%s" % (utils.data_dir(), data_path)
  utils.write(utils.json_for(report), destination)
  return data_path
开发者ID:MRumsey,项目名称:inspectors-general,代码行数:7,代码来源:inspector.py


示例7: write_bill_version_metadata

def write_bill_version_metadata(bill_version_id):
  """Build metadata for a bill text version from its MODS file (format
  URLs and issue date) and write it out as a JSON document."""
  bill_type, number, congress, version_code = utils.split_bill_version_id(bill_version_id)

  bill_version = {
    'bill_version_id': bill_version_id,
    'version_code': version_code,
    'urls': { },
  }

  mods_ns = {"mods": "http://www.loc.gov/mods/v3"}
  doc = etree.parse(document_filename_for(bill_version_id, "mods.xml"))

  # Map each <location><url> entry to a format key based on its label.
  for location in doc.xpath("//mods:location/mods:url", namespaces=mods_ns):
    label = location.attrib['displayLabel']
    if "HTML" in label:
      fmt = "html"
    elif "PDF" in label:
      fmt = "pdf"
    elif "XML" in label:
      fmt = "xml"
    else:
      fmt = "unknown"
    bill_version["urls"][fmt] = location.text

  bill_version["issued_on"] = doc.xpath("string(//mods:dateIssued)", namespaces=mods_ns)

  utils.write(
    json.dumps(bill_version, sort_keys=True, indent=2, default=utils.format_datetime),
    output_for_bill_version(bill_version_id)
  )

  return {'ok': True, 'saved': True}
开发者ID:GPHemsley,项目名称:congress,代码行数:33,代码来源:bill_versions.py


示例8: run

def run(options):
  """Scrape committee meeting schedules for both chambers and write each
  chamber's meetings to a JSON file, recycling GUIDs already assigned to
  Senate meetings in a previous run."""
  # Index current committees by thomas_id (and, for House committees, by
  # house_committee_id + "00"); turn each subcommittee list into a dict
  # keyed by the subcommittee's thomas_id.
  utils.require_congress_legislators_repo()
  committees = { }
  for committee in utils.yaml_load("congress-legislators/committees-current.yaml"):
    committees[committee["thomas_id"]] = committee
    if "house_committee_id" in committee:
      committees[committee["house_committee_id"] + "00"] = committee
    committee["subcommittees"] = dict(
      (sub["thomas_id"], sub) for sub in committee.get("subcommittees", []))

  for chamber in ("house", "senate"):
    # Reload the prior output (if any) so Senate meeting GUIDs persist.
    output_file = utils.data_dir() + "/committee_meetings_%s.json" % chamber
    existing_meetings = []
    if os.path.exists(output_file):
      existing_meetings = json.load(open(output_file))

    # Scrape the chamber-specific meeting source.
    if chamber == "senate":
      meetings = fetch_senate_committee_meetings(existing_meetings, committees, options)
    else:
      meetings = fetch_house_committee_meetings(existing_meetings, committees, options)

    # Write out.
    utils.write(json.dumps(meetings, sort_keys=True, indent=2, default=utils.format_datetime),
      output_file)
开发者ID:GPHemsley,项目名称:congress,代码行数:27,代码来源:committee_meetings.py


示例9: combine

def combine():
    """Condense per-runner Boston Marathon split times into two JSON
    files: a roster keyed by each runner's interpolated per-interval
    kilometer placement string, and per-interval totals of how many
    runners stood at each kilometer mark.

    Reads data/times/all.json; runners with no 5K split are skipped.
    Relies on module-level `segments`, `posts` and `INTERVAL`.
    """
    roster = defaultdict(list)    
    total = [defaultdict(int) for x in range(segments)]
    starts = {}
    data = json.load(open("data/times/all.json", "r"))
    duds = 0
    co = 0
    for runner in data:
        #print runner["bib number"], runner["5K"]
        #see if he/she showed up
        if "5K" not in runner or not runner["5K"][1]:
            duds += 1
            continue
        co += 1
        if co % 100 == 0:
            print co
        #placement will represent which marker he/she was closest to at each interval
        placement = ["0" for x in range(segments)]
        #stamps is the timestamps scraped from BAA.org
        stamps = [runner[x][1] for x in posts]
        marker = 0

        #fill in placement with most recent split time (intervals of 5K + half and finish)
        for c in range(segments):
            if c > 0:
                placement[c] = placement[c - 1]
            if marker < len(posts) and stamps[marker] and stamps[marker] < c * INTERVAL:
                placement[c] = posts[marker]
                marker += 1

        # Convert marker labels ("5K", "HALF", "Finish Net") into plain
        # kilometer integers (HALF -> 21, Finish Net -> 42).
        placement = [int(x.replace("K", "").replace("Finish Net", "42").replace("HALF", "21")) for x in placement]
        #print placement
        #print runner["bib number"]
        
        #calculate interpolations between kilometer marks

        #start at appropriate place for offset in starting point
        c = int(round(runner["0K"] / INTERVAL))
        while c < len(placement):
            if placement[c] == placement[-1] or c >= len(placement) - 2:
                break
            t = 1
            while c+t < len(placement) and placement[c + t] == placement[c]:
                t += 1
            #print c, t, placement[c+t], placement[c]
            # Linearly interpolate kilometers across the t intervals that
            # share the same recorded split value.
            step = float(placement[c+t]-placement[c]) / t
            for i in range(1, t):
                placement[c + i] = int(math.floor(placement[c + i] + i * step))
            c += t

        #print placement
        key = "_".join([str(x) for x in placement])
        roster[key].append(runner["bib number"])

        for c in range(segments):
            total[c][placement[c]] += 1
        

    write(json.dumps(roster, indent=2), "times/condensed.json")
    write(json.dumps(total, indent=2), "times/condensed_time.json")


示例10: main

def main():
    """Parse command-line options, search twitter for petition links,
    then dump the scrape log to the log directory."""
    parser = argparse.ArgumentParser(description="Retrieve petitions from We The People")
    parser.add_argument(
        "-m", "--max", metavar="INTEGER", dest="max", type=int, default=None,
        help="maximum pages of petitions to retrieve, default is 10, 100 per page")
    parser.add_argument(
        "-s", "--start", metavar="INTEGER", dest="start", type=int, default=1,
        help="starting page, 100 per page, default is 1")
    parser.add_argument(
        "-q", "--query", metavar="STRING", dest="query", type=str, default="whitehouse+petition",
        help="The query for searching twitter for petition links, default is 'whitehouse+petition'")
    opts = parser.parse_args()

    # Validate the numeric options before doing any work.
    if opts.max is not None and opts.max < 1:
        parser.error("How can I scrape less than one pages of twitter results? You make no sense! --max must be one or greater.")
    if opts.start < 1:
        parser.error("--start must be one or greater.")
    if len(sys.argv) <= 1:
        log('Running with default values. Use --h to see options.')

    search(opts.query, opts.start, opts.max)

    # Record the query and end timestamp, then persist the scrape log.
    scrapelog["query"] = opts.query
    scrapelog["end"] = datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
    write(json.dumps(scrapelog, indent=2), "log-tw-" + scrapelog["begin"] + ".json", log_dir())
    log("Done. Found total %i petitions" % (len(scrapelog["signatures"])))
开发者ID:imclab,项目名称:petitions,代码行数:26,代码来源:twitter.py


示例11: do

    def do(self):
        """Create git tag `self._name` at `self._revision`, optionally
        annotated with `self._message` and/or GPG-signed, then broadcast
        the command output and refresh the model on success."""
        log_msg = 'Tagging: "%s" as "%s"' % (self._revision, self._name)
        opts = {}
        if self._message:
            # Pass the tag message through a temp file (git tag -F <file>).
            opts['F'] = utils.tmp_filename('tag-message')
            utils.write(opts['F'], self._message)

        if self._sign:
            log_msg += ', GPG-signed'
            opts['s'] = True
            status, output = self.model.git.tag(self._name,
                                                self._revision,
                                                with_status=True,
                                                with_stderr=True,
                                                **opts)
        else:
            # Annotated tag (-a) only when a message was supplied.
            opts['a'] = bool(self._message)
            status, output = self.model.git.tag(self._name,
                                                self._revision,
                                                with_status=True,
                                                with_stderr=True,
                                                **opts)
        if 'F' in opts:
            # Remove the temporary message file.
            os.unlink(opts['F'])

        if output:
            log_msg += '\nOutput:\n%s' % output

        _notifier.broadcast(signals.log_cmd, status, log_msg)
        if status == 0:
            self.model.update_status()
开发者ID:dannyfeng,项目名称:gitGUI,代码行数:31,代码来源:cmds.py


示例12: fetch_version

def fetch_version(bill_version_id, options):
  """Fetch MODS-derived metadata (issue date, format URLs) for a bill
  text version and persist it as a JSON document."""
  logging.info("\n[%s] Fetching..." % bill_version_id)

  bill_type, number, congress, version_code = utils.split_bill_version_id(bill_version_id)

  issued_on, urls = fdsys.document_info_for(
    filename_for(bill_version_id),
    version_cache_for(bill_version_id, "mods.xml"),
    options)

  bill_version = {
    'issued_on': issued_on,
    'urls': urls,
    'version_code': version_code,
    'bill_version_id': bill_version_id
  }

  utils.write(
    json.dumps(bill_version, sort_keys=True, indent=2, default=utils.format_datetime),
    output_for_bill_version(bill_version_id)
  )

  return {'ok': True, 'saved': True}
开发者ID:ArlingtonHouse,项目名称:congress,代码行数:26,代码来源:bill_versions.py


示例13: main

def main():
    """Parse command-line options, scrape petitions, and persist the
    scrape log with an end timestamp."""
    parser = argparse.ArgumentParser(description="Retrieve petitions from We The People")
    parser.add_argument("-m", "--max", metavar="INTEGER", dest="max", type=int,
                        default=None, help="maximum number of petitions to retrieve")
    parser.add_argument("-s", "--start", metavar="INTEGER", dest="start", type=int,
                        default=1, help="starting page, 20 per page, default is 1")
    opts = parser.parse_args()

    # Validate the numeric options before doing any work.
    if opts.max is not None and opts.max < 1:
        parser.error("How can I scrape less than one petition? You make no sense! --max must be one or greater.")
    if opts.start < 1:
        parser.error("--start must be one or greater.")

    log("Found %i petitions" % (petitions(opts.start, opts.max)))

    # write log
    scrapelog["end"] = datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
    write(json.dumps(scrapelog, indent=2), "log-wh-" + scrapelog["begin"] + ".json", log_dir())
开发者ID:pallih,项目名称:petitions,代码行数:33,代码来源:petitions.py


示例14: process

def process(inFile, outFile, targets, algo):
    """Track `targets` through the video at `inFile`, writing an
    annotated video to outFile + ".avi" and per-frame normalized target
    boxes to outFile + ".txt".

    NOTE(review): the same `algo` object is appended once per target --
    presumably `algo.start` re-targets it each call, so multi-target
    tracking may share state; confirm against the tracker API.
    """
    capture = cv2.VideoCapture(inFile)
    retval, image = capture.read()
    locations = []
    if retval:
        writer = cv2.VideoWriter(outFile + ".avi", 
            fps=25,
            fourcc=cv2.cv.CV_FOURCC(*"DIVX"),
            frameSize=image.shape[0:2][::-1])
        algorithms = []
        for x in targets:
            algo.start(image, x)
            algorithms.append(algo)
            utils.drawTarget(image, algo.target)
        writer.write(image)

    # NOTE(review): if the first read failed, `image` is None and this
    # raises. numpy shape order is (rows, cols) = (h, w), so the names
    # here look swapped -- verify the [h, w, h, w] divisor below matches
    # the target-box coordinate order.
    w,h = image.shape[:2]
    while retval:       
        retval, image = capture.read()
        # Normalize the current target box to [0, 1] before storing.
        target = np.array(algo.target) / np.array([h, w, h, w], dtype=np.float32)
        locations.append(target)
        if retval:
            for algo in algorithms:
                algo.next(image)
                # Green box when the tracker reports a valid lock, blue otherwise.
                color = (255, 0, 0)
                if algo.valid:
                    color = (0, 255, 0)
                utils.drawTarget(image, algo.target, color)
            writer.write(image)

    utils.write(outFile + ".txt", inFile, locations)
开发者ID:snuderl,项目名称:VideoTracking,代码行数:31,代码来源:video.py


示例15: fetch_votes

def fetch_votes(session, rootdir):
    """Download every roll-call vote XML for a congressional session
    from GovTrack and write each one as JSON under
    rootdir/data/json/<chamber>/<session>/."""
    #get list of all votes from session from GovTrack
    votes = parse("http://www.govtrack.us/data/us/%s/rolls/" % session)

    for vote in [x for x in votes.xpath("//a/@href") if x[-4:] == ".xml"]:
        # File names starting with 'h' are House votes, otherwise Senate.
        chamber = "house" if vote[0] == 'h' else "senate"
        url = "http://www.govtrack.us/data/us/%s/rolls/%s" % (session, vote)
        doc = download(url, session + "/" + vote)
        # Escape bare ampersands so the XML parses cleanly.
        doc = doc.replace("&", "&amp;")
        try:
            markup = lxml.objectify.fromstring(doc)
        except Exception, e:
            print "Couldn't read", url
            print e
            continue
        data = {}
        data["rollcall"] = {}
        #walk through xml and collect key/value pairs
        for el in markup.getiterator():
            if el.attrib == {}:
                data[el.tag] = el.text
            elif el.tag == 'voter':
                data["rollcall"][el.attrib["id"]] = el.attrib["value"]
        print rootdir + "/data/json/%s/%s/%s.json" % (chamber, session, vote[:-4])
                
        write(json.dumps(data, indent=2), rootdir + "/data/json/%s/%s/%s.json" % (chamber, session, vote[:-4]))


示例16: __write_templates

 def __write_templates(self, project_name, dir_name):
     """
     Render the project's upstart and startup templates and write them
     into *dir_name* as upstart.conf and startup.sh.
     """
     upstart_body = self.__generate_project_template(project_name, 'upstart_template')
     startup_body = self.__generate_project_template(project_name, 'startup_template')
     write('%s/upstart.conf' % dir_name, upstart_body)
     write('%s/startup.sh' % dir_name, startup_body)
开发者ID:jness,项目名称:monotool,代码行数:8,代码来源:monotool.py


示例17: output_nomination

def output_nomination(nomination, options):
    """Write a nomination record to disk as formatted JSON."""
    logging.info("[%s] Writing to disk..." % nomination['nomination_id'])

    payload = json.dumps(nomination, sort_keys=True, indent=2, default=utils.format_datetime)
    destination = output_for_nomination(nomination['nomination_id'], "json")
    utils.write(payload, destination)
开发者ID:TTREN,项目名称:congress,代码行数:8,代码来源:nomination_info.py


示例18: write_bill_catoxml

def write_bill_catoxml(bill_version_id, options):
    """Fetch the deepbills JSON for a bill version and save the CatoXML
    it embeds to the version's catoxml filename."""
    destination = catoxml_filename_for(bill_version_id)
    xml_body = extract_xml_from_json(fetch_single_bill_json(bill_version_id))
    utils.write(xml_body, destination)

    return {"ok": True, "saved": True}
开发者ID:GeorgeMcIntire,项目名称:congress,代码行数:9,代码来源:deepbills.py


示例19: get_sitemap

def get_sitemap(year, collection, lastmod, options):
  """Gets a single sitemap, downloading it if the sitemap has changed.
  
  Downloads the root sitemap (year==None, collection==None), or
  the sitemap for a year (collection==None), or the sitemap for
  a particular year and collection. Pass lastmod which is the current
  modification time of the file according to its parent sitemap, which
  is how it knows to return a cached copy.
  
  Returns the sitemap parsed into a DOM.
  """
  
  # Construct the URL and the path to where to cache the file on disk.
  if year == None:
    url = "http://www.gpo.gov/smap/fdsys/sitemap.xml"
    path = "fdsys/sitemap/sitemap.xml"
  elif collection == None:
    url = "http://www.gpo.gov/smap/fdsys/sitemap_%s/sitemap_%s.xml" % (year, year)
    path = "fdsys/sitemap/%s/sitemap.xml" % year
  else:
    url = "http://www.gpo.gov/smap/fdsys/sitemap_%s/%s_%s_sitemap.xml" % (year, year, collection)
    path = "fdsys/sitemap/%s/%s.xml" % (year, collection)
    
  # Should we re-download the file? The lastmod seen at the previous
  # download is cached beside the sitemap itself.
  lastmod_cache_file = utils.cache_dir() + "/" + path.replace(".xml", "-lastmod.txt")
  if options.get("cached", False):
    # If --cached is used, don't hit the network.
    force = False
  elif not lastmod:
    # No *current* lastmod date is known for this file (because it is the master
    # sitemap file, probably), so always download.
    force = True
  else:
    # If the file is out of date or --force is used, download the file.
    cache_lastmod = utils.read(lastmod_cache_file)
    force = (lastmod != cache_lastmod) or options.get("force", False)
    
  if force:
    logging.warn("Downloading: %s" % url)
    
  # utils.download serves the cached copy when force is False.
  body = utils.download(url, path, utils.merge(options, {
    'force': force, 
    'binary': True
  }))
  
  if not body:
      raise Exception("Failed to download %s" % url)
      
  # Write the current last modified date to disk so we know the next time whether
  # we need to fetch the file.
  if lastmod and not options.get("cached", False):
    utils.write(lastmod, lastmod_cache_file)
  
  try:
    return etree.fromstring(body)
  except etree.XMLSyntaxError as e:
    raise Exception("XML syntax error in %s: %s" % (url, str(e)))
开发者ID:GPHemsley,项目名称:congress,代码行数:57,代码来源:fdsys.py


示例20: save_bill_search_state

def save_bill_search_state(saved_bills, search_state):
    """Cache each saved bill's search-result listing so --fast mode can
    detect major changes from the listing instead of re-parsing the bill."""
    for bill_id in saved_bills:
        if bill_id not in search_state:
            continue
        cache_path = utils.cache_dir() + "/" + bill_info.bill_cache_for(bill_id, "search_result.html")
        utils.write(search_state[bill_id], cache_path)
开发者ID:TTREN,项目名称:congress,代码行数:9,代码来源:bills.py



注:本文中的utils.write函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python utils.write_file函数代码示例发布时间:2022-05-26
下一篇:
Python utils.workflowInfo函数代码示例发布时间:2022-05-26
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap