Python settings.get_settings Function Code Examples


This article collects and summarizes typical usage examples of the settings.get_settings function in Python. If you have been wondering how exactly to use get_settings, how to call it, or what real-world uses of it look like, the curated function examples below may help.



A total of 20 code examples of the get_settings function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
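The examples fall into two recurring patterns: some projects (grailbrowser, danceinstitute, pyaddressbook) call a no-argument get_settings() that returns an options object or dict, while the elife-bot starters call get_settings(ENV) keyed on a run environment. As a point of reference, here is a minimal, hypothetical sketch of the environment-keyed form; the class name, field names and values are illustrative assumptions only and are not taken from any of the quoted projects.

# settings.py -- hypothetical sketch of a module exposing get_settings()

class Settings(object):
    """Holds the configuration values for one run environment (illustrative only)."""
    def __init__(self, domain, default_task_list, setLevel="INFO"):
        self.domain = domain
        self.default_task_list = default_task_list
        self.setLevel = setLevel

_ENVIRONMENTS = {
    "dev": Settings("example-domain-dev", "DefaultTaskList-dev"),
    "live": Settings("example-domain-live", "DefaultTaskList-live"),
}

def get_settings(ENV="dev"):
    """Return the Settings object for the requested run environment."""
    return _ENVIRONMENTS[ENV]

# Typical usage, mirroring the examples below:
#   import settings as settingsLib
#   settings = settingsLib.get_settings("dev")
#   print(settings.domain)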

Example 1: get_userheader

def get_userheader():
    options = settings.get_settings()
    templates = []
    for fn in settings.get_settings().user_headers:
        filename = utils.which(fn, options.user_data_dirs)
        if filename:
            templates.append(USERHEADER_INFO % fn)
            templates.append(open(filename).read())
    return string.join(templates, '\n')
Developer ID: IamMomotaros, Project: grailbrowser, Lines of code: 9, Source file: PSStream.py


Example 2: validate_settings

def validate_settings(l=True, v=True):

    log_info("", l, v)
    log_info("Validate GFW-sync settings", l, v)
    #print ""
    #print "Validate GFW-sync settings"
    
    sets = settings.get_settings()
    errors = 0
    warnings = 0
    bucket = sets['folders']['default_bucket']
    bucket_drives = sets['bucket_drives']
    bucket = validate_bucket(bucket, bucket_drives, l, v)
    if not bucket:
        errors += 1

    default_srs = sets['spatial_references']['default_srs']
    default_srs = validate_srs(default_srs, l, v)
    if not default_srs:
        errors += 1

    gdb_srs = sets['spatial_references']['gdb_srs']
    gdb_srs = validate_srs(gdb_srs, l, v)
    if not gdb_srs:
        errors += 1

    return errors, warnings
Developer ID: CV-Gate, Project: gfw-sync, Lines of code: 27, Source file: validate_ini.py


Example 3: get_docs_from_SimpleDB

 def get_docs_from_SimpleDB(self, ENV = "dev", last_updated_since = None):
     """
     Get the array of docs from the SimpleDB provider
     """
     docs = []
 
     # Specify run environment settings
     settings = settingsLib.get_settings(ENV)
     
     db = dblib.SimpleDB(settings)
     db.connect()
     
     if(last_updated_since is not None):
         xml_item_list = db.elife_get_POA_delivery_S3_file_items(last_updated_since = last_updated_since)
     else:
         # Get all - not implemented for now to avoid mistakes running too many workflows
         pass
         
     for x in xml_item_list:
         tmp = {}
         name = x['name']
         tmp['document'] = name
         docs.append(tmp)
     
     return docs
Developer ID: IanMulvany, Project: elife-bot, Lines of code: 25, Source file: starter_PackagePOA.py


Example 4: main

def main():
    parser = OptionParser()
    parser.add_option("-e", "--env", default="dev", action="store", type="string", dest="env",
                      help="set the environment to run, either dev or live")
    (options, args) = parser.parse_args()
    if options.env:
        env = options.env

    global settings
    settings = settings_lib.get_settings(env)

    log_file = "process_dashboard_queue.log"
    global logger
    logger = log.logger(log_file, settings.log_level)

    # Simple connect
    queue = get_queue()

    pool = Pool(settings.event_queue_pool_size)

    while True:
        messages = queue.get_messages(num_messages=settings.event_queue_message_count, visibility_timeout=60,
                                      wait_time_seconds=20)
        if messages is not None:
            logger.info(str(len(messages)) + " message received")
            pool.map(process_message, messages)
        else:
            logger.info("No messages received")
Developer ID: jhroot, Project: ppp-dashboard, Lines of code: 28, Source file: process_dashboard_queue.py


Example 5: decide

def decide(ENV = "dev"):
	# Specify run environment settings
	settings = settingsLib.get_settings(ENV)
	
	# Decider event history length requested
	maximum_page_size = 100
	
	# Log
	identity = "decider_%s" % int(random.random() * 1000)
	logFile = "decider.log"
	#logFile = None
	logger = log.logger(logFile, settings.setLevel, identity)
	
	# Simple connect
	conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)

	token = None

	# Poll for a decision task
	while(True):
		if(token == None):
			logger.info('polling for decision...')
			
			decision = conn.poll_for_decision_task(settings.domain, settings.default_task_list, identity, maximum_page_size)
			
			# Check for a nextPageToken and keep polling until all events are pulled
			decision = get_all_paged_events(decision, conn, settings.domain, settings.default_task_list, identity, maximum_page_size)
			
			token = get_taskToken(decision)
			
			logger.info('got decision: [json omitted], token %s' % token)
			#logger.info('got decision: \n%s' % json.dumps(decision, sort_keys=True, indent=4))

			if(token != None):
				# Get the workflowType and attempt to do the work
				workflowType = get_workflowType(decision)
				if(workflowType != None):

					logger.info('workflowType: %s' % workflowType)

					# Instantiate and object for the workflow using eval
					# Build a string for the object name
					workflow_name = get_workflow_name(workflowType)
					
					# Attempt to import the module for the workflow
					if(import_workflow_class(workflow_name)):
						# Instantiate the workflow object
						workflow_object = get_workflow_object(workflow_name, settings, logger, conn, token, decision, maximum_page_size)
				
						# Process the workflow
						success = workflow_object.do_workflow()
						
						# Print the result to the log
						logger.info('%s success %s' % (workflow_name, success))
						
					else:
						logger.info('error: could not load object %s\n' % workflow_name)
						
		# Reset and loop
		token = None
Developer ID: IanMulvany, Project: elife-bot, Lines of code: 60, Source file: decider.py


Example 6: start

 def start(self, ENV = "dev", workflow = "S3Monitor"):
   # Specify run environment settings
   settings = settingsLib.get_settings(ENV)
   
   # Log
   identity = "starter_%s" % int(random.random() * 1000)
   logFile = "starter.log"
   #logFile = None
   logger = log.logger(logFile, settings.setLevel, identity)
   
   # Simple connect
   conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
   if(workflow):
     (workflow_id, workflow_name, workflow_version, child_policy, execution_start_to_close_timeout, input) = self.get_workflow_params(workflow, settings)
 
     logger.info('Starting workflow: %s' % workflow_id)
     try:
       response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, input)
 
       logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
       
     except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
       # There is already a running workflow with that ID, cannot start another
       message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
       print message
       logger.info(message)
Developer ID: imclab, Project: elife-bot, Lines of code: 26, Source file: starter_S3Monitor.py


Example 7: post

	def post( self, bot_name ):
		
		creds = twitter.get_twitter_creds( bot_name )

		if not self.authenticate_user( creds ):
			self.render_notloggedin()
		else:
			bot_settings = settings.get_settings( creds )
			bot_settings.learning_style = self.request.get( 'learnfrom' )
			bot_settings.learning_guru = self.request.get( 'guru_name' )
			bot_settings.locquacity_onschedule = self.request.get( 'locquacity_onschedule' ) == "true"
			bot_settings.locquacity_reply = self.request.get( 'locquacity_reply' ) == "true"
			bot_settings.locquacity_speakonnew = self.request.get( 'locquacity_speakonnew' ) == "true"
			bot_settings.learn_retrospectively = self.request.get( 'learn_retrospectively' ) == "true"

			gn = self.request.get( 'locquacity_greetnew' ) == "true"
			logging.debug( 'SettingsHandler.post(): locquacity_greetnew=%s, bot_settings.locquacity_greetnew=%s' % (gn, bot_settings.locquacity_greetnew) )
			if gn and not bot_settings.locquacity_greetnew:
				logging.debug( '-> fetch follower ids' )
				api = twitter.get_api( creds )
				follower_ids = api.followers_ids()
				creds.follower_ids = follower_ids
				creds.put()
			bot_settings.locquacity_greetnew = gn
			
			tweet_frequency = self.request.get( 'tweet_frequency' )
			if tweet_frequency is not None and len(tweet_frequency) > 0:
				bot_settings.tweet_frequency = float( tweet_frequency )
			tweet_chance = self.request.get( 'tweet_chance' )
			if tweet_chance is not None and len(tweet_chance) > 0:
				bot_settings.tweet_chance = float( tweet_chance )
			self.render_template( creds, bot_settings, { "saved" : True } )
			bot_settings.creds = creds
			bot_settings.put()
Developer ID: prehensile, Project: mimeomorph, Lines of code: 34, Source file: main.py


Example 8: __init__

  def __init__(self, settings):
    gtk.ScrolledWindow.__init__(self)
    self.set_border_width(4)
    self.settings = settings.get_settings()

    self.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)

    self.vbox = gtk.VBox(False, 5)
    self.hbox = gtk.HBox(False, 8)

    self.vbox.pack_start(self.hbox, True)

    self.left = gtk.VBox()
    self.right = gtk.VBox()

    self.hbox.pack_start(self.left, True)
    self.hbox.pack_start(self.right, True)

    self.create_form()

    viewport = gtk.Viewport()
    viewport.set_shadow_type(gtk.SHADOW_NONE)
    viewport.add(self.vbox)
    self.add(viewport)

    self.show_all()
Developer ID: arielj, Project: danceinstitute, Lines of code: 26, Source file: config.py


Example 9: main

def main(flag):
    global settings
    global env
    parser = OptionParser()
    parser.add_option("-e", "--env", default="dev", action="store", type="string", dest="env",
                      help="set the environment to run, either dev or live")
    (options, args) = parser.parse_args()
    if options.env:
        env = options.env

    settings = settings_lib.get_settings(env)
    env = env

    log_file = "queue_workflow_starter.log"
    global logger
    logger = log.logger(log_file, settings.setLevel)

    # Simple connect
    queue = get_queue()

    while flag.green():
        messages = queue.get_messages(1, visibility_timeout=60,
                                      wait_time_seconds=20)
        if messages:
            logger.info(str(len(messages)) + " message received")
            logger.info('message contents: %s', messages[0])
            process_message(messages[0])
        else:
            logger.debug("No messages received")

    logger.info("graceful shutdown")
Developer ID: elifesciences, Project: elife-bot, Lines of code: 31, Source file: queue_workflow_starter.py


Example 10: update_metadata

def update_metadata(in_fc, tech_title, gfw_env):

    api_url = settings.get_settings(gfw_env)['metadata']['api_url']
    layer_url = api_url + r'/' + tech_title
    layer_url = api_url + r'/' + 'wdpa_protected_areas'

    response = requests.get(layer_url)
    api_data = json.loads(response.text)

    md = arcpy_metadata.MetadataEditor(in_fc)

    md.title = escape_html(api_data['title'])
    md.purpose = escape_html(api_data['function'])
    md.abstract = escape_html(api_data['overview'])
    md.tags = api_data['tags'].split(",")
    md.extent_description = escape_html(api_data['geographic_coverage'])
    md.last_update = escape_html(api_data['date_of_content'])
    md.update_frequency = escape_html(api_data['frequency_of_updates'])
    md.citation = escape_html(api_data['citation'])
    md.limitation = escape_html(api_data['cautions'])
    md.source = escape_html(api_data['source'])
    md.scale_resolution = escape_html(api_data['resolution'])
    md.supplemental_information = escape_html(api_data['other'])

    md.finish()
Developer ID: sunank200, Project: gfw-sync2, Lines of code: 25, Source file: metadata.py


Example 11: start

  def start(self, ENV = "dev", limit = None):
    # Specify run environment settings
    settings = settingsLib.get_settings(ENV)
    
    # Log
    identity = "starter_%s" % int(random.random() * 1000)
    logFile = "starter.log"
    #logFile = None
    logger = log.logger(logFile, settings.setLevel, identity)
    
    # Simple connect
    conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
    
    # Start a workflow execution
    workflow_id = "SendQueuedEmail"
    workflow_name = "SendQueuedEmail"
    workflow_version = "1"
    child_policy = None
    execution_start_to_close_timeout = None

    if(limit):
      input = '{"data": {"limit": "' + limit + '"}}'
    else:
      input = None
    
    try:
      response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, input)

      logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
      
    except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
      # There is already a running workflow with that ID, cannot start another
      message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
      print message
      logger.info(message)
Developer ID: imclab, Project: elife-bot, Lines of code: 35, Source file: starter_SendQueuedEmail.py


Example 12: start

 def start(self, ENV = "dev"):
   # Specify run environment settings
   settings = settingsLib.get_settings(ENV)
   
   ping_marker_id = "cron_NewS3FiguresPDF"
   
   # Log
   logFile = "starter.log"
   logger = log.logger(logFile, settings.setLevel, ping_marker_id)
   
   # Data provider
   db = dblib.SimpleDB(settings)
   db.connect()
   
   # SWF meta data provider
   swfmeta = swfmetalib.SWFMeta(settings)
   swfmeta.connect()
   
   # Default, if cron never run before
   last_startTimestamp = 0
   
   # Get the last time this cron was run
   last_startTimestamp = swfmeta.get_last_completed_workflow_execution_startTimestamp(workflow_id = ping_marker_id)
 
   # Start a ping workflow as a marker
   self.start_ping_marker(ping_marker_id, ENV)
 
   # Check for S3 PDF files that were updated since the last run
   date_format = "%Y-%m-%dT%H:%M:%S.000Z"
   
   # Quick hack - subtract 30 minutes to not ignore the top of the hour
   #   the time between S3Monitor running and this cron starter
   last_startTimestamp_minus_30 = last_startTimestamp - (60*30)
   if(last_startTimestamp_minus_30 < 0):
     last_startTimestamp_minus_30 = 0
   time_tuple = time.gmtime(last_startTimestamp_minus_30)
   
   last_startDate = time.strftime(date_format, time_tuple)
   
   logger.info('last run %s' % (last_startDate))
   
   S3_item_list = db.elife_get_article_S3_file_items(file_data_type = "figures", latest = True, last_updated_since = last_startDate)
   
   logger.info('Figures PDF files updated since %s: %s' % (last_startDate, str(len(S3_item_list))))
 
   if(len(S3_item_list) <= 0):
     # No new PDF
     pass
   else:
     # Found new PDF files
     
     # Start a PublishPDF starter
     try:
       starter_name = "starter_PublishFiguresPDF"
       self.import_starter_module(starter_name, logger)
       s = self.get_starter_module(starter_name, logger)
       s.start(ENV = ENV, last_updated_since = last_startDate)
     except:
       logger.info('Error: %s starting %s' % (ping_marker_id, starter_name))
       logger.exception('')
Developer ID: IanMulvany, Project: elife-bot, Lines of code: 60, Source file: cron_NewS3FiguresPDF.py


Example 13: get_docs_from_SimpleDB

	def get_docs_from_SimpleDB(self, ENV = "dev", last_updated_since = None, doi_id = None):
		"""
		Get the array of docs from the SimpleDB provider
		"""
		docs = []
		
		# Specify run environment settings
		settings = settingsLib.get_settings(ENV)
		
		db = dblib.SimpleDB(settings)
		db.connect()
		
		if(last_updated_since is not None):
			xml_item_list = db.elife_get_article_S3_file_items(file_data_type = "suppl", latest = True, last_updated_since = last_updated_since)
		elif(doi_id is not None):
			xml_item_list = db.elife_get_article_S3_file_items(file_data_type = "suppl", latest = True, doi_id = doi_id)
		else:
			# Get all
			xml_item_list = db.elife_get_article_S3_file_items(file_data_type = "suppl", latest = True)
			
		for x in xml_item_list:
			tmp = {}
			elife_id = str(x['name']).split("/")[0]
			document = 'https://s3.amazonaws.com/' + x['item_name']
			tmp['elife_id'] = elife_id
			tmp['document'] = document
			docs.append(tmp)
		
		return docs
Developer ID: imclab, Project: elife-bot, Lines of code: 29, Source file: starter_PublishSuppl.py


Example 14: get_systemheader

def get_systemheader():
    options = settings.get_settings()
    fn = utils.which(
        "header.ps", list(options.user_data_dirs) + [SYSTEM_DATA_DIR])
    if fn:
        return open(fn).read()
    return "%%\%%  System header %s not found!\n%%" % fn
Developer ID: IamMomotaros, Project: grailbrowser, Lines of code: 7, Source file: PSStream.py


Example 15: start

 def start(self, ENV = "dev"):
   # Specify run environment settings
   settings = settingsLib.get_settings(ENV)
   
   ping_marker_id = "cron_NewS3FullArticle"
   
   # Log
   logFile = "starter.log"
   logger = log.logger(logFile, settings.setLevel, ping_marker_id)
   
   # Data provider
   db = dblib.SimpleDB(settings)
   db.connect()
   
   # SWF meta data provider
   swfmeta = swfmetalib.SWFMeta(settings)
   swfmeta.connect()
   
   last_startTimestamp = swfmeta.get_last_completed_workflow_execution_startTimestamp(workflow_id = ping_marker_id)
 
   # Start a ping workflow as a marker
   self.start_ping_marker(ping_marker_id, ENV)
 
   # Check for S3 XML files that were updated since the last run
   date_format = "%Y-%m-%dT%H:%M:%S.000Z"
   
   # Quick hack - subtract 15 minutes, 
   #   the time between S3Monitor running and this cron starter
   if last_startTimestamp is not None:
     last_startTimestamp_minus_15 = last_startTimestamp - (60*15)
   else:
     # On the first run ever the last start timestamp will be unavailable
     last_startTimestamp_minus_15 = time.gmtime() - (60*15)
     
   time_tuple = time.gmtime(last_startTimestamp_minus_15)
   
   last_startDate = time.strftime(date_format, time_tuple)
   
   logger.info('last run %s' % (last_startDate))
   
   # 
   file_list = db.elife_get_production_final_delivery_S3_file_items(last_updated_since = last_startDate)
   
   logger.info('Full Article files updated since %s: %s' % (last_startDate, str(len(file_list))))
 
   if(len(file_list) <= 0):
     # No new XML
     pass
   else:
     # Found new XML files
     
     # Start a PackagePOA starter
     try:
       starter_name = "starter_PublishFullArticleZip"
       self.import_starter_module(starter_name, logger)
       s = self.get_starter_module(starter_name, logger)
       s.start(ENV = ENV, last_updated_since = last_startDate)
     except:
       logger.info('Error: %s starting %s' % (ping_marker_id, starter_name))
       logger.exception('')
Developer ID: IanMulvany, Project: elife-bot, Lines of code: 60, Source file: cron_NewS3FullArticle.py


Example 16: start

	def start(self, ENV = "dev"):
		# Specify run environment settings
		settings = settingsLib.get_settings(ENV)
		
		# Log
		identity = "starter_%s" % int(random.random() * 1000)
		logFile = "starter.log"
		#logFile = None
		logger = log.logger(logFile, settings.setLevel, identity)
		
		# Simple connect
		conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
	
		start = True
	
		# Start a workflow execution
		workflow_id = "LensIndexPublish"
		workflow_name = "LensIndexPublish"
		workflow_version = "1"
		child_policy = None
		execution_start_to_close_timeout = str(60*45)
		input = None
	
		if(start):
			response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, input)
	
			logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
Developer ID: IanMulvany, Project: elife-bot, Lines of code: 27, Source file: starter_LensIndexPublish.py


Example 17: main

def main():

    # example on how to run:
    # From elife-bot folder run
    # python starter/starter_CopyGlencoeStillImages.py --env=dev --article-id=15224 --no-poa

    parser = ArgumentParser()
    parser.add_argument("-e", "--env", action="store", type=str, dest="env",
                        help="set the environment to run, e.g. dev, live, prod, end2end")
    parser.add_argument("-a", "--article-id", action="store", type=str, dest="article_id",
                        help="specify the article id to process")
    parser.add_argument("-p", "--poa", action="store_true", dest="poa",
                        help="Article is POA. If omitted it defaults to False.")
    parser.add_argument("-np", "--no-poa", action="store_false", dest="poa",
                        help="Article is NOT POA. If omitted it defaults to False.")
    parser.set_defaults(env="dev", article_id=None, poa=False)

    args = parser.parse_args()
    ENV = None
    if args.env:
        ENV = args.env
    article_id = None
    is_poa = False
    if args.article_id:
        article_id = args.article_id
    if args.poa:
        is_poa = args.poa

    import settings as settingsLib
    settings = settingsLib.get_settings(ENV)

    o = starter_CopyGlencoeStillImages()

    o.start(settings=settings, article_id=article_id, standalone=True, standalone_is_poa=is_poa)
Developer ID: gnott, Project: elife-bot, Lines of code: 34, Source file: starter_CopyGlencoeStillImages.py


Example 18: full_sync

def full_sync(addressbook=None):
    my_settings = settings.get_settings()
    if my_settings['resource'] == "":
        return

    # sqlite3 database connection
    conn = sqlite3.connect(os.path.join(glib.get_user_config_dir(), 'pyaddressbook', 'addressbook.db'))

    # local_status 0=nothing, 1=locally new, 2=locally deleted, 3=locally modified
    conn.execute('CREATE TABLE if not exists vcards (etag text primary key unique  not null, href text unique not null, vcard text not null, local_status smallint default 0)')
    conn.commit()

    available_href2etag = {}
    for href,etag in conn.execute('select href,etag from vcards where local_status<>1').fetchall():
        available_href2etag[href] = etag

    cdav = carddav2.PyCardDAV(verify=my_settings['verify'], resource=my_settings['resource'], user=my_settings['user'], passwd=my_settings['passwd'], write_support=True)
    abook = cdav.get_abook()

    deleted_vcards = available_href2etag.copy()
    server_modified_vcards = {}

    for href,etag in abook.items():
        if href in deleted_vcards:
            del deleted_vcards[href]
        if not href in available_href2etag or available_href2etag[href] != etag:
            server_modified_vcards[href] = etag

    # delete local vcards if they have been removed from the server side
    for href,etag in deleted_vcards.items():
        print("Removing contact for href: %s" % href)
        conn.execute('delete from vcards where href=?', (href,))
        if addressbook: addressbook.vcard_removed(href)


    # update local vcards that have been modified on the server side (regardless of the local status, i.e. locally modified vcards will be updated to the server side version)
    href_list = [ href for href,etag in server_modified_vcards.items() ]
    if len(href_list) > 0:
        print('Requesting modified/new vcards from server')
        dav_list = cdav._get_vcards(href_list)
        for dav in dav_list:
            href = dav['href']
            status = dav['status']
            etag = dav['etag']
            vcard = dav['vcard']
            print("Updating vcard for href %s since it was updated on the server-side" % href)
            if href in available_href2etag:
                conn.execute('update vcards set etag=?,href=?,vcard=?,local_status=0 where href=?', (etag,href,vcard,href))
                if addressbook: addressbook.vcard_updated(href, vcard)
            else:
                conn.execute('INSERT INTO vcards VALUES (?,?,?,0)', (etag,href, vcard))
                if addressbook: addressbook.vcard_added(href, etag, vcard)

    sync_local_changes(cdav, conn)
    if my_settings['write_vcf']:
        write_to_vcf(conn, my_settings['vcf_path'])

    conn.commit()
    conn.close()
Developer ID: finkandreas, Project: pyaddressbook, Lines of code: 59, Source file: sync.py


Example 19: _settings

def _settings():
    old_settings = get_settings()

    if request.method == 'POST':
        new_settings = request.get_json()
        for s in new_settings['settings']:
            name = s.get('name')
            value = s.get('value')
            if not name or value is None:
                flask.abort(400)
            setattr(settings, name, value)
        settings.commit()

    return jsonify({
        'settings': sorted([{'name': k, 'value': v}
                            for k, v in get_settings().items()], key=lambda d: d.get('name'))
    })
Developer ID: kearnh, Project: gtui, Lines of code: 17, Source file: egrit.py


Example 20: reload_settings

def reload_settings(config_path=None):
    global config, AUDIO_DIR, ANALYSIS_DIR
    visualizer_settings = settings.get_settings(config_path)
    config.ECHO_NEST_API_KEY = visualizer_settings['echo_nest_api_key']
    AUDIO_DIR = visualizer_settings['upload_dir']
    ANALYSIS_DIR = visualizer_settings['analysis_dir']
    check_dir(AUDIO_DIR, writable=True)
    check_dir(ANALYSIS_DIR, writable=True)
Developer ID: tylerwilliams, Project: visualizer2, Lines of code: 8, Source file: visualizer_server.py



Note: The settings.get_settings function examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution or use of the code should follow the corresponding project's License. Do not reproduce this article without permission.

