This article collects and summarizes typical usage examples of the Python schedule.every function. If you have been wondering what schedule.every does, how to call it, and what real-world code that uses it looks like, the curated examples below should help.
Twenty code examples of the every function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
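Nearly all of the examples below follow the same core pattern: register a job with schedule.every(...)....do(...), then poll schedule.run_pending() in a loop. The following minimal sketch illustrates that pattern on its own; the job function and the intervals are placeholders, not taken from any of the projects listed.

import time
import schedule

def job():
    # Placeholder task; replace with real work.
    print("job ran")

# Register jobs: one on a fixed interval, one at a fixed time of day.
schedule.every(10).seconds.do(job)
schedule.every().day.at("10:30").do(job)

while True:
    schedule.run_pending()  # run any jobs that are due
    time.sleep(1)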
Example 1: RequestConsumption
def RequestConsumption():
    global frame_id
    global rooms_id
    global frame_sending_attempts
    global room_consumption_frame

    now = datetime.datetime.now()
    time_now = (int(now.strftime("%H")) * 60) + int(now.strftime("%M"))
    day = now.strftime("%a")
    date = now.strftime("%Y-%m-%d")

    sql.SelectColumn("distinct rooms.rooms_address, rooms.rooms_id")
    sql.FromTable("room_schedules")
    sql.JoinTable([["rooms", "rooms.rooms_id=room_schedules.rooms_id"]])
    sql.WhereCondition("(room_schedules.room_schedules_day like '%{}%' or room_schedules.room_schedules_date='{}') and room_schedules.room_schedules_end_time={}".format(day, date, time_now))
    room_schedules_result = sql.FetchAll()

    for row in room_schedules_result:  # each row is (rooms_address, rooms_id)
        rooms_id = row[1]
        frame_id = GetFrameID(sql)
        room_addresses_data = GetRoomAddresses(sql, row[0])
        frame_data = "{} {} 0".format(room_addresses_data, frame_id)
        consumption_frame = CreateFrame(frame_data, 4)
        frame_sending_attempts = 1
        schedule.every(1).seconds.do(AcknowledgementTimer)
        SendFrame()
Developer: nicolerey, Project: Thesis, Lines: 29, Source: request_consumption.py
Example 2: run_or_schedule
def run_or_schedule(job, schedule=False, exception_handler=None):
    """Runs a job and optionally schedules it to run later

    Args:
        job (func): The func to run
        schedule (bool): Schedule `func` to run in the future (default: False)
        exception_handler (func): The exception handler to wrap the function in
            (default: None)

    Examples:
        >>> job = partial(pprint, 'hello world')
        >>> run_or_schedule(job)
        u'hello world'
        >>> exception_handler = ExceptionHandler('[email protected]').handler
        >>> run_or_schedule(job, False, exception_handler)
        u'hello world'
    """
    if exception_handler and schedule:
        job = exception_handler(job)

    job()

    if schedule:
        sch.every(1).day.at(SCHEDULE_TIME).do(job)

        while True:
            sch.run_pending()
            time.sleep(1)
Developer: reubano, Project: swutils, Lines: 28, Source: swutils.py
Example 3: main
def main(arguments=None):
    '''Runs thumbor server with the specified arguments.'''
    if arguments is None:
        arguments = sys.argv[1:]

    server_parameters = get_server_parameters(arguments)
    config = get_config(server_parameters.config_path)
    configure_log(config, server_parameters.log_level.upper())
    validate_config(config, server_parameters)
    importer = get_importer(config)

    with get_context(server_parameters, config, importer) as context:
        application = get_application(context)
        run_server(application, context)

        if config.GC_INTERVAL and config.GC_INTERVAL > 0:
            schedule.every(config.GC_INTERVAL).seconds.do(gc_collect)

        try:
            logging.debug('thumbor running at %s:%d' % (context.server.ip, context.server.port))
            tornado.ioloop.IOLoop.instance().start()
        except KeyboardInterrupt:
            sys.stdout.write('\n')
            sys.stdout.write("-- thumbor closed by user interruption --\n")
Developer: scorphus, Project: thumbor, Lines: 26, Source: server.py
Example 4: prepare_jobs
def prepare_jobs(self, jobs):
    suffixed_names = {
        'week': 'weekly',
        'day': 'daily',
        'hour': 'hourly',
        'minute': 'minutes',
        'second': 'seconds',
    }
    for job in jobs:
        if not job.enabled:
            continue

        interval_name = job.time_unit.lower()

        if job.interval > 0:  # There can't be a job less than 0 (0 minutes? 0 seconds?)
            plural_interval_name = interval_name + 's'
            d = getattr(schedule.every(job.interval), plural_interval_name)
            d.do(self.run_job, job)
            Log.info("  Loading %s job: %s.", suffixed_names[interval_name], job.name)
        elif interval_name == 'day':
            schedule.every().day.at(job.at_time).do(self.run_job, job)
            Log.info("  Loading time-based job: " + job.name)
        else:
            d = getattr(schedule.every(), interval_name)
            d.do(self.run_job, job)
            Log.info("  Loading %s job: %s", interval_name, job.name)
Developer: mayaculpa, Project: hapi, Lines: 25, Source: smart_module.py
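Example 4 selects the time unit at run time with getattr instead of hard-coding .minutes or .hours. A stripped-down sketch of that dispatch pattern follows; the interval, unit, and job names are illustrative placeholders, not part of the hapi project.

import schedule

def job():
    print("tick")

interval, unit = 5, "minutes"  # e.g. loaded from a config entry or database row

# Equivalent to schedule.every(5).minutes.do(job).
getattr(schedule.every(interval), unit).do(job)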
Example 5: main
def main():
    global thingiverse
    global twitter

    auto_mode = True

    if DEBUG: print 'welcome'

    thingiverse.DEBUG = False
    thingiverse.txt_url_mode = False
    thingiverse.connect()

    print api.VerifyCredentials().name
    print '\n\nThingisimilar\n'

    schedule.every(2).minutes.do(exploring)

    if auto_mode:
        #main_loop()
        exploring()
        while True:
            schedule.run_pending()
            sleep(1.0)
    else:
        #while True:
        #num1 = raw_input('#1 --> ')
        #num2 = raw_input('#2 -->')
        num1 = test_things[0]
        num2 = test_things[4]
        standard_job(int(num1), int(num2))
Developer: RobotGrrl, Project: ThingiMix, Lines: 31, Source: thingisimilar.py
Example 6: schedule_raw
def schedule_raw(self, df_rawsources):
    # Iterate through all sources with 'raw' type
    for index, source in df_rawsources.iterrows():
        print "[SCHEDULER] Working with raw source: ", source['name']
        updateFrequency = source['updateFrequency']
        print "[SCHEDULER] Update frequency is <", updateFrequency, ">"
        updates = source['updates']
        if len(updates) > 0:
            # Get the most recent update
            lastUpdate = dp.parse(updates[0]['createdAt'])
            # Get the current time in seconds
            now = int(round(time.time()))
            # If time between now and the last update is greater than the
            # update interval, schedule the event
            if now - int(lastUpdate.strftime('%s')) > updateFrequency:
                source_id = source['_id']
                print "[SCHEDULER] Scheduling source <", source['name'], "> with id <", source_id, ">"
                schedule.every(updateFrequency).seconds.do(self.process_raw, source_id)

    # Process all scheduled items
    while True:
        schedule.run_pending()
        time.sleep(1)
Developer: jlr84, Project: MyCrawler, Lines: 25, Source: scheduler.py
Example 7: rules
def rules(cube, scheduler_type='minutes', scheduler_interval=59,
          dashboard=None):
    if scheduler_type:
        scheduler_type = cube.get('scheduler_type', 'minutes')
    if scheduler_interval:
        scheduler_interval = cube.get('scheduler_interval', 59)

    log_it("START REGISTER", "bin-scheduler")
    log_it("cube: {}".format(cube.get('slug')), "bin-scheduler")
    log_it("type: {}".format(scheduler_type), "bin-scheduler")
    log_it("interval: {}".format(scheduler_interval), "bin-scheduler")
    log_it("END REGISTER", "bin-scheduler")

    t = {}
    if scheduler_type == 'minutes':
        env = schedule.every(int(scheduler_interval))
        t = env.minutes
    elif scheduler_type == 'hour':
        env = schedule.every()
        t = env.hour
    elif scheduler_type == 'day':
        env = schedule.every()
        t = env.day

    try:
        t.do(job, slug=cube.get('slug'))
        jobn = cube.get("slug")
        if dashboard:
            jobn = u"{}-{}".format(cube.get("slug"), dashboard)
        onrun[jobn] = env
        register.append(jobn)
    except Exception, e:
        log_it("ERROR {}: {}".format(cube.get('slug'), e))
Developer: pengjia, Project: mining, Lines: 33, Source: scheduler.py
Example 8: dynamically_scrape_and_append_sales_data
def dynamically_scrape_and_append_sales_data(filename,
                                             interval,
                                             num_retries = 10):
    """
    Dynamically scrapes sales data and appends the data to a file by generating
    a list of links, checking it against an old list and only keeping new links,
    and scraping those links for sales data.
    """

    old_list = []

    def job(old_list):
        new_list = collect_all_featured_links()
        new_links = remove_old_links(old_list, new_list)
        bad_links = collect_bad_links(new_links)
        clean_links = remove_bad_links_from_link_list(bad_links, new_links)

        scrape_and_append_sales_data_from_featured_links(filename,
                                                         clean_links,
                                                         num_retries)

        old_list = new_list

    job(old_list)
    schedule.every(interval).hours.do(job)

    while True:
        schedule.run_pending()
        time.sleep(30)

    print "Dynamic scraping finished"
Developer: fgscivittaro, Project: ebay, Lines: 31, Source: collect_sales_data.py
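Note that do() forwards any extra positional and keyword arguments to the job each time it runs, so a parameterized job like the inner job(old_list) above could equally be registered as schedule.every(interval).hours.do(job, old_list). A minimal sketch of that behaviour, with purely illustrative names:

import schedule

def job(name, greeting="hello"):
    print(greeting, name)

# The extra arguments given to do() are passed to the job on every run.
schedule.every(1).hours.do(job, "world", greeting="hi")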
Example 9: ChangePort
def ChangePort(roomID):
    global frame_id
    global frame_sending_attempts
    global port_frame

    sql.GetWhereQuery("rooms", "rooms_id={}".format(roomID))
    sql.SelectColumn("rooms_port, rooms_address")
    rooms_result = sql.FetchOne()

    sql.GetWhereQuery("room_devices", "rooms_id={}".format(roomID))
    sql.SelectColumn("room_devices_port")
    room_devices_result = sql.FetchAll()

    port_data = "{} {}".format(rooms_result[0], len(room_devices_result))
    for device in room_devices_result:
        port_data = "{} {}".format(port_data, device[0])

    room_addresses_data = GetRoomAddresses(sql, rooms_result[1])
    frame_id = GetFrameID(sql)
    frame_data = "{} {} {}".format(room_addresses_data, frame_id, port_data)
    port_frame = CreateFrame(frame_data, 12)

    frame_sending_attempts = 1
    schedule.every(1).seconds.do(AcknowledgementTimer)
    SendFrame()
Developer: nicolerey, Project: Thesis, Lines: 28, Source: change_port.py
Example 10: routine
def routine(self):
    # install schedule
    for entity in self.entities:
        pieces = entity.getschedule().split(" ")
        if re.match("^\d*$", pieces[1]):
            every = schedule.every(int(pieces[1]))
            pieces = pieces[2:len(pieces)]
        else:
            every = schedule.every()
            pieces = pieces[1:len(pieces)]

        timedes = getattr(every, pieces[0])
        pieces = pieces[1:len(pieces)]
        if len(pieces) and pieces[0] == "at":
            finish = timedes.at(pieces[1])
        else:
            finish = timedes
        finish.do(self.monitor, entity)

    while True:
        time.sleep(1)
        for entity in self.entities:
            schedule.run_pending()
Developer: louis-she, Project: apimonitor, Lines: 25, Source: apimonitor.py
Example 11: main
def main():
    args = parser.parse_args()

    log = logging.getLogger()
    log.level = logging.INFO
    stream = logging.StreamHandler()
    file_handler = logging.FileHandler(args.logfile)
    log.addHandler(stream)
    log.addHandler(file_handler)

    with open(args.config) as f:
        config = yaml.safe_load(f)

    log.info('Connecting to database')
    database = connect_to_database(**config['mongodb'])
    log.info('Connection established')

    services = [
        service(auxdir=args.auxdir)
        for service in supported_services.values()
    ]

    schedule.every().day.at('15:00').do(
        fill_last_night, services=services, database=database
    )

    log.info('Schedule started')
    try:
        while True:
            schedule.run_pending()
            sleep(60)
    except (KeyboardInterrupt, SystemExit):
        pass
Developer: fact-project, Project: aux2mongodb, Lines: 33, Source: cron.py
Example 12: dynamically_scrape_combined_data
def dynamically_scrape_combined_data(data_filename,
                                     sales_filename,
                                     interval,
                                     num_retries = 10):
    """
    Dynamically scrapes a continuously updated list of unique clean links and
    appends the data to their respective files.
    """

    old_list = []

    def job(old_list):
        new_list = collect_all_featured_links()
        new_links = remove_old_links(old_list, new_list)
        bad_links = collect_bad_links(new_links)
        clean_links = remove_bad_links_from_link_list(bad_links, new_links)

        scrape_combined_data_from_all_featured_products(data_filename,
                                                        sales_filename,
                                                        clean_links,
                                                        num_retries)

        old_list = new_list

    job(old_list)
    schedule.every(interval).hours.do(job)

    while True:
        schedule.run_pending()
        time.sleep(30)

    print "Dynamic scraping finished"
Developer: fgscivittaro, Project: ebay, Lines: 32, Source: combined.py
Example 13: scheduler_init
def scheduler_init(parent):
    '''
    Schedule Init

    Start the main loop for the internal scheduler that
    ticks every second.

    --
    @param parent:int The PID of the parent.
    @return void
    '''

    # Define the jobs to run at which intervals
    schedule.every().minute.do(Reminder.run_remind_once)
    schedule.every().minute.do(Reminder.run_remind_recurring)

    # Start the main thread, polling the schedules
    # every second
    while True:

        # Check if the current parent pid matches the original
        # parent that started us. If not, we should end.
        if os.getppid() != parent:
            logger.error(
                'Killing scheduler as it has become detached from parent PID.')
            sys.exit(1)

        # Run the schedule
        schedule.run_pending()
        time.sleep(1)

    return
Developer: Methimpact, Project: hogar, Lines: 34, Source: Scheduler.py
Example 14: watch
def watch():
    # set up argument parsing
    parser = example.BigFixArgParser()
    parser.add_argument('-a', '--actions', required=False, help='List of actions to watch')
    parser.add_argument('-v', '--verbose', default=False, action="store_true", required=False, help='To see the full list of commands that contain watched actions')
    parser.add_argument('-t', '--time', default=60, required=False, help='To set the waiting period')
    parser.base_usage += """
    -a, --actions [ACTIONS/FILENAME]  Specify a list of actions to watch, seperated by comma(,);
                                      if FILENAME with .wal extension detected, will read the file to get the list.
    -v, --verbose                     To see the full list of commands that contain watched actions
    -t, --time [MINUTE]               A number specifing the waiting period between checks"""
    parser.description = 'Used to watch certain actions'
    ext = ".wal"
    args = parser.parse_args()

    args_actions = ""
    if ext in args.actions:
        actions_file = open(args.actions, 'r')
        for line in actions_file:
            args_actions += line
    else:
        args_actions = args.actions

    actions_list = args_actions.split(",")
    watched_actions = gen_regex(actions_list)
    action_record = {}
    for a in actions_list:
        action_record[a] = False

    t = int(args.time)
    gen_summary(action_record, watched_actions, args)
    schedule.every(t).minutes.do(gen_summary, action_record, watched_actions, args)
    while True:
        schedule.run_pending()
Developer: bigfix, Project: tools, Lines: 34, Source: watch_action.py
Example 15: main
def main():
    port = "5918"
    if len(sys.argv) > 1:
        port = sys.argv[1]
    socket = initiate_zmq(port)
    logging.basicConfig(filename='./log/ingest_lottery.log', level=logging.INFO)
    tz = pytz.timezone(pytz.country_timezones('cn')[0])
    schedule.every(30).seconds.do(run, socket, tz)
    while True:
        try:
            schedule.run_pending()
            time.sleep(1)
        except KeyboardInterrupt:
            now = datetime.now(tz)
            message = "CTRL-C to quit the program at [%s]" % now.isoformat()
            logging.info(message)
            break
        except Exception as e:
            now = datetime.now(tz)
            message = "Error at time [%s]" % now.isoformat()
            logging.info(message)
            logging.info(e)
            # reschedule the job
            schedule.clear()
            socket = initiate_zmq(port)
            schedule.every(30).seconds.do(run, socket, tz)
Developer: Tskatom, Project: Lottery, Lines: 27, Source: ingest_lottery.py
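Example 15 tears down and rebuilds the whole schedule with schedule.clear() after an error. The library also offers finer-grained control: do() returns a job handle that can be cancelled individually, and jobs can be tagged so that clear() removes only a matching subset. A minimal sketch, with an illustrative job and tag name:

import schedule

def job():
    print("tick")

handle = schedule.every(30).seconds.do(job).tag("ingest")

schedule.cancel_job(handle)  # remove just this job
schedule.clear("ingest")     # remove every job tagged "ingest"
schedule.clear()             # or wipe the entire schedule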
Example 16: start
def start(self):
    def make_reboot():
        if self.alarm.alarm_started == False:
            os.system("shutdown -r now")

    schedule.every().monday.at("05:00").do(make_reboot)
Developer: LOCOSP, Project: raptor, Lines: 7, Source: reboot_scheduler.py
Example 17: SetDailySchedule
def SetDailySchedule():
    FileMain("Setting daily schedule")
    date = datetime.datetime.now()
    date = date.strftime("%Y-%m-%d")

    sql_args = ["room_schedules_start_time, room_schedules_end_time", "room_schedules", "room_schedules_date='{}'".format(date)]
    room_schedules_result = SQLMain(sql_args, 1)

    if room_schedules_result != 0:
        for row in room_schedules_result:
            # Times are stored as minutes since midnight; schedule's at()
            # expects zero-padded "HH:MM" strings.
            hour_start_time = row[0] // 60
            minute_start_time = row[0] % 60
            start_time = "{:02d}:{:02d}".format(hour_start_time, minute_start_time)

            hour_end_time = row[1] // 60
            minute_end_time = row[1] % 60
            end_time = "{:02d}:{:02d}".format(hour_end_time, minute_end_time)

            schedule.every().day.at(start_time).do(DailyChangeOfStatusOn)
            schedule.every().day.at(end_time).do(DailyChangeOfStatusOff)
    return
Developer: nicolerey, Project: Thesis, Lines: 25, Source: set_daily_schedule.py
Example 18: run
def run(self):
    logging.info('CrawlerDaemon run')
    sqlite_session = get_session(self.config.database)
    orm_engines = sqlite_session.query(ORM_Engine).all()

    if not self.config.dry_run:
        if len(orm_engines) == 0:
            logging.debug('Crawler has no engines')

        # Start controllers in each thread
        for orm_engine in orm_engines:
            logging.info('Load orm_engine: %s' % orm_engine.name)
            engine = Engine(orm_engine)
            self.controllers[engine.name] = Controller(engine, sqlite_session)
            self.controllers[engine.name].start()

        # Start scheduling searches
        for orm_search in sqlite_session.query(Search).all():
            for engine in orm_search.engines:
                job = lambda: self.controllers[engine.name].queue.put(orm_search)
                schedule.every(orm_search.periodicity).seconds.do(job)
                logging.debug('Put %s to schedule with periodicity %i seconds' % (orm_search.name, orm_search.periodicity))

    self.httpd = HTTPD(self.config, self.controllers)
    self.httpd.start()

    while True:
        if not self.config.dry_run:
            schedule.run_pending()
        time.sleep(1)
Developer: gr, Project: CrapCrawler, Lines: 31, Source: crawlerdaemon.py
Example 19: handle
def handle(self, *args, **options):
    schedule.every().week.do(_fetch_mozillians)
    schedule.every().week.do(_fetch_countries)

    while True:
        schedule.run_pending()
        time.sleep(3600)
Developer: mozilla, Project: cardano, Lines: 7, Source: schedule.py
Example 20: run
def run(self, path_local_log=None, branch='next', sched='false', launch_pause='false'):
    """
    :param str path_local_log: Path to the local log file copied from the remote server. If ``None``, do not copy
     the remote log file.
    :param str branch: Target git branch to test.
    :param str sched: If ``'true'``, schedule the tests to run daily at 06:00. Otherwise, run them once.
    :param str launch_pause: If ``'true'``, pause at a breakpoint after launching the instance and mounting the data
     volume. Continuing from the breakpoint will terminate the instance and destroy the volume.
    """
    import schedule
    from logbook import Logger

    self.log = Logger('nesii-testing')

    self.path_local_log = path_local_log
    self.branch = branch
    self.launch_pause = launch_pause

    if self.launch_pause == 'true':
        self.log.info('launching instance then pausing')
        self._run_tests_(should_email=False)
    else:
        if sched == 'true':
            self.log.info('begin continuous loop')
            schedule.every().day.at("6:00").do(self._run_tests_, should_email=True)
            while True:
                schedule.run_pending()
                time.sleep(1)
        else:
            self.log.info('running tests once')
            self._run_tests_(should_email=True)
Developer: HydroLogic, Project: ocgis, Lines: 32, Source: fabfile.py
Note: The schedule.every examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many different developers; copyright remains with the original authors, and distribution or reuse is subject to each project's license. Do not reproduce without permission.