本文整理汇总了Python中shinken.log.logger.debug函数的典型用法代码示例。如果您正苦于以下问题:Python debug函数的具体用法?Python debug怎么用?Python debug使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了debug函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: get_page
def get_page():
    """Build external commands from the POSTed check-result form lists.

    The WS arbiter receives parallel form lists (one entry per command):
    time_stamp, host_name, service_description, return_code and output.
    They are forwarded to get_commands() which composes the actual
    external command strings. On any failure an empty list is kept so
    the caller never sees a partial batch.
    """
    commands_list = []
    try:
        # Getting lists of informations for the commands.
        # (The redundant empty-list pre-initializations were removed:
        # each name is assigned unconditionally right below.)
        time_stamp_list = request.forms.getall(key="time_stamp")
        logger.debug("[WS_Arbiter] time_stamp_list: %s" % (time_stamp_list))
        host_name_list = request.forms.getall(key="host_name")
        logger.debug("[WS_Arbiter] host_name_list: %s" % (host_name_list))
        service_description_list = request.forms.getall(key="service_description")
        logger.debug("[WS_Arbiter] service_description_list: %s" % (service_description_list))
        return_code_list = request.forms.getall(key="return_code")
        logger.debug("[WS_Arbiter] return_code_list: %s" % (return_code_list))
        output_list = request.forms.getall(key="output")
        logger.debug("[WS_Arbiter] output_list: %s" % (output_list))
        commands_list = get_commands(
            time_stamp_list, host_name_list, service_description_list,
            return_code_list, output_list
        )
    except Exception as e:
        # 'except X as e' works on Python 2.6+ and Python 3 alike.
        logger.error("[WS_Arbiter] failed to get the lists: %s" % str(e))
        commands_list = []
开发者ID:axadil,项目名称:mod-ws-arbiter,代码行数:26,代码来源:module.py
示例2: set_value
def set_value(obj_ref, output=None, perfdata=None, return_code=None):
    """Inject a fabricated check result into the object behind obj_ref.

    Any of output / perfdata / return_code left to None falls back to the
    object's current value, so a trigger may override only part of the
    result. A forced check is launched and immediately rewritten into a
    'waitconsume' result so the scheduler consumes it as if it had run.
    """
    obj = get_object(obj_ref)
    if not obj:
        return
    output = output or obj.output
    perfdata = perfdata or obj.perf_data
    if return_code is None:
        return_code = obj.state_id
    logger.debug("[trigger] Setting %s %s %s for object %s" % (output, perfdata, return_code, obj.get_full_name()))
    if perfdata:
        output = output + ' | ' + perfdata
    now = time.time()
    # (The unused 'cls = obj.__class__' local was removed.)
    # Launch a forced check, then find it back among the in-progress checks
    i = obj.launch_check(now, force=True)
    for chk in obj.checks_in_progress:
        if chk.id == i:
            logger.debug("[trigger] I found the check I want to change")
            c = chk
            # Now we 'transform the check into a result'
            # So exit_status, output and status is eaten by the host
            c.exit_status = return_code
            c.get_outputs(output, obj.max_plugins_output_length)
            c.status = 'waitconsume'
            c.check_time = now
            # IMPORTANT: tag this check as from a trigger, so we will not
            # loop in an infinite way for triggers checks!
            c.from_trigger = True
开发者ID:JamesYuan,项目名称:shinken,代码行数:30,代码来源:trigger_functions.py
示例3: get_live_data_log
def get_live_data_log(self):
    """Like get_live_data, but for log objects"""
    # finalize the filter stacks
    self.mongo_time_filter_stack.and_elements(self.mongo_time_filter_stack.qsize())
    self.mongo_filter_stack.and_elements(self.mongo_filter_stack.qsize())
    if self.use_aggressive_sql:
        # Be aggressive, get preselected data from sqlite and do less
        # filtering in python. But: only a subset of Filter:-attributes
        # can be mapped to columns in the logs-table, for the others
        # we must use "always-true"-clauses. This can result in
        # funny and potentially ineffective sql-statements
        mongo_filter_func = self.mongo_filter_stack.get_stack()
    else:
        # Be conservative, get everything from the database between
        # two dates and apply the Filter:-clauses in python
        mongo_filter_func = self.mongo_time_filter_stack.get_stack()
    dbresult = []
    mongo_filter = mongo_filter_func()
    logger.debug("[Logstore MongoDB] Mongo filter is %s" % str(mongo_filter))
    # We can apply the filterstack here as well. we have columns and filtercolumns.
    # the only additional step is to enrich log lines with host/service-attributes
    # A timerange can be useful for a faster preselection of lines
    # HACK: the filter string is turned into a query dict via eval().
    # NOTE(review): this is only safe while the filter stack cannot contain
    # user-controlled text -- confirm before exposing it to untrusted clients.
    filter_element = eval('{ ' + mongo_filter + ' }')
    logger.debug("[LogstoreMongoDB] Mongo filter is %s" % str(filter_element))
    columns = ['logobject', 'attempt', 'logclass', 'command_name', 'comment', 'contact_name', 'host_name', 'lineno', 'message', 'plugin_output', 'service_description', 'state', 'state_type', 'time', 'type']
    if not self.is_connected == CONNECTED:
        logger.warning("[LogStoreMongoDB] sorry, not connected")
    else:
        # One Logline per matching document, ordered by (time, lineno)
        dbresult = [Logline([(c,) for c in columns], [x[col] for col in columns]) for x in self.db[self.collection].find(filter_element).sort([(u'time', pymongo.ASCENDING), (u'lineno', pymongo.ASCENDING)])]
    return dbresult
开发者ID:Caez83,项目名称:mod-logstore-mongodb,代码行数:31,代码来源:module.py
示例4: show_minemap
def show_minemap():
    """Render the minemap view data for the logged-in user.

    Returns the template context dict: navigation helper, effective search
    string, the page slice of matching items, and the page identifier.
    """
    user = app.request.environ['USER']
    # Apply search filter if exists, forcing it to host-type elements
    search = app.request.query.get('search', "type:host")
    if "type:host" not in search:
        # 'x not in s' instead of 'not x in s' (idiomatic form)
        search = "type:host " + search
    logger.debug("[WebUI-worldmap] search parameters '%s'", search)
    items = app.datamgr.search_hosts_and_services(search, user, get_impacts=False)
    # Fetch elements per page preference for user, default is 25
    elts_per_page = app.prefs_module.get_ui_user_preference(user, 'elts_per_page', 25)
    # We want to limit the number of elements shown on one page
    step = int(app.request.GET.get('step', elts_per_page))
    start = int(app.request.GET.get('start', '0'))
    end = int(app.request.GET.get('end', start + step))
    # If we overflow, come back to the first page
    total = len(items)
    if start > total:
        start = 0
        end = step
    navi = app.helper.get_navi(total, start, step=step)
    return {'navi': navi, 'search_string': search, 'items': items[start:end], 'page': "minemap"}
开发者ID:vizvayu,项目名称:mod-webui,代码行数:27,代码来源:minemap.py
示例5: manage_log_brok
def manage_log_brok(self, brok):
    """
    Parse a Shinken log brok to enqueue a log line for Index insertion.

    Lines matching the exclusion pattern are only logged and dropped;
    valid lines are converted to an Elasticsearch bulk-insert dict and
    appended to self.logs_cache.
    """
    d = date.today()
    # One index per day, e.g. '<prefix>-2014.01.31'
    index_name = self.index_prefix + "-" + d.strftime("%Y.%m.%d")
    line = brok.data["log"]
    # Raw string: '\[' is not a valid Python escape, so the non-raw form
    # only worked by accident and warns on Python 3.6+ (same pattern).
    if re.match(r"^\[[0-9]*\] [A-Z][a-z]*.:", line):
        # Match log which does NOT have to be stored
        logger.warning("[elastic-logs] do not store: %s", line)
        return
    logline = Logline(line=line)
    logline_dict = logline.as_dict()
    # Elasticsearch expects an ISO 8601 UTC timestamp in @timestamp
    logline_dict.update({"@timestamp": datetime.utcfromtimestamp(int(logline_dict["time"])).isoformat() + "Z"})
    values = {"_index": index_name, "_type": "shinken-logs", "_source": logline_dict}
    if logline.logclass != LOGCLASS_INVALID:
        logger.debug("[elastic-logs] store log line values: %s", values)
        self.logs_cache.append(values)
    else:
        logger.info("[elastic-logs] This line is invalid: %s", line)
    return
开发者ID:descrepes,项目名称:mod-elastic-logs,代码行数:26,代码来源:module.py
示例6: get_graph_uris
def get_graph_uris(self, elt, graphstart=None, graphend=None, duration=None, source='detail'):
    ''' Aggregate the get_graph_uris of all the submodules.

    The source parameter defines the source of the calling:
    Are we displaying graphs for the element detail page (detail),
    or a widget in the dashboard (dashboard)?

    If duration is not None, we consider it as a number of seconds to
    graph and we call the module's get_relative_graph_uris if it exists.
    If get_relative_graph_uris is not a module function we compute
    graphstart and graphend from the duration and call the module's
    get_graph_uris instead.
    If graphstart and graphend are given, we call the module's
    get_graph_uris directly.
    '''
    uris = []
    for mod in self.modules:
        if not duration:
            # Absolute window (or module defaults): delegate as-is
            uris.extend(mod.get_graph_uris(elt, graphstart, graphend, source))
        else:
            # Prefer the module's relative-duration API when available
            f = getattr(mod, 'get_relative_graph_uris', None)
            if f and callable(f):
                uris.extend(f(elt, duration, source))
            else:
                # Fall back: turn the duration into an absolute window
                # ending now
                graphend = time.time()
                graphstart = graphend - duration
                uris.extend(mod.get_graph_uris(elt, graphstart, graphend, source))
    logger.debug("[WebUI] Got graphs: %s", uris)
    # URL-encode each image source so it can be proxied through /graph
    for uri in uris:
        uri['img_src'] = '/graph?url=' + urllib.quote(uri['img_src'])
    return uris
开发者ID:Azef1,项目名称:mod-webui,代码行数:33,代码来源:graphs.py
示例7: do_pynag_con_init
def do_pynag_con_init(self, id, type='scheduler'):
    """(Re)open the HTTP connection to one satellite daemon link.

    id: index of the link in the links tab for this daemon *type*.
    type: daemon kind, e.g. 'scheduler'.
    NOTE(review): both parameter names shadow builtins; kept as-is for
    keyword-argument compatibility with existing callers.
    """
    # Get the good links tab for looping..
    links = self.get_links_from_type(type)
    if links is None:
        logger.debug('Type unknown for connection! %s', type)
        return
    if type == 'scheduler':
        # If sched is not active, I do not try to init
        # it is just useless
        is_active = links[id]['active']
        if not is_active:
            return
    # If we try to connect too much, we slow down our tests
    if self.is_connection_try_too_close(links[id]):
        return
    # Ok, we can now update it
    links[id]['last_connection'] = time.time()
    # DBG: print "Init connection with", links[id]['uri']
    # running_id is read here for debugging; unused below in this excerpt
    running_id = links[id]['running_id']
    # DBG: print "Running id before connection", running_id
    uri = links[id]['uri']
    try:
        con = links[id]['con'] = HTTPClient(uri=uri, strong_ssl=links[id]['hard_ssl_name_check'])
    except HTTPExceptions, exp:
        # But the multiprocessing module is not compatible with it!
        # so we must disable it immediately after
        logger.info("Connection problem to the %s %s: %s", type, links[id]['name'], str(exp))
        links[id]['con'] = None
        return
开发者ID:h4wkmoon,项目名称:shinken,代码行数:33,代码来源:brokerdaemon.py
示例8: get_instance
def get_instance(plugin):
    """Create the MySQL importer arbiter instance from the plugin config.

    Raises Exception when the python-mysqldb module is not installed.
    """
    logger.debug("[MySQLImport]: Get MySQL importer instance for plugin %s" % plugin.get_name())
    if not MySQLdb:
        raise Exception('Missing module python-mysqldb. Please install it.')
    host = plugin.host
    login = plugin.login
    password = plugin.password
    database = plugin.database
    # Every object type may have a dedicated SQL request configured on the
    # plugin as 'req<type>'; a missing attribute defaults to None. The
    # data-driven loop replaces 17 copy-pasted getattr() assignments.
    object_types = (
        'hosts', 'commands', 'timeperiods', 'notificationways', 'services',
        'servicegroups', 'contacts', 'contactgroups', 'hostgroups',
        'hostdependencies', 'servicedependencies', 'realms', 'schedulers',
        'pollers', 'brokers', 'reactionners', 'receivers',
    )
    reqlist = dict((t, getattr(plugin, 'req' + t, None)) for t in object_types)
    instance = MySQL_importer_arbiter(plugin, host, login, password, database, reqlist)
    return instance
开发者ID:dgilm,项目名称:mod-import-mysql,代码行数:29,代码来源:module.py
示例9: manage_unknown_service_check_result_brok
def manage_unknown_service_check_result_brok(self, b):
    """Turn an unknown-service check-result brok into InfluxDB points.

    The perfdata of the brok is converted to data points tagged with the
    host name and service description, then appended to the send buffer.
    """
    data = b.data
    tags = {
        "host_name": data['host_name'],
        "service_description": data['service_description'],
    }
    # Convert the raw perfdata string into tagged time-series points
    points = self.get_check_result_perfdata_points(
        b.data['perf_data'],
        b.data['time_stamp'],
        tags=tags
    )
    post_data = list(points)
    try:
        logger.debug(
            "[influxdb broker] Generated points: %s" % str(post_data))
    except UnicodeEncodeError:
        # Logging is best-effort: never fail the brok on an encoding issue
        pass
    self.extend_buffer(post_data)
开发者ID:cyberflow,项目名称:mod-influxdb,代码行数:25,代码来源:module.py
示例10: process_check_result
def process_check_result(self, databuffer, IV):
    """Decode one NSCA packet and forward the check result it carries.

    208 is the size of the fixed part of the packet: NSCA packets are
    208+512 (720) or 208+4096 (4304) bytes depending on the payload.
    Packets with a future or stale timestamp are dropped.
    """
    if not databuffer:
        logger.warning("[NSCA] Received an empty NSCA packet")
        return
    logger.debug("[NSCA] Received NSCA packet: %s", binascii.hexlify(databuffer))
    payload_length = len(databuffer) - 208
    # Membership test replaces the chained '!= 512 and != 4096'
    if payload_length not in (512, 4096):
        logger.warning("[NSCA] Received packet with unusual payload length: %d.", payload_length)
    # self.payload_length == -1 means 'accept any length'
    if self.payload_length != -1 and payload_length != self.payload_length:
        logger.warning("[NSCA] Dropping packet with incorrect payload length.")
        return
    (timestamp, rc, hostname, service, output) = self.read_check_result(databuffer, IV, payload_length)
    current_time = time.time()
    check_result_age = current_time - timestamp
    if timestamp > current_time and self.check_future_packet:
        logger.warning("[NSCA] Dropping packet with future timestamp.")
    elif check_result_age > self.max_packet_age:
        logger.info(
            "[NSCA] Dropping packet with stale timestamp - packet was %s seconds old. Timestamp: %s for %s/%s" % \
            (check_result_age, timestamp, hostname, service))
    else:
        self.post_command(timestamp, rc, hostname, service, output)
开发者ID:shinken-monitoring,项目名称:mod-nsca,代码行数:27,代码来源:module.py
示例11: get_commands
def get_commands(time_stamps, hosts, services, return_codes, outputs):
    """Composing a command list based on the information received in
    POST request.

    The five arguments are parallel lists, one entry per command. An
    empty or None service entry yields PROCESS_HOST_CHECK_RESULT,
    otherwise PROCESS_SERVICE_CHECK_RESULT. Aborts with HTTP 400 when
    the return-code and host lists disagree in length.
    """
    commands = []
    current_time_stamp = int(time.time())

    def _compose_command(t, h, s, r, o):
        """Simple function to create a command from the inputs"""
        # 'not s' already covers the empty string; the original extra
        # 's == ""' test was redundant.
        if not s:
            cmd = '[%s] PROCESS_HOST_CHECK_RESULT;%s;%s;%s' % (t if t is not None else current_time_stamp, h, r, o)
        else:
            cmd = '[%s] PROCESS_SERVICE_CHECK_RESULT;%s;%s;%s;%s' % (t if t is not None else current_time_stamp, h, s, r, o)
        logger.debug("[Ws_arbiter] CMD: %s" % (cmd))
        commands.append(cmd)

    # Trivial case: empty command list
    if not return_codes:
        return commands
    # Sanity check: if we get N return codes, we must have N hosts.
    # The other values could be None
    if len(return_codes) != len(hosts):
        logger.error("[Ws_arbiter] number of return codes (%d) does not match number of hosts (%d)" % (len(return_codes), len(hosts)))
        abort(400, "number of return codes does not match number of hosts")
    # NOTE: Python 2 map() pads the shorter lists with None, which
    # _compose_command relies on for missing timestamps/services.
    # Do not replace with zip() without handling that padding.
    map(_compose_command, time_stamps, hosts, services, return_codes, outputs)
    logger.debug("[Ws_arbiter] commands = %s" % (str(commands)))
    return commands
开发者ID:David-,项目名称:shinken,代码行数:31,代码来源:module.py
示例12: hook_tick
def hook_tick(self, brok):
    """Each second the broker calls the hook_tick function.
    Every tick try to flush the buffer.
    """
    if not self.buffer:
        # Truthiness test instead of '== []'
        return
    if self.ticks >= self.tick_limit:
        # If the number of ticks where data was not
        # sent successfully to the raw socket reaches the limit:
        # reset the buffer and the ticks so memory cannot grow forever.
        self.buffer = []
        self.ticks = 0
        return
    # Real memory size: drop the oldest lines when the buffer is too big
    if sum(x.__sizeof__() for x in self.buffer) > self.max_buffer_size:
        logger.debug("[RawSocket broker] Buffer size exceeded. I delete %d lines"
                     % self.lines_deleted)
        self.buffer = self.buffer[self.lines_deleted:]
    self.ticks += 1
    try:
        self.con.sendall('\n'.join(self.buffer).encode('UTF-8') + '\n')
    except IOError as err:
        # 'as err' form works on Python 2.6+ and Python 3
        logger.error("[RawSocket broker] Failed sending to the Raw network socket! IOError:%s"
                     % str(err))
        self.init()
        return
开发者ID:savoirfairelinux,项目名称:mod-rawsocket,代码行数:32,代码来源:module.py
示例13: set_ui_user_preference
def set_ui_user_preference(self, user, key, value):
    """Store one (key, value) UI preference for *user* in MongoDB.

    Returns None in every case; failures are only logged, and a failed
    Mongo operation marks the connection as broken for the next call.
    """
    if not self.is_connected:
        # Lazy reconnection before the first write
        if not self.open():
            logger.error("[WebUI-MongoDBPreferences] error during initialization, no database connection!")
            return None
    if not user:
        logger.warning("[WebUI-MongoDBPreferences] error set_ui_user_preference, no user!")
        return None
    try:
        # check a collection exist for this user
        u = self.db.ui_user_preferences.find_one({'_id': user.get_name()})
        if not u:
            # no collection for this user? create a new one
            self.db.ui_user_preferences.save({'_id': user.get_name(), key: value})
        r = self.db.ui_user_preferences.update({'_id': user.get_name()}, {'$set': {key: value}})
        # Maybe there was no doc there, if so, create an empty one
        if not r:
            # Maybe the user exist, if so, get the whole user entry
            u = self.db.ui_user_preferences.find_one({'_id': user.get_name()})
            if not u:
                logger.debug ("[WebUI-MongoDBPreferences] No user entry for %s, I create a new one", user.get_name())
                self.db.ui_user_preferences.save({'_id': user.get_name(), key: value})
            else:  # ok, it was just the key that was missing, just update it and save it
                u[key] = value
                logger.debug ("[WebUI-MongoDBPreferences] Just saving the new key in the user pref")
                self.db.ui_user_preferences.save(u)
    except Exception, e:
        # Any Mongo failure invalidates the cached connection state
        logger.warning("[WebUI-MongoDBPreferences] Exception: %s", str(e))
        self.is_connected = False
    return None
开发者ID:Azef1,项目名称:mod-webui,代码行数:33,代码来源:prefs.py
示例14: do_recheck
def do_recheck():
    """Schedule a forced check for the host/service given in the POST form.

    Builds a SCHEDULE_FORCED_SVC_CHECK or SCHEDULE_FORCED_HOST_CHECK
    external command and pushes it onto the arbiter's queue.
    Aborts with HTTP 400 when host_name is missing.
    """
    # Getting lists of informations for the commands
    # NOTE(review): when supplied by the form, time_stamp is a string,
    # while the default is an int -- both render fine via %s, but confirm
    # downstream consumers accept either.
    time_stamp = request.forms.get('time_stamp', int(time.time()))
    host_name = request.forms.get('host_name', '')
    service_description = request.forms.get('service_description', '')
    logger.debug("[WS_Arbiter] Timestamp '%s' - host: '%s', service: '%s'" % (time_stamp,
                                                                              host_name,
                                                                              service_description
                                                                              )
                 )
    if not host_name:
        abort(400, 'Missing parameter host_name')
    if service_description:
        # SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time>
        command = '[%s] SCHEDULE_FORCED_SVC_CHECK;%s;%s;%s\n' % (time_stamp,
                                                                 host_name,
                                                                 service_description,
                                                                 time_stamp)
    else:
        # SCHEDULE_FORCED_HOST_CHECK;<host_name>;<check_time>
        command = '[%s] SCHEDULE_FORCED_HOST_CHECK;%s;%s\n' % (time_stamp,
                                                               host_name,
                                                               time_stamp)
    # We check for auth if it's not anonymously allowed
    check_auth()
    # Adding commands to the main queue()
    logger.debug("[WS_Arbiter] command = %s" % command)
    ext = ExternalCommand(command)
    app.from_q.put(ext)
开发者ID:geektophe,项目名称:mod-ws-arbiter,代码行数:33,代码来源:module.py
示例15: linkify_hg_by_realms
def linkify_hg_by_realms(self, realms):
    """Resolve each hostgroup's realm name into a realm object and
    propagate it to member hosts that have no explicit realm.

    A hostgroup with an unknown realm name gets a configuration error
    and its realm reset to None. A host that already has a different
    realm keeps it, with a warning.
    """
    # Now we explode the realm value if we've got one
    # The group realm must not override a host one (warning?)
    for hg in self:
        if not hasattr(hg, 'realm'):
            continue
        # Maybe the value is void?
        if not hg.realm.strip():
            continue
        r = realms.find_by_name(hg.realm.strip())
        if r is not None:
            # Replace the name string with the realm object itself
            hg.realm = r
            logger.debug("[hostgroups] %s is in %s realm", hg.get_name(), r.get_name())
        else:
            err = "the hostgroup %s got an unknown realm '%s'" % (hg.get_name(), hg.realm)
            hg.configuration_errors.append(err)
            hg.realm = None
            continue
        for h in hg:
            if h is None:
                continue
            # Only push the group realm onto hosts that still carry the
            # default (i.e. no explicit realm of their own)
            if h.realm is None or h.got_default_realm:  # default value not hasattr(h, 'realm'):
                logger.debug("[hostgroups] apply a realm %s to host %s from a hostgroup rule (%s)", \
                             hg.realm.get_name(), h.get_name(), hg.get_name())
                h.realm = hg.realm
            else:
                if h.realm != hg.realm:
                    logger.warning("[hostgroups] host %s it not in the same realm than it's hostgroup %s", \
                                   h.get_name(), hg.get_name())
开发者ID:G2fx,项目名称:shinken,代码行数:32,代码来源:hostgroup.py
示例16: grab_package
def grab_package(pname):
    """Fetch the package *pname* from shinken.io and return its raw body.

    Exits the whole process with status 2 when the server does not
    answer with HTTP 200.
    """
    cprint('Grabbing : ', end='')
    cprint('%s' % pname, 'green')
    # Now really fetch it, honouring the configured proxy
    proxy = CONFIG['shinken.io']['proxy']
    api_key = CONFIG['shinken.io']['api_key']
    # Plain GET with a 10s connect and transfer timeout
    curl = pycurl.Curl()
    curl.setopt(curl.POST, 0)
    curl.setopt(curl.CONNECTTIMEOUT, 10)
    curl.setopt(curl.TIMEOUT, 10)
    if proxy:
        curl.setopt(curl.PROXY, proxy)
    curl.setopt(curl.URL, str('shinken.io/grab/%s' % pname))
    body = StringIO()
    curl.setopt(pycurl.WRITEFUNCTION, body.write)
    #curl.setopt(curl.VERBOSE, 1)
    curl.perform()
    status = curl.getinfo(pycurl.HTTP_CODE)
    curl.close()
    if status == 200:
        ret = body.getvalue()
        logger.debug("CURL result len : %d " % len(ret))
        return ret
    logger.error("There was a critical error : %s" % body.getvalue())
    sys.exit(2)
开发者ID:Electroni-K,项目名称:shinken,代码行数:30,代码来源:cli.py
示例17: post
def post(self, path, args, wait='short'):
    """POST *args* to self.uri + path, pickling and compressing each value.

    wait: 'short' uses self.timeout (simple ack queries); any other
    value uses self.data_timeout (long transfers such as huge brok
    receptions). Raises HTTPException on a pycurl connection error.
    """
    size = 0
    # Take args, pickle them and then compress the result
    for (k,v) in args.iteritems():
        args[k] = zlib.compress(cPickle.dumps(v), 2)
        size += len(args[k])
    # Ok go for it!
    logger.debug('Posting to %s: %sB' % (self.uri+path, size))
    c = self.con
    c.setopt(pycurl.HTTPGET, 0)
    c.setopt(c.POST, 1)
    # For the TIMEOUT, it will depends if we are waiting for a long query or not
    # long:data_timeout, like for huge broks receptions
    # short:timeout, like for just "ok" connection
    if wait == 'short':
        c.setopt(c.TIMEOUT, self.timeout)
    else:
        c.setopt(c.TIMEOUT, self.data_timeout)
    #if proxy:
    #    c.setopt(c.PROXY, proxy)
    # Pycurl want a list of tuple as args
    postargs = [(k,v) for (k,v) in args.iteritems()]
    c.setopt(c.HTTPPOST, postargs)
    c.setopt(c.URL, str(self.uri+path))
    # Ok now manage the response
    response = StringIO()
    c.setopt(pycurl.WRITEFUNCTION, response.write)
    #c.setopt(c.VERBOSE, 1)
    try:
        c.perform()
    except pycurl.error, error:
        errno, errstr = error
        raise HTTPException ('Connexion error to %s : %s' % (self.uri, errstr))
开发者ID:HubLot,项目名称:shinken,代码行数:35,代码来源:http_client.py
示例18: manage_initial_broks_done_brok
def manage_initial_broks_done_brok(self, b):
    """After the initial broks, fetch contact photos from AD/LDAP and
    write each one as <photo_dir>/<contact>.jpg.

    Contacts without an LDAP entry or without the photo attribute are
    skipped with a warning.
    """
    if self.con is None:
        # No LDAP connection available: nothing we can do
        return
    logger.info("[Active Directory UI] AD/LDAP: manage_initial_broks_done_brok, go for pictures")
    searchScope = ldap.SCOPE_SUBTREE
    ## retrieve all attributes - again adjust to your needs - see documentation for more options
    #retrieveAttributes = ["userPrincipalName", "thumbnailPhoto", "samaccountname", "email"]
    logger.info("[Active Directory UI] Contacts? %d" % len(self.app.datamgr.get_contacts()))
    for c in self.app.datamgr.get_contacts():
        logger.debug("[Active Directory UI] Doing photo lookup for contact: %s" % c.get_name())
        elts = self.find_contact_entry(c)
        if elts is None:
            logger.warning("[Active Directory UI] No ldap entry for %s" % c.get_name())
            continue
        # Ok, try to get photo from the entry
        try:
            photo = elts[self.photo_attr][0]
            try:
                # Write the raw photo bytes as a jpg named after the contact
                p = os.path.join(self.app.photo_dir, c.get_name()+'.jpg')
                f = open(p, 'wb')
                f.write(photo)
                f.close()
                logger.info("[Active Directory UI] Photo wrote for %s" % c.get_name())
            except Exception, exp:
                logger.error("[Active Directory UI] Cannot write %s : %s" % (p, str(exp)))
        except KeyError:
            # The entry exists but carries no photo attribute
            logger.warning("[Active Directory UI] No photo for %s" % c.get_name())
开发者ID:htgoebel,项目名称:shinken,代码行数:32,代码来源:active_directory_ui.py
示例19: open
def open(self):
    """
    Connect to the Mongo DB with configured URI.

    Execute a command to check if connected on master to activate immediate
    connection to the DB because we need to know if DB server is available.
    Update log rotation time to force a log rotation.

    Returns True on success, False when the server is unavailable, and
    raises MongoLogsError on any other failure.
    """
    self.con = MongoClient(self.uri, connect=False)
    logger.info("[mongo-logs] trying to connect MongoDB: %s", self.uri)
    try:
        # 'ismaster' forces an immediate round-trip despite connect=False
        result = self.con.admin.command("ismaster")
        logger.info("[mongo-logs] connected to MongoDB, admin: %s", result)
        logger.debug("[mongo-logs] server information: %s", self.con.server_info())
        self.db = getattr(self.con, self.database)
        logger.info("[mongo-logs] connected to the database: %s (%s)", self.database, self.db)
        self.is_connected = CONNECTED
        # Force a log rotation on the next pass
        self.next_logs_rotation = time.time()
        logger.info('[mongo-logs] database connection established')
    except ConnectionFailure as e:
        logger.error("[mongo-logs] Server is not available: %s", str(e))
        return False
    except Exception as e:
        # Bug fix: the original format string had no %s placeholder, so
        # the exception text passed as an argument was never rendered.
        logger.error("[mongo-logs] Could not open the database: %s", str(e))
        raise MongoLogsError
    return True
开发者ID:MPOWER4RU,项目名称:mod-mongo-logs,代码行数:31,代码来源:module.py
示例20: hook_save_retention
def hook_save_retention(self, daemon):
    """
    Main function that is called in the retention creation pass.

    Serializes every host and service retention dict coming from the
    daemon and stores each one under its own Redis key, applying the
    configured expiry when self.expire_time is set.
    """
    logger.debug("[RedisRetention] asking me to update retention objects")
    all_data = daemon.get_retention_data()
    hosts = all_data['hosts']
    services = all_data['services']

    def _store(key, obj):
        # Single write path shared by hosts and services, so the expiry
        # handling cannot drift between the two loops.
        val = cPickle.dumps(obj)
        if self.expire_time:
            self.rc.set(key, val, ex=self.expire_time)
        else:
            self.rc.set(key, val)

    for h_name in hosts:
        _store(self._get_host_key(h_name), hosts[h_name])
    for (h_name, s_desc) in services:
        _store(self._get_service_key(h_name, s_desc), services[(h_name, s_desc)])
    logger.info("Retention information updated in Redis")
开发者ID:andyxning,项目名称:mod-retention-redis,代码行数:30,代码来源:module.py
注:本文中的shinken.log.logger.debug函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论