本文整理汇总了Python中shinken.log.logger.info函数的典型用法代码示例。如果您正苦于以下问题:Python info函数的具体用法?Python info怎么用?Python info使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了info函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: manage_service_check_resultup_brok
def manage_service_check_resultup_brok(self, b):
    """Turn a service check-result brok into a GLPI UPDATE query.

    If a host is defined locally (in shinken) and not in GLPI,
    we must not edit GLPI data!

    Returns a one-element list holding the UPDATE query, or an empty
    list when the brok does not concern a GLPI-known service.
    """
    if 'plugin_monitoring_servicescatalogs_id' not in b.data and \
            'plugin_monitoring_services_id' not in b.data:
        return list()
    logger.info("GLPI : data in DB %s " % b.data)
    # Work on a copy: the brok data is shared with other modules.
    new_data = copy.deepcopy(b.data)
    new_data['last_check'] = time.strftime('%Y-%m-%d %H:%M:%S')
    # These fields have no matching column in the GLPI tables.
    del new_data['perf_data']
    del new_data['output']
    del new_data['latency']
    del new_data['execution_time']
    # BUG FIX: the original used try/bare-except to pick the table, which
    # silently swallowed ANY error (not just the intended missing key).
    # A services-catalog entry takes precedence over a plain service.
    if 'plugin_monitoring_servicescatalogs_id' in new_data:
        new_data['id'] = new_data.pop('plugin_monitoring_servicescatalogs_id')
        table = 'glpi_plugin_monitoring_servicescatalogs'
    else:
        new_data['id'] = new_data.pop('plugin_monitoring_services_id')
        table = 'glpi_plugin_monitoring_services'
    where_clause = {'id': new_data['id']}
    query = self.db_backend.create_update_query(table, new_data, where_clause)
    return [query]
开发者ID:Morkxy,项目名称:shinken,代码行数:28,代码来源:glpidb_broker.py
示例2: launch_new_checks
def launch_new_checks(self):
    """Launch all queued SMS notifications through the Android API.

    Each check command must look like: android_sms PHONENUMBER TEXT.
    Every processed check is flagged 'done' so it is not picked again.
    """
    for chk in self.checks:
        if chk.status == 'queue':
            logger.info("[Android SMS] Launching SMS for command %s" % chk.command)
            elts = chk.command.split(' ')
            # Check the command call first: we need at least
            # 'android_sms PHONE TEXT' (3 tokens).
            if len(elts) < 3:
                chk.exit_status = 2
                # BUG FIX: the %s placeholder was never filled in.
                chk.get_outputs('The android SMS call %s is not valid. should be android_sms PHONENUMBER TEXT' % chk.command, 8012)
                chk.status = 'done'
                chk.execution_time = 0.1
                continue
            # Should be android_sms PHONE TEXT
            phone = elts[1]
            text = ' '.join(elts[2:])
            # Go call the SMS :)
            try:
                self.android.smsSend(phone, text)
            except Exception as exp:
                chk.exit_status = 2
                chk.get_outputs('The android SMS to %s got an error %s' % (phone, exp), 8012)
                chk.status = 'done'
                chk.execution_time = 0.1
                continue
            # BUG FIX: the original applied '%' to 'text' alone and passed
            # 'str(phone)' as an extra positional argument to logger.info(),
            # raising TypeError ("not enough arguments for format string").
            logger.info("[Android SMS] Send SMS %s to %s" % (text, phone))
            # And finish the notification
            chk.exit_status = 1
            chk.get_outputs('SMS sent to %s' % phone, 8012)
            chk.status = 'done'
            chk.execution_time = 0.01
开发者ID:achamo,项目名称:shinken,代码行数:35,代码来源:android_sms.py
示例3: hook_late_configuration
def hook_late_configuration(self, arb):
    """Read the arbiter configuration and fill the database."""
    resolver = MacroResolver()
    resolver.init(arb.conf)
    for service in arb.conf.services:
        # Only snmp_booster services are of interest here.
        if service.check_command.command.module_type != 'snmp_booster':
            continue
        # Serialize the service definition first.
        try:
            serialized = dict_serialize(service, resolver, self.datasource)
        except Exception as error:
            logger.error("[SnmpBooster] [code 0907] [%s,%s] "
                         "%s" % (service.host.get_name(),
                                 service.get_name(),
                                 str(error)))
            continue
        # We want to make a diff between arbiter insert and poller insert.
        # Some backend may need it.
        try:
            self.db_client.update_service_init(serialized['host'],
                                               serialized['service'],
                                               serialized)
        except Exception as error:
            logger.error("[SnmpBooster] [code 0909] [%s,%s] "
                         "%s" % (serialized['host'],
                                 serialized['service'],
                                 str(error)))
    logger.info("[SnmpBooster] [code 0908] Done parsing")
    # Disconnect from database
    self.db_client.disconnect()
开发者ID:titilambert,项目名称:mod-booster-snmp,代码行数:34,代码来源:snmpbooster_arbiter.py
示例4: manage_log_brok
def manage_log_brok(self, brok):
    """
    Parse a Shinken log brok to enqueue a log line for Index insertion
    """
    line = brok.data["log"]
    # Lines matching this pattern must NOT be stored.
    if re.match("^\[[0-9]*\] [A-Z][a-z]*.:", line):
        logger.warning("[elastic-logs] do not store: %s", line)
        return
    # Daily index name, e.g. "<prefix>-2015.01.31".
    index_name = self.index_prefix + "-" + date.today().strftime("%Y.%m.%d")
    logline = Logline(line=line)
    entry = logline.as_dict()
    entry["@timestamp"] = datetime.utcfromtimestamp(int(entry["time"])).isoformat() + "Z"
    if logline.logclass == LOGCLASS_INVALID:
        logger.info("[elastic-logs] This line is invalid: %s", line)
        return
    document = {"_index": index_name, "_type": "shinken-logs", "_source": entry}
    logger.debug("[elastic-logs] store log line values: %s", document)
    self.logs_cache.append(document)
开发者ID:descrepes,项目名称:mod-elastic-logs,代码行数:26,代码来源:module.py
示例5: publish_archive
def publish_archive(archive):
    """Upload a packaged archive to shinken.io via POST /push.

    Exits the process with code 2 on any transport or API error.
    """
    # Now really publish it
    api_key = CONFIG['shinken.io']['api_key']
    c = prepare_curl_connection('/push', post=1, verbose=1)
    form_data = [("api_key", api_key),
                 ("data",
                  (c.FORM_FILE, str(archive),
                   c.FORM_CONTENTTYPE, "application/x-gzip"))]
    c.setopt(c.HTTPPOST, form_data)
    response = StringIO()
    c.setopt(pycurl.WRITEFUNCTION, response.write)
    try:
        c.perform()
    except pycurl.error as exp:
        logger.error("There was a critical error : %s", exp)
        sys.exit(2)
        return
    http_code = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    if http_code != 200:
        logger.error("There was a critical error : %s", response.getvalue())
        sys.exit(2)
        return
    # The API escapes slashes; undo that before parsing the JSON answer.
    answer = json.loads(response.getvalue().replace('\\/', '/'))
    text = answer.get('text')
    if answer.get('status') == 200:
        logger.info(text)
    else:
        logger.error(text)
        sys.exit(2)
开发者ID:d9pouces,项目名称:shinken,代码行数:32,代码来源:cli.py
示例6: is_me
def is_me(self, lookup_name):
    """Tell whether this arbiter definition designates the local host.

    With a lookup_name, compare against our own name; otherwise compare
    the configured host_name against the local FQDN / hostname.
    """
    logger.info("And arbiter is launched with the hostname:%s "
                "from an arbiter point of view of addr:%s", self.host_name, socket.getfqdn())
    if not lookup_name:
        return self.host_name == socket.getfqdn() or self.host_name == socket.gethostname()
    return lookup_name == self.get_name()
开发者ID:andyxning,项目名称:shinken,代码行数:7,代码来源:arbiterlink.py
示例7: check_alive_instances
def check_alive_instances(self):
    """Spot dead or flooded external module instances and flag them for restart."""
    # Only for external modules; instances already flagged are skipped.
    for inst in self.instances:
        if inst in self.to_restart:
            continue
        if inst.is_external and not inst.process.is_alive():
            logger.error("The external module %s goes down unexpectedly!" % inst.get_name())
            logger.info("Setting the module %s to restart" % inst.get_name())
            # We clean its queues, they are no more useful
            inst.clear_queues(self.manager)
            self.to_restart.append(inst)
            # Ok, no need to look at queue size now
            continue
        # Now look for max queue size. If above the value, the module
        # probably has a huge problem and should bail out. Not a perfect
        # solution, more a watchdog. A max_queue_size of 0 disables it.
        if self.max_queue_size == 0:
            continue
        # Ok, go launch the dog!
        queue_size = 0
        try:
            queue_size = inst.to_q.qsize()
        except Exception:
            pass
        if queue_size > self.max_queue_size:
            logger.error("The external module %s got a too high brok queue size (%s > %s)!" % (inst.get_name(), queue_size, self.max_queue_size))
            logger.info("Setting the module %s to restart" % inst.get_name())
            # We clean its queues, they are no more useful
            inst.clear_queues(self.manager)
            self.to_restart.append(inst)
开发者ID:JamesYuan,项目名称:shinken,代码行数:30,代码来源:modulesmanager.py
示例8: process_check_result
def process_check_result(self, databuffer, IV):
    """Decode one NSCA packet and forward it as an external command.

    NSCA packets are 208 fixed header bytes plus a 512 or 4096 byte
    payload (720 or 4304 bytes total). Packets with the wrong payload
    length, a future timestamp, or a stale timestamp are dropped.
    """
    if not databuffer:
        logger.warning("[NSCA] Received an empty NSCA packet")
        return
    logger.debug("[NSCA] Received NSCA packet: %s", binascii.hexlify(databuffer))
    payload_length = len(databuffer) - 208
    if payload_length not in (512, 4096):
        logger.warning("[NSCA] Received packet with unusual payload length: %d.", payload_length)
    if self.payload_length != -1 and payload_length != self.payload_length:
        logger.warning("[NSCA] Dropping packet with incorrect payload length.")
        return
    (timestamp, rc, hostname, service, output) = self.read_check_result(databuffer, IV, payload_length)
    now = time.time()
    check_result_age = now - timestamp
    if timestamp > now and self.check_future_packet:
        logger.warning("[NSCA] Dropping packet with future timestamp.")
        return
    if check_result_age > self.max_packet_age:
        logger.info(
            "[NSCA] Dropping packet with stale timestamp - packet was %s seconds old. Timestamp: %s for %s/%s" % \
            (check_result_age, timestamp, hostname, service))
        return
    self.post_command(timestamp, rc, hostname, service, output)
开发者ID:shinken-monitoring,项目名称:mod-nsca,代码行数:27,代码来源:module.py
示例9: get_instance
def get_instance(plugin):
    """ Return a module instance for the plugin manager """
    logger.info("Get a NSCA arbiter module for plugin %s" % plugin.get_name())

    def int_setting(name, default):
        # Read an integer setting from the plugin configuration.
        return int(getattr(plugin, name, default))

    host = getattr(plugin, 'host', '127.0.0.1')
    if host == '*':
        host = ''
    port = int_setting('port', '5667')
    buffer_length = int_setting('buffer_length', '4096')
    payload_length = int_setting('payload_length', '-1')
    encryption_method = int_setting('encryption_method', '0')
    backlog = int_setting('backlog', '10')
    password = getattr(plugin, 'password', '')
    if password == "" and encryption_method != 0:
        logger.error("[NSCA] No password specified whereas there is a encryption_method defined")
        logger.warning("[NSCA] Setting password to dummy to avoid crash!")
        password = "dummy"
    # Cap the accepted packet age at 15 minutes.
    max_packet_age = min(int_setting('max_packet_age', '30'), 900)
    # NOTE(review): bool() on a configuration value is truthy even for the
    # string "0" — verify whether plugin attributes can be strings here.
    check_future_packet = bool(getattr(plugin, 'check_future_packet', 0))
    instance = NSCA_arbiter(plugin, host, port,
                            buffer_length, payload_length, encryption_method,
                            password, max_packet_age, check_future_packet,
                            backlog)
    return instance
开发者ID:shinken-monitoring,项目名称:mod-nsca,代码行数:28,代码来源:module.py
示例10: hook_late_configuration
def hook_late_configuration(self, arb):
    """Refresh host->host dependency links from the external mapping file.

    We will return external commands to the arbiter, so it can just
    manage it easily and in a generic way.
    """
    ext_cmds = []
    # If the file does not exist yet, launch the generation command and bail out.
    if not self._is_file_existing():
        self._launch_command()
        return
    self._is_mapping_file_changed()
    self._update_mapping()
    additions, removed = self._got_mapping_changes()
    for (father_k, son_k) in additions:
        son_type, son_name = son_k
        father_type, father_name = father_k
        logger.info("[Hot dependencies] Linked son : %s and its father: %s" % (son_name, father_name))
        # Only host->host links are handled here.
        if son_type != 'host' or father_type != 'host':
            continue
        son = arb.conf.hosts.find_by_name(son_name)
        father = arb.conf.hosts.find_by_name(father_name)
        if son is None or father is None:
            logger.debug("[Hot dependencies] Missing one of %s %s" % (son_name, father_name))
            continue
        logger.debug("[Hot dependencies] Found! %s %s" % (son_name, father_name))
        if not son.is_linked_with_host(father):
            logger.debug("[Hot dependencies] Doing simple link between %s and %s" % (son.get_name(), father.get_name()))
            # Add a dep link between the son and the father
            son.add_host_act_dependency(father, ['w', 'u', 'd'], None, True)
开发者ID:David-,项目名称:shinken,代码行数:30,代码来源:module.py
示例11: get_instance
def get_instance(plugin):
    """Build and return a RawSocket broker module for the plugin manager."""
    logger.info("Get a RawSocket broker for plugin %s" % plugin.get_name())
    # Catch errors
    # path = plugin.path
    return RawSocket_broker(plugin)
开发者ID:savoirfairelinux,项目名称:mod-rawsocket,代码行数:7,代码来源:module.py
示例12: hook_save_retention
def hook_save_retention(self, daemon):
    """Persist the daemon's retention data to a pickle flat file.

    The data is written to '<path>.tmp' first and then moved over the
    real path, so a failure in the middle of the dump cannot corrupt
    the previous retention file.
    """
    log_mgr = logger
    logger.info("[PickleRetentionGeneric] asking me to update the retention objects")
    # Now the flat file method
    try:
        # Open a file near the path, with .tmp extension
        # so in case of problem, we do not lose the old one.
        # BUG FIX: use a context manager so the file handle is closed even
        # when the dump itself raises, instead of leaking an open handle.
        with open(self.path + ".tmp", "wb") as f:
            # We get interesting retention data from the daemon itself
            all_data = daemon.get_retention_data()
            # And we save it on file :)
            cPickle.dump(all_data, f, protocol=cPickle.HIGHEST_PROTOCOL)
        # Now move the .tmp file to the real path
        shutil.move(self.path + ".tmp", self.path)
    except IOError as exp:
        log_mgr.log("Error: retention file creation failed, %s" % str(exp))
        return
开发者ID:radu-gheorghe,项目名称:shinken,代码行数:26,代码来源:pickle_retention_file_generic.py
示例13: init_http
def init_http(self):
    """Start the WS arbiter HTTP socket, keeping the server handle on self.srv."""
    logger.info("[WS_Arbiter] Starting WS arbiter http socket")
    try:
        self.srv = run(host=self.host, port=self.port, server='wsgirefselect')
    except Exception as exc:
        # Log, then let the caller deal with the failure.
        logger.error("[WS_Arbiter] Exception : %s" % str(exc))
        raise
开发者ID:geektophe,项目名称:mod-ws-arbiter,代码行数:7,代码来源:module.py
示例14: hook_save_retention
def hook_save_retention(self, daemon):
    """
    main function that is called in the retention creation pass
    """
    # Use one worker per CPU when the platform can count them.
    try:
        self.max_workers = cpu_count()
    except NotImplementedError:
        pass
    start = time.time()
    logger.debug("[MongodbRetention] asking me to update the retention objects")
    all_data = daemon.get_retention_data()
    workers = [Process(target=self.job, args=(all_data, rank, self.max_workers))
               for rank in xrange(self.max_workers)]
    for proc in workers:
        proc.start()
    # Allow 30s to join the sub-processes, should be enough
    for proc in workers:
        proc.join(30)
    logger.info("Retention information updated in Mongodb (%.2fs)" % (time.time() - start))
开发者ID:shinken-monitoring,项目名称:mod-retention-mongodb,代码行数:26,代码来源:module.py
示例15: hook_save_retention
def hook_save_retention(self, daemon):
    """
    main function that is called in the retention creation pass
    """
    logger.debug("[MemcacheRetention] asking me to update the retention objects")
    all_data = daemon.get_retention_data()
    hosts = all_data['hosts']
    services = all_data['services']
    # Now the flat file method
    for h_name in hosts:
        try:
            h = hosts[h_name]
            key = self.normalize_key("HOST-%s" % h_name)
            val = cPickle.dumps(h)
            self.mc.set(key, val)
        except Exception:
            # BUG FIX: log h_name, not key — 'key' was unbound (NameError)
            # when normalize_key failed on the first iteration and stale
            # afterwards. Also catch Exception instead of a bare except.
            logger.error("[MemcacheRetention] error while saving host %s" % h_name)
    for (h_name, s_desc) in services:
        try:
            key = self.normalize_key("SERVICE-%s,%s" % (h_name, s_desc))
            s = services[(h_name, s_desc)]
            val = cPickle.dumps(s)
            self.mc.set(key, val)
        except Exception:
            # BUG FIX: same as above — 'key' may be unbound or stale here.
            logger.error("[MemcacheRetention] error while saving service %s,%s" % (h_name, s_desc))
    self.mc.disconnect_all()
    logger.info("Retention information updated in Memcache")
开发者ID:David-,项目名称:shinken,代码行数:33,代码来源:module.py
示例16: load_config
def load_config(app):
    """Load plugin.cfg (next to this module) into the global ``params`` dict.

    Returns True on success, False when the file cannot be parsed.
    """
    global params
    import os
    from webui2.config_parser import config_parser
    try:
        plugin_dir = os.path.dirname(os.path.realpath(__file__))
        configuration_file = "%s/%s" % (plugin_dir, 'plugin.cfg')
        logger.info("[WebUI-logs] Plugin configuration file: %s", configuration_file)
        parser = config_parser('#', '=')
        merged = params.copy()
        merged.update(parser.parse_config(configuration_file))
        params = merged
        # Comma-separated settings become lists of stripped items.
        params['logs_type'] = [item.strip() for item in params['logs_type'].split(',')]
        if len(params['logs_hosts']) > 0:
            params['logs_hosts'] = [item.strip() for item in params['logs_hosts'].split(',')]
        if len(params['logs_services']) > 0:
            params['logs_services'] = [item.strip() for item in params['logs_services'].split(',')]
        logger.info("[WebUI-logs] configuration loaded.")
        logger.info("[WebUI-logs] configuration, fetching types: %s", params['logs_type'])
        logger.info("[WebUI-logs] configuration, hosts: %s", params['logs_hosts'])
        logger.info("[WebUI-logs] configuration, services: %s", params['logs_services'])
        return True
    except Exception as exp:
        logger.warning("[WebUI-logs] configuration file (%s) not available: %s", configuration_file, str(exp))
        return False
开发者ID:pombredanne,项目名称:mod-webui,代码行数:28,代码来源:logs.py
示例17: commit_and_rotate_log_db
def commit_and_rotate_log_db(self):
    """Submit a commit or rotate the complete database file.

    This function is called whenever the mainloop doesn't handle a request.
    The database updates are committed every second.
    Every day at 00:05 the database contents with a timestamp of past days
    are moved to their own datafiles (one for each day). We wait until 00:05
    because in a distributed environment even after 00:00 (on the broker host)
    we might receive data from other hosts with a timestamp dating from yesterday.
    """
    if self.read_only:
        return
    now = time.time()
    # Commit pass: at most once per second.
    if self.next_log_db_commit <= now:
        self.commit()
        logger.debug("[Logstore SQLite] commit.....")
        self.next_log_db_commit = now + 1
    # Rotation pass: nothing to do until the scheduled time.
    if self.next_log_db_rotate > now:
        return
    logger.info("[Logstore SQLite] at %s we rotate the database file" % time.asctime(time.localtime(now)))
    # Take the current database file
    # Move the messages into daily files
    self.log_db_do_archive()
    today = datetime.date.today()
    today0005 = datetime.datetime(today.year, today.month, today.day, 0, 5, 0)
    if now < time.mktime(today0005.timetuple()):
        nextrotation = today0005
    else:
        # See you tomorrow
        nextrotation = today0005 + datetime.timedelta(days=1)
    self.next_log_db_rotate = time.mktime(nextrotation.timetuple())
    logger.info("[Logstore SQLite] next rotation at %s " % time.asctime(time.localtime(self.next_log_db_rotate)))
开发者ID:rledisez,项目名称:mod-logstore-sqlite,代码行数:33,代码来源:module.py
示例18: main
def main(self):
    """Scheduler daemon entry point.

    Loads the configuration, applies the log level, daemonizes, loads the
    module manager, wires up the HTTP interfaces and runs the main loop.
    Any unrecoverable exception is reported with its traceback, then
    re-raised.
    """
    try:
        self.load_config_file()
        # Setting log level
        logger.setLevel(self.log_level)
        # Force the debug level if the daemon is said to start with such level
        if self.debug:
            logger.setLevel('DEBUG')
        self.look_for_early_exit()
        self.do_daemon_init_and_start()
        self.load_modules_manager()
        # Register the HTTP interfaces (main + statistics).
        self.http_daemon.register(self.interface)
        self.http_daemon.register(self.istats)
        #self.inject = Injector(self.sched)
        #self.http_daemon.register(self.inject)
        # NOTE(review): the main interface is registered above and then
        # immediately unregistered here — confirm this is intentional.
        self.http_daemon.unregister(self.interface)
        self.uri = self.http_daemon.uri
        logger.info("[scheduler] General interface is at: %s", self.uri)
        self.do_mainloop()
    except Exception, exp:
        # Report the full traceback before letting the exception bubble up.
        self.print_unrecoverable(traceback.format_exc())
        raise
开发者ID:OpenConceptConsulting,项目名称:shinken,代码行数:25,代码来源:schedulerdaemon.py
示例19: main
def main(self):
    """Module main loop: watch the FIFO and forward external commands.

    Polls the FIFO with select() (1s timeout). Commands read from the
    FIFO are pushed onto from_q; an empty read means the writer closed
    the pipe, so the FIFO is re-opened. EINTR makes the module exit.
    """
    self.set_proctitle(self.name)
    self.set_exit_handler()
    # NOTE(review): open() is presumably expected to set self.fifo —
    # confirm against the rest of the module.
    self.open()
    input = [self.fifo]
    while not self.interrupted:
        # No FIFO currently open: wait a bit, then retry (re-open happens below).
        if input == []:
            time.sleep(1)
            continue
        try:
            inputready, outputready, exceptready = select.select(input, [], [], 1)
        except select.error, e:
            if e.args[0] == errno.EINTR:
                logger.info("[%s] Received exit signal. Bailing out." % self.get_name())
                return
            # NOTE(review): any other select.error falls through with
            # 'inputready' unbound -> NameError below. Confirm/fix upstream.
        for s in inputready:
            ext_cmds = self.get()
            if ext_cmds:
                for ext_cmd in ext_cmds:
                    self.from_q.put(ext_cmd)
            else:
                # Empty read = EOF: the writer closed the FIFO, re-open it.
                self.fifo = self.open()
                if self.fifo is not None:
                    input = [self.fifo]
                else:
                    input = []
开发者ID:Squizou,项目名称:shinken,代码行数:31,代码来源:module.py
示例20: get_objects
def get_objects(self):
    """Fetch configuration objects from MySQL.

    Runs every query configured in self.reqlist and accumulates, per
    object type, a list of dicts holding the non-empty columns of each
    returned row. Returns {} when the connection was never established.
    """
    if not hasattr(self, 'conn'):
        # init phase failed: no connection attribute was ever set.
        logger.error("[MySQLImport]: Problem during init phase")
        return {}
    # Create variables for result
    r = {}
    cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)
    # For all parameters
    for k, v in self.reqlist.iteritems():
        r[k] = []
        if(v != None):
            result_set = {}
            logger.info("[MySQLImport]: Getting %s configuration from database" % (k))
            try:
                cursor.execute(v)
                result_set = cursor.fetchall()
            except MySQLdb.Error, e:
                # On query failure, log and fall through with an empty result set.
                logger.error("[MySQLImport]: Error %d: %s" % (e.args[0], e.args[1]))
            # Create set with result
            for row in result_set:
                h = {}
                for column in row:
                    # Skip NULL/empty columns so downstream defaults apply.
                    if row[column]:
                        h[column] = row[column]
                r[k].append(h)
    # NOTE(review): the closing 'return r' appears to have been truncated
    # from this extract — confirm against the original module.
开发者ID:dgilm,项目名称:mod-import-mysql,代码行数:31,代码来源:module.py
注:本文中的shinken.log.logger.info函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论