本文整理汇总了Python中shinken.log.logger.error函数的典型用法代码示例。如果您正苦于以下问题:Python error函数的具体用法?Python error怎么用?Python error使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了error函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: linkify_sd_by_s
def linkify_sd_by_s(self, hosts, services):
    """Resolve the service names stored on each servicedependency into
    real service objects.

    Both ends of every dependency are replaced in-place: the dependent
    service (dependent_host_name / dependent_service_description) and
    the master service (host_name / service_description).

    NOTE(review): when a lookup fails, an error is appended to
    self.configuration_errors but the attribute is still overwritten
    with None — verify downstream code tolerates that.

    :param hosts: hosts container (unused in the code shown here)
    :param services: container providing find_srv_by_name_and_hostname()
    """
    for sd in self:
        try:
            # First end: the dependent service.
            s_name = sd.dependent_service_description
            hst_name = sd.dependent_host_name
            # The new member list, in id
            s = services.find_srv_by_name_and_hostname(hst_name, s_name)
            if s is None:
                self.configuration_errors.append("Service %s not found for host %s"
                                                 % (s_name, hst_name))
            sd.dependent_service_description = s
            # Second end: the master service the dependency points at.
            s_name = sd.service_description
            hst_name = sd.host_name
            # The new member list, in id
            s = services.find_srv_by_name_and_hostname(hst_name, s_name)
            if s is None:
                self.configuration_errors.append("Service %s not found for host %s"
                                                 % (s_name, hst_name))
            sd.service_description = s
        except AttributeError, exp:
            # A dependency missing one of the four name attributes.
            logger.error("[servicedependency] fail to linkify by service %s: %s", sd, exp)
开发者ID:A3Sec,项目名称:shinken,代码行数:25,代码来源:servicedependency.py
示例2: grab_package
def grab_package(pname):
    """Download the package `pname` from shinken.io.

    Performs an HTTP GET on shinken.io/grab/<pname> through pycurl,
    honouring the configured proxy, and returns '' on network error.

    NOTE(review): this snippet appears truncated — the success path
    after c.perform() (returning the response body) is not shown.
    """
    cprint('Grabbing : ' , end='')
    cprint('%s' % pname, 'green')
    # Now really publish it
    proxy = CONFIG['shinken.io']['proxy']
    # api_key is read but not used in the code visible here.
    api_key = CONFIG['shinken.io']['api_key']
    # Ok we will push the file with a 5m timeout
    c = pycurl.Curl()
    # POST disabled: this is a plain GET request.
    c.setopt(c.POST, 0)
    c.setopt(c.CONNECTTIMEOUT, 30)
    c.setopt(c.TIMEOUT, 300)
    if proxy:
        c.setopt(c.PROXY, proxy)
    c.setopt(c.URL, str('shinken.io/grab/%s' % pname))
    # Capture the response body in memory instead of stdout.
    response = StringIO()
    c.setopt(pycurl.WRITEFUNCTION, response.write)
    #c.setopt(c.VERBOSE, 1)
    try:
        c.perform()
    except pycurl.error, exp:
        logger.error("There was a critical error : %s" % exp)
        return ''
开发者ID:bossjones,项目名称:shinken,代码行数:25,代码来源:cli.py
示例3: get_services_by_explosion
def get_services_by_explosion(self, servicegroups):
    """Recursively expand servicegroup_members into this group's members.

    The group tags itself while being expanded so that a cycle in the
    servicegroup definitions is detected and reported instead of
    recursing forever. Returns the members, or '' when none are set.
    """
    # Tag ourselves first: a descendant exploding us again means a loop.
    self.already_explode = True
    # rec_tag is cleared on every group before the explosion pass, so
    # finding it set here means we were reached through our own children.
    if self.rec_tag:
        logger.error("[servicegroup::%s] got a loop in servicegroup definition", self.get_name())
        return self.members if self.has('members') else ''
    # Not a loop: tag and walk the sub-groups.
    self.rec_tag = True
    for member_name in self.get_servicegroup_members():
        sub_group = servicegroups.find_by_name(member_name.strip())
        if sub_group is None:
            continue
        exploded = sub_group.get_services_by_explosion(servicegroups)
        if exploded is not None:
            self.add_string_member(exploded)
    return self.members if self.has('members') else ''
开发者ID:G2fx,项目名称:shinken,代码行数:30,代码来源:servicegroup.py
示例4: open
def open(self):
    """Open the MongoDB connection used by the livestatus log store.

    Uses a ReplicaSetConnection when replica_set is configured,
    otherwise a plain Connection (passing fsync only when the installed
    pymongo version supports it). Ensures the two log indexes exist,
    marks the store CONNECTED and schedules an immediate log rotation.

    :raises LiveStatusLogStoreError: when the database cannot be opened
    """
    try:
        if self.replica_set:
            self.conn = pymongo.ReplicaSetConnection(self.mongodb_uri, replicaSet=self.replica_set, fsync=self.mongodb_fsync)
        else:
            # Old versions of pymongo do not known about fsync
            if ReplicaSetConnection:
                self.conn = pymongo.Connection(self.mongodb_uri, fsync=self.mongodb_fsync)
            else:
                self.conn = pymongo.Connection(self.mongodb_uri)
        self.db = self.conn[self.database]
        # Indexes for the query patterns used by livestatus log lookups.
        self.db[self.collection].ensure_index([('host_name', pymongo.ASCENDING), ('time', pymongo.ASCENDING), ('lineno', pymongo.ASCENDING)], name='logs_idx')
        self.db[self.collection].ensure_index([('time', pymongo.ASCENDING), ('lineno', pymongo.ASCENDING)], name='time_1_lineno_1')
        if self.replica_set:
            pass
            # This might be a future option prefer_secondary
            #self.db.read_preference = ReadPreference.SECONDARY
        self.is_connected = CONNECTED
        # Setting the rotation time to now forces a rotation on the next pass.
        self.next_log_db_rotate = time.time()
    except AutoReconnect as err:
        # now what, ha?
        logger.error("[LogStoreMongoDB] LiveStatusLogStoreMongoDB.AutoReconnect %s" % err)
        # The mongodb is hopefully available until this module is restarted
        raise LiveStatusLogStoreError(err)
    except Exception as err:
        # If there is a replica_set, but the host is a simple standalone one
        # we get a "No suitable hosts found" here.
        # But other reasons are possible too.
        logger.error("[LogStoreMongoDB] Could not open the database: %s" % err)
        raise LiveStatusLogStoreError(err)
开发者ID:Caez83,项目名称:mod-logstore-mongodb,代码行数:30,代码来源:module.py
示例5: publish_archive
def publish_archive(archive):
    """Upload the given archive file to shinken.io/push via HTTP POST.

    The archive is sent as a multipart form upload ("data" field,
    application/x-gzip) together with the configured api_key.

    NOTE(review): the comment below says "10s timeout" but the actual
    values set are 30s connect / 300s total — confirm which is intended.
    NOTE(review): this snippet appears truncated — handling of the HTTP
    response after c.perform() is not shown.
    """
    # Now really publish it
    proxy = CONFIG['shinken.io']['proxy']
    api_key = CONFIG['shinken.io']['api_key']
    # Ok we will push the file with a 10s timeout
    c = pycurl.Curl()
    c.setopt(c.POST, 1)
    c.setopt(c.CONNECTTIMEOUT, 30)
    c.setopt(c.TIMEOUT, 300)
    if proxy:
        c.setopt(c.PROXY, proxy)
    c.setopt(c.URL, "http://shinken.io/push")
    c.setopt(c.HTTPPOST, [("api_key", api_key),
                          ("data",
                           (c.FORM_FILE, str(archive),
                            c.FORM_CONTENTTYPE, "application/x-gzip"))
                          ])
    # Capture the response body in memory instead of stdout.
    response = StringIO()
    c.setopt(pycurl.WRITEFUNCTION, response.write)
    c.setopt(c.VERBOSE, 1)
    try:
        c.perform()
    except pycurl.error, exp:
        logger.error("There was a critical error : %s" % exp)
        return
开发者ID:bossjones,项目名称:shinken,代码行数:26,代码来源:cli.py
示例6: get_instance
def get_instance(plugin):
    """Build a Syslog broker module instance from its plugin config.

    Reads optional 'facility' and 'priority' settings from the plugin
    (falling back to LOG_USER / LOG_INFO) and converts them to an
    aggregated syslog priority via SysLogHandler.encodePriority.

    NOTE(review): this snippet appears truncated — the instantiation
    and return of the module object are not shown.
    """
    name = plugin.get_name()
    logger.info("Get a Syslog broker for plugin %s" % (name))
    # syslog.syslog priority defaults to (LOG_INFO | LOG_USER)
    facility = syslog.LOG_USER
    priority = syslog.LOG_INFO
    # Get configuration values, if any
    if hasattr(plugin, 'facility'):
        facility = plugin.facility
    if hasattr(plugin, 'priority'):
        priority = plugin.priority
    # Ensure config values have a string type compatible with
    # SysLogHandler.encodePriority
    if type(facility) in types.StringTypes:
        facility = types.StringType(facility)
    if type(priority) in types.StringTypes:
        priority = types.StringType(priority)
    # Convert facility / priority (integers or strings) to aggregated
    # priority value
    sh = SysLogHandler()
    try:
        priority = sh.encodePriority(facility, priority)
    except TypeError, e:
        # Bad facility/priority values: keep the defaults computed above.
        logger.error("[%s] Couldn't get syslog priority, "
                     "reverting to defaults" % (name))
开发者ID:staute,项目名称:shinken,代码行数:29,代码来源:module.py
示例7: get_ui_availability
def get_ui_availability(self, elt, range_start=None, range_end=None):
    """Fetch availability records for a host or service from MongoDB.

    Builds an $and query on hostname (plus service description for
    services) and the optional day_ts time window, then reads matching
    documents from the availability collection, newest day first.

    NOTE(review): this snippet appears truncated — the final return of
    `records` is not shown.

    :param elt: host or service object (my_type drives the query shape)
    :param range_start: optional lower bound on day_ts (inclusive)
    :param range_end: optional upper bound on day_ts (inclusive)
    """
    import pymongo
    if not self.db:
        logger.error("[mongo-logs] error Problem during init phase, no database connection")
        return None
    logger.debug("[mongo-logs] get_ui_availability, name: %s", elt)
    query = [{"hostname": elt.host_name}]
    if elt.__class__.my_type == 'service':
        query.append({"service": elt.service_description})
    if range_start:
        query.append({'day_ts': {'$gte': range_start}})
    if range_end:
        query.append({'day_ts': {'$lte': range_end}})
    query = {'$and': query}
    logger.debug("[mongo-logs] Fetching records from database with query: '%s'", query)
    records = []
    try:
        for log in self.db[self.hav_collection].find(query).sort([
                ("day",pymongo.DESCENDING),
                ("hostname",pymongo.ASCENDING),
                ("service",pymongo.ASCENDING)]):
            # Strip the Mongo internal id so records are plain dicts.
            if '_id' in log:
                del log['_id']
            records.append(log)
        logger.debug("[mongo-logs] %d records fetched from database.", len(records))
    except Exception, exp:
        logger.error("[mongo-logs] Exception when querying database: %s", str(exp))
开发者ID:vizvayu,项目名称:mod-webui,代码行数:31,代码来源:logs.py
示例8: get_hosts_by_explosion
def get_hosts_by_explosion(self, hostgroups):
    """Recursively expand hostgroup_members into this group's hosts.

    The group tags itself while being expanded so that a cycle in the
    hostgroup definitions is detected and reported instead of recursing
    forever. Returns self.get_hosts() in every case.
    """
    # Tag ourselves first: a descendant exploding us again means a loop.
    self.already_explode = True
    # rec_tag is cleared on every group before the explosion pass, so
    # finding it set here means we were reached through our own children.
    if self.rec_tag:
        logger.error("[hostgroup::%s] got a loop in hostgroup definition", self.get_name())
        return self.get_hosts()
    # Not a loop: tag and walk the sub-groups.
    self.rec_tag = True
    for member_name in self.get_hostgroup_members():
        sub_group = hostgroups.find_by_name(member_name.strip())
        if sub_group is None:
            continue
        exploded = sub_group.get_hosts_by_explosion(hostgroups)
        if exploded is not None:
            self.add_string_member(exploded)
    return self.get_hosts()
开发者ID:G2fx,项目名称:shinken,代码行数:25,代码来源:hostgroup.py
示例9: open
def open(self):
    """
    Connect to the Mongo DB with configured URI.
    Execute a command to check if connected on master to activate immediate connection to
    the DB because we need to know if DB server is available.
    Update log rotation time to force a log rotation

    :return: True when connected, False when the server is not available
    :raises MongoLogsError: on any unexpected error while opening the DB
    """
    self.con = MongoClient(self.uri, connect=False)
    logger.info("[mongo-logs] trying to connect MongoDB: %s", self.uri)
    try:
        # 'ismaster' forces an immediate round-trip, so we know right
        # away whether the server is reachable.
        result = self.con.admin.command("ismaster")
        logger.info("[mongo-logs] connected to MongoDB, admin: %s", result)
        logger.debug("[mongo-logs] server information: %s", self.con.server_info())
        self.db = getattr(self.con, self.database)
        logger.info("[mongo-logs] connected to the database: %s (%s)", self.database, self.db)
        self.is_connected = CONNECTED
        # Setting the rotation time to now forces a log rotation.
        self.next_logs_rotation = time.time()
        logger.info('[mongo-logs] database connection established')
    except ConnectionFailure as e:
        logger.error("[mongo-logs] Server is not available: %s", str(e))
        return False
    except Exception as e:
        # Bug fix: the original format string had no %s placeholder, so
        # the exception text passed as a lazy argument was never logged.
        logger.error("[mongo-logs] Could not open the database: %s", str(e))
        raise MongoLogsError
    return True
开发者ID:MPOWER4RU,项目名称:mod-mongo-logs,代码行数:31,代码来源:module.py
示例10: manage_log_brok
def manage_log_brok(self, b):
    """Store a log brok line into the SQLite logs table.

    Lines matching the header pattern below are ignored, as are invalid
    loglines. When do_not_log_successive_ok is set, an OK state is not
    inserted if the previous stored state for the same host/service/class
    was already OK.

    :param b: brok whose data['log'] holds the raw log line
    """
    if self.read_only:
        return
    data = b.data
    line = data['log']
    if re.match("^\[[0-9]*\] [A-Z][a-z]*.:", line):
        # Match log which NOT have to be stored
        # print "Unexpected in manage_log_brok", line
        return
    try:
        logline = Logline(line=line)
        values = logline.as_tuple()
        if logline.logclass != LOGCLASS_INVALID:
            insert_log = True
            # values[12] is the state field of the logline tuple.
            current_state = int(values[12])
            if self.do_not_log_successive_ok and current_state == 0:
                # SECURITY(review): SQL built with %s string formatting —
                # values come from parsed log lines; prefer parameterized
                # queries if self.execute supports them.
                dbresult = self.execute("SELECT state FROM logs WHERE host_name='%s' AND service_description='%s' AND class=%d ORDER BY time DESC LIMIT 1" % (values[6],values[11],int(values[2])))
                if len(dbresult) > 0 and dbresult[0][0] == 0:
                    insert_log = False
            if insert_log:
                self.execute('INSERT INTO LOGS VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', values)
    except LiveStatusLogStoreError, exp:
        logger.error("[Logstore SQLite] An error occurred: %s", str(exp.args[0]))
        logger.error("[Logstore SQLite] DATABASE ERROR!!!!!!!!!!!!!!!!!")
开发者ID:rledisez,项目名称:mod-logstore-sqlite,代码行数:25,代码来源:module.py
示例11: check_alive_instances
def check_alive_instances(self):
    """Detect dead or overloaded external module instances and flag them
    for restart.

    Two watchdog checks per instance not already flagged:
    - the external worker process died unexpectedly;
    - its input queue grew past max_queue_size (0 disables this check).
    A flagged instance gets its queues cleared (they are useless now)
    and is appended to self.to_restart.
    """
    # Only for external
    for inst in self.instances:
        if inst not in self.to_restart:
            if inst.is_external and not inst.process.is_alive():
                logger.error("The external module %s goes down unexpectedly!" % inst.get_name())
                logger.info("Setting the module %s to restart" % inst.get_name())
                # We clean its queues, they are no more useful
                inst.clear_queues(self.manager)
                self.to_restart.append(inst)
                # Ok, no need to look at queue size now
                continue
            # Now look for max queue size. If above value, the module should
            # have a huge problem and so bailout. It's not a perfect solution,
            # more a watchdog.
            # If max_queue_size is 0, don't check this
            if self.max_queue_size == 0:
                continue
            # Ok, go launch the dog!
            queue_size = 0
            try:
                queue_size = inst.to_q.qsize()
            except Exception:
                # qsize() is not implemented on all platforms (e.g. OS X);
                # keep the default of 0 in that case.
                pass
            if queue_size > self.max_queue_size:
                logger.error("The external module %s got a too high brok queue size (%s > %s)!" % (inst.get_name(), queue_size, self.max_queue_size))
                logger.info("Setting the module %s to restart" % inst.get_name())
                # We clean its queues, they are no more useful
                inst.clear_queues(self.manager)
                self.to_restart.append(inst)
开发者ID:JamesYuan,项目名称:shinken,代码行数:30,代码来源:modulesmanager.py
示例12: is_correct
def is_correct(self):
    """Validate this daterange: the day must be a known weekday name and
    the base Daterange checks must also pass."""
    day_ok = self.day in Daterange.weekdays
    if not day_ok:
        logger.error("Error: %s is not a valid day", self.day)
    # The base-class check always runs (it may report its own errors),
    # exactly as the original's `b &= ...` did.
    base_ok = Daterange.is_correct(self)
    return day_ok & base_ok
开发者ID:Rack42,项目名称:shinken,代码行数:7,代码来源:daterange.py
示例13: hook_tick
def hook_tick(self, brok):
    """Each second the broker calls the hook_tick function
    Every tick try to flush the buffer

    In pickle mode, the buffered metrics are pickled, framed with a
    4-byte big-endian length header and sent to Carbon in one packet.
    The buffer is dropped (data lost) once ticks reach tick_limit, and
    cleared after a successful send. On IOError the buffer is kept so
    the next tick can retry.
    """
    if self.use_pickle:
        if self.ticks >= self.tick_limit:
            # If the number of ticks where data was not
            # sent successfully to Graphite reaches the bufferlimit.
            # Reset the buffer and reset the ticks
            self.buffer = []
            self.ticks = 0
            return
        self.ticks += 1
        # Format the data
        # Carbon's pickle receiver expects: <4-byte length><pickled payload>.
        payload = cPickle.dumps(self.buffer)
        header = struct.pack("!L", len(payload))
        packet = header + payload
        try:
            self.con.sendall(packet)
        except IOError, err:
            # Keep the buffer: we retry on the next tick until tick_limit.
            logger.error(
                "[Graphite broker] Failed sending to the Graphite Carbon instance network socket! IOError:%s"
                % str(err)
            )
            return
        # Flush the buffer after a successful send to Graphite
        self.buffer = []
开发者ID:jstoja,项目名称:shinken,代码行数:31,代码来源:graphite_broker.py
示例14: create_pack
def create_pack(self, buf, name):
if not json:
logger.warning("[Pack] cannot load the pack file '%s': missing json lib", name)
return
# Ok, go compile the code
try:
d = json.loads(buf)
if not 'name' in d:
logger.error("[Pack] no name in the pack '%s'", name)
return
p = Pack({})
p.pack_name = d['name']
p.description = d.get('description', '')
p.macros = d.get('macros', {})
p.templates = d.get('templates', [p.pack_name])
p.path = d.get('path', 'various/')
p.doc_link = d.get('doc_link', '')
p.services = d.get('services', {})
p.commands = d.get('commands', [])
if not p.path.endswith('/'):
p.path += '/'
# Ok, add it
self[p.id] = p
except ValueError, exp:
logger.error("[Pack] error in loading pack file '%s': '%s'", name, exp)
开发者ID:A3Sec,项目名称:shinken,代码行数:25,代码来源:pack.py
示例15: publish_archive
def publish_archive(archive):
    """Upload the given archive to shinken.io/push and report the result.

    Sends the file as a multipart POST ("data" field, application/x-gzip)
    with the configured api_key, then inspects the HTTP code and the
    JSON body's status/text fields. Exits the process with code 2 on a
    non-200 HTTP response.

    NOTE(review): c.perform() is not wrapped in try/except here (unlike
    the other variant of this function), so a pycurl.error propagates.
    NOTE(review): logger.log(text) — confirm the shinken logger exposes
    a plain .log() method; stdlib logging would require a level argument.
    """
    # Now really publish it
    proxy = CONFIG['shinken.io']['proxy']
    api_key = CONFIG['shinken.io']['api_key']
    # Ok we will push the file with a 10s timeout
    c = pycurl.Curl()
    c.setopt(c.POST, 1)
    c.setopt(c.CONNECTTIMEOUT, 10)
    c.setopt(c.TIMEOUT, 10)
    if proxy:
        c.setopt(c.PROXY, proxy)
    c.setopt(c.URL, "http://shinken.io/push")
    c.setopt(c.HTTPPOST, [("api_key", api_key),
                          ("data",
                           (c.FORM_FILE, str(archive),
                            c.FORM_CONTENTTYPE, "application/x-gzip"))
                          ])
    # Capture the response body in memory instead of stdout.
    response = StringIO()
    c.setopt(pycurl.WRITEFUNCTION, response.write)
    c.setopt(c.VERBOSE, 1)
    c.perform()
    r = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    if r != 200:
        logger.error("There was a critical error : %s" % response.getvalue())
        sys.exit(2)
    else:
        # Unescape JSON forward slashes before parsing.
        ret = json.loads(response.getvalue().replace('\\/', '/'))
        status = ret.get('status')
        text = ret.get('text')
        if status == 200:
            logger.log(text)
        else:
            logger.error(text)
开发者ID:kjellski,项目名称:shinken,代码行数:35,代码来源:cli.py
示例16: commit_logs
def commit_logs(self):
    """
    Periodically called (commit_period), this method prepares a bunch of
    queued log lines (up to commit_volume) to insert them in the DB.

    Reconnects first if the connection was lost; on reconnection failure
    the cached lines are kept for the next attempt.

    NOTE(review): logs_to_commit starts at 1, so the batch tops out at
    commit_volume - 1 lines — confirm whether that off-by-one is intended.
    NOTE(review): this snippet appears truncated — the actual DB insert
    of `some_logs` is not shown.
    """
    if not self.logs_cache:
        return
    if not self.is_connected == CONNECTED:
        if not self.open():
            logger.warning("[mongo-logs] log commiting failed")
            logger.warning("[mongo-logs] %d lines to insert in database", len(self.logs_cache))
            return
    logger.debug("[mongo-logs] commiting ...")
    logger.debug("[mongo-logs] %d lines to insert in database (max insertion is %d lines)", len(self.logs_cache), self.commit_volume)
    # Flush all the stored log lines
    logs_to_commit = 1
    now = time.time()
    some_logs = []
    while True:
        try:
            # result = self.db[self.logs_collection].insert_one(self.logs_cache.popleft())
            some_logs.append(self.logs_cache.popleft())
            logs_to_commit = logs_to_commit + 1
            if logs_to_commit >= self.commit_volume:
                break
        except IndexError:
            # Cache drained: everything available is batched.
            logger.debug("[mongo-logs] prepared all available logs for commit")
            break
        except Exception, exp:
            logger.error("[mongo-logs] exception: %s", str(exp))
开发者ID:MPOWER4RU,项目名称:mod-mongo-logs,代码行数:33,代码来源:module.py
示例17: launch_query
def launch_query(self):
    """ Prepare the request object's filter stacks and run the query.

    Dispatches to get_live_data_log() for the 'log' table, otherwise
    checks pnp perfdata path readability (when pnpgraph_present is
    requested) and runs get_live_data(). Any exception is logged and
    yields an empty result.

    NOTE(review): this snippet appears truncated — the final
    `return result` is not shown.
    """
    # The Response object needs to access the Query
    self.response.load(self)
    # A minimal integrity check
    if not self.table:
        return []
    try:
        # Remember the number of stats filters. We need these numbers as columns later.
        # But we need to ask now, because get_live_data() will empty the stack
        if self.table == 'log':
            result = self.get_live_data_log()
        else:
            # If the pnpgraph_present column is involved, then check
            # with each request if the pnp perfdata path exists
            if 'pnpgraph_present' in self.columns + self.filtercolumns + self.prefiltercolumns and self.pnp_path and os.access(self.pnp_path, os.R_OK):
                self.pnp_path_readable = True
            else:
                self.pnp_path_readable = False
            # Apply the filters on the broker's host/service/etc elements
            result = self.get_live_data()
    except Exception, e:
        import traceback
        logger.error("[Livestatus Wait Query] Error: %s" % e)
        traceback.print_exc(32)
        result = []
开发者ID:Thibautg16,项目名称:shinken,代码行数:29,代码来源:livestatus_wait_query.py
示例18: get_new_broks
def get_new_broks(self, type='scheduler'):
    """Fetch pending broks from every connected daemon of the given type.

    For each scheduler link with a live connection: ping it, pull the
    compressed/pickled brok batch, decode it, tag each brok with the
    link's instance_id and enqueue the batch. A decode failure drops the
    connection so it gets re-initialized; a missing connection triggers
    pynag_con_init().

    NOTE(review): this snippet appears truncated — the `except` clause
    of the outer try (handling ping/get failures) is not shown, so the
    code as displayed is incomplete.
    """
    # Get the good links tab for looping..
    links = self.get_links_from_type(type)
    if links is None:
        logger.debug('Type unknown for connection! %s', type)
        return
    # We check for new check in each schedulers and put
    # the result in new_checks
    for sched_id in links:
        try:
            con = links[sched_id]['con']
            if con is not None:  # None = not initialized
                t0 = time.time()
                # Before ask a call that can be long, do a simple ping to be sure it is alive
                con.get('ping')
                tmp_broks = con.get('get_broks', {'bname':self.name}, wait='long')
                try:
                    # Payload is base64-encoded, zlib-compressed pickle.
                    _t = base64.b64decode(tmp_broks)
                    _t = zlib.decompress(_t)
                    tmp_broks = cPickle.loads(_t)
                except (TypeError, zlib.error, cPickle.PickleError), exp:
                    logger.error('Cannot load broks data from %s : %s', links[sched_id]['name'], exp)
                    # Drop the connection so it is re-created next pass.
                    links[sched_id]['con'] = None
                    continue
                logger.debug("%s Broks get in %s", len(tmp_broks), time.time() - t0)
                for b in tmp_broks.values():
                    b.instance_id = links[sched_id]['instance_id']
                # Ok, we can add theses broks to our queues
                self.add_broks_to_queue(tmp_broks.values())
            else:  # no con? make the connection
                self.pynag_con_init(sched_id, type=type)
开发者ID:h4wkmoon,项目名称:shinken,代码行数:33,代码来源:brokerdaemon.py
示例19: is_correct
def is_correct(self):
    """Check that this checkmodulation object is well configured.

    Reports previously collected configuration errors, verifies that all
    required (non-special) properties are set, and validates the
    check_command specially. A missing check_period is normalized to
    None, which means 24x7.

    :return: True when valid, False otherwise
    """
    state = True
    cls = self.__class__
    # Raise all previously seen errors like unknown commands or timeperiods
    if self.configuration_errors:
        state = False
        for err in self.configuration_errors:
            logger.error("[item::%s] %s", self.get_name(), err)
    for prop, entry in cls.properties.items():
        if prop not in cls._special_properties:
            if not hasattr(self, prop) and entry.required:
                logger.warning("[checkmodulation::%s] %s property not set", self.get_name(), prop)
                state = False  # Bad boy...
    # Ok now we manage special cases...
    # Service part
    if not hasattr(self, 'check_command'):
        logger.warning("[checkmodulation::%s] do not have any check_command defined", self.get_name())
        state = False
    else:
        if self.check_command is None:
            logger.warning("[checkmodulation::%s] a check_command is missing", self.get_name())
            state = False
        # Bug fix: guard the validity call — the original unconditionally
        # called self.check_command.is_valid() and raised AttributeError
        # when check_command was None.
        elif not self.check_command.is_valid():
            logger.warning("[checkmodulation::%s] a check_command is invalid", self.get_name())
            state = False
    # Ok just put None as check_period, means 24x7
    if not hasattr(self, 'check_period'):
        self.check_period = None
    return state
开发者ID:A3Sec,项目名称:shinken,代码行数:34,代码来源:checkmodulation.py
示例20: add
def add(self, elt):
    """Dispatch an element received by the broker to the right queue.

    - 'brok': tagged with instance_id 0 (internal) and queued.
    - 'externalcommand': appended to the external command list.
    - 'message': module messages; 'NeedData' with a full_instance_id
      resets that scheduler's connection and running_id so the module
      gets a full data dump; 'ICrash' logs the module's dying traceback.
    """
    cls_type = elt.__class__.my_type
    if cls_type == 'brok':
        # For brok, we TAG brok with our instance_id
        elt.instance_id = 0
        self.broks_internal_raised.append(elt)
        return
    elif cls_type == 'externalcommand':
        # NOTE(review): this logs ExternalCommand.__dict__ (the class
        # dict), not the command itself — confirm that is intended.
        logger.debug("Enqueuing an external command '%s'" % str(ExternalCommand.__dict__))
        self.external_commands.append(elt)
    # Maybe we got a Message from the modules, it's way to ask something
    # like from now a full data from a scheduler for example.
    elif cls_type == 'message':
        # We got a message, great!
        logger.debug(str(elt.__dict__))
        if elt.get_type() == 'NeedData':
            data = elt.get_data()
            # Full instance id means: I got no data for this scheduler
            # so give me all dumbass!
            if 'full_instance_id' in data:
                c_id = data['full_instance_id']
                source = elt.source
                logger.info('The module %s is asking me to get all initial data from the scheduler %d' % (source, c_id))
                # so we just reset the connection and the running_id, it will just get all new things
                try:
                    self.schedulers[c_id]['con'] = None
                    self.schedulers[c_id]['running_id'] = 0
                except KeyError:  # maybe this instance was not known, forget it
                    logger.warning("the module %s ask me a full_instance_id for an unknown ID (%d)!" % (source, c_id))
        # Maybe a module tells me that it's dead, I must log it's last words...
        if elt.get_type() == 'ICrash':
            data = elt.get_data()
            logger.error('the module %s just crash! Please look at the traceback:' % data['name'])
            logger.error(data['trace'])
开发者ID:Thibautg16,项目名称:shinken,代码行数:34,代码来源:brokerdaemon.py
注:本文中的shinken.log.logger.error函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论