This article collects and summarizes typical usage examples of the logger.log function from Python's shinken.log module. If you are wondering how the log function works, how to call it, or what it looks like in real code, the curated examples below should help.
The following shows 20 code examples of the log function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
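Before the examples, here is a minimal sketch of the call pattern they all share, assuming only what the snippets below demonstrate: messages are plain %-formatted strings, usually prefixed with a bracketed daemon or module name, and an optional print_it keyword (seen in Example 3) is passed for quieter messages. The module name, message text and scheduler_id value are illustrative only, not part of the Shinken API.

from shinken.log import logger

scheduler_id = 7  # illustrative value only
logger.log("[MyModule] Asking scheduler %d for its initial data" % scheduler_id)
# print_it appears in Example 3 below; assumed here to suppress console echoing
logger.log("[MyModule] A quieter message", print_it=False)

Note that the examples pass fully formatted strings to logger.log rather than a format string plus arguments; the sketch above follows the same convention.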
Example 1: add
def add(self, elt):
    cls_type = elt.__class__.my_type
    if cls_type == "brok":
        # For broks, we tag them with our instance_id
        elt.data["instance_id"] = 0
        self.broks_internal_raised.append(elt)
        return
    elif cls_type == "externalcommand":
        print "Adding in queue an external command", ExternalCommand.__dict__
        self.external_commands.append(elt)
    # Maybe we got a Message from the modules: it's a way to ask for something,
    # for example all the data of a scheduler from now on.
    elif cls_type == "message":
        # We got a message, great!
        print elt.__dict__
        if elt.get_type() == "NeedData":
            data = elt.get_data()
            # A full_instance_id means: I have no data for this scheduler,
            # so send me everything
            if "full_instance_id" in data:
                c_id = data["full_instance_id"]
                logger.log("A module is asking me to get all initial data from the scheduler %d" % c_id)
                # So we just reset the connection and the running_id; it will then fetch everything anew
                try:
                    self.schedulers[c_id]["con"] = None
                    self.schedulers[c_id]["running_id"] = 0
                except KeyError:  # maybe this instance was not known, forget it
                    print "WARNING: a module asked me for the full_instance_id of an unknown ID!", c_id
Developer: wAmpIre, Project: shinken, Lines: 28, Source: brokerdaemon.py
Example 2: load
def load(self):
    """ Try to import the requested modules; put the imported modules in
    self.imported_modules. Any previously imported modules are cleared first. """
    now = int(time.time())
    # We get all module files ending in .py
    modules_files = [fname[:-3] for fname in os.listdir(self.modules_path)
                     if fname.endswith(".py")]
    # And directories
    modules_files.extend([fname for fname in os.listdir(self.modules_path)
                          if os.path.isdir(os.path.join(self.modules_path, fname))])
    # Now we try to load them.
    # First we add their directory to sys.path
    if self.modules_path not in sys.path:
        sys.path.append(self.modules_path)
    # We try to import them, but we keep only the ones of
    # our type
    del self.imported_modules[:]
    for fname in modules_files:
        # print "Trying to load", fname
        try:
            m = __import__(fname)
            if not hasattr(m, 'properties'):
                continue
            # We want to keep only the modules of our type
            if self.modules_type in m.properties['daemons']:
                self.imported_modules.append(m)
        except Exception, exp:
            logger.log("Warning in importing module : %s" % exp)
Developer: mleinart, Project: shinken, Lines: 32, Source: modulesmanager.py
Example 3: is_me
def is_me(self):
    logger.log(
        "And arbiter is launched with the hostname:%s from an arbiter point of view of addr:%s"
        % (self.host_name, socket.getfqdn()),
        print_it=False,
    )
    return self.host_name == socket.getfqdn() or self.host_name == socket.gethostname()
Developer: jfbutkiewicz, Project: Windows-Setup-for-Shinken, Lines: 7, Source: arbiterlink.py
Example 4: compensate_system_time_change
def compensate_system_time_change(self, difference):
    """ Compensate a system time change of 'difference' seconds for all hosts/services/checks/notifications """
    logger.log('Warning: A system time change of %d has been detected. Compensating...' % difference)
    # We only need to change some values
    self.program_start = max(0, self.program_start + difference)
    # Then we compensate all hosts/services
    for h in self.sched.hosts:
        h.compensate_system_time_change(difference)
    for s in self.sched.services:
        s.compensate_system_time_change(difference)
    # Now all checks
    for c in self.sched.checks.values():
        # Already launched checks should not be touched
        if c.status == 'scheduled':
            t_to_go = c.t_to_go
            ref = c.ref
            new_t = max(0, t_to_go + difference)
            # But it's not so simple, we must match the timeperiod
            new_t = ref.check_period.get_next_valid_time_from_t(new_t)
            # Maybe there is no valid time left! Not good :(
            # Flag it as an error, with an error output
            if new_t is None:
                c.state = 'waitconsume'
                c.exit_status = 2
                c.output = '(Error: there is no available check time after time change!)'
                c.check_time = time.time()
                c.execution_time = 0
            else:
                c.t_to_go = new_t
                ref.next_chk = new_t
    # Now all actions
    for c in self.sched.actions.values():
        # Already launched actions should not be touched
        if c.status == 'scheduled':
            t_to_go = c.t_to_go
            # Event handlers do not have a ref
            ref = getattr(c, 'ref', None)
            new_t = max(0, t_to_go + difference)
            # Notifications should be checked against their notification_period
            if c.is_a == 'notification':
                # But it's not so simple, we must match the timeperiod
                new_t = ref.notification_period.get_next_valid_time_from_t(new_t)
                # And they have a creation_time variable too
                c.creation_time = c.creation_time + difference
            # Maybe there is no valid time left! Not good :(
            # Flag it as an error, with an error output
            if new_t is None:
                c.state = 'waitconsume'
                c.exit_status = 2
                c.output = '(Error: there is no available check time after time change!)'
                c.check_time = time.time()
                c.execution_time = 0
            else:
                c.t_to_go = new_t
Developer: zoranzaric, Project: shinken, Lines: 60, Source: schedulerdaemon.py
Example 5: setup_new_conf
def setup_new_conf(self):
    conf = self.new_conf
    self.new_conf = None
    self.cur_conf = conf
    # Get our name from the globals
    if 'receiver_name' in conf['global']:
        name = conf['global']['receiver_name']
    else:
        name = 'Unnamed receiver'
    self.name = name
    self.log.load_obj(self, name)
    print "[%s] Sending us configuration %s" % (self.name, conf)
    if not self.have_modules:
        self.modules = mods = conf['global']['modules']
        self.have_modules = True
        logger.log("[%s] We received modules %s " % (self.name, mods))
    # Set the timezone given to us by the arbiter
    use_timezone = conf['global']['use_timezone']
    if use_timezone != 'NOTSET':
        logger.log("[%s] Setting our timezone to %s" % (self.name, use_timezone))
        os.environ['TZ'] = use_timezone
        time.tzset()
Developer: pjjw, Project: shinken, Lines: 25, Source: receiverdaemon.py
Example 6: get_scheduler_ordered_list
def get_scheduler_ordered_list(self, r):
    # Get schedulers: alive and non-spare first
    scheds = []
    for s in r.schedulers:
        scheds.append(s)
    # Now the spare schedulers of higher realms.
    # They come after the schedulers of this realm, so
    # they will be used after this realm's own spares.
    for higher_r in r.higher_realms:
        for s in higher_r.schedulers:
            if s.spare:
                scheds.append(s)
    # Now we sort the schedulers so we take masters first, then spares,
    # then the dead ones (but we do not care about those)
    scheds.sort(alive_then_spare_then_deads)
    scheds.reverse()  # pop is last, I need first
    # DBG: dump
    print_sched = [s.get_name() for s in scheds]
    print_sched.reverse()
    print_string = '[%s] Schedulers order : ' % r.get_name()
    for s in print_sched:
        print_string += '%s ' % s
    logger.log(print_string)
    # END DBG
    return scheds
Developer: wAmpIre, Project: shinken, Lines: 30, Source: dispatcher.py
Example 7: hook_save_retention
def hook_save_retention(self, daemon):
    log_mgr = logger
    logger.log("[PickleRetentionGeneric] asking me to update the retention objects")
    # Now the flat file method
    try:
        # Open a file next to the real path, with a .tmp extension,
        # so in case of a problem we do not lose the old one
        f = open(self.path + '.tmp', 'wb')
        # We get the interesting retention data from the daemon itself
        all_data = daemon.get_retention_data()
        # And we save it to the file :)
        #s = cPickle.dumps(all_data)
        #s_compress = zlib.compress(s)
        cPickle.dump(all_data, f, protocol=cPickle.HIGHEST_PROTOCOL)
        #f.write(s_compress)
        f.close()
        # Now move the .tmp file to the real path
        shutil.move(self.path + '.tmp', self.path)
    except IOError, exp:
        log_mgr.log("Error: retention file creation failed, %s" % str(exp))
        return
Developer: zoranzaric, Project: shinken, Lines: 26, Source: pickle_retention_file_generic.py
Example 8: search
def search(look_at):
    # Now really publish it
    proxy = CONFIG['shinken.io']['proxy']
    api_key = CONFIG['shinken.io']['api_key']
    # OK, we will issue the request with a 10s timeout
    c = pycurl.Curl()
    c.setopt(c.POST, 0)
    c.setopt(c.CONNECTTIMEOUT, 10)
    c.setopt(c.TIMEOUT, 10)
    if proxy:
        c.setopt(c.PROXY, proxy)
    args = {'keywords': ','.join(look_at)}
    c.setopt(c.URL, str('shinken.io/searchcli?' + urllib.urlencode(args)))
    response = StringIO()
    c.setopt(pycurl.WRITEFUNCTION, response.write)
    #c.setopt(c.VERBOSE, 1)
    c.perform()
    r = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    if r != 200:
        logger.error("There was a critical error : %s" % response.getvalue())
        sys.exit(2)
    else:
        ret = json.loads(response.getvalue().replace('\\/', '/'))
        status = ret.get('status')
        result = ret.get('result')
        if status != 200:
            logger.log(result)
            return []
        return result
Developer: kjellski, Project: shinken, Lines: 32, Source: cli.py
Example 9: main
def main(self):
    self.set_signal_handler()
    logger.log("[%s[%d]]: Now running.." % (self.name, os.getpid()))
    while not self.interrupted:
        self.do_loop_turn()
    self.do_stop()
    logger.log("[%s]: exiting now.." % (self.name))
Developer: bs-github, Project: shinken, Lines: 7, Source: basemodule.py
Example 10: publish_archive
def publish_archive(archive):
    # Now really publish it
    proxy = CONFIG['shinken.io']['proxy']
    api_key = CONFIG['shinken.io']['api_key']
    # OK, we will push the file with a 10s timeout
    c = pycurl.Curl()
    c.setopt(c.POST, 1)
    c.setopt(c.CONNECTTIMEOUT, 10)
    c.setopt(c.TIMEOUT, 10)
    if proxy:
        c.setopt(c.PROXY, proxy)
    c.setopt(c.URL, "http://shinken.io/push")
    c.setopt(c.HTTPPOST, [("api_key", api_key),
                          ("data",
                           (c.FORM_FILE, str(archive),
                            c.FORM_CONTENTTYPE, "application/x-gzip"))
                          ])
    response = StringIO()
    c.setopt(pycurl.WRITEFUNCTION, response.write)
    c.setopt(c.VERBOSE, 1)
    c.perform()
    r = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    if r != 200:
        logger.error("There was a critical error : %s" % response.getvalue())
        sys.exit(2)
    else:
        ret = json.loads(response.getvalue().replace('\\/', '/'))
        status = ret.get('status')
        text = ret.get('text')
        if status == 200:
            logger.log(text)
        else:
            logger.error(text)
Developer: kjellski, Project: shinken, Lines: 35, Source: cli.py
Example 11: get_new_broks
def get_new_broks(self, type='scheduler'):
    # Get the good links tab for looping..
    links = self.get_links_from_type(type)
    if links is None:
        logger.log('DBG: Type unknown for connection! %s' % type)
        return
    # We check for new broks in each scheduler and put
    # the results in new_checks
    for sched_id in links:
        try:
            con = links[sched_id]['con']
            if con is not None:  # None = not initialized
                tmp_broks = con.get_broks()
                for b in tmp_broks.values():
                    b.instance_id = links[sched_id]['instance_id']
                # OK, we can add these broks to our queues
                self.add_broks_to_queue(tmp_broks.values())
            else:  # no con? make the connection
                self.pynag_con_init(sched_id, type=type)
        # OK, con is not known, so we create it
        except KeyError, exp:
            print exp
            self.pynag_con_init(sched_id, type=type)
        except Pyro.errors.ProtocolError, exp:
            logger.log("[%s] Connection problem to the %s %s : %s" % (self.name, type, links[sched_id]['name'], str(exp)))
            links[sched_id]['con'] = None
Developer: bs-github, Project: shinken, Lines: 29, Source: brokerdaemon.py
Example 12: init
def init(self):
    logger.log("I connect to NDO database")
    self.db = DBMysql(self.host, self.user, self.password, self.database,
                      self.character_set, table_prefix='nagios_', port=self.port)
    self.connect_database()
    # Cache for hosts and services.
    # The structure is as follows:
    # first the instance id, then the host / (host, service desc) to access the wanted data
    self.services_cache_sync = {}
    self.hosts_cache_sync = {}
    # We need to search for Centreon-specific fields, like long_output
    query = u"select TABLE_NAME from information_schema.columns where TABLE_SCHEMA='ndo' and TABLE_NAME='nagios_servicestatus' and COLUMN_NAME='long_output';"
    self.db.execute_query(query)
    row = self.db.fetchone()
    if row is None or len(row) < 1:
        self.centreon_version = False
    else:
        self.centreon_version = True
        logger.log("[MySQL/NDO] Using the Centreon version")
    # Cache for database ids,
    # so that we do not have to query the database every time
    self.database_id_cache = {}
    # Mapping between service_id in Shinken and in the database,
    # because we can't access host_name from a service every time :(
    self.mapping_service_id = {}
    # Todo list to manage broks
    self.todo = []
Developer: jfbutkiewicz, Project: Windows-Setup-for-Shinken, Lines: 32, Source: ndodb_mysql_broker.py
Example 13: is_correct
def is_correct(self):
    state = True  # guilty or not? :)
    cls = self.__class__
    # All of the above are checked in the notificationways part
    for prop, entry in cls.properties.items():
        if prop not in _special_properties:
            if not hasattr(self, prop) and entry.required:
                print self.get_name(), " : I do not have", prop
                state = False  # Bad boy...
    # There is a case where there is no nw: when no special_prop is defined
    # at all!!
    if self.notificationways == []:
        for p in _special_properties:
            print self.get_name(), " : I'm missing the property %s" % p
            state = False
    if hasattr(self, 'contact_name'):
        for c in cls.illegal_object_name_chars:
            if c in self.contact_name:
                logger.log("%s : My contact_name contains the character %s, which is not allowed." % (self.get_name(), c))
                state = False
    else:
        if hasattr(self, 'alias'):  # take the alias if we miss the contact_name
            self.contact_name = self.alias
    return state
Developer: bs-github, Project: shinken, Lines: 28, Source: contact.py
Example 14: no_loop_in_parents
def no_loop_in_parents(self):
    # OK, we say "from now on, no loops :)"
    r = True
    # Create the parent graph
    parents = Graph()
    # With all hosts as nodes
    for h in self:
        if h is not None:
            parents.add_node(h)
    # And now fill in the edges
    for h in self:
        for p in h.parents:
            if p is not None:
                parents.add_edge(p, h)
    # Now get the list of all hosts that are part of a loop
    host_in_loops = parents.loop_check()
    # and raise errors about it
    for h in host_in_loops:
        logger.log("Error: The host '%s' is part of a circular parent/child chain!" % h.get_name())
        r = False
    return r
Developer: zoranzaric, Project: shinken, Lines: 27, Source: host.py
Example 15: add
def add(self, elt):
    cls_type = elt.__class__.my_type
    if cls_type == 'brok':
        # For broks, we tag them with our instance_id
        elt.data['instance_id'] = 0
        self.broks_internal_raised.append(elt)
        return
    elif cls_type == 'externalcommand':
        print "Adding in queue an external command", ExternalCommand.__dict__
        self.external_commands.append(elt)
    # Maybe we got a Message from the modules: it's a way to ask for something,
    # for example all the data of a scheduler from now on.
    elif cls_type == 'message':
        # We got a message, great!
        print elt.__dict__
        if elt.get_type() == 'NeedData':
            data = elt.get_data()
            # A full_instance_id means: I have no data for this scheduler,
            # so send me everything
            if 'full_instance_id' in data:
                c_id = data['full_instance_id']
                logger.log('A module is asking me to get all initial data from the scheduler %d' % c_id)
                # So we just reset the connection and the running_id; it will then fetch everything anew
                self.schedulers[c_id]['con'] = None
                self.schedulers[c_id]['running_id'] = 0
Developer: zoranzaric, Project: shinken, Lines: 25, Source: brokerdaemon.py
Example 16: main
def main(self):
    self.load_config_file()
    for line in self.get_header():
        self.log.log(line)
    logger.log("[Broker] Using working directory : %s" % os.path.abspath(self.workdir))
    self.do_daemon_init_and_start()
    self.uri2 = self.pyro_daemon.register(self.interface, "ForArbiter")
    print "The Arbiter uri is at", self.uri2
    # We wait for the initial conf
    self.wait_for_initial_conf()
    if not self.new_conf:
        return
    self.setup_new_conf()
    # Set modules, init them and start the external ones
    self.modules_manager.set_modules(self.modules)
    self.do_load_modules()
    self.modules_manager.start_external_instances()
    # Do the modules part, we have our modules in self.modules
    # REF: doc/broker-modules.png (1)
    self.hook_point('load_retention')
    # Now the main loop
    self.do_mainloop()
Developer: zoranzaric, Project: shinken, Lines: 32, Source: brokerdaemon.py
Example 17: try_instance_init
def try_instance_init(self, inst):
    """ Try to "init" the given module instance.
    Returns: True on successful init. False if the instance's init method raised any exception. """
    try:
        print "Trying to init module", inst.get_name()
        inst.init_try += 1
        # Maybe it's a retry
        if inst.init_try > 1:
            # Do not retry within 5 seconds, or it gets too loopy
            if inst.last_init_try > time.time() - 5:
                return False
        inst.last_init_try = time.time()
        # If it's an external module, create/update its Queues()
        if inst.is_external:
            inst.create_queues()
        inst.init()
    except Exception, e:
        logger.log("Error : the instance %s raised an exception %s, I remove it!" % (inst.get_name(), str(e)))
        output = cStringIO.StringIO()
        traceback.print_exc(file=output)
        logger.log("Back trace of this removal: %s" % (output.getvalue()))
        output.close()
        return False
Developer: wAmpIre, Project: shinken, Lines: 25, Source: modulesmanager.py
Example 18: main
def main(self):
    """ The module "main" method. Only used by external modules. """
    self.set_signal_handler()
    logger.log("[%s[%d]]: Now running.." % (self.name, os.getpid()))
    while not self.interrupted:
        self.do_loop_turn()
    self.do_stop()
    logger.log("[%s]: exiting now.." % (self.name))
Developer: mleinart, Project: shinken, Lines: 8, Source: basemodule.py
Example 19: create_connection
def create_connection(self):
    try:
        self.uri = pyro.create_uri(self.address, self.port, "ForArbiter", self.__class__.use_ssl)
        self.con = pyro.getProxy(self.uri)
        pyro.set_timeout(self.con, self.timeout)
    except Pyro_exp_pack, exp:
        self.con = None
        logger.log('Error : in creating the connection for %s : %s' % (self.get_name(), str(exp)))
Developer: Dabg, Project: shinken, Lines: 8, Source: satellitelink.py
Example 20: pynag_con_init
def pynag_con_init(self, id, type="scheduler"):
    # Get the good links tab for looping..
    links = self.get_links_from_type(type)
    if links is None:
        logger.log("DBG: Type unknown for connection! %s" % type)
        return
    if type == "scheduler":
        # If the sched is not active, I do not try to init it;
        # it would just be useless
        is_active = links[id]["active"]
        if not is_active:
            return
    # If we try to connect too often, we slow down our tests
    if self.is_connection_try_too_close(links[id]):
        return
    # OK, we can now update it
    links[id]["last_connection"] = time.time()
    # DBG: print "Init connection with", links[id]['uri']
    running_id = links[id]["running_id"]
    # DBG: print "Running id before connection", running_id
    uri = links[id]["uri"]
    links[id]["con"] = Pyro.core.getProxyForURI(uri)
    try:
        # the initial ping must be quick
        pyro.set_timeout(links[id]["con"], 5)
        links[id]["con"].ping()
        new_run_id = links[id]["con"].get_running_id()
        # data transfers can take longer
        pyro.set_timeout(links[id]["con"], 120)
        # The scheduler has been restarted: it has a new run_id.
        # So we clear all verifs, they are obsolete now.
        if new_run_id != running_id:
            print "[%s] New running id for the %s %s : %s (was %s)" % (
                self.name,
                type,
                links[id]["name"],
                new_run_id,
                running_id,
            )
            links[id]["broks"].clear()
            # we must ask for a new full set of broks if
            # it's a scheduler
            if type == "scheduler":
                print "[%s] I ask for a broks generation to the scheduler %s" % (self.name, links[id]["name"])
                links[id]["con"].fill_initial_broks()
            # else:
            #     print "I do not ask for brok generation"
        links[id]["running_id"] = new_run_id
    except (Pyro.errors.ProtocolError, Pyro.errors.CommunicationError), exp:
        logger.log("[%s] Connection problem to the %s %s : %s" % (self.name, type, links[id]["name"], str(exp)))
        links[id]["con"] = None
        return
Developer: wAmpIre, Project: shinken, Lines: 58, Source: brokerdaemon.py
Note: the shinken.log.logger.log function examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The snippets are taken from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult each project's License before redistributing or using the code, and do not reproduce this article without permission.