Mirror of https://github.com/munin-monitoring/contrib.git (synced 2018-11-08 00:59:34 +01:00)
Merge pull request #79 from exhuma/master
Added a new tool similar to pmmn
This commit is contained in: commit 1253b1ded8
tools/pypmmn/.gitignore (vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
env
build
MANIFEST
dist
plugins
tools/pypmmn/README.rst (new file, 68 lines)
@@ -0,0 +1,68 @@
PyPMMN
======

PyPMMN is a pure python port of pmmn_. One small change: instead of using the
current working directory as the ``plugins`` folder, it looks for a
*subdirectory* called ``plugins`` inside the current working directory. This
value can be overridden by a command-line parameter!

Requirements
============

PyPMMN does not have any requirements other than the python standard library.
For compatibility, it is targeted at Python 2.4 and up.

Known Issues
============

* The stdin mode does not work correctly. Consider using the original pmmn_
  instead.
* It's not multithreaded. Only one connection is handled at a time. But given
  the nature of munin, this should not be an issue.

Installation
============

The python way
--------------

Download the folder and run::

    python setup.py install

This will install ``pypmmn.py`` into your system's ``bin`` folder. Commonly,
this is ``/usr/local/bin``.

And of course, you can use virtual environments too!

Manually
--------

Download the folder, copy both files ``pypmmn/pypmmn.py`` and
``pypmmn/daemon.py`` to a location of your choice, and ensure ``pypmmn.py`` is
executable.

Usage
=====

All command-line parameters are documented. Simply run::

    pypmmn.py --help

to get more information.

Example::

    pypmmn.py -l /path/to/log-dir -d /path/to/plugins -p 4949

Daemon mode
-----------

In daemon mode, it is very helpful to specify a log folder: it gives you a
means to inspect what's happening. If you specify a log folder, pypmmn will
also create a file called ``pypmmn.pid`` in it, containing the PID of the
daemon for convenience.

.. _pmmn: http://blog.pwkf.org/post/2008/11/04/A-Poor-Man-s-Munin-Node-to-Monitor-Hostile-UNIX-Servers
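
For illustration only (not part of this commit): a minimal sketch of what a
client session against a running pypmmn instance looks like, assuming the node
listens on port 4949 and that a plugin called ``load`` exists (both the port
and the plugin name are assumptions)::

    import socket

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(('localhost', 4949))
    print sock.recv(1024)      # greeting banner: "# munin node at <hostname>"
    sock.send('list\n')        # ask for the available plugins
    print sock.recv(1024)      # space-separated plugin names
    sock.send('fetch load\n')  # run the (hypothetical) 'load' plugin
    print sock.recv(1024)      # plugin output, terminated by a line with '.'
    sock.send('quit\n')        # the server closes this connection
    sock.close()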
tools/pypmmn/pypmmn/__init__.py (new, empty file)
tools/pypmmn/pypmmn/daemon.py (new file, 210 lines)
@@ -0,0 +1,210 @@
## {{{ http://code.activestate.com/recipes/278731/ (r6)
"""Disk And Execution MONitor (Daemon)

Configurable daemon behaviors:

    1.) The current working directory set to the "/" directory.
    2.) The current file creation mode mask set to 0.
    3.) Close all open files (1024).
    4.) Redirect standard I/O streams to "/dev/null".

A failed call to fork() now raises an exception.

References:
    1) Advanced Programming in the Unix Environment: W. Richard Stevens
    2) Unix Programming Frequently Asked Questions:
        http://www.erlenstar.demon.co.uk/unix/faq_toc.html
"""

__author__ = "Chad J. Schroeder"
__copyright__ = "Copyright (C) 2005 Chad J. Schroeder"

__revision__ = "$Id$"
__version__ = "0.2"

# Standard Python modules.
import os    # Miscellaneous OS interfaces.
import sys   # System-specific parameters and functions.

# Default daemon parameters.
# File mode creation mask of the daemon.
UMASK = 0

# Default working directory for the daemon.
WORKDIR = "/"

# Default maximum for the number of available file descriptors.
MAXFD = 1024

# The standard I/O file descriptors are redirected to /dev/null by default.
if (hasattr(os, "devnull")):
    REDIRECT_TO = os.devnull
else:
    REDIRECT_TO = "/dev/null"


def createDaemon():
    """Detach a process from the controlling terminal and run it in the
    background as a daemon.
    """

    try:
        # Fork a child process so the parent can exit. This returns control to
        # the command-line or shell. It also guarantees that the child will not
        # be a process group leader, since the child receives a new process ID
        # and inherits the parent's process group ID. This step is required
        # to ensure that the next call to os.setsid is successful.
        pid = os.fork()
    except OSError, e:
        raise Exception, "%s [%d]" % (e.strerror, e.errno)

    if (pid == 0):  # The first child.
        # To become the session leader of this new session and the process group
        # leader of the new process group, we call os.setsid(). The process is
        # also guaranteed not to have a controlling terminal.
        os.setsid()

        # Is ignoring SIGHUP necessary?
        #
        # It's often suggested that the SIGHUP signal should be ignored before
        # the second fork to avoid premature termination of the process. The
        # reason is that when the first child terminates, all processes, e.g.
        # the second child, in the orphaned group will be sent a SIGHUP.
        #
        # "However, as part of the session management system, there are exactly
        # two cases where SIGHUP is sent on the death of a process:
        #
        #   1) When the process that dies is the session leader of a session that
        #      is attached to a terminal device, SIGHUP is sent to all processes
        #      in the foreground process group of that terminal device.
        #   2) When the death of a process causes a process group to become
        #      orphaned, and one or more processes in the orphaned group are
        #      stopped, then SIGHUP and SIGCONT are sent to all members of the
        #      orphaned group." [2]
        #
        # The first case can be ignored since the child is guaranteed not to have
        # a controlling terminal. The second case isn't so easy to dismiss.
        # The process group is orphaned when the first child terminates and
        # POSIX.1 requires that every STOPPED process in an orphaned process
        # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the
        # second child is not STOPPED though, we can safely forego ignoring the
        # SIGHUP signal. In any case, there are no ill-effects if it is ignored.
        #
        # import signal           # Set handlers for asynchronous events.
        # signal.signal(signal.SIGHUP, signal.SIG_IGN)

        try:
            # Fork a second child and exit immediately to prevent zombies. This
            # causes the second child process to be orphaned, making the init
            # process responsible for its cleanup. And, since the first child is
            # a session leader without a controlling terminal, it's possible for
            # it to acquire one by opening a terminal in the future (System V-
            # based systems). This second fork guarantees that the child is no
            # longer a session leader, preventing the daemon from ever acquiring
            # a controlling terminal.
            pid = os.fork()  # Fork a second child.
        except OSError, e:
            raise Exception, "%s [%d]" % (e.strerror, e.errno)

        if (pid == 0):  # The second child.
            # Since the current working directory may be a mounted filesystem, we
            # avoid the issue of not being able to unmount the filesystem at
            # shutdown time by changing it to the root directory.
            os.chdir(WORKDIR)
            # We probably don't want the file mode creation mask inherited from
            # the parent, so we give the child complete control over permissions.
            os.umask(UMASK)
        else:
            # exit() or _exit()? See below.
            os._exit(0)  # Exit parent (the first child) of the second child.
    else:
        # exit() or _exit()?
        # _exit is like exit(), but it doesn't call any functions registered
        # with atexit (and on_exit) or any registered signal handlers. It also
        # closes any open file descriptors. Using exit() may cause all stdio
        # streams to be flushed twice and any temporary files may be unexpectedly
        # removed. It's therefore recommended that child branches of a fork()
        # and the parent branch(es) of a daemon use _exit().
        os._exit(0)  # Exit parent of the first child.

    # Close all open file descriptors. This prevents the child from keeping
    # open any file descriptors inherited from the parent. There is a variety
    # of methods to accomplish this task. Three are listed below.
    #
    # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
    # number of open file descriptors to close. If it doesn't exist, use
    # the default value (configurable).
    #
    # try:
    #     maxfd = os.sysconf("SC_OPEN_MAX")
    # except (AttributeError, ValueError):
    #     maxfd = MAXFD
    #
    # OR
    #
    # if (os.sysconf_names.has_key("SC_OPEN_MAX")):
    #     maxfd = os.sysconf("SC_OPEN_MAX")
    # else:
    #     maxfd = MAXFD
    #
    # OR
    #
    # Use the getrlimit method to retrieve the maximum file descriptor number
    # that can be opened by this process. If there is no limit on the
    # resource, use the default value.
    #
    import resource  # Resource usage information.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAXFD

    # Iterate through and close all file descriptors.
    for fd in range(0, maxfd):
        try:
            os.close(fd)
        except OSError:  # ERROR, fd wasn't open to begin with (ignored)
            pass

    # Redirect the standard I/O file descriptors to the specified file. Since
    # the daemon has no controlling terminal, most daemons redirect stdin,
    # stdout, and stderr to /dev/null. This is done to prevent side-effects
    # from reads and writes to the standard I/O file descriptors.

    # This call to open is guaranteed to return the lowest file descriptor,
    # which will be 0 (stdin), since it was closed above.
    os.open(REDIRECT_TO, os.O_RDWR)  # standard input (0)

    # Duplicate standard input to standard output and standard error.
    os.dup2(0, 1)  # standard output (1)
    os.dup2(0, 2)  # standard error (2)

    return(0)


if __name__ == "__main__":

    retCode = createDaemon()

    # The code, as is, will create a new file in the root directory, when
    # executed with superuser privileges. The file will contain the following
    # daemon related process parameters: return code, process ID, parent
    # process group ID, session ID, user ID, effective user ID, real group ID,
    # and the effective group ID. Notice the relationship between the daemon's
    # process ID, process group ID, and its parent's process ID.

    procParams = """
    return code = %s
    process ID = %s
    parent process ID = %s
    process group ID = %s
    session ID = %s
    user ID = %s
    effective user ID = %s
    real group ID = %s
    effective group ID = %s
    """ % (retCode, os.getpid(), os.getppid(), os.getpgrp(), os.getsid(0),
           os.getuid(), os.geteuid(), os.getgid(), os.getegid())

    open("createDaemon.log", "w").write(procParams + "\n")

    sys.exit(retCode)
## end of http://code.activestate.com/recipes/278731/ }}}
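
Usage note (not part of the commit): ``createDaemon()`` only returns in the
fully detached grandchild process, both intermediate parents having already
exited, so a caller typically records the new PID right after the call.
``pypmmn.py`` below does exactly this in ``process_socket()``; the PID file
path used here is only a placeholder::

    import os
    from daemon import createDaemon

    retcode = createDaemon()  # from here on, the code runs as the detached daemon

    # Remember the PID of the daemonized process (placeholder path):
    pidfile = open('/tmp/mydaemon.pid', 'w')
    pidfile.write(str(os.getpid()))
    pidfile.close()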
tools/pypmmn/pypmmn/pypmmn.py (new file, 411 lines)
@@ -0,0 +1,411 @@
#!/usr/bin/python
"""
A very simple munin-node written in pure python (no external libraries
required)
"""
from datetime import datetime
from logging.handlers import RotatingFileHandler
from optparse import OptionParser
from os import listdir, access, X_OK, getpid
from os.path import join, isdir, abspath, dirname, exists
from subprocess import Popen, PIPE
from time import sleep
import logging
import socket

import sys

LOG = logging.getLogger(__name__)
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
SESSION_TIMEOUT = 10  # Amount of seconds until an unused session is closed

from daemon import createDaemon


__version__ = '1.0b1'


class CmdHandler(object):
    """
    This handler defines the protocol between munin and this munin node.
    Each method starting with ``do_`` responds to the corresponding munin
    command.
    """

    def __init__(self, get_fun, put_fun, options):
        """
        Constructor

        :param get_fun: The function used to receive a message from munin
        :param put_fun: The function used to send a message back to munin
        :param options: The command-line options object
        """
        self.get_fun = get_fun
        self.put_fun = put_fun
        self.options = options

    def do_version(self, arg):
        """
        Prints the version of this instance.
        """
        LOG.debug('Command "version" executed with args: %r' % arg)
        self.put_fun('# munin node at %s\n' % (
            self.options.host,
        ))

    def do_nodes(self, arg):
        """
        Prints this hostname
        """
        LOG.debug('Command "nodes" executed with args: %r' % arg)
        self.put_fun('%s\n' % self.options.host)
        self.put_fun('.\n')

    def do_quit(self, arg):
        """
        Stops this process
        """
        LOG.debug('Command "quit" executed with args: %r' % arg)
        sys.exit(0)

    def do_list(self, arg):
        """
        Print a list of plugins
        """
        LOG.debug('Command "list" executed with args: %r' % arg)
        try:
            LOG.debug('Listing files inside %s' % self.options.plugin_dir)
            for filename in listdir(self.options.plugin_dir):
                if not access(join(self.options.plugin_dir, filename), X_OK):
                    LOG.warning('Non-executable plugin %s found!' % filename)
                    continue
                LOG.debug('Found plugin: %s' % filename)
                self.put_fun("%s " % filename)
        except OSError, exc:
            self.put_fun("# ERROR: %s" % exc)
        self.put_fun("\n")

    def _caf(self, plugin, cmd):
        """
        Handler for ``config``, ``alert`` and ``fetch``.
        Calls the plugin with ``cmd`` as only argument.

        :param plugin: The plugin name
        :param cmd: The command which is to be passed to the plugin
        """
        plugin_filename = join(self.options.plugin_dir, plugin)

        # Sanity checks
        if isdir(plugin_filename) or not access(plugin_filename, X_OK):
            msg = "# Unknown plugin [%s] for %s" % (plugin, cmd)
            LOG.warning(msg)
            self.put_fun(msg)
            return

        # for 'fetch' we don't need to pass a command to the plugin
        if cmd == 'fetch':
            plugin_arg = ''
        else:
            plugin_arg = cmd

        try:
            cmd = [plugin_filename, plugin_arg]
            LOG.debug('Executing %r' % cmd)
            output = Popen(cmd, stdout=PIPE).communicate()[0]
        except OSError, exc:
            LOG.exception('Error executing plugin')
            self.put_fun("# ERROR: %s\n" % exc)
            return
        self.put_fun(output)
        self.put_fun('.\n')

    def do_alert(self, arg):
        """
        Handles command "alert"
        """
        LOG.debug('Command "alert" executed with args: %r' % arg)
        self._caf(arg, 'alert')

    def do_fetch(self, arg):
        """
        Handles command "fetch"
        """
        LOG.debug('Command "fetch" executed with args: %r' % arg)
        self._caf(arg, 'fetch')

    def do_config(self, arg):
        """
        Handles command "config"
        """
        LOG.debug('Command "config" executed with args: %r' % arg)
        self._caf(arg, 'config')

    def do_cap(self, arg):
        """
        Handles command "cap"
        """
        LOG.debug('Command "cap" executed with args: %r' % arg)
        self.put_fun("cap ")
        if self.options.spoolfetch_dir:
            self.put_fun("spool")
        else:
            LOG.debug('No spoolfetch_dir specified. Result spooling disabled')

        self.put_fun("\n")

    def do_spoolfetch(self, arg):
        """
        Handles command "spoolfetch"
        """
        LOG.debug('Command "spoolfetch" executed with args: %r' % arg)
        output = Popen(['%s/spoolfetch_%s' % (self.options.spoolfetch_dir,
                                              self.options.host),
                        arg]).communicate()[0]
        self.put_fun(output)
        self.put_fun('.\n')

    # aliases
    do_exit = do_quit

    def handle_input(self, line):
        """
        Handles one input line and sends any result back using ``put_fun``
        """
        line = line.strip()
        line = line.split(' ')
        cmd = line[0]
        if len(line) == 1:
            arg = ''
        elif len(line) == 2:
            arg = line[1]
        else:
            self.put_fun('# Invalid input: %s\n' % line)
            return

        if not cmd:
            return

        func = getattr(self, 'do_%s' % cmd, None)
        if not func:
            # Give the client a list of supported commands.
            commands = [_[3:] for _ in dir(self) if _.startswith('do_')]
            self.put_fun("# Unknown command. Supported commands: %s\n" % (
                commands))
            return

        func(arg)

    def is_timed_out(self):
        return (datetime.now() - self._last_command).seconds > SESSION_TIMEOUT

    def reset_time(self):
        self._last_command = datetime.now()


def usage(option, opt, value, parser):
    """
    Prints the command usage and exits
    """
    parser.print_help()
    sys.exit(0)


def get_options():
    """
    Parses command-line arguments.
    """
    parser = OptionParser(add_help_option=False)
    parser.add_option('-p', '--port', dest='port',
                      default=None,
                      help='TCP Port to listen on. (If not specified, use stdin/stdout)')
    parser.add_option('-d', '--plugin-dir', dest='plugin_dir',
                      default='plugins',
                      help=('The directory containing the munin-plugins.'
                            ' Default: <current working dir>/plugins'))
    parser.add_option('-h', '--host', dest='host',
                      help=('The hostname which will be reported in the plugins.'
                            ' Default: %s' % socket.gethostname()),
                      default=socket.gethostname())
    parser.add_option('-n', '--no-daemon', dest='no_daemon',
                      default=False,
                      action='store_true',
                      help='Run in foreground. Do not daemonize. '
                           'Will also enable debug logging to stdout.')
    parser.add_option('-l', '--log-dir', dest='log_dir',
                      default=None,
                      help='The log folder. Default: disabled')
    parser.add_option('-s', '--spoolfetch-dir', dest='spoolfetch_dir',
                      default=None,
                      help='The spoolfetch folder. Default: disabled')
    parser.add_option('--help', action='callback', callback=usage,
                      help='Shows this help')

    options, args = parser.parse_args()

    # ensure we are using absolute paths (for daemonizing)
    if options.log_dir:
        options.log_dir = abspath(options.log_dir)

    if options.spoolfetch_dir:
        options.spoolfetch_dir = abspath(options.spoolfetch_dir)

    if options.plugin_dir:
        options.plugin_dir = abspath(options.plugin_dir)

    return (options, args)


def process_stdin(options):
    """
    Process commands by reading from stdin
    """
    rfhandler = RotatingFileHandler(
        join(abspath(dirname(__file__)), 'log', 'pypmmn.log'),
        maxBytes=100 * 1024,
        backupCount=5
    )
    rfhandler.setFormatter(logging.Formatter(LOG_FORMAT))
    logging.getLogger().addHandler(rfhandler)
    handler = CmdHandler(sys.stdin.read, sys.stdout.write, options)
    handler.do_version(None)
    LOG.info('STDIN handler opened')
    while True:
        data = sys.stdin.readline().strip()
        if not data:
            return
        handler.handle_input(data)


def process_socket(options):
    """
    Process socket connections.

    .. note::

        This is not a multithreaded process. So only one connection can be
        handled at any given time. But given the nature of munin, this is Good
        Enough.
    """

    retcode = 0
    if options.no_daemon:
        # set up on-screen-logging
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(logging.Formatter(LOG_FORMAT))
        logging.getLogger().addHandler(console_handler)
    else:
        # fork fork
        retcode = createDaemon()

        # set up a rotating file log
        rfhandler = RotatingFileHandler(
            join(options.log_dir, 'daemon.log'),
            maxBytes=100 * 1024,
            backupCount=5
        )
        rfhandler.setFormatter(logging.Formatter(LOG_FORMAT))
        logging.getLogger().addHandler(rfhandler)

        # write down some house-keeping information
        LOG.info('New process PID: %d' % getpid())
        pidfile = open(join(options.log_dir, 'pypmmn.pid'), 'w')
        pidfile.write(str(getpid()))
        pidfile.close()
        LOG.info('PID file created in %s' % join(options.log_dir,
                                                 'pypmmn.pid'))

    LOG.info('Socket handler started.')

    host = ''  # listens on all addresses TODO: make this configurable
    port = int(options.port)

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((host, port))
    s.listen(1)

    LOG.info('Listening on host %r, port %r' % (host, port))

    conn, addr = s.accept()
    handler = CmdHandler(conn.recv, conn.send, options)
    handler.do_version(None)
    handler.reset_time()

    LOG.info("Accepting incoming connection from %s" % (addr, ))
    while True:
        data = conn.recv(1024)
        if not data.strip():
            sleep(1)
            if handler.is_timed_out():
                LOG.info('Session timeout.')
                conn.shutdown(socket.SHUT_RDWR)
                conn.close()

                LOG.info('Listening on host %r, port %r' % (host, port))

                conn, addr = s.accept()
                handler.reset_time()
                handler.get_fun = conn.recv
                handler.put_fun = conn.send
                handler.do_version(None)

                LOG.info("Accepting incoming connection from %s" % (addr, ))
                try:
                    data = conn.recv(1024)
                except socket.error, exc:
                    LOG.warning("Socket error. Reinitialising: %s" % exc)
                    conn, addr = s.accept()
                    handler.reset_time()
                    handler.get_fun = conn.recv
                    handler.put_fun = conn.send
                    handler.do_version(None)

                    LOG.info("Accepting incoming connection from %s" % (addr, ))

        if data.strip() == 'quit':
            LOG.info('Client requested session end. Closing connection.')
            conn.shutdown(socket.SHUT_RDWR)
            conn.close()

            LOG.info('Listening on host %r, port %r' % (host, port))

            conn, addr = s.accept()
            handler.reset_time()
            handler.get_fun = conn.recv
            handler.put_fun = conn.send
            handler.do_version(None)

            LOG.info("Accepting incoming connection from %s" % (addr, ))

            continue

        handler.handle_input(data)

    sys.exit(retcode)


def main():
    """
    The main entry point of the application
    """
    options, args = get_options()

    # Handle logging as early as possible.
    if options.log_dir:
        if not exists(options.log_dir):
            raise IOError('[Errno 2] No such file or directory: %r' % (
                options.log_dir))
    # set up logging if requested
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.NOTSET)  # TODO: Make configurable

    # Start either the "stdin" interface or the socket daemon, depending on
    # whether a port was given on startup or not.
    if not options.port:
        process_stdin(options)
    else:
        process_socket(options)


if __name__ == '__main__':
    main()
tools/pypmmn/setup.py (new file, 22 lines)
@@ -0,0 +1,22 @@
from distutils.core import setup
from pypmmn.pypmmn import __version__

PACKAGE = "pypmmn"
NAME = "pypmmn"
DESCRIPTION = "Python port of the 'Poor man's munin-node'"
AUTHOR = "Michel Albert"
AUTHOR_EMAIL = "michel@albert.lu"

setup(
    name=NAME,
    version=__version__,
    description=DESCRIPTION,
    long_description=open("README.rst").read(),
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    license="BSD",
    url='https://github.com/exhuma/munin-contrib/tree/master/tools/pypmmn',
    packages=['pypmmn'],
    scripts=['pypmmn/pypmmn.py'],
)