Daemonizing and logging added, 'stdin mode' fixed
This commit is contained in:
parent 89d3ef0a65
commit fa24082ea5
tools/pypmmn/pypmmn/daemon.py (new file, 210 lines)
@@ -0,0 +1,210 @@
## {{{ http://code.activestate.com/recipes/278731/ (r6)
"""Disk And Execution MONitor (Daemon)

Configurable daemon behaviors:

    1.) The current working directory set to the "/" directory.
    2.) The current file creation mode mask set to 0.
    3.) Close all open files (1024).
    4.) Redirect standard I/O streams to "/dev/null".

A failed call to fork() now raises an exception.

References:
    1) Advanced Programming in the Unix Environment: W. Richard Stevens
    2) Unix Programming Frequently Asked Questions:
       http://www.erlenstar.demon.co.uk/unix/faq_toc.html
"""

__author__ = "Chad J. Schroeder"
__copyright__ = "Copyright (C) 2005 Chad J. Schroeder"

__revision__ = "$Id$"
__version__ = "0.2"

# Standard Python modules.
import os    # Miscellaneous OS interfaces.
import sys   # System-specific parameters and functions.

# Default daemon parameters.
# File mode creation mask of the daemon.
UMASK = 0

# Default working directory for the daemon.
WORKDIR = "/"

# Default maximum for the number of available file descriptors.
MAXFD = 1024

# The standard I/O file descriptors are redirected to /dev/null by default.
if (hasattr(os, "devnull")):
    REDIRECT_TO = os.devnull
else:
    REDIRECT_TO = "/dev/null"


def createDaemon():
    """Detach a process from the controlling terminal and run it in the
    background as a daemon.
    """

    try:
        # Fork a child process so the parent can exit. This returns control to
        # the command-line or shell. It also guarantees that the child will not
        # be a process group leader, since the child receives a new process ID
        # and inherits the parent's process group ID. This step is required
        # to ensure that the next call to os.setsid is successful.
        pid = os.fork()
    except OSError, e:
        raise Exception, "%s [%d]" % (e.strerror, e.errno)

    if (pid == 0):   # The first child.
        # To become the session leader of this new session and the process group
        # leader of the new process group, we call os.setsid(). The process is
        # also guaranteed not to have a controlling terminal.
        os.setsid()

        # Is ignoring SIGHUP necessary?
        #
        # It's often suggested that the SIGHUP signal should be ignored before
        # the second fork to avoid premature termination of the process. The
        # reason is that when the first child terminates, all processes, e.g.
        # the second child, in the orphaned group will be sent a SIGHUP.
        #
        # "However, as part of the session management system, there are exactly
        # two cases where SIGHUP is sent on the death of a process:
        #
        #   1) When the process that dies is the session leader of a session that
        #      is attached to a terminal device, SIGHUP is sent to all processes
        #      in the foreground process group of that terminal device.
        #   2) When the death of a process causes a process group to become
        #      orphaned, and one or more processes in the orphaned group are
        #      stopped, then SIGHUP and SIGCONT are sent to all members of the
        #      orphaned group." [2]
        #
        # The first case can be ignored since the child is guaranteed not to have
        # a controlling terminal. The second case isn't so easy to dismiss.
        # The process group is orphaned when the first child terminates and
        # POSIX.1 requires that every STOPPED process in an orphaned process
        # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the
        # second child is not STOPPED though, we can safely forego ignoring the
        # SIGHUP signal. In any case, there are no ill-effects if it is ignored.
        #
        # import signal           # Set handlers for asynchronous events.
        # signal.signal(signal.SIGHUP, signal.SIG_IGN)

        try:
            # Fork a second child and exit immediately to prevent zombies. This
            # causes the second child process to be orphaned, making the init
            # process responsible for its cleanup. And, since the first child is
            # a session leader without a controlling terminal, it's possible for
            # it to acquire one by opening a terminal in the future (System V-
            # based systems). This second fork guarantees that the child is no
            # longer a session leader, preventing the daemon from ever acquiring
            # a controlling terminal.
            pid = os.fork()   # Fork a second child.
        except OSError, e:
            raise Exception, "%s [%d]" % (e.strerror, e.errno)

        if (pid == 0):   # The second child.
            # Since the current working directory may be a mounted filesystem, we
            # avoid the issue of not being able to unmount the filesystem at
            # shutdown time by changing it to the root directory.
            os.chdir(WORKDIR)
            # We probably don't want the file mode creation mask inherited from
            # the parent, so we give the child complete control over permissions.
            os.umask(UMASK)
        else:
            # exit() or _exit()? See below.
            os._exit(0)   # Exit parent (the first child) of the second child.
    else:
        # exit() or _exit()?
        # _exit is like exit(), but it doesn't call any functions registered
        # with atexit (and on_exit) or any registered signal handlers. It also
        # closes any open file descriptors. Using exit() may cause all stdio
        # streams to be flushed twice and any temporary files may be unexpectedly
        # removed. It's therefore recommended that child branches of a fork()
        # and the parent branch(es) of a daemon use _exit().
        os._exit(0)   # Exit parent of the first child.

    # Close all open file descriptors. This prevents the child from keeping
    # open any file descriptors inherited from the parent. There is a variety
    # of methods to accomplish this task. Three are listed below.
    #
    # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
    # number of open file descriptors to close. If it doesn't exist, use
    # the default value (configurable).
    #
    # try:
    #     maxfd = os.sysconf("SC_OPEN_MAX")
    # except (AttributeError, ValueError):
    #     maxfd = MAXFD
    #
    # OR
    #
    # if (os.sysconf_names.has_key("SC_OPEN_MAX")):
    #     maxfd = os.sysconf("SC_OPEN_MAX")
    # else:
    #     maxfd = MAXFD
    #
    # OR
    #
    # Use the getrlimit method to retrieve the maximum file descriptor number
    # that can be opened by this process. If there is no limit on the
    # resource, use the default value.
    #
    import resource   # Resource usage information.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAXFD

    # Iterate through and close all file descriptors.
    for fd in range(0, maxfd):
        try:
            os.close(fd)
        except OSError:   # ERROR, fd wasn't open to begin with (ignored)
            pass

    # Redirect the standard I/O file descriptors to the specified file. Since
    # the daemon has no controlling terminal, most daemons redirect stdin,
    # stdout, and stderr to /dev/null. This is done to prevent side-effects
    # from reads and writes to the standard I/O file descriptors.

    # This call to open is guaranteed to return the lowest file descriptor,
    # which will be 0 (stdin), since it was closed above.
    os.open(REDIRECT_TO, os.O_RDWR)   # standard input (0)

    # Duplicate standard input to standard output and standard error.
    os.dup2(0, 1)   # standard output (1)
    os.dup2(0, 2)   # standard error (2)

    return(0)


if __name__ == "__main__":

    retCode = createDaemon()

    # The code, as is, will create a new file in the root directory, when
    # executed with superuser privileges. The file will contain the following
    # daemon related process parameters: return code, process ID, parent
    # process group ID, session ID, user ID, effective user ID, real group ID,
    # and the effective group ID. Notice the relationship between the daemon's
    # process ID, process group ID, and its parent's process ID.

    procParams = """
    return code = %s
    process ID = %s
    parent process ID = %s
    process group ID = %s
    session ID = %s
    user ID = %s
    effective user ID = %s
    real group ID = %s
    effective group ID = %s
    """ % (retCode, os.getpid(), os.getppid(), os.getpgrp(), os.getsid(0),
           os.getuid(), os.geteuid(), os.getgid(), os.getegid())

    open("createDaemon.log", "w").write(procParams + "\n")

    sys.exit(retCode)
## end of http://code.activestate.com/recipes/278731/ }}}
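A minimal usage sketch, not part of the commit: because createDaemon() closes every inherited file descriptor and redirects the standard streams to /dev/null, anything the daemon still needs (log handlers, a PID file) must be opened after the call, which is exactly what the second file in this commit does further down in the diff. Paths below are illustrative.

import logging
import os

from pypmmn.daemon import createDaemon


def run():
    retcode = createDaemon()   # double fork, setsid, chdir("/"), close all fds

    # Re-open everything we need *after* daemonizing; the old descriptors are gone.
    logging.basicConfig(filename='/tmp/mydaemon.log',   # illustrative path
                        level=logging.INFO)
    with open('/tmp/mydaemon.pid', 'w') as pidfile:     # illustrative path
        pidfile.write(str(os.getpid()))

    logging.info('daemon running as PID %d', os.getpid())
    # ... the long-running service loop would go here ...
    return retcode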
@@ -1,20 +1,39 @@
#!/usr/bin/python
from logging.handlers import RotatingFileHandler
from optparse import OptionParser
from os import listdir, access, X_OK
from os.path import join, isdir
from os import listdir, access, X_OK, getpid
from os.path import join, isdir, abspath, dirname, exists
from subprocess import Popen, PIPE
from socket import gethostname, SHUT_RDWR
from time import sleep
import logging
import socket

import sys

LOG = logging.getLogger(__name__)
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'

__version__ = '1.0dev3'
from pypmmn.daemon import createDaemon


__version__ = '1.0dev4'


class CmdHandler(object):
    """
    This handler defines the protocol between munin and this munin node.
    Each method starting with ``do_`` responds to the corresponding munin
    command.
    """

    def __init__(self, get_fun, put_fun, options):
        """
        Constructor

        :param get_fun: The function used to receive a message from munin
        :param put_fun: The function used to send a message back to munin
        :param options: The command-line options object
        """
        self.get_fun = get_fun
        self.put_fun = put_fun
        self.options = options
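Since the handler only depends on a pair of callables, it can be exercised without any network at all. A minimal sketch (not part of the commit; the option values are placeholders) of how the same class serves both the stdin mode and the socket daemon:

from optparse import Values

# Hypothetical option values: only the attributes CmdHandler actually reads.
fake_options = Values({'host': 'node.example.com',
                       'plugin_dir': '/etc/munin/plugins',
                       'spoolfetch_dir': None})

replies = []
handler = CmdHandler(get_fun=None, put_fun=replies.append, options=fake_options)
handler.handle_input('version')
handler.handle_input('list')
print(''.join(replies))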
@@ -23,6 +42,7 @@ class CmdHandler(object):
        """
        Prints the version of this instance.
        """
        LOG.debug('Command "version" executed with args: %r' % arg)
        self.put_fun('# munin node at %s\n' % (
            self.options.host,
        ))
@@ -31,26 +51,32 @@ class CmdHandler(object):
        """
        Prints this hostname
        """
        LOG.debug('Command "nodes" executed with args: %r' % arg)
        self.put_fun('%s\n' % self.options.host)
        self.put_fun('.')
        self.put_fun('.\n')

    def do_quit(self, arg):
        """
        Stops this process
        """
        LOG.debug('Command "quit" executed with args: %r' % arg)
        sys.exit(0)

    def do_list(self, arg):
        """
        Print a list of plugins
        """
        LOG.debug('Command "list" executed with args: %r' % arg)
        try:
            LOG.debug('Listing files inside %s' % self.options.plugin_dir)
            for filename in listdir(self.options.plugin_dir):
                if not access(join(self.options.plugin_dir, filename), X_OK):
                    LOG.warning('Non-executable plugin %s found!' % filename)
                    continue
                #LOG.debug('Found plugin: %s' % filename)
                self.put_fun("%s " % filename)
        except OSError, exc:
            sys.stdout.write("# ERROR: %s" % exc)
            self.put_fun("# ERROR: %s" % exc)
        self.put_fun("\n")

    def _caf(self, arg, cmd):
@@ -60,7 +86,9 @@ class CmdHandler(object):
        """
        plugin_filename = join(self.options.plugin_dir, arg)
        if isdir(plugin_filename) or not access(plugin_filename, X_OK):
            self.put_fun("# Unknown plugin [%s] for %s" % (arg, cmd))
            msg = "# Unknown plugin [%s] for %s" % (arg, cmd)
            LOG.warning(msg)
            self.put_fun(msg)
            return

        if cmd == 'fetch':
@@ -69,29 +97,55 @@ class CmdHandler(object):
            arg_plugin = cmd

        try:
            output = Popen([plugin_filename, arg_plugin], stdout=PIPE).communicate()[0]
            cmd = [plugin_filename, arg_plugin]
            LOG.debug('Executing %r' % cmd)
            output = Popen(cmd, stdout=PIPE).communicate()[0]
        except OSError, exc:
            LOG.exception()
            self.put_fun("# ERROR: %s" % exc)
            return
        self.put_fun(output)
        self.put_fun('.\n')

    def do_alert(self, arg):
        """
        Handle command "alert"
        """
        LOG.debug('Command "alert" executed with args: %r' % arg)
        self._caf(arg, 'alert')

    def do_fetch(self, arg):
        """
        Handles command "fetch"
        """
        LOG.debug('Command "fetch" executed with args: %r' % arg)
        self._caf(arg, 'fetch')

    def do_config(self, arg):
        """
        Handles command "config"
        """
        LOG.debug('Command "config" executed with args: %r' % arg)
        self._caf(arg, 'config')

    def do_cap(self, arg):
        """
        Handles command "cap"
        """
        LOG.debug('Command "cap" executed with args: %r' % arg)
        self.put_fun("cap ")
        if self.options.spoolfetch_dir:
            self.put_fun("spool")
            self.put_fun("cap \n")
        else:
            LOG.debug('No spoolfetch_dir specified. Result spooling disabled')

        self.put_fun("\n")

    def do_spoolfetch(self, arg):
        """
        Handles command "spoolfetch"
        """
        LOG.debug('Command "spoolfetch" executed with args: %r' % arg)
        output = Popen(['%s/spoolfetch_%s' % (self.options.spoolfetch_dir,
                                              self.options.host),
                        arg]).communicate()[0]
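The fetch/config/alert commands above all funnel into _caf(), which executes the named plugin from the plugin directory and relays its stdout back to the munin master. For reference, a minimal illustrative plugin (not part of this commit) that would work with this mechanism:

#!/usr/bin/env python
# example_load: a deliberately tiny munin plugin, used only for illustration.
import os
import sys

if len(sys.argv) > 1 and sys.argv[1] == 'config':
    # "config" describes the graph to the munin master.
    print('graph_title Load average (example)')
    print('load.label load')
else:
    # Any other invocation reports the current value(s).
    print('load.value %.2f' % os.getloadavg()[0])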
@@ -101,8 +155,10 @@ class CmdHandler(object):
    # aliases
    do_exit = do_quit


    def handle_input(self, line):
        """
        Handles one input line and sends any result back using ``put_fun``
        """
        line = line.strip()
        line = line.split(' ')
        cmd = line[0]
@@ -111,7 +167,8 @@ class CmdHandler(object):
        elif len(line) == 2:
            arg = line[1]
        else:
            raise ValueError('Invalid input: %s' % line)
            self.put_fun('# Invalid input: %s\n' % line)
            return

        if not cmd:
            return
@@ -119,7 +176,8 @@ class CmdHandler(object):
        func = getattr(self, 'do_%s' % cmd, None)
        if not func:
            commands = [_[3:] for _ in dir(self) if _.startswith('do_')]
            self.put_fun("# Unknown command. Supported commands: %s" % commands)
            self.put_fun("# Unknown command. Supported commands: %s\n" % (
                commands))
            return

        func(arg)
@@ -144,75 +202,169 @@ def get_options():
                            ' Default: <current working dir>/plugins'))
    parser.add_option('-h', '--host', dest='host',
                      help=('The hostname which will be reported in the plugins.'
                            ' Default: %s' % gethostname()),
                      default=gethostname())
                            ' Default: %s' % socket.gethostname()),
                      default=socket.gethostname())
    parser.add_option('-n', '--no-daemon', dest='no_daemon',
                      default=False,
                      action='store_true',
                      help='Run in foreground. Do not daemonize. '
                           'Will also enable debug logging to stdout.')
    parser.add_option('-l', '--log-dir', dest='log_dir',
                      default=None,
                      help='The log folder. Default: disabled')
    parser.add_option('-s', '--spoolfech-dir', dest='spoolfetch_dir',
                      default=None,
                      help='The spoolfetch folder. Default: disabled')
    parser.add_option('--help', action='callback', callback=usage,
                      help='Shows this help')
    return parser.parse_args()

    options, args = parser.parse_args()

    # ensure we are using absolute paths (for daemonizing)
    if options.log_dir:
        options.log_dir = abspath(options.log_dir)

    if options.spoolfetch_dir:
        options.spoolfetch_dir = abspath(options.spoolfetch_dir)

    if options.plugin_dir:
        options.plugin_dir = abspath(options.plugin_dir)

    return (options, args)

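Why get_options() resolves every path with abspath() before anything else: createDaemon() changes the working directory to "/" when it detaches, so a relative path taken from the command line would point somewhere else entirely once the process is daemonized. A tiny sketch of the effect (illustrative paths only):

from os.path import abspath
import os

print(abspath('logs'))   # e.g. /home/user/pypmmn/logs, resolved where the process started
os.chdir('/')            # what createDaemon() does while detaching
print(abspath('logs'))   # now /logs, which is not what the user meant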
def main():
    options, args = get_options()
    handler = CmdHandler(None, None, options)
    if not options.port:
        handler.get_fun = sys.stdin.read
        handler.put_fun = sys.stdout.write
    else:
        import socket
        host = ''
        port = int(options.port)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((host, port))
        s.listen(1)


def process_stdin(options):
    rfhandler = RotatingFileHandler(
        join(abspath(dirname(__file__)), 'log', 'pypmmn.log'),
        maxBytes=100 * 1024,
        backupCount=5
    )
    rfhandler.setFormatter(logging.Formatter(LOG_FORMAT))
    logging.getLogger().addHandler(rfhandler)
    handler = CmdHandler(sys.stdin.read, sys.stdout.write, options)
    handler.do_version(None)
    LOG.info('STDIN handler opened')
    while True:
        data = sys.stdin.readline().strip()
        handler.handle_input(data)

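The "stdin mode" fix from the commit message is this process_stdin() path: when no port is configured, the node speaks the munin protocol directly over stdin/stdout, which is how it would typically be driven from inetd or across an ssh connection. As an illustrative smoke test (relying on the default plugin directory), one could pipe commands in, e.g. printf 'version\nlist\nquit\n' | python pypmmn.py.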
        conn, addr = s.accept()
        handler.get_fun = conn.recv
        handler.put_fun = conn.send
        handler.do_version(None)
        counter = 0

        print 'Connected by', addr
        while True:
            data = conn.recv(1024)
            if not data.strip():
                sleep(1)
                counter += 1
                if counter > 3:
                    conn.shutdown(SHUT_RDWR)
                    conn.close()
                    conn, addr = s.accept()
                    counter = 0
                    handler.get_fun = conn.recv
                    handler.put_fun = conn.send
                    handler.do_version(None)
                print "sleep"
                try:
                    data = conn.recv(1024)
                    print 'data2', `data`
                except socket.error, exc:
                    conn, addr = s.accept()
                    counter = 0
                    handler.get_fun = conn.recv
                    handler.put_fun = conn.send
                    handler.do_version(None)
                    print "Socket error: %s" % exc


def process_socket(options):

            if data.strip() == 'quit':
                print 'shutting down remote connection'
                conn.shutdown(SHUT_RDWR)
    if options.no_daemon:
        # set up on-screen-logging
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(logging.Formatter(LOG_FORMAT))
        logging.getLogger().addHandler(console_handler)

    retcode = 0
    if not options.no_daemon:
        retcode = createDaemon()
        rfhandler = RotatingFileHandler(
            join(options.log_dir, 'daemon.log'),
            maxBytes=100 * 1024,
            backupCount=5
        )
        rfhandler.setFormatter(logging.Formatter(LOG_FORMAT))
        logging.getLogger().addHandler(rfhandler)
        LOG.info('Process PID: %d' % getpid())
        pidfile = open(join(options.log_dir, 'pypmmn.pid'), 'w')
        pidfile.write(str(getpid()))
        pidfile.close()
        LOG.info('PID file created in %s' % join(options.log_dir,
                                                 'pypmmn.pid'))

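The PID file written above is what an operator or init script would later use to signal the daemon. A minimal sketch, assuming --log-dir was set to /var/log/pypmmn:

import os
import signal

with open('/var/log/pypmmn/pypmmn.pid') as fh:   # path depends on --log-dir
    pid = int(fh.read().strip())
os.kill(pid, signal.SIGTERM)                     # ask the daemon to shut down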
    LOG.info('Socket handler started.')

    host = ''
    port = int(options.port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((host, port))
    s.listen(1)

    LOG.info('Listening on host %r, port %r' % (host, port))

    conn, addr = s.accept()
    handler = CmdHandler(conn.recv, conn.send, options)
    handler.do_version(None)
    counter = 0

    LOG.info("Accepting incoming connection from %s" % (addr, ))
    while True:
        data = conn.recv(1024)
        if not data.strip():
            sleep(1)
            counter += 1
            if counter > 3:
                LOG.info('Session timeout.')
                conn.shutdown(socket.SHUT_RDWR)
                conn.close()

                LOG.info('Listening on host %r, port %r' % (host, port))

                conn, addr = s.accept()
                counter = 0
                handler.get_fun = conn.recv
                handler.put_fun = conn.send
                handler.do_version(None)
            continue

        handler.handle_input(data)
        LOG.info("Accepting incoming connection from %s" % (addr, ))
        try:
            data = conn.recv(1024)
        except socket.error, exc:
            LOG.warning("Socket error. Reinitialising.: %s" % exc)
            conn, addr = s.accept()
            counter = 0
            handler.get_fun = conn.recv
            handler.put_fun = conn.send
            handler.do_version(None)

            LOG.info("Accepting incoming connection from %s" % (addr, ))

        if data.strip() == 'quit':
            LOG.info('Client requested session end. Closing connection.')
            conn.shutdown(socket.SHUT_RDWR)
            conn.close()

            LOG.info('Listening on host %r, port %r' % (host, port))

            conn, addr = s.accept()
            counter = 0
            handler.get_fun = conn.recv
            handler.put_fun = conn.send
            handler.do_version(None)

            LOG.info("Accepting incoming connection from %s" % (addr, ))

            continue

        handler.handle_input(data)

    sys.exit(retcode)
def main():
    """
    The main entry point of the application
    """
    options, args = get_options()

    # Handle logging as early as possible.
    if options.log_dir:
        if not exists(options.log_dir):
            raise IOError('[Errno 2] No such file or directory: %r' % (
                options.log_dir))
        # set up logging if requested
        root_logger = logging.getLogger()
        root_logger.setLevel(logging.NOTSET)  # TODO: Make configurable

    # Start either the "stdin" interface or the socket daemon, depending on
    # whether a port was given on startup.
    if not options.port:
        process_stdin(options)
    else:
        process_socket(options)


if __name__ == '__main__':