
Merge branch 'master' of github.com:munin-monitoring/contrib

Commit bcb9ed82ea by Lasse Karstensen, 2012-09-17 12:08:31 +02:00
345 changed files with 9870 additions and 10051 deletions


@ -34,6 +34,25 @@ This serves as a repository for examples of various configs. You know, the ''lea
## Notes to contributors
We like to have ''elementary'' commits (a good rationale is: one per Changelog entry), as it is much easier to manage for reviewing. Debugging is also usually easier that way.
### Commits, Comments & Pull requests
So please **don't** be afraid to make as many commits as needed.
We like to have ''elementary'' commits as it is much easier to manage for reviewing and debugging.
So please **don't** be afraid to make **as many** commits as needed. Merging many commits is as easy
as merging one, if not easier.
A good rule is that each commit should have a one-line commit comment as its first line.
The following lines are optional and should only explain ''why'' it is done this particular way.
On the other hand, pull requests can group many commits at once.
Just try to explain in the pull request comment ''why'' we should merge it (if it's not obvious).
Tim Pope wrote a [very nice tutorial](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) on making good commit comments.
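For illustration only, a hypothetical commit message in that spirit (the file and change are invented) could look like:
plugins/example: guard against division by zero in the value calculation
The interval can legitimately be zero when munin-run is invoked twice
within the same second, so skip the calculation instead of dying.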
### Licenses
All the code here is licensed with the same terms as munin itself (GPLv2), unless specified otherwise inside a file.
In all cases the code shall have an OSI-compatible license. Submitting a pull request implies that you agree to this.
This change was made on Jun 1st 2012. If you wrote some code earlier and you do not agree to the new licensing default, you can:
- submit a licensing-change pull request
- submit a removal pull request


@ -150,7 +150,7 @@ while (1) {
$share->lock(LOCK_EX);
# get data (may be updated by other loggers too)
my %old=%{thaw $share->fetch};
my %old=eval{%{thaw($share->fetch)}}; # using eval to suppress thaw error on empty string at the first run
foreach my $vpm (keys %temp){
# merge values


@ -39,7 +39,7 @@ while (<STDIN>) {
my ($vhost,$port,$method,$bytes,$time,$status)=split(/\s/,$_);
# sanity check
next unless m/^([\d\w.-_]+\s){5}([\d\w.-_]+$)/;
next unless m/^([\d\w\.\-_]+\s){5}([\d\w\.\-_]+$)/; # escaped "." and "-"
$time=sprintf("%d",$time/1000); # microsec to millisec
# sitename to munin fieldname
@ -85,7 +85,7 @@ sub periodic_write {
$share->lock(LOCK_EX);
# get data (may be updated by other loggers too)
my %old=%{thaw $share->fetch};
my %old=eval{%{thaw($share->fetch)}}; # using eval to suppress thaw error on empty string at the first run
foreach my $vpm (keys %temp){
# merge values
@ -120,4 +120,4 @@ sub periodic_write {
# parse/write every n seconds
alarm $nsec;
}
}


@ -80,7 +80,7 @@ my $share = IPC::ShareLite->new(
) or die $!;
my %data=%{thaw $share->fetch};
my %data=eval{%{thaw($share->fetch)}}; # using eval to suppress thaw error on empty string at the first run
if ( defined $ARGV[0] and $ARGV[0] eq "autoconf" ) {
if (scalar(keys %data)>0) {


@ -1,36 +0,0 @@
#!/bin/sh
# -*- sh -*-
#
# Plugin to monitor the number of PHP processes on the machine.
#
# Copyright Khalid Baheyeldin 2009 http://2bits.com
#
# Parameters:
#
# config (required)
# autoconf (optional - used by munin-config)
#
# Magic markers (optional - used by munin-config and some installation
# scripts):
#%# family=manual
#%# capabilities=autoconf
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo 'graph_title Number of php-cgi processes'
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel number of php-cgi processes'
echo 'graph_category apache'
echo 'graph_info This graph shows the number of php-cgi processes in the system.'
echo 'php_processes.label php-cgi'
echo 'php_processes.draw LINE2'
echo 'php_processes.info The current number of php-cgi processes.'
exit 0
fi
echo -n "php_processes.value "
/bin/ps ax | /usr/bin/grep -i php-cgi | /usr/bin/grep -v grep | /usr/bin/wc -l | /usr/bin/sed 's/\t +//' | /usr/bin/sed 's/ *//'

plugins/apt/deb_packages/.gitignore vendored Normal file

@ -0,0 +1,27 @@
*.py[co]
# Packages
*.egg
*.egg-info
dist
build
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
# Installer logs
pip-log.txt
# Unit test / coverage reports
.coverage
.tox
#Translations
*.mo
#Mr Developer
.mr.developer.cfg


@ -0,0 +1,69 @@
munin-debian-packages
=====================
## Munin Debian Plugin
With this plugin munin can give you a nice graph and some details about where your
packages come from and how old or new your installation is. Furthermore it tells
you how many updates you should have installed, how many packages are
outdated and where they come from.
![A week of upgradable packages](/munin-monitoring/contrib/raw/master/plugins/apt/deb_packages/example/packages_label_archive_upgradable-week.png)
You can sort installed or upgradable packages by 'archive', 'origin', 'site',
'label' and 'component', and even by several of them at once.
The script uses caching because it is quite expensive. It saves its output to a
cache file and checks on each run whether the dpkg status file or a downloaded
Packages file has changed. If one of them has changed, it runs again; if not, it
gives you the cached version.
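As a rough sketch of that caching idea (an illustration only, not the plugin's actual code; the watched paths, cache location and helper names are assumptions):

import glob
import os

# assumed locations; the real plugin derives these from MUNIN_PLUGSTATE and apt's list directory
CACHE_FILE = "/var/lib/munin/plugin-state/deb_packages.state"
WATCHED = ["/var/lib/dpkg/status"] + glob.glob("/var/lib/apt/lists/*Packages")

def cache_is_fresh():
    """Return True if the cached output is newer than every watched input file."""
    if not os.path.isfile(CACHE_FILE):
        return False
    cache_mtime = os.stat(CACHE_FILE).st_mtime
    newest_input = max(os.stat(path).st_mtime for path in WATCHED)
    return cache_mtime >= newest_input

# a run would then either print the cache file as-is or regenerate it and write a new one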
### Installation
This plugin has been tested on Debian wheezy and squeeze. If you want to use it
on older installations, tell me whether it works or which errors you got. It
should run with python-apt 0.7 and python 2.5 or later.
Install python-apt, check out this git repository and copy the plugin into place:
aptitude install python-apt
git clone git://github.com/munin-monitoring/contrib.git
cd contrib/plugins/apt/deb_packages
sudo cp deb_packages.py /etc/munin/plugins
sudo cp deb_packages.munin-conf /etc/munin/plugin-conf.d/deb_packages
### Configuration
If you copied deb_packages.munin-conf to plugin-conf.d, you have a starting point.
A typical configuration looks like this:
[deb_packages]
# plugin is quite expensive and has to write statistics to cache output
# so it has to write to plugins.cache
user munin
# Package lists up to this size are printed as extra information to munin.extinfo
env.MAX_LIST_SIZE_EXT_INFO 50
# Maximum age in seconds the $CACHE_FILE may have. If it is older, the script updates it
# default if not set is 3540 (one hour)
# at the moment this is not used, the plugin always runs (if munin calls it)
#
env.CACHE_FILE_MAX_AGE 3540
# All these numbers are only for sorting, so you can use env.graph01_sort_by_0
# and env.graph01_sort_by_2 without using env.graph01_sort_by_1.
# sort_by values ...
# possible values are 'label', 'archive', 'origin', 'site', 'component'
env.graph00_type installed
env.graph00_sort_by_0 label
env.graph00_sort_by_1 archive
env.graph00_show_ext_0 origin
env.graph00_show_ext_1 site
env.graph01_type upgradable
env.graph01_sort_by_0 label
env.graph01_sort_by_1 archive
env.graph01_show_ext_0 origin
env.graph01_show_ext_1 site
You can sort by one or several of these possible values.
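A minimal sketch of how such numbered environment variables can be collapsed into per-graph settings (an illustration of the numbering scheme only; the regular expression and variable names below are assumptions, not the plugin's exact code):

import os
import re
from collections import defaultdict

PATTERN = re.compile(r"graph(?P<graph>\d+)_(?P<key>type|sort_by|show_ext)_?(?P<idx>\d+)?$")

graphs = defaultdict(lambda: {"type": None, "sort_by": {}, "show_ext": {}})
for name, value in os.environ.items():
    m = PATTERN.match(name)
    if not m:
        continue  # not one of the graphNN_* settings
    cfg = graphs[m.group("graph")]
    if m.group("key") == "type":
        cfg["type"] = value
    else:
        # the numeric suffix is only used for ordering, gaps are allowed
        cfg[m.group("key")][m.group("idx")] = value

for cfg in graphs.values():
    cfg["sort_by"] = [v for _, v in sorted(cfg["sort_by"].items())]
    cfg["show_ext"] = [v for _, v in sorted(cfg["show_ext"].items())]
# graphs["00"] now holds {'type': 'installed', 'sort_by': ['label', 'archive'], 'show_ext': ['origin', 'site']}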


@ -0,0 +1,26 @@
[deb_packages]
# plugin is quite expensive and has to write statistics to cache output
# so it has to write to plugins.cache
user munin
# Package lists up to this size are printed as extra information to munin.extinfo
env.MAX_LIST_SIZE_EXT_INFO 50
# Maximum age in seconds the $CACHE_FILE may have. If it is older, the script updates it
# default if not set is 3540 (one hour)
env.CACHE_FILE_MAX_AGE 3540
# sort_by values ...
# possible values are 'label', 'archive', 'origin', 'site', FIXME
env.graph00_type installed
env.graph00_sort_by_0 label
env.graph00_sort_by_1 archive
env.graph00_show_ext_0 origin
env.graph00_show_ext_1 site
env.graph01_type upgradable
env.graph01_sort_by_0 label
env.graph01_sort_by_1 archive
env.graph01_show_ext_0 origin
env.graph01_show_ext_1 site


@ -0,0 +1,857 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
A munin plugin that prints archives and their upgradable packages
TODO: make it usable and readable as a commandline tool
(-i) interactive
NICETOHAVE
TODO: separate into 2 graphs
how old is my deb installation
sorting a packet to the oldest archive
sorting a packet to the newest archive
(WONTFIX unless someone asks for)
TODO:
adding alternative names for archives "stable -> squeeze"
TODO: add gray as
foo.colour 000000
to 'now', '', '', '', '', 'Debian dpkg status file'
TODO: update only if system was updated (aptitude update has been run)
check modification date of /var/cache/apt/pkgcache.bin
cache file must not be older than mod_date of pkgcache.bin + X
TODO: shorten ext_info with getShortestConfigOfOptions
TODO: check whether cachefile matches the config
i have no clever idea to do this without 100 lines of code
BUG: If a package is upgraded and brings in new dependencies,
these new deps will not be counted. WONTFIX
"""
import sys
import argparse
import apt_pkg
from apt.progress.base import OpProgress
from time import time, strftime
import os
import StringIO
import string
import re
from collections import defaultdict, namedtuple
from types import StringTypes, TupleType, DictType, ListType, BooleanType
class EnvironmentConfigBroken(Exception): pass
# print environmental things
# for k,v in os.environ.iteritems(): print >> sys.stderr, "%r : %r" % (k,v)
def getEnv(name, default=None, cast=None):
"""
function to get environment variables, cast them and set defaults if they are not set
getEnv('USER', default='nouser') # 'HomerS'
getEnv('WINDOWID', cast=int) # 44040201
"""
try:
var = os.environ[name]
if cast is not None:
var = cast(var)
except KeyError:
# environment does not have this var
var = default
except:
# now probably the cast went wrong
print >> sys.stderr, "for environment variable %r, %r is no valid value"%(name, var)
var = default
return var
MAX_LIST_SIZE_EXT_INFO = getEnv('MAX_LIST_SIZE_EXT_INFO', default=50, cast=int)
""" Packagelists to this size are printed as extra Information to munin """
STATE_DIR = getEnv('MUNIN_PLUGSTATE', default='.')
CACHE_FILE = os.path.join(STATE_DIR, "deb_packages.state")
"""
There is no need to execute this script every 5 minutes.
The results are written to this file, so the next munin-run can read from it
CACHE_FILE is usually /var/lib/munin/plugin-state/debian_packages.state
"""
CACHE_FILE_MAX_AGE = getEnv('CACHE_FILE_MAX_AGE', default=3540, cast=int)
"""
Maximum age in seconds the $CACHE_FILE may have. If it is older, the script updates it
"""
def Property(func):
return property(**func())
class Apt(object):
"""
lazy helper class needed in this statistics program, which holds all the apt_pkg stuff
"""
def __init__(self):
# init packagesystem
apt_pkg.init_config()
apt_pkg.init_system()
# NullProgress: we do not want progress info in a munin plugin
# the documented None did not work
self._cache = None
self._depcache = None
self._installedPackages = None
self._upgradablePackages = None
@Property
def cache():
doc = "apt_pkg.Cache instance, lazy instantiated"
def fget(self):
class NullProgress(OpProgress):
""" used for do not giving any progress info,
while doing apt things used, cause documented
use of None as OpProgress did not worked in
python-apt 0.7
"""
def __init__(self):
self.op=''
self.percent=0
self.subop=''
def done(self):
pass
def update(*args,**kwords):
pass
if self._cache is None:
self._cache = apt_pkg.Cache(NullProgress())
return self._cache
return locals()
@Property
def depcache():
doc = "apt_pkg.DepCache object"
def fget(self):
if self._depcache is None:
self._depcache = apt_pkg.DepCache(self.cache)
return self._depcache
return locals()
@Property
def installedPackages():
doc = """apt_pkg.PackageList with installed Packages
it is a simple ListType with Elements of apt_pkg.Package
"""
def fget(self):
""" returns a apt_pkg.PackageList with installed Packages
it is a simple ListType with Elements of apt_pkg.Package
"""
if self._installedPackages is None:
self._installedPackages = []
for p in self.cache.packages:
if not ( p.current_state == apt_pkg.CURSTATE_NOT_INSTALLED or
p.current_state == apt_pkg.CURSTATE_CONFIG_FILES ):
self._installedPackages.append(p)
return self._installedPackages
return locals()
@Property
def upgradablePackages():
doc = """apt_pkg.PackageList with Packages that are upgradable
it is a simple ListType with Elements of apt_pkg.Package
"""
def fget(self):
if self._upgradablePackages is None:
self._upgradablePackages = []
for p in self.installedPackages:
if self.depcache.is_upgradable(p):
self._upgradablePackages.append(p)
return self._upgradablePackages
return locals()
apt = Apt()
""" global instance of apt data, used here
apt.cache
apt.depcache
apt.installedPackages
apt.upgradablePackages
initialisation is lazy
"""
def weightOfPackageFile(detail_tuple, option_tuple):
"""
calculates a weight, you can sort with
if detail_tuple is: ['label', 'archive']
option_tuple is: ['Debian', 'unstable']
it calculates
sortDict['label']['Debian'] * multiplierDict['label']
+ sortDict['archive']['unstable'] * multiplierDict['archive']
= 10 * 10**4 + 50 * 10**8
= 5000100000
"""
val = 0L
for option, detail in zip(option_tuple, detail_tuple):
optionValue = PackageStat.sortDict[option][detail]
val += optionValue * PackageStat.multiplierDict[option]
return val
def Tree():
""" Tree type generator
you can put data at the end of a twig
a = Tree()
a['a']['b']['c'] # creates the tree of depth 3
a['a']['b']['d'] # creates another twig of the tree
c
a b <
d
"""
return TreeTwig(Tree)
class TreeTwig(defaultdict):
def __init__(self, defaultFactory):
super(TreeTwig, self).__init__(defaultFactory)
def printAsTree(self, indent=0):
for k, tree in self.iteritems():
print " " * indent, repr(k)
if isinstance(tree, TreeTwig):
tree.printAsTree(indent+1)
else:
print tree
def printAsLine(self):
print self.asLine()
def asLine(self):
values = ""
for key, residue in self.iteritems():
if residue:
values += " %r" % key
if isinstance(residue, TreeTwig):
if len(residue) == 1:
values += " - %s" % residue.asLine()
else:
values += "(%s)" % residue.asLine()
else:
values += "(%s)" % residue
else:
values += " %r," % key
return values.strip(' ,')
def getShortestConfigOfOptions(optionList = ['label', 'archive', 'site']):
"""
tries to find the order to print a tree of the optionList
with the local repositories with the shortest line
possible options are:
'component'
'label'
'site'
'archive'
'origin'
'architecture'
Architecture values are usually the same and can be ignored.
It tells you which representation of a tree as a line is the shortest.
This is needed to determine which ext_info line would be the shortest,
i.e. to write the shortest readable output.
"""
l = optionList # just because l is much shorter
# creating possible iterations
fieldCount = len(optionList)
if fieldCount == 1:
selection = l
elif fieldCount == 2:
selection = [(x,y)
for x in l
for y in l if x!=y ]
elif fieldCount == 3:
selection = [(x,y,z)
for x in l
for y in l if x!=y
for z in l if z!=y and z!=x]
else:
raise Exception("NotImplemented for size %s" % fieldCount)
# creating OptionsTree, and measuring the length of it on a line
# for every iteration
d = {}
for keys in selection:
d[keys] = len( getOptionsTree(apt.cache, keys).asLine() )
# finding the shortest variant
r = min( d.items(), key=lambda x: x[1] )
return list(r[0]), r[1]
def getOptionsTree(cache, keys=None):
"""
t = getOptionsTree(cache, ['archive', 'site', 'label'])
generates a dict of dicts of sets like:
...
it tells you:
...
"""
t = Tree()
for f in cache.file_list:
# ignoring translation indexes ...
if f.index_type != 'Debian Package Index' and f.index_type !='Debian dpkg status file':
continue
# ignoring files with 0 size
if f.size == 0L:
continue
# creating default dict in case of secondary_options are empty
d = t
for key in keys:
if not key:
print f
dKey = f.__getattribute__(key)
d = d[dKey]
return t
def createKey(key, file):
"""
createKey( (archive, origin), apt.pkg_file)
returns ('unstable', 'Debian')
"""
if type(key) in StringTypes:
return file.__getattribute__(key)
elif type(key) in (TupleType, ListType):
nKey = tuple()
for pKey in key:
nKey = nKey.__add__((file.__getattribute__(pKey),))
return nKey
else:
raise Exception("Not implemented for keytype %s" % type(key))
def getOptionsTree2(cache, primary=None, secondary=None):
"""
primary must be an iterable or a StringType
secondary must be an iterable or a StringType
t1 = getOptionsTree2(apt.cache, 'origin', ['site', 'archive'])
t2 = getOptionsTree2(apt.cache, ['origin', 'archive'], ['site', 'label'])
"""
if type(secondary) in StringTypes:
secondary = [secondary]
if type(primary) in StringTypes:
primary = [primary]
t = Tree()
for file in cache.file_list:
# ignoring translation indexes ...
if file.index_type not in ['Debian Package Index', 'Debian dpkg status file']:
continue
# ignoring files with 0 size
if file.size == 0L:
continue
# key to first Dict in Tree is a tuple
pKey = createKey(primary, file)
d = t[pKey]
if secondary is not None:
# for no, sKey in enumerate(secondary):
# dKey = file.__getattribute__(sKey)
# if no < len(secondary)-1:
# d = d[dKey]
# if isinstance(d[dKey], DictType):
# d[dKey] = []
# d[dKey].append(file)
for sKey in secondary:
dKey = file.__getattribute__(sKey)
d = d[dKey]
return t
#def getAttributeSet(iterable, attribute):
# return set(f.__getattribute__(attribute) for f in iterable)
#
#def getOrigins(cache):
# return getAttributeSet(cache.file_list, 'origin')
#
#def getArchives(cache):
# return getAttributeSet(cache.file_list, 'archive')
#
#def getComponents(cache):
# return getAttributeSet(cache.file_list, 'component')
#
#def getLabels(cache):
# return getAttributeSet(cache.file_list, 'label')
#
#def getSites(cache):
# return getAttributeSet(cache.file_list, 'site')
#
class PackageStat(defaultdict):
""" defaultdict with Tuple Keys of (label,archive) containing lists of ArchiveFiles
{('Debian Backports', 'squeeze-backports'): [...]
('The Opera web browser', 'oldstable'): [...]
('Debian', 'unstable'): [...]}
with some abilities to print output munin likes
"""
sortDict = { 'label': defaultdict( lambda : 20,
{'Debian': 90,
'' : 1,
'Debian Security' : 90,
'Debian Backports': 90}),
'archive': defaultdict( lambda : 5,
{ 'now': 0,
'experimental': 10,
'unstable': 50,
'sid': 50,
'testing': 70,
'wheezy': 70,
'squeeze-backports': 80,
'stable-backports': 80,
'proposed-updates': 84,
'stable-updates': 85,
'stable': 90,
'squeeze': 90,
'oldstable': 95,
'lenny': 95, } ),
'site': defaultdict( lambda : 5, { }),
'origin': defaultdict( lambda : 5, { 'Debian' : 90, }),
'component': defaultdict( lambda : 5, {
'non-free': 10,
'contrib' : 50,
'main' : 90, }),
}
"""
Values to sort options (label, archive, origin ...)
(0..99) is allowed.
(this is needed for other graphs to calc aggregated weights)
higher means older and more official or better
"""
dpkgStatusValue = { 'site': '', 'origin': '', 'label': '', 'component': '', 'archive': 'now' }
""" a dict to recognize options that coming from 'Debian dpkg status file' """
viewSet = set(['label', 'archive', 'origin', 'site', 'component'])
multiplierDict = { 'label' : 10**8,
'archive' : 10**4,
'site' : 10**0,
'origin' : 10**6,
'component' : 10**2,
}
"""
Dict that stores multipliers
to compile a sorting value for each archivefile
"""
def weight(self, detail_tuple):
return weightOfPackageFile(detail_tuple=detail_tuple, option_tuple=tuple(self.option))
def __init__(self, packetHandler, apt=apt, sortBy=None, extInfo=None, includeNow=True, *args, **kwargs):
assert isinstance(packetHandler, PacketHandler)
self.packetHandler = packetHandler
self.apt = apt
self.option = sortBy if sortBy is not None else ['label', 'archive']
optionsMentionedInExtInfo = extInfo if extInfo is not None else list(self.viewSet - set(self.option))
self.options = getOptionsTree2(apt.cache, self.option, optionsMentionedInExtInfo)
self.options_sorted = self._sorted(self.options.items())
super(PackageStat, self).__init__(lambda: [], *args, **kwargs)
translationTable = string.maketrans(' -.', '___')
""" chars that must not exist in a munin system name"""
@classmethod
def generate_rrd_name_from(cls, string):
return string.translate(cls.translationTable)
def _sorted(self, key_value_pairs):
return sorted(key_value_pairs, key=lambda(x): self.weight(x[0]), reverse=True)
@classmethod
def generate_rrd_name_from(cls, keyTuple):
assert isinstance(keyTuple, TupleType) or isinstance(keyTuple, ListType)
# we have to check, whether all tuple-elements have values
l = []
for key in keyTuple:
key = key if key else "local"
l.append(key)
return string.join(l).lower().translate(cls.translationTable)
def addPackage(self, sourceFile, package):
if self.packetHandler.decider(package):
self.packetHandler.adder(package, self)
@classmethod
def configD(cls, key, value):
i = { 'rrdName': cls.generate_rrd_name_from(key),
'options': string.join(key,'/'),
'info' : "from %r" % value.asLine() }
return i
def configHead(self):
d = { 'graphName': "packages_"+ self.generate_rrd_name_from(self.option),
'option': string.join(self.option, '/'),
'type' : self.packetHandler.type
}
return "\n"\
"multigraph {graphName}_{type}\n"\
"graph_title {type} Debian packages sorted by {option}\n"\
"graph_info {type} Debian packages sorted by {option} of its repository\n"\
"graph_category debian\n"\
"graph_vlabel packages".format(**d)
def printConfig(self):
print self.configHead()
for options, item in self.options_sorted:
if not self.packetHandler.includeNow and self.optionIsDpkgStatus(details=options):
continue
i = self.configD(options, item)
print "{rrdName}.label {options}".format(**i)
print "{rrdName}.info {info}".format(**i)
print "{rrdName}.draw AREASTACK".format(**i)
def optionIsDpkgStatus(self, details, options=None):
"""
give it details and options and it tells you whether the details look like they come from
a 'Debian dpkg status file'.
"""
# setting defaults
if options is None:
options = self.option
assert type(details) in (TupleType, ListType), 'details must be tuple or list not %r' % type(details)
assert type(options) in (TupleType, ListType), 'options must be tuple or list not %r' % type(options)
assert len(details) == len(options)
isNow = True
for det, opt in zip(details, options):
isNow &= self.dpkgStatusValue[opt] == det
return isNow
def printValues(self):
print "\nmultigraph packages_{option}_{type}".format(option=self.generate_rrd_name_from(self.option),
type=self.packetHandler.type)
for options, item in self.options_sorted:
if not self.packetHandler.includeNow and self.optionIsDpkgStatus(details=options):
continue
i = self.configD(options, item)
i['value'] = len(self.get(options, []))
print "{rrdName}.value {value}".format(**i)
self._printExtInfoPackageList(options)
def _printExtInfoPackageList(self, options):
rrdName = self.generate_rrd_name_from(options)
packageList = self[options]
packageCount = len( packageList )
if 0 < packageCount <= MAX_LIST_SIZE_EXT_INFO:
print "%s.extinfo " % rrdName,
for item in packageList:
print self.packetHandler.extInfoItemString.format(i=item),
print
packetHandlerD = {}
""" Dictionary for PacketHandlerclasses with its 'type'-key """
class PacketHandler(object):
"""
Baseclass, that represents the Interface which is used
"""
type = None
includeNow = None
extInfoItemString = None
def __init__(self, apt):
self.apt = apt
def decider(self, package, *args, **kwords):
"""
Function works as decider
if it returns True, the package is added
if it returns False, the package is not added
"""
pass
def adder(self, package, packageStat, *args, **kwords):
"""
take the package and add it to the packageStat dictionary in a defined way
"""
pass
@classmethod
def keyOf(cls, pFile):
"""
calculates the weight of a apt_pkg.PackageFile
"""
options = ('origin', 'site', 'archive', 'component', 'label')
details = tuple()
for option in options:
details = details.__add__((pFile.__getattribute__(option),))
return weightOfPackageFile(details, options)
class PacketHandlerUpgradable(PacketHandler):
type='upgradable'
includeNow = False
extInfoItemString = " {i[0].name} <{i[1]} -> {i[2]}>"
def decider(self, package, *args, **kwords):
return self.apt.depcache.is_upgradable(package)
def adder(self, package, packageStat, *args, **kwords):
options = tuple(packageStat.option)
candidateP = self.apt.depcache.get_candidate_ver(package)
candidateFile = max(candidateP.file_list, key=lambda f: self.keyOf(f[0]) )[0]
keys = createKey(options, candidateFile)
# this item (as i) is used for input in extInfoItemString
item = (package, package.current_ver.ver_str, candidateP.ver_str)
packageStat[keys].append(item)
# registering PackageHandler for Usage
packetHandlerD[PacketHandlerUpgradable.type] = PacketHandlerUpgradable
class PacketHandlerInstalled(PacketHandler):
type = 'installed'
includeNow = True
extInfoItemString = " {i.name}"
def decider(self, package, *args, **kwords):
# this function is called with each installed package
return True
def adder(self, package, packageStat, *args, **kwords):
options = tuple(packageStat.option)
candidateP = self.apt.depcache.get_candidate_ver(package)
candidateFile = max(candidateP.file_list, key=lambda f: self.keyOf(f[0]) )[0]
keys = createKey(options, candidateFile)
# this item (as i) is used for input in extInfoItemString
item = package
packageStat[keys].append(item)
# registering PackageHandler for Usage
packetHandlerD[PacketHandlerInstalled.type] = PacketHandlerInstalled
class Munin(object):
def __init__(self, commandLineArgs=None):
self.commandLineArgs = commandLineArgs
self.argParser = self._argParser()
self.executionMatrix = {
'config': self.config,
'run' : self.run,
'autoconf' : self.autoconf,
}
self.envConfig = self._envParser()
self._envValidater()
# print >> sys.stderr, self.envConfig
self.statL = []
if self.envConfig:
for config in self.envConfig:
packetHandler = packetHandlerD[config['type']](apt)
packageStat = PackageStat(apt=apt,
packetHandler = packetHandler,
sortBy = config['sort_by'],
extInfo = config['show_ext'])
self.statL.append(packageStat)
if not self.statL:
print "# no munin config found in environment vars"
def execute(self):
self.args = self.argParser.parse_args(self.commandLineArgs)
self.executionMatrix[self.args.command]()
def _cacheIsOutdated(self):
"""
# interesting files are pkgcache.bin (if it exists (it is deleted after apt-get clean))
# if a package is installed or upgraded, '/var/lib/dpkg/status' is changed
"""
if os.path.isfile(CACHE_FILE):
cacheMTime = os.stat(CACHE_FILE).st_mtime
else:
# no cache status file exists, so it _must_ be renewed
return True
# List of modify-times of different files
timeL = []
packageListsDir = "/var/lib/apt/lists"
files=os.listdir(packageListsDir)
packageFileL = [ file for file in files if file.endswith('Packages')]
for packageFile in packageFileL:
timeL.append(os.stat(os.path.join(packageListsDir, packageFile)).st_mtime)
dpkgStatusFile = '/var/lib/dpkg/status'
if os.path.isfile(dpkgStatusFile):
timeL.append(os.stat(dpkgStatusFile).st_mtime)
else:
raise Exception('DPKG-statusfile %r not found, really strange!!!'%dpkgStatusFile)
newestFileTimestamp = max(timeL)
age = newestFileTimestamp - cacheMTime
if age > 0:
return True
else:
# if we have made a timetravel, we update until we reached good times
if time() < newestFileTimestamp:
return True
return False
def _run_with_cache(self):
""" wrapper around _run with writing to file and stdout
a better way would be a 'shell' tee as stdout
"""
# cacheNeedUpdate = False
# if not self.args.nocache:
# # check, whether the cachefile has to be written again
# if os.path.isfile(CACHE_FILE):
# mtime = os.stat(CACHE_FILE).st_mtime
# age = time() - mtime
# cacheNeedUpdate = age < 0 or age > CACHE_FILE_MAX_AGE
# else:
# cacheNeedUpdate = True
if self._cacheIsOutdated() or self.args.nocache:
# save stdout
stdoutDef = sys.stdout
try:
out = StringIO.StringIO()
sys.stdout = out
# run writes now to new sys.stdout
print "# executed at %r (%r)" %(strftime("%s"), strftime("%c"))
self._run()
sys.stdout = stdoutDef
# print output to stdout
stdoutDef.write(out.getvalue())
# print output to CACHE_FILE
with open(CACHE_FILE,'w') as state:
state.write(out.getvalue())
except IOError as e:
if e.errno == 2:
sys.stderr.write("%s : %s" % (e.msg, CACHE_FILE))
# 'No such file or directory'
os.makedirs( os.path.dirname(CACHE_FILE) )
else:
sys.stderr.write("%r : %r" % (e, CACHE_FILE))
finally:
# restore stdout
sys.stdout = stdoutDef
else:
with open(CACHE_FILE,'r') as data:
print data.read()
def _run(self):
# p … package
# do the real work
for p in apt.installedPackages:
sourceFile = max(p.current_ver.file_list, key=lambda f: PacketHandler.keyOf(f[0]) )[0]
for packageStat in self.statL:
packageStat.addPackage(sourceFile, p)
# print munin output
for stat in self.statL:
stat.printValues()
def run(self):
if self.args.nocache:
self._run()
else:
self._run_with_cache()
def config(self):
for stat in self.statL:
stat.printConfig()
def autoconf(self):
print 'yes'
def _argParser(self):
parser = argparse.ArgumentParser(description="Show some statistics "\
"about debian packages installed on system by archive",
)
parser.set_defaults(command='run', debug=True, nocache=True)
parser.add_argument('--nocache', '-n', default=False, action='store_true',
help='do not use a cache file')
helpCommand = """
config ..... writes munin config
run ........ munin run (writes values)
autoconf ... writes 'yes'
"""
parser.add_argument('command', nargs='?',
choices=['config', 'run', 'autoconf', 'drun'],
help='mode munin wants to use. "run" is default' + helpCommand)
return parser
def _envParser(self):
"""
reads environment vars from [deb_packages] and generates
a list of dicts, each dict holds a set of settings made in
munin config.
[
{ 'type' = 'installed',
'sort_by' = ['label', 'archive'],
'show_ext' = ['origin', 'site'],
},
{ 'type' = 'upgraded',
'sort_by' = ['label', 'archive'],
'show_ext' = ['origin', 'site'],
}
]
"""
def configStartDict():
return { 'type': None,
'sort_by': dict(),
'show_ext' : dict(),
}
interestingVarNameL = [ var for var in os.environ if var.startswith('graph') ]
config = defaultdict(configStartDict)
regex = re.compile(r"graph(?P<graphNumber>\d+)_(?P<res>.*?)_?(?P<optNumber>\d+)?$")
for var in interestingVarNameL:
m = re.match(regex, var)
configPart = config[m.group('graphNumber')]
if m.group('res') == 'type':
configPart['type'] = os.getenv(var)
elif m.group('res') == 'sort_by':
configPart['sort_by'][m.group('optNumber')] = os.getenv(var)
elif m.group('res') == 'show_ext':
configPart['show_ext'][m.group('optNumber')] = os.getenv(var)
else:
print >> sys.stderr, "configuration option %r was ignored" % (var)
# we have now dicts for 'sort_by' and 'show_ext' keys
# changing them to lists
for graphConfig in config.itervalues():
graphConfig['sort_by'] = [val for key, val in sorted(graphConfig['sort_by'].items())]
graphConfig['show_ext'] = [val for key, val in sorted(graphConfig['show_ext'].items())]
# we do not want keynames, they are only needed for sorting environmentvars
return [val for key, val in sorted(config.items())]
def _envValidater(self):
""" takes the munin config and checks for valid configuration,
raises Exception if something is broken
"""
for graph in self.envConfig:
if graph['type'] not in ('installed', 'upgradable'):
print >> sys.stderr, \
"GraphType must be 'installed' or 'upgradable' but not %r"%(graph.type), \
graph
raise EnvironmentConfigBroken("Environment Config broken")
if not graph['sort_by']:
print >> sys.stderr, \
"Graph must be sorted by anything"
raise EnvironmentConfigBroken("Environment Config broken")
# check for valid options for sort_by
unusableOptions = set(graph['sort_by']) - PackageStat.viewSet
if unusableOptions:
print >> sys.stderr, \
"%r are not valid options for 'sort_by'" % (unusableOptions)
raise EnvironmentConfigBroken("Environment Config broken")
# check for valid options for show_ext
unusableOptions = set(graph['show_ext']) - PackageStat.viewSet
if unusableOptions:
print >> sys.stderr, \
"%r are not valid options for 'show_ext'" % (x)
raise EnvironmentConfigBroken("Environment Config broken")
if __name__=='__main__':
muninPlugin = Munin()
muninPlugin.execute()
# import IPython; IPython.embed()

Binary file (image, 22 KiB) added; not shown.

plugins/aris/aris_players Executable file

@ -0,0 +1,50 @@
#!/usr/bin/php
# Copyright (C) 2012 David Gagnon <djgagnon@wisc.edu>
# Plugin to monitor ARIS users
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Parameters:
#
# config (required)
#
#
#%# family=manual
<?php
$aris_db_host='localhost';
$aris_db_name='db';
$aris_db_user='user';
$aris_db_pass='password';
if ($argv[1]=='config'){
print "graph_title ARIS active players\n";
print "graph_vlabel Players Count\n";
print "graph_category ARIS\n";
print "players.label player count\n";
exit;
}
$sqlLink = mysql_connect($aris_db_host,$aris_db_user,$aris_db_pass) or die('MySQL authentication error');
mysql_select_db($aris_db_name) or die('MySQL Wrong Scheme Error');
$query = 'SELECT COUNT(DISTINCT player_id) AS count FROM player_log WHERE timestamp BETWEEN DATE_SUB(NOW(), INTERVAL 5 MINUTE) AND NOW()';
$result = mysql_query($query);
$numCurrentPlayersObject = mysql_fetch_object($result);
$numCurrentPlayers = $numCurrentPlayersObject->count;
echo 'players.value '. $numCurrentPlayers;
?>


@ -1,2 +0,0 @@
Check http://aouyar.github.com/PyMunin/ to get the most recent version of the
PyMunin Multi graph Munin Plugins and documentation.


@ -0,0 +1,31 @@
#! /bin/sh
# Munin plugin
# Gets number of active channels
# By Eugene Varnavsky
# Converted to /bin/sh by Steve Schnepp
# June 2012
# Version 1.0
#
# Run as root or make sure munin user can run 'asterisk -r' command
#
# [asterisk_channels]
# user root
#
# LGPL License
if [ "$1" = "autoconf" ]; then
[ -z $(which asterisk) ] && echo "no" || echo "yes"
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Asterisk Channels"
echo "graph_args --base 1000 -l 0"
echo "graph_vlabel Channels"
echo "graph_category asterisk"
echo "channels.label Channels"
exit 0
fi
asterisk -x "core show channels" | awk '/active channels/ { print "channels.value " $1 }'
exit 0

plugins/beboxsync/beboxstats Executable file

@ -0,0 +1,62 @@
#!/usr/bin/perl -w
use strict;
my ($Args) = @ARGV;
my $expecter = "/home/alex/bin/beboxstats.expect";
if ($Args) {
# work out line to grab
if ($Args eq 'autoconf') {
# Check the expect script that polls the router exists
unless ( -e $expecter ) {
print "no (Can't find expect script. Check value of \$expecter: $expecter)\n";
} else {
print "yes\n";
}
} elsif ($Args eq 'config') { # print out plugin parameters
printf("
graph_title bebox line stats
graph_vlabel deciBels
graph_category other
graph_info This graph shows the various line parameters
attenuationdownstream.label Downstream Attenuation
attenuationupstream.label Upstream Attenuation
margindownstream.label Downstream Noise Margin
marginupstream.label Upstream Noise Margin
outputpowerdownstream.label Downstream Output Power
outputpowerupstream.label Upstream Output Power
margindownstream.type GAUGE
outputpowerupstream.type GAUGE
attenuationdownstream.type GAUGE
marginupstream.type GAUGE
outputpowerdownstream.type GAUGE
attenuationupstream.type GAUGE
");
# .label is the Key on the graph
} else {
printf("Usage: $0
No arguments: print line stats
autoconf: print 'yes'
config: print config info for Munin\n");
}
} else {
# if no arguments, just fetch the data and print it out
my @insplitted = split(' ', `$expecter | grep dB`);
print "margindownstream.value $insplitted[3]\n";
print "marginupstream.value $insplitted[4]\n";
print "attenuationdownstream.value $insplitted[8]\n";
print "attenuationupstream.value $insplitted[9]\n";
print "outputpowerdownstream.value $insplitted[13]\n";
print "outputpowerupstream.value $insplitted[14]\n";
}


@ -0,0 +1,25 @@
#!/usr/bin/expect -f
# script to log on to a BeBox router [ST Speedtouch 780] and gather line stats
# set timeout for response from router to 30 seconds
set timeout 30
set router "host.or.ip.of.router"
set port "23"
set username "Administrator"
set password "routerpassword"
# telnet to $router on $port
spawn telnet $router $port
expect "Username :"
send "$username\r"
expect "Password :"
send "$password\r"
expect "}=>"
send "adsl info\r"
expect "}=>"
send "exit\r"

plugins/beboxsync/beboxsync Executable file

@ -0,0 +1,50 @@
#!/usr/bin/perl -w
# (C) Alex Dekker <me@ale.cx>
# License is GPL
use strict;
my ($Args) = @ARGV;
my $expecter = "/home/alex/bin/beboxstats.expect";
if ($Args) {
# work out line to grab
if ($Args eq 'autoconf') {
# Check the expect script that polls the router exists
unless ( -e $expecter ) {
print "no (Can't find expect script. Check value of \$expecter: $expecter)\n";
} else {
print "yes\n";
}
} elsif ($Args eq 'config') { # print out plugin parameters
printf("
graph_title bebox sync stats
graph_vlabel ATM kbps
graph_category other
graph_info This graph shows line sync speed
syncdownstream.label Downstream Sync Speed
syncupstream.label Upstream Sync Speed
syncdownstream.type GAUGE
syncupstream.type GAUGE
");
# .label is the Key on the graph
} else {
printf("Usage: $0
No arguments: print line stats
autoconf: print 'yes'
config: print config info for Munin\n");
}
} else {
# if no arguments, just fetch the data and print it out
my @insplitted = split(' ', `$expecter | grep stream`);
print "syncdownstream.value $insplitted[11]\n";
print "syncupstream.value $insplitted[15]\n";
}


@ -354,23 +354,23 @@ F</etc/munin/boinc/bar/gui_rpc_auth.cfg>.
These files are owned and readable by root, readable by group munin and not
readable by others.
There are 2 symbolic links to this plugin created in the munin plugins
directory (usually F</etc/munin/plugins/>): F<snmp_foo_boincprojs> and
F<snmp_bar_boincprojs>
directory (usually F</etc/munin/plugins/>): F<boincprojs_foo> and
F<boincprojs_bar>
[snmp_foo_boinc*]
[boincprojs_foo]
group munin
env.boinccmd /usr/local/bin/boinccmd
env.host foo
env.boincdir /etc/munin/boinc/foo
[snmp_bar_boinc*]
[boincprojs_bar]
group munin
env.boinccmd /usr/local/bin/boinccmd
env.host bar
env.boincdir /etc/munin/boinc/bar
This way the plugin can be used by Munin the same way as the Munin plugins
utilizing SNMP (although this plugin itself does not use SNMP).
This way the plugin can be used by Munin as a virtual node, akin to
SNMP and IPMI plugins.
=head1 BUGS


@ -1,8 +1,9 @@
#!/usr/bin/env python
"""=cut
=head1 NAME
celery_tasks_states - Munin plugin to monitor the number of Celery tasks in each state.
celery_tasks - Munin plugin to monitor the number of Celery tasks with specified names.
=head1 REQUIREMENTS
@ -16,20 +17,16 @@ Note: don't forget to enable sending of the events on the celery daemon - run it
Default configuration:
[celery_tasks_states]
env.api_url http://localhost:8989
env.workers all
None
If workers variable is not set or set to "all", task number for all the workers is monitored.
You can optionally set the workers variable to the string of hostnames you want to monitor separated by a comma.
You must set the name of at least one task you want to monitor (multiple names are separated by a comma).
For example:
[celery_tasks]
env.workers localhost,foo.bar.net,bar.foo.net
env.tasks myapp.tasks.SendEmailTask,myapp2.tasks.FetchUserDataTask
This would only monitor the number of tasks for the workers with the hostnames "localhost", "foo.bar.net" and "bar.foo.net"
This would monitor the number of tasks for the tasks named "myapp.tasks.SendEmailTask" and "myapp2.tasks.FetchUserDataTask".
=head1 MAGIC MARKERS
@ -64,11 +61,13 @@ URL_ENDPOINTS = {
'task_details': '/api/task/name/%s',
}
TASK_STATES = (
'task-accepted',
'task-received',
'task-succeeded',
'task-failed',
'task-retried',
'PENDING',
'RECEIVED',
'STARTED',
'SUCCESS',
'FAILURE',
'REVOKED',
'RETRY'
)
def get_data(what, api_url, *args):
@ -89,66 +88,46 @@ def check_web_server_status(api_url):
print 'Could not connect to the celerymon webserver'
sys.exit(-1)
def clean_state_name(state_name):
return state_name.replace('task-', '')
def clean_task_name(task_name):
return task_name.replace('.', '_')
# Config
def print_config(workers = None):
if workers:
print 'graph_title Celery tasks in each state [workers = %s]' % (', ' . join(workers))
else:
print 'graph_title Celery tasks in each state'
def print_config(task_names):
print 'graph_title Celery tasks'
print 'graph_args --lower-limit 0'
print 'graph_scale no'
print 'graph_vlabel tasks per ${graph_period}'
print 'graph_category celery'
for name in TASK_STATES:
name = clean_state_name(name)
print '%s.label %s' % (name, name)
print '%s.type DERIVE' % (name)
print '%s.min 0' % (name)
print '%s.info number of %s tasks' % (name, name)
for name in task_names:
print '%s.label %s' % (clean_task_name(name), name)
print '%s.type DERIVE' % (clean_task_name(name))
print '%s.min 0' % (clean_task_name(name))
print '%s.info number of %s tasks' % (clean_task_name(name), name)
# Values
def print_values(workers = None, api_url = None):
data = get_data('tasks', api_url)
counters = dict([(key, 0) for key in TASK_STATES])
for task_name, task_data in data.iteritems():
for entry in task_data:
if not entry.get('state', None):
continue
state = entry.get('state', None)
hostname = entry.get('hostname', None)
if workers and hostname not in workers:
continue
counters[state] += 1
for name in TASK_STATES:
name_cleaned = clean_state_name(name)
value = counters[name]
print '%s.value %d' % (name_cleaned, value)
def print_values(task_names = None, api_url = None):
for task_name in task_names:
count = len(get_data('task_details', api_url, task_name))
print '%s.value %d' % (clean_task_name(task_name), count)
if __name__ == '__main__':
workers = os.environ.get('workers', 'all')
task_names = os.environ.get('tasks', None)
api_url = os.environ.get('api_url', API_URL)
check_web_server_status(api_url)
if workers in [None, '', 'all']:
workers = None
else:
workers = workers.split(',')
if not task_names:
print 'You need to define at least one task name'
sys.exit(-1)
task_names = task_names.split(',')
if len(sys.argv) > 1:
if sys.argv[1] == 'config':
print_config(workers)
print_config(task_names)
elif sys.argv[1] == 'autoconf':
print 'yes'
else:
print_values(workers, api_url)
print_values(task_names, api_url)


@ -1,131 +0,0 @@
#!/usr/bin/env python
"""=cut
=head1 NAME
celery_tasks - Munin plugin to monitor the number of Celery tasks with specified names.
=head1 REQUIREMENTS
- Python
- celery (http://celeryproject.org/)
- celerymon (http://github.com/ask/celerymon)
Note: don't forget to enable sending of the events on the celery daemon - run it with the --events option
=head1 CONFIGURATION
Default configuration:
None
You must set the name of at least one task you want to monitor (multiple names are separated by a comma).
For example:
[celery_tasks]
env.tasks myapp.tasks.SendEmailTask,myapp2.tasks.FetchUserDataTask
This would monitor the number of tasks for the tasks named "myapp.tasks.SendEmailTask" and "myapp2.tasks.FetchUserDataTask".
=head1 MAGIC MARKERS
#%# family=manual
#%# capabilities=autoconf
=head1 AUTHOR
Tomaz Muraus (http://github.com/Kami/munin-celery)
=head1 LICENSE
GPLv2
=cut"""
import os
import sys
import urllib
try:
import json
except:
import simplejson as json
API_URL = 'http://localhost:8989'
URL_ENDPOINTS = {
'workers': '/api/worker/',
'worker_tasks': '/api/worker/%s/tasks',
'tasks': '/api/task/',
'task_names': '/api/task/name/',
'task_details': '/api/task/name/%s',
}
TASK_STATES = (
'task-accepted',
'task-received',
'task-succeeded',
'task-failed',
'task-retried',
)
def get_data(what, api_url, *args):
try:
request = urllib.urlopen('%s%s' % (api_url, \
URL_ENDPOINTS[what] % (args)))
response = request.read()
return json.loads(response)
except IOError:
print 'Could not connect to the celerymon webserver'
sys.exit(-1)
def check_web_server_status(api_url):
try:
request = urllib.urlopen(api_url)
response = request.read()
except IOError:
print 'Could not connect to the celerymon webserver'
sys.exit(-1)
def clean_task_name(task_name):
return task_name.replace('.', '_')
# Config
def print_config(task_names):
print 'graph_title Celery tasks'
print 'graph_args --lower-limit 0'
print 'graph_scale no'
print 'graph_vlabel tasks per ${graph_period}'
print 'graph_category celery'
for name in task_names:
print '%s.label %s' % (clean_task_name(name), name)
print '%s.type DERIVE' % (clean_task_name(name))
print '%s.min 0' % (clean_task_name(name))
print '%s.info number of %s tasks' % (clean_task_name(name), name)
# Values
def print_values(task_names = None, api_url = None):
for task_name in task_names:
count = len(get_data('task_details', api_url, task_name))
print '%s.value %d' % (clean_task_name(task_name), count)
if __name__ == '__main__':
task_names = os.environ.get('tasks', None)
api_url = os.environ.get('api_url', API_URL)
check_web_server_status(api_url)
if not task_names:
print 'You need to define at least one task name'
sys.exit(-1)
task_names = task_names.split(',')
if len(sys.argv) > 1:
if sys.argv[1] == 'config':
print_config(task_names)
elif sys.argv[1] == 'autoconf':
print 'yes'
else:
print_values(task_names, api_url)


@ -0,0 +1,152 @@
#!/usr/bin/env python
"""=cut
=head1 NAME
celery_tasks_states - Munin plugin to monitor the number of Celery tasks in each state.
=head1 REQUIREMENTS
- Python
- celery (http://celeryproject.org/)
- celerymon (http://github.com/ask/celerymon)
Note: don't forget to enable sending of the events on the celery daemon - run it with the --events option
=head1 CONFIGURATION
Default configuration:
[celery_tasks_states]
env.api_url http://localhost:8989
env.workers all
If workers variable is not set or set to "all", task number for all the workers is monitored.
You can optionally set the workers variable to the string of hostnames you want to monitor separated by a comma.
For example:
[celery_tasks]
env.workers localhost,foo.bar.net,bar.foo.net
This would only monitor the number of tasks for the workers with the hostnames "localhost", "foo.bar.net" and "bar.foo.net"
=head1 MAGIC MARKERS
#%# family=manual
#%# capabilities=autoconf
=head1 AUTHOR
Tomaz Muraus (http://github.com/Kami/munin-celery)
=head1 LICENSE
GPLv2
=cut"""
import os
import sys
import urllib
try:
import json
except:
import simplejson as json
API_URL = 'http://localhost:8989'
URL_ENDPOINTS = {
'workers': '/api/worker/',
'worker_tasks': '/api/worker/%s/tasks',
'tasks': '/api/task/',
'task_names': '/api/task/name/',
'task_details': '/api/task/name/%s',
}
TASK_STATES = (
'PENDING',
'RECEIVED',
'STARTED',
'SUCCESS',
'FAILURE',
'REVOKED',
'RETRY'
)
def get_data(what, api_url, *args):
try:
request = urllib.urlopen('%s%s' % (api_url, \
URL_ENDPOINTS[what] % (args)))
response = request.read()
return json.loads(response)
except IOError:
print 'Could not connect to the celerymon webserver'
sys.exit(-1)
def check_web_server_status(api_url):
try:
request = urllib.urlopen(api_url)
response = request.read()
except IOError:
print 'Could not connect to the celerymon webserver'
sys.exit(-1)
def clean_state_name(state_name):
return state_name.lower()
# Config
def print_config(workers = None):
if workers:
print 'graph_title Celery tasks in each state [workers = %s]' % (', ' . join(workers))
else:
print 'graph_title Celery tasks in each state'
print 'graph_args --lower-limit 0'
print 'graph_scale no'
print 'graph_vlabel tasks per ${graph_period}'
print 'graph_category celery'
for name in TASK_STATES:
name = clean_state_name(name)
print '%s.label %s' % (name, name)
print '%s.type DERIVE' % (name)
print '%s.min 0' % (name)
print '%s.info number of %s tasks' % (name, name)
# Values
def print_values(workers = None, api_url = None):
data = get_data('tasks', api_url)
counters = dict([(key, 0) for key in TASK_STATES])
for task_name, task_data in data:
state = task_data['state']
hostname = task_data['worker']['hostname']
if workers and hostname not in workers:
continue
counters[state] += 1
for name in TASK_STATES:
name_cleaned = clean_state_name(name)
value = counters[name]
print '%s.value %d' % (name_cleaned, value)
if __name__ == '__main__':
workers = os.environ.get('workers', 'all')
api_url = os.environ.get('api_url', API_URL)
check_web_server_status(api_url)
if workers in [None, '', 'all']:
workers = None
else:
workers = workers.split(',')
if len(sys.argv) > 1:
if sys.argv[1] == 'config':
print_config(workers)
elif sys.argv[1] == 'autoconf':
print 'yes'
else:
print_values(workers, api_url)


@ -5,10 +5,9 @@
du - Plugin to monitor multiple directories size
=head1 AUTHOR
=head1 AUTHOR AND COPYRIGHT
Luc Didry <luc AT didry.org>
April 2011
Copyright 2011-2012 Luc Didry <luc AT didry.org>
=head1 HOWTO CONFIGURE AND USE :
@ -44,6 +43,21 @@
#%# family=auto
#%# capabilities=autoconf
=head1 LICENSE
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
=cut
use warnings;
@ -99,7 +113,7 @@ while(defined (my $foo = <FILE>)) {
if ($foo =~ m/(\d+)\s+(.+)/) {
my ($field, $value) = ($2, $1);
clean_path(\$field);
print $field, ".value ", $value, "\n";
print $field, ".value ", $value, "\n";
}
}
close(FILE);
@ -107,7 +121,7 @@ daemonize();
#
##
### PUBLiC FONCTiONS
### PUBLIC FUNCTIONS
###############################################################################
## Used to create the fork
sub daemonize {
@ -135,7 +149,7 @@ sub clean_path {
}
} ## clean_path
## Do you really need I told you what this function is going to check ?
## Do you really need me to tell you what these functions are going to check?
sub cache_is_too_old {
return 1 if (! -e $TIMEFILE);
my ($time) = `cat $TIMEFILE`;
@ -145,21 +159,33 @@ sub cache_is_too_old {
} ## cache_is_too_old
sub du_not_running {
return 0 if (-e $LOCKFILE);
return 1;
if (-e $LOCKFILE) {
my ($time) = `cat $TIMEFILE`;
chomp $time;
if ( (time - $time) > ($ENV{interval}*60*60) ) {
# The cache is really old (60xinterval) => Maybe the lockfile wasn't properly deleted.
# Let's delete it.
system("rm $LOCKFILE;");
return 1;
} else {
return 0;
}
} else {
return 1;
}
}
sub munin_exit_done {
__munin_exit(0);
} ## sub munin_exit_done
sub munin_exit_fail {
__munin_exit(1);
} ## sub munin_exit_fail
#
##
### iNTERNALS FONCTiONS
### INTERNAL FUNCTIONS
###############################################################################
sub __munin_exit {
my $exitcode = shift;


@ -1,702 +0,0 @@
#!/usr/bin/perl -w
# vim: sts=4 sw=4 ts=8
# Munin markers:
#%# family=auto
#%# capabilities=autoconf suggest
# Author: Michael Renner <michael.renner@amd.co.at>
# Version: 0.0.5, 2009-05-22
=head1 NAME
linux_diskstat_ - Munin plugin to monitor various values provided
via C</proc/diskstats>
=head1 APPLICABLE SYSTEMS
Linux 2.6 systems with extended block device statistics enabled.
=head1 INTERPRETATION
Among the more self-describing or well-known values like C<throughput>
(Bytes per second) there are a few which might need further introduction.
=head2 Device Utilization
Linux provides a counter which increments in a millisecond-interval for as long
as there are outstanding I/O requests. If this counter is close to 1000msec
in a given 1 second timeframe the device is nearly 100% saturated. This plugin
provides values averaged over a 5 minute time frame per default, so it can't
catch short-lived saturations, but it'll give a nice trend for semi-uniform
load patterns as they're expected in most server or multi-user environments.
=head2 Device IO Time
The C<Device IO Time> takes the counter described under C<Device Utilization>
and divides it by the number of I/Os that happened in the given time frame,
resulting in an average time per I/O on the block-device level.
This value can give you a good comparison base amongst different controllers,
storage subsystems and disks for similar workloads.
=head2 Syscall Wait Time
These values describe the average time it takes between an application issuing
a syscall resulting in a hit to a blockdevice to the syscall returning to the
application.
The values are bound to be higher (at least for read requests) than the time
it takes the device itself to fulfill the requests, since calling overhead,
queuing times and probably a dozen other things are included in those times.
These are the values to watch out for when a user complains that C<the disks
are too slow!>.
=head3 What causes a block device hit?
A non-exhaustive list:
=over
=item * Reads from files when the given range is not in the page cache or the O_DIRECT
flag is set.
=item * Writes to files if O_DIRECT or O_SYNC is set or sys.vm.dirty_(background_)ratio
is exceeded.
=item * Filesystem metadata operations (stat(2), getdents(2), file creation,
modification of any of the values returned by stat(2), etc.)
=item * The pdflush daemon writing out dirtied pages
=item * (f)sync
=item * Swapping
=item * raw device I/O (mkfs, dd, etc.)
=back
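As a rough worked example of the utilization and wait-time figures described above (a sketch only, not part of the plugin; the counter values are hypothetical, and the arithmetic mirrors what the code further down does):

# two samples of the same device's /proc/diskstats counters, taken 1 second apart
# (the *_ticks fields are in milliseconds)
prev = {"rd_ios": 1000, "rd_ticks": 4000, "wr_ios": 500, "wr_ticks": 9000, "tot_ticks": 700000}
cur  = {"rd_ios": 1100, "rd_ticks": 4800, "wr_ios": 520, "wr_ticks": 9400, "tot_ticks": 700800}
interval = 1.0  # seconds between the two samples

delta = {key: cur[key] - prev[key] for key in prev}
ios = delta["rd_ios"] + delta["wr_ios"]                     # 120 I/Os in the interval

utilization = delta["tot_ticks"] / interval                 # 800 ms busy per second -> ~80% saturated
avg_io_time = utilization / (ios / interval)                # ~6.7 ms spent on the device per I/O
avg_wait = (delta["rd_ticks"] + delta["wr_ticks"]) / ios    # 10 ms per I/O from request start to finish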
=head1 ACKNOWLEDGEMENTS
The core logic of this script is based on the B<iostat> tool of the B<sysstat>
package written and maintained by Sebastien Godard.
=head1 SEE ALSO
See C<Documentation/iostats.txt> in your Linux source tree for further information
about the C<numbers> involved in this module.
L<http://www.westnet.com/~gsmith/content/linux-pdflush.htm> has a nice writeup
about the pdflush daemon.
=head1 AUTHOR
Michael Renner <michael.renner@amd.co.at>
=head1 LICENSE
GPLv2
=cut
use strict;
use File::Basename;
use Carp;
use POSIX;
# We load our own version of save/restore_state if Munin::Plugin is unavailable.
# Don't try this at home
eval { require Munin::Plugin; Munin::Plugin->import; };
if ($@) {
fake_munin_plugin();
}
# Sanity check to ensure that the script is called the correct name.
if (basename($0) !~ /^linux_diskstat_/) {
die qq(Please ensure that the name of the script and its symlinks starts with "linux_diskstat_"\n);
}
############
# autoconf #
############
if ( defined $ARGV[0] && $ARGV[0] eq 'autoconf' ) {
my %stats;
# Capture any croaks on the way
eval { %stats = parse_diskstats() };
if ( !$@ && keys %stats ) {
print "yes\n";
exit 0;
}
else {
print "no\n";
exit 1;
}
}
###########
# suggest #
###########
if ( defined $ARGV[0] && $ARGV[0] eq 'suggest' ) {
my %diskstats = parse_diskstats();
my %suggested_devices;
DEVICE:
for my $devname ( sort keys %diskstats ) {
# Skip devices without traffic
next
if ( $diskstats{$devname}->{'rd_ios'} == 0
&& $diskstats{$devname}->{'wr_ios'} == 0 );
for my $existing_device ( @{ $suggested_devices{'iops'} } ) {
# Filter out devices (partitions) which are matched by existing ones
# e.g. sda1 -> sda, c0d0p1 -> c0d0
next DEVICE if ( $devname =~ m/$existing_device/ );
}
push @{ $suggested_devices{'iops'} }, $devname;
push @{ $suggested_devices{'throughput'} }, $devname;
# Only suggest latency graphs if the device supports it
if ( $diskstats{$devname}->{'rd_ticks'} > 0
|| $diskstats{$devname}->{'wr_ticks'} > 0 )
{
push @{ $suggested_devices{'latency'} }, $devname;
}
}
for my $mode ( keys %suggested_devices ) {
for my $device ( sort @{ $suggested_devices{$mode} } ) {
my $printdev = translate_device_name($device, 'TO_FS');
print "${mode}_$printdev\n";
}
}
exit 0;
}
# Reading the scripts invocation name and setting some parameters,
# needed from here on
my $basename = basename($0);
my ( $mode, $device ) = $basename =~ m/linux_diskstat_(\w+)_([-+\w]+)$/;
if ( not defined $device ) {
croak "Didn't get a device name. Aborting\n";
}
$device = translate_device_name($device, 'FROM_FS');
##########
# config #
##########
if ( defined $ARGV[0] && $ARGV[0] eq 'config' ) {
my $pretty_device = $device;
if ($device =~ /^dm-\d+$/) {
$pretty_device = translate_devicemapper_name($device);
}
if ( $mode eq 'latency' ) {
print <<EOF;
graph_title Disk latency for /dev/$pretty_device
graph_args --base 1000
graph_category disk
util.label Device utilization (percent)
util.type GAUGE
util.info Utilization of the device. If the time spent for I/O is close to 1000msec for a given second, the device is nearly 100% saturated.
util.min 0
svctm.label Average device IO time (ms)
svctm.type GAUGE
svctm.info Average time an I/O takes on the block device
svctm.min 0
avgwait.label Average IO Wait time (ms)
avgwait.type GAUGE
avgwait.info Average wait time for an I/O from request start to finish (includes queue times et al)
avgwait.min 0
avgrdwait.label Average Read IO Wait time (ms)
avgrdwait.type GAUGE
avgrdwait.info Average wait time for a read I/O from request start to finish (includes queue times et al)
avgrdwait.min 0
avgwrwait.label Average Write IO Wait time (ms)
avgwrwait.type GAUGE
avgwrwait.info Average wait time for a write I/O from request start to finish (includes queue times et al)
avgwrwait.min 0
EOF
}
elsif ( $mode eq 'throughput' ) {
print <<EOF;
graph_title Disk throughput for /dev/$pretty_device
graph_args --base 1024
graph_vlabel Bytes/second
graph_category disk
rdbytes.label Read Bytes
rdbytes.type GAUGE
rdbytes.min 0
wrbytes.label Write Bytes
wrbytes.type GAUGE
wrbytes.min 0
EOF
}
elsif ( $mode eq 'iops' ) {
print <<EOF;
graph_title Disk IOs for /dev/$pretty_device
graph_args --base 1000
graph_vlabel Units/second
graph_category disk
rdio.label Read IO/sec
rdio.type GAUGE
rdio.min 0
wrio.label Write IO/sec
wrio.type GAUGE
wrio.min 0
avgrqsz.label Average Request Size (KiB)
avgrqsz.type GAUGE
avgrqsz.min 0
avgrdrqsz.label Average Read Request Size (KiB)
avgrdrqsz.type GAUGE
avgrdrqsz.min 0
avgwrrqsz.label Average Write Request Size (KiB)
avgwrrqsz.type GAUGE
avgwrrqsz.min 0
EOF
}
else {
croak "Unknown mode $mode\n";
}
exit 0;
}
########
# MAIN #
########
my %cur_diskstat = fetch_device_counters($device);
my ( $prev_time, %prev_diskstat ) = restore_state();
save_state( time(), %cur_diskstat );
# This is probably the first run for the given device; we need saved state
# to do our job, so wait for the next run.
exit if ( not defined $prev_time or not %prev_diskstat );
calculate_and_print_values( $prev_time, \%prev_diskstat, \%cur_diskstat );
########
# SUBS #
########
sub calculate_and_print_values {
my ( $prev_time, $prev_stats, $cur_stats ) = @_;
my $bytes_per_sector = 512;
my $interval = time() - $prev_time;
my $read_ios = $cur_stats->{'rd_ios'} - $prev_stats->{'rd_ios'};
my $write_ios = $cur_stats->{'wr_ios'} - $prev_stats->{'wr_ios'};
my $rd_ticks = $cur_stats->{'rd_ticks'} - $prev_stats->{'rd_ticks'};
my $wr_ticks = $cur_stats->{'wr_ticks'} - $prev_stats->{'wr_ticks'};
my $rd_sectors = $cur_stats->{'rd_sectors'} - $prev_stats->{'rd_sectors'};
my $wr_sectors = $cur_stats->{'wr_sectors'} - $prev_stats->{'wr_sectors'};
my $tot_ticks = $cur_stats->{'tot_ticks'} - $prev_stats->{'tot_ticks'};
my $read_io_per_sec = $read_ios / $interval;
my $write_io_per_sec = $write_ios / $interval;
my $read_bytes_per_sec = $rd_sectors / $interval * $bytes_per_sector;
my $write_bytes_per_sec = $wr_sectors / $interval * $bytes_per_sector;
my $total_ios = $read_ios + $write_ios;
my $total_ios_per_sec = $total_ios / $interval;
# Utilization - or "how busy is the device"?
# If the time spent for I/O was close to 1000msec for
# a given second, the device is nearly 100% saturated.
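# Example with assumed numbers: 3000 ms of accumulated busy time over a
# 300 s sampling interval gives 3000/300 = 10 ms of I/O per second,
# i.e. roughly 1 % utilization once divided by 10 into $util_print further down.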
my $utilization = $tot_ticks / $interval;
# Average time an I/O takes on the block device
my $servicetime =
$total_ios_per_sec ? $utilization / $total_ios_per_sec : 0;
# Average wait time for an I/O from start to finish
# (includes queue times et al)
my $average_wait = $total_ios ? ( $rd_ticks + $wr_ticks ) / $total_ios : 0;
my $average_rd_wait = $read_ios ? $rd_ticks / $read_ios : 0;
my $average_wr_wait = $write_ios ? $wr_ticks / $write_ios : 0;
my $average_rq_size_in_kb =
$total_ios
? ( $rd_sectors + $wr_sectors ) * $bytes_per_sector / 1024 / $total_ios
: 0;
my $average_rd_rq_size_in_kb =
$read_ios ? $rd_sectors * $bytes_per_sector / 1024 / $read_ios : 0;
my $average_wr_rq_size_in_kb =
$write_ios ? $wr_sectors * $bytes_per_sector / 1024 / $write_ios : 0;
my $util_print = $utilization / 10;
if ( $mode eq 'latency' ) {
print <<EOF;
util.value $util_print
svctm.value $servicetime
avgwait.value $average_wait
avgrdwait.value $average_rd_wait
avgwrwait.value $average_wr_wait
EOF
}
elsif ( $mode eq 'throughput' ) {
print <<EOF;
rdbytes.value $read_bytes_per_sec
wrbytes.value $write_bytes_per_sec
EOF
}
elsif ( $mode eq 'iops' ) {
print <<EOF;
rdio.value $read_io_per_sec
wrio.value $write_io_per_sec
avgrqsz.value $average_rq_size_in_kb
avgrdrqsz.value $average_rd_rq_size_in_kb
avgwrrqsz.value $average_wr_rq_size_in_kb
EOF
}
else {
croak "Unknown mode $mode\n";
}
}
sub read_diskstats {
open STAT, '< /proc/diskstats'
or croak "Failed to open '/proc/diskstats': $!\n";
my @lines;
for my $line (<STAT>) {
# Strip trailing newline and leading whitespace
chomp $line;
$line =~ s/^\s+//;
my @elems = split /\s+/, $line;
# We explicitly don't support old-style diskstats
# There are situations where only _some_ lines (e.g.
# partitions on older 2.6 kernels) have fewer stats
# numbers, therefore we'll skip them silently
if ( @elems != 14 ) {
next;
}
push @lines, \@elems;
}
close STAT or croak "Failed to close '/proc/diskstats': $!";
return @lines;
}
sub read_sysfs {
my ($want_device) = @_;
my @devices;
my @lines;
if ( defined $want_device ) {
# sysfs uses '!' as replacement for '/', e.g. cciss!c0d0
$want_device =~ tr#/#!#;
@devices = $want_device;
}
else {
@devices = glob "/sys/block/*/stat";
@devices = map { m!/sys/block/([^/]+)/stat! } @devices;
}
for my $cur_device (@devices) {
my $stats_file = "/sys/block/$cur_device/stat";
open STAT, "< $stats_file"
or croak "Failed to open '$stats_file': $!\n";
my $line = <STAT>;
# Trimming whitespace
$line =~ s/^\s+//;
chomp $line;
my @elems = split /\s+/, $line;
croak "'$stats_file' doesn't contain exactly 11 values. Aborting"
if ( @elems != 11 );
# Translate the devicename back before storing the information
$cur_device =~ tr#!#/#;
# Faking missing diskstats values
unshift @elems, ( '', '', $cur_device );
push @lines, \@elems;
close STAT or croak "Failed to close '$stats_file': $!\n";
}
return @lines;
}
sub parse_diskstats {
my ($want_device) = @_;
my @stats;
if ( glob "/sys/block/*/stat" ) {
@stats = read_sysfs($want_device);
}
else {
@stats = read_diskstats();
}
my %diskstats;
for my $entry (@stats) {
my %devstat;
# Hash-Slicing for fun and profit
@devstat{
qw(major minor devname
rd_ios rd_merges rd_sectors rd_ticks
wr_ios wr_merges wr_sectors wr_ticks
ios_in_prog tot_ticks rq_ticks)
}
= @{$entry};
$diskstats{ $devstat{'devname'} } = \%devstat;
}
return %diskstats;
}
sub fetch_device_counters {
my ($want_device) = @_;
my %diskstats = parse_diskstats($want_device);
for my $devname ( keys %diskstats ) {
if ( $want_device eq $devname ) {
return %{ $diskstats{$devname} };
}
}
return undef;
}
# We use '+' (and formerly '-') as placeholder for '/' in device-names
# used as calling name for the script.
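# e.g. a symlink named linux_diskstat_iops_cciss+c0d0 maps back to the device cciss/c0d0.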
sub translate_device_name {
my ($device, $mode) = @_;
if ($mode eq 'FROM_FS') {
# Hackaround to mitigate issues with unwisely chosen former separator
if ( not ($device =~ m/dm-\d+/)) {
$device =~ tr#-+#//#;
}
}
elsif ($mode eq 'TO_FS') {
$device =~ tr#/#+#;
}
else {
croak "translate_device_name: Unknown mode\n";
}
return $device;
}
sub fake_munin_plugin {
my $eval_code = <<'EOF';
use Storable;
my $storable_filename = basename($0);
$storable_filename = "/tmp/munin-state-$storable_filename";
sub save_state {
my @state = @_;
if ( not -e $storable_filename or -f $storable_filename ) {
store \@state, $storable_filename or croak "Failed to persist state to '$storable_filename': $!\n";
}
else {
croak "$storable_filename is probably not a regular file. Please delete it.\n";
}
}
sub restore_state {
if (-f $storable_filename) {
my $state = retrieve($storable_filename);
return @{$state};
}
else {
return undef;
}
}
EOF
eval($eval_code);
}
sub translate_devicemapper_name {
my ($device) = @_;
my ($want_minor) = $device =~ m/^dm-(\d+)$/;
croak "Failed to extract devicemapper id" unless defined ($want_minor);
my $dm_major = find_devicemapper_major();
croak "Failed to get device-mapper major number\n" unless defined $dm_major;
for my $entry (glob "/dev/mapper/\*") {
my $rdev = (stat($entry))[6];
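# Classic Linux device numbers encode major/minor as (major * 256 + minor), hence the split below.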
my $major = floor($rdev / 256);
my $minor = $rdev % 256;
if ($major == $dm_major && $minor == $want_minor) {
my $pretty_name = translate_lvm_name($entry);
return defined $pretty_name ? $pretty_name : $entry;
}
}
# Return original string if the device can't be found.
return $device;
}
sub translate_lvm_name {
my ($entry) = @_;
my $device_name = basename($entry);
# Check for a single-dash occurrence to see if this could be an LVM devicemapper device.
if ($device_name =~ m/(?<!-)-(?!-)/) {
# split device name into vg and lv parts
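# e.g. "vg00-lv--data" yields vg "vg00" and lv "lv--data", i.e. "vg00/lv-data" after the dash cleanup below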
my ($vg, $lv) = split /(?<!-)-(?!-)/, $device_name, 2;
return undef unless ( defined($vg) && defined($lv) );
# remove extraneous dashes from vg and lv names
$vg =~ s/--/-/g;
$lv =~ s/--/-/g;
$device_name = "$vg/$lv";
# Sanity check - does the constructed device name exist?
if (stat("/dev/$device_name")) {
return "$device_name";
}
}
return undef;
}
sub find_devicemapper_major {
open (FH, '< /proc/devices') or croak "Failed to open '/proc/devices': $!";
my $dm_major;
for my $line (<FH>) {
chomp $line;
my ($major, $name) = split /\s+/, $line, 2;
next unless defined $name;
if ($name eq 'device-mapper') {
$dm_major = $major;
last;
}
}
close(FH);
return $dm_major;
}

View File

@ -1,27 +1,66 @@
#!/bin/sh
#
# Script to monitor disk usage.
#
# By PatrickDK
#
# Parameters understood:
#
# config (required)
# autoconf (optional - used by munin-config)
#
# $Log$
#
# Magic markers (optional - used by munin-config and installation
# scripts):
#
#%# family=auto
#%# capabilities=autoconf
# -*- sh -*-
: << EOF
=head1 NAME
lvm_ - Wildcard plugin for monitoring disk usage on LVM. Each Volume Group is graphed separately.
=head1 CONFIGURATION
This plugin needs to run as the root user in order to have permission to run lvs and vgs
[lvm_*]
user root
=head1 AUTHOR
=over 4
=item * PatrickDK (Original Author)
=item * Niall Donegan
=back
=head1 LICENSE
Unknown license
=head1 MAGIC MARKERS
=begin comment
These magic markers are used by munin-node-configure when installing
munin-node.
=end comment
#%# family=auto
#%# capabilities=autoconf suggest
=cut
EOF
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
if ! command -v lvs >/dev/null; then
echo "no (lvs not found)"
elif ! command -v vgs >/dev/null; then
echo "no (vgs not found)"
else
echo "yes"
fi
exit 0
fi
if [ "$1" = "suggest" ]; then
vgs -o vg_name --noheadings | sed -e 's/\ *//'
exit 0
fi
vg=`echo $0 | awk '{ sub(".*lvm_","",\$1); print \$1; }'`
clean_name() {
@ -31,9 +70,8 @@ clean_name() {
if [ "$1" = "config" ]; then
echo 'graph_title Logical Volume usage'
echo 'graph_args --base 1000 -l 0'
# echo 'graph_vlabel %'
echo "graph_title Logical Volume Usage($vg)"
echo 'graph_args --base 1024 -l 0'
echo 'graph_category disk'
echo 'graph_info This graph shows disk usage on the machine.'
echo "free.label free"

View File

@ -80,7 +80,7 @@ my %config = (
lookfor => 'Drive Temperature :',
label => 'Temp',
title => "MegaRAID Adapter $Adapter: Drive Temperatures",
vtitle => 'Celsius',
vlabel => 'Celsius',
graph_args => '--base 1000 -l 0',
warning => '55',
critical => '65',
@ -91,7 +91,7 @@ my %config = (
lookfor => 'Media Error Count: ',
label => 'Media Err',
title => "MegaRAID Adapter $Adapter: Media Errors (SMART)",
vtitle => 'Number of Errors',
vlabel => 'Number of Errors',
graph_args => '--base 1000 -l 0',
warning => '',
critical => '',
@ -102,7 +102,7 @@ my %config = (
lookfor => 'Other Error Count: ',
label => 'Other Err',
title => "MegaRAID Adapter $Adapter: Others Errors (SMART)",
vtitle => 'Number of Errors',
vlabel => 'Number of Errors',
graph_args => '--base 1000 -l 0',
warning => '',
critical => '',
@ -113,7 +113,7 @@ my %config = (
lookfor => 'Predictive Failure Count: ',
label => 'Predictive Err',
title => "MegaRAID Adapter $Adapter: Predictive Errors (SMART)",
vtitle => 'Number of Errors',
vlabel => 'Number of Errors',
graph_args => '--base 1000 -l 0',
warning => '',
critical => '',
@ -140,7 +140,7 @@ my @Output=qx($Command);
#Munin Config Options
if ($ARGV[0] and $ARGV[0] eq "config"){
print "graph_title $config{$Type}->{title}\n";
print "graph_vtitle $config{$Type}->{vtitle}\n";
print "graph_vlabel $config{$Type}->{vlabel}\n";
print "graph_args $config{$Type}->{graph_args}\n";
print "graph_scale yes\n";
print "graph_category disk\n";

View File

@ -1,175 +0,0 @@
#!/usr/bin/perl
=head1 NAME
snmp__netapp_diskusage_ - Munin plugin to retrieve file system usage on
NetApp storage appliances.
=head1 APPLICABLE SYSTEMS
File system usage stats should be reported by any NetApp storage
appliance with the SNMP agent daemon activated. See na_snmp(8) for details.
=head1 CONFIGURATION
Unfortunately, SNMPv3 is not fully supported on all NetApp equipment.
For this reason, this plugin will use SNMPv2 by default, which is
insecure because it doesn't encrypt the community string.
The following parameters will help you get this plugin working:
[snmp_*]
env.community MyCommunity
If your community name is 'public', you should really worry about
security and immediately reconfigure your appliance.
Please see 'perldoc Munin::Plugin::SNMP' for further configuration.
=head1 INTERPRETATION
The plugin reports file system usage. This can help you monitor file
system usage over a given period of time.
=head1 MIB INFORMATION
This plugin requires support for the NETWORK-APPLIANCE-MIB issued by
Network Appliance. It reports the content of the DfEntry OID.
=head1 MAGIC MARKERS
#%# family=snmpauto
#%# capabilities=snmpconf
=head1 VERSION
v1.0 - 06/22/2009 14:05:03 CEST
Initial revision
=head1 AUTHOR
This plugin is copyright (c) 2009 by Guillaume Blairon.
NetApp is a registered trademark and Network Appliance is a trademark
of Network Appliance, Inc. in the U.S. and other countries.
=head1 BUGS
This plugin hasn't been tested on much hardware. If you encounter bugs,
please report them to Guillaume Blairon E<lt>L<g@yom.be>E<gt>.
=head1 LICENSE
GPLv2 or (at your option) any later version.
=cut
use strict;
use warnings;
use Munin::Plugin::SNMP;
use vars qw($DEBUG);
$DEBUG = $ENV{'MUNIN_DEBUG'};
my @palette =
#Better colours from munin 1.3.x
#Greens Blues Oranges Dk yel Dk blu Purple Lime Reds Gray
qw(00CC00 0066B3 FF8000 FFCC00 330099 990099 CCFF00 FF0000 808080
008F00 00487D B35A00 B38F00 6B006B 8FB300 B30000 BEBEBE
80FF80 80C9FF FFC080 FFE680 AA80FF EE00CC FF8080
666600 FFBFFF 00FFCC CC6699 999900);
my %oids = (
# - dfHigh.* : 32 most significant bits counters
# - dfLow.* : 32 least significant bits counters
dfHighTotalKBytes => '1.3.6.1.4.1.789.1.5.4.1.14.',
dfLowTotalKBytes => '1.3.6.1.4.1.789.1.5.4.1.15.',
dfHighUsedKBytes => '1.3.6.1.4.1.789.1.5.4.1.16.',
dfLowUsedKBytes => '1.3.6.1.4.1.789.1.5.4.1.17.',
dfHighAvailKBytes => '1.3.6.1.4.1.789.1.5.4.1.18.',
dfLowAvailKBytes => '1.3.6.1.4.1.789.1.5.4.1.19.',
);
sub to_32bit_int {
my ($l, $h) = @_;
return "U" if ((!defined $l) || (!defined $h));
my $bin = unpack( 'B32', pack('N', $l) . pack('N', $h) );
return unpack( 'N', pack('B32', $bin) );
}
if (defined $ARGV[0] and $ARGV[0] eq 'snmpconf') {
print "number 1.3.6.1.4.1.789.1.5.6.0\n";
print "index 1.3.6.1.4.1.789.1.5.4.1.1.\n";
foreach (keys %oids) {
print "require $oids{$_} [0-9]\n";
}
exit 0;
}
my $session = Munin::Plugin::SNMP->session();
my ($host, undef, undef, $tail) = Munin::Plugin::SNMP->config_session();
my ($df_id, $name_oid);
if ($tail =~ /^netapp_diskusage_(\d+)$/) {
$df_id = $1;
$name_oid = '1.3.6.1.4.1.789.1.5.4.1.2.' . $df_id;
} else {
die "Couldn't understand what I'm supposed to monitor";
}
if (defined $ARGV[0] and $ARGV[0] eq "config") {
my $df_name = $session->get_single($name_oid);
print "host_name $host\n" unless $host eq 'localhost';
print "graph_title $host disk usage on $df_name\n";
print "graph_args --base 1024 --lower-limit 0\n";
print "graph_vlabel bytes\n";
print "graph_category disk\n";
print "graph_info This graph shows the disk usage for $df_name on NetApp host $host\n";
print "graph_order used avail total\n";
print "used.info The total disk space in KBytes that is in use on the $df_name file system.\n";
print "used.type GAUGE\n";
print "used.draw AREA\n";
print "used.label Used\n";
print "used.cdef used,1024,*\n";
print "used.min 0\n";
print "used.colour $palette[1]\n";
print "avail.info The total disk space in KBytes that is free for use on the $df_name file system.\n";
print "avail.type GAUGE\n";
print "avail.draw STACK\n";
print "avail.label Available\n";
print "avail.cdef avail,1024,*\n";
print "avail.min 0\n";
print "avail.colour $palette[3]\n";
print "total.info The total capacity in KBytes for the $df_name file system.\n";
print "total.type GAUGE\n";
print "total.draw LINE2\n";
print "total.label Total\n";
print "total.cdef total,1024,*\n";
print "total.min 0\n";
print "total.colour $palette[7]\n";
exit 0;
}
my $used_l = $session->get_single($oids{dfLowUsedKBytes}.$df_id);
my $used_h = $session->get_single($oids{dfHighUsedKBytes}.$df_id);
my $avail_l = $session->get_single($oids{dfLowAvailKBytes}.$df_id);
my $avail_h = $session->get_single($oids{dfHighAvailKBytes}.$df_id);
my $total_l = $session->get_single($oids{dfLowTotalKBytes}.$df_id);
my $total_h = $session->get_single($oids{dfHighTotalKBytes}.$df_id);
my $used = to_32bit_int($used_l, $used_h);
my $avail = to_32bit_int($avail_l, $avail_h);
my $total = to_32bit_int($total_l, $total_h);
print "used.value $used\n";
print "avail.value $avail\n";
print "total.value $total\n";
exit 0;
__END__

View File

@ -1,144 +0,0 @@
#!/usr/bin/perl
=head1 NAME
snmp__netapp_inodeusage_ - Munin plugin to retrieve inode usage on
NetApp storage appliances.
=head1 APPLICABLE SYSTEMS
Inode usage stats should be reported by any NetApp storage appliance
with the SNMP agent daemon activated. See na_snmp(8) for details.
=head1 CONFIGURATION
Unfortunately, SNMPv3 is not fully supported on all NetApp equipment.
For this reason, this plugin will use SNMPv2 by default, which is
insecure because it doesn't encrypt the community string.
The following parameters will help you get this plugin working:
[snmp_*]
env.community MyCommunity
If your community name is 'public', you should really worry about
security and immediately reconfigure your appliance.
Please see 'perldoc Munin::Plugin::SNMP' for further configuration.
=head1 MIB INFORMATION
This plugin requires support for the NETWORK-APPLIANCE-MIB issued by
Network Appliance. It reports the content of the DfEntry OID.
=head1 MAGIC MARKERS
#%# family=snmpauto
#%# capabilities=snmpconf
=head1 VERSION
v1.0 - 06/22/2009 14:05:03 CEST
Initial revision
=head1 AUTHOR
This plugin is copyright (c) 2009 by Guillaume Blairon.
NetApp is a registered trademark and Network Appliance is a trademark
of Network Appliance, Inc. in the U.S. and other countries.
=head1 BUGS
This plugin hasn't been tested on much hardware. If you encounter bugs,
please report them to Guillaume Blairon E<lt>L<g@yom.be>E<gt>.
=head1 LICENSE
GPLv2 or (at your option) any later version.
=cut
use strict;
use warnings;
use Munin::Plugin::SNMP;
use vars qw($DEBUG);
$DEBUG = $ENV{'MUNIN_DEBUG'};
my @palette =
#Better colours from munin 1.3.x
#Greens Blues Oranges Dk yel Dk blu Purple Lime Reds Gray
qw(00CC00 0066B3 FF8000 FFCC00 330099 990099 CCFF00 FF0000 808080
008F00 00487D B35A00 B38F00 6B006B 8FB300 B30000 BEBEBE
80FF80 80C9FF FFC080 FFE680 AA80FF EE00CC FF8080
666600 FFBFFF 00FFCC CC6699 999900);
my %oids = (
dfInodesUsed => '1.3.6.1.4.1.789.1.5.4.1.7.',
dfInodesFree => '1.3.6.1.4.1.789.1.5.4.1.8.',
);
if (defined $ARGV[0] and $ARGV[0] eq 'snmpconf') {
print "number 1.3.6.1.4.1.789.1.5.6.0\n";
print "index 1.3.6.1.4.1.789.1.5.4.1.1.\n";
foreach (keys %oids) {
print "require $oids{$_} [0-9]\n";
}
exit 0;
}
my $session = Munin::Plugin::SNMP->session();
my ($host, undef, undef, $tail) = Munin::Plugin::SNMP->config_session();
my ($df_id, $name_oid);
if ($tail =~ /^netapp_inodeusage_(\d+)$/) {
$df_id = $1;
$name_oid = '1.3.6.1.4.1.789.1.5.4.1.2.' . $df_id;
} else {
die "Couldn't understand what I'm supposed to monitor";
}
if (defined $ARGV[0] and $ARGV[0] eq "config") {
my $df_name = $session->get_single($name_oid);
print "host_name $host\n" unless $host eq 'localhost';
print "graph_title $host inodes usage on $df_name\n";
print "graph_args --base 1000 --lower-limit 0\n";
print "graph_vlabel bytes\n";
print "graph_category disk\n";
print "graph_info This graph shows the inodes usage for $df_name on NetApp host $host\n";
print "graph_order used avail total\n";
print "used.info The total inodes number of inodes in use on the $df_name file system.\n";
print "used.type GAUGE\n";
print "used.draw AREA\n";
print "used.label Used\n";
print "used.min 0\n";
print "used.colour $palette[1]\n";
print "avail.info The total number of inodes that are free for use on the $df_name file system.\n";
print "avail.type GAUGE\n";
print "avail.draw STACK\n";
print "avail.label Available\n";
print "avail.min 0\n";
print "avail.colour $palette[3]\n";
print "total.info The total capacity for the $df_name file system.\n";
print "total.type GAUGE\n";
print "total.draw LINE2\n";
print "total.label Total\n";
print "total.min 0\n";
print "total.colour $palette[7]\n";
exit 0;
}
my $used = $session->get_single($oids{dfInodesUsed}.$df_id);
my $avail = $session->get_single($oids{dfInodesFree}.$df_id);
my $total = $used + $avail;
print "used.value $used\n";
print "avail.value $avail\n";
print "total.value $total\n";
exit 0;
__END__

View File

@ -1,172 +0,0 @@
#!/usr/bin/perl -w
#
# Copyright (C) 2006 Lars Strand
#
# Munin plugin to monitor swap usage by use of SNMP.
# Based on the snmp__df plugin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2 dated June,
# 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# $Log$
#
#%# family=snmpauto
#%# capabilities=snmpconf
use strict;
use Net::SNMP;
my $DEBUG = 0;
my $MAXLABEL = 20;
my $host = $ENV{host} || undef;
my $port = $ENV{port} || 161;
my $community = $ENV{community} || "public";
my $iface = $ENV{interface} || undef;
my $response;
if (defined $ARGV[0] and $ARGV[0] eq "snmpconf")
{
# HOST-RESOURCES-MIB::hrStorage
# HOST-RESOURCES-TYPES::hrStorageVirtualMemory
print "require 1.3.6.1.2.1.25.2. 1.3.6.1.2.1.25.2.1.3\n";
exit 0;
}
if ($0 =~ /^(?:|.*\/)snmp_([^_]+)_swap$/)
{
$host = $1;
if ($host =~ /^([^:]+):(\d+)$/)
{
$host = $1;
$port = $2;
}
}
elsif (!defined($host))
{
print "# Debug: $0 -- $1\n" if $DEBUG;
die "# Error: couldn't understand what I'm supposed to monitor.";
}
my ($session, $error) = Net::SNMP->session(
-hostname => $host,
-community => $community,
-port => $port
);
if (!defined ($session))
{
die "Croaking: $error";
}
my $hrStorage = "1.3.6.1.2.1.25.2.";
my $hrStorageVirtualMemory = "1.3.6.1.2.1.25.2.1.3";
my $hrStorageSize = "1.3.6.1.2.1.25.2.3.1.5.";
my $hrStorageUsed = "1.3.6.1.2.1.25.2.3.1.6.";
my $swap_d = get_by_regex($session, $hrStorage, $hrStorageVirtualMemory);
my $swapsize = 0; my $swapused = 0;
foreach my $swap (keys %$swap_d)
{
$swapsize += get_single($session, $hrStorageSize . $swap);
$swapused += get_single($session, $hrStorageUsed . $swap);
}
if (defined $ARGV[0] and $ARGV[0] eq "config")
{
print "host_name $host\n";
print "graph_title Virtual memory usage\n";
if ($swapsize > 0)
{
print "graph_args -l 0 --base 1000 --upper-limit $swapsize\n";
}
else
{
print "graph_args -l 0 --base 1000\n";
}
print "graph_vlabel Bytes\n";
print "graph_category disk\n";
print "graph_info This graph shows swap usage in bytes.\n";
print "swap.label swap\n";
print "swap.type DERIVE\n";
print "swap.min 0\n";
exit 0;
}
print "swap.value $swapused\n";
sub get_single
{
my $handle = shift;
my $oid = shift;
print "# Getting single $oid..." if $DEBUG;
$response = $handle->get_request ($oid);
if (!defined $response->{$oid})
{
print "undef\n" if $DEBUG;
return undef;
}
else
{
print "\"$response->{$oid}\"\n" if $DEBUG;
return $response->{$oid};
}
}
sub get_by_regex
{
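# Walk the subtree below $oid with get-next requests and return a hashref of
# the entries whose value matches $regex, keyed by their last sub-identifier.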
my $handle = shift;
my $oid = shift;
my $regex = shift;
my $result = {};
my $num = 0;
my $ret = $oid . "0";
my $response;
print "# Starting browse of $oid...\n" if $DEBUG;
while (1)
{
if ($num == 0)
{
print "# Checking for $ret...\n" if $DEBUG;
$response = $handle->get_request ($ret);
}
if ($num or !defined $response)
{
print "# Checking for sibling of $ret...\n" if $DEBUG;
$response = $handle->get_next_request ($ret);
}
if (!$response)
{
return undef;
}
my @keys = keys %$response;
$ret = $keys[0];
print "# Analyzing $ret (compared to $oid)...\n" if $DEBUG;
last unless ($ret =~ /^$oid/);
$num++;
next unless ($response->{$ret} =~ /$regex/);
@keys = split (/\./, $ret);
$result->{$keys[-1]} = $response->{$ret};
print "# Index $num: ", $keys[-1], " (", $response->{$ret}, ")\n" if $DEBUG;
};
return $result;
}

View File

@ -42,6 +42,11 @@ else
end
end
if ARGV.first == 'reset'
log_info = { :start => File.size(LOG_FILE)-1 }
puts 'Log reset'
end
new_data = ''
File.open(LOG_FILE, 'r') do |flog|
flog.seek(log_info[:start])
@ -65,6 +70,14 @@ LABELS = {
:system_ports_limit => 'System limit hit: ports', # check with length(erlang:ports())., set in ejabberdctl config file
:system_limit => 'Other system limit hit', # processes? check with erlang:system_info(process_count)., erlang:system_info(process_limit)., set in ejabberdctl cfg
:generic_server_terminating => 'Generic server terminating',
:mnesia_table_shrinked => 'Mnesia table shrinked',
:admin_access_failed => 'Admin access failed',
:mysql_sock_timedout => 'MySQL sock timedout',
:config_error => 'Configuration error',
:vcard_error => 'Strange vCard error (vhost)',
:mnesia_overload => 'Mnesia is overloaded',
:mysql_init_recv_failed => 'MySQL: init failed recv data',
:tcp_failed => 'TCP Error',
:UNKNOWN => 'Unknown error/warning'
}
def log_type(text)
@ -104,6 +117,22 @@ def log_type(text)
:system_limit
elsif text.include?('Generic server') and text.include?('terminating')
:generic_server_terminating
elsif text.include?('shrinking table')
:mnesia_table_shrinked
elsif text.include?('Access of') and text.include?('failed with error')
:admin_access_failed
elsif text.include?('mysql_') and text.include?(': Socket') and text.include?('timedout')
:mysql_sock_timedout
elsif text.include?('{badrecord,config}')
:config_error
elsif text.include?('error found when trying to get the vCard')
:vcard_error
elsif text.include?('Mnesia is overloaded')
:mnesia_overload
elsif text.include?('mysql_conn: init failed receiving data')
:mysql_init_recv_failed
elsif text.include?('Failed TCP')
:tcp_failed
else
warn "Cannot parse text: #{text}" if $debug_mode
:UNKNOWN
@ -128,7 +157,7 @@ File.open(CACHE_FILE, 'w') { |f| f.write log_info.to_yaml } unless $debug_mode
if ARGV.first == 'config'
puts <<CONFIG
graph_title Ejabberd Log
graph_vtitle per period
graph_vlabel total
graph_category ejabberd
graph_args -l 0
graph_order #{(LABELS.keys + log_info.keys.select { |k| k.is_a? String }.sort).join(' ')}

View File

@ -1,28 +0,0 @@
#!/bin/bash
if [ "$1" == "config" ] ; then
echo "graph_title HP server fan speed"
echo "graph_vlabel speed"
echo "graph_category environment"
echo "graph_info This graph shows the speed of the system fans"
echo 'processor_zone_fan.label Processor Zone fan speed'
echo 'system_board_fan.label System Board fan speed'
exit 0
fi
#ID TYPE LOCATION STATUS REDUNDANT FAN SPEED
#1 Var. Speed Processor Zone Normal N/A Low ( 10)
#2 Var. Speed System Board Normal N/A Low ( 10)
/sbin/hplog -f | ( while read a b c d e f g h i j k l m
do
if [ "$d" == "Processor" ] ; then
echo -n "processor_zone_fan.value "
echo $j | sed 's/)//'
elif [ "$d" == "System" ] ; then
echo -n "system_board_fan.value "
echo $j | sed 's/)//'
fi
done
)

View File

@ -1,43 +0,0 @@
#!/bin/bash
if [ "$1" == "config" ] ; then
echo "graph_title HP server temperatures"
echo "graph_vlabel celsius"
echo "graph_category environment"
echo "graph_info temperatures read using hpscan"
echo "processor_zone.label Processor zone temperature"
echo "io_zone.label I/O zone temperature"
echo "processor0.label Processor 0 temperature"
echo "processor1.label Processor 1 temperature"
echo "psu_bay.label PSU bay temperature"
exit 0
fi
#ID TYPE LOCATION STATUS CURRENT THRESHOLD
#1 ADM1022 Processor Zone Normal 104F/ 40C 143F/ 62C
#2 ADM1022 CPU (1) Normal 96F/ 36C 163F/ 73C
#3 ADM1022 I/O Zone Normal 118F/ 48C 154F/ 68C
#4 ADM1022 CPU (2) Normal 127F/ 53C 163F/ 73C
#5 ADM1022 Pwr. Supply Bay Normal 95F/ 35C 127F/ 53C
CPUNUMBER=0
/sbin/hplog -t | ( while read a b c d e f g h i j k l m
do
if [ "$c" == "Processor" ] ; then
echo -n "processor_zone.value "
echo $g | sed 's/C//'
elif [ "$c" == "I/O" ] ; then
echo -n "io_zone.value "
echo $g | sed 's/C//'
elif [ "$c" == "CPU" ] ; then
echo -n "processor$CPUNUMBER.value "
echo $g | sed 's/C//'
CPUNUMBER=`expr $CPUNUMBER + 1`
elif [ "$c" == "Pwr." ] ; then
echo -n "psu_bay.value "
echo $h | sed 's/C//'
fi
done
)

142
plugins/glance/glance_size_ Executable file
View File

@ -0,0 +1,142 @@
#!/usr/bin/env python
#
# Plugin to monitor used size of a tenant in glance
#
# To monitor the used size of a tenant in glance do:
# E.g.
# ln -s /usr/share/munin/plugins/glance_size_ /etc/munin/plugins/glance_size_<tenant_uuid>
#
# Needs following minimal configuration in plugin-conf.d/glance:
# [glance_*]
# user glance
#
# To show the tenant name, the plugin must run as root
#
# Magic markers
#%# capabilities=autoconf suggest
#%# family=auto
import sys
import os
try:
from sqlalchemy.orm import exc, joinedload
import sqlalchemy.exc
from glance.common.cfg import CommonConfigOpts
from glance.registry.db import models
from glance.registry.db.api import get_session, configure_db
from keystone.common import utils
from keystone import config
from keystone import exception
from keystone import identity
except ImportError:
succesful_import = False
else:
succesful_import = True
def get_name_from_tenant(tenant):
try:
KEYSTONE_CONF = config.CONF(config_files=[utils.find_config('keystone.conf')])
except:
# keystone configuration cannot be loaded, use id as name
return tenant
identity_api = identity.Manager()
try:
tenant_info = identity_api.get_tenant(None, tenant)
except sqlalchemy.exc.OperationalError:
# keystone database cannot be connected, use id as name
return tenant
if not tenant_info:
return tenant
else:
return tenant_info["name"]
def load_conf():
CONF = CommonConfigOpts(project="glance", prog="glance-registry")
CONF()
# Hide missing logger warning message
sys.stderr = open(os.devnull, 'w')
configure_db(CONF)
sys.stderr = sys.__stderr__
def print_config(tenant):
if tenant == "Global":
print 'graph_title Glance used size for all tenants'
print 'graph_info This graph shows the used size in glance for all tenants'
else:
print 'graph_title Glance used size for tenant %s' % get_name_from_tenant(tenant)
print 'graph_info This graph shows the used size in glance for tenant %s' % tenant
print 'graph_vlabel Bytes'
print 'graph_args --base 1024 --lower-limit 0'
print 'graph_category glance'
print '%s.label %s' % (tenant, get_name_from_tenant(tenant))
print '%s.draw LINE2' % tenant
print '%s.info %s MBytes' % (tenant, tenant)
def request(**kwargs):
session = get_session()
try:
query = session.query(models.Image).\
options(joinedload(models.Image.properties)).\
options(joinedload(models.Image.members))
if kwargs:
query = query.filter_by(**kwargs)
images = query.all()
except exc.NoResultFound:
return []
return images
def print_suggest():
print "Global"
print "\n".join(set( image["owner"] for image in request(deleted=False) ))
def print_values(tenant):
if tenant != "Global" :
images = request(deleted=False, owner=tenant)
else:
images = request(deleted=False)
total_size = sum([ image["size"] for image in images ])
print '%s.value %s' % (tenant, total_size)
if __name__ == '__main__':
argv = sys.argv[:]
tenant = argv[0].split('glance_size_').pop() or 'Global'
if len(argv) > 1:
if argv[1] == 'config':
print_config(tenant)
elif argv[1] == 'suggest' and succesful_import:
load_conf()
print_suggest()
elif argv[1] == 'autoconf':
if not succesful_import:
print 'no (failed to import glance and/or sqlalchemy module)'
sys.exit(0)
try:
load_conf()
get_session()
except:
print 'no (failed to connect glance backend, check user)'
sys.exit(0)
print 'yes'
elif succesful_import:
load_conf()
print_values(tenant)

103
plugins/glance/glance_status Executable file
View File

@ -0,0 +1,103 @@
#!/usr/bin/env python
#
# Plugin to monitor the number of glance images by status
#
# To monitor the image status in glance do:
# E.g.
# ln -s /usr/share/munin/plugins/glance_status /etc/munin/plugins/glance_status
#
# Needs following minimal configuration in plugin-conf.d/glance:
# [glance_*]
# user glance
#
# Magic markers
#%# capabilities=autoconf
#%# family=auto
import sys
import os
try:
from sqlalchemy.orm import exc, joinedload
from glance.common.cfg import CommonConfigOpts
from glance.registry.db import models
from glance.registry.db.api import get_session, configure_db
except ImportError:
succesful_import = False
else:
succesful_import = True
def load_conf():
CONF = CommonConfigOpts(project="glance", prog="glance-registry")
CONF()
# Hide missing logger warning message
sys.stderr = open(os.devnull, 'w')
configure_db(CONF)
sys.stderr = sys.__stderr__
possible_status = [ 'queued', 'saving', 'active', 'killed', 'deleted', 'pending_delete' ]
def print_config():
print 'graph_title Glance images status'
print 'graph_vlabel images'
print 'graph_args --lower-limit 0'
print 'graph_category glance'
print 'graph_scale no'
print 'graph_info This graph shows the number of images by status'
for status in possible_status:
print '%s.label %s' % (status, status)
print '%s.draw LINE2' % status
print '%s.info %s image(s)' % (status, status)
def request(**kwargs):
session = get_session()
try:
query = session.query(models.Image).\
options(joinedload(models.Image.properties)).\
options(joinedload(models.Image.members))
if kwargs:
query = query.filter_by(**kwargs)
images = query.all()
except exc.NoResultFound:
return []
return images
def print_values():
images = request()
n_image_by_status = {}
for image in images:
n_image_by_status[image["status"]] = n_image_by_status.get(image["status"], 0) + 1
for status in possible_status:
print '%s.value %s' % (status, n_image_by_status.get(status, 0))
if __name__ == '__main__':
argv = sys.argv[:]
if len(argv) > 1:
if argv[1] == 'config':
print_config()
elif argv[1] == 'autoconf':
if not succesful_import:
print 'no (failed to import glance and/or sqlalchemy module)'
sys.exit(0)
try:
load_conf()
get_session()
except:
print 'no (failed to connect glance backend, check user)'
sys.exit(0)
print 'yes'
elif succesful_import:
load_conf()
print_values()

View File

@ -110,7 +110,16 @@ for (my $i = 0; $i < $gpuCount; $i++)
else
{
$gpuUtil = "N/A";
$memUtil = "N/A";
($ret, my $memory) = nvmlDeviceGetMemoryInfo($handle);
if ($ret == $NVML_SUCCESS)
{
$memUtil = $memory->{"used"} / $memory->{"total"} * 100;
}
else
{
$memUtil = "N/A";
}
}
print "GPU_TEMP_$i.value $gpuTemp\n";

View File

@ -0,0 +1,106 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_aborts_backend - Haproxy Aborts Backend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGIC MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
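# Usage example (names are illustrative): parse_url my_backend BACKEND scur
# prints the scur column of the "my_backend,BACKEND" row of the stats CSV.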
SVNAME='BACKEND'
LIST="$backend"
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Aborts ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Aborts'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info Aborts ${SVNAME}"
for i in ${LIST}; do
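# Field names are derived from the md5 of the backend name so they remain
# valid munin field names regardless of the characters in the backend name.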
echo "cli_abrt`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Aborted by client $i"
echo "cli_abrt`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "cli_abrt`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "cli_abrt`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Data transfers aborted by the client $i"
echo "srv_abrt`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Aborted by server $i"
echo "srv_abrt`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "srv_abrt`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "srv_abrt`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Data transfers aborted by the server $i"
done
exit 0
fi
for i in ${LIST}; do
CLI=`parse_url ${i} ${SVNAME} cli_abrt`
SRV=`parse_url ${i} ${SVNAME} srv_abrt`
echo "cli_abrt`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $CLI"
echo "srv_abrt`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $SRV"
done

View File

@ -0,0 +1,93 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_active_backend - Haproxy Active Servers Backend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGIC MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='BACKEND'
LIST=$backend
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Active Servers ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Servers'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info Active Servers ${SVNAME}"
for i in ${LIST}; do
echo "act`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Active Servers $i"
echo "act`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
echo "act`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "act`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Active Servers $i"
done
exit 0
fi
for i in ${LIST}; do
ACT=`parse_url ${i} ${SVNAME} act`
echo "act`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $ACT"
done

View File

@ -0,0 +1,104 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_bytes_backend - Haproxy Bytes Backend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGIC MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='BACKEND'
LIST=$backend
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Bytes ${SVNAME}"
echo 'graph_args --base 1000'
echo 'graph_vlabel Bytes in (-) / out (+)'
echo 'graph_category haproxy'
echo "graph_info Bytes ${SVNAME}"
for i in ${LIST}; do
echo "down`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Bytes $i"
echo "down`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "down`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.graph no"
echo "down`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.cdef down`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`,8,*"
echo "down`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "up`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Bytes $i"
echo "up`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "up`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.negative down`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`"
echo "up`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.cdef up`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`,8,*"
echo "up`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Bytes in $i"
echo "up`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
done
exit 0
fi
for i in ${LIST}; do
BIN=`parse_url ${i} ${SVNAME} bin`
BOUT=`parse_url ${i} ${SVNAME} bout`
echo "down`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $BOUT"
echo "up`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $BIN"
done

View File

@ -0,0 +1,104 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_bytes_frontend - Haproxy Bytes Frontend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGIC MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='FRONTEND'
LIST=$frontend
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Bytes ${SVNAME}"
echo 'graph_args --base 1000'
echo 'graph_vlabel Bytes in (-) / out (+)'
echo 'graph_category haproxy'
echo "graph_info Bytes ${SVNAME}"
for i in ${LIST}; do
echo "down`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Bytes $i"
echo "down`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "down`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.graph no"
echo "down`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.cdef down`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`,8,*"
echo "down`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "up`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Bytes $i"
echo "up`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "up`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.negative down`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`"
echo "up`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.cdef up`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`,8,*"
echo "up`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Bytes in $i"
echo "up`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
done
exit 0
fi
for i in ${LIST}; do
BIN=`parse_url ${i} ${SVNAME} bin`
BOUT=`parse_url ${i} ${SVNAME} bout`
echo "down`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $BOUT"
echo "up`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $BIN"
done

View File

@ -0,0 +1,105 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_denied_backend - Haproxy Denied Backend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGIC MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='BACKEND'
LIST="$backend"
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Denied Requests / Responses ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Requests and Responses'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info Denied Requests / Responses ${SVNAME}"
for i in ${LIST}; do
echo "dreq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Denied Requests $i"
echo "dreq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "dreq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "dreq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Denied Requests $i"
echo "dresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Denied Responses $i"
echo "dresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "dresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "dresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Denied Responses $i"
done
exit 0
fi
for i in ${LIST}; do
DREQ=`parse_url ${i} ${SVNAME} dreq`
DRESP=`parse_url ${i} ${SVNAME} dresp`
echo "dreq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $DREQ"
echo "dresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $DRESP"
done

View File

@ -0,0 +1,105 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_denied_frontend - Haproxy Denied Frontend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGIC MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='FRONTEND'
LIST="$frontend"
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Denied Requests / Responses ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Requests and Responses'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info Denied Requests / Responses ${SVNAME}"
for i in ${LIST}; do
echo "dreq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Denied Requests $i"
echo "dreq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "dreq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "dreq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Denied Requests $i"
echo "dresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Denied Responses $i"
echo "dresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "dresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "dresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Denied Responses $i"
done
exit 0
fi
for i in ${LIST}; do
DREQ=`parse_url ${i} ${SVNAME} dreq`
DRESP=`parse_url ${i} ${SVNAME} dresp`
echo "dreq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $DREQ"
echo "dresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $DRESP"
done

View File

@ -0,0 +1,113 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_errors_backend - Haproxy Errors Backend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGIC MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='BACKEND'
LIST="$backend"
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Errors ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Errors'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info Errors ${SVNAME}"
for i in ${LIST}; do
#echo "ereq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Request Errors $i"
#echo "ereq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
#echo "ereq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
#echo "ereq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Request Errors $i"
echo "econ`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Connection Errors $i"
echo "econ`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "econ`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "econ`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Connection Errors $i"
echo "eresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Response Errors $i"
echo "eresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "eresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "eresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Response Errors $i"
done
exit 0
fi
for i in ${LIST}; do
#EREQ=`parse_url ${i} ${SVNAME} ereq`
ECON=`parse_url ${i} ${SVNAME} econ`
ERESP=`parse_url ${i} ${SVNAME} eresp`
#echo "ereq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $EREQ"
echo "econ`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $ECON"
echo "eresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $ERESP"
done

View File

@ -0,0 +1,113 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_errors_frontend - Haproxy Errors Frontend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGIC MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='FRONTEND'
LIST="$frontend"
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Errors ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Errors'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info Errors ${SVNAME}"
for i in ${LIST}; do
echo "ereq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Request Errors $i"
echo "ereq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "ereq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "ereq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Request Errors $i"
#echo "econ`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Connection Errors $i"
#echo "econ`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
#echo "econ`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
#echo "econ`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Connection Errors $i"
#echo "eresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Response Errors $i"
#echo "eresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
#echo "eresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
#echo "eresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Response Errors $i"
done
exit 0
fi
for i in ${LIST}; do
EREQ=`parse_url ${i} ${SVNAME} ereq`
#ECON=`parse_url ${i} ${SVNAME} econ`
#ERESP=`parse_url ${i} ${SVNAME} eresp`
echo "ereq`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $EREQ"
#echo "econ`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $ECON"
#echo "eresp`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $ERESP"
done

View File

@ -0,0 +1,106 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_queue_backend - Haproxy Queued Requests Backend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGIC MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='BACKEND'
LIST=$backend
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Queued Request ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Queued'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info Queue Requests ${SVNAME}"
for i in ${LIST}; do
echo "qcur`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Current queued request $i"
echo "qcur`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
echo "qcur`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "qcur`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Current queued request $i"
#echo "qmax`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Max $i"
#echo "qmax`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
#echo "qmax`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
#echo "qmax`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Max queued request $i"
done
exit 0
fi
for i in ${LIST}; do
QCUR=`parse_url ${i} ${SVNAME} qcur`
#QMAX=`parse_url ${i} ${SVNAME} qmax`
echo "qcur`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $QCUR"
#echo "qmax`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $QMAX"
done

View File

@ -0,0 +1,112 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_rate_backend - Haproxy Sessions Per Second Backend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGIC MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='BACKEND'
LIST=$backend
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Sessions per second ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Sessions'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info Sessions per second ${SVNAME}"
for i in ${LIST}; do
echo "rate`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Rate $i"
echo "rate`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
echo "rate`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "rate`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Number of sessions per second over last elapsed second $i"
echo "rate_lim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Limit $i"
echo "rate_lim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
echo "rate_lim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "rate_lim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Limit on new sessions per second $i"
#echo "rate_max`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Max $i"
#echo "rate_max`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
#echo "rate_max`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
#echo "rate_max`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Max number of new sessions per second $i"
done
exit 0
fi
for i in ${LIST}; do
RATE=`parse_url ${i} ${SVNAME} rate`
RATEL=`parse_url ${i} ${SVNAME} rate_lim`
#RATEM=`parse_url ${i} ${SVNAME} rate_max`
echo "rate`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $RATE"
echo "rate_lim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $RATEL"
#echo "rate_max`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $RATEM"
done

View File

@ -0,0 +1,113 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_rate_frontend - Haproxy Sessions Per Second Frontend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGICK MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='FRONTEND'
LIST=$frontend
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Sessions per sencond ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Sessions'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info Sessions per second ${SVNAME}"
for i in ${LIST}; do
echo "rate`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Rate $i"
echo "rate`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
echo "rate`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "rate`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Number of sessions per second over last elapsed second $i"
echo "rate_lim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Limit $i"
echo "rate_lim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
echo "rate_lim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "rate_lim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Limit on new sessions per second $i"
#echo "rate_max`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Max $i"
#echo "rate_max`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
#echo "rate_max`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
#echo "rate_max`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Max number of new sessions per second $i"
done
exit 0
fi
for i in ${LIST}; do
RATE=`parse_url ${i} ${SVNAME} rate`
RATEL=`parse_url ${i} ${SVNAME} rate_lim`
#RATEM=`parse_url ${i} ${SVNAME} rate_max`
echo "rate`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $RATE"
echo "rate_lim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $RATEL"
#echo "rate_max`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $RATEM"
done

View File

@ -0,0 +1,106 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_reqrate_frontend - Haproxy request rate frontend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGICK MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='FRONTEND'
LIST="$frontend"
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Request Rate ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Requests'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info Request Rate ${SVNAME}"
for i in ${LIST}; do
echo "reqrt`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Requests per second $i"
echo "reqrt`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
echo "reqrt`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "reqrt`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info HTTP requests per second over last elapsed second $i"
#echo "reqrtm`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Max $i"
#echo "reqrtm`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
#echo "reqrtm`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
#echo "reqrtm`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info max number of HTTP requests per second observed $i"
done
exit 0
fi
for i in ${LIST}; do
REQRT=`parse_url ${i} ${SVNAME} req_rate`
#REQRTM=`parse_url ${i} ${SVNAME} req_rate_max`
echo "reqrt`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $REQRT"
#echo "reqtm`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $REQRTM"
done

View File

@ -0,0 +1,130 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_responses_backend - Haproxy responses backend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rikr_@hotmail.com>
=head1 LICENSE
GPLv2
=head1 MAGICK MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='BACKEND'
LIST=$backend
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title HTTP Responses ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Responses'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info HTTP Responses ${SVNAME}"
for i in ${LIST}; do
echo "hrsp_1xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label HTTP 1xx $i"
echo "hrsp_1xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "hrsp_1xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "hrsp_1xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info HTTP responses with 1xx code $i"
echo "hrsp_2xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label HTTP 2xx $i"
echo "hrsp_2xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "hrsp_2xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "hrsp_2xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info HTTP responses with 2xx code $i"
echo "hrsp_3xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label HTTP 3xx $i"
echo "hrsp_3xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "hrsp_3xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "hrsp_3xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info HTTP responses with 3xx code $i"
echo "hrsp_4xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label HTTP 4xx $i"
echo "hrsp_4xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "hrsp_4xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "hrsp_4xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info HTTP responses with 4xx code $i"
echo "hrsp_5xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label HTTP 5xx $i"
echo "hrsp_5xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "hrsp_5xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "hrsp_5xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info HTTP responses with 5xx code $i"
echo "hrsp_other`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label HTTP other $i"
echo "hrsp_other`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "hrsp_other`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "hrsp_other`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info HTTP responses with other codes $i"
done
exit 0
fi
for i in ${LIST}; do
H1xx=`parse_url ${i} ${SVNAME} hrsp_1xx`
H2xx=`parse_url ${i} ${SVNAME} hrsp_2xx`
H3xx=`parse_url ${i} ${SVNAME} hrsp_3xx`
H4xx=`parse_url ${i} ${SVNAME} hrsp_4xx`
H5xx=`parse_url ${i} ${SVNAME} hrsp_5xx`
Hoxx=`parse_url ${i} ${SVNAME} hrsp_other`
echo "hrsp_1xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $H1xx"
echo "hrsp_2xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $H2xx"
echo "hrsp_3xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $H3xx"
echo "hrsp_4xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $H4xx"
echo "hrsp_5xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $H5xx"
echo "hrsp_other`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $Hoxx"
done

View File

@ -0,0 +1,130 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_responses_frontend - Haproxy responses frontend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGICK MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='FRONTEND'
LIST=$frontend
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title HTTP Responses ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Responses'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info HTTP Responses ${SVNAME}"
for i in ${LIST}; do
echo "hrsp_1xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label HTTP 1xx $i"
echo "hrsp_1xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "hrsp_1xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "hrsp_1xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info HTTP responses with 1xx code $i"
echo "hrsp_2xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label HTTP 2xx $i"
echo "hrsp_2xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "hrsp_2xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "hrsp_2xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info HTTP responses with 2xx code $i"
echo "hrsp_3xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label HTTP 3xx $i"
echo "hrsp_3xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "hrsp_3xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "hrsp_3xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info HTTP responses with 3xx code $i"
echo "hrsp_4xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label HTTP 4xx $i"
echo "hrsp_4xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "hrsp_4xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "hrsp_4xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info HTTP responses with 4xx code $i"
echo "hrsp_5xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label HTTP 5xx $i"
echo "hrsp_5xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "hrsp_5xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "hrsp_5xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info HTTP responses with 5xx code $i"
echo "hrsp_other`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label HTTP other $i"
echo "hrsp_other`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "hrsp_other`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "hrsp_other`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info HTTP responses with other codes $i"
done
exit 0
fi
for i in ${LIST}; do
H1xx=`parse_url ${i} ${SVNAME} hrsp_1xx`
H2xx=`parse_url ${i} ${SVNAME} hrsp_2xx`
H3xx=`parse_url ${i} ${SVNAME} hrsp_3xx`
H4xx=`parse_url ${i} ${SVNAME} hrsp_4xx`
H5xx=`parse_url ${i} ${SVNAME} hrsp_5xx`
Hoxx=`parse_url ${i} ${SVNAME} hrsp_other`
echo "hrsp_1xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $H1xx"
echo "hrsp_2xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $H2xx"
echo "hrsp_3xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $H3xx"
echo "hrsp_4xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $H4xx"
echo "hrsp_5xx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $H5xx"
echo "hrsp_oxx`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $Hoxx"
done

View File

@ -0,0 +1,112 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_sessions_backend - Haproxy Sessions Backend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGICK MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='BACKEND'
LIST="$backend"
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Current sessions ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Sessions'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info Current sessions ${SVNAME}"
for i in ${LIST}; do
echo "scur`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Current Sessions $i"
echo "scur`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
echo "scur`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "scur`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Current Sessions $i"
#echo "smax`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Max $i"
#echo "smax`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
#echo "smax`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
#echo "smax`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Max sessions $i"
echo "slim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Limit Sessions $i"
echo "slim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
echo "slim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "slim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Limit Sessions $i"
done
exit 0
fi
for i in ${LIST}; do
SCUR=`parse_url ${i} ${SVNAME} scur`
#SMAX=`parse_url ${i} ${SVNAME} smax`
SLIM=`parse_url ${i} ${SVNAME} slim`
echo "scur`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $SCUR"
#echo "smax`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $SMAX"
echo "slim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $SLIM"
done

View File

@ -0,0 +1,112 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_sessions_frontend - Haproxy Sessions Frontend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGICK MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='FRONTEND'
LIST="$frontend"
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Current sessions ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Sessions'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info Current sessions ${SVNAME}"
for i in ${LIST}; do
echo "scur`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Current Sessions $i"
echo "scur`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
echo "scur`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "scur`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Current Sessions $i"
#echo "smax`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Max $i"
#echo "smax`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
#echo "smax`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
#echo "smax`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Max Sessions $i"
echo "slim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Limit Sessions $i"
echo "slim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type GAUGE"
echo "slim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "slim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Limit Sessions $i"
done
exit 0
fi
for i in ${LIST}; do
SCUR=`parse_url ${i} ${SVNAME} scur`
#SMAX=`parse_url ${i} ${SVNAME} smax`
SLIM=`parse_url ${i} ${SVNAME} slim`
echo "scur`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $SCUR"
#echo "smax`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $SMAX"
echo "slim`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $SLIM"
done

View File

@ -0,0 +1,99 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_sessions_total_backend - Haproxy Sessions Total Backend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGICK MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='BACKEND'
LIST=$backend
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Total sessions ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Sessions'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info Total sessions ${SVNAME}"
for i in ${LIST}; do
echo "stot`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Total Sessions $i"
echo "stot`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "stot`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "stot`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Total Sessions $i"
done
exit 0
fi
for i in ${LIST}; do
STOT=`parse_url ${i} ${SVNAME} stot`
echo "stot`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $STOT"
done

View File

@ -0,0 +1,100 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_sessions_total_frontend - Haproxy Sessions Total Frontend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGICK MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='FRONTEND'
LIST=$frontend
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Total sessions ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Sessions'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info Total sessions ${SVNAME}"
for i in ${LIST}; do
echo "stot`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Total Sessions $i"
echo "stot`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "stot`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "stot`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Total Sessions $i"
done
exit 0
fi
for i in ${LIST}; do
STOT=`parse_url ${i} ${SVNAME} stot`
echo "stot`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $STOT"
done

View File

@ -0,0 +1,106 @@
#!/bin/bash
# -*- bash -*-
: << =cut
=head1 NAME
haproxy_warnings_backend - Haproxy Warnings Backend
=head1 CONFIGURATION
[haproxy*]
user root
env.backend backend_name_1 backend_name_2 backend_name_3
env.frontend frontend_name_1 frontend_name_2 frontend_name_3
env.url http://user:passwd@IP:port/admin?stats;csv
=head1 AUTHOR
Ricardo Fraile <rfrail3@yahoo.es>
=head1 LICENSE
GPLv2
=head1 MAGICK MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
function parse_url {
# Modify ifs variable
OIFS=$IFS;
IFS=",";
PXNAME="$1"
SVNAME="$2"
VALUE="$3"
LINE1=`curl -s "$url" | head -1 | sed 's/# //'`
LINE2=`curl -s "$url" | grep "$PXNAME,$SVNAME"`
ARRAY1=($LINE1);
# Find values
for ((i=0; i<${#ARRAY1[@]}; ++i));
do
# Get data
if [[ "${ARRAY1[$i]}" == "${VALUE}" ]]; then
o=$i;
o=`expr $o + 1`
echo ${LINE2} | cut -d" " -f $o
fi
done
# Reset ifs
IFS=$OIFS;
}
SVNAME='BACKEND'
LIST="$backend"
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ "$1" = "config" ]; then
echo "graph_title Warnings ${SVNAME}"
echo 'graph_args --base 1000 -l 0 '
echo 'graph_vlabel Warnings'
echo 'graph_scale no'
echo 'graph_category haproxy'
echo "graph_info Warnings ${SVNAME}"
for i in ${LIST}; do
echo "wretr`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Warning Retries $i"
echo "wretr`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "wretr`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "wretr`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Warning Retries $i"
echo "wredis`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.label Warning Redispatches $i"
echo "wredis`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.type DERIVE"
echo "wredis`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.min 0"
echo "wredis`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.info Warning Redispatches $i"
done
exit 0
fi
for i in ${LIST}; do
WRETR=`parse_url ${i} ${SVNAME} wretr`
WREDIS=`parse_url ${i} ${SVNAME} wredis`
echo "wretr`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $WRETR"
echo "wredis`echo $i | md5sum | cut -d - -f1 | sed 's/ //g'`.value $WREDIS"
done
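All of the haproxy_* plugins above read the same three environment variables, so a single [haproxy*] section in the munin-node plugin configuration covers them. A possible installation sequence for one of them, sketched with hypothetical backend/frontend names and the usual Debian paths (adjust both to your setup):

cp haproxy_queue_backend /usr/share/munin/plugins/
chmod 755 /usr/share/munin/plugins/haproxy_queue_backend
ln -s /usr/share/munin/plugins/haproxy_queue_backend /etc/munin/plugins/
cat > /etc/munin/plugin-conf.d/haproxy <<'EOF'
[haproxy*]
user root
env.backend bk_app
env.frontend ft_main
env.url http://user:passwd@127.0.0.1:8080/admin?stats;csv
EOF
# Check the output by hand, then restart munin-node:
munin-run haproxy_queue_backend config
munin-run haproxy_queue_backend
service munin-node restart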

View File

@ -1,95 +0,0 @@
#!/usr/bin/env python
from commands import getstatusoutput as gso
def safe(s):
s=s.replace("-", "_")
s=s.replace(" ", "_")
s=s.replace(".", "_")
return s
def config(data,title):
for i in data:
print "%s.label %s"%(safe(i[0]), i[0])
# check for non-critical thresholds
if i[6] != 'na':
if i[7] != 'na':
warning = "%s:%s"%(i[6],i[7])
else:
warning = "%s:"%i[6]
else:
if i[7] != 'na':
warning = "%s"%i[7]
else:
warning = ""
if warning:
print "%s.warning %s"%(safe(i[0]),warning)
# check for critical thresholds
if i[5] == 'na':
i[5] == i[4] # N/A, so see if there is a non-recoverable threshold
if i[8] == 'na':
i[8] == i[9] # N/A, so see if there is a non-recoverable threshold
if i[5] != 'na':
if i[8] != 'na':
critical = "%s:%s"%(i[5],i[8])
else:
critical = "%s:"%i[5]
else:
if i[8] != 'na':
critical = "%s"%i[8]
else:
critical = ""
if critical:
print "%s.critical %s"%(safe(i[0]),critical)
print "graph_title %s"%title
if title == "Voltages":
print "graph_args -X 0 --logarithmic -l 1 -u 15"
#print "graph_args --base 1000 --logarithmic"
else:
print "graph_args -l 0"
print "graph_vlabel %s"%i[2]
print "graph_period minute"
print "graph_category IPMI"
def get_data():
import sys
category = sys.argv[0].split("_",1)[1]
data = []
if category =="Fans":
ids = ("Fan 1 Tach", "Fan 2 Tach", "Fan 3 Tach",
"Fan 4 Tach", "Fan 5 Tach", "Fan 6 Tach",)
title = "Fan Speed"
elif category == "Temperature":
ids = ("Ambient Temp", "Memory Temp",)
title = "Temperatures"
elif category == "Voltage":
ids = ("Planar 1.5V", "Planar 1.8V",
"Planar 3.3V", "Planar 5V", "Planar 12V",
"Planar VBAT", "CPU 1 VCore", "CPU 2 VCore",)
title = "Voltages"
status, output = gso("ipmitool sensor")
for row in output.split("\n"):
items = map(str.strip,row.split("|"))
field,value,units,status,lower_nonrecoverable,lower_critical,lower_non_critical,upper_non_critical,upper_critical,upper_nonrecoverable=items
if field in ids:
if value == 'na': continue
data.append(items)
return data,title
def sample(data):
for i in data:
print "%s.value %s"%(safe(i[0]),i[1])
def main():
import sys
data,title = get_data()
if 'config' in sys.argv:
return config(data,title)
sample(data)
if __name__ == '__main__':
main()

View File

@ -1,8 +0,0 @@
.DS_Store
.classpath
.project
.fatjar
target
eclipse
old
bin

View File

@ -1,79 +0,0 @@
# jmx2munin
The [jmx2munin](http://github.com/tcurdt/jmx2munin) project exposes JMX MBean attributes to [Munin](http://munin-monitoring.org/).
Some of it's features:
* strictly complies to the plugin format
* exposes composite types like Lists, Maps, Set as useful as possible
* String values can be mapped to numbers
# How to use
This is what the Munin script will call. So you should test this first. Of course with your parameters. This example expose all Cassandra information to Munin.
java -jar jmx2munin.jar \
-url service:jmx:rmi:///jndi/rmi://localhost:8080/jmxrmi \
-query "org.apache.cassandra.*:*"
The "url" parameters specifies the JMX URL, the query selects the MBeans (and optionally also the attributes) to expose.
java -jar jmx2munin.jar \
-url service:jmx:rmi:///jndi/rmi://localhost:8080/jmxrmi \
-query "org.apache.cassandra.*:*" \
-attribute org_apache_cassandra_db_storageservice_livenodes_size
The script that does the actual interaction with munin you can find in the contrib section. It's the one you should link in the your Munin plugin directory.
:/etc/munin/plugins$ ls -la cassandra_*
lrwxrwxrwx 1 root root 37 2011-04-07 19:58 cassandra_nodes_in_cluster -> /usr/share/munin/plugins/jmx2munin.sh
In the plugin conf you point to the correct configuration
[cassandra_*]
env.query org.apache.cassandra.*:*
[cassandra_nodes_in_cluster]
env.config cassandra/nodes_in_cluster
A possible configuration could look like this
graph_title Number of Nodes in Cluster
graph_vlabel org_apache_cassandra_db_storageservice_livenodes_size
org_apache_cassandra_db_storageservice_livenodes_size.label number of nodes
The script will extract the attributes from the config and caches the JMX results to reduce the load when showing many values.
# More advanced
Sometimes it can be useful to track String values by mapping them into an enum as they really describe states. To find this possible candidates you can call:
java -jar jmx2munin.jar \
-url service:jmx:rmi:///jndi/rmi://localhost:8080/jmxrmi \
-query "org.apache.cassandra.*:*" \
list
It should output a list of possible candidates. This can now be turned into a enum configuration file:
[org.apache.cassandra.db.StorageService:OperationMode]
0 = ^Normal
1 = ^Client
2 = ^Joining
3 = ^Bootstrapping
4 = ^Leaving
5 = ^Decommissioned
6 = ^Starting drain
7 = ^Node is drained
Which you then can provide:
java -jar jmx2munin.jar \
-url service:jmx:rmi:///jndi/rmi://localhost:8080/jmxrmi \
-query "org.apache.cassandra.*:*" \
-enums /path/to/enums.cfg
Now matching values get replaced by their numerical representation. On the left needs to be a unique number on the right side is a regular expression. If a string cannot be matched according to the spec "U" for "undefined" will be returned.
# License
Licensed under the Apache License, Version 2.0 (the "License")
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

View File

@ -1,3 +0,0 @@
graph_title Number of Nodes in Cluster
graph_vlabel org_apache_cassandra_db_storageservice_livenodes_size
org_apache_cassandra_db_storageservice_livenodes_size.label number of nodes

View File

@ -1,55 +0,0 @@
#!/bin/bash
# [cassandra_nodes_in_cluster]
# env.config cassandra/nodes_in_cluster
# env.query org.apache.cassandra.*:*
if [ -z "$MUNIN_LIBDIR" ]; then
MUNIN_LIBDIR="`dirname $(dirname "$0")`"
fi
if [ -f "$MUNIN_LIBDIR/plugins/plugin.sh" ]; then
. $MUNIN_LIBDIR/plugins/plugin.sh
fi
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
fi
if [ -z "$url" ]; then
# this is very common so make it a default
url="service:jmx:rmi:///jndi/rmi://127.0.0.1:8080/jmxrmi"
fi
if [ -z "$config" -o -z "$query" -o -z "$url" ]; then
echo "Configuration needs attributes config, query and optinally url"
exit 1
fi
JMX2MUNIN_DIR="$MUNIN_LIBDIR/plugins"
CONFIG="$JMX2MUNIN_DIR/jmx2munin.cfg/$config"
if [ "$1" = "config" ]; then
cat "$CONFIG"
exit 0
fi
JAR="$JMX2MUNIN_DIR/jmx2munin.jar"
CACHED="/tmp/jmx2munin"
if test ! -f $CACHED || test `find "$CACHED" -mmin +2`; then
java -jar "$JAR" \
-url "$url" \
-query "$query" \
$ATTRIBUTES \
> $CACHED
echo "cached.value `date +%s`" >> $CACHED
fi
ATTRIBUTES=`awk '/\.label/ { gsub(/\.label/,""); print $1 }' $CONFIG`
for ATTRIBUTE in $ATTRIBUTES; do
grep $ATTRIBUTE $CACHED
done

View File

@ -1,121 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.vafer</groupId>
<artifactId>jmx2munin</artifactId>
<name>jmx2munin</name>
<version>1.0</version>
<description>
Munin plugin to access JMX information
</description>
<url>http://github.com/tcurdt/jmx2munin</url>
<developers>
<developer>
<id>tcurdt</id>
<name>Torsten Curdt</name>
<email>tcurdt at vafer.org</email>
<timezone>+1</timezone>
</developer>
</developers>
<licenses>
<license>
<name>Apache License 2</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
</license>
</licenses>
<scm>
<connection>scm:git:git://github.com:tcurdt/jmx2munin.git</connection>
<developerConnection>scm:git:git://github.com:tcurdt/jmx2munin.git</developerConnection>
<url>http://github.com/tcurdt/jmx2munin/tree/master</url>
</scm>
<dependencies>
<dependency>
<groupId>com.beust</groupId>
<artifactId>jcommander</artifactId>
<version>1.17</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.5</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>1.5</source>
<target>1.5</target>
<encoding>UTF-8</encoding>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkMode>never</forkMode>
<includes>
<include>**/*TestCase.java</include>
</includes>
<excludes>
<exclude>**/Abstract*</exclude>
</excludes>
<testFailureIgnore>true</testFailureIgnore>
<skip>false</skip>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>2.1</version>
<configuration>
<attach>true</attach>
</configuration>
<executions>
<execution>
<id>create-source-jar</id>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>1.4</version>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<minimizeJar>false</minimizeJar>
<artifactSet>
<includes>
<include>com.beust:jcommander</include>
</includes>
</artifactSet>
<transformers>
<transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
<mainClass>org.vafer.jmx.munin.Munin</mainClass>
</transformer>
</transformers>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -1,77 +0,0 @@
package org.vafer.jmx;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.TreeMap;
import java.util.regex.Pattern;
import javax.management.ObjectName;
public final class Enums {
private TreeMap<String, LinkedHashMap<Integer, Pattern>> sections = new TreeMap<String, LinkedHashMap<Integer, Pattern>>();
public boolean load(String filePath) throws IOException {
BufferedReader input = null;
LinkedHashMap<Integer, Pattern> section = new LinkedHashMap<Integer, Pattern>();
try {
input = new BufferedReader(new InputStreamReader(new FileInputStream(filePath)));
String line;
int linenr = 0;
while((line = input.readLine()) != null) {
linenr += 1;
line = line.trim();
if (line.startsWith("#")) {
continue;
}
if (line.startsWith("[") && line.endsWith("]")) {
// new section
String id = line.substring(1, line.length() - 1);
section = new LinkedHashMap<Integer, Pattern>();
sections.put(id, section);
} else {
String[] pair = line.split("=");
if (pair.length == 2) {
Integer number = Integer.parseInt(pair[0].trim());
Pattern pattern = Pattern.compile(pair[1].trim());
if (section.put(number, pattern) != null) {
System.err.println("Line " + linenr + ": previous definitions of " + number);
}
}
}
}
} finally {
if (input != null) {
input.close();
}
}
return false;
}
public static String id(ObjectName beanName, String attributeName) {
StringBuilder sb = new StringBuilder();
sb.append(beanName.getDomain());
sb.append('.');
sb.append(beanName.getKeyProperty("type"));
sb.append(':');
sb.append(attributeName);
return sb.toString();
}
public Number resolve(String id, String value) {
LinkedHashMap<Integer, Pattern> section = sections.get(id);
if (section == null) {
return null;
}
for(Map.Entry<Integer, Pattern> entry : section.entrySet()) {
if (entry.getValue().matcher(value).matches()) {
return entry.getKey();
}
}
return null;
}
}

View File

@ -1,9 +0,0 @@
package org.vafer.jmx;
import javax.management.ObjectName;
public interface Filter {
public boolean include(ObjectName bean, String attribute);
}

View File

@ -1,26 +0,0 @@
package org.vafer.jmx;
import java.util.HashSet;
import java.util.Set;
import javax.management.ObjectName;
public final class ListOutput implements Output {
private final Set<String> seen = new HashSet<String>();
public void output(ObjectName beanName, String attributeName, Object value) {
Value.flatten(beanName, attributeName, value, new Value.Listener() {
public void value(ObjectName beanName, String attributeName, String value) {
final String id = Enums.id(beanName, attributeName);
if (!seen.contains(id)) {
System.out.println("[" + id + "]");
seen.add(id);
}
}
public void value(ObjectName beanName, String attributeName, Number value) {
}
});
}
}

View File

@ -1,10 +0,0 @@
package org.vafer.jmx;
import javax.management.ObjectName;
public final class NoFilter implements Filter {
public boolean include(ObjectName bean, String attribute) {
return true;
}
}

View File

@ -1,9 +0,0 @@
package org.vafer.jmx;
import javax.management.ObjectName;
public interface Output {
public void output(ObjectName beanName, String attributeName, Object value);
}

View File

@ -1,52 +0,0 @@
package org.vafer.jmx;
import java.io.IOException;
import java.util.Collection;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.IntrospectionException;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanException;
import javax.management.MBeanInfo;
import javax.management.MBeanServerConnection;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
public final class Query {
public void run(String url, String expression, Filter filter, Output output) throws IOException, MalformedObjectNameException, InstanceNotFoundException, ReflectionException, IntrospectionException, AttributeNotFoundException, MBeanException {
JMXConnector connector = JMXConnectorFactory.connect(new JMXServiceURL(url));
MBeanServerConnection connection = connector.getMBeanServerConnection();
final Collection<ObjectInstance> mbeans = connection.queryMBeans(new ObjectName(expression), null);
for(ObjectInstance mbean : mbeans) {
final ObjectName mbeanName = mbean.getObjectName();
final MBeanInfo mbeanInfo = connection.getMBeanInfo(mbeanName);
final MBeanAttributeInfo[] attributes = mbeanInfo.getAttributes();
for (final MBeanAttributeInfo attribute : attributes) {
if (attribute.isReadable()) {
if (filter.include(mbeanName, attribute.getName())) {
final String attributeName = attribute.getName();
try {
output.output(
mbean.getObjectName(),
attributeName,
connection.getAttribute(mbeanName, attributeName)
);
} catch(Exception e) {
// System.err.println("Failed to read " + mbeanName + "." + attributeName);
}
}
}
}
}
connector.close();
}
}

View File

@ -1,52 +0,0 @@
package org.vafer.jmx;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.management.ObjectName;
public final class Value {
public interface Listener {
public void value(ObjectName beanName, String attributeName, String value);
public void value(ObjectName beanName, String attributeName, Number value);
}
public static void flatten(ObjectName beanName, String attributeName, Object value, Listener listener) {
if (value instanceof Number) {
listener.value(beanName, attributeName, (Number) value);
} else if (value instanceof String) {
listener.value(beanName, attributeName, (String) value);
} else if (value instanceof Set) {
final Set set = (Set) value;
flatten(beanName, attributeName + ".size", set.size(), listener);
for(Object entry : set) {
flatten(beanName, attributeName + "[" + entry + "]", 1, listener);
}
} else if (value instanceof List) {
final List list = (List)value;
listener.value(beanName, attributeName + ".size", list.size());
for(int i = 0; i<list.size(); i++) {
flatten(beanName, attributeName + "[" + i + "]", list.get(i), listener);
}
} else if (value instanceof Map) {
final Map<?,?> map = (Map<?,?>) value;
listener.value(beanName, attributeName + ".size", map.size());
for(Map.Entry<?, ?> entry : map.entrySet()) {
flatten(beanName, attributeName + "[" + entry.getKey() + "]", entry.getValue(), listener);
}
} else {
// System.err.println("Failed to convert " + beanName + "." + attributeName);
}
}
}

View File

@ -1,67 +0,0 @@
package org.vafer.jmx.munin;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import org.vafer.jmx.Enums;
import org.vafer.jmx.Filter;
import org.vafer.jmx.ListOutput;
import org.vafer.jmx.NoFilter;
import org.vafer.jmx.Query;
import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;
public final class Munin {
@Parameter(description = "")
private List<String> args = new ArrayList<String>();
@Parameter(names = "-url", description = "jmx url", required = true)
private String url;
@Parameter(names = "-query", description = "query expression", required = true)
private String query;
@Parameter(names = "-enums", description = "file string to enum config")
private String enumsPath;
@Parameter(names = "-attribute", description = "attributes to return")
private List<String> attributes = new ArrayList<String>();
private void run() throws Exception {
final Filter filter;
if (attributes == null || attributes.isEmpty()) {
filter = new NoFilter();
} else {
filter = new MuninAttributesFilter(attributes);
}
final Enums enums = new Enums();
if (enumsPath != null) {
enums.load(enumsPath);
}
final String cmd = args.toString().toLowerCase(Locale.US);
if ("[list]".equals(cmd)) {
new Query().run(url, query, filter, new ListOutput());
} else {
new Query().run(url, query, filter, new MuninOutput(enums));
}
}
public static void main(String[] args) throws Exception {
Munin m = new Munin();
JCommander cli = new JCommander(m);
try {
cli.parse(args);
} catch(Exception e) {
cli.usage();
System.exit(1);
}
m.run();
}
}

View File

@ -1,24 +0,0 @@
package org.vafer.jmx.munin;
import java.util.HashSet;
import java.util.List;
import javax.management.ObjectName;
import org.vafer.jmx.Filter;
public final class MuninAttributesFilter implements Filter {
private final HashSet<String> attributes = new HashSet<String>();
public MuninAttributesFilter(List<String> pAttributes) {
for (String attribute : pAttributes) {
attributes.add(attribute.trim().replaceAll("_size$", ""));
}
}
public boolean include(ObjectName bean, String attribute) {
return attributes.contains(MuninOutput.attributeName(bean, attribute));
}
}

View File

@ -1,93 +0,0 @@
package org.vafer.jmx.munin;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Hashtable;
import java.util.Locale;
import javax.management.ObjectName;
import org.vafer.jmx.Enums;
import org.vafer.jmx.Output;
import org.vafer.jmx.Value;
public final class MuninOutput implements Output {
private final Enums enums;
public MuninOutput(Enums enums) {
this.enums = enums;
}
public static String attributeName(ObjectName bean, String attribute) {
StringBuilder sb = new StringBuilder();
sb.append(fieldname(beanString(bean)));
sb.append('_');
sb.append(fieldname(attribute));
return sb.toString().toLowerCase(Locale.US);
}
private static String fieldname(String s) {
return s.replaceAll("[^A-Za-z0-9]", "_");
}
private static String beanString(ObjectName beanName) {
StringBuilder sb = new StringBuilder();
sb.append(beanName.getDomain());
Hashtable<String, String> properties = beanName.getKeyPropertyList();
String keyspace = "keyspace";
if (properties.containsKey(keyspace)) {
sb.append('.');
sb.append(properties.get(keyspace));
properties.remove(keyspace);
}
String type = "type";
if (properties.containsKey(type)) {
sb.append('.');
sb.append(properties.get(type));
properties.remove(type);
}
ArrayList<String> keys = new ArrayList(properties.keySet());
Collections.sort(keys);
for(String key : keys) {
sb.append('.');
sb.append(properties.get(key));
}
return sb.toString();
// return beanName.getCanonicalName();
}
public void output(ObjectName beanName, String attributeName, Object value) {
Value.flatten(beanName, attributeName, value, new Value.Listener() {
public void value(ObjectName beanName, String attributeName, String value) {
final Number v = enums.resolve(Enums.id(beanName, attributeName), value);
if (v != null) {
value(beanName, attributeName, v);
} else {
value(beanName, attributeName, Double.NaN);
}
}
public void value(ObjectName beanName, String attributeName, Number value) {
final String v;
if (Double.isNaN(value.doubleValue())) {
v = "U";
} else {
final NumberFormat f = NumberFormat.getInstance();
f.setMaximumFractionDigits(2);
f.setGroupingUsed(false);
v = f.format(value);
}
System.out.println(attributeName(beanName, attributeName) + ".value " + v);
}
});
}
}

View File

@ -0,0 +1,23 @@
# Kamailio Munin Plugin
Munin plugins for Kamailio. It monitors:
## Number of transactions, users and contacts.
![kamailio_transaction](http://desmond.imageshack.us/Himg820/scaled.php?server=820&filename=kamailiotransactionsuse.png&res=landing "kamailio_transaction")
## Usage of shared memory (total, used and real used).
![kamailio_shared_memory](http://desmond.imageshack.us/Himg837/scaled.php?server=837&filename=kamailiomysqlsharedmemo.png&res=landing "kamailio_shared_memory")
## Memory usage by Kamailio, Freeswitch and RTPproxy.
![kamailio_memory](http://desmond.imageshack.us/Himg851/scaled.php?server=851&filename=kamailiomemoryweek.png&res=landing "kamailio_memory")
It requires the MySQL [statistics](http://siremis.asipto.com/install-charts-panel) table created in the Kamailio database.
## Configuration
Edit /etc/munin/plugin-conf.d/munin-node and add:
[kamailio*]
user root
group root
env.mysql [optional-override-of-mysql-path]
env.mysqlauth -u[User] -p[Password]
env.kamailiodb [kamailio database]
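Because the shared-memory and transaction plugins below read everything from the statistics table, it is worth verifying the MySQL access from a shell before enabling them. A minimal check, assuming the example openser credentials and database used further down in the plugin documentation (replace them with your own):

mysql -uopenser -popenserrw openser -e 'SELECT * FROM statistics ORDER BY id DESC LIMIT 1\G'
# Then exercise each plugin by hand:
munin-run kamailio_mysql_shared_memory config
munin-run kamailio_mysql_shared_memory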

121
plugins/kamailio/kamailio_memory Executable file
View File

@ -0,0 +1,121 @@
#!/usr/bin/perl
# -*- perl -*-
=head1 NAME
Munin plugin to monitor memory usage on the Voxtrot SIP Server (Kamailio + Freeswitch + RTPproxy).
=head1 CONFIGURATION
No configuration
=head1 AUTHOR
Copyright 2012 - Voxtrot <www.voxtrot.com>
Oussama Hammami <oussamacvoxtrot.com>
=head1 LICENSE
GPLv2
=head1 VERSION
$Id: kamailio_memory 2012-04-19 15:09 $
=head1 MAGIC MARKERS
#%# family=manual
#%# capabilities=autoconf
=cut
use strict;
my %WANTED = ( "kamailio" => "ram_total",
"rtpproxy" => "ram_rtpproxy",
"freeswitch" => "ram_freeswitch",
);
my %VALUE = ( "ram_total" => 0,
"ram_rtpproxy" => 0,
"ram_freeswitch" => 0,
);
my $arg = shift();
if ($arg eq 'config') {
print_config();
exit();
} elsif ($arg eq 'autoconf') {
unless (test_service() ) {
print "yes\n";
} else {
print "no\n";
}
exit 0;
}
for my $key (keys %WANTED) {
$VALUE{$WANTED{$key}}=get_memory($key);
}
$VALUE{"ram_total"}+=$VALUE{"ram_rtpproxy"}+$VALUE{"ram_freeswitch"};
for my $key (keys %VALUE) {
print ("$key.value $VALUE{$key}\n");
}
sub print_config {
print ("graph_title Voxtrot SIP Server Memory\n");
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024).
print("graph_args --base 1024 --lower-limit 0\n");
print("graph_vlabel MB\n");
print("graph_scale no\n");
print("graph_category kamailio\n");
print("graph_info The graph describes the usage of memory in Voxtrot Sip Server.\n");
print("ram_total.label total (kam+fs+rtp)\n");
print("ram_freeswitch.label freeswitch\n");
print("ram_rtpproxy.label rtpproxy\n");
print("ram_total.info Average total memory used by kamailio, freeswitch and rtpproxy for the five minutes.\n");
print("ram_freeswitch.info Average used memory by freeswitch for the five minutes.\n");
print("ram_rtpproxy.info Average real used memory by rtpproxy for the five minutes.\n");
print("graph_order ram_total ram_freeswitch ram_rtpproxy\n");
print("ram_total.type GAUGE\n");
print("ram_freeswitch.type GAUGE\n");
print("ram_rtpproxy.type GAUGE\n");
print("ram_total.draw AREA\n");
print("ram_freeswitch.draw AREA\n");
print("ram_rtpproxy.draw LINE1\n");
print("ram_total.colour 6699FF\n");
print("ram_freeswitch.colour FF6633\n");
print("ram_rtpproxy.colour 993399\n");
# Ensure min values (useful when using 'DERIVE' as 'type').
print("ram_total.min 0\n");
print("ram_freeswitch.min 0\n");
print("ram_rtpproxy.min 0\n");
# Divide the got value by 1048576 to get MB.
print("ram_total.cdef ram_total,1048576,/\n");
print("ram_freeswitch.cdef ram_freeswitch,1048576,/\n");
print("ram_rtpproxy.cdef ram_rtpproxy,1048576,/\n");
}
sub test_service {
print "yes\n";
exit 0;
}
#########################
# function Get Memory
sub get_memory {
my $proc=shift;
my $i = 0;
my @cmd = `ps auwx | grep $proc | grep -v grep | grep -v kamailio_memory`;
foreach (@cmd) {
my @return = split(/ +/, $_);
$i += $return[5]*1024;
}
return $i;
}
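The get_memory helper above sums the RSS column of every matching ps line (column 6, reported in kilobytes) and converts it to bytes. The shell one-liner below does roughly the same for a single process name and is handy for spot-checking the plugin's output (the process name is only an example):

# the [k]amailio pattern keeps grep from matching its own process, like the plugin's grep -v grep
ps auwx | grep '[k]amailio' | awk '{ sum += $6 * 1024 } END { print sum + 0 }'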

View File

@ -0,0 +1,194 @@
#!/usr/bin/perl
# -*- perl -*-
=head1 NAME
Munin plugin to monitor the usage of shared memory in Kamailio using the 'statistics' table.
=head1 APPLICABLE SYSTEMS
It requires the MySQL 'statistics' table created in the Kamailio database.
http://siremis.asipto.com/install-charts-panel/
=head1 CONFIGURATION
[kamailio*]
user root
group root
env.mysql <optional-override-of-mysql-path>
env.mysqlauth -u<User> -p<Password>
env.kamailiodb <kamailio data base>
Usually the mysql command must be run as root.
=head2 Proxy config
Use the rtimer module to periodically run a route. In that route you insert the values into the database.
SIP Proxy configuration file:
loadmodule "rtimer.so"
loadmodule "sqlops.so"
loadmodule "cfgutils.so"
...
modparam("rtimer", "timer", "name=tst;interval=300;mode=1;")
modparam("rtimer", "exec", "timer=tst;route=8")
modparam("sqlops","sqlcon","ca=>mysql://openser:openserrw@localhost/openser")
...
route[8] {
sql_query("ca",
"insert into statistics (time_stamp,random,shm_used_size,shm_real_used_size,
shm_max_used_size,shm_free_used_size,ul_users,ul_contacts) values ($Ts,
$RANDOM,$stat(used_size),$stat(real_used_size),$stat(max_used_size),
$stat(free_size),$stat(location-users),$stat(location-contacts))","ra");
}
Note: the second parameter of sql_query(...) is a single line. The next version, based on the SIP-Router.org project, will support string parameters broken across multiple lines.
=head2 Database
You have to create a new table in Kamailio (OpenSER) database:
CREATE TABLE `statistics` (
`id` int(10) unsigned NOT NULL auto_increment,
`time_stamp` int(10) unsigned NOT NULL default '0',
`random` int(10) unsigned NOT NULL default '0',
`shm_used_size` int(10) unsigned NOT NULL default '0',
`shm_real_used_size` int(10) unsigned NOT NULL default '0',
`shm_max_used_size` int(10) unsigned NOT NULL default '0',
`shm_free_used_size` int(10) unsigned NOT NULL default '0',
`ul_users` int(10) unsigned NOT NULL default '0',
`ul_contacts` int(10) unsigned NOT NULL default '0',
PRIMARY KEY (`id`)
) ENGINE=MyISAM;
Now everything is ready for Kamailio (OpenSER); you can restart it.
=head1 BUGS
None known
=head1 AUTHOR
Copyright 2012 - Voxtrot <www.voxtrot.com>
Oussama Hammami <oussamacvoxtrot.com>
=head1 LICENSE
GPLv2
=head1 VERSION
$Id: kamailio_mysql_shared_memory 2012-04-19 11:24 $
=head1 MAGIC MARKERS
#%# family=manual
#%# capabilities=autoconf
=cut
use strict;
my $MYSQLADMIN = $ENV{mysql} || "mysql";
my $COMMAND = "$MYSQLADMIN $ENV{mysqlauth} $ENV{kamailiodb} -e 'select * from statistics order by id desc limit 1\\G'";
my %WANTED = ( "shm_free_used_size" => "shmem_total",
"shm_real_used_size" => "shmem_real_used",
"shm_used_size" => "shmem_used",
);
my %VALUE = ( "shmem_total" => 0,
"shmem_real_used" => 0,
"shmem_used" => 0,
);
my $arg = shift();
if ($arg eq 'config') {
print_config();
exit();
} elsif ($arg eq 'autoconf') {
unless (test_service() ) {
print "yes\n";
} else {
print "no\n";
}
exit 0;
}
open(SERVICE, "$COMMAND |")
or die("Could not execute '$COMMAND': $!");
while (<SERVICE>) {
my ($k, $v) = (m/(\w+).*?(\d+(?:\.\d+)?)/);
next unless ($k);
if (exists $WANTED{$k} ) {
$VALUE{$WANTED{$k}}=$v;
}
}
close(SERVICE);
$VALUE{"shmem_total"}+=$VALUE{"shmem_real_used"};
for my $key (keys %VALUE) {
print ("$key.value $VALUE{$key}\n");
}
sub print_config {
print ("graph_title Kamailio Shared Memory\n");
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024).
print("graph_args --base 1024 --lower-limit 0\n");
print("graph_vlabel MB\n");
print("graph_scale no\n");
print("graph_category kamailio\n");
print("graph_info The graph describes the usage of shared memory.\n");
print("shmem_total.label total\n");
print("shmem_used.label used\n");
print("shmem_real_used.label real used\n");
print("shmem_total.info Average total shared memory used for the five minutes.\n");
print("shmem_used.info Average used shared memory for the five minutes.\n");
print("shmem_real_used.info Average real used shared memory for the five minutes.\n");
print("graph_order shmem_total shmem_used shmem_real_used\n");
print("shmem_total.type GAUGE\n");
print("shmem_used.type GAUGE\n");
print("shmem_real_used.type GAUGE\n");
print("shmem_total.draw AREA\n");
print("shmem_used.draw AREA\n");
print("shmem_real_used.draw LINE1\n");
print("shmem_total.colour 11DB58\n");
print("shmem_used.colour F7CB03\n");
print("shmem_real_used.colour 990000\n");
# Ensure min values (useful when using 'DERIVE' as 'type').
print("shmem_total.min 0\n");
print("shmem_used.min 0\n");
print("shmem_real_used.min 0\n");
# Divide the got value by 1048576 to get MB.
print("shmem_total.cdef shmem_total,1048576,/\n");
print("shmem_used.cdef shmem_used,1048576,/\n");
print("shmem_real_used.cdef shmem_real_used,1048576,/\n");
}
sub test_service {
system ("$MYSQLADMIN --version >/dev/null 2>/dev/null");
if ($? == 0)
{
system ("$COMMAND >/dev/null 2>/dev/null");
if ($? == 0)
{
print "yes\n";
}
else
{
print "no (could not connect to mysql)\n";
}
}
else
{
print "no (mysqladmin not found)\n";
}
exit 0;
}

View File

@ -0,0 +1,195 @@
#!/usr/bin/perl
# -*- perl -*-
=head1 NAME
Munin plugin to monitor the number of users and transactions in Kamailio using the 'statistics' table.
=head1 APPLICABLE SYSTEMS
It requires the MySQL 'statistics' table created in the Kamailio database.
http://siremis.asipto.com/install-charts-panel/
=head1 CONFIGURATION
[kamailio*]
user root
group root
env.mysql <optional-override-of-mysql-path>
env.mysqlauth -u<User> -p<Password>
env.kamailiodb <kamailio data base>
Usually the mysql command must be run as root.
=head2 Proxy config
Use the rtimer module to periodically run a route. In that route you insert the values into the database.
SIP Proxy configuration file:
loadmodule "rtimer.so"
loadmodule "sqlops.so"
loadmodule "cfgutils.so"
...
modparam("rtimer", "timer", "name=tst;interval=300;mode=1;")
modparam("rtimer", "exec", "timer=tst;route=8")
modparam("sqlops","sqlcon","ca=>mysql://openser:openserrw@localhost/openser")
...
route[8] {
sql_query("ca",
"insert into statistics (time_stamp,random,shm_used_size,shm_real_used_size,
shm_max_used_size,shm_free_used_size,ul_users,ul_contacts) values ($Ts,
$RANDOM,$stat(used_size),$stat(real_used_size),$stat(max_used_size),
$stat(free_size),$stat(location-users),$stat(location-contacts))","ra");
}
Note: the second parameter of sql_query(...) is a single line. The next version, based on the SIP-Router.org project, will support string parameters broken across multiple lines.
=head2 Database
You have to create a new table in Kamailio (OpenSER) database:
CREATE TABLE `statistics` (
`id` int(10) unsigned NOT NULL auto_increment,
`time_stamp` int(10) unsigned NOT NULL default '0',
`random` int(10) unsigned NOT NULL default '0',
`shm_used_size` int(10) unsigned NOT NULL default '0',
`shm_real_used_size` int(10) unsigned NOT NULL default '0',
`shm_max_used_size` int(10) unsigned NOT NULL default '0',
`shm_free_used_size` int(10) unsigned NOT NULL default '0',
`ul_users` int(10) unsigned NOT NULL default '0',
`ul_contacts` int(10) unsigned NOT NULL default '0',
PRIMARY KEY (`id`)
) ENGINE=MyISAM;
Now everything is ready for Kamailio (OpenSER); you can restart it.
=head1 BUGS
None known
=head1 AUTHOR
Copyright 2012 - Voxtrot <www.voxtrot.com>
Oussama Hammami <oussamacvoxtrot.com>
=head1 LICENSE
GPLv2
=head1 VERSION
$Id: kamailio_transactions_users 2012-04-19 16:13 $
=head1 MAGIC MARKERS
#%# family=manual
#%# capabilities=autoconf
=cut
use strict;
my $MYSQL = $ENV{mysql} || "mysql";
my $COMMAND = "$MYSQL $ENV{mysqlauth} $ENV{kamailiodb} -e 'select * from statistics order by id desc limit 1\\G'";
my %WANTED = ( "ul_users" => "users",
"ul_contact" => "contacts",
"tm_active" => "transactions",
);
my %VALUE = ( "users" => 0,
"contacts" => 0,
"transactions" => 0,
);
my $arg = shift();
if ($arg eq 'config') {
print_config();
exit();
} elsif ($arg eq 'autoconf') {
unless (test_service() ) {
print "yes\n";
} else {
print "no\n";
}
exit 0;
}
open(SERVICE, "$COMMAND |")
or die("Could not execute '$COMMAND': $!");
while (<SERVICE>) {
my ($k, $v) = (m/(\w+).*?(\d+(?:\.\d+)?)/);
next unless ($k);
if (exists $WANTED{$k} ) {
$VALUE{$WANTED{$k}}=$v;
}
}
close(SERVICE);
for my $key (keys %VALUE) {
print ("$key.value $VALUE{$key}\n");
}
sub print_config {
print ("graph_title Kamailio transactions and location\n");
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024).
print("graph_args --base 1000 --lower-limit 0\n");
print("graph_vlabel user/transaction\n");
print("graph_scale no\n");
print("graph_category kamailio\n");
print("graph_info The graph describes the number of users/transaction on kamailio.\n");
print("users.label users\n");
print("contacts.label contacts\n");
print("transactions.label transactions\n");
print("users.info Average sip users for the five minutes.\n");
print("contacts.info Average sip contacts for the five minutes.\n");
print("transactions.info Average sip transactions for the five minutes.\n");
print("graph_order transactions users contacts\n");
print("users.type GAUGE\n");
print("contacts.type GAUGE\n");
print("transactions.type GAUGE\n");
print("users.draw LINE1\n");
print("contacts.draw LINE1\n");
print("transactions.draw AREA\n");
print("users.colour 00CCC9\n");
print("contacts.colour 8498A0\n");
print("transactions.colour E6D300\n");
# Ensure min values (useful when using 'DERIVE' as 'type').
print("users.min 0\n");
print("contacts.min 0\n");
print("transactions.min 0\n");
}
sub test_service {
system ("$MYSQL --version >/dev/null 2>/dev/null");
if ($? == 0)
{
system ("$COMMAND >/dev/null 2>/dev/null");
if ($? == 0)
{
print "yes\n";
}
else
{
print "no (could not connect to mysql)\n";
}
}
else
{
print "no (mysql not found)\n";
}
exit 0;
}

106
plugins/keystone/keystone_stats Executable file
View File

@ -0,0 +1,106 @@
#!/usr/bin/env python
#
# Plugin to monitor status of Keystone
#
# Needs following minimal configuration in plugin-conf.d/keystone:
# [keystone_*]
# user keystone
#
# Magic markers
#%# capabilities=autoconf
#%# family=auto
import sys
import traceback
try:
from keystone.common import utils
from keystone import config
from keystone import exception
from keystone import identity
except ImportError:
succesful_import = False
else:
succesful_import = True
stats = ['users', 'tenants']
def print_config():
global stats
print 'graph_title Keystone Stats'
print 'graph_vlabel count'
print 'graph_args --base 1000 --lower-limit 0'
print 'graph_category keystone'
print 'graph_scale no'
print 'graph_info This graph shows stats about keystone: ' + (', ').join(stats)
for field in stats:
print '%s_enabled.label enabled %s' % (field, field)
print '%s_enabled.draw LINE2' % field
print '%s_enabled.info %s enabled' % (field, field)
print '%s_total.label total %s' % (field, field)
print '%s_total.draw LINE2' % field
print '%s_total.info %s total' % (field, field)
def get_status():
enabled = {}
total = {}
for k in stats:
enabled[k] = 0
total[k] = 0
identity_api = identity.Manager()
for user in identity_api.list_users(None):
total['users'] += 1
if user['enabled']:
enabled['users'] += 1
# Ldap and pam driver don't support get_all_tenants()
# kvs and sql implement get_tenants() instead of get_all_tenants()
# Unfortunately, none of the backends implements the expected function
tenants = []
for api_func in [ 'get_all_tenants', 'get_tenants']:
try:
tenants = getattr(identity_api, api_func)(None)
except (exception.NotImplemented, NotImplementedError):
pass
for tenant in tenants:
total['tenants'] += 1
if tenant['enabled']:
enabled['tenants'] += 1
return {'enabled': enabled, 'total': total}
def print_values():
stats = get_status()
for state in stats.keys():
for (field, value) in stats[state].iteritems():
print "%s_%s.value %s" % (field, state, value)
def load_conf():
config.CONF(config_files=[utils.find_config('keystone.conf')])
if __name__ == '__main__':
if len(sys.argv) > 1:
if sys.argv[1] == "config":
print_config()
elif sys.argv[1] == "autoconf":
if not succesful_import:
print 'no (failed import keystone module)'
sys.exit(0)
try:
load_conf()
identity.Manager()
except:
print 'no (failed to connect keystone backend: %s)' % traceback.format_exc()
sys.exit(0)
print 'yes'
elif succesful_import:
load_conf()
print_values()

View File

@ -1,9 +1,9 @@
#!/usr/bin/perl
# -*- perl -*-
#
#
# Copyright 2009 by the Regents of the University of Minnesota
# Written by Munir Nassar <nassarmu@msi.umn.edu>
#
# Rewrite contribution by TSUCHIYA Masatoshi <tsuchiya@namazu.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
@ -19,131 +19,129 @@
#
# The Minnesota Supercomputing Institute http://www.msi.umn.edu sponsored
# the development of this software.
#
#
# Requirements:
# - lmstat
#
# Note:
# - You must provide the daemon name as it is listed in the flexlm license;
#   if you want it displayed differently, use the LMDISPLAYNAME variable
#
#
# Parameters supported:
# - config
# - autoconf
#
#
# Configuration variables
# - LMFEATURES: The individual features of each vendor daemon to graph
# - LMFEATURES: The individual features of each vendor daemon to graph.
# If no features are given, all features
# reported by the vendor daemon are graphed.
# - LMDISPLAYNAME: use the LMDISPLAYNAME instead of the daemon name when
# generating graph names
# - LMGRAPHISSUED: If set generate a graph of the number of licenses issued for
# each feature.
# - LMSTAT: The path to the lmstat binary
# generating graph names
# - LMGRAPHISSUED: If set generate a graph of the number of licenses issued for
# each feature.
# - LMSTAT: The path to the lmstat binary
# - LMLICFILE: The path to the FlexLM License File
# - LMLOGARITHMIC: If set, the graph uses a logarithmic scale
#
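# Example /etc/munin/plugin-conf.d/ snippet (the values shown are
# illustrative assumptions; adjust paths and feature names to your site):
#   [flexlm_*]
#   env.LMSTAT /opt/local/flexlm/bin/lmstat
#   env.LMLICFILE /opt/local/flexlm/license/license.dat
#   env.LMFEATURES feature_a feature_b
#   env.LMGRAPHISSUED 1
#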
# $Log$
# Revision 1.00 20090807 nassarmu
# Initial public release.
#
# Revision 1.10 20120625 nassarmu@msi.umn.edu
# incorporate the rewrite by TSUCHIYA Masatoshi <tsuchiya@namazu.org>
#
# Magic markers:
#%# family=licensing
#%# capabilities=autoconf
use Class::Struct;
use English qw/ $PROGRAM_NAME /;
use strict;
use warnings;
use Munin::Plugin;
# What daemon are we going to graph? if none specified exit.
$0 =~ /flexlm_(.+)*$/;
my $daemon = $1;
exit 2 unless defined $daemon;
# LMFEATURES should be provided by plugin-conf.d space delimited
if ( ! $ENV{'LMFEATURES'} ) {
print "You must provide a list of FlexLM features to monitor via the LMFEATURES variable.\n";
exit 1
}
$PROGRAM_NAME =~ /flexlm_(.+)*$/;
our $DAEMON = $1;
exit 2 unless defined $DAEMON;
our $munincommand;
# This section is for some optional values, the defaults may work for you
# if not then i recommend setting these option via plugin-conf.d
# This would also allow you to theoretically support multiple flexlmds
# This would also allow you to theoretically support multiple flexlmds
# via different license files.
our $lmstat;
our $lmlicfile;
our $lmdisplayname;
if ( $ENV{'LMSTAT'} ) {
$lmstat = $ENV{'LMSTAT'};
} else {
$lmstat = "/opt/local/flexlm/bin/lmstat";
}
if ( $ENV{'LMLICFILE'} ) {
$lmlicfile = $ENV{'LMLICFILE'};
} else {
$lmlicfile = "/opt/local/flexlm/license/license.dat";
}
if ( $ENV{'LMDISPLAYNAME'} ) {
$lmdisplayname = $ENV{'LMDISPLAYNAME'};
} else {
$lmdisplayname = $daemon;
our $LMSTAT = $ENV{'LMSTAT'} || '/opt/local/flexlm/bin/lmstat';
our $LMLICFILE = $ENV{'LMLICFILE'} || '/opt/local/flexlm/license/license.dat';
&struct( feature => { name => '$', cleanname => '$', max => '$', used => '$' } );
sub lmstat {
my @feature;
open( my $ph, sprintf('%s -c %s -S %s|', $LMSTAT, $LMLICFILE, $DAEMON) ) or exit 2;
while( <$ph> ){
if( my( $name ) = m/\AUsers of ([^:]+):/ ){
my $x = feature->new( name => $name, max => 0, used => 0 );
$name =~ s/^[^A-Za-z_]+/_/;
$name =~ s/[^A-Za-z0-9_]/_/g;
$x->cleanname( $name );
m/Total of (\d+) licenses? issued/ and $x->max( $1 );
m/Total of (\d+) licenses? in use/ and $x->used( $1 );
push( @feature, $x );
}
elsif( m/\A\s+(\d+) RESERVATIONs? for / ){
$feature[-1]->used( $feature[-1]->used - $1 );
}
}
if( $ENV{'LMFEATURES'} ){
my %table;
for( split( /\s+/, $ENV{'LMFEATURES'} ) ){
$table{$_}++;
}
grep( $table{$_->name}, @feature );
} else {
@feature;
}
}
# Parse LMFEATURES
my @features = split(/\s+/, $ENV{'LMFEATURES'});
# try and recommend autoconf (this will most likely result in a yes)
if ( $ARGV[0] and $ARGV[0] eq "autoconf" ) {
if ( scalar @features >= 1 ) {
print "yes\n";
exit 0;
}
else {
print "no\n";
exit 1;
}
if ( $ARGV[0] ) {
$munincommand = $ARGV[0];
}
else {
$munincommand = 'none';
}
# print out a config screen when asked.
if ( $ARGV[0] and $ARGV[0] eq "config" ) {
print "graph_title FlexLM License usage for $lmdisplayname\n";
if ( $ENV{'LMLOGARITHMIC'} ) {
print "graph_args --base 1000 --vertical-label licenses --lower-limit 0.01 --logarithmic\n";
}
else {
print "graph_args --base 1000 --vertical-label licenses -l 0\n";
}
print "graph_category licensing\n";
print "graph_period minute\n";
foreach my $feature (@features) {
my $clean_feature = clean_fieldname($feature);
print "$clean_feature".".label $feature\n";
print "$clean_feature".".draw LINE2\n";
print "$clean_feature".".info The number of $feature licenses checked out\n";
if ( $ENV{'LMGRAPHISSUED'} ) {
print "$clean_feature"."max.label $feature max\n";
print "$clean_feature"."max.draw LINE3\n";
print "$clean_feature"."max.info The total number of $feature licenses available\n";
}
}
exit 0
if( $munincommand eq 'autoconf' ){
if( &lmstat > 0 ){
print "yes\n";
} else {
print "no\n";
}
}
my @results = `$lmstat -c $lmlicfile -S $daemon`;
# pull the info from lmstat and print the results
foreach my $feature (@features) {
my @results = grep(/Users of $feature/, @results);
foreach my $result (@results) {
if ($result =~ m/Users of $feature\:/ ) {
chomp ($result);
my (@fields) = split( m/\s+/, $result);
my $clean_feature = clean_fieldname($feature);
print "$clean_feature".".value $fields[10]\n";
if ( $ENV{'LMGRAPHISSUED'} ) {
print "$clean_feature"."max.value $fields[5]\n";
}
}
elsif( $munincommand eq 'config' ){
printf "graph_title FlexLM License usage for %s\n", $ENV{'LMDISPLAYNAME'} || $DAEMON;
if( $ENV{'LMLOGARITHMIC'} ){
print "graph_args --base 1000 --vertical-label licenses --lower-limit 0.01 --logarithmic\n";
} else {
print "graph_args --base 1000 --vertical-label licenses -l 0\n";
}
print "graph_category licensing\n";
print "graph_period minute\n";
for my $x ( &lmstat ){
printf "%s.label %s\n", $x->cleanname, $x->name;
printf "%s.draw LINE2\n", $x->cleanname;
printf "%s.info The number of %s licenses checked out\n", $x->cleanname, $x->name;
if( $ENV{'LMGRAPHISSUED'} ){
printf "%smax.label %s max\n", $x->cleanname, $x->name;
printf "%smax.draw LINE3\n", $x->cleanname;
printf "%smax.info The total number of %s licenses available\n", $x->cleanname, $x->name;
}
}
}
else {
for my $x ( &lmstat ){
printf "%s.value %d\n", $x->cleanname, $x->used;
if( $ENV{'LMGRAPHISSUED'} ){
printf "%smax.value %d\n", $x->cleanname, $x->max;
}
}
}
exit 0;

127
plugins/mail/imapproxy_multi Executable file
View File

@ -0,0 +1,127 @@
#!/usr/bin/env python
"""=cut
=head1 NAME
imapproxy - Munin multigraph plugin to monitor imapproxy using pimpstat
=head1 CONFIGURATION
This plugin should require no additional configuration.
=head1 MAGIC MARKERS
#%# family=auto
#%# capabilities=autoconf
=head1 Author
Niall Donegan <github@nialldonegan.me>
=head1 LICENSE
GPLv2
=cut"""
import sys
import os
import re
from subprocess import Popen,PIPE
def print_autoconf():
which = Popen("which pimpstat", shell=True, stdout=PIPE)
which.communicate()
if not bool(which.returncode):
print "yes"
else:
print "no (pimpstat not found)"
sys.exit(0)
def print_config():
print "multigraph imapproxy_cache"
print "graph_title Cache Statistics For ImapProxy"
print "graph_args -l 0 --base 1000"
print "graph_total total"
print "graph_vlabel Cache Connections / ${graph_period}"
print "graph_category imapproxy"
print "cache_hits.draw AREA"
print "cache_hits.type DERIVE"
print "cache_hits.label Cache Hits"
print "cache_hits.min 0"
print "cache_misses.draw STACK"
print "cache_misses.type DERIVE"
print "cache_misses.label Cache Misses"
print "cache_misses.min 0"
print
print "multigraph imapproxy_connections"
print "graph_title Connection Statistics For ImapProxy"
print "graph_args -l 0 --base 1000"
print "graph_total total"
print "graph_vlabel Connections / ${graph_period}"
print "graph_category imapproxy"
print "connections_reused.draw AREA"
print "connections_reused.type DERIVE"
print "connections_reused.label Reused Connections"
print "connections_reused.min 0"
print "connections_created.draw STACK"
print "connections_created.type DERIVE"
print "connections_created.label Created Connections"
print "connections_created.min 0"
sys.exit(0)
def print_fetch():
cache_hits = 0
cache_misses = 0
connections_created = 0
connections_reused = 0
connections = Popen(
"pimpstat -c | egrep '(Total (Reused|Created)|Cache (Hits|Misses))'",
shell=True,
stdout=PIPE
)
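# The egrep above keeps only the counter lines of `pimpstat -c`. The parsing
# below assumes each kept line starts with the numeric counter, roughly like
# (illustrative only, not verbatim pimpstat output):
#   1234 Cache Hits
#   56 Cache Misses
#   789 Total Created ...
#   1011 Total Reused ...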
for line in connections.stdout:
if re.search(r'Hits', line):
cache_hits = line.split()[0]
if re.search(r'Misses', line):
cache_misses = line.split()[0]
if re.search(r'Created', line):
connections_created = line.split()[0]
if re.search(r'Reused', line):
connections_reused = line.split()[0]
print "multigraph imapproxy_cache"
print "cache_hits.value %s" % cache_hits
print "cache_misses.value %s" % cache_misses
print
print "multigraph imapproxy_connections"
print "connections_created.value %s" % connections_created
print "connections_reused.value %s" % connections_reused
sys.exit(0)
def main():
if len(sys.argv) > 1:
command = sys.argv[1]
else:
command = "fetch"
if command not in ["autoconf","config","fetch"]:
print >> sys.stderr, "Command %s not known, please use either autoconf or suggest" % command
sys.exit(1)
if command == "autoconf":
print_autoconf()
elif command == "config":
print_config()
else:
print_fetch()
if __name__ == '__main__':
main()
# vim:syntax=python

View File

@ -1,2 +0,0 @@
Check http://aouyar.github.com/PyMunin/
to get the most recent version of the PyMunin Multi graph Munin Plugins and documentation.

61
plugins/mongodb/mongo_lag Executable file
View File

@ -0,0 +1,61 @@
#!/usr/bin/env python
"""
MongoDB Replication Lag
~~~~~~~~~~~~~~~~~~~~~~~
Connects to a single mongo instance and retrieves
replication lag for all connected members.
munin-node.conf:
[mongodb_lag]
env.host 127.0.0.1
env.port 27017
:author: Stefan Andersen <stefan@stefanandersen.dk>
:license: The Beer Ware License (Revision 42)
<stefan@stefanandersen.dk> wrote this file. As long
as you retain this notice you can do whatever you want
with this stuff. If we meet some day, and you think
this stuff is worth it, you can buy me a beer in return.
"""
import os
import sys
import pymongo
def _get_members():
host = os.environ.get('host', '127.0.0.1')
port = os.environ.get('port', 27017)
conn = pymongo.Connection(host, int(port))
repl_status = conn.admin.command("replSetGetStatus")
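# replSetGetStatus returns a document with a 'members' list; the fields used
# below are assumed to look roughly like
#   {'name': 'db1:27017', 'state': 1, 'optimeDate': datetime(...)}
# where state 1 marks the primary.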
members = {}
for member in repl_status['members']:
name = member['name'].split('.')[0]
members[name] = {'state': member['state'], 'optimeDate': member['optimeDate']}
return members
def run():
members = _get_members();
for member in members:
if members[member]['state'] == 1:
primary_optime = members[member]['optimeDate']
for member in members:
lag = (primary_optime - members[member]['optimeDate']).seconds
print "{}.value {}".format(member, lag)
def config():
print """graph_title MongoDB replication lag
graph_args --base 1000
graph_vlabel Replication lag (seconds)
graph_category MongoDB
"""
for member in _get_members():
print "{0}.label {0}".format(member)
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "config":
config()
else:
run()

View File

@ -1,64 +0,0 @@
#!/bin/bash
#
# Munin plugin to monitor free space in MySQL's InnoDB tablespace.
# Mostly useful if you use InnoDB on a block device, or if you for
# some reason don't want to do autoextend on the last file.
#
# 2007-03-18 Stig Sandbeck Mathisen <ssm@fnord.no>
#
# Configuration parameters for /etc/munin/plugin-conf.d/mysql_innodb,
# if you need to override the defaults below:
#
# [mysql_innodb]
# env.mysqlopts - Options to pass to mysql (host, username, password)
# env.warning - Generate a warning if free space goes below this level
# env.critical - Generate a critical if free space goes below this level
#
# For security reasons, this plugin uses its own schema with a simple,
# empty table using the InnoDB engine.
#
# You need to run this to get this plugin to work:
# mysql> CREATE DATABASE munin_innodb;
# mysql> USE munin_innodb
# mysql> CREATE TABLE something (anything int) ENGINE=InnoDB;
## Tunable parameters with defaults
MYSQL="${mysql:-/usr/bin/mysql}"
MYSQLOPTS="${mysqlopts:---user=munin --password=munin --host=localhost}"
WARNING=${warning:-2147483648} # 2GB
CRITICAL=${critical:-1073741824} # 1GB
## No user serviceable parts below
if [ "$1" = "config" ]; then
echo 'graph_title MySQL InnoDB free tablespace'
echo 'graph_args --base 1024'
echo 'graph_vlabel Bytes'
echo 'graph_category mysql'
echo 'graph_info Amount of free bytes in the InnoDB tablespace'
echo 'free.label Bytes free'
echo 'free.type GAUGE'
echo 'free.min 0'
echo 'free.warning' $WARNING:
echo 'free.critical' $CRITICAL:
exit 0
fi
# Get freespace from mysql
freespace=$($MYSQL $MYSQLOPTS --batch --skip-column-names --execute \
"SELECT table_comment FROM tables WHERE TABLE_SCHEMA = 'munin_innodb'" \
information_schema);
retval=$?
# Sanity checks
if (( retval > 0 )); then
echo "Error: mysql command returned status $retval" 1>&2
exit -1
fi
if [ -z "$freespace" ]; then
echo "Error: mysql command returned no output" 1>&2
exit -1
fi
# Return freespace
echo $freespace | awk '/InnoDB free:/ {print "free.value", $3 * 1024}'

View File

@ -171,9 +171,9 @@ sub getDBList {
my @dbs;
foreach my $f (glob("/var/lib/mysql/*")) {
if (-d $f) {
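# MySQL encodes "-" in on-disk database directory names as "@002d"; map it back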
$f =~ s/\@002d/-/g;
$f =~ s!.*/!!;
@dbs[$#dbs+1]=$f };
}
return @dbs;
}

View File

@ -143,7 +143,7 @@ $graphs{services} = {
$graphs{svcchkdetail} = {
config => {
args => '--lower-limit 0',
vlabel => 'Total # of Service Checks',
vlabel => 'Total Number of Service Checks',
category => 'details',
title => 'Detailed Service Info',
info => 'Detailed Service Check Information',
@ -227,7 +227,7 @@ $graphs{hosts} = {
$graphs{hostchkdetail} = {
config => {
args => '--lower-limit 0',
vlabel => 'Total # of Host Checks',
vlabel => 'Total Number of Host Checks',
category => 'details',
title => 'Detailed Host Info',
info => 'Detailed Host Check Information',
@ -294,22 +294,22 @@ $graphs{hostchkext} = {
$graphs{checks} = {
config => {
args => '--lower-limit 0',
vlabel => 'Total # of Checks',
vlabel => 'Total Number of Checks',
category => 'nagios',
title => 'Totals',
info => 'Total Number of Service and Host Checks',
},
keys => [ 'NUMSERVICES', 'NUMHOSTS' ],
datasrc => [
{ name => 'NUMSERVICES', label => '# of Services', min => '0', type => 'GAUGE', info => 'total number of services.', draw => 'LINE2' },
{ name => 'NUMHOSTS', label => '# of Hosts', min => '0', type => 'GAUGE', info => 'total number of hosts.', draw => 'LINE2' },
{ name => 'NUMSERVICES', label => 'Number of Services', min => '0', type => 'GAUGE', info => 'total number of services.', draw => 'LINE2' },
{ name => 'NUMHOSTS', label => 'Number of Hosts', min => '0', type => 'GAUGE', info => 'total number of hosts.', draw => 'LINE2' },
],
};
# multi-graph for number of host checks in x mins ( sub graph of checks graph )
$graphs{hostchkactcount} = {
config => {
args => '--lower-limit 0',
vlabel => '# Host Checks',
vlabel => 'Number of Host Checks',
category => 'active',
title => 'Host Checks',
info => 'Total Number of Active Host Checks',
@ -327,7 +327,7 @@ $graphs{hostchkactcount} = {
$graphs{hostchkpsvcount} = {
config => {
args => '--lower-limit 0',
vlabel => '# Host Checks',
vlabel => 'Number of Host Checks',
category => 'passive',
title => 'Host Checks',
info => 'Total Number of Passive Host Checks',
@ -345,7 +345,7 @@ $graphs{hostchkpsvcount} = {
$graphs{svcchkactcount} = {
config => {
args => '--lower-limit 0',
vlabel => '# of Service Checks',
vlabel => 'Number of Service Checks',
category => 'active',
title => 'Service Checks',
info => 'Total Number of Active Service Checks',
@ -363,7 +363,7 @@ $graphs{svcchkactcount} = {
$graphs{svcchkpsvcount} = {
config => {
args => '--lower-limit 0',
vlabel => '# of Service Checks',
vlabel => 'Number of Service Checks',
category => 'passive',
title => 'Service Checks',
info => 'Total Number of Passive Service Checks',
@ -381,7 +381,7 @@ $graphs{svcchkpsvcount} = {
$graphs{extcmdcount} = {
config => {
args => '--lower-limit 0',
vlabel => '# of Ext Command Slots',
vlabel => 'Number of Ext Command Slots',
category => 'externalcmds',
title => 'External Commands',
info => 'External Command Buffer Slot Information',

View File

@ -1,67 +0,0 @@
#!/usr/bin/env python
"""Thomas R. N. Jansson (tjansson@tjansson.dk)
16-MAY-2010
"""
# The SNMP traps for the NetApp filer can be found in
# /net/netappfiler/vol0/etc/mib/traps.dat if the filer is
# NFS automounted mounted on server.
# Example: the SNMP id for cpuBusyTimePerCent is
# snmp.1.3.6.1.4.1.789.1.2.1.3.0
# and retrival of this value is done by
# snmpget -v 1 -c public netappfiler 1.3.6.1.4.1.789.1.2.1.3.0
#
# Requires snmpget and assumes public community.
import commands
import sys
import time
# Given a server name and an SNMP id, it returns the value stripped of the surrounding text.
def snmpget(iservername,isnmpid):
runcmd = 'snmpget -v 1 -c public ' + iservername + ' ' + isnmpid
output = commands.getoutput(runcmd)
return output.split()[3]
# Calculates the bps by asking twice divided per second.
def calcbps(iservername,isnmpid):
val_first = int(snmpget(iservername,isnmpid))
time.sleep(1)
val_second = int(snmpget(iservername,isnmpid))
return str(val_second-val_first)
# The interface number corresponds to vif1 on my netapp
iface = '8'
ifEntryDescr = '1.3.6.1.2.1.2.2.1.2.'+iface
ifEntrySpeed = '1.3.6.1.2.1.2.2.1.5.'+iface
ifEntryStatus = '1.3.6.1.2.1.2.2.1.8.'+iface
ifEntryInOctets = '1.3.6.1.2.1.2.2.1.10.'+iface
ifEntryOutOctets = '1.3.6.1.2.1.2.2.1.16.'+iface
servername = sys.argv[0].split('_')[2]
ifacename = snmpget(servername,ifEntryDescr)
if len(sys.argv) == 2 and sys.argv[1] == "config":
print 'graph_title Network usage on '+servername+' interface '+ifacename
print 'graph_order recv send'
print 'graph_args --base 1000'
print 'graph_vlabel bits in (-) / out (+) per \${graph_period}'
print 'graph_category netapp'
print 'graph_info This graph shows traffic for the '+ifacename+' network interface.'
print 'recv.label recv'
print 'recv.graph no'
print 'recv.cdef recv,8,*'
print 'recv.max 2000000000'
print 'recv.min 0'
print 'send.info Bits sent/received by the '+ifacename+' interface.'
print 'send.label bps'
print 'send.negative recv'
print 'send.cdef send,8,*'
print 'send.max 2000000000'
print 'send.min 0'
sys.exit(0)
# Gathers info from the servers and gathers data
print 'send.value '+calcbps(servername,ifEntryOutOctets)
print 'recv.value '+str(int(calcbps(servername,ifEntryInOctets))*-1)

232
plugins/network/bird Executable file
View File

@ -0,0 +1,232 @@
#!/usr/bin/perl
use IO::Socket::UNIX;
use Munin::Plugin;
use strict;
use warnings;
use v5.10;
=head1 NAME
bird - Munin multigraph plugin to monitor BIRD routing daemon activity
=head1 APPLICABLE SYSTEMS
Every system with running bird
=head1 CONFIGURATION
The plugin must run with a user or group that could connect to bird
control socket.
This configuration snippet is an example with the defaults:
[bird]
user root
env.protocols BGP
env.socket /var/run/bird.ctl
=head1 USAGE
Link this plugin to /etc/munin/plugins/ and restart the munin-node.
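A minimal sketch (the source path and restart command are assumptions; adjust them to your installation):

 ln -s /path/to/contrib/plugins/network/bird /etc/munin/plugins/bird
 /etc/init.d/munin-node restart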
=head1 MAGIC MARKERS
#%# family=auto
#%# capabilities=autoconf
=head1 BUGS
None known
=head1 AUTHOR
Luben Karavelov (karavelov at mail.bg)
=head1 LICENSE
Same as perl
=cut
need_multigraph();
my $protocols = [ split(/ /, $ENV{'protocols'} || 'BGP') ];
my $socket = $ENV{'socket'} || '/var/run/bird.ctl';
sub get_stats {
state $stats;
return $stats if defined $stats;
my $bird_ctl = IO::Socket::UNIX->new(
Type => SOCK_STREAM,
Peer => $socket
) or die $!;
my ($protocol,$name);
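# The loop below parses the reply to "show protocols all" from the control
# socket.  The layout assumed by the patterns is roughly (illustrative only;
# real BIRD output may differ between versions):
#   0001 BIRD ready.
#   1002-bgp1     BGP      master   up     ...
#   1006-  Description:    Upstream peer
#          Routes:         12 imported, 3 exported, 10 preferred
#          Import updates:     100      0      5      1     94
#   0000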
while (<$bird_ctl>) {
given($_) {
when (/1002-(\w+)\s+(\w+)\s+.*/) {
($name, $protocol) = ($1,$2);
next unless $protocol ~~ $protocols;
$stats->{$name}->{protocol} = $protocol;
}
when (/^0001 /) {
print $bird_ctl "show protocols all\n";
next;
}
when (/^0000 /) {
last;
}
when (/^1002- /) {
print;
}
when (/^1006-\s+Description:\s+(.+)$/){
next unless $protocol ~~ $protocols;
$stats->{$name}->{title} = $1;
}
when (/^\s+Routes:\s+(\d+)\s+imported,\s+(\d+)\s+exported,\s+(\d+)\s+preferred$/){
next unless $protocol ~~ $protocols;
$stats->{$name}->{imported} = $1;
$stats->{$name}->{exported} = $2;
$stats->{$name}->{preferred} = $3;
}
# received rejected filtered ignored accepted
when (/^\s+(Import|Export)\s(updates|withdraws):\s+(\d+|-+)\s+(\d+|-+)\s+(\d+|-+)\s+(\d+|-+)\s+(\d+|-+)$/){
next unless $protocol ~~ $protocols;
$stats->{$name}->{ lc("$1_$2_received") } = $3;
$stats->{$name}->{ lc("$1_$2_rejected") } = $4;
$stats->{$name}->{ lc("$1_$2_filtered") } = $5;
$stats->{$name}->{ lc("$1_$2_ignored" ) } = $6;
$stats->{$name}->{ lc("$1_$2_accepted") } = $7;
}
when (/^$/) {
undef $protocol;
undef $name;
}
}
}
$bird_ctl->close;
return $stats;
}
sub autoconf {
if (-S $socket) {
say 'yes';
exit 0;
} else {
say 'no';
exit 1;
}
}
sub config {
my $stats = get_stats;
while ( my ($name,$proto) = each %$stats) {
print <<HEREDOC;
multigraph ${name}_routes
graph_title $proto->{title} routes
graph_args --base 1000
graph_vlabel routes
graph_category bird
exported.label Exported routes
exported.type GAUGE
exported.info Exported routes
exported.min 0
exported.draw LINE1
imported.label Imported routes
imported.type GAUGE
imported.info Imported routes
imported.min 0
imported.draw LINE1
preferred.label Preferred routes
preferred.type GAUGE
preferred.info Preferred routes
preferred.min 0
preferred.draw LINE1
multigraph ${name}_activity
graph_title $proto->{title} activity
graph_args --base 1000
graph_vlabel routes per second
graph_category bird
import_updates_received.label Import updates received
import_updates_received.type DERIVE
import_updates_received.draw LINE1
import_updates_rejected.label Import updates rejected
import_updates_rejected.type DERIVE
import_updates_rejected.draw LINE1
import_updates_filtered.label Import updates filtered
import_updates_filtered.type DERIVE
import_updates_filtered.draw LINE1
import_updates_ignored.label Import updates ignored
import_updates_ignored.type DERIVE
import_updates_ignored.draw LINE1
import_updates_accepted.label Import updates accepted
import_updates_accepted.type DERIVE
import_updates_accepted.draw LINE1
import_withdraws_received.label Import withdraws received
import_withdraws_received.type DERIVE
import_withdraws_received.draw LINE1
import_withdraws_rejected.label Import withdraws rejected
import_withdraws_rejected.type DERIVE
import_withdraws_rejected.draw LINE1
import_withdraws_ignored.label Import withdraws ignored
import_withdraws_ignored.type DERIVE
import_withdraws_ignored.draw LINE1
import_withdraws_accepted.label Import withdraws accepted
import_withdraws_accepted.type DERIVE
import_withdraws_accepted.draw LINE1
export_updates_received.label Export updates received
export_updates_received.type DERIVE
export_updates_received.draw LINE1
export_updates_rejected.label Export updates rejected
export_updates_rejected.type DERIVE
export_updates_rejected.draw LINE1
export_updates_filtered.label Export updates filtered
export_updates_filtered.type DERIVE
export_updates_filtered.draw LINE1
export_updates_accepted.label Export updates accepted
export_updates_accepted.type DERIVE
export_updates_accepted.draw LINE1
export_withdraws_received.label Export withdraws received
export_withdraws_received.type DERIVE
export_withdraws_received.draw LINE1
export_withdraws_accepted.label Export withdraws accepted
export_withdraws_accepted.type DERIVE
export_withdraws_accepted.draw LINE1
HEREDOC
}
}
sub fetch {
my $stats = get_stats;
while ( my ($name,$proto) = each %$stats) {
print <<HEREDOC;
multigraph ${name}_routes
exported.value $proto->{exported}
imported.value $proto->{imported}
preferred.value $proto->{preferred}
multigraph ${name}_activity
import_updates_received.value $proto->{import_updates_received}
import_updates_rejected.value $proto->{import_updates_rejected}
import_updates_filtered.value $proto->{import_updates_filtered}
import_updates_ignored.value $proto->{import_updates_ignored}
import_updates_accepted.value $proto->{import_updates_accepted}
import_withdraws_received.value $proto->{import_withdraws_received}
import_withdraws_rejected.value $proto->{import_withdraws_rejected}
import_withdraws_ignored.value $proto->{import_withdraws_ignored}
import_withdraws_accepted.value $proto->{import_withdraws_accepted}
export_updates_received.value $proto->{export_updates_received}
export_updates_rejected.value $proto->{export_updates_rejected}
export_updates_filtered.value $proto->{export_updates_filtered}
export_updates_accepted.value $proto->{export_updates_accepted}
export_withdraws_received.value $proto->{export_withdraws_received}
export_withdraws_accepted.value $proto->{export_withdraws_accepted}
HEREDOC
}
}
given ($ARGV[0]) {
when ('autoconf') { autoconf }
when ('config') { config }
default { fetch }
}

View File

@ -1,106 +0,0 @@
#!/usr/bin/perl -w
#
# Plugin to monitor BGP table summary statistics on a cisco router.
#
# Original Author: Peter Holzleitner
#
# Revision 1.1 2010/10/14 19:19
#
# Configuration variables:
#
# iosuser - username (default "")
# iospass - password (default "")
#
# Parameters:
#
# config (required)
#
# Magic markers (optional - only used by munin-config and some
# installation scripts):
#%# family=auto
use Net::Telnet::Cisco;
use Sys::Syslog;
if ($0 =~ /^(?:|.*\/)cisco_bgp_([^_]+)$/) {
$host = $1;
}
($^O eq "linux" || $^O eq "openbsd") && Sys::Syslog::setlogsock('unix');
openlog('munin.bgp', 'cons,pid', 'daemon');
my @BGP_nbr;
my @BGP_pfx;
my $tot_pfx;
my $iosuser = $ENV{iosuser} || "";
my $iospass = $ENV{iospass} || "";
&fetch_bgpstats($host, $iosuser, $iospass);
if ($ARGV[0] and $ARGV[0] eq "config") {
print "host_name $host\n";
print "graph_args --base 1024 -l 0 --vertical-label Prefixes\n";
print "graph_title BGP Neighbour Statistics\n";
print "graph_category network\n";
print "graph_info This graph shows the number of BGP prefixes received by neighbour.\n";
my($n, $i); $n = scalar @BGP_nbr; $i = 0;
while($n--) {
my $neigh = $BGP_nbr[$i++];
print "n$i.label $neigh\n";
}
# print "total.label Total\n";
# print "total.info Total number of prefixes in the BGP table\n";
} else {
my($n, $i); $n = scalar @BGP_nbr; $i = 0;
while($n--) {
my $pfx = $BGP_pfx[$i++];
print "n$i.value $pfx\n";
}
# print "total.value $tot_pfx\n";
}
sub fetch_bgpstats
{
my $hostname = shift;
my $username = shift;
my $password = shift;
my $session = Net::Telnet::Cisco->new(Host => $host);
$session->login($username, $password);
$session->cmd('terminal length 200');
$session->cmd('terminal width 200');
my @output = $session->cmd('show ip bgp summary');
# example output of router
# ------------------------
# [...]
# Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd
# 11.111.11.111 4 98765 12403694 509571 308911893 0 0 1d23h 329193
# 122.122.122.122 4 1234 13242856 383827 308911879 0 0 00:08:22 330761
foreach(@output) {
chomp; s/\r//g;
$tot_pfx = $1 if /^BGP activity (\d+)\/(\d+) prefixes/;
syslog('debug', "$hostname: $_\n");
next unless /^(\d+\.\d+\.\d+\.\d+)\s+\d+\s+(\d+)\s+\d+\s+\d+\s+\d+\s+\d+\s+\d+\s+[0-9a-z:]+\s+(\d+)/;
my ($neigh, $as, $pfx) = ($1, $2, $3);
syslog('debug', "$neigh (AS $as)");
push @BGP_nbr, "$neigh (AS $as)";
push @BGP_pfx, $pfx;
}
}
# vim:syntax=perl:ts=8

Some files were not shown because too many files have changed in this diff.