mirror of
https://github.com/munin-monitoring/contrib.git
synced 2018-11-08 00:59:34 +01:00
in ceph-osd-info:
Break up some functions into more manageable steps. If the JSON from ceph is invalid, handle it correctly (i.e. don't return anything). Handle larger chunks of data from ceph (100k instead of 10k per osd).
This commit is contained in:
parent
aedea208a5
commit
0d5391f903
@ -95,10 +95,21 @@ def read_osd(filename):
|
||||
s=socket.socket(socket.AF_UNIX,socket.SOCK_STREAM)
|
||||
s.connect(filename)
|
||||
s.send("{\"prefix\": \"perf dump\"}\0")
|
||||
return json.loads(s.recv(10240)[4:])
|
||||
result=s.recv(102400)
|
||||
result=result[4:]
|
||||
try:
|
||||
return json.loads(result)
|
||||
except:
|
||||
print >> sys.stderr, "Result from %s: %s" % (filename,result)
|
||||
return None
|
||||
|
||||
def osd_list():
|
||||
return dict([(osd.split(".")[1],read_osd(osd)) for osd in glob.glob("/var/run/ceph/ceph-osd.*.asok")])
|
||||
result={}
|
||||
for osd in glob.glob("/var/run/ceph/ceph-osd.*.asok"):
|
||||
data=read_osd(osd)
|
||||
if data:
|
||||
result[osd.split(".")[1]]=data
|
||||
return result
|
||||
|
||||
def collapse_one(inputdict,newkeyformat="%s_%s"):
|
||||
"""map inputdict["a"]["b"]=val to outdict["a_b"]=val"""
|
||||
@ -116,7 +127,7 @@ def sortlist(listtosort):
|
||||
|
||||
# get and tidy osd_list, get derived keys.
|
||||
data=osd_list()
|
||||
osds=list(data.keys())
|
||||
osds=[key for key in data.keys() if data[key]!=None]
|
||||
osds.sort()
|
||||
for key in osds:
|
||||
data[key]=collapse_one(data[key])
|
||||
|
Loading…
Reference in New Issue
Block a user