Create package ffmap, add wiki input, remove old code

Jan-Philipp Litza 2014-07-08 14:20:47 +02:00
commit e54e7467fc
21 changed files with 272 additions and 282 deletions

42
ffmap/__init__.py Normal file

@@ -0,0 +1,42 @@
import importlib
from ffmap.nodedb import NodeDB
def run(inputs, outputs):
"""Fill the database with given inputs and give it to given outputs.
Arguments:
inputs -- list of Input instances (with a compatible get_data(nodedb) method)
outputs -- list of Output instances (with a compatible output(nodedb) method)
"""
db = NodeDB()
for input_ in inputs:
input_.get_data(db)
for output in outputs:
output.output(db)
def run_names(inputs, outputs):
"""Fill the database with inputs and give it to outputs, each given
by names.
    In contrast to run(inputs, outputs), this function expects only the
    names of the modules to use, not instances thereof.
Arguments:
    inputs -- list of dicts, each dict having the key "name" with the
              name of the input to use (module name in ffmap/inputs/),
              and the key "options" with a dict of input-dependent options.
outputs -- list of dicts, see inputs.
"""
input_instances = []
output_instances = []
for input_ in inputs:
module = importlib.import_module(".inputs." + input_["name"], "ffmap")
input_instances.append(module.Input(**input_["options"]))
for output in outputs:
module = importlib.import_module(".outputs." + output["name"], "ffmap")
output_instances.append(module.Output(**output["options"]))
run(input_instances, output_instances)
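
For orientation, a minimal sketch of calling run_names() with the modules added in this commit (the option values are just examples):

from ffmap import run_names

run_names(
    inputs=[
        {"name": "alfred", "options": {"request_data_type": 158}},
        {"name": "batadv", "options": {"mesh_interface": "bat0"}},
    ],
    outputs=[
        {"name": "d3json", "options": {"filepath": "nodes.json"}},
    ],
)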

0
ffmap/inputs/__init__.py Normal file

18
ffmap/inputs/alfred.py Normal file

@@ -0,0 +1,18 @@
import subprocess
import json
class Input:
    def __init__(self, request_data_type=158):
self.request_data_type = request_data_type
def get_data(self, nodedb):
"""Add data from alfred to the supplied nodedb"""
output = subprocess.check_output([
"alfred-json",
"-r", str(self.request_data_type),
"-f", "json",
])
alfred_data = json.loads(output.decode("utf-8"))
for mac, node in alfred_data.items():
nodedb.add_or_update([mac], node)
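
A minimal sketch of using this input on its own (assumes alfred-json is installed and an alfred daemon is answering):

from ffmap.nodedb import NodeDB
from ffmap.inputs.alfred import Input

db = NodeDB()
Input(request_data_type=158).get_data(db)  # one Node per announcing MAC ends up in db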

100
ffmap/inputs/batadv.py Normal file

@@ -0,0 +1,100 @@
import subprocess
import json
class Input:
"""Fill the NodeDB with links from batadv-vis.
    The links are added as dicts that reference the neighboring Node
    objects themselves, not just their identifiers. Mind this when
    exporting the database, as the references are circular.
    """
def __init__(self, mesh_interface="bat0"):
self.mesh_interface = mesh_interface
@staticmethod
def _is_similar_mac(a, b):
"""Determine if two MAC addresses are similar."""
if a == b:
return True
# Split the address into bytes
try:
mac_a = list(int(i, 16) for i in a.split(":"))
mac_b = list(int(i, 16) for i in b.split(":"))
except ValueError:
return False
        # Second and third bytes mustn't differ
if mac_a[1] != mac_b[1] or mac_a[2] != mac_b[2]:
return False
        # First byte may only differ in the locally administered bit (0x02)
if mac_a[0] | 2 != mac_b[0] | 2:
return False
# Count differing bytes after the third
c = [x for x in zip(mac_a[3:], mac_b[3:]) if x[0] != x[1]]
        # If more than two of the remaining bytes differ, they are not similar
if len(c) > 2:
return False
# If no more bytes differ, they are very similar
if len(c) == 0:
return True
        # If the sum of the absolute differences is small (less than 2),
        # they are pretty similar
delta = sum(abs(i[0] - i[1]) for i in c)
return delta < 2
def get_data(self, nodedb):
"""Add data from batadv-vis to the supplied nodedb"""
output = subprocess.check_output([
"batadv-vis",
"-i", str(self.mesh_interface),
"-f", "jsondoc",
])
data = json.loads(output.decode("utf-8"))
# First pass
for node in data["vis"]:
# Determine possible other MAC addresses of this node by
# comparing all its client's MAC addresses to its primary
# MAC address. If they are similar, it probably is another
# address of the node itself! If it isn't, it is a real
# client.
node['aliases'] = [node["primary"]]
if 'secondary' in node:
node['aliases'].extend(node['secondary'])
real_clients = []
for mac in node["clients"]:
if self._is_similar_mac(mac, node["primary"]):
node['aliases'].append(mac)
else:
real_clients.append(mac)
node['clients'] = real_clients
# Add nodes and aliases without any information at first.
# This way, we can later link the objects themselves.
nodedb.add_or_update(node['aliases'])
# Second pass
for node in data["vis"]:
# We only need the primary address now, all aliases are
# already present in the database. Furthermore, we can be
# sure that all neighbors are in the database as well. If
# a neighbor isn't added already, we simply ignore it.
nodedb.add_or_update(
[node["primary"]],
{
"clients": node["clients"],
"neighbors": [
{
"metric": neighbor['metric'],
"neighbor": nodedb[neighbor['neighbor']],
} for neighbor in node["neighbors"]
if neighbor['neighbor'] in nodedb
]
}
)
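
To illustrate the similarity heuristic above (addresses are made up):

from ffmap.inputs.batadv import Input

# Differs only in the locally administered bit of the first byte and by 1
# in the last byte, so it is treated as another interface of the same node:
Input._is_similar_mac("de:ad:be:ef:01:01", "dc:ad:be:ef:01:02")  # True

# The second byte differs, so this would be counted as a real client:
Input._is_similar_mac("de:ad:be:ef:01:01", "de:00:be:ef:01:01")  # False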

71
ffmap/inputs/wiki.py Executable file

@@ -0,0 +1,71 @@
from urllib.request import urlopen
from bs4 import BeautifulSoup
class Input:
def __init__(self, url="http://luebeck.freifunk.net/wiki/Knoten"):
self.url = url
def fetch_wikitable(self):
f = urlopen(self.url)
soup = BeautifulSoup(f)
table = soup.find("table")
rows = table.find_all("tr")
headers = []
data = []
def maybe_strip(x):
if isinstance(x.string, str):
return x.string.strip()
else:
return ""
for row in rows:
            tds = [maybe_strip(x) for x in row.find_all("td")]
            ths = [maybe_strip(x) for x in row.find_all("th")]
if any(tds):
data.append(tds)
if any(ths):
headers = ths
return [dict(zip(headers, d)) for d in data]
def get_data(self, nodedb):
nodes = self.fetch_wikitable()
for node in nodes:
if "MAC" not in node or not node["MAC"]:
# without MAC, we cannot merge this data with others, so
# we might as well ignore it
continue
newnode = {
"network": {
"mac": node.get("MAC").lower(),
},
"location": {
"latitude": float(node.get("GPS", " ").split(" ")[0]),
"longitude": float(node.get("GPS", " ").split(" ")[1]),
"description": node.get("Ort"),
} if " " in node.get("GPS", "") else None,
"hostname": node.get("Knotenname"),
"hardware": {
"model": node["Router"],
} if node.get("Router") else None,
"software": {
"firmware": {
"base": "LFF",
"release": node.get("LFF Version"),
},
},
"owner": {
"contact": node["Betreiber"],
} if node.get("Betreiber") else None,
}
# remove keys with None as value
newnode = {k: v for k,v in newnode.items() if v is not None}
nodedb.add_or_update([newnode["network"]["mac"]], newnode)
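
For reference, fetch_wikitable() returns one dict per table row, keyed by the column headers used in get_data() above; a row could look like this (all values invented):

row = {
    "Knotenname": "beispiel-knoten",
    "MAC": "DE:AD:BE:EF:01:01",
    "GPS": "53.866 10.684",
    "Ort": "Beispielstrasse 1",
    "Router": "TL-WR841N",
    "LFF Version": "0.5",
    "Betreiber": "mail@example.org",
}
# get_data() would merge this under the lowercased MAC, filling the
# location, hardware, software and owner sections shown above.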

89
ffmap/node.py Normal file

@@ -0,0 +1,89 @@
from collections import defaultdict
class NoneDict:
"""Act like None but return a NoneDict for every item request.
This is similar to the behaviour of collections.defaultdict in that
    even previously nonexistent keys can be accessed, but nothing is
    stored permanently in this class.
"""
def __repr__(self):
return 'NoneDict()'
def __bool__(self):
return False
def __getitem__(self, k):
return NoneDict()
def __json__(self):
return None
def __float__(self):
return float('NaN')
def __iter__(self):
# empty generator
return
yield
def __setitem__(self, key, value):
raise RuntimeError("NoneDict is readonly")
class Node(defaultdict):
_id = None
def __init__(self, id_=None):
self._id = id_
super().__init__(NoneDict)
def __repr__(self):
return "Node(%s)" % self.id
@property
def id(self):
return self._id
def __hash__(self):
"""Generate hash from the node's id.
WARNING: Obviously this hash doesn't cover all of the node's
data, but we need nodes to be hashable in order to eliminate
duplicates in the NodeDB.
At least the id cannot change after initialization...
"""
return hash(self.id)
def deep_update(self, other):
"""Update the dictionary like dict.update() but recursively."""
def dmerge(a, b):
for k, v in b.items():
if isinstance(v, dict) and isinstance(a.get(k), dict):
dmerge(a[k], v)
else:
a[k] = v
dmerge(self, other)
@property
def vpn_neighbors(self):
try:
vpn_neighbors = []
for neighbor in self['neighbors']:
if neighbor['neighbor']['vpn']:
vpn_neighbors.append(neighbor)
return vpn_neighbors
except TypeError:
return []
def export(self):
"""Generate a serializable dict of the node.
In particular, this replaces any references to other nodes by
their id to prevent circular references.
"""
ret = dict(self)
if "neighbors" in self:
ret["neighbors"] = []
for neighbor in self["neighbors"]:
new_neighbor = {}
for key, val in neighbor.items():
if isinstance(val, Node):
new_neighbor[key] = val.id
else:
new_neighbor[key] = val
ret["neighbors"].append(new_neighbor)
return ret
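
A short sketch of how the NoneDict default interacts with deep_update() (the identifier is made up):

from ffmap.node import Node

n = Node("de:ad:be:ef:00:01")
n["network"]["mac"]                # NoneDict() -- missing data is falsy, no KeyError
float(n["statistics"]["loadavg"])  # nan, which suits the RRD export further below

n.deep_update({"network": {"mac": "de:ad:be:ef:00:01"}})
n.deep_update({"network": {"hostname": "node1"}})
n["network"]                       # {'mac': 'de:ad:be:ef:00:01', 'hostname': 'node1'}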

60
ffmap/nodedb.py Normal file

@@ -0,0 +1,60 @@
from .node import Node
class AmbiguityError(Exception):
"""Indicate the ambiguity of identifiers.
This exception is raised if there is more than one match for a set
of identifiers.
Attributes:
identifiers -- set of ambiguous identifiers
"""
identifiers = []
def __init__(self, identifiers):
self.identifiers = identifiers
def __str__(self):
return "Ambiguous identifiers: %s" % ", ".join(self.identifiers)
class NodeDB(dict):
def add_or_update(self, ids, other=None):
"""Add or update a node in the database.
Searches for an already existing node and updates it, or adds a new
        one if no existing one is found. Raises an AmbiguityError if the
        identifiers match more than one distinct node.
Arguments:
ids -- list of possible identifiers (probably MAC addresses) of the
node
other -- dict of values to update in an existing node or add to
the new one. Defaults to None, in which case no values
are added or updated, only the aliases of the
(possibly freshly created) node are updated.
"""
# Find existing node, if any
node = None
node_id = None
for id_ in ids:
if id_ == node_id:
continue
            if id_ in self:
                if node is not None and self[id_] is not node:
                    raise AmbiguityError([node_id, id_])
                node = self[id_]
                node_id = id_
# If no node was found, create a new one
if node is None:
node = Node(ids[0])
        # Update the node with the given properties using its deep_update method
if other is not None:
node.deep_update(other)
# Add new aliases if any
for id_ in ids:
self[id_] = node
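
A sketch of how identifiers get merged into aliases of one Node (addresses made up):

from ffmap.nodedb import NodeDB

db = NodeDB()
db.add_or_update(["de:ad:be:ef:00:01"], {"hostname": "node1"})
db.add_or_update(["de:ad:be:ef:00:01", "de:ad:be:ef:00:02"])   # register an alias
db["de:ad:be:ef:00:02"]["hostname"]   # 'node1' -- both keys point to the same Node

db.add_or_update(["de:ad:be:ef:00:99"], {"hostname": "node2"})
# Listing identifiers of two distinct nodes would raise AmbiguityError:
# db.add_or_update(["de:ad:be:ef:00:01", "de:ad:be:ef:00:99"])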

1
ffmap/outputs/__init__.py Normal file

@@ -0,0 +1 @@

77
ffmap/outputs/d3json.py Normal file

@@ -0,0 +1,77 @@
import json
from datetime import datetime
__all__ = ["Output"]
class CustomJSONEncoder(json.JSONEncoder):
"""
JSON encoder that uses an object's __json__() method to convert it
to something JSON-compatible.
"""
def default(self, obj):
try:
return obj.__json__()
except AttributeError:
pass
return super().default(obj)
class Output:
def __init__(self, filepath="nodes.json"):
self.filepath = filepath
@staticmethod
def generate(nodedb):
indexes = {}
nodes = []
count = 0
for node in set(nodedb.values()):
nodes.append(node.export())
indexes[node.id] = count
count += 1
links = []
for node in set(nodedb.values()):
if "neighbors" in node:
links.extend(
{
"source": indexes[node.id],
"target": indexes[neighbor["neighbor"].id],
"quality": neighbor["metric"],
"type": "vpn" if neighbor["neighbor"]["vpn"] else None,
"id": "-".join((node.id, neighbor["neighbor"].id)),
} for neighbor in node["neighbors"]
)
if "clients" in node:
for client in node["clients"]:
                    if client not in indexes:
nodes.append({
"id": client,
})
indexes[client] = count
count += 1
links.append({
"source": indexes[node.id],
"target": indexes[client],
"quality": "TT",
"type": "client",
"id": "-".join((node.id, client)),
})
return {
"nodes": nodes,
"links": links,
"meta": {
"timestamp": datetime.utcnow()
.replace(microsecond=0)
.isoformat()
}
}
def output(self, nodedb):
with open(self.filepath, "w") as nodes_json:
json.dump(
self.generate(nodedb),
nodes_json,
cls=CustomJSONEncoder
)
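
Putting it together with the inputs above, a sketch of producing nodes.json (interface and path are examples):

from ffmap import run
from ffmap.inputs import batadv
from ffmap.outputs import d3json

# Collects the batman-adv topology and writes an ffmap-d3 compatible JSON
# file with "nodes", index-based "links" and a "meta" timestamp.
run([batadv.Input(mesh_interface="bat0")], [d3json.Output(filepath="nodes.json")])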

30
ffmap/outputs/rrd.py Normal file

@@ -0,0 +1,30 @@
import os
from ffmap.rrd.rrds import NodeRRD, GlobalRRD
class Output:
def __init__(self, directory="nodedb"):
self.directory = directory
try:
os.mkdir(self.directory)
except OSError:
pass
def output(self, nodedb):
nodes = set(nodedb.values())
clients = 0
nodecount = 0
for node in nodes:
clients += len(node.get("clients", []))
nodecount += 1
NodeRRD(
os.path.join(
self.directory,
str(node.id).replace(':', '') + '.rrd'
),
node
).update()
GlobalRRD(os.path.join(self.directory, "nodes.rrd")).update(
nodecount,
clients
)

336
ffmap/rrd/__init__.py Normal file

@@ -0,0 +1,336 @@
import subprocess
import re
import io
import os
from tempfile import TemporaryFile
from operator import xor, eq
from functools import reduce
from itertools import starmap
import math
class RRDIncompatibleException(Exception):
"""
Is raised when an RRD doesn't have the desired definition and cannot be
upgraded to it.
"""
pass
class RRDOutdatedException(Exception):
"""
Is raised when an RRD doesn't have the desired definition, but can be
upgraded to it.
"""
pass
if not hasattr(__builtins__, "FileNotFoundError"):
class FileNotFoundError(Exception):
pass
class RRD:
"""
An RRD is a Round Robin Database, a database which forgets old data and
aggregates multiple records into new ones.
It contains multiple Data Sources (DS) which can be thought of as columns,
and Round Robin Archives (RRA) which can be thought of as tables with the
    DS as columns and time-dependent rows.
"""
# rra[2].cdp_prep[0].value = 1,8583033333e+03
    _info_regex = re.compile(r"""
(?P<section>[a-z_]+)
\[ (?P<key>[a-zA-Z0-9_]+) \]
\.
|
(?P<name>[a-z_]+)
\s*=\s*
"? (?P<value>.*?) "?
$""", re.X)
_cached_info = None
def _exec_rrdtool(self, cmd, *args, **kwargs):
pargs = ["rrdtool", cmd, self.filename]
for k,v in kwargs.items():
pargs.extend(["--" + k, str(v)])
pargs.extend(args)
subprocess.check_output(pargs)
def __init__(self, filename):
self.filename = filename
def ensureSanity(self, ds_list, rra_list, **kwargs):
"""
Create or upgrade the RRD file if necessary to contain all DS in
ds_list. If it needs to be created, the RRAs in rra_list and any kwargs
will be used for creation. Note that RRAs and options of an existing
database are NOT modified!
"""
try:
self.checkSanity(ds_list)
except FileNotFoundError:
self.create(ds_list, rra_list, **kwargs)
except RRDOutdatedException:
self.upgrade(ds_list)
def checkSanity(self, ds_list=()):
"""
Check if the RRD file exists and contains (at least) the DS listed in
ds_list.
"""
if not os.path.exists(self.filename):
raise FileNotFoundError(self.filename)
info = self.info()
if set(ds_list) - set(info['ds'].values()) != set():
for ds in ds_list:
if ds.name in info['ds'] and ds.type != info['ds'][ds.name].type:
                    raise RRDIncompatibleException("%s is %s but should be %s" % (ds.name, info['ds'][ds.name].type, ds.type))
else:
raise RRDOutdatedException()
def upgrade(self, dss):
"""
Upgrade the DS definitions (!) of this RRD.
(To update its values, use update())
The list dss contains DSS objects to be updated or added. The
parameters of a DS can be changed, but not its type. New DS are always
added at the end in the order of their appearance in the list.
This is done internally via an rrdtool dump -> rrdtool restore and
modifying the dump on the fly.
"""
info = self.info()
new_ds = list(info['ds'].values())
new_ds.sort(key=lambda ds: ds.index)
for ds in dss:
if ds.name in info['ds']:
old_ds = info['ds'][ds.name]
if info['ds'][ds.name].type != ds.type:
raise RuntimeError('Cannot convert existing DS "%s" from type "%s" to "%s"' %
(ds.name, old_ds.type, ds.type))
ds.index = old_ds.index
new_ds[ds.index] = ds
else:
ds.index = len(new_ds)
new_ds.append(ds)
added_ds_num = len(new_ds) - len(info['ds'])
dump = subprocess.Popen(
["rrdtool", "dump", self.filename],
stdout=subprocess.PIPE
)
restore = subprocess.Popen(
["rrdtool", "restore", "-", self.filename + ".new"],
stdin=subprocess.PIPE
)
echo = True
ds_definitions = True
for line in dump.stdout:
if ds_definitions and b'<ds>' in line:
echo = False
if b'<!-- Round Robin Archives -->' in line:
ds_definitions = False
for ds in new_ds:
restore.stdin.write(bytes("""
<ds>
<name> %s </name>
<type> %s </type>
<minimal_heartbeat>%i</minimal_heartbeat>
<min>%s</min>
<max>%s</max>
<!-- PDP Status -->
<last_ds>%s</last_ds>
<value>%s</value>
<unknown_sec> %i </unknown_sec>
</ds>
""" % (
ds.name,
ds.type,
ds.args[0],
ds.args[1],
ds.args[2],
ds.last_ds,
ds.value,
ds.unknown_sec)
, "utf-8"))
if b'</cdp_prep>' in line:
restore.stdin.write(added_ds_num*b"""
<ds>
<primary_value> NaN </primary_value>
<secondary_value> NaN </secondary_value>
<value> NaN </value>
<unknown_datapoints> 0 </unknown_datapoints>
</ds>
""")
# echoing of input line
if echo:
restore.stdin.write(
line.replace(
b'</row>',
(added_ds_num*b'<v>NaN</v>')+b'</row>'
)
)
if ds_definitions and b'</ds>' in line:
echo = True
dump.stdout.close()
restore.stdin.close()
dump.wait()
restore.wait()
os.rename(self.filename + ".new", self.filename)
self._cached_info = None
def create(self, ds_list, rra_list, **kwargs):
"""
Create a new RRD file with the specified list of RRAs and DSs.
Any kwargs are passed as --key=value to rrdtool create.
"""
self._exec_rrdtool(
"create",
*map(str, rra_list + ds_list),
**kwargs
)
self._cached_info = None
def update(self, V):
"""
Update the RRD with new values V.
V can be either list or dict:
* If it is a dict, its keys must be DS names in the RRD and it is
ensured that the correct DS are updated with the correct values, by
passing a "template" to rrdtool update (see man rrdupdate).
* If it is a list, no template is generated and the order of the
values in V must be the same as that of the DS in the RRD.
"""
try:
args = ['N:' + ':'.join(map(str, V.values()))]
kwargs = {'template': ':'.join(V.keys())}
except AttributeError:
args = ['N:' + ':'.join(map(str, V))]
kwargs = {}
self._exec_rrdtool("update", *args, **kwargs)
self._cached_info = None
def info(self):
"""
Return a dictionary with information about the RRD.
See `man rrdinfo` for more details.
"""
if self._cached_info:
return self._cached_info
env = os.environ.copy()
env["LC_ALL"] = "C"
proc = subprocess.Popen(
["rrdtool", "info", self.filename],
stdout=subprocess.PIPE,
env=env
)
out, err = proc.communicate()
out = out.decode()
info = {}
for line in out.splitlines():
base = info
for match in self._info_regex.finditer(line):
section, key, name, value = match.group("section", "key", "name", "value")
if section and key:
try:
key = int(key)
except ValueError:
pass
if section not in base:
base[section] = {}
if key not in base[section]:
base[section][key] = {}
base = base[section][key]
if name and value:
try:
base[name] = int(value)
except ValueError:
try:
base[name] = float(value)
                        except ValueError:
base[name] = value
dss = {}
for name, ds in info['ds'].items():
ds_obj = DS(name, ds['type'], ds['minimal_heartbeat'], ds['min'], ds['max'])
ds_obj.index = ds['index']
ds_obj.last_ds = ds['last_ds']
ds_obj.value = ds['value']
ds_obj.unknown_sec = ds['unknown_sec']
dss[name] = ds_obj
info['ds'] = dss
rras = []
for rra in info['rra'].values():
rras.append(RRA(rra['cf'], rra['xff'], rra['pdp_per_row'], rra['rows']))
info['rra'] = rras
self._cached_info = info
return info
class DS:
"""
    DS stands for Data Source and represents one series of data points in a
    Round Robin Database (RRD).
"""
name = None
type = None
args = []
index = -1
last_ds = 'U'
value = 0
unknown_sec = 0
def __init__(self, name, dst, *args):
self.name = name
self.type = dst
self.args = args
def __str__(self):
return "DS:%s:%s:%s" % (
self.name,
self.type,
":".join(map(str, self._nan_to_U_args()))
)
def __repr__(self):
return "%s(%r, %r, %s)" % (
self.__class__.__name__,
self.name,
self.type,
", ".join(map(repr, self.args))
)
def __eq__(self, other):
return all(starmap(eq, zip(self._compare_keys(), other._compare_keys())))
def __hash__(self):
return reduce(xor, map(hash, self._compare_keys()))
def _nan_to_U_args(self):
return tuple(
'U' if type(arg) is float and math.isnan(arg)
else arg
for arg in self.args
)
def _compare_keys(self):
return (self.name, self.type, self._nan_to_U_args())
class RRA:
def __init__(self, cf, *args):
self.cf = cf
self.args = args
def __str__(self):
return "RRA:%s:%s" % (self.cf, ":".join(map(str, self.args)))
def __repr__(self):
return "%s(%r, %s)" % (
self.__class__.__name__,
self.cf,
", ".join(map(repr, self.args))
)
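
A minimal sketch of the intended use of these classes (file name and value are examples; rrdtool needs to be installed):

from ffmap.rrd import RRD, DS, RRA

rrd = RRD("example.rrd")
rrd.ensureSanity(
    [DS("clients", "GAUGE", 120, 0, float("NaN"))],
    [RRA("AVERAGE", 0.5, 1, 120)],
    step=60,
)
# The dict form passes a template, so the DS order in the file doesn't matter;
# this roughly runs: rrdtool update example.rrd --template clients N:23
rrd.update({"clients": 23})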

115
ffmap/rrd/rrds.py Normal file

@@ -0,0 +1,115 @@
import os
import subprocess
from ffmap.node import Node
from . import RRD, DS, RRA
class NodeRRD(RRD):
ds_list = [
DS('upstate', 'GAUGE', 120, 0, 1),
DS('clients', 'GAUGE', 120, 0, float('NaN')),
DS('neighbors', 'GAUGE', 120, 0, float('NaN')),
DS('vpn_neighbors', 'GAUGE', 120, 0, float('NaN')),
DS('loadavg', 'GAUGE', 120, 0, float('NaN')),
DS('rx_bytes', 'DERIVE', 120, 0, float('NaN')),
DS('rx_packets', 'DERIVE', 120, 0, float('NaN')),
DS('tx_bytes', 'DERIVE', 120, 0, float('NaN')),
DS('tx_packets', 'DERIVE', 120, 0, float('NaN')),
DS('mgmt_rx_bytes', 'DERIVE', 120, 0, float('NaN')),
DS('mgmt_rx_packets', 'DERIVE', 120, 0, float('NaN')),
DS('mgmt_tx_bytes', 'DERIVE', 120, 0, float('NaN')),
DS('mgmt_tx_packets', 'DERIVE', 120, 0, float('NaN')),
DS('forward_bytes', 'DERIVE', 120, 0, float('NaN')),
DS('forward_packets', 'DERIVE', 120, 0, float('NaN')),
]
rra_list = [
RRA('AVERAGE', 0.5, 1, 120), # 2 hours of 1 minute samples
RRA('AVERAGE', 0.5, 5, 1440), # 5 days of 5 minute samples
RRA('AVERAGE', 0.5, 60, 720), # 30 days of 1 hour samples
RRA('AVERAGE', 0.5, 720, 730), # 1 year of 12 hour samples
]
    def __init__(self, filename, node=None):
"""
Create a new RRD for a given node.
If the RRD isn't supposed to be updated, the node can be omitted.
"""
self.node = node
super().__init__(filename)
self.ensureSanity(self.ds_list, self.rra_list, step=60)
@property
def imagename(self):
return os.path.basename(self.filename).rsplit('.', 2)[0] + ".png"
def update(self):
values = {
'upstate': 1,
'clients': float(len(self.node.get('clients', []))),
'neighbors': float(len(self.node.get('neighbors', []))),
'vpn_neighbors': float(len(self.node.vpn_neighbors)),
'loadavg': float(self.node['statistics']['loadavg']),
}
for item in ('rx', 'tx', 'mgmt_rx', 'mgmt_tx', 'forward'):
try:
values[item + '_bytes'] = int(self.node['statistics']['traffic'][item]['bytes'])
except TypeError:
pass
try:
values[item + '_packets'] = int(self.node['statistics']['traffic'][item]['packets'])
except TypeError:
pass
super().update(values)
def graph(self, directory, timeframe):
"""
Create a graph in the given directory. The file will be named
basename.png if the RRD file is named basename.rrd
"""
args = ['rrdtool','graph', os.path.join(directory, self.imagename),
'-s', '-' + timeframe ,
'-w', '800',
'-h', '400',
'-l', '0',
'-y', '1:1',
'DEF:clients=' + self.filename + ':clients:AVERAGE',
'VDEF:maxc=clients,MAXIMUM',
'CDEF:c=0,clients,ADDNAN',
'CDEF:d=clients,UN,maxc,UN,1,maxc,IF,*',
'AREA:c#0F0:up\\l',
'AREA:d#F00:down\\l',
'LINE1:c#00F:clients connected\\l',
]
subprocess.check_output(args)
class GlobalRRD(RRD):
ds_list = [
# Number of nodes available
DS('nodes', 'GAUGE', 120, 0, float('NaN')),
        # Number of clients available
DS('clients', 'GAUGE', 120, 0, float('NaN')),
]
rra_list = [
RRA('AVERAGE', 0.5, 1, 120), # 2 hours of 1 minute samples
RRA('AVERAGE', 0.5, 60, 744), # 31 days of 1 hour samples
RRA('AVERAGE', 0.5, 1440, 1780),# ~5 years of 1 day samples
]
def __init__(self, filepath):
super().__init__(filepath)
self.ensureSanity(self.ds_list, self.rra_list, step=60)
def update(self, nodeCount, clientCount):
super().update({'nodes': nodeCount, 'clients': clientCount})
def graph(self, filename, timeframe):
args = ["rrdtool", 'graph', filename,
'-s', '-' + timeframe,
'-w', '800',
                '-h', '400',
'DEF:nodes=' + self.filename + ':nodes:AVERAGE',
'LINE1:nodes#F00:nodes\\l',
'DEF:clients=' + self.filename + ':clients:AVERAGE',
'LINE2:clients#00F:clients',
]
subprocess.check_output(args)
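
The graph() helpers above aren't exercised by the rrd output module; a sketch of calling them directly (paths and timeframe are made up):

from ffmap.rrd.rrds import NodeRRD, GlobalRRD

# The node argument may be omitted when the RRD is only read for graphing.
NodeRRD("nodedb/deadbeef0001.rrd").graph("/var/www/graphs", "1d")
GlobalRRD("nodedb/nodes.rrd").graph("/var/www/graphs/global.png", "1d")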

69
ffmap/run.py Normal file

@@ -0,0 +1,69 @@
#!/usr/bin/env python3
import argparse
import sys
from ffmap import run_names
class MyAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if self.dest.startswith(("input_", "output_")):
collection_name = self.dest.split("_")[0] + "s"
name = self.dest.split("_", 1)[1]
if not hasattr(namespace, collection_name):
setattr(namespace, collection_name, [])
collection = getattr(namespace, collection_name)
collection.append({
"name": name,
"options": {self.metavar.lower(): values}
if values is not None else {}
})
else:
raise Exception("Unexpected dest=" + self.dest)
def parser_add_myarg(parser, name, metavar="OPT", help=None):
parser.add_argument("--" + name,
metavar=metavar,
type=str,
nargs='?',
const=None,
action=MyAction,
help=help)
parser = argparse.ArgumentParser(
description="""Merge node data from multiple sources and generate
various output formats from this data""",
)
input_group = parser.add_argument_group("Inputs", description="""
    Inputs are used in the order given on the command line. Later inputs
    can overwrite attributes of the same name set by earlier inputs, but
    the first input that encounters a node sets its id, which is
    immutable afterwards.
    The same input may be given multiple times, possibly with different
    options.
""")
output_group = parser.add_argument_group("Outputs")
parser_add_myarg(input_group, 'input-alfred', metavar="REQUEST_DATA_TYPE",
help="read node details from A.L.F.R.E.D.")
parser_add_myarg(input_group, 'input-wiki', metavar="URL",
help="read node details from a Wiki page")
parser_add_myarg(input_group, 'input-batadv', metavar="MESH_INTERFACE",
help="add node's neighbors and clients from batadv-vis")
parser_add_myarg(output_group, 'output-d3json', metavar="FILEPATH",
help="generate JSON file compatible with ffmap-d3")
parser_add_myarg(output_group, 'output-rrd', metavar="DIRECTORY",
help="update RRDs with statistics, one global and one per node")
args = parser.parse_args()
if "inputs" not in args or not args.inputs:
parser.print_help(sys.stderr)
sys.stderr.write("\nERROR: No input has been defined!\n")
sys.exit(1)
if "outputs" not in args or not args.outputs:
parser.print_help(sys.stderr)
sys.stderr.write("\nERROR: No output has been defined!\n")
sys.exit(1)
run_names(inputs=args.inputs, outputs=args.outputs)
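
With the repository root as the working directory, an invocation could look like this (interface and paths are examples):

python3 -m ffmap.run --input-batadv bat0 --input-alfred --output-d3json nodes.json --output-rrd nodedb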