Compare commits

..

1 commit

Author SHA1 Message Date
Nils Schneider 820da07451 remove RRD support 2015-04-23 19:16:07 +02:00
20 changed files with 28 additions and 1062 deletions

View file

@@ -18,13 +18,6 @@ For the script's regular execution add the following to the crontab:
* * * * * backend.py -d /path/to/output -a /path/to/aliases.json --vpn ae:7f:58:7d:6c:2a d2:d0:93:63:f7:da
</pre>
# Dependencies
- Python 3
- Python 3 Package [Networkx](https://networkx.github.io/)
- [alfred-json](https://github.com/tcatm/alfred-json)
- rrdtool (if run with `--with-rrd`)
# Running as unprivileged user
Some information collected by ffmap-backend requires access to specific system resources.
@@ -79,21 +72,6 @@ will prefix `sudo` where necessary.
- online
- gateway
## Old data format
If you still want to use the old [ffmap-d3](https://github.com/ffnord/ffmap-d3)
front end, you can use the file `ffmap-d3.jq` to convert the new output to the
old one:
```
jq -n -f ffmap-d3.jq \
--argfile nodes nodedb/nodes.json \
--argfile graph nodedb/graph.json \
> nodedb/ffmap-d3.json
```
Then point your ffmap-d3 instance to the `ffmap-d3.json` file.
# Removing owner information
If you'd like to redact information about the node owner from `nodes.json`,

View file

@@ -1,42 +0,0 @@
#!/usr/bin/env python3

import subprocess
import json
from collections.abc import MutableMapping  # plain 'collections' is deprecated since Python 3.3


def rec_merge(d1, d2):
    '''
    Update two dicts of dicts recursively,
    if either mapping has leaves that are non-dicts,
    the second's leaf overwrites the first's.
    '''
    for k, v in d1.items():  # in Python 2, use .iteritems()!
        if k in d2:
            # this next check is the only difference!
            if all(isinstance(e, MutableMapping) for e in (v, d2[k])):
                d2[k] = rec_merge(v, d2[k])
            # we could further check types and merge as appropriate here.
    d3 = d1.copy()
    d3.update(d2)
    return d3


class alfred_merge:
    def __init__(self, request_data_type_1=158, request_data_type_2=159):
        self.request_data_type_1 = request_data_type_1
        self.request_data_type_2 = request_data_type_2

    def aliases(self):
        output = subprocess.check_output(["/usr/local/bin/alfred-json", "-z", "-r", str(self.request_data_type_1), "-f", "json"])
        alfred_data_1 = json.loads(output.decode("utf-8"))
        output = subprocess.check_output(["/usr/local/bin/alfred-json", "-z", "-r", str(self.request_data_type_2), "-f", "json"])
        alfred_data_2 = json.loads(output.decode("utf-8"))

        return json.dumps(rec_merge(alfred_data_1, alfred_data_2))


if __name__ == "__main__":
    ad = alfred_merge()
    al = ad.aliases()
    print(al)
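To make the merge semantics concrete, a small example of what `rec_merge` does with overlapping nested dicts (values made up):

```
a = {'node1': {'hostname': 'gw01', 'location': {'latitude': 53.5}}}
b = {'node1': {'location': {'longitude': 10.0}}}
rec_merge(a, b)
# -> {'node1': {'hostname': 'gw01',
#               'location': {'latitude': 53.5, 'longitude': 10.0}}}
```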

View file

@@ -7,30 +7,18 @@
            "latitude": 53.86
        },
        "network": {
            "mesh": {
                "bat0": {
                    "interfaces": {
                        "tunnel": [
            "mesh_interfaces": [
                "00:25:86:e6:f1:bf"
            ]
                        }
                    }
                }
            }
        }
    },
    {
        "node_id": "gw1",
        "hostname": "burgtor",
        "network": {
            "mesh": {
                "bat0": {
                    "interfaces": {
                        "tunnel": [
            "mesh_interfaces": [
                "52:54:00:f3:62:d9"
            ]
                        }
                    }
                }
            }
        }
    }
]

View file

@@ -15,9 +15,7 @@ from networkx.readwrite import json_graph
from lib import graph, nodes
from lib.alfred import Alfred
from lib.batman import Batman
from lib.rrddb import RRD
from lib.nodelist import export_nodelist
from lib.validate import validate_nodeinfos
NODES_VERSION = 1
GRAPH_VERSION = 1
@@ -27,13 +25,8 @@ def main(params):
    os.makedirs(params['dest_dir'], exist_ok=True)

    nodes_fn = os.path.join(params['dest_dir'], 'nodes.json')
    tmp_nodes_fn = os.path.join(params['dest_dir'], 'nodes.json.tmp')

    graph_fn = os.path.join(params['dest_dir'], 'graph.json')
    tmp_graph_fn = os.path.join(params['dest_dir'], 'graph.json.tmp')

    nodelist_fn = os.path.join(params['dest_dir'], 'nodelist.json')
    tmp_nodelist_fn = os.path.join(params['dest_dir'], 'nodelist.json.tmp')

    now = datetime.utcnow().replace(microsecond=0)
@@ -82,16 +75,14 @@ def main(params):
    # integrate alfred nodeinfo
    for alfred in alfred_instances:
        nodeinfo = validate_nodeinfos(alfred.nodeinfo())
        nodes.import_nodeinfo(nodedb['nodes'], nodeinfo,
        nodes.import_nodeinfo(nodedb['nodes'], alfred.nodeinfo(),
                              now, assume_online=True)

    # integrate static aliases data
    for aliases in params['aliases']:
        with open(aliases, 'r') as f:
            # nodeinfo = validate_nodeinfos(json.load(f))
            nodes.import_nodeinfo(nodedb['nodes'], json.load(f),
                                  now, assume_online=False, statics=True)
                                  now, assume_online=False)

    nodes.reset_statistics(nodedb['nodes'])
    for alfred in alfred_instances:
@@ -102,6 +93,7 @@ def main(params):
    for batman in batman_instances:
        vd = batman.vis_data()
        gwl = batman.gateway_list()

        mesh_info.append((vd, gwl))

    # update nodedb from batman-adv data
@@ -124,48 +116,23 @@ def main(params):
    if params['vpn']:
        graph.mark_vpn(batadv_graph, frozenset(params['vpn']))

    def extract_tunnel(nodes):
        macs = set()
        for id, node in nodes.items():
            try:
                for mac in node["nodeinfo"]["network"]["mesh"]["bat0"]["interfaces"]["tunnel"]:
                    macs.add(mac)
                for mac in node["nodeinfo"]["network"]["mesh"]["bat-ffhh"]["interfaces"]["tunnel"]:
                    macs.add(mac)
            except KeyError:
                pass
        return macs

    graph.mark_vpn(batadv_graph, extract_tunnel(nodedb['nodes']))

    batadv_graph = graph.merge_nodes(batadv_graph)
    batadv_graph = graph.to_undirected(batadv_graph)

    # write processed data to dest dir
    with open(tmp_nodes_fn, 'w') as f:
    with open(nodes_fn, 'w') as f:
        json.dump(nodedb, f)

    graph_out = {'batadv': json_graph.node_link_data(batadv_graph),
                 'version': GRAPH_VERSION}

    with open(tmp_graph_fn, 'w') as f:
    with open(graph_fn, 'w') as f:
        json.dump(graph_out, f)

    with open(tmp_nodelist_fn, 'w') as f:
    with open(nodelist_fn, 'w') as f:
        json.dump(export_nodelist(now, nodedb), f)

    os.rename(tmp_nodes_fn, nodes_fn)
    os.rename(tmp_graph_fn, graph_fn)
    os.rename(tmp_nodelist_fn, nodelist_fn)

    # optional rrd graphs (trigger with --with-rrd)
    if params['rrd']:
        script_directory = os.path.dirname(os.path.realpath(__file__))
        rrd = RRD(os.path.join(script_directory, 'nodedb'),
                  os.path.join(params['dest_dir'], 'nodes'))
        rrd.update_database(nodedb['nodes'])
        rrd.update_images()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()

@@ -185,10 +152,6 @@ if __name__ == '__main__':
                        help='Assume MAC addresses are part of vpn')
    parser.add_argument('-p', '--prune', metavar='DAYS', type=int,
                        help='forget nodes offline for at least DAYS')
    parser.add_argument('--with-rrd', dest='rrd', action='store_true',
                        default=False,
                        help='enable the rendering of RRD graphs (cpu '
                             'intensive)')

    options = vars(parser.parse_args())
    main(options)
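A side note on this hunk: the removed `*.tmp` files plus `os.rename` calls implemented the usual atomic-replace pattern, so consumers of `nodes.json`, `graph.json` and `nodelist.json` could never observe a half-written file. A minimal sketch of the pattern (the helper name is illustrative, not part of the code base):

```
import json
import os

def write_json_atomically(obj, path):
    tmp = path + '.tmp'
    with open(tmp, 'w') as f:
        json.dump(obj, f)
    os.rename(tmp, path)  # rename is atomic on POSIX within one filesystem
```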

View file

@@ -1,52 +0,0 @@
{
    "meta": {
        "timestamp": $nodes.timestamp
    },
    "nodes": (
        $graph.batadv.nodes
        | map(
            if has("node_id") and .node_id
            then (
                $nodes.nodes[.node_id] as $node
                | {
                    "id": .id,
                    "uptime": $node.statistics.uptime,
                    "flags": ($node.flags + {"client": false}),
                    "name": $node.nodeinfo.hostname,
                    "clientcount": (if $node.statistics.clients >= 0 then $node.statistics.clients else 0 end),
                    "hardware": $node.nodeinfo.hardware.model,
                    "firmware": $node.nodeinfo.software.firmware.release,
                    "geo": (if $node.nodeinfo.location then [$node.nodeinfo.location.latitude, $node.nodeinfo.location.longitude] else null end),
                    #"lastseen": $node.lastseen,
                    "network": $node.nodeinfo.network
                }
            )
            else
                {
                    "flags": {},
                    "id": .id,
                    "geo": null,
                    "clientcount": 0
                }
            end
        )
    ),
    "links": (
        $graph.batadv.links
        | map(
            $graph.batadv.nodes[.source].node_id as $source_id
            | $graph.batadv.nodes[.target].node_id as $target_id
            | select(
                $source_id and $target_id and
                ($nodes.nodes | (has($source_id) and has($target_id)))
            )
            | {
                "target": .target,
                "source": .source,
                "quality": "\(.tq), \(.tq)",
                "id": ($source_id + "-" + $target_id),
                "type": (if .vpn then "vpn" else null end)
            }
        )
    )
}

View file

@@ -1,29 +0,0 @@
[
    {
        "node_id": "deadbfff0101",
        "hostname": "gw01"
    },
    {
        "node_id": "deadbeef0505",
        "hostname": "gw02.hamburg.freifunk.net",
        "network": {
            "mac": "de:ad:be:ef:05:05",
            "mesh": {
                "bat0": {
                    "interfaces": {
                        "tunnel": [
                            "de:ad:be:ff:05:05",
                            "de:ad:be:fc:05:05",
                            "de:ad:bf:ff:05:05"
                        ]
                    }
                }
            }
        }
    },
    {
        "node_id": "00163efb9d8d",
        "hostname": "gw03"
    }
]

View file

@@ -1,110 +0,0 @@
#!/usr/bin/env python2
from __future__ import print_function

import json
import os
import sys

if len(sys.argv) != 2:
    print('usage: ' + sys.argv[0] + ' /path/to/peers')
    sys.exit(1)

peersDir = sys.argv[1]


def normalizeMac(mac):
    mac = mac.lower()
    normalized = ''
    n = 0
    for c in mac:
        if c != ':':
            if n > 0 and n % 2 == 0:
                normalized = normalized + ':'
            normalized = normalized + c
            n += 1
    return normalized


def toAlias(peer):
    alias = {}
    if not (peer.has_key('name') and peer.has_key('mac')):
        return None

    name = peer['name']
    mac = peer['mac']

    alias['node_id'] = mac.replace(':', '')
    alias['hostname'] = name

    if peer.has_key('geo'):
        geo = peer['geo']
        location = {}

        if geo.has_key('lon'): location['longitude'] = geo['lon']
        if geo.has_key('lat'): location['latitude'] = geo['lat']

        alias['location'] = location

    #alias['network'] = {}
    #alias['network']['mesh_interfaces'] = [mac]

    return alias

aliases = []
for filename in os.listdir(peersDir):
    if len(filename) == 0 or filename[0] == '.':
        continue

    isGateway = False
    absFilename = peersDir + '/' + filename
    if os.path.isfile(absFilename):
        peerFile = open(absFilename, 'r')
        try:
            peerLines = peerFile.readlines()
            peer = {}
            for line in peerLines:
                parts = line.split()
                if len(parts) > 2:
                    if parts[1] == 'Knotenname:':
                        peer['name'] = parts[2]
                    elif parts[0] == 'remote':
                        isGateway = True
                    elif parts[1] == 'MAC:':
                        peer['mac'] = normalizeMac(parts[2])
                    elif parts[1] == 'Koordinaten:' and len(parts) > 3:
                        try:
                            peer['geo'] = {'lat': float(parts[2]), 'lon': float(parts[3])}
                        except ValueError:
                            print('Error in %s: Invalid coordinates: %s' % (absFilename, parts[2:4]), file=sys.stderr)
                elif len(parts) == 2 and parts[0] == 'key':
                    keyParts = parts[1].split('"')
                    if len(keyParts) > 1:
                        peer['vpn'] = keyParts[1].lower()

            if isGateway:
                continue

            alias = toAlias(peer)
            if alias:
                aliases.append(alias)
        except Exception as e:
            print('Error in %s, ignoring peer: %s' % (absFilename, e), file=sys.stderr)
        finally:
            peerFile.close()

print(json.dumps(aliases))
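`normalizeMac` lowercases, strips any existing colons, and re-inserts a colon after every byte; for example (inputs made up):

```
normalizeMac('00AABBCCDDEE')       # -> '00:aa:bb:cc:dd:ee'
normalizeMac('DE:AD:BE:EF:05:05')  # -> 'de:ad:be:ef:05:05'
```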

View file

@@ -1,112 +0,0 @@
#!/usr/bin/env python2
from __future__ import print_function

import json
import os
import sys

if len(sys.argv) != 2:
    print('usage: ' + sys.argv[0] + ' /path/to/peers')
    sys.exit(1)

peersDir = sys.argv[1]


def normalizeMac(mac):
    mac = mac.lower()
    normalized = ''
    n = 0
    for c in mac:
        if c != ':':
            if n > 0 and n % 2 == 0:
                normalized = normalized + ':'
            normalized = normalized + c
            n += 1
    return normalized


def toAlias(peer):
    alias = {}
    if not (peer.has_key('name') and peer.has_key('mac')):
        return None

    name = peer['name']
    mac = peer['mac']

    alias['node_id'] = mac.replace(':', '')
    alias['hostname'] = name

    if peer.has_key('geo'):
        geo = peer['geo']
        location = {}

        if geo.has_key('lon'): location['longitude'] = geo['lon']
        if geo.has_key('lat'): location['latitude'] = geo['lat']

        alias['location'] = location

    #alias['network'] = {}
    #alias['network']['mesh_interfaces'] = [mac]

    return {'nodeinfo': alias}

aliases = {}
for filename in os.listdir(peersDir):
    if len(filename) == 0 or filename[0] == '.':
        continue

    isGateway = False
    absFilename = peersDir + '/' + filename
    if os.path.isfile(absFilename):
        peerFile = open(absFilename, 'r')
        try:
            peerLines = peerFile.readlines()
            peer = {}
            for line in peerLines:
                parts = line.split()
                if len(parts) > 2:
                    if parts[1] == 'Knotenname:':
                        peer['name'] = parts[2]
                    elif parts[0] == 'remote':
                        isGateway = True
                    elif parts[1] == 'MAC:':
                        peer['mac'] = normalizeMac(parts[2])
                    elif parts[1] == 'Koordinaten:' and len(parts) > 3:
                        try:
                            peer['geo'] = {'lat': float(parts[2]), 'lon': float(parts[3])}
                        except ValueError:
                            print('Error in %s: Invalid coordinates: %s' % (absFilename, parts[2:4]), file=sys.stderr)
                elif len(parts) == 2 and parts[0] == 'key':
                    keyParts = parts[1].split('"')
                    if len(keyParts) > 1:
                        peer['vpn'] = keyParts[1].lower()

            if isGateway:
                continue

            alias = toAlias(peer)
            if alias:
                tmpid = alias['nodeinfo']['node_id']
                # alias['nodeinfo'].pop('node_id')
                aliases[tmpid] = alias
        except Exception as e:
            print('Error in %s, ignoring peer: %s' % (absFilename, e), file=sys.stderr)
        finally:
            peerFile.close()

print(json.dumps(aliases))

View file

@@ -1,40 +0,0 @@
import os
import subprocess

from lib.RRD import DS, RRA, RRD


class GlobalRRD(RRD):
    ds_list = [
        # Number of nodes available
        DS('nodes', 'GAUGE', 120, 0, float('NaN')),
        # Number of clients available
        DS('clients', 'GAUGE', 120, 0, float('NaN')),
    ]
    rra_list = [
        # 2 hours of 1 minute samples
        RRA('AVERAGE', 0.5, 1, 120),
        # 31 days of 1 hour samples
        RRA('AVERAGE', 0.5, 60, 744),
        # ~5 years of 1 day samples
        RRA('AVERAGE', 0.5, 1440, 1780),
    ]

    def __init__(self, directory):
        super().__init__(os.path.join(directory, "nodes.rrd"))
        self.ensure_sanity(self.ds_list, self.rra_list, step=60)

    # TODO: fix this, python does not support function overloading
    def update(self, node_count, client_count):
        super().update({'nodes': node_count, 'clients': client_count})

    def graph(self, filename, timeframe):
        args = ["rrdtool", 'graph', filename,
                '-s', '-' + timeframe,
                '-w', '800',
                '-h', '400',
                'DEF:nodes=' + self.filename + ':nodes:AVERAGE',
                'LINE1:nodes#F00:nodes\\l',
                'DEF:clients=' + self.filename + ':clients:AVERAGE',
                'LINE2:clients#00F:clients']
        subprocess.check_output(args)

View file

@@ -1,61 +0,0 @@
import os
import subprocess

from lib.RRD import DS, RRA, RRD


class NodeRRD(RRD):
    ds_list = [
        DS('upstate', 'GAUGE', 120, 0, 1),
        DS('clients', 'GAUGE', 120, 0, float('NaN')),
    ]
    rra_list = [
        # 2 hours of 1 minute samples
        RRA('AVERAGE', 0.5, 1, 120),
        # 5 days of 5 minute samples
        RRA('AVERAGE', 0.5, 5, 1440),
        # 30 days of 1 hour samples
        RRA('AVERAGE', 0.5, 60, 720),
        # 1 year of 12 hour samples
        RRA('AVERAGE', 0.5, 720, 730),
    ]

    def __init__(self, filename, node=None):
        """
        Create a new RRD for a given node.

        If the RRD isn't supposed to be updated, the node can be omitted.
        """
        self.node = node
        super().__init__(filename)
        self.ensure_sanity(self.ds_list, self.rra_list, step=60)

    @property
    def imagename(self):
        return "{basename}.png".format(
            basename=os.path.basename(self.filename).rsplit('.', 2)[0])

    # TODO: fix this, python does not support function overloading
    def update(self):
        super().update({'upstate': int(self.node['flags']['online']),
                        'clients': self.node['statistics']['clients']})

    def graph(self, directory, timeframe):
        """
        Create a graph in the given directory. The file will be named
        basename.png if the RRD file is named basename.rrd
        """
        args = ['rrdtool', 'graph', os.path.join(directory, self.imagename),
                '-s', '-' + timeframe,
                '-w', '800',
                '-h', '400',
                '-l', '0',
                '-y', '1:1',
                'DEF:clients=' + self.filename + ':clients:AVERAGE',
                'VDEF:maxc=clients,MAXIMUM',
                # c: the client count, with unknown samples coerced to 0
                'CDEF:c=0,clients,ADDNAN',
                # d: 0 where data exists, the graph maximum where data is missing
                'CDEF:d=clients,UN,maxc,UN,1,maxc,IF,*',
                'AREA:c#0F0:up\\l',
                'AREA:d#F00:down\\l',
                'LINE1:c#00F:clients connected\\l']
        subprocess.check_output(args)
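For orientation, roughly how `lib/rrddb.py` drives this class (paths and values are made up; `rrdtool` must be on the PATH):

```
node = {'flags': {'online': True}, 'statistics': {'clients': 23}}  # minimal node dict
rrd = NodeRRD('/tmp/nodedb/0123456789ab.rrd', node)  # creates the file if needed
rrd.update()                    # feeds upstate=1, clients=23 at time 'N' (now)
rrd.graph('/tmp/images', '1d')  # writes /tmp/images/0123456789ab.png
```

Because missing samples render as a full-height red "down" area behind the green client area, gaps in the data read as node downtime at a glance.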

View file

@@ -1,346 +0,0 @@
import subprocess
import re
import os
from operator import xor, eq
from functools import reduce
from itertools import starmap
import math


class RRDIncompatibleException(Exception):
    """
    Is raised when an RRD doesn't have the desired definition and cannot be
    upgraded to it.
    """
    pass


class RRDOutdatedException(Exception):
    """
    Is raised when an RRD doesn't have the desired definition, but can be
    upgraded to it.
    """
    pass


if not hasattr(__builtins__, "FileNotFoundError"):
    class FileNotFoundError(Exception):
        pass
class RRD(object):
    """
    An RRD is a Round Robin Database, a database which forgets old data and
    aggregates multiple records into new ones.

    It contains multiple Data Sources (DS) which can be thought of as columns,
    and Round Robin Archives (RRA) which can be thought of as tables with the
    DS as columns and time-dependent rows.
    """

    # rra[2].cdp_prep[0].value = 1,8583033333e+03
    _info_regex = re.compile(r"""
        (?P<section>[a-z_]+)
        \[ (?P<key>[a-zA-Z0-9_]+) \]
        \.
        |
        (?P<name>[a-z_]+)
        \s*=\s*
        "? (?P<value>.*?) "?
        $""", re.X)
    _cached_info = None

    def _exec_rrdtool(self, cmd, *args, **kwargs):
        pargs = ["rrdtool", cmd, self.filename]
        for k, v in kwargs.items():
            pargs.extend(["--" + k, str(v)])
        pargs.extend(args)
        subprocess.check_output(pargs)

    def __init__(self, filename):
        self.filename = filename

    def ensure_sanity(self, ds_list, rra_list, **kwargs):
        """
        Create or upgrade the RRD file if necessary to contain all DS in
        ds_list. If it needs to be created, the RRAs in rra_list and any kwargs
        will be used for creation. Note that RRAs and options of an existing
        database are NOT modified!
        """
        try:
            self.check_sanity(ds_list)
        except FileNotFoundError:
            self.create(ds_list, rra_list, **kwargs)
        except RRDOutdatedException:
            self.upgrade(ds_list)

    def check_sanity(self, ds_list=()):
        """
        Check if the RRD file exists and contains (at least) the DS listed in
        ds_list.
        """
        if not os.path.exists(self.filename):
            raise FileNotFoundError(self.filename)
        info = self.info()
        if set(ds_list) - set(info['ds'].values()) != set():
            for ds in ds_list:
                if ds.name in info['ds'] and \
                        ds.type != info['ds'][ds.name].type:
                    raise RRDIncompatibleException(
                        "{} is {} but should be {}".format(
                            ds.name, ds.type, info['ds'][ds.name].type))
            else:
                # for..else: no incompatible DS found, the RRD is merely outdated
                raise RRDOutdatedException()
    def upgrade(self, dss):
        """
        Upgrade the DS definitions (!) of this RRD.
        (To update its values, use update())

        The list dss contains DS objects to be updated or added. The
        parameters of a DS can be changed, but not its type. New DS are always
        added at the end in the order of their appearance in the list.

        This is done internally via an rrdtool dump -> rrdtool restore and
        modifying the dump on the fly.
        """
        info = self.info()
        new_ds = list(info['ds'].values())
        new_ds.sort(key=lambda ds: ds.index)
        for ds in dss:
            if ds.name in info['ds']:
                old_ds = info['ds'][ds.name]
                if info['ds'][ds.name].type != ds.type:
                    raise RuntimeError(
                        "Cannot convert existing DS '{}' "
                        "from type '{}' to '{}'".format(
                            ds.name, old_ds.type, ds.type))
                ds.index = old_ds.index
                new_ds[ds.index] = ds
            else:
                ds.index = len(new_ds)
                new_ds.append(ds)
        added_ds_num = len(new_ds) - len(info['ds'])

        dump = subprocess.Popen(
            ["rrdtool", "dump", self.filename],
            stdout=subprocess.PIPE)
        restore = subprocess.Popen(
            ["rrdtool", "restore", "-", self.filename + ".new"],
            stdin=subprocess.PIPE)
        echo = True
        ds_definitions = True
        for line in dump.stdout:
            if ds_definitions and b'<ds>' in line:
                echo = False
            if b'<!-- Round Robin Archives -->' in line:
                ds_definitions = False
                for ds in new_ds:
                    restore.stdin.write(bytes("""
        <ds>
            <name> %s </name>
            <type> %s </type>
            <minimal_heartbeat>%i</minimal_heartbeat>
            <min>%s</min>
            <max>%s</max>

            <!-- PDP Status -->
            <last_ds>%s</last_ds>
            <value>%s</value>
            <unknown_sec> %i </unknown_sec>
        </ds>
""" % (ds.name,
       ds.type,
       ds.args[0],
       ds.args[1],
       ds.args[2],
       ds.last_ds,
       ds.value,
       ds.unknown_sec), "utf-8"))

            if b'</cdp_prep>' in line:
                restore.stdin.write(added_ds_num * b"""
            <ds>
                <primary_value> NaN </primary_value>
                <secondary_value> NaN </secondary_value>
                <value> NaN </value>
                <unknown_datapoints> 0 </unknown_datapoints>
            </ds>
""")

            # echoing of input line
            if echo:
                restore.stdin.write(
                    line.replace(
                        b'</row>',
                        (added_ds_num * b'<v>NaN</v>') + b'</row>'
                    )
                )

            if ds_definitions and b'</ds>' in line:
                echo = True
        dump.stdout.close()
        restore.stdin.close()
        dump.wait()
        restore.wait()
        os.rename(self.filename + ".new", self.filename)
        self._cached_info = None
    def create(self, ds_list, rra_list, **kwargs):
        """
        Create a new RRD file with the specified list of RRAs and DSs.

        Any kwargs are passed as --key=value to rrdtool create.
        """
        self._exec_rrdtool(
            "create",
            *map(str, rra_list + ds_list),
            **kwargs
        )
        self._cached_info = None

    def update(self, V):
        """
        Update the RRD with new values V.

        V can be either list or dict:
        * If it is a dict, its keys must be DS names in the RRD and it is
          ensured that the correct DS are updated with the correct values, by
          passing a "template" to rrdtool update (see man rrdupdate).
        * If it is a list, no template is generated and the order of the
          values in V must be the same as that of the DS in the RRD.
        """
        try:
            args = ['N:' + ':'.join(map(str, V.values()))]
            kwargs = {'template': ':'.join(V.keys())}
        except AttributeError:
            args = ['N:' + ':'.join(map(str, V))]
            kwargs = {}
        self._exec_rrdtool("update", *args, **kwargs)
        self._cached_info = None
    def info(self):
        """
        Return a dictionary with information about the RRD.

        See `man rrdinfo` for more details.
        """
        if self._cached_info:
            return self._cached_info
        env = os.environ.copy()
        env["LC_ALL"] = "C"
        proc = subprocess.Popen(
            ["rrdtool", "info", self.filename],
            stdout=subprocess.PIPE,
            env=env
        )
        out, err = proc.communicate()
        out = out.decode()
        info = {}
        for line in out.splitlines():
            base = info
            for match in self._info_regex.finditer(line):
                section, key, name, value = match.group(
                    "section", "key", "name", "value")
                if section and key:
                    try:
                        key = int(key)
                    except ValueError:
                        pass
                    if section not in base:
                        base[section] = {}
                    if key not in base[section]:
                        base[section][key] = {}
                    base = base[section][key]
                if name and value:
                    try:
                        base[name] = int(value)
                    except ValueError:
                        try:
                            base[name] = float(value)
                        except:
                            base[name] = value
        dss = {}
        for name, ds in info['ds'].items():
            ds_obj = DS(name, ds['type'], ds['minimal_heartbeat'],
                        ds['min'], ds['max'])
            ds_obj.index = ds['index']
            ds_obj.last_ds = ds['last_ds']
            ds_obj.value = ds['value']
            ds_obj.unknown_sec = ds['unknown_sec']
            dss[name] = ds_obj
        info['ds'] = dss
        rras = []
        for rra in info['rra'].values():
            rras.append(RRA(rra['cf'], rra['xff'],
                            rra['pdp_per_row'], rra['rows']))
        info['rra'] = rras
        self._cached_info = info
        return info
class DS(object):
    """
    DS stands for Data Source and represents one line of data points in a
    Round Robin Database (RRD).
    """
    name = None
    type = None
    args = []
    index = -1
    last_ds = 'U'
    value = 0
    unknown_sec = 0

    def __init__(self, name, dst, *args):
        self.name = name
        self.type = dst
        self.args = args

    def __str__(self):
        return "DS:%s:%s:%s" % (
            self.name,
            self.type,
            ":".join(map(str, self._nan_to_u_args()))
        )

    def __repr__(self):
        return "%s(%r, %r, %s)" % (
            self.__class__.__name__,
            self.name,
            self.type,
            ", ".join(map(repr, self.args))
        )

    def __eq__(self, other):
        return all(starmap(eq, zip(self.compare_keys(), other.compare_keys())))

    def __hash__(self):
        return reduce(xor, map(hash, self.compare_keys()))

    def _nan_to_u_args(self):
        return tuple(
            'U' if type(arg) is float and math.isnan(arg)
            else arg
            for arg in self.args
        )

    def compare_keys(self):
        return self.name, self.type, self._nan_to_u_args()


class RRA(object):
    def __init__(self, cf, *args):
        self.cf = cf
        self.args = args

    def __str__(self):
        return "RRA:%s:%s" % (self.cf, ":".join(map(str, self.args)))

    def __repr__(self):
        return "%s(%r, %s)" % (
            self.__class__.__name__,
            self.cf,
            ", ".join(map(repr, self.args))
        )
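Taken together, a minimal sketch of the intended API (file name and values are made up; `rrdtool` must be on the PATH):

```
from lib.RRD import DS, RRA, RRD

db = RRD('/tmp/example.rrd')
db.ensure_sanity([DS('clients', 'GAUGE', 120, 0, float('NaN'))],
                 [RRA('AVERAGE', 0.5, 1, 120)], step=60)
db.update({'clients': 42})  # dict form makes update() pass --template clients
```

`ensure_sanity` creates the file on first use, upgrades it via dump/restore when DS definitions were added, and raises `RRDIncompatibleException` only when an existing DS would have to change type.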

View file

@@ -13,7 +13,7 @@ class Alfred(object):
            raise RuntimeError('alfred: invalid unix socket path given')

    def _fetch(self, data_type):
        cmd = ['/usr/local/bin/alfred-json',
        cmd = ['alfred-json',
               '-z',
               '-f', 'json',
               '-r', str(data_type)]

View file

@@ -96,3 +96,7 @@ if __name__ == "__main__":
    bc = Batman()
    vd = bc.vis_data()
    gw = bc.gateway_list()
    for x in vd:
        print(x)
    print(gw)
    print(bc.gateway_mode())

View file

@@ -25,6 +25,7 @@ def mark_vpn(graph, vpn_macs):
    components = map(frozenset, nx.weakly_connected_components(graph))
    components = filter(vpn_macs.intersection, components)
    nodes = reduce(lambda a, b: a | b, components, set())

    for node in nodes:
        for k, v in graph[node].items():
            v['vpn'] = True
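In words: every weakly connected component of the batman-adv graph that contains at least one known VPN MAC gets the outgoing edges of all its nodes flagged with `vpn = True`. A toy check (MACs made up):

```
import networkx as nx
from lib.graph import mark_vpn

g = nx.DiGraph()
g.add_edge('02:00:00:00:00:01', '02:00:00:00:00:02')
mark_vpn(g, frozenset(['02:00:00:00:00:01']))
assert g['02:00:00:00:00:01']['02:00:00:00:00:02']['vpn']
```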

View file

@@ -13,9 +13,6 @@ def export_nodelist(now, nodedb):
        node_out["status"] = dict()
        node_out["status"]["online"] = node["flags"]["online"]

        if "firstseen" in node:
            node_out["status"]["firstcontact"] = node["firstseen"]

        if "lastseen" in node:
            node_out["status"]["lastcontact"] = node["lastseen"]

View file

@@ -6,39 +6,11 @@ from functools import reduce

def build_mac_table(nodes):
    macs = dict()
    for node_id, node in nodes.items():
        try:
            macs[node['network']['mac']] = node_id
        except KeyError:
            pass

        try:
            for mac in node['nodeinfo']['network']['mesh_interfaces']:
                macs[mac] = node_id
        except KeyError:
            pass

        try:
            for mac in node['nodeinfo']['network']['mesh']['bat0']['interfaces']['wireless']:
                macs[mac] = node_id
        except KeyError:
            pass

        try:
            for mac in node['nodeinfo']['network']['mesh']['bat0']['interfaces']['tunnel']:
                macs[mac] = node_id
        except KeyError:
            pass

        try:
            for mac in node['nodeinfo']['network']['mesh']['bat-ffhh']['interfaces']['tunnel']:
                macs[mac] = node_id
        except KeyError:
            pass

        try:
            for mac in node['nodeinfo']['network']['mesh']['bat0']['interfaces']['other']:
                macs[mac] = node_id
        except KeyError:
            pass

    return macs
@@ -65,23 +37,12 @@ def mark_online(node, now):
    node['flags']['online'] = True


def overrideFields(dest, src, fields):
    for field in fields:
        if field in src:
            dest[field] = src[field]
        else:
            dest.pop(field, None)


def import_nodeinfo(nodes, nodeinfos, now, assume_online=False, statics=False):
def import_nodeinfo(nodes, nodeinfos, now, assume_online=False):
    for nodeinfo in filter(lambda d: 'node_id' in d, nodeinfos):
        node = nodes.setdefault(nodeinfo['node_id'], {'flags': {'online': False, 'gateway': False}})
        if statics:
            node['nodeinfo'] = node.setdefault('nodeinfo', {})
            overrideFields(node['nodeinfo'], nodeinfo, ['hostname', 'location', 'node_id'])
        else:
        node = nodes.setdefault(nodeinfo['node_id'], {'flags': dict()})
            node['nodeinfo'] = nodeinfo
        node['flags']['online'] = False
        node['flags']['gateway'] = False

        if assume_online:
            mark_online(node, now)
@@ -98,7 +59,7 @@ def import_statistics(nodes, stats):
            node['statistics'][target] = f(reduce(dict.__getitem__,
                                                  source,
                                                  statistics))
        except (KeyError, TypeError, ZeroDivisionError):
        except (KeyError, TypeError):
            pass

    macs = build_mac_table(nodes)
@@ -112,7 +73,6 @@ def import_statistics(nodes, stats):
        add(node, stats, 'memory_usage', ['memory'],
            lambda d: 1 - d['free'] / d['total'])
        add(node, stats, 'rootfs_usage', ['rootfs_usage'])
        add(node, stats, 'traffic', ['traffic'])
def import_mesh_ifs_vis_data(nodes, vis_data):
@@ -137,34 +97,12 @@ def import_mesh_ifs_vis_data(nodes, vis_data):
    for v in mesh_nodes:
        node = v[0]

        ifs = set()
        try:
            ifs = ifs.union(set(node['nodeinfo']['network']['mesh_interfaces']))
            mesh_ifs = set(node['nodeinfo']['network']['mesh_interfaces'])
        except KeyError:
            pass
            mesh_ifs = set()

        try:
            ifs = ifs.union(set(node['nodeinfo']['network']['mesh']['bat0']['interfaces']['wireless']))
        except KeyError:
            pass

        try:
            ifs = ifs.union(set(node['nodeinfo']['network']['mesh']['bat0']['interfaces']['tunnel']))
        except KeyError:
            pass

        try:
            ifs = ifs.union(set(node['nodeinfo']['network']['mesh']['bat-ffhh']['interfaces']['tunnel']))
        except KeyError:
            pass

        try:
            ifs = ifs.union(set(node['nodeinfo']['network']['mesh']['bat0']['interfaces']['other']))
        except KeyError:
            pass

        node['nodeinfo']['network']['mesh_interfaces'] = list(ifs | v[1])
        node['nodeinfo']['network']['mesh_interfaces'] = list(mesh_ifs | v[1])
def import_vis_clientcount(nodes, vis_data):
@@ -180,6 +118,7 @@ def import_vis_clientcount(nodes, vis_data):

def mark_gateways(nodes, gateways):
    macs = build_mac_table(nodes)
    gateways = filter(lambda d: d in macs, gateways)

    for node in map(lambda d: nodes[macs[d]], gateways):
        node['flags']['gateway'] = True
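The `add` helper in `import_statistics` resolves a nested key path with `reduce(dict.__getitem__, source, statistics)`; standalone, the idiom looks like this (sample data made up):

```
from functools import reduce

statistics = {'memory': {'free': 25, 'total': 100}}
memory = reduce(dict.__getitem__, ['memory'], statistics)
print(1 - memory['free'] / memory['total'])  # memory_usage -> 0.75
```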

View file

@@ -1,54 +0,0 @@
#!/usr/bin/env python3
import time
import os

from lib.GlobalRRD import GlobalRRD
from lib.NodeRRD import NodeRRD


class RRD(object):
    def __init__(self,
                 database_directory,
                 image_path,
                 display_time_global="7d",
                 display_time_node="1d"):
        self.dbPath = database_directory
        self.globalDb = GlobalRRD(self.dbPath)
        self.imagePath = image_path
        self.displayTimeGlobal = display_time_global
        self.displayTimeNode = display_time_node

        # round down to full minutes (integer division; '/' would yield a float here)
        self.currentTimeInt = (int(time.time()) // 60) * 60
        self.currentTime = str(self.currentTimeInt)

        try:
            os.stat(self.imagePath)
        except OSError:
            os.mkdir(self.imagePath)

    def update_database(self, nodes):
        online_nodes = dict(filter(
            lambda d: d[1]['flags']['online'], nodes.items()))
        client_count = sum(map(
            lambda d: d['statistics']['clients'], online_nodes.values()))

        self.globalDb.update(len(online_nodes), client_count)
        for node_id, node in online_nodes.items():
            rrd = NodeRRD(os.path.join(self.dbPath, node_id + '.rrd'), node)
            rrd.update()

    def update_images(self):
        self.globalDb.graph(os.path.join(self.imagePath, "globalGraph.png"),
                            self.displayTimeGlobal)

        nodedb_files = os.listdir(self.dbPath)

        for file_name in nodedb_files:
            if not os.path.isfile(os.path.join(self.dbPath, file_name)):
                continue

            node_name = os.path.basename(file_name).split('.')
            if node_name[1] == 'rrd' and not node_name[0] == "nodes":
                rrd = NodeRRD(os.path.join(self.dbPath, file_name))
                rrd.graph(self.imagePath, self.displayTimeNode)

View file

@@ -1,19 +0,0 @@
import json


def validate_nodeinfos(nodeinfos):
    result = []

    for nodeinfo in nodeinfos:
        if validate_nodeinfo(nodeinfo):
            result.append(nodeinfo)

    return result


def validate_nodeinfo(nodeinfo):
    if 'location' in nodeinfo:
        if 'latitude' not in nodeinfo['location'] or 'longitude' not in nodeinfo['location']:
            return False

    return True
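The validator only drops records whose `location` is present but incomplete; everything else passes through (sample records made up):

```
nodeinfos = [
    {'node_id': 'a', 'location': {'latitude': 53.5, 'longitude': 10.0}},
    {'node_id': 'b', 'location': {'latitude': 53.5}},  # longitude missing
    {'node_id': 'c'},                                  # no location at all
]
[n['node_id'] for n in validate_nodeinfos(nodeinfos)]  # -> ['a', 'c']
```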

View file

@@ -1,7 +0,0 @@
#!/bin/bash
FFMAPPATH='/opt/ffmap-backend/'
PEERS="/etc/fastd/ffhh-mesh-vpn/peers"
python2 $FFMAPPATH/generate_aliases.py $PEERS > $FFMAPPATH/aliases.json
#python3 $FFMAPPATH/backend.py -d /var/www/meshviewer/ --aliases $FFMAPPATH/aliases.json $FFMAPPATH/gateway.json -m bat0:/var/run/alfred.sock -p 30 --vpn de:ad:be:ff:01:01 --vpn de:ad:be:ff:05:05 --vpn de:ad:be:ff:05:06 --vpn de:ad:be:ff:03:03 --vpn de:ad:be:ff:22:22 --vpn de:ad:be:ff:22:23 --vpn de:ad:be:ff:88:88 --vpn de:ad:be:ff:88:89 --vpn de:ad:bf:ff:88:88 --vpn de:ad:bf:ff:22:22 --vpn de:ad:bf:ff:03:03 --vpn de:ad:bf:ff:05:05 --vpn de:ad:bf:ff:01:01 --vpn de:ad:be:fc:03:03 --vpn 00:16:3e:53:75:0d --vpn de:ad:be:fc:05:05 --vpn de:ad:be:fc:01:01 --vpn de:ad:be:ef:03:03 --vpn de:ad:be:ef:01:01 --vpn de:ad:be:ef:05:05 --vpn 00:16:3e:fb:9d:8d --vpn 00:16:3e:fb:9d:9d
python3 $FFMAPPATH/backend.py -d /var/www/meshviewer/ --aliases $FFMAPPATH/aliases.json $FFMAPPATH/gateway.json -m bat0:/var/run/alfred.sock -p 30 --vpn de:ad:be:ff:01:01 de:ad:be:ff:05:05 de:ad:be:ff:05:06 de:ad:be:ff:03:03 de:ad:be:ff:22:22 de:ad:be:ff:22:23 de:ad:be:ff:88:88 de:ad:be:ff:88:89 de:ad:bf:ff:88:88 de:ad:bf:ff:22:22 de:ad:bf:ff:03:03 de:ad:bf:ff:05:05 de:ad:bf:ff:01:01 de:ad:be:fc:03:03 00:16:3e:53:75:0d de:ad:be:fc:05:05 de:ad:be:fc:01:01 de:ad:be:ef:03:03 de:ad:be:ef:01:01 de:ad:be:ef:05:05 00:16:3e:fb:9d:8d 00:16:3e:fb:9d:9d

View file

@@ -1,32 +0,0 @@
#!/usr/bin/env python
# import libraries
import time
import datetime
import json

import urllib2

# fetch the map data
Datei = urllib2.urlopen('https://map.hamburg.freifunk.net/nodes.json')
Datei_Sued = urllib2.urlopen('https://map.hamburg.freifunk.net/hhsued/mv1/nodes.json')

Text = Datei.read()
Knotenzahl = Text.count('"online": true')
Text = Datei_Sued.read()
Knotenzahl = Knotenzahl + Text.count('"online":true')

# get the current time
thetime = datetime.datetime.now().isoformat()

ffhh = None

# load the Freifunk API file and parse its JSON
with open('/var/www/meta/ffhh.json', 'r') as fp:
    ffhh = json.load(fp)

# set the timestamp and node count attributes
ffhh['state']['lastchange'] = thetime
ffhh['state']['nodes'] = Knotenzahl

# write the Freifunk API file back with the changed values
with open('/var/www/meta/ffhh.json', 'w') as fp:
    json.dump(ffhh, fp, indent=2, separators=(',', ': '))