Browse Source

Reworked the import process to make it fast

master
Johann Schmitz 5 years ago
parent
commit
aae8ad9243
Signed by: ercpe GPG Key ID: A084064277C501ED
  1. 289
      src/djunin/management/commands/load_munin.py
  2. 130
      src/djunin/objects.py
  3. 284
      src/djunin/updater.py
  4. 10
      src/djunin/views/base.py

289
src/djunin/management/commands/load_munin.py

@ -1,294 +1,9 @@
# -*- coding: utf-8 -*-
import argparse
import logging
import shlex
import itertools
from django.core.management.base import BaseCommand
from django.db import transaction
from django.db.models import Q
from django.db.models.fields import NOT_PROVIDED, CharField, TextField
from djunin.models import Node, Graph, DataRow
from djunin.objects import MuninDataFile
from bulk_update.helper import bulk_update
from djunin.updater import Updater
# Use the dotted module path as the logger name (logging convention);
# __file__ would produce a filesystem path as the logger name.
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Synchronize Node/Graph/DataRow database rows from the munin datafile.

    The whole import runs in a single transaction: nodes first, then graphs
    (root graphs before subgraphs), then datarows. Objects missing from the
    datafile are deleted afterwards.
    """

    def handle(self, *args, **options):
        # All-or-nothing import: any failure rolls the whole sync back.
        with transaction.atomic():
            datafile = MuninDataFile()
            self._process_nodes(datafile)
            self._process_graphs(datafile)
            self._process_datarows(datafile)

    def _process_nodes(self, datafile):
        """Create nodes present in the datafile and delete all others."""
        logger.info("Processing nodes")

        logger.info("Creating nodes")
        # Materialize into a set once: O(1) membership tests instead of
        # re-evaluating the queryset for every node.
        existing_nodes = set(Node.objects.values_list('group', 'name'))
        Node.objects.bulk_create(
            Node(group=n.group, name=n.name)
            for n in datafile.nodes
            if (n.group, n.name) not in existing_nodes
        )

        logger.info("Removing nodes")
        # Delete every node that is neither pre-existing nor in the datafile.
        q_filter = None
        for g, n in itertools.chain(existing_nodes, ((n.group, n.name) for n in datafile.nodes)):
            f = Q(group=g, name=n)
            q_filter = f if q_filter is None else q_filter | f
        if q_filter:
            Node.objects.exclude(q_filter).delete()

    def _process_graphs(self, datafile):
        """Create/refresh root graphs and subgraphs, then drop stale ones."""
        logger.info("Processing graphs")
        # Index nodes once: (group, name) -> Node, instead of scanning a list
        # for every graph.
        node_index = {(n.group, n.name): n for n in Node.objects.all()}

        logger.info("Creating root graphs")
        existing_parent_graphs = set(Graph.objects.filter(parent=None).values_list('node__group', 'node__name', 'name'))
        Graph.objects.bulk_create(
            Graph(node=node_index[(group_name, node_name)], name=graph_name)
            for group_name, nodes in datafile.raw.items()
            for node_name, graphs in nodes.items()
            for graph_name, graph_data in graphs.items()
            if (group_name, node_name, graph_name) not in existing_parent_graphs
        )

        logger.info("Creating subgraphs")
        parent_index = {(p.node.group, p.node.name, p.name): p
                        for p in Graph.objects.filter(parent=None).select_related('node')}
        existing_subgraphs = set(Graph.objects.exclude(parent=None).values_list('node__group', 'node__name', 'parent__name', 'name'))
        Graph.objects.bulk_create(
            Graph(node=node_index[(group_name, node_name)],
                  parent=parent_index[(group_name, node_name, graph_name)],
                  name=subgraph_name)
            for group_name, nodes in datafile.raw.items()
            for node_name, graphs in nodes.items()
            for graph_name, graph_data in graphs.items()
            for subgraph_name, subgraph_data in graph_data.get('subgraphs', {}).items()
            if (group_name, node_name, graph_name, subgraph_name) not in existing_subgraphs
        )

        logger.info("Updating all graphs")
        all_graphs = Graph.objects.select_related('node', 'parent')
        for graph in all_graphs:
            graph_options = {}
            try:
                if graph.parent:
                    graph_options = datafile.raw[graph.node.group][graph.node.name][graph.parent.name]['subgraphs'][graph.name]['options']
                else:
                    graph_options = datafile.raw[graph.node.group][graph.node.name][graph.name]['options']
            except KeyError:
                logger.warning("No graph options found for %s", graph.name)

            # Start from the model defaults of all graph_* fields, then overlay
            # the datafile options and any values parsed out of graph_args.
            opts = self._get_model_attributes(Graph, lambda f: not f.name.startswith('graph_'))
            opts.update(graph_options)
            opts.update(self.parse_graph_args(graph_options.get('graph_args', '')))
            # host_name is node-level information, not a graph attribute
            opts.pop('host_name', None)
            opts['graph_category'] = (opts.get('graph_category', 'other') or 'other').lower()
            if not isinstance(opts['graph_scale'], bool):
                # the datafile represents booleans as yes/no strings
                opts['graph_scale'] = opts.get('graph_scale', 'yes').lower() == "yes"
            for k, v in opts.items():
                setattr(graph, k, v)
        bulk_update(all_graphs)

        logger.info("Deleting graphs")
        q_filter = None
        for g, n, p, name in all_graphs.values_list('node__group', 'node__name', 'parent__id', 'name'):
            f = Q(node__group=g, node__name=n, parent__id=p, name=name)
            q_filter = f if q_filter is None else q_filter | f
        if q_filter:
            Graph.objects.exclude(q_filter).delete()

    def _process_datarows(self, datafile):
        """Create/refresh datarows for root graphs and subgraphs, drop stale ones."""
        logger.info("Processing datarows")

        logger.info("... for root graphs")
        parent_graphs = Graph.objects.filter(parent=None).select_related('node')
        parent_index = {(p.node.group, p.node.name, p.name): p for p in parent_graphs}
        existing_parent_graph_datarows = set(DataRow.objects.filter(graph__in=parent_graphs).values_list(
            'graph__node__group', 'graph__node__name', 'graph__name', 'name'))
        DataRow.objects.bulk_create(
            DataRow(graph=parent_index[(group_name, node_name, graph_name)], name=datarow_name)
            for group_name, nodes in datafile.raw.items()
            for node_name, graphs in nodes.items()
            for graph_name, graph_data in graphs.items()
            for datarow_name, datarow_data in graph_data.get('datarows', {}).items()
            if (group_name, node_name, graph_name, datarow_name) not in existing_parent_graph_datarows
        )

        logger.info("... for subgraphs")
        subgraphs = Graph.objects.exclude(parent=None).select_related('node', 'parent')
        subgraph_index = {(sg.node.group, sg.node.name, sg.parent.name, sg.name): sg for sg in subgraphs}
        existing_subgraph_datarows = set(DataRow.objects.filter(graph__in=subgraphs).values_list(
            'graph__node__group', 'graph__node__name', 'graph__parent__name', 'graph__name', 'name'))
        DataRow.objects.bulk_create(
            DataRow(graph=subgraph_index[(group_name, node_name, graph_name, subgraph_name)], name=datarow_name)
            for group_name, nodes in datafile.raw.items()
            for node_name, graphs in nodes.items()
            for graph_name, graph_data in graphs.items()
            for subgraph_name, subgraph_data in graph_data.get('subgraphs', {}).items()
            for datarow_name, datarow_data in subgraph_data.get('datarows', {}).items()
            if (group_name, node_name, graph_name, subgraph_name, datarow_name) not in existing_subgraph_datarows
        )

        logger.info("Updating all datarows")
        all_datarows = DataRow.objects.select_related('graph', 'graph__parent', 'graph__node')
        for datarow in all_datarows:
            # BUGFIX: previously unbound after the KeyError below, which made the
            # following opts.update() raise NameError. Fall back to defaults,
            # mirroring the graph handling above.
            datarow_options = {}
            try:
                if datarow.graph.parent:
                    datarow_options = datafile.raw[datarow.graph.node.group][datarow.graph.node.name][datarow.graph.parent.name]['subgraphs'][datarow.graph.name]['datarows'][datarow.name]
                else:
                    datarow_options = datafile.raw[datarow.graph.node.group][datarow.graph.node.name][datarow.graph.name]['datarows'][datarow.name]
            except KeyError:
                logger.info("Missing datarow: %s", datarow.name)

            opts = self._get_model_attributes(DataRow, lambda f: f.name in ('graph', 'name'))
            opts.update(datarow_options)
            opts['rrdfile'] = self.get_rrdfilename(datarow.graph, datarow.name, datarow_options)
            if 'graph' in opts:
                # 'graph' is a yes/no flag in the datafile; the model's do_graph
                # field expects a boolean
                opts['do_graph'] = opts.pop('graph').lower() == "yes"
            DataRow.objects.filter(pk=datarow.pk).update(**opts)

        logger.info("Removing datarows")
        q_filter = None
        for g, n, pid, gid, name in all_datarows.values_list('graph__node__group', 'graph__node__name', 'graph__parent__id', 'graph__id', 'name'):
            f = Q(graph__node__group=g, graph__node__name=n, graph__parent__id=pid, graph__id=gid, name=name)
            q_filter = f if q_filter is None else q_filter | f
        if q_filter:
            DataRow.objects.exclude(q_filter).delete()

    def _get_model_attributes(self, clazz, exclude=None):
        """Map field name -> default value for all fields of *clazz*.

        The implicit ``id`` field and any field for which *exclude* returns
        True are skipped. Blank-able text fields default to '' (rather than
        None) so the values can be written back directly.
        """
        def _gen():
            for field in clazz._meta.fields:
                if field.name == 'id' or (exclude and exclude(field)):
                    continue
                default_value = field.default
                # identity check: NOT_PROVIDED is a sentinel
                if default_value is NOT_PROVIDED:
                    if isinstance(field, (CharField, TextField)) and field.blank:
                        default_value = ''
                    else:
                        default_value = None
                yield field.name, default_value
        return dict(_gen())

    def parse_graph_args(self, args_s):
        """Parse a munin ``graph_args`` string (rrdgraph options).

        Returns a dict of the supported options prefixed with ``graph_args_``;
        unsupported or unparsable arguments yield an empty dict.
        """
        if not (args_s or "").strip():
            return {}
        # Mirror rrdgraph's command line options (add_help=False frees -h).
        parser = argparse.ArgumentParser(add_help=False)
        parser.add_argument('-s', '--start')
        parser.add_argument('-e', '--end')
        parser.add_argument('-S', '--step')
        parser.add_argument('-t', '--title')
        parser.add_argument('-v', '--vertical-label')
        parser.add_argument('-w', '--width')
        parser.add_argument('-h', '--height')
        parser.add_argument('-j', '--only-graph', action='store_true')
        parser.add_argument('-D', '--full-size-mode', action='store_true')
        parser.add_argument('-u', '--upper-limit')
        parser.add_argument('-l', '--lower-limit')
        parser.add_argument('-r', '--rigid', action='store_true')
        parser.add_argument('-A', '--alt-autoscale', action='store_true')
        parser.add_argument('-J', '--alt-autoscale-min')
        parser.add_argument('-M', '--alt-autoscale-max')
        parser.add_argument('-N', '--no-gridfit')
        parser.add_argument('-x', '--x-grid')
        parser.add_argument('--week-fmt')
        parser.add_argument('-y', '--y-grid')
        parser.add_argument('--left-axis-formatter')
        parser.add_argument('--left-axis-format')
        parser.add_argument('-Y', '--alt-y-grid', action='store_true')
        parser.add_argument('-o', '--logarithmic', action='store_true')
        parser.add_argument('-X', '--units-exponent')
        parser.add_argument('-L', '--units-length')
        parser.add_argument('--units')
        parser.add_argument('--right-axis')
        parser.add_argument('--right-axis-label')
        parser.add_argument('--right-axis-formatter')
        parser.add_argument('--right-axis-format')
        parser.add_argument('-g', '--no-legend', action='store_true')
        parser.add_argument('-F', '--force-rules-legend', action='store_true')
        parser.add_argument('--legend-position')
        parser.add_argument('--legend-direction')
        parser.add_argument('-z', '--lazy', action='store_true')
        parser.add_argument('-d', '--daemon')
        parser.add_argument('-f', '--imginfo')
        parser.add_argument('-c', '--color')
        parser.add_argument('--grid-dash')
        parser.add_argument('--border')
        parser.add_argument('--dynamic-labels', action='store_true')
        parser.add_argument('-m', '--zoom')
        parser.add_argument('-n', '--font')
        parser.add_argument('-R', '--font-render-mode')
        parser.add_argument('-B', '--font-smoothing-threshold')
        parser.add_argument('-P', '--pango-markup', action='store_true')
        parser.add_argument('-G', '--graph-render-mode')
        parser.add_argument('-E', '--slope-mode', action='store_true')
        parser.add_argument('-a', '--imgformat')
        parser.add_argument('-i', '--interlaced', action='store_true')
        parser.add_argument('-T', '--tabwidth')
        parser.add_argument('-b', '--base')
        parser.add_argument('-W', '--watermark')
        parser.add_argument('-Z', '--use-nan-for-all-missing-data', action='store_true')
        try:
            x = parser.parse_args(shlex.split(args_s))
            supported_args = 'base', 'lower_limit', 'upper_limit'
            return dict(("graph_args_%s" % a, getattr(x, a, None)) for a in supported_args if getattr(x, a, None))
        except (Exception, SystemExit):
            # argparse raises SystemExit on unknown/invalid arguments; treat
            # anything unparsable as "no extra options".
            logger.exception("Could not parse args '%s'", args_s)
            return {}

    def get_rrdfilename(self, graph, datarow_name, datarow_options):
        """Return the rrd filename munin uses for *datarow_name* of *graph*.

        The first letter of the datarow type (g/c/d/a) is part of the name.
        """
        dr_type_name = datarow_options.get('type', 'GAUGE')
        pattern = "{group}/{node}-{graph}-{datarow}-{datarow_type}.rrd"
        if graph.parent:
            pattern = "{group}/{node}-{graph}-{subgraph}-{datarow}-{datarow_type}.rrd"
        return pattern.format(group=graph.node.group, node=graph.node.name,
                              graph=graph.parent.name if graph.parent else graph.name,
                              subgraph=graph.name if graph.parent else None,
                              datarow=datarow_name, datarow_type=dr_type_name.lower()[0])
# NOTE(review): stray top-level call in this chunk — judging by the diff header
# it is the new body of Command.handle after the rework; at module level it
# would execute on import. Confirm placement.
Updater().run()

130
src/djunin/objects.py

@ -1,130 +0,0 @@
# -*- coding: utf-8 -*-
import itertools
import os
import re
from django.conf import settings
# Matches one datafile line: "group;node:graph.<key> <value>". <key> may itself
# contain further dot-separated chunks (subgraph/datarow components), split later.
line_re = re.compile("^(?P<group_name>.+);(?P<node_name>[^:]+):(?P<graph>[^\.]+)\.(?P<key>[^\s]+?) (?P<value>.*)$")
class Node(object):
    """A munin node (host) within a group, holding its parsed graphs."""

    def __init__(self, group, name, graphs=None):
        self.group = group
        self.name = name
        self.graphs = graphs or []

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.__str__()


class Graph(object):
    """A munin graph or subgraph: options, datarows and nested subgraphs."""

    def __init__(self, name, options=None, data_rows=None, subgraphs=None):
        self.name = name
        self.options = options or {}
        self.datarows = data_rows or {}
        self.subgraphs = subgraphs or {}

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.__str__()


class MuninDataFile(object):
    """Parsed representation of munin's ``datafile``.

    Exposes both the raw nested dict (``raw``) and an object tree of
    Node/Graph instances (``nodes``).
    """

    # "group;node:graph.<key> <value>"; <key> may contain further dot-separated
    # chunks (subgraph and/or datarow components).
    _LINE_RE = re.compile(r"^(?P<group_name>.+);(?P<node_name>[^:]+):(?P<graph>[^\.]+)\.(?P<key>[^\s]+?) (?P<value>.*)$")

    def __init__(self):
        self._raw = None
        self._nodes = None
        with open(os.path.join(settings.MUNIN_DATA_DIR, 'datafile'), 'r') as f:
            # the first line is the datafile version header; skip it
            self.parse(f.readlines()[1:])

    def parse(self, lines):
        """Build the raw dict and the Node/Graph tree from datafile lines."""
        self._nodes = []
        d = {}
        for line in lines:
            m = self._LINE_RE.match(line)
            if m is None:
                # NOTE(review): a non-matching line previously crashed with
                # AttributeError on m.group(); skip defensively instead.
                continue
            key = m.group('key')
            key_chunks = key.split('.')
            group_name, node_name, graph_name, value = (
                m.group('group_name'), m.group('node_name'),
                m.group('graph'), m.group('value'))
            graph = d.setdefault(group_name, {}).setdefault(node_name, {}).setdefault(graph_name, {
                'options': {},
                'datarows': {},
                'subgraphs': {},
            })
            if len(key_chunks) == 1:
                # graph-level option
                assert key.startswith('graph_') or key == 'host_name', "Key should start with 'graph_': %s" % key
                graph['options'][key] = value
            elif len(key_chunks) == 2:
                a, b = key_chunks
                if b.startswith('graph_') and b != 'graph_data_size':
                    # a graph option of a subgraph
                    graph['subgraphs'].setdefault(a, {'options': {}, 'datarows': {}})['options'][b] = value
                else:
                    # an option of a datarow of the parent graph
                    graph['datarows'].setdefault(a, {})[b] = value
            elif len(key_chunks) == 3:
                subgraph, datarow, key = key_chunks
                # BUGFIX: add the subgraph to the existing mapping; the old code
                # replaced the whole 'subgraphs' dict here, silently dropping
                # every previously parsed subgraph of this graph.
                sg = graph['subgraphs'].setdefault(subgraph, {'options': {}, 'datarows': {}})
                sg['datarows'].setdefault(datarow, {})[key] = value
        self._raw = d
        # Second pass: build the object tree from the raw dict.
        for group_name, nodes in d.items():
            for node_name, graphs in nodes.items():
                node_graphs = []
                for graph_name, graph_data in graphs.items():
                    subgraphs = dict(
                        (sg_name, Graph(sg_name, sg_data['options'], sg_data['datarows']))
                        for sg_name, sg_data in graph_data.get('subgraphs', {}).items()
                    )
                    node_graphs.append(Graph(graph_name, graph_data['options'], graph_data['datarows'], subgraphs))
                self._nodes.append(Node(group_name, node_name, graphs=node_graphs))

    @property
    def nodes(self):
        """Nodes sorted by (group, name)."""
        return sorted(self._nodes, key=lambda x: (x.group, x.name))

    @property
    def raw(self):
        """Raw nested dict: group -> node -> graph -> {options, datarows, subgraphs}."""
        return self._raw

284
src/djunin/updater.py

@ -0,0 +1,284 @@
# -*- coding: utf-8 -*-
import os
import argparse
import shlex
import itertools
import logging
import collections
from django.conf import settings
from django.db import transaction
from pyparsing import Word, alphanums, Suppress, Regex, White, ParseException, Optional, Group, Combine
from djunin.models import Node, Graph, DataRow
# Characters allowed in munin identifiers; host/service names may also contain dots.
chars = alphanums + '-_'
host_service_chars = chars + '.'
# covers group;node:graph.
base_pattern = Word(host_service_chars).setResultsName('group') + Suppress(';') + \
Word(host_service_chars).setResultsName('host') + Suppress(':') + \
Word(chars).setResultsName('graph') + Suppress('.')
# generic "attribute value" tail: attribute and value separated by exactly one space
key_value = Word(chars).setResultsName('attribute') + Suppress(White(' ', exact=1)) + Regex('.*').setResultsName('value')
# Note: subgraph_attributes and root_graph_datarow_attributes are only distinguishable by the fact
# that root graph attributes start with 'graph_', except for graph_data_size which can be found
# in subgraph attributes
# NOTE(review): 'abc' appears unused in this file — looks like a leftover
# experiment for graph_attribute_key_value below; confirm before removing.
abc = ~Group('graph_data_size') + Group(Combine(Group('graph_' + Word(chars))))
# "graph_* value" tail, explicitly excluding graph_data_size (see note above)
graph_attribute_key_value = ~Group('graph_data_size') + Combine(Group('graph_' + Word(chars))).setResultsName('attribute') + \
Suppress(White(' ', exact=1)) + Regex('.*').setResultsName('value')
subgraph_pattern = Word(chars).setResultsName('subgraph') + Suppress('.')
datarow_pattern = Word(chars).setResultsName('datarow') + Suppress('.')
# group;node:graph.attribute value
root_graph_attributes = base_pattern + key_value
# group;node:graph.datarow.attribute value
root_graph_datarow_attributes = base_pattern + datarow_pattern + key_value
# group;node:graph.subgraph.attribute value
subgraph_attributes = base_pattern + subgraph_pattern + graph_attribute_key_value
# group;node:graph.subgraph.datarow.attribute value
subgraph_datarow_attributes = base_pattern + subgraph_pattern + datarow_pattern + key_value
# Ordered most-specific first; the first pattern that parses a whole line wins.
patterns = [
subgraph_datarow_attributes,
subgraph_attributes,
root_graph_datarow_attributes,
root_graph_attributes,
]
# One parsed datafile line. The namedtuple's typename 'DataRow' is unrelated to
# the Django DataRow model; the module-level alias used everywhere is 'Row'.
Row = collections.namedtuple('DataRow', ('group', 'node', 'graph', 'subgraph', 'datarow', 'attribute', 'value'))
# Use the dotted module path as the logger name (logging convention);
# __file__ would produce a filesystem path as the logger name.
logger = logging.getLogger(__name__)
class Updater(object):
    """Fast munin-datafile importer: parses the datafile with pyparsing and
    rebuilds all Node/Graph/DataRow rows via bulk_create inside one transaction.
    """

    def __init__(self, datafile=None):
        # Path to munin's datafile; defaults to <MUNIN_DATA_DIR>/datafile.
        self.datafile = datafile or os.path.join(settings.MUNIN_DATA_DIR, 'datafile')
        # (group, name) -> Node and graph-key -> Graph caches filled in update().
        self.graphs = None
        self.nodes = None

    def run(self):
        """Parse the datafile and atomically rewrite the database content."""
        with open(self.datafile) as fp:
            data = self.prepare(fp)
            with transaction.atomic():
                self.update(list(data))

    def prepare(self, fp):
        """Yield sorted Row tuples parsed from *fp* (version header skipped)."""
        def _read_and_parse():
            for i, line in enumerate(x.strip() for x in fp):
                if i == 0:
                    # the first line is the datafile version header
                    continue
                m = None
                # Try the most specific grammar first; first full match wins.
                for p in patterns:
                    try:
                        m = p.parseString(line, parseAll=True)
                    except ParseException:
                        continue
                    fields = (m['group'], m['host'], m['graph'],
                              m.get('subgraph', None), m.get('datarow', None),
                              m['attribute'], m['value'])
                    yield Row(*fields)
                    break
                if not m:
                    logger.error("No pattern matched line: %s", line)
        # Sorting makes the rows contiguous per (group, node, graph, ...) so
        # itertools.groupby in the create_* methods works correctly.
        for row in sorted(_read_and_parse()):
            yield row

    def update(self, data):
        """Replace all nodes, graphs and datarows with the parsed content."""
        logger.info("Creating nodes")
        Node.objects.all().delete()
        Node.objects.bulk_create(
            Node(group=group, name=node)
            for group, node in {(row.group, row.node) for row in data}
        )
        self.nodes = {(n.group, n.name): n for n in Node.objects.all()}

        # create root graphs
        self.create_root_graphs(data)
        self.graphs = {
            (g.node.group, g.node.name, g.name): g
            for g in Graph.objects.filter(parent=None).select_related('node')
        }

        # create subgraphs
        logger.info("Creating sub graphs")
        self.create_subgraphs(data)
        # BUGFIX: `dict(self.graphs.items() + [...])` is a TypeError on
        # Python 3 (dict views don't support +); merge via update() instead.
        self.graphs.update(
            ((g.node.group, g.node.name, g.parent.name, g.name), g)
            for g in Graph.objects.exclude(parent=None).select_related('node', 'parent')
        )

        # create datarows
        logger.info("Creating datarows")
        self.create_datarows(data)

    def create_root_graphs(self, data):
        """Bulk-create graphs that have neither subgraph nor datarow component."""
        logger.info("Creating root graphs")
        root_rows = (row for row in data if row.subgraph is None and row.datarow is None)
        grouped = itertools.groupby(root_rows, lambda row: (row.group, row.node, row.graph))

        def _build_root_graphs():
            for (group, node, graph_name), items in grouped:
                g = Graph(node=self.nodes[(group, node)], name=graph_name)
                for row in items:
                    setattr(g, row.attribute, row.value)
                    if row.attribute == 'graph_args' and row.value:
                        for k, v in self.parse_graph_args(row.value).items():
                            setattr(g, k, v)
                if not g.graph_category:
                    g.graph_category = 'other'
                yield g

        Graph.objects.bulk_create(_build_root_graphs())

    def create_subgraphs(self, data):
        """Bulk-create graphs nested under a root graph.

        Key shapes seen in the datafile:
          group;node:graph.attribute value
          group;node:graph.datarow.attribute value
          group;node:graph.subgraph.attribute value
          group;node:graph.subgraph.datarow.attribute value
        """
        sub_rows = (row for row in data if row.subgraph is not None and row.datarow is None)
        grouped = itertools.groupby(sub_rows, lambda row: (row.group, row.node, row.graph, row.subgraph))

        def _build_subgraphs():
            for (group, node, root_graph_name, graph_name), items in grouped:
                g = Graph(node=self.nodes[(group, node)],
                          parent=self.graphs[(group, node, root_graph_name)],
                          name=graph_name)
                # subgraphs inherit the parent's category unless overridden
                g.graph_category = g.parent.graph_category or 'other'
                for row in items:
                    setattr(g, row.attribute, row.value)
                    if row.attribute == 'graph_args' and row.value:
                        for k, v in self.parse_graph_args(row.value).items():
                            setattr(g, k, v)
                yield g

        Graph.objects.bulk_create(_build_subgraphs())

    def create_datarows(self, data):
        """Bulk-create datarows for root graphs and subgraphs."""
        dr_rows = (row for row in data if row.datarow is not None)
        grouped = itertools.groupby(dr_rows, lambda row: (row.group, row.node, row.graph, row.subgraph, row.datarow))

        def _build_datarows():
            for (group, node, root_graph_name, graph_name, datarow_name), items in grouped:
                # root-graph datarows have no subgraph component in their key
                graph_key = ((group, node, root_graph_name, graph_name)
                             if graph_name else (group, node, root_graph_name))
                graph = self.graphs[graph_key]
                dropts = dict((row.attribute, row.value) for row in items)
                if 'graph' in dropts:
                    # BUGFIX: 'graph' is a yes/no flag in the datafile; the
                    # model's do_graph field expects a boolean, not the raw string.
                    dropts['do_graph'] = dropts.pop('graph').lower() == 'yes'
                yield DataRow(graph=graph, name=datarow_name,
                              rrdfile=self.get_rrdfilename(graph, datarow_name, dropts),
                              **dropts)

        DataRow.objects.bulk_create(_build_datarows())

    def parse_graph_args(self, args_s):
        """Parse a munin ``graph_args`` string (rrdgraph options).

        Returns a dict of the supported options prefixed with ``graph_args_``;
        unsupported or unparsable arguments yield an empty dict.
        """
        if not (args_s or "").strip():
            return {}
        # Mirror rrdgraph's command line options (add_help=False frees -h).
        parser = argparse.ArgumentParser(add_help=False)
        parser.add_argument('-s', '--start')
        parser.add_argument('-e', '--end')
        parser.add_argument('-S', '--step')
        parser.add_argument('-t', '--title')
        parser.add_argument('-v', '--vertical-label')
        parser.add_argument('-w', '--width')
        parser.add_argument('-h', '--height')
        parser.add_argument('-j', '--only-graph', action='store_true')
        parser.add_argument('-D', '--full-size-mode', action='store_true')
        parser.add_argument('-u', '--upper-limit')
        parser.add_argument('-l', '--lower-limit')
        parser.add_argument('-r', '--rigid', action='store_true')
        parser.add_argument('-A', '--alt-autoscale', action='store_true')
        parser.add_argument('-J', '--alt-autoscale-min')
        parser.add_argument('-M', '--alt-autoscale-max')
        parser.add_argument('-N', '--no-gridfit')
        parser.add_argument('-x', '--x-grid')
        parser.add_argument('--week-fmt')
        parser.add_argument('-y', '--y-grid')
        parser.add_argument('--left-axis-formatter')
        parser.add_argument('--left-axis-format')
        parser.add_argument('-Y', '--alt-y-grid', action='store_true')
        parser.add_argument('-o', '--logarithmic', action='store_true')
        parser.add_argument('-X', '--units-exponent')
        parser.add_argument('-L', '--units-length')
        parser.add_argument('--units')
        parser.add_argument('--right-axis')
        parser.add_argument('--right-axis-label')
        parser.add_argument('--right-axis-formatter')
        parser.add_argument('--right-axis-format')
        parser.add_argument('-g', '--no-legend', action='store_true')
        parser.add_argument('-F', '--force-rules-legend', action='store_true')
        parser.add_argument('--legend-position')
        parser.add_argument('--legend-direction')
        parser.add_argument('-z', '--lazy', action='store_true')
        parser.add_argument('-d', '--daemon')
        parser.add_argument('-f', '--imginfo')
        parser.add_argument('-c', '--color')
        parser.add_argument('--grid-dash')
        parser.add_argument('--border')
        parser.add_argument('--dynamic-labels', action='store_true')
        parser.add_argument('-m', '--zoom')
        parser.add_argument('-n', '--font')
        parser.add_argument('-R', '--font-render-mode')
        parser.add_argument('-B', '--font-smoothing-threshold')
        parser.add_argument('-P', '--pango-markup', action='store_true')
        parser.add_argument('-G', '--graph-render-mode')
        parser.add_argument('-E', '--slope-mode', action='store_true')
        parser.add_argument('-a', '--imgformat')
        parser.add_argument('-i', '--interlaced', action='store_true')
        parser.add_argument('-T', '--tabwidth')
        parser.add_argument('-b', '--base')
        parser.add_argument('-W', '--watermark')
        parser.add_argument('-Z', '--use-nan-for-all-missing-data', action='store_true')
        try:
            x = parser.parse_args(shlex.split(args_s))
            supported_args = 'base', 'lower_limit', 'upper_limit'
            return dict(("graph_args_%s" % a, getattr(x, a, None)) for a in supported_args if getattr(x, a, None))
        except (Exception, SystemExit):
            # argparse raises SystemExit on unknown/invalid arguments; treat
            # anything unparsable as "no extra options".
            logger.exception("Could not parse args '%s'", args_s)
            return {}

    def get_rrdfilename(self, graph, datarow_name, datarow_options):
        """Return the rrd filename munin uses for *datarow_name* of *graph*.

        The first letter of the datarow type (g/c/d/a) is part of the name.
        """
        dr_type_name = datarow_options.get('type', 'GAUGE')
        pattern = "{group}/{node}-{graph}-{datarow}-{datarow_type}.rrd"
        if graph.parent:
            pattern = "{group}/{node}-{graph}-{subgraph}-{datarow}-{datarow_type}.rrd"
        return pattern.format(group=graph.node.group, node=graph.node.name,
                              graph=graph.parent.name if graph.parent else graph.name,
                              subgraph=graph.name if graph.parent else None,
                              datarow=datarow_name, datarow_type=dr_type_name.lower()[0])

10
src/djunin/views/base.py

@ -1,20 +1,10 @@
# -*- coding: utf-8 -*-
from djunin.models import Node
from djunin.objects import MuninDataFile
class BaseViewMixin(object):
    """Common view helpers: page metadata and a lazily parsed munin datafile."""

    # page title / active sidebar entry; overridden by concrete views
    page_title = None
    sidebar_item = None

    def __init__(self, *args, **kwargs):
        # BUGFIX: a mixin must pass construction args up the MRO so it can be
        # combined with Django class-based views (whose __init__ takes **kwargs).
        super(BaseViewMixin, self).__init__(*args, **kwargs)
        self._datafile = None

    @property
    def data_file(self):
        """Parse the munin datafile at most once per view instance."""
        if self._datafile is None:
            self._datafile = MuninDataFile()
        return self._datafile

    @property
    def all_node_groups(self):
        """Distinct node group names, ordered alphabetically."""
        return Node.objects.values_list('group', flat=True).order_by('group').distinct()