# exim: yes
# hddtemp: yes
# ipfs: yes
+# isc_dhcpd: yes
# memcached: yes
# mysql: yes
# nginx: yes
+++ /dev/null
-# netdata python.d.plugin configuration for isc dhcpd leases
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# retries sets the number of retries to be made in case of failures.
-# If unset, the default for python.d.plugin is used.
-# Attempts to restore the service are made once every update_every
-# and only if the module has collected values in the past.
-# retries: 5
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# retries: 5 # the JOB's number of restoration attempts
-#
-# Additionally to the above, nginx_log also supports the following:
-#
-# leases_path: 'PATH' # the path to openvpn status log file
-# pools: 'dhcpd pools list' # Pools in CIDR format
-#-------------------------------------------------------------------------------------------------------------------
-# IMPORTANT notes
-# 1. Make sure leases file is readable by netdata
-# 2. Current implementation will work only with 'default' db-time-format (weekday year/month/day hour:minute:second).
-# Since its default it will work in most cases. But be aware.
-# 3. Pools MUST BE in CIDR format
-#
-#--------------------------------------------------------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-#
-#-------------------------------------------------------------------------------------------------------------------
-#
-#leases:
-# leases_path: '/var/lib/dhcp/dhcpd.leases'
-# pools: '192.168.3.0/24 192.168.4.0/24 192.168.5.0/24'
--- /dev/null
+# netdata python.d.plugin configuration for isc dhcpd leases
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# In addition to the above, isc_dhcpd also supports the following:
+#
+# leases_path: 'PATH'            # the path to the dhcpd.leases file
+# pools: 'dhcpd pools list' # Pools in CIDR format
+#-------------------------------------------------------------------------------------------------------------------
+# IMPORTANT notes
+# 1. Make sure leases file is readable by netdata
+# 2. Current implementation will work only with 'default' db-time-format (weekday year/month/day hour:minute:second).
+#    Since it's the default, it will work in most cases. But be aware.
+# 3. Pools MUST BE in CIDR format
+#
+#--------------------------------------------------------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+#-------------------------------------------------------------------------------------------------------------------
+#
+#leases:
+# leases_path: '/var/lib/dhcp/dhcpd.leases'
+# pools: '192.168.3.0/24 192.168.4.0/24 192.168.5.0/24'
+++ /dev/null
-# -*- coding: utf-8 -*-
-# Description: isc dhcpd lease netdata python.d module
-# Author: l2isbad
-
-from base import SimpleService
-from re import compile, search
-from time import mktime, strptime, gmtime
-try:
- from ipaddress import IPv4Address as ipaddress
- from ipaddress import ip_network
- have_ipaddress = True
-except ImportError:
- have_ipaddress = False
-
-priority = 60000
-retries = 60
-update_every = 60
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.leases_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases')
- self.pools = self.configuration.get('pools')
-
- # Will work only with 'default' db-time-format (weekday year/month/day hour:minute:second)
- # TODO: update the regex to parse correctly 'local' db-time-format
- # (epoch <seconds-since-epoch>; # <day-name> <month-name> <day-number> <hours>:<minutes>:<seconds> <year>)
- # Also only ipv4 supported
- self.regex = compile(r'(\d+(?:\.\d+){3}).*?((?<=ends )[0-9].*?(?=;))')
-
- def check(self):
- if not self._get_raw_data():
- self.error('Make sure leases_path is correct and dhcpd.leases is readable by netdata')
- return False
- elif not have_ipaddress:
- self.error('No ipaddress module. Please install (py2-ipaddress in case of python2)')
- return False
- else:
- try:
- self.pools = self.pools.split()
- if not [ip_network(pool) for pool in self.pools]:
- self.error('Pools list is empty')
- return False
- except (ValueError, IndexError, AttributeError, SyntaxError):
- self.error('Pools configurations is incorrect')
- return False
-
- # Creating dynamic charts
- self.order = ['utilization']
- self.definitions = {'utilization': {'options': [None, 'Pools utilization', 'used %', 'Utulization', 'isc_dhcpd.util', 'line'], 'lines': []} }
- for pool in self.pools:
- self.definitions['utilization']['lines'].append([''.join(['ut_', pool]), pool, 'absolute'])
- self.order.append(''.join(['leases_', pool]))
- self.definitions[''.join(['leases_', pool])] = \
- {'options': [None, 'Active leases', 'leases', 'Leases', 'isc_dhcpd.lease', 'area'],
- 'lines': [[''.join(['le_', pool]), pool, 'absolute']]}
-
- self.info('Plugin was started succesfully')
- return True
-
- def _get_raw_data(self):
- """
- Open log file
- :return: str
- """
- try:
- with open(self.leases_path, 'rt') as leases:
- result = leases.read()
- except Exception:
- return None
- else:
- return result
-
- def _get_data(self):
- """
- Parse dhcpd.leases file.
- """
- raw_leases = self._get_raw_data()
- all_leases = dict(self.regex.findall(' '.join(raw_leases.split())))
-
- if not all_leases:
- self.error('Cant parse leases file correctly')
- return None
-
- # Result: [active binding, active binding....]. (Expire time (ends date;) - current time > 0)
- active_leases = [k for k, v in all_leases.items() if is_bind_active(all_leases[k])]
-
- # Result: {pool: number of active bindings in pool, ...}
- pools_count = {pool: len([lease for lease in active_leases if is_address_in(lease, pool)]) for pool in self.pools}
-
- # Result: {pool: number of host ip addresses in pool, }
- pools_max = {pool: (2 ** (32 - int(pool.split('/')[1])) - 2) for pool in self.pools}
-
- # Result: {pool: % utilization, ....} (percent)
- pools_util = {pool:int(round(float(pools_count[pool]) / pools_max[pool] * 100, 0)) for pool in self.pools}
-
- # Bulding dicts to send to netdata
- final_count = {''.join(['le_', k]): v for k, v in pools_count.items()}
- final_util = {''.join(['ut_', k]): v for k, v in pools_util.items()}
-
- final_count.update(final_util)
-
- return final_count
-
-def is_bind_active(binding):
- return mktime(strptime(binding, '%w %Y/%m/%d %H:%M:%S')) - mktime(gmtime()) > 0
-
-def is_address_in(address, pool):
- return ipaddress(address) in ip_network(pool)
--- /dev/null
+# -*- coding: utf-8 -*-
+# Description: isc dhcpd lease netdata python.d module
+# Author: l2isbad
+
+from base import SimpleService
+from re import compile, search
+from time import mktime, strptime, gmtime
+try:
+ from ipaddress import IPv4Address as ipaddress
+ from ipaddress import ip_network
+ have_ipaddress = True
+except ImportError:
+ have_ipaddress = False
+
+priority = 60000
+retries = 60
+update_every = 60
+
class Service(SimpleService):
    """Netdata collector for ISC dhcpd: per-pool active leases and utilization."""

    def __init__(self, configuration=None, name=None):
        SimpleService.__init__(self, configuration=configuration, name=name)
        # Path to the dhcpd leases database; netdata must be able to read it.
        self.leases_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases')
        # Raw 'pools' config value (space-separated CIDR list); split into a
        # real list in check().
        self.pools = self.configuration.get('pools')

        # Will work only with 'default' db-time-format (weekday year/month/day hour:minute:second)
        # TODO: update the regex to parse correctly 'local' db-time-format
        # (epoch <seconds-since-epoch>; # <day-name> <month-name> <day-number> <hours>:<minutes>:<seconds> <year>)
        # Also only ipv4 supported
        self.regex = compile(r'(\d+(?:\.\d+){3}).*?((?<=ends )[0-9].*?(?=;))')

    def check(self):
        """
        Validate configuration and build the dynamic charts.

        :return: bool - True when the module can start collecting.
        """
        # Check the cheap precondition first: without the ipaddress module
        # nothing below can work, so don't bother reading the leases file.
        if not have_ipaddress:
            self.error('No ipaddress module. Please install (py2-ipaddress in case of python2)')
            return False
        if not self._get_raw_data():
            self.error('Make sure leases_path is correct and dhcpd.leases is readable by netdata')
            return False
        try:
            self.pools = self.pools.split()
            # ip_network() raises ValueError for malformed CIDR entries.
            if not [ip_network(pool) for pool in self.pools]:
                self.error('Pools list is empty')
                return False
        except (ValueError, IndexError, AttributeError, SyntaxError):
            self.error('Pools configurations is incorrect')
            return False

        # Creating dynamic charts: one summary utilization chart plus one
        # active-leases chart per configured pool.
        self.order = ['utilization']
        self.definitions = {
            'utilization': {
                'options': [None, 'Pools utilization', 'used %', 'Utilization', 'isc_dhcpd.util', 'line'],
                'lines': []}}
        for pool in self.pools:
            self.definitions['utilization']['lines'].append([''.join(['ut_', pool]), pool, 'absolute'])
            self.order.append(''.join(['leases_', pool]))
            self.definitions[''.join(['leases_', pool])] = \
                {'options': [None, 'Active leases', 'leases', 'Leases', 'isc_dhcpd.lease', 'area'],
                 'lines': [[''.join(['le_', pool]), pool, 'absolute']]}

        self.info('Plugin was started successfully')
        return True

    def _get_raw_data(self):
        """
        Read the whole leases file.

        :return: str, or None on any read error (missing file, no permission).
        """
        try:
            with open(self.leases_path, 'rt') as leases:
                return leases.read()
        except (OSError, IOError):
            # Narrowed from bare 'except Exception': only I/O failures are expected here.
            return None

    def _get_data(self):
        """
        Parse dhcpd.leases and compute per-pool active lease counts and
        utilization percentages.

        :return: dict {'le_<pool>': count, 'ut_<pool>': percent, ...} or None.
        """
        raw_leases = self._get_raw_data()
        # Guard: the file may have become unreadable after check();
        # without this, raw_leases.split() would raise AttributeError on None.
        if not raw_leases:
            return None
        # findall() -> [(ip, 'ends' timestamp), ...]; dict() keeps the last
        # (most recent) binding recorded for each address.
        all_leases = dict(self.regex.findall(' '.join(raw_leases.split())))

        if not all_leases:
            self.error('Cant parse leases file correctly')
            return None

        # Addresses whose 'ends' timestamp is still in the future.
        active_leases = [ip for ip, ends in all_leases.items() if is_bind_active(ends)]

        # {pool: number of active bindings in pool}
        pools_count = {pool: len([lease for lease in active_leases if is_address_in(lease, pool)])
                       for pool in self.pools}

        # {pool: number of usable host addresses in pool}.  max(..., 1) guards
        # against /31 and /32 pools, which would otherwise give 0 or -1 hosts
        # and crash (or corrupt) the utilization division below.
        pools_max = {pool: max(2 ** (32 - int(pool.split('/')[1])) - 2, 1) for pool in self.pools}

        # {pool: utilization %} rounded to the nearest integer
        pools_util = {pool: int(round(float(pools_count[pool]) / pools_max[pool] * 100, 0))
                      for pool in self.pools}

        # Build the flat dict netdata expects: 'le_<pool>' and 'ut_<pool>' dimensions.
        final_count = {''.join(['le_', k]): v for k, v in pools_count.items()}
        final_util = {''.join(['ut_', k]): v for k, v in pools_util.items()}

        final_count.update(final_util)

        return final_count
+
def is_bind_active(binding):
    """Return True while the lease's 'ends' timestamp lies in the future.

    `binding` uses dhcpd's default db-time-format, e.g. '3 2024/05/01 10:00:00'
    (weekday year/month/day hour:minute:second), expressed in UTC.
    """
    expire = mktime(strptime(binding, '%w %Y/%m/%d %H:%M:%S'))
    # mktime() treats both struct_times as local time, so the local-UTC
    # offset cancels out of the difference.
    now = mktime(gmtime())
    return expire > now
+
def is_address_in(address, pool):
    """Return True if IPv4 `address` belongs to the CIDR network `pool`."""
    network = ip_network(pool)
    return ipaddress(address) in network