1 # -*- coding: utf-8 -*-
2 # Description: varnish netdata python.d module
5 from base import SimpleService
7 from os import access as is_executable, X_OK
8 from subprocess import Popen, PIPE
11 # default module values (can be overridden per job in `config`)
# Chart display order (default charts only; backend charts are inserted
# dynamically at the front by Service.create_charts()).
ORDER = ('session hit_rate chit_rate expunge threads '
         'backend_health memory_usage bad uptime').split()
# Chart definitions consumed by Service.create_charts(): each entry maps a
# chart id (the ids listed in ORDER) to its 'lines' (netdata dimensions:
# [stat_key, display_name, algorithm, multiplier, divisor]) and its
# 'options' list ([overwrite_id, title, units, family, context, chart_type]).
# NOTE(review): this chunk appears to have source lines elided -- the dict
# keys for every chart after 'backend_health' and the dict's closing brace
# are missing here; the keys are inferred below from each chart's
# 'varnish.<id>' context string.
CHARTS = {'backend_health':
          {'lines': [['backend_conn', 'conn', 'incremental', 1, 1],
                     ['backend_unhealthy', 'unhealthy', 'incremental', 1, 1],
                     ['backend_busy', 'busy', 'incremental', 1, 1],
                     ['backend_fail', 'fail', 'incremental', 1, 1],
                     ['backend_reuse', 'reuse', 'incremental', 1, 1],
                     # NOTE(review): display name 'resycle' looks like a typo
                     # for 'recycle' -- confirm before renaming (users may
                     # already have dashboards/alarms keyed on it).
                     ['backend_recycle', 'resycle', 'incremental', 1, 1],
                     ['backend_toolate', 'toolate', 'incremental', 1, 1],
                     ['backend_retry', 'retry', 'incremental', 1, 1],
                     ['backend_req', 'req', 'incremental', 1, 1]],
           'options': [None, 'Backend health', 'connections', 'Backend health', 'varnish.backend_traf', 'line']},
          # (elided key -- presumably 'bad': ; '_b'-suffixed dimensions are
          # produced by the problems-summary loop in the data formatter)
          {'lines': [['sess_drop_b', None, 'incremental', 1, 1],
                     ['backend_unhealthy_b', None, 'incremental', 1, 1],
                     ['fetch_failed', None, 'incremental', 1, 1],
                     ['backend_busy_b', None, 'incremental', 1, 1],
                     ['threads_failed_b', None, 'incremental', 1, 1],
                     ['threads_limited_b', None, 'incremental', 1, 1],
                     ['threads_destroyed_b', None, 'incremental', 1, 1],
                     ['thread_queue_len_b', 'queue_len', 'absolute', 1, 1],
                     ['losthdr_b', None, 'incremental', 1, 1],
                     ['esi_errors_b', None, 'incremental', 1, 1],
                     ['esi_warnings_b', None, 'incremental', 1, 1],
                     ['sess_fail_b', None, 'incremental', 1, 1],
                     ['sc_pipe_overflow_b', None, 'incremental', 1, 1],
                     ['sess_pipe_overflow_b', None, 'incremental', 1, 1]],
           'options': [None, 'Misbehavior', 'problems', 'Problems summary', 'varnish.bad', 'line']},
          # (elided key -- presumably 'expunge':)
          {'lines': [['n_expired', 'expired', 'incremental', 1, 1],
                     ['n_lru_nuked', 'lru_nuked', 'incremental', 1, 1]],
           'options': [None, 'Object expunging', 'objects', 'Cache performance', 'varnish.expunge', 'line']},
          # (elided key -- presumably 'hit_rate': ; *_perc values are scaled
          # by 10000 in the formatter, hence divisor 100 -> two decimals)
          {'lines': [['cache_hit_perc', 'hit', 'absolute', 1, 100],
                     ['cache_miss_perc', 'miss', 'absolute', 1, 100],
                     ['cache_hitpass_perc', 'hitpass', 'absolute', 1, 100]],
           'options': [None, 'All history hit rate ratio','percent', 'Cache performance', 'varnish.hit_rate', 'stacked']},
          # (elided key -- presumably 'chit_rate':)
          {'lines': [['cache_hit_cperc', 'hit', 'absolute', 1, 100],
                     ['cache_miss_cperc', 'miss', 'absolute', 1, 100],
                     ['cache_hitpass_cperc', 'hitpass', 'absolute', 1, 100]],
           'options': [None, 'Current poll hit rate ratio','percent', 'Cache performance', 'varnish.chit_rate', 'stacked']},
          # (elided key -- presumably 'memory_usage': ; divisor 1048576
          # converts bytes to megabytes, multiplier -1 draws allocated
          # space below the axis)
          {'lines': [['s0.g_space', 'available', 'absolute', 1, 1048576],
                     ['s0.g_bytes', 'allocated', 'absolute', -1, 1048576]],
           'options': [None, 'Memory usage', 'megabytes', 'Memory usage', 'varnish.memory_usage', 'stacked']},
          # (elided key -- presumably 'session':)
          {'lines': [['sess_conn', 'sess_conn', 'incremental', 1, 1],
                     ['client_req', 'client_requests', 'incremental', 1, 1],
                     ['client_conn', 'client_conn', 'incremental', 1, 1],
                     ['client_drop', 'client_drop', 'incremental', 1, 1],
                     ['sess_dropped', 'sess_dropped', 'incremental', 1, 1]],
           'options': [None, 'Sessions', 'units', 'Client metrics', 'varnish.session', 'line']},
          # (elided key -- presumably 'threads':)
          {'lines': [['threads', None, 'absolute', 1, 1],
                     ['threads_created', 'created', 'incremental', 1, 1],
                     ['threads_failed', 'failed', 'incremental', 1, 1],
                     ['threads_limited', 'limited', 'incremental', 1, 1],
                     ['thread_queue_len', 'queue_len', 'incremental', 1, 1],
                     ['sess_queued', 'sess_queued', 'incremental', 1, 1]],
           'options': [None, 'Thread status', 'threads', 'Thread-related metrics', 'varnish.threads', 'line']},
          # (elided key -- presumably 'uptime':)
          {'lines': [['uptime', None, 'absolute', 1, 1]],
           'options': [None, 'Varnish uptime', 'seconds', 'Uptime', 'varnish.uptime', 'line']}
# Search path for the `varnishstat` binary (trailing slash kept so a plain
# string join forms the full path); the first executable match wins.
DIRECTORIES = ['%s/' % prefix for prefix in ('/bin', '/usr/bin', '/sbin', '/usr/sbin')]
class Service(SimpleService):
    # Collects Varnish statistics by running the `varnishstat` binary with
    # the one-shot `-1` flag and parsing its output with regular expressions.
    # NOTE(review): several source lines of this class appear elided in this
    # chunk (method headers, if/else and try/except statements); the
    # "(elided: ...)" comments below mark where code is visibly missing --
    # verify against the full file before acting on them.
    def __init__(self, configuration=None, name=None):
        SimpleService.__init__(self, configuration=configuration, name=name)
        # Absolute path of the first executable `varnishstat` found in
        # DIRECTORIES. The trailing [0] raises IndexError when none is
        # found -- presumably an elided guard or the check below handles
        # that case; confirm against the full source.
        self.varnish = [''.join([directory, 'varnishstat']) for directory in DIRECTORIES
                        if is_executable(''.join([directory, 'varnishstat']), X_OK)][0]
        # Matches every "<TYPE.>name   value" line of `varnishstat -1` output.
        self.rgx_all = compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)')
        # Two alternative layouts of per-backend (VBE) stat lines:
        # VBE.boot.super_backend.pipe_hdrbyte (new)
        # VBE.default2(127.0.0.2,,81).bereq_bodybytes (old)
        # Regex result: [('super_backend', 'beresp_hdrbytes', '0'), ('super_backend', 'beresp_bodybytes', '0')]
        self.rgx_bck = (compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)'),
                        compile(r'VBE\.[\d\w-]+\.([\w\d_]+).(beresp[\w_]+)\s+(\d+)'))
        # Previous poll's [cache_hit, cache_miss, cache_hitpass] counters,
        # used to compute the "current poll" hit-rate chart.
        self.cache_prev = list()

    # (elided: `def check(self):` header and the conditionals guarding the
    # statements below)
        # Can't start without the 'varnishstat' command
        self.error('\'varnishstat\' command was not found in %s or not executable by netdata' % DIRECTORIES)

        # If command is present and we can execute it we need to make sure..
        # 1. STDOUT is not empty
        reply = self._get_raw_data()
        self.error('No output from \'varnishstat\' (not enough privileges?)')

        # 2. Output is parsable (list is not empty after regex findall)
        is_parsable = self.rgx_all.findall(reply)
        self.error('Cant parse output...')

        # We need to find the right regex for backend parse: try the
        # "(ip,,port)" layout first. [::2] keeps every other match --
        # presumably because hdr/body stats arrive as pairs per backend;
        # verify against real `varnishstat -1` output.
        self.backend_list = self.rgx_bck[0].findall(reply)[::2]
        if self.backend_list:
            self.rgx_bck = self.rgx_bck[0]
        # (elided: likely an `else:` branch around the two lines below)
        self.backend_list = self.rgx_bck[1].findall(reply)[::2]
        self.rgx_bck = self.rgx_bck[1]

        # We are about to start!
        self.info('Plugin was started successfully')

    def _get_raw_data(self):
        # Run `varnishstat -1` (one-shot dump). argv list + shell=False
        # avoids any shell interpretation of the path.
        reply = Popen([self.varnish, '-1'], stdout=PIPE, stderr=PIPE, shell=False)
        # (elided: lines between Popen and communicate -- likely error
        # handling/return)
        raw_data = reply.communicate()[0]

    # (elided: return of _get_raw_data, the `def _get_data(self):` header
    # and its opening docstring quotes -- the bare line below is docstring
    # text from that method)
        Format data received from shell command
        # (elided: docstring close and surrounding lines)
        raw_data = self._get_raw_data()
        data_all = self.rgx_all.findall(raw_data)
        data_backend = self.rgx_bck.findall(raw_data)

        # 1. ALL data from 'varnishstat -1'. t - type(MAIN, MEMPOOL etc)
        to_netdata = {k: int(v) for t, k, v in data_all}

        # 2. ADD backend statistics, namespaced as "<backend>_<stat>"
        to_netdata.update({'_'.join([n, k]): int(v) for n, k, v in data_backend})

        # 3. ADD additional keys to dict
        # 3.1 Cache hit/miss/hitpass OVERALL in percent (x10000; the chart
        # divisor of 100 renders two decimal places)
        cache_summary = sum([to_netdata.get('cache_hit', 0), to_netdata.get('cache_miss', 0),
                             to_netdata.get('cache_hitpass', 0)])
        to_netdata['cache_hit_perc'] = find_percent(to_netdata.get('cache_hit', 0), cache_summary, 10000)
        to_netdata['cache_miss_perc'] = find_percent(to_netdata.get('cache_miss', 0), cache_summary, 10000)
        to_netdata['cache_hitpass_perc'] = find_percent(to_netdata.get('cache_hitpass', 0), cache_summary, 10000)

        # 3.2 Cache hit/miss/hitpass CURRENT in percent
        # (elided: likely an `if self.cache_prev:` guard -- indexing
        # cache_prev[0..2] below requires a previous poll)
        cache_summary = sum([to_netdata.get('cache_hit', 0), to_netdata.get('cache_miss', 0),
                             to_netdata.get('cache_hitpass', 0)]) - sum(self.cache_prev)
        to_netdata['cache_hit_cperc'] = find_percent(to_netdata.get('cache_hit', 0) - self.cache_prev[0], cache_summary, 10000)
        to_netdata['cache_miss_cperc'] = find_percent(to_netdata.get('cache_miss', 0) - self.cache_prev[1], cache_summary, 10000)
        to_netdata['cache_hitpass_cperc'] = find_percent(to_netdata.get('cache_hitpass', 0) - self.cache_prev[2], cache_summary, 10000)
        # (elided: likely an `else:` branch -- first poll reports zeros)
        to_netdata['cache_hit_cperc'] = 0
        to_netdata['cache_miss_cperc'] = 0
        to_netdata['cache_hitpass_cperc'] = 0

        # Remember this poll's counters for the next "current" computation.
        self.cache_prev = [to_netdata.get('cache_hit', 0), to_netdata.get('cache_miss', 0), to_netdata.get('cache_hitpass', 0)]

        # 3.3 Problems summary chart: mirror each problem counter under a
        # "<name>_b" key so the 'bad' chart plots it independently of the
        # per-topic charts that use the original key.
        for elem in ['backend_busy', 'backend_unhealthy', 'esi_errors', 'esi_warnings', 'losthdr', 'sess_drop', 'sc_pipe_overflow',
                     'sess_fail', 'sess_pipe_overflow', 'threads_destroyed', 'threads_failed', 'threads_limited', 'thread_queue_len']:
            if to_netdata.get(elem) is not None:
                to_netdata[''.join([elem, '_b'])] = to_netdata.get(elem)

    def create_charts(self):
        # Builds self.order/self.definitions: static charts from ORDER/CHARTS
        # plus one dynamic response-bytes chart per discovered backend.
        # If 'all_charts' is true...ALL charts are displayed. If not, only default + 'extra_charts'
        #if self.configuration.get('all_charts'):
        #    self.order = EXTRA_ORDER
        # extra_charts = list(filter(lambda chart: chart in EXTRA_ORDER, self.extra_charts.split()))
        # except (AttributeError, NameError, ValueError):
        #     self.error('Extra charts disabled.')
        # Copy ORDER so the insert() below doesn't mutate the module constant.
        self.order = ORDER[:]
        #self.order.extend(extra_charts)

        # Create static charts
        #self.definitions = {chart: values for chart, values in CHARTS.items() if chart in self.order}
        # NOTE(review): CHARTS is NOT copied here, so the .update() below
        # mutates the module-level dict -- confirm that is intended.
        self.definitions = CHARTS

        # Create dynamic backend charts
        if self.backend_list:
            for backend in self.backend_list:
                # backend is a regex match tuple; backend[0] is the name.
                self.order.insert(0, ''.join([backend[0], '_resp_stats']))
                self.definitions.update({''.join([backend[0], '_resp_stats']): {
                    # (elided: the surrounding 'options' list entries for
                    # this chart definition)
                    '%s response statistics' % backend[0].capitalize(),
                    # Multiplier 8 / divisor 1000 -> kilobits; body is drawn
                    # negative (below the axis) opposite the header bytes.
                    'lines': [[''.join([backend[0], '_beresp_hdrbytes']),
                               'header', 'incremental', 8, 1000],
                              [''.join([backend[0], '_beresp_bodybytes']),
                               'body', 'incremental', -8, 1000]]}})
def find_percent(value1, value2, multiply):
    """Return value1 as a share of value2, scaled by *multiply* and rounded.

    The original comment promised "If value2 is 0 return 0", but the visible
    code divided unconditionally and would raise ZeroDivisionError; the guard
    is restored here so an idle cache (all counters 0) reports 0 instead of
    crashing the collector.
    """
    # If value2 is 0 return 0
    if not value2:
        return 0
    return round(float(value1) / float(value2) * multiply)