1 # -*- coding: utf-8 -*-
2 # Description: haproxy netdata python.d module
5 from base import UrlService, SocketService
7 # default module values (can be overridden per job in `config`)
12 # charts order (can be overridden if you want less charts, or different order)
# Chart ids: 'f*' = frontend charts, 'b*' = backend charts, 'health_*' = backend health.
# _get_data() strips the leading 'f'/'b' to recover the HAProxy CSV column name.
13 ORDER = ['fbin', 'fbout', 'fscur', 'fqcur', 'bbin', 'bbout', 'bscur', 'bqcur', 'health_sdown', 'health_bdown']
# Fragment of the CHARTS definition dict: only the per-chart 'options' rows
# survive in this paste; the chart keys, 'lines' entries and surrounding
# braces are missing (NOTE(review): do not assume their exact shape from here).
# Options format is the python.d convention:
# [name, title, units, family, context, chart_type].
16 'options': [None, "Kilobytes In", "KB/s", 'frontend', 'haproxy_f.bin', 'line'],
20 'options': [None, "Kilobytes Out", "KB/s", 'frontend', 'haproxy_f.bout', 'line'],
24 'options': [None, "Sessions Active", "sessions", 'frontend', 'haproxy_f.scur', 'line'],
28 'options': [None, "Session In Queue", "sessions", 'frontend', 'haproxy_f.qcur', 'line'],
32 'options': [None, "Kilobytes In", "KB/s", 'backend', 'haproxy_b.bin', 'line'],
36 'options': [None, "Kilobytes Out", "KB/s", 'backend', 'haproxy_b.bout', 'line'],
40 'options': [None, "Sessions Active", "sessions", 'backend', 'haproxy_b.scur', 'line'],
44 'options': [None, "Sessions In Queue", "sessions", 'backend', 'haproxy_b.qcur', 'line'],
48 'options': [None, "Backend Servers In DOWN State", "failed servers", 'health',
49 'haproxy_hs.down', 'line'],
53 'options': [None, "Is backend alive? 1 = DOWN", "failed backend", 'health', 'haproxy_hb.down', 'line'],
# Dual-transport service: polls HAProxy stats either over the admin socket
# (SocketService, 'show stat' command) or over HTTP (UrlService, the CSV
# stats URL). All polling is dispatched through self.poll_method so the rest
# of the class is transport-agnostic.
59 class Service(UrlService, SocketService):
60 def __init__(self, configuration=None, name=None):
# A 'socket' key in the job configuration selects the socket transport...
61 if 'socket' in configuration:
62 SocketService.__init__(self, configuration=configuration, name=name)
63 self.poll_method = SocketService
# 'show stat' dumps the statistics table in CSV form over the socket.
64 self.request = 'show stat\n'
# ...otherwise fall back to HTTP polling.
# NOTE(review): the `else:` header (orig line 65) and orig line 68 are
# missing from this paste -- the two lines below are the else branch.
66 UrlService.__init__(self, configuration=configuration, name=name)
67 self.poll_method = UrlService
69 self.definitions = CHARTS
# Pre-split ORDER into frontend ('f*') and backend ('b*'/'health_b*') ids;
# note 'b' also matches nothing here beyond the intended backend charts
# only because health ids start with 'h'.
70 self.order_front = [_ for _ in ORDER if _.startswith('f')]
71 self.order_back = [_ for _ in ORDER if _.startswith('b')]
# Fragment of Service.check(): the `def check(self):` line is missing from
# this paste. Delegates the connectivity check to the selected transport
# class and logs which one is in use.
76 if self.poll_method.check(self):
77 self.info('Plugin was started successfully. We are using %s.' % self.poll_method.__name__)
# Build per-proxy chart dimensions once the set of frontends/backends is
# known. '# pxname' is the proxy-name column of HAProxy's CSV header (the
# leading '# ' comes from the header row itself).
# NOTE(review): this paste is missing the `idx = _[0]` assignments that
# followed each `for _ in enumerate(...)` header in the original file; the
# loop bodies below rely on that `idx`.
82 def create_charts(self, front_ends, back_ends):
# Frontend dimensions: byte counters are incremental KB/s (divisor 1024),
# session gauges are absolute.
83 for _ in enumerate(front_ends):
85 self.definitions['fbin']['lines'].append(['_'.join(['fbin', front_ends[idx]['# pxname']]),
86 front_ends[idx]['# pxname'], 'incremental', 1, 1024])
87 self.definitions['fbout']['lines'].append(['_'.join(['fbout', front_ends[idx]['# pxname']]),
88 front_ends[idx]['# pxname'], 'incremental', 1, 1024])
89 self.definitions['fscur']['lines'].append(['_'.join(['fscur', front_ends[idx]['# pxname']]),
90 front_ends[idx]['# pxname'], 'absolute'])
91 self.definitions['fqcur']['lines'].append(['_'.join(['fqcur', front_ends[idx]['# pxname']]),
92 front_ends[idx]['# pxname'], 'absolute'])
# Backend dimensions, plus the two health charts: count of DOWN servers
# per backend ('hsdown_*') and a DOWN flag per backend ('hbdown_*').
94 for _ in enumerate(back_ends):
96 self.definitions['bbin']['lines'].append(['_'.join(['bbin', back_ends[idx]['# pxname']]),
97 back_ends[idx]['# pxname'], 'incremental', 1, 1024])
98 self.definitions['bbout']['lines'].append(['_'.join(['bbout', back_ends[idx]['# pxname']]),
99 back_ends[idx]['# pxname'], 'incremental', 1, 1024])
100 self.definitions['bscur']['lines'].append(['_'.join(['bscur', back_ends[idx]['# pxname']]),
101 back_ends[idx]['# pxname'], 'absolute'])
102 self.definitions['bqcur']['lines'].append(['_'.join(['bqcur', back_ends[idx]['# pxname']]),
103 back_ends[idx]['# pxname'], 'absolute'])
104 self.definitions['health_sdown']['lines'].append(['_'.join(['hsdown', back_ends[idx]['# pxname']]),
105 back_ends[idx]['# pxname'], 'absolute'])
106 self.definitions['health_bdown']['lines'].append(['_'.join(['hbdown', back_ends[idx]['# pxname']]),
107 back_ends[idx]['# pxname'], 'absolute'])
# Fragment of Service._get_data(): the `def _get_data(self)` line, the
# docstring quotes, and several body lines (e.g. the `to_netdata = {}`
# initializer and the early-return on empty raw data) are missing from this
# paste.
111 Format data received from http request
# Raw CSV text from whichever transport is active; the first line is the
# CSV header row.
114 raw_data = self.poll_method._get_raw_data(self)
119 raw_data = raw_data.splitlines()
# One dict per data row, keyed by the header fields (including '# pxname').
121 all_instances = [dict(zip(raw_data[0].split(','),
122 raw_data[_].split(','))) for _ in range(1, len(raw_data))]
124 back_ends = list(filter(is_backend, all_instances))
125 front_ends = list(filter(is_frontend, all_instances))
126 servers = list(filter(is_server, all_instances))
129 self.create_charts(front_ends, back_ends)
# Chart ids like 'fbin'/'bscur' map to CSV columns by stripping the
# leading 'f'/'b' (idx[1:]); missing or empty CSV values become 0.
134 for frontend in front_ends:
135 for idx in self.order_front:
136 to_netdata.update({'_'.join([idx, frontend['# pxname']]):
137 int(frontend[idx[1:]]) if frontend.get(idx[1:]) else 0})
139 for backend in back_ends:
140 for idx in self.order_back:
141 to_netdata.update({'_'.join([idx, backend['# pxname']]):
142 int(backend[idx[1:]]) if backend.get(idx[1:]) else 0})
# Health metrics: DOWN-server count per backend and a per-backend DOWN flag.
# NOTE(review): the `idx = _[0]` assignment after this loop header is
# missing from the paste, as is the final `return to_netdata`.
144 for _ in enumerate(back_ends):
146 to_netdata.update({'_'.join(['hsdown', back_ends[idx]['# pxname']]):
147 len([server for server in servers if is_server_down(server, back_ends, idx)])})
148 to_netdata.update({'_'.join(['hbdown', back_ends[idx]['# pxname']]):
149 1 if is_backend_down(back_ends, idx) else 0})
# SocketService hook: reading is considered complete only when an empty
# chunk arrives, i.e. the peer closed the connection after dumping the
# stats table.
# NOTE(review): the @staticmethod decorator and the docstring quote lines
# are missing from this paste.
154 def _check_raw_data(data):
156 Check if all data has been gathered from socket
160 return not bool(data)
def is_backend(backend):
    """Return True when the CSV row is a BACKEND summary, excluding the built-in 'stats' proxy."""
    if backend.get('svname') != 'BACKEND':
        return False
    return backend.get('# pxname') != 'stats'
def is_frontend(frontend):
    """Return True when the CSV row is a FRONTEND summary, excluding the built-in 'stats' proxy."""
    row_is_frontend = frontend.get('svname') == 'FRONTEND'
    row_is_not_stats = frontend.get('# pxname') != 'stats'
    return row_is_frontend and row_is_not_stats
def is_server(server):
    """Return True for individual server rows, i.e. neither a FRONTEND nor a BACKEND aggregate."""
    svname = server.get('svname', '')
    is_aggregate = svname.startswith(('FRONTEND', 'BACKEND'))
    return not is_aggregate
def is_server_down(server, back_ends, idx):
    """Return True when *server* belongs to back_ends[idx] and its status is exactly 'DOWN'."""
    same_proxy = server.get('# pxname') == back_ends[idx].get('# pxname')
    if not same_proxy:
        return False
    return server.get('status') == 'DOWN'
def is_backend_down(back_ends, idx):
    """Return True when the backend row at *idx* reports a status of exactly 'DOWN'."""
    status = back_ends[idx].get('status')
    return status == 'DOWN'