# Source: netdata repository, python.d/postgres.chart.py
# (retrieved via the arthur.barton.de gitweb mirror of netdata.git)
1 # -*- coding: utf-8 -*-
# Description: postgres netdata python.d module
3 # Authors: facetoe, dangtranhoang
4
5 from copy import deepcopy
6
7 import psycopg2
8 from psycopg2 import extensions
9 from psycopg2.extras import DictCursor
10
11 from base import SimpleService
12
# default module values (netdata plugin scheduling knobs)
update_every = 1   # seconds between data collections
priority = 90000   # chart ordering priority on the dashboard
retries = 60       # collection retries before the plugin is disabled

# Default Config options.
# {
#    'database': None,
#    'user': 'postgres',
#    'password': None,
#    'host': 'localhost',
#    'port': 5432
# }
26
# WAL archive status: total / .ready / .done file counts under
# pg_xlog/archive_status (pg_ls_dir requires superuser privileges).
# $r$...$r$ dollar-quoting keeps the regexes free of SQL escaping.
ARCHIVE = """
SELECT
    CAST(COUNT(*) AS INT) AS file_count,
    CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)), 0) AS INT) AS ready_count,
    CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)), 0) AS INT) AS done_count
FROM
    pg_catalog.pg_ls_dir('pg_xlog/archive_status') AS archive_files (archive_file);
"""

# Backend sessions split into active vs. idle.
BACKENDS = """
SELECT
    count(*) - (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle') AS backends_active,
    (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle' ) AS backends_idle
FROM
    pg_stat_activity;
"""

# Approximate on-disk size, in bytes, of ordinary ('r') and TOAST ('t')
# tables (relpages are 8 KB blocks).
TABLE_SIZE_ON_DISK = """
SELECT ((sum(relpages)* 8) * 1024) AS size_relations FROM pg_class WHERE relkind IN ('r', 't');
"""

# Number of ordinary and TOAST tables.
TABLE_COUNT = """
SELECT count(1) as relations FROM pg_class WHERE relkind IN ('r', 't');
"""

# Approximate on-disk size of indexes ('i'), in bytes.
INDEX_SIZE_ON_DISK = """
SELECT ((sum(relpages)* 8) * 1024) AS size_indexes FROM pg_class WHERE relkind = 'i';
"""

# Number of indexes.
INDEX_COUNT = """
SELECT count(1) as indexes FROM pg_class WHERE relkind = 'i';
"""

# Per-database cumulative counters, excluding template databases.
# NOTE: the '\d' survives intact even in this non-raw string because
# Python does not treat \d as an escape sequence.
DATABASE = """
SELECT
  datname AS database_name,
  sum(xact_commit) AS xact_commit,
  sum(xact_rollback) AS xact_rollback,
  sum(blks_read) AS blks_read,
  sum(blks_hit) AS blks_hit,
  sum(tup_returned) AS tup_returned,
  sum(tup_fetched) AS tup_fetched,
  sum(tup_inserted) AS tup_inserted,
  sum(tup_updated) AS tup_updated,
  sum(tup_deleted) AS tup_deleted,
  sum(conflicts) AS conflicts
FROM pg_stat_database
WHERE NOT datname ~* '^template\d+'
GROUP BY database_name;
"""

# Block-level I/O for user tables: disk reads vs. buffer-cache hits for
# heap, index, TOAST and TOAST-index blocks.
STATIO = """
SELECT
    sum(heap_blks_read) AS heap_blocks_read,
    sum(heap_blks_hit) AS heap_blocks_hit,
    sum(idx_blks_read) AS index_blocks_read,
    sum(idx_blks_hit) AS index_blocks_hit,
    sum(toast_blks_read) AS toast_blocks_read,
    sum(toast_blks_hit) AS toast_blocks_hit,
    sum(tidx_blks_read) AS toastindex_blocks_read,
    sum(tidx_blks_hit) AS toastindex_blocks_hit
FROM
    pg_statio_all_tables
WHERE
    schemaname <> 'pg_catalog';
"""
# Cluster-wide background-writer / checkpoint counters.
BGWRITER = 'SELECT * FROM pg_stat_bgwriter;'
# Current lock counts grouped by lock mode.
LOCKS = 'SELECT mode, count(mode) AS count FROM pg_locks GROUP BY mode ORDER BY mode;'
# Replication lag in bytes per standby (currently unused; see the
# disabled add_replication_stats code at the bottom of this file).
REPLICATION = """
SELECT
    client_hostname,
    client_addr,
    state,
    sent_offset - (
        replay_offset - (sent_xlog - replay_xlog) * 255 * 16 ^ 6 ) AS byte_lag
FROM (
    SELECT
        client_addr, client_hostname, state,
        ('x' || lpad(split_part(sent_location,   '/', 1), 8, '0'))::bit(32)::bigint AS sent_xlog,
        ('x' || lpad(split_part(replay_location, '/', 1), 8, '0'))::bit(32)::bigint AS replay_xlog,
        ('x' || lpad(split_part(sent_location,   '/', 2), 8, '0'))::bit(32)::bigint AS sent_offset,
        ('x' || lpad(split_part(replay_location, '/', 2), 8, '0'))::bit(32)::bigint AS replay_offset
    FROM pg_stat_replication
) AS s;
"""
112
# Maps pg_locks.mode values to the dimension ids used by the 'locks' chart.
LOCK_MAP = {'AccessExclusiveLock': 'lock_access_exclusive',
            'AccessShareLock': 'lock_access_share',
            'ExclusiveLock': 'lock_exclusive',
            'RowExclusiveLock': 'lock_row_exclusive',
            'RowShareLock': 'lock_row_share',
            'ShareUpdateExclusiveLock': 'lock_update_exclusive_lock',
            'ShareLock': 'lock_share',
            'ShareRowExclusiveLock': 'lock_share_row_exclusive',
            'SIReadLock': 'lock_si_read'}

# Chart display order; per-database 'db_stat*' clones are prepended to this
# list at runtime by Service._create_definitions().
ORDER = ['db_stat_transactions', 'db_stat_tuple_read', 'db_stat_tuple_returned', 'db_stat_tuple_write',
         'backend_process', 'index_count', 'index_size', 'table_count', 'table_size', 'locks', 'wal', 'operations_heap',
         'operations_index', 'operations_toast', 'operations_toast_index', 'background_writer']
126
# Chart definitions in netdata format:
#   'options': [name, title, units, family, context, chart_type]
#   'lines':   [dimension_id, dimension_name, algorithm(, multiplier, divisor)]
# The 'db_stat*' entries are templates: _create_definitions() clones them per
# database, prefixing the database name onto title/family/context/dimensions
# (hence the leading space or dot in those option strings).
CHARTS = {
    'db_stat_transactions': {
        'options': [None, ' Transactions', 'Count', ' database statistics', '.db_stat_transactions', 'line'],
        'lines': [
            ['db_stat_xact_commit', 'Committed', 'absolute'],
            ['db_stat_xact_rollback', 'Rolled Back', 'absolute']
        ]},
    'db_stat_tuple_read': {
        'options': [None, ' Tuple read', 'Count', ' database statistics', '.db_stat_tuple_read', 'line'],
        'lines': [
            ['db_stat_blks_read', 'Disk', 'absolute'],
            ['db_stat_blks_hit', 'Cache', 'absolute']
        ]},
    'db_stat_tuple_returned': {
        'options': [None, ' Tuple returned', 'Count', ' database statistics', '.db_stat_tuple_returned', 'line'],
        'lines': [
            ['db_stat_tup_returned', 'Sequential', 'absolute'],
            ['db_stat_tup_fetched', 'Bitmap', 'absolute']
        ]},
    'db_stat_tuple_write': {
        'options': [None, ' Tuple write', 'Count', ' database statistics', '.db_stat_tuple_write', 'line'],
        'lines': [
            ['db_stat_tup_inserted', 'Inserted', 'absolute'],
            ['db_stat_tup_updated', 'Updated', 'absolute'],
            ['db_stat_tup_deleted', 'Deleted', 'absolute'],
            ['db_stat_conflicts', 'Conflicts', 'absolute']
        ]},
    'backend_process': {
        'options': [None, 'Backend processes', 'Count', 'Backend processes', 'postgres.backend_process', 'line'],
        'lines': [
            ['backend_process_active', 'Active', 'absolute'],
            ['backend_process_idle', 'Idle', 'absolute']
        ]},
    'index_count': {
        'options': [None, 'Total index', 'Count', 'Index', 'postgres.index_count', 'line'],
        'lines': [
            ['index_count', 'Total index', 'absolute']
        ]},
    'index_size': {
        'options': [None, 'Index size', 'MB', 'Index', 'postgres.index_size', 'line'],
        'lines': [
            ['index_size', 'Size', 'absolute', 1, 1024 * 1024]
        ]},
    'table_count': {
        'options': [None, 'Total table', 'Count', 'Table', 'postgres.table_count', 'line'],
        'lines': [
            ['table_count', 'Total table', 'absolute']
        ]},
    'table_size': {
        'options': [None, 'Table size', 'MB', 'Table', 'postgres.table_size', 'line'],
        'lines': [
            ['table_size', 'Size', 'absolute', 1, 1024 * 1024]
        ]},
    'locks': {
        # BUG FIX: title was 'Table size' (copy-paste from the chart above).
        'options': [None, 'Locks', 'Count', 'Locks', 'postgres.locks', 'line'],
        'lines': [
            ['lock_access_exclusive', 'Access Exclusive', 'absolute'],
            ['lock_access_share', 'Access Share', 'absolute'],
            ['lock_exclusive', 'Exclusive', 'absolute'],
            ['lock_row_exclusive', 'Row Exclusive', 'absolute'],
            ['lock_row_share', 'Row Share', 'absolute'],
            ['lock_update_exclusive_lock', 'Update Exclusive Lock', 'absolute'],
            ['lock_share', 'Share', 'absolute'],
            ['lock_share_row_exclusive', 'Share Row Exclusive', 'absolute'],
            ['lock_si_read', 'SI Read', 'absolute']
        ]},
    'wal': {
        'options': [None, 'WAL stats', 'Files', 'WAL', 'postgres.wal', 'line'],
        'lines': [
            ['wal_total', 'Total', 'absolute'],
            ['wal_ready', 'Ready', 'absolute'],
            ['wal_done', 'Done', 'absolute']
        ]},
    'operations_heap': {
        'options': [None, 'Heap', 'iops', 'IO Operations', 'postgres.operations_heap', 'line'],
        'lines': [
            ['operations_heap_blocks_read', 'Read', 'absolute'],
            ['operations_heap_blocks_hit', 'Hit', 'absolute']
        ]},
    'operations_index': {
        'options': [None, 'Index', 'iops', 'IO Operations', 'postgres.operations_index', 'line'],
        'lines': [
            ['operations_index_blocks_read', 'Read', 'absolute'],
            ['operations_index_blocks_hit', 'Hit', 'absolute']
        ]},
    'operations_toast': {
        'options': [None, 'Toast', 'iops', 'IO Operations', 'postgres.operations_toast', 'line'],
        'lines': [
            ['operations_toast_blocks_read', 'Read', 'absolute'],
            ['operations_toast_blocks_hit', 'Hit', 'absolute']
        ]},
    'operations_toast_index': {
        'options': [None, 'Toast index', 'iops', 'IO Operations', 'postgres.operations_toast_index', 'line'],
        'lines': [
            ['operations_toastindex_blocks_read', 'Read', 'absolute'],
            ['operations_toastindex_blocks_hit', 'Hit', 'absolute']
        ]},
    'background_writer': {
        'options': [None, 'Checkpoints', 'Count', 'Background Writer', 'postgres.background_writer', 'line'],
        'lines': [
            ['background_writer_scheduled', 'Scheduled', 'absolute'],
            ['background_writer_requested', 'Requested', 'absolute']
        ]}
}
231
232
class Service(SimpleService):
    """Collect PostgreSQL server statistics for netdata.

    Connects with psycopg2 using the module configuration (defaulting to
    postgres@localhost:5432) and samples a set of pg_stat_* / pg_catalog
    views each update cycle.  Monotonic counters are converted to
    per-cycle deltas via add_derive_value().
    """

    def __init__(self, configuration=None, name=None):
        # BUG FIX: use the explicit class in super() -- the original
        # super(self.__class__, ...) recurses infinitely if subclassed.
        super(Service, self).__init__(configuration=configuration, name=name)
        self.order = ORDER
        self.definitions = CHARTS
        self.configuration = configuration
        self.connection = None  # psycopg2 connection, (re)opened per cycle
        self.data = {}          # values reported to netdata this cycle
        self.old_data = {}      # previous raw counter values for deltas

    def connect(self):
        """Open a read-only, autocommit connection to PostgreSQL.

        Configuration keys override the defaults below; any extra keys
        are passed straight through to psycopg2.connect().
        """
        params = dict(user='postgres',
                      database=None,
                      password=None,
                      host='localhost',
                      port=5432)
        # Guard against a missing configuration dict.
        params.update(self.configuration or {})
        self.connection = psycopg2.connect(**params)
        # Autocommit avoids idle-in-transaction sessions; read-only guards
        # against accidental writes from monitoring queries.
        self.connection.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        self.connection.set_session(readonly=True)

    def check(self):
        """Verify connectivity and build per-database chart definitions.

        Returns True when the plugin can run, False otherwise.  The broad
        except is deliberate: any failure here just disables the module.
        """
        try:
            self.connect()
            self._create_definitions()
            return True
        except Exception as e:
            self.error(e)
            return False

    def _create_definitions(self):
        """Clone the 'db_stat*' chart templates once per user database.

        Template databases (template0, template1, ...) are skipped.  Each
        clone is prefixed with the database name in its chart id, title,
        family, context and dimension ids, and prepended to self.order.
        """
        cursor = self.connection.cursor()
        # Raw string so '\d' reaches PostgreSQL's regex engine verbatim.
        cursor.execute(r"""
            SELECT datname
            FROM pg_stat_database
            WHERE NOT datname ~* '^template\d+'
        """)

        for row in cursor:
            database_name = row[0]
            for chart_template_name in list(CHARTS):
                if not chart_template_name.startswith('db_stat'):
                    continue

                chart_template = CHARTS[chart_template_name]
                chart_name = "{}_{}".format(database_name, chart_template_name)
                if chart_name not in self.order:
                    self.order.insert(0, chart_name)
                    name, title, units, family, context, chart_type = chart_template['options']
                    self.definitions[chart_name] = {
                        'options': [
                            name,
                            database_name + title,
                            units,
                            database_name + family,
                            database_name + context,
                            chart_type
                        ]
                    }

                    self.definitions[chart_name]['lines'] = []
                    # Deep-copy so the shared template lines stay pristine.
                    for line in deepcopy(chart_template['lines']):
                        line[0] = "{}_{}".format(database_name, line[0])
                        self.definitions[chart_name]['lines'].append(line)

        cursor.close()

    def _get_data(self):
        """Collect one sample of all statistics and return the data dict."""
        self.connect()

        cursor = self.connection.cursor(cursor_factory=DictCursor)
        self.add_stats(cursor)

        cursor.close()
        self.connection.close()

        return self.data

    def add_stats(self, cursor):
        """Run every enabled collector against the given DictCursor."""
        self.add_database_stats(cursor)
        self.add_backend_stats(cursor)
        self.add_index_stats(cursor)
        self.add_table_stats(cursor)
        self.add_lock_stats(cursor)
        self.add_statio_stats(cursor)
        self.add_bgwriter_stats(cursor)

        # self.add_replication_stats(cursor)

        # add_wal_metrics needs superuser to get directory listings
        # if self.config.get('superuser', True):
        # self.add_wal_stats(cursor)

    def add_database_stats(self, cursor):
        """Record per-database transaction/tuple/block counters as deltas."""
        # Column names double as metric suffixes: 'db_stat_<column>'.
        metrics = ('xact_commit', 'xact_rollback', 'blks_read', 'blks_hit',
                   'tup_returned', 'tup_fetched', 'tup_inserted',
                   'tup_updated', 'tup_deleted', 'conflicts')
        cursor.execute(DATABASE)
        for row in cursor:
            database_name = row.get('database_name')
            for metric in metrics:
                self.add_derive_value('db_stat_' + metric,
                                      prefix=database_name,
                                      value=int(row.get(metric, 0)))

    def add_backend_stats(self, cursor):
        """Record counts of active vs. idle backend sessions (gauges)."""
        cursor.execute(BACKENDS)
        temp = cursor.fetchone()

        self.data['backend_process_active'] = int(temp.get('backends_active', 0))
        self.data['backend_process_idle'] = int(temp.get('backends_idle', 0))

    def add_index_stats(self, cursor):
        """Record total index count and on-disk index size in bytes."""
        cursor.execute(INDEX_COUNT)
        temp = cursor.fetchone()
        self.data['index_count'] = int(temp.get('indexes', 0))

        cursor.execute(INDEX_SIZE_ON_DISK)
        temp = cursor.fetchone()
        self.data['index_size'] = int(temp.get('size_indexes', 0))

    def add_table_stats(self, cursor):
        """Record total table count and on-disk table size in bytes."""
        cursor.execute(TABLE_COUNT)
        temp = cursor.fetchone()
        self.data['table_count'] = int(temp.get('relations', 0))

        cursor.execute(TABLE_SIZE_ON_DISK)
        temp = cursor.fetchone()
        self.data['table_size'] = int(temp.get('size_relations', 0))

    def add_lock_stats(self, cursor):
        """Record a gauge per lock mode; modes not currently held report 0."""
        cursor.execute(LOCKS)
        rows = cursor.fetchall()
        for key in LOCK_MAP:
            found = False
            for row in rows:
                if row['mode'] == key:
                    found = True
                    self.data[LOCK_MAP[key]] = int(row['count'])

            if not found:
                self.data[LOCK_MAP[key]] = 0

    def add_wal_stats(self, cursor):
        """Record WAL archive-status file counters (requires superuser)."""
        cursor.execute(ARCHIVE)
        temp = cursor.fetchone()
        self.add_derive_value('wal_total', int(temp.get('file_count', 0)))
        self.add_derive_value('wal_ready', int(temp.get('ready_count', 0)))
        self.add_derive_value('wal_done', int(temp.get('done_count', 0)))

    def add_statio_stats(self, cursor):
        """Record block I/O counters (disk read vs. cache hit) as deltas."""
        cursor.execute(STATIO)
        temp = cursor.fetchone()
        # Column names double as metric suffixes: 'operations_<column>'.
        for column in ('heap_blocks_read', 'heap_blocks_hit',
                       'index_blocks_read', 'index_blocks_hit',
                       'toast_blocks_read', 'toast_blocks_hit',
                       'toastindex_blocks_read', 'toastindex_blocks_hit'):
            self.add_derive_value('operations_' + column, int(temp.get(column, 0)))

    def add_bgwriter_stats(self, cursor):
        """Record background-writer checkpoint counters as deltas."""
        cursor.execute(BGWRITER)
        temp = cursor.fetchone()

        self.add_derive_value('background_writer_scheduled',
                              int(temp.get('checkpoints_timed', 0)))
        # BUG FIX: the pg_stat_bgwriter column is 'checkpoints_req', not
        # 'checkpoints_requests' -- the old key always fell back to 0.
        self.add_derive_value('background_writer_requested',
                              int(temp.get('checkpoints_req', 0)))

    def add_derive_value(self, key, value, prefix=None):
        """Store the delta of a monotonically increasing counter in self.data.

        The first observation of a key reports 0 (nothing to diff against);
        the raw value is remembered in self.old_data either way.
        """
        if prefix:
            key = "{}_{}".format(prefix, key)
        if key not in self.old_data:
            self.data[key] = 0
        else:
            self.data[key] = value - self.old_data[key]

        self.old_data[key] = value
415
# Disabled code kept for reference: per-standby replication byte lag using
# the REPLICATION query above.  NOTE(review): it calls add_gauge_value,
# which is not defined in this file -- confirm before re-enabling.
'''
    def add_replication_stats(self, cursor):
        cursor.execute(REPLICATION)
        temp = cursor.fetchall()
        for row in temp:
            self.add_gauge_value('Replication/%s' % row.get('client_addr', 'Unknown'),
                                 'byte_lag',
                                 int(row.get('byte_lag', 0)))
'''