# -*- coding: utf-8 -*-
# Description: postgres netdata python.d module
# Authors: facetoe, dangtranhoang

import re
from copy import deepcopy

import psycopg2
from psycopg2 import extensions
from psycopg2.extras import DictCursor

from base import SimpleService

# default module values
update_every = 1
priority = 90000
retries = 60

# Default Config options.
# {
#    'database': None,
#    'user': 'postgres',
#    'password': None,
#    'host': 'localhost',
#    'port': 5432
# }
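#
# A hypothetical job entry for python.d/postgres.conf overriding these
# defaults (the job name and values below are illustrative only):
#
# local:
#     user: 'netdata'
#     database: 'postgres'
#     host: 'localhost'
#     port: 5432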

# Raw string: the SQL regexes contain backslashes that must reach the server
# verbatim.
ARCHIVE = r"""
SELECT
    CAST(COUNT(*) AS INT) AS file_count,
    CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)), 0) AS INT) AS ready_count,
    CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)), 0) AS INT) AS done_count
FROM
    pg_catalog.pg_ls_dir('pg_xlog/archive_status') AS archive_files (archive_file);
"""

BACKENDS = """
SELECT
    count(*) - (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle') AS backends_active,
    (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle') AS backends_idle
FROM
    pg_stat_activity;
"""

TABLE_STATS = """
SELECT
  ((sum(relpages) * 8) * 1024) AS size_relations,
  count(1)                     AS relations
FROM pg_class
WHERE relkind IN ('r', 't');
"""

INDEX_STATS = """
SELECT
  ((sum(relpages) * 8) * 1024) AS size_indexes,
  count(1)                     AS indexes
FROM pg_class
WHERE relkind = 'i';
"""

DATABASE = r"""
SELECT
  datname AS database_name,
  sum(xact_commit) AS xact_commit,
  sum(xact_rollback) AS xact_rollback,
  sum(blks_read) AS blks_read,
  sum(blks_hit) AS blks_hit,
  sum(tup_returned) AS tup_returned,
  sum(tup_fetched) AS tup_fetched,
  sum(tup_inserted) AS tup_inserted,
  sum(tup_updated) AS tup_updated,
  sum(tup_deleted) AS tup_deleted,
  sum(conflicts) AS conflicts
FROM pg_stat_database
WHERE NOT datname ~* '^template\d+'
GROUP BY database_name;
"""

STATIO = """
SELECT
    sum(heap_blks_read) AS heap_blocks_read,
    sum(heap_blks_hit) AS heap_blocks_hit,
    sum(idx_blks_read) AS index_blocks_read,
    sum(idx_blks_hit) AS index_blocks_hit,
    sum(toast_blks_read) AS toast_blocks_read,
    sum(toast_blks_hit) AS toast_blocks_hit,
    sum(tidx_blks_read) AS toastindex_blocks_read,
    sum(tidx_blks_hit) AS toastindex_blocks_hit
FROM
    pg_statio_all_tables
WHERE
    schemaname <> 'pg_catalog';
"""

BGWRITER = 'SELECT * FROM pg_stat_bgwriter;'

DATABASE_LOCKS = """
SELECT
  pg_database.datname AS database_name,
  mode,
  count(mode) AS count
FROM pg_locks
  INNER JOIN pg_database ON pg_database.oid = pg_locks.database
GROUP BY datname, mode
ORDER BY datname, mode;
"""
REPLICATION = """
SELECT
    client_hostname,
    client_addr,
    state,
    sent_offset - (
        replay_offset - (sent_xlog - replay_xlog) * 255 * 16 ^ 6 ) AS byte_lag
FROM (
    SELECT
        client_addr, client_hostname, state,
        ('x' || lpad(split_part(sent_location,   '/', 1), 8, '0'))::bit(32)::bigint AS sent_xlog,
        ('x' || lpad(split_part(replay_location, '/', 1), 8, '0'))::bit(32)::bigint AS replay_xlog,
        ('x' || lpad(split_part(sent_location,   '/', 2), 8, '0'))::bit(32)::bigint AS sent_offset,
        ('x' || lpad(split_part(replay_location, '/', 2), 8, '0'))::bit(32)::bigint AS replay_offset
    FROM pg_stat_replication
) AS s;
"""

LOCK_TYPES = [
    'ExclusiveLock',
    'RowShareLock',
    'SIReadLock',
    'ShareUpdateExclusiveLock',
    'AccessExclusiveLock',
    'AccessShareLock',
    'ShareRowExclusiveLock',
    'ShareLock',
    'RowExclusiveLock'
]

ORDER = ['db_stat_transactions', 'db_stat_tuple_read', 'db_stat_tuple_returned', 'db_stat_tuple_write',
         'backend_process', 'index_count', 'index_size', 'table_count', 'table_size', 'wal', 'operations_heap',
         'operations_index', 'operations_toast', 'operations_toast_index', 'background_writer']

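# Chart templates. Keys starting with 'db_stat' are instantiated once per
# discovered database (see Service._add_database_stat_chart), which prepends
# the database name to the title, family, context and dimension ids. Each
# line is a netdata dimension: [id, name, algorithm] plus an optional
# multiplier and divisor, e.g. the size charts divide bytes by 1024 * 1024
# to plot MB.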
CHARTS = {
    'db_stat_transactions': {
        'options': [None, ' Transactions', 'Count', ' database statistics', '.db_stat_transactions', 'line'],
        'lines': [
            ['db_stat_xact_commit', 'Committed', 'absolute'],
            ['db_stat_xact_rollback', 'Rolled Back', 'absolute']
        ]},
    'db_stat_tuple_read': {
        'options': [None, ' Tuple read', 'Count', ' database statistics', '.db_stat_tuple_read', 'line'],
        'lines': [
            ['db_stat_blks_read', 'Disk', 'absolute'],
            ['db_stat_blks_hit', 'Cache', 'absolute']
        ]},
    'db_stat_tuple_returned': {
        'options': [None, ' Tuple returned', 'Count', ' database statistics', '.db_stat_tuple_returned', 'line'],
        'lines': [
            ['db_stat_tup_returned', 'Sequential', 'absolute'],
            ['db_stat_tup_fetched', 'Bitmap', 'absolute']
        ]},
    'db_stat_tuple_write': {
        'options': [None, ' Tuple write', 'Count', ' database statistics', '.db_stat_tuple_write', 'line'],
        'lines': [
            ['db_stat_tup_inserted', 'Inserted', 'absolute'],
            ['db_stat_tup_updated', 'Updated', 'absolute'],
            ['db_stat_tup_deleted', 'Deleted', 'absolute'],
            ['db_stat_conflicts', 'Conflicts', 'absolute']
        ]},
    'backend_process': {
        'options': [None, 'Backend processes', 'Count', 'Backend processes', 'postgres.backend_process', 'line'],
        'lines': [
            ['backend_process_active', 'Active', 'absolute'],
            ['backend_process_idle', 'Idle', 'absolute']
        ]},
    'index_count': {
        'options': [None, 'Total index', 'Count', 'Index', 'postgres.index_count', 'line'],
        'lines': [
            ['index_count', 'Total index', 'absolute']
        ]},
    'index_size': {
        'options': [None, 'Index size', 'MB', 'Index', 'postgres.index_size', 'line'],
        'lines': [
            ['index_size', 'Size', 'absolute', 1, 1024 * 1024]
        ]},
    'table_count': {
        'options': [None, 'Total table', 'Count', 'Table', 'postgres.table_count', 'line'],
        'lines': [
            ['table_count', 'Total table', 'absolute']
        ]},
    'table_size': {
        'options': [None, 'Table size', 'MB', 'Table', 'postgres.table_size', 'line'],
        'lines': [
            ['table_size', 'Size', 'absolute', 1, 1024 * 1024]
        ]},
    'wal': {
        'options': [None, 'WAL stats', 'Files', 'WAL', 'postgres.wal', 'line'],
        'lines': [
            ['wal_total', 'Total', 'absolute'],
            ['wal_ready', 'Ready', 'absolute'],
            ['wal_done', 'Done', 'absolute']
        ]},
    'operations_heap': {
        'options': [None, 'Heap', 'iops', 'IO Operations', 'postgres.operations_heap', 'line'],
        'lines': [
            ['operations_heap_blocks_read', 'Read', 'absolute'],
            ['operations_heap_blocks_hit', 'Hit', 'absolute']
        ]},
    'operations_index': {
        'options': [None, 'Index', 'iops', 'IO Operations', 'postgres.operations_index', 'line'],
        'lines': [
            ['operations_index_blocks_read', 'Read', 'absolute'],
            ['operations_index_blocks_hit', 'Hit', 'absolute']
        ]},
    'operations_toast': {
        'options': [None, 'Toast', 'iops', 'IO Operations', 'postgres.operations_toast', 'line'],
        'lines': [
            ['operations_toast_blocks_read', 'Read', 'absolute'],
            ['operations_toast_blocks_hit', 'Hit', 'absolute']
        ]},
    'operations_toast_index': {
        'options': [None, 'Toast index', 'iops', 'IO Operations', 'postgres.operations_toast_index', 'line'],
        'lines': [
            ['operations_toastindex_blocks_read', 'Read', 'absolute'],
            ['operations_toastindex_blocks_hit', 'Hit', 'absolute']
        ]},
    'background_writer': {
        'options': [None, 'Checkpoints', 'Count', 'Background Writer', 'postgres.background_writer', 'line'],
        'lines': [
            ['background_writer_scheduled', 'Scheduled', 'absolute'],
            ['background_writer_requested', 'Requested', 'absolute']
        ]}
}


class Service(SimpleService):
    def __init__(self, configuration=None, name=None):
        super(Service, self).__init__(configuration=configuration, name=name)
        self.order = ORDER
        self.definitions = CHARTS
        self.configuration = configuration or {}
        self.connection = None
        self.data = {}
        self.old_data = {}
        self.databases = set()

    def connect(self):
        params = dict(user='postgres',
                      database=None,
                      password=None,
                      host='localhost',
                      port=5432)
        params.update(self.configuration)
        self.connection = psycopg2.connect(**params)
        # Autocommit keeps the monitoring queries from holding transactions
        # open, and the session is made read-only as a safety measure.
        self.connection.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        self.connection.set_session(readonly=True)

    def check(self):
        # Connect once to validate the configuration and discover the
        # databases, then build the per-database chart definitions.
        try:
            self.connect()
            self.discover_databases()
            self.connection.close()
            self._create_definitions()
            return True
        except Exception as e:
            self.error(e)
            return False

    def _create_definitions(self):
        for database_name in self.databases:
            for chart_template_name in list(CHARTS):
                if chart_template_name.startswith('db_stat'):
                    self._add_database_stat_chart(chart_template_name, database_name)
            self._add_database_lock_chart(database_name)

    def discover_databases(self):
        cursor = self.connection.cursor()
        cursor.execute(r"""
            SELECT datname
            FROM pg_stat_database
            WHERE NOT datname ~* '^template\d+'
        """)
        self.databases = set(r[0] for r in cursor)
        cursor.close()

    def _add_database_stat_chart(self, chart_template_name, database_name):
        chart_template = CHARTS[chart_template_name]
        chart_name = "{}_{}".format(database_name, chart_template_name)
        if chart_name not in self.order:
            self.order.insert(0, chart_name)
            name, title, units, family, context, chart_type = chart_template['options']
            self.definitions[chart_name] = {
                'options': [
                    name,
                    database_name + title,
                    units,
                    database_name + family,
                    database_name + context,
                    chart_type
                ]
            }

            self.definitions[chart_name]['lines'] = []
            for line in deepcopy(chart_template['lines']):
                line[0] = "{}_{}".format(database_name, line[0])
                self.definitions[chart_name]['lines'].append(line)

    def _add_database_lock_chart(self, database_name):
        chart_name = "{}_locks".format(database_name)
        if chart_name not in self.order:
            self.order.insert(0, chart_name)
            self.definitions[chart_name] = dict(
                options=[
                    None,
                    database_name + ' locks',
                    'Count',
                    database_name + ' database statistics',
                    database_name + '.locks',
                    'line'
                ],
                lines=[]
            )

            for lock_type in LOCK_TYPES:
                lock_id = "{}_{}".format(database_name, lock_type.lower())
                # Insert a space before each capital letter, e.g.
                # 'AccessShareLock' -> 'Access Share Lock'.
                label = re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", lock_type)
                self.definitions[chart_name]['lines'].append([lock_id, label, 'absolute'])

    def _get_data(self):
        self.connect()

        cursor = self.connection.cursor(cursor_factory=DictCursor)
        self.add_stats(cursor)

        cursor.close()
        self.connection.close()

        return self.data

    def add_stats(self, cursor):
        self.add_database_stats(cursor)
        self.add_backend_stats(cursor)
        self.add_index_stats(cursor)
        self.add_table_stats(cursor)
        self.add_lock_stats(cursor)
        self.add_statio_stats(cursor)
        self.add_bgwriter_stats(cursor)

        # self.add_replication_stats(cursor)

        # add_wal_metrics needs superuser to get directory listings
        # if self.config.get('superuser', True):
        #     self.add_wal_stats(cursor)

    def add_database_stats(self, cursor):
        cursor.execute(DATABASE)
        for row in cursor:
            database_name = row.get('database_name')
            self.add_derive_value('db_stat_xact_commit', prefix=database_name, value=int(row.get('xact_commit', 0)))
            self.add_derive_value('db_stat_xact_rollback', prefix=database_name, value=int(row.get('xact_rollback', 0)))
            self.add_derive_value('db_stat_blks_read', prefix=database_name, value=int(row.get('blks_read', 0)))
            self.add_derive_value('db_stat_blks_hit', prefix=database_name, value=int(row.get('blks_hit', 0)))
            self.add_derive_value('db_stat_tup_returned', prefix=database_name, value=int(row.get('tup_returned', 0)))
            self.add_derive_value('db_stat_tup_fetched', prefix=database_name, value=int(row.get('tup_fetched', 0)))
            self.add_derive_value('db_stat_tup_inserted', prefix=database_name, value=int(row.get('tup_inserted', 0)))
            self.add_derive_value('db_stat_tup_updated', prefix=database_name, value=int(row.get('tup_updated', 0)))
            self.add_derive_value('db_stat_tup_deleted', prefix=database_name, value=int(row.get('tup_deleted', 0)))
            self.add_derive_value('db_stat_conflicts', prefix=database_name, value=int(row.get('conflicts', 0)))

    def add_backend_stats(self, cursor):
        cursor.execute(BACKENDS)
        temp = cursor.fetchone()

        self.data['backend_process_active'] = int(temp.get('backends_active', 0))
        self.data['backend_process_idle'] = int(temp.get('backends_idle', 0))

    def add_index_stats(self, cursor):
        cursor.execute(INDEX_STATS)
        temp = cursor.fetchone()
        # The sums are NULL when no rows match, so guard before int().
        self.data['index_count'] = int(temp.get('indexes') or 0)
        self.data['index_size'] = int(temp.get('size_indexes') or 0)

    def add_table_stats(self, cursor):
        cursor.execute(TABLE_STATS)
        temp = cursor.fetchone()
        self.data['table_count'] = int(temp.get('relations') or 0)
        self.data['table_size'] = int(temp.get('size_relations') or 0)

    def add_lock_stats(self, cursor):
        cursor.execute(DATABASE_LOCKS)
        # First zero out all current lock values.
        for database_name in self.databases:
            for lock_type in LOCK_TYPES:
                lock_id = "{}_{}".format(database_name, lock_type.lower())
                self.data[lock_id] = 0

        # Now populate those that have current locks.
        for row in cursor:
            database_name, lock_type, lock_count = row
            lock_id = "{}_{}".format(database_name, lock_type.lower())
            self.data[lock_id] = lock_count

    def add_wal_stats(self, cursor):
        cursor.execute(ARCHIVE)
        temp = cursor.fetchone()
        self.add_derive_value('wal_total', int(temp.get('file_count', 0)))
        self.add_derive_value('wal_ready', int(temp.get('ready_count', 0)))
        self.add_derive_value('wal_done', int(temp.get('done_count', 0)))

    def add_statio_stats(self, cursor):
        cursor.execute(STATIO)
        temp = cursor.fetchone()
        # The sums are NULL when no user tables exist, so guard before int().
        self.add_derive_value('operations_heap_blocks_read', int(temp.get('heap_blocks_read') or 0))
        self.add_derive_value('operations_heap_blocks_hit', int(temp.get('heap_blocks_hit') or 0))
        self.add_derive_value('operations_index_blocks_read', int(temp.get('index_blocks_read') or 0))
        self.add_derive_value('operations_index_blocks_hit', int(temp.get('index_blocks_hit') or 0))
        self.add_derive_value('operations_toast_blocks_read', int(temp.get('toast_blocks_read') or 0))
        self.add_derive_value('operations_toast_blocks_hit', int(temp.get('toast_blocks_hit') or 0))
        self.add_derive_value('operations_toastindex_blocks_read', int(temp.get('toastindex_blocks_read') or 0))
        self.add_derive_value('operations_toastindex_blocks_hit', int(temp.get('toastindex_blocks_hit') or 0))

    def add_bgwriter_stats(self, cursor):
        cursor.execute(BGWRITER)
        temp = cursor.fetchone()

        self.add_derive_value('background_writer_scheduled', temp.get('checkpoints_timed', 0))
        # pg_stat_bgwriter names this column checkpoints_req.
        self.add_derive_value('background_writer_requested', temp.get('checkpoints_req', 0))

    def add_derive_value(self, key, value, prefix=None):
        # Report the delta since the previous reading; the first reading has
        # no baseline, so it reports 0.
        if prefix:
            key = "{}_{}".format(prefix, key)
        if key not in self.old_data:
            self.data[key] = 0
        else:
            self.data[key] = value - self.old_data[key]

        self.old_data[key] = value
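
    # Example: on the first pass add_derive_value('x', 10) stores 10 and
    # reports 0; on the next pass add_derive_value('x', 25) reports a delta
    # of 15.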


# Disabled draft, kept as an inert module-level string; add_gauge_value is
# not implemented in this class.
'''
    def add_replication_stats(self, cursor):
        cursor.execute(REPLICATION)
        temp = cursor.fetchall()
        for row in temp:
            self.add_gauge_value('Replication/%s' % row.get('client_addr', 'Unknown'),
                                 'byte_lag',
                                 int(row.get('byte_lag', 0)))
'''
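
# A rough standalone smoke test (hypothetical; the module normally runs only
# inside netdata's python.d framework, and `from base import SimpleService`
# will fail outside it). With psycopg2 installed and a local server running:
#
#   import psycopg2
#   conn = psycopg2.connect(user='postgres', host='localhost', port=5432)
#   cur = conn.cursor()
#   cur.execute(BACKENDS)  # or paste any of the query strings above
#   print(cur.fetchone())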