1 # -*- coding: utf-8 -*-
2 # Description: example netdata python.d module
3 # Authors: facetoe, dangtranhoang
import re
from copy import deepcopy

import psycopg2
from psycopg2 import extensions
from psycopg2.extras import DictCursor

from base import SimpleService
14 # default module values
19 # Default Config options.
24 # 'host': 'localhost',
# WAL archive status: total files plus how many are pending (.ready) vs
# archived (.done). Requires superuser for pg_ls_dir().
ARCHIVE = """
SELECT
    CAST(COUNT(*) AS INT) AS file_count,
    CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)), 0) AS INT) AS ready_count,
    CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)), 0) AS INT) AS done_count
FROM
    pg_catalog.pg_ls_dir('pg_xlog/archive_status') AS archive_files (archive_file);
"""
# Backend process counts: everything not idle counts as active.
BACKENDS = """
SELECT
    count(*) - (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle') AS backends_active,
    (SELECT count(*) FROM pg_stat_activity WHERE state = 'idle' ) AS backends_idle
FROM
    pg_stat_activity;
"""
# Ordinary ('r') and TOAST ('t') relations: total count and on-disk size
# (relpages are 8 KiB blocks, hence * 8 * 1024).
TABLE_STATS = """
SELECT
    ((sum(relpages) * 8) * 1024) AS size_relations,
    count(1) AS relations
FROM pg_class
WHERE relkind IN ('r', 't');
"""
# Index ('i') relations: total count and on-disk size in bytes.
INDEX_STATS = """
SELECT
    ((sum(relpages) * 8) * 1024) AS size_indexes,
    count(1) AS indexes
FROM pg_class
WHERE relkind = 'i';"""
# Per-database transaction / block / tuple counters, template databases
# excluded. Column aliases must match the db_stat_* metric names consumed
# by add_database_stats().
DATABASE = """
SELECT
    datname AS database_name,
    sum(xact_commit) AS xact_commit,
    sum(xact_rollback) AS xact_rollback,
    sum(blks_read) AS blks_read,
    sum(blks_hit) AS blks_hit,
    sum(tup_returned) AS tup_returned,
    sum(tup_fetched) AS tup_fetched,
    sum(tup_inserted) AS tup_inserted,
    sum(tup_updated) AS tup_updated,
    sum(tup_deleted) AS tup_deleted,
    sum(conflicts) AS conflicts
FROM pg_stat_database
WHERE NOT datname ~* '^template\d+'
GROUP BY database_name;
"""
# Block I/O totals (heap / index / TOAST / TOAST index) across all user
# tables; system catalog excluded.
STATIO = """
SELECT
    sum(heap_blks_read) AS heap_blocks_read,
    sum(heap_blks_hit) AS heap_blocks_hit,
    sum(idx_blks_read) AS index_blocks_read,
    sum(idx_blks_hit) AS index_blocks_hit,
    sum(toast_blks_read) AS toast_blocks_read,
    sum(toast_blks_hit) AS toast_blocks_hit,
    sum(tidx_blks_read) AS toastindex_blocks_read,
    sum(tidx_blks_hit) AS toastindex_blocks_hit
FROM
    pg_statio_all_tables
WHERE
    schemaname <> 'pg_catalog';
"""
# pg_stat_bgwriter is a single-row system view; fetch it whole and pick
# the checkpoint columns out in add_bgwriter_stats().
BGWRITER = 'SELECT * FROM pg_stat_bgwriter;'
# Current lock counts grouped per database and lock mode. Row shape
# (database_name, mode, count) is unpacked positionally in add_lock_stats().
DATABASE_LOCKS = """
SELECT
    pg_database.datname as database_name,
    mode,
    count(mode) AS count
FROM pg_locks
    INNER JOIN pg_database ON pg_database.oid = pg_locks.database
GROUP BY datname, mode
ORDER BY datname, mode;
"""
# Replication lag in bytes per standby: xlog locations are 'hi/lo' hex
# pairs, converted to bigints so the lag can be computed arithmetically.
REPLICATION = """
SELECT
    client_hostname,
    client_addr,
    state,
    sent_offset - (
        replay_offset - (sent_xlog - replay_xlog) * 255 * 16 ^ 6 ) AS byte_lag
FROM (
    SELECT
        client_addr, client_hostname, state,
        ('x' || lpad(split_part(sent_location, '/', 1), 8, '0'))::bit(32)::bigint AS sent_xlog,
        ('x' || lpad(split_part(replay_location, '/', 1), 8, '0'))::bit(32)::bigint AS replay_xlog,
        ('x' || lpad(split_part(sent_location, '/', 2), 8, '0'))::bit(32)::bigint AS sent_offset,
        ('x' || lpad(split_part(replay_location, '/', 2), 8, '0'))::bit(32)::bigint AS replay_offset
    FROM pg_stat_replication
) AS s;
"""
122 # LOCK_MAP = {'AccessExclusiveLock': 'lock_access_exclusive',
123 # 'AccessShareLock': 'lock_access_share',
124 # 'ExclusiveLock': 'lock_exclusive',
125 # 'RowExclusiveLock': 'lock_row_exclusive',
126 # 'RowShareLock': 'lock_row_share',
127 # 'ShareUpdateExclusiveLock': 'lock_update_exclusive_lock',
128 # 'ShareLock': 'lock_share',
129 # 'ShareRowExclusiveLock': 'lock_share_row_exclusive',
130 # 'SIReadLock': 'lock_si_read'}
# The nine PostgreSQL lock modes charted per database (see the LOCK_MAP
# comment above for the original id mapping).
LOCK_TYPES = [
    'ExclusiveLock',
    'RowShareLock',
    'SIReadLock',
    'ShareUpdateExclusiveLock',
    'AccessExclusiveLock',
    'AccessShareLock',
    'ShareRowExclusiveLock',
    'ShareLock',
    'RowExclusiveLock',
]
# Chart display order. The db_stat_* entries are templates that
# Service._create_definitions() instantiates once per discovered database.
ORDER = ['db_stat_transactions', 'db_stat_tuple_read', 'db_stat_tuple_returned', 'db_stat_tuple_write',
         'backend_process', 'index_count', 'index_size', 'table_count', 'table_size', 'wal', 'operations_heap',
         'operations_index', 'operations_toast', 'operations_toast_index', 'background_writer']
# Chart definitions. Keys starting with 'db_stat' are per-database
# templates: _add_database_stat_chart() prefixes their titles, contexts and
# dimension ids with the database name.
CHARTS = {
    'db_stat_transactions': {
        'options': [None, ' Transactions', 'Count', ' database statistics', '.db_stat_transactions', 'line'],
        'lines': [
            ['db_stat_xact_commit', 'Committed', 'absolute'],
            ['db_stat_xact_rollback', 'Rolled Back', 'absolute']
        ]},
    'db_stat_tuple_read': {
        'options': [None, ' Tuple read', 'Count', ' database statistics', '.db_stat_tuple_read', 'line'],
        'lines': [
            ['db_stat_blks_read', 'Disk', 'absolute'],
            ['db_stat_blks_hit', 'Cache', 'absolute']
        ]},
    'db_stat_tuple_returned': {
        'options': [None, ' Tuple returned', 'Count', ' database statistics', '.db_stat_tuple_returned', 'line'],
        'lines': [
            ['db_stat_tup_returned', 'Sequential', 'absolute'],
            ['db_stat_tup_fetched', 'Bitmap', 'absolute']
        ]},
    'db_stat_tuple_write': {
        'options': [None, ' Tuple write', 'Count', ' database statistics', '.db_stat_tuple_write', 'line'],
        'lines': [
            ['db_stat_tup_inserted', 'Inserted', 'absolute'],
            ['db_stat_tup_updated', 'Updated', 'absolute'],
            ['db_stat_tup_deleted', 'Deleted', 'absolute'],
            ['db_stat_conflicts', 'Conflicts', 'absolute']
        ]},
    'backend_process': {
        'options': [None, 'Backend processes', 'Count', 'Backend processes', 'postgres.backend_process', 'line'],
        'lines': [
            ['backend_process_active', 'Active', 'absolute'],
            ['backend_process_idle', 'Idle', 'absolute']
        ]},
    'index_count': {
        'options': [None, 'Total index', 'Count', 'Index', 'postgres.index_count', 'line'],
        'lines': [
            ['index_count', 'Total index', 'absolute']
        ]},
    'index_size': {
        'options': [None, 'Index size', 'MB', 'Index', 'postgres.index_size', 'line'],
        'lines': [
            ['index_size', 'Size', 'absolute', 1, 1024 * 1024]
        ]},
    'table_count': {
        'options': [None, 'Total table', 'Count', 'Table', 'postgres.table_count', 'line'],
        'lines': [
            ['table_count', 'Total table', 'absolute']
        ]},
    'table_size': {
        'options': [None, 'Table size', 'MB', 'Table', 'postgres.table_size', 'line'],
        'lines': [
            ['table_size', 'Size', 'absolute', 1, 1024 * 1024]
        ]},
    'wal': {
        'options': [None, 'WAL stats', 'Files', 'WAL', 'postgres.wal', 'line'],
        'lines': [
            ['wal_total', 'Total', 'absolute'],
            ['wal_ready', 'Ready', 'absolute'],
            ['wal_done', 'Done', 'absolute']
        ]},
    'operations_heap': {
        'options': [None, 'Heap', 'iops', 'IO Operations', 'postgres.operations_heap', 'line'],
        'lines': [
            ['operations_heap_blocks_read', 'Read', 'absolute'],
            ['operations_heap_blocks_hit', 'Hit', 'absolute']
        ]},
    'operations_index': {
        'options': [None, 'Index', 'iops', 'IO Operations', 'postgres.operations_index', 'line'],
        'lines': [
            ['operations_index_blocks_read', 'Read', 'absolute'],
            ['operations_index_blocks_hit', 'Hit', 'absolute']
        ]},
    'operations_toast': {
        'options': [None, 'Toast', 'iops', 'IO Operations', 'postgres.operations_toast', 'line'],
        'lines': [
            ['operations_toast_blocks_read', 'Read', 'absolute'],
            ['operations_toast_blocks_hit', 'Hit', 'absolute']
        ]},
    'operations_toast_index': {
        'options': [None, 'Toast index', 'iops', 'IO Operations', 'postgres.operations_toast_index', 'line'],
        'lines': [
            ['operations_toastindex_blocks_read', 'Read', 'absolute'],
            ['operations_toastindex_blocks_hit', 'Hit', 'absolute']
        ]},
    'background_writer': {
        'options': [None, 'Checkpoints', 'Count', 'Background Writer', 'postgres.background_writer', 'line'],
        'lines': [
            ['background_writer_scheduled', 'Scheduled', 'absolute'],
            ['background_writer_requested', 'Requested', 'absolute']
        ]}
}
class Service(SimpleService):
    def __init__(self, configuration=None, name=None):
        """netdata service collecting PostgreSQL statistics over psycopg2."""
        super(self.__class__, self).__init__(configuration=configuration, name=name)
        self.order = ORDER[:]
        # Deep-copy the templates: _create_definitions() adds per-database
        # chart entries, which must never pollute the shared module-level
        # CHARTS dict (aliasing it would leak charts across instances).
        self.definitions = deepcopy(CHARTS)
        self.configuration = configuration
        self.connection = None
        self.data = {}       # metrics reported to netdata this poll
        self.old_data = {}   # previous raw values, for derive deltas
        self.databases = set()
252 params = dict(user='postgres',
257 params.update(self.configuration)
258 self.connection = psycopg2.connect(**params)
259 self.connection.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
260 self.connection.set_session(readonly=True)
265 self.discover_databases()
266 self._create_definitions()
268 except Exception as e:
272 def _create_definitions(self):
273 for database_name in self.databases:
274 self.databases.add(database_name)
275 for chart_template_name in list(CHARTS):
276 if chart_template_name.startswith('db_stat'):
277 self._add_database_stat_chart(chart_template_name, database_name)
278 self._add_database_lock_chart(database_name)
280 def discover_databases(self):
281 cursor = self.connection.cursor()
284 FROM pg_stat_database
285 WHERE NOT datname ~* '^template\d+'
287 self.databases = set(r[0] for r in cursor)
290 def _add_database_stat_chart(self, chart_template_name, database_name):
291 chart_template = CHARTS[chart_template_name]
292 chart_name = "{}_{}".format(database_name, chart_template_name)
293 if chart_name not in self.order:
294 self.order.insert(0, chart_name)
295 name, title, units, family, context, chart_type = chart_template['options']
296 self.definitions[chart_name] = {
299 database_name + title,
301 database_name + family,
302 database_name + context,
307 self.definitions[chart_name]['lines'] = []
308 for line in deepcopy(chart_template['lines']):
309 line[0] = "{}_{}".format(database_name, line[0])
310 self.definitions[chart_name]['lines'].append(line)
312 def _add_database_lock_chart(self, database_name):
313 chart_name = "{}_locks".format(database_name)
314 if chart_name not in self.order:
315 self.order.insert(0, chart_name)
316 self.definitions[chart_name] = dict(
320 database_name + ' locks',
322 database_name + ' database statistics',
323 database_name + '.locks',
329 for lock_type in LOCK_TYPES:
330 lock_id = "{}_{}".format(database_name, lock_type.lower())
331 label = re.sub("([a-z])([A-Z])", "\g<1> \g<2>", lock_type)
332 self.definitions[chart_name]['lines'].append([lock_id, label, 'absolute'])
337 cursor = self.connection.cursor(cursor_factory=DictCursor)
338 self.add_stats(cursor)
341 self.connection.close()
345 def add_stats(self, cursor):
346 self.add_database_stats(cursor)
347 self.add_backend_stats(cursor)
348 self.add_index_stats(cursor)
349 self.add_table_stats(cursor)
350 self.add_lock_stats(cursor)
351 self.add_statio_stats(cursor)
352 self.add_bgwriter_stats(cursor)
354 # self.add_replication_stats(cursor)
356 # add_wal_metrics needs superuser to get directory listings
357 # if self.config.get('superuser', True):
358 # self.add_wal_stats(cursor)
360 def add_database_stats(self, cursor):
361 cursor.execute(DATABASE)
363 database_name = row.get('database_name')
364 self.add_derive_value('db_stat_xact_commit', prefix=database_name, value=int(row.get('xact_commit', 0)))
365 self.add_derive_value('db_stat_xact_rollback', prefix=database_name, value=int(row.get('xact_rollback', 0)))
366 self.add_derive_value('db_stat_blks_read', prefix=database_name, value=int(row.get('blks_read', 0)))
367 self.add_derive_value('db_stat_blks_hit', prefix=database_name, value=int(row.get('blks_hit', 0)))
368 self.add_derive_value('db_stat_tup_returned', prefix=database_name, value=int(row.get('tup_returned', 0)))
369 self.add_derive_value('db_stat_tup_fetched', prefix=database_name, value=int(row.get('tup_fetched', 0)))
370 self.add_derive_value('db_stat_tup_inserted', prefix=database_name, value=int(row.get('tup_inserted', 0)))
371 self.add_derive_value('db_stat_tup_updated', prefix=database_name, value=int(row.get('tup_updated', 0)))
372 self.add_derive_value('db_stat_tup_deleted', prefix=database_name, value=int(row.get('tup_deleted', 0)))
373 self.add_derive_value('db_stat_conflicts', prefix=database_name, value=int(row.get('conflicts', 0)))
375 def add_backend_stats(self, cursor):
376 cursor.execute(BACKENDS)
377 temp = cursor.fetchone()
379 self.data['backend_process_active'] = int(temp.get('backends_active', 0))
380 self.data['backend_process_idle'] = int(temp.get('backends_idle', 0))
382 def add_index_stats(self, cursor):
383 cursor.execute(INDEX_STATS)
384 temp = cursor.fetchone()
385 self.data['index_count'] = int(temp.get('indexes', 0))
386 self.data['index_size'] = int(temp.get('size_indexes', 0))
388 def add_table_stats(self, cursor):
389 cursor.execute(TABLE_STATS)
390 temp = cursor.fetchone()
391 self.data['table_count'] = int(temp.get('relations', 0))
392 self.data['table_size'] = int(temp.get('size_relations', 0))
394 def add_lock_stats(self, cursor):
395 cursor.execute(DATABASE_LOCKS)
396 # First zero out all current lock values.
397 for database_name in self.databases:
398 for lock_type in LOCK_TYPES:
399 lock_id = "{}_{}".format(database_name, lock_type.lower())
400 self.data[lock_id] = 0
402 # Now populate those that have current locks
404 database_name, lock_type, lock_count = row
405 lock_id = "{}_{}".format(database_name, lock_type.lower())
406 self.data[lock_id] = lock_count
408 def add_wal_stats(self, cursor):
409 cursor.execute(ARCHIVE)
410 temp = cursor.fetchone()
411 self.add_derive_value('wal_total', int(temp.get('file_count', 0)))
412 self.add_derive_value('wal_ready', int(temp.get('ready_count', 0)))
413 self.add_derive_value('wal_done', int(temp.get('done_count', 0)))
415 def add_statio_stats(self, cursor):
416 cursor.execute(STATIO)
417 temp = cursor.fetchone()
418 self.add_derive_value('operations_heap_blocks_read', int(temp.get('heap_blocks_read', 0)))
419 self.add_derive_value('operations_heap_blocks_hit', int(temp.get('heap_blocks_hit', 0)))
420 self.add_derive_value('operations_index_blocks_read', int(temp.get('index_blocks_read', 0)))
421 self.add_derive_value('operations_index_blocks_hit', int(temp.get('index_blocks_hit', 0)))
422 self.add_derive_value('operations_toast_blocks_read', int(temp.get('toast_blocks_read', 0)))
423 self.add_derive_value('operations_toast_blocks_hit', int(temp.get('toast_blocks_hit', 0)))
424 self.add_derive_value('operations_toastindex_blocks_read', int(temp.get('toastindex_blocks_read', 0)))
425 self.add_derive_value('operations_toastindex_blocks_hit', int(temp.get('toastindex_blocks_hit', 0)))
427 def add_bgwriter_stats(self, cursor):
428 cursor.execute(BGWRITER)
429 temp = cursor.fetchone()
431 self.add_derive_value('background_writer_scheduled', temp.get('checkpoints_timed', 0))
432 self.add_derive_value('background_writer_requested', temp.get('checkpoints_requests', 0))
434 def add_derive_value(self, key, value, prefix=None):
436 key = "{}_{}".format(prefix, key)
437 if key not in self.old_data.keys():
440 self.data[key] = value - self.old_data[key]
442 self.old_data[key] = value
446 def add_replication_stats(self, cursor):
447 cursor.execute(REPLICATION)
448 temp = cursor.fetchall()
450 self.add_gauge_value('Replication/%s' % row.get('client_addr', 'Unknown'),
452 int(row.get('byte_lag', 0)))