1 # -*- coding: utf-8 -*-
2 # Description: mongodb netdata python.d module
5 from base import SimpleService
6 from copy import deepcopy
7 from datetime import datetime
8 from sys import exc_info
11 from pymongo import MongoClient, ASCENDING, DESCENDING
12 from pymongo.errors import PyMongoError
17 # default module values (can be overridden per job in `config`)
# NOTE(review): the body of this helper and the `DEFAULT_METRICS = [` opener are
# elided in this view.  Charts below divide several of these values by 100, so this
# presumably scales a float x100 for integer storage -- TODO confirm in full file.
def multiply_by_100(value):

# serverStatus metrics collected on every run:
# (dotted path inside serverStatus, dimension id or None to reuse the last path
#  segment, optional post-processing callable).
('opcounters.delete', None, None),
('opcounters.update', None, None),
('opcounters.insert', None, None),
('opcounters.query', None, None),
('opcounters.getmore', None, None),
('globalLock.activeClients.readers', 'activeClients_readers', None),
('globalLock.activeClients.writers', 'activeClients_writers', None),
('connections.available', 'connections_available', None),
('connections.current', 'connections_current', None),
('mem.mapped', None, None),
('mem.resident', None, None),
('mem.virtual', None, None),
('globalLock.currentQueue.readers', 'currentQueue_readers', None),
('globalLock.currentQueue.writers', 'currentQueue_writers', None),
('asserts.msg', None, None),
('asserts.regular', None, None),
('asserts.user', None, None),
('asserts.warning', None, None),
('extra_info.page_faults', None, None),
('metrics.record.moves', None, None),
# stored x100; the matching chart lines use divisor 100 for 2-decimal precision
('backgroundFlushing.average_ms', None, multiply_by_100),
('backgroundFlushing.last_ms', None, multiply_by_100),
('backgroundFlushing.flushes', None, multiply_by_100),
('metrics.cursor.timedOut', None, None),
('metrics.cursor.open.total', 'cursor_total', None),
('metrics.cursor.open.noTimeout', None, None),
('cursors.timedOut', None, None),
# same dimension id as metrics.cursor.open.total -- presumably the legacy name of
# the same counter, so only one of the two paths exists per server; verify.
('cursors.totalOpen', 'cursor_total', None)
71 ('dur.commits', None, None),
72 ('dur.journaledMB', None, multiply_by_100)
76 ('wiredTiger.concurrentTransactions.read.available', 'wiredTigerRead_available', None),
77 ('wiredTiger.concurrentTransactions.read.out', 'wiredTigerRead_out', None),
78 ('wiredTiger.concurrentTransactions.write.available', 'wiredTigerWrite_available', None),
79 ('wiredTiger.concurrentTransactions.write.out', 'wiredTigerWrite_out', None),
80 ('wiredTiger.cache.bytes currently in the cache', None, None),
81 ('wiredTiger.cache.tracked dirty bytes in the cache', None, None),
82 ('wiredTiger.cache.maximum bytes configured', None, None),
83 ('wiredTiger.cache.unmodified pages evicted', 'unmodified', None),
84 ('wiredTiger.cache.modified pages evicted', 'modified', None)
88 ('tcmalloc.generic.current_allocated_bytes', None, None),
89 ('tcmalloc.generic.heap_size', None, None),
90 ('tcmalloc.tcmalloc.central_cache_free_bytes', None, None),
91 ('tcmalloc.tcmalloc.current_total_thread_cache_bytes', None, None),
92 ('tcmalloc.tcmalloc.pageheap_free_bytes', None, None),
93 ('tcmalloc.tcmalloc.pageheap_unmapped_bytes', None, None),
94 ('tcmalloc.tcmalloc.thread_cache_free_bytes', None, None),
95 ('tcmalloc.tcmalloc.transfer_cache_free_bytes', None, None)
# Per-command counters -- appended when serverStatus['metrics'] has 'commands'.
# (dotted path, dimension id, post-processing)
('metrics.commands.count.total', 'count_total', None),
('metrics.commands.createIndexes.total', 'createIndexes_total', None),
('metrics.commands.delete.total', 'delete_total', None),
('metrics.commands.eval.total', 'eval_total', None),
('metrics.commands.findAndModify.total', 'findAndModify_total', None),
('metrics.commands.insert.total', 'insert_total', None),
# FIX: was a duplicate of metrics.commands.delete.total; the command_total_rate
# chart has an 'update_total' dimension that was never collected.
('metrics.commands.update.total', 'update_total', None),
('metrics.commands.count.failed', 'count_failed', None),
('metrics.commands.createIndexes.failed', 'createIndexes_failed', None),
('metrics.commands.delete.failed', 'delete_failed', None),
('metrics.commands.eval.failed', 'eval_failed', None),
('metrics.commands.findAndModify.failed', 'findAndModify_failed', None),
('metrics.commands.insert.failed', 'insert_failed', None),
# FIX: was a duplicate of metrics.commands.delete.failed; the command_failed_rate
# chart has an 'update_failed' dimension that was never collected.
('metrics.commands.update.failed', 'update_failed', None)
116 ('locks.Collection.acquireCount.R', 'Collection_R', None),
117 ('locks.Collection.acquireCount.r', 'Collection_r', None),
118 ('locks.Collection.acquireCount.W', 'Collection_W', None),
119 ('locks.Collection.acquireCount.w', 'Collection_w', None),
120 ('locks.Database.acquireCount.R', 'Database_R', None),
121 ('locks.Database.acquireCount.r', 'Database_r', None),
122 ('locks.Database.acquireCount.W', 'Database_W', None),
123 ('locks.Database.acquireCount.w', 'Database_w', None),
124 ('locks.Global.acquireCount.R', 'Global_R', None),
125 ('locks.Global.acquireCount.r', 'Global_r', None),
126 ('locks.Global.acquireCount.W', 'Global_W', None),
127 ('locks.Global.acquireCount.w', 'Global_w', None),
128 ('locks.Metadata.acquireCount.R', 'Metadata_R', None),
129 ('locks.Metadata.acquireCount.w', 'Metadata_w', None),
130 ('locks.oplog.acquireCount.r', 'oplog_r', None),
131 ('locks.oplog.acquireCount.w', 'oplog_w', None)
# Chart display order.  create_charts_() prunes entries whose backing metrics the
# connected server does not expose, and appends per-database / replica-set charts.
ORDER = [
    # throughput
    'read_operations',
    'write_operations',
    'active_clients',
    # database performance
    'journaling_transactions',
    'journaling_volume',
    'background_flush_average',
    'background_flush_last',
    'background_flush_rate',
    'wiredtiger_read',
    'wiredtiger_write',
    'cursors',
    # resource utilization / saturation
    'connections',
    'memory',
    'page_faults',
    'queued_requests',
    'record_moves',
    'wiredtiger_cache',
    'wiredtiger_pages_evicted',
    'asserts',
    # locks
    'locks_collection',
    'locks_database',
    'locks_global',
    'locks_metadata',
    'locks_oplog',
    # storage / allocator / commands
    'dbstats_objects',
    'tcmalloc_generic',
    'tcmalloc_metrics',
    'command_total_rate',
    'command_failed_rate',
]
    # NOTE(review): interior of the CHARTS dict; the `CHARTS = {` opener, some
    # chart-key lines and the 'lines': [ / ]} delimiters are elided in this view.
    # Schema per chart:
    #   'options': [name, title, units, family, context, chart_type]
    #   'lines':   [[dimension_id, display_name, algorithm, mult, div], ...]
        'options': [None, 'Received read requests', 'requests/s', 'throughput metrics',
                    'mongodb.read_operations', 'line'],
            ['query', None, 'incremental'],
            ['getmore', None, 'incremental']
    'write_operations': {
        'options': [None, 'Received write requests', 'requests/s', 'throughput metrics',
                    'mongodb.write_operations', 'line'],
            ['insert', None, 'incremental'],
            ['update', None, 'incremental'],
            ['delete', None, 'incremental']
        'options': [None, 'Clients with read or write operations in progress or queued', 'clients',
                    'throughput metrics', 'mongodb.active_clients', 'line'],
            ['activeClients_readers', 'readers', 'absolute'],
            ['activeClients_writers', 'writers', 'absolute']
    'journaling_transactions': {
        'options': [None, 'Transactions that have been written to the journal', 'commits',
                    'database performance', 'mongodb.journaling_transactions', 'line'],
            ['commits', None, 'absolute']
    'journaling_volume': {
        'options': [None, 'Volume of data written to the journal', 'MB', 'database performance',
                    'mongodb.journaling_volume', 'line'],
            # divisor 100 undoes the multiply_by_100 applied at collection time
            ['journaledMB', 'volume', 'absolute', 1, 100]
    'background_flush_average': {
        'options': [None, 'Average time taken by flushes to execute', 'ms', 'database performance',
                    'mongodb.background_flush_average', 'line'],
            ['average_ms', 'time', 'absolute', 1, 100]
    'background_flush_last': {
        'options': [None, 'Time taken by the last flush operation to execute', 'ms', 'database performance',
                    'mongodb.background_flush_last', 'line'],
            ['last_ms', 'time', 'absolute', 1, 100]
    'background_flush_rate': {
        'options': [None, 'Flushes rate', 'flushes', 'database performance', 'mongodb.background_flush_rate', 'line'],
            ['flushes', 'flushes', 'incremental', 1, 1]
        'options': [None, 'Read tickets in use and remaining', 'tickets', 'database performance',
                    'mongodb.wiredtiger_read', 'stacked'],
            ['wiredTigerRead_available', 'available', 'absolute', 1, 1],
            ['wiredTigerRead_out', 'inuse', 'absolute', 1, 1]
    'wiredtiger_write': {
        'options': [None, 'Write tickets in use and remaining', 'tickets', 'database performance',
                    'mongodb.wiredtiger_write', 'stacked'],
            ['wiredTigerWrite_available', 'available', 'absolute', 1, 1],
            ['wiredTigerWrite_out', 'inuse', 'absolute', 1, 1]
216 'options': [None, 'Currently openned cursors, cursors with timeout disabled and timed out cursors',
217 'cursors', 'database performance', 'mongodb.cursors', 'stacked'],
219 ['cursor_total', 'openned', 'absolute', 1, 1],
220 ['noTimeout', None, 'absolute', 1, 1],
221 ['timedOut', None, 'incremental', 1, 1]
        # NOTE(review): CHARTS interior continues; structural delimiters elided in
        # this view.
        'options': [None, 'Currently connected clients and unused connections', 'connections',
                    'resource utilization', 'mongodb.connections', 'stacked'],
            ['connections_available', 'unused', 'absolute', 1, 1],
            ['connections_current', 'connected', 'absolute', 1, 1]
        'options': [None, 'Memory metrics', 'MB', 'resource utilization', 'mongodb.memory', 'stacked'],
            ['virtual', None, 'absolute', 1, 1],
            ['resident', None, 'absolute', 1, 1],
            # 'nonmapped' is computed in _get_data() as virtual - mappedWithJournal
            ['nonmapped', None, 'absolute', 1, 1],
            ['mapped', None, 'absolute', 1, 1]
        'options': [None, 'Number of times MongoDB had to fetch data from disk', 'request/s',
                    'resource utilization', 'mongodb.page_faults', 'line'],
            ['page_faults', None, 'incremental', 1, 1]
245 'options': [None, 'Currently queued read and wrire requests', 'requests', 'resource saturation',
246 'mongodb.queued_requests', 'line'],
248 ['currentQueue_readers', 'readers', 'absolute', 1, 1],
249 ['currentQueue_writers', 'writers', 'absolute', 1, 1]
        # NOTE(review): CHARTS interior continues; structural delimiters elided in
        # this view.
        'options': [None, 'Number of times documents had to be moved on-disk', 'number',
                    'resource saturation', 'mongodb.record_moves', 'line'],
            ['moves', None, 'incremental', 1, 1]
        'options': [None, 'Number of message, warning, regular, corresponding to errors generated'
                    ' by users assertions raised', 'number', 'errors (asserts)', 'mongodb.asserts', 'line'],
            ['msg', None, 'incremental', 1, 1],
            ['warning', None, 'incremental', 1, 1],
            ['regular', None, 'incremental', 1, 1],
            ['user', None, 'incremental', 1, 1]
    'wiredtiger_cache': {
        'options': [None, 'The percentage of the wiredTiger cache that is in use and cache with dirty bytes',
                    'percent', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
            # values are computed in _get_data() as percent * 1000; divisor restores them
            ['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000],
            ['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000]
    'wiredtiger_pages_evicted': {
        'options': [None, 'Pages evicted from the cache',
                    'pages', 'resource utilization', 'mongodb.wiredtiger_pages_evicted', 'stacked'],
            ['unmodified', None, 'absolute', 1, 1],
            ['modified', None, 'absolute', 1, 1]
        'options': [None, 'Number of documents in the database among all the collections', 'documents',
                    'storage size metrics', 'mongodb.dbstats_objects', 'stacked'],
    # per-database dimensions are appended to dbstats_objects in create_charts_()
    'tcmalloc_generic': {
        'options': [None, 'Tcmalloc generic metrics', 'MB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
            # divisor converts bytes to MB
            ['current_allocated_bytes', 'allocated', 'absolute', 1, 1048576],
            ['heap_size', 'heap_size', 'absolute', 1, 1048576]
    'tcmalloc_metrics': {
        'options': [None, 'Tcmalloc metrics', 'KB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
            # divisor converts bytes to KB
            ['central_cache_free_bytes', 'central_cache_free', 'absolute', 1, 1024],
            ['current_total_thread_cache_bytes', 'current_total_thread_cache', 'absolute', 1, 1024],
            ['pageheap_free_bytes', 'pageheap_free', 'absolute', 1, 1024],
            ['pageheap_unmapped_bytes', 'pageheap_unmapped', 'absolute', 1, 1024],
            ['thread_cache_free_bytes', 'thread_cache_free', 'absolute', 1, 1024],
            ['transfer_cache_free_bytes', 'transfer_cache_free', 'absolute', 1, 1024]
    # dimension ids below must match the ids declared in the COMMANDS metric table
    'command_total_rate': {
        'options': [None, 'Commands total rate', 'commands/s', 'commands', 'mongodb.command_total_rate', 'stacked'],
            ['count_total', 'count', 'incremental', 1, 1],
            ['createIndexes_total', 'createIndexes', 'incremental', 1, 1],
            ['delete_total', 'delete', 'incremental', 1, 1],
            ['eval_total', 'eval', 'incremental', 1, 1],
            ['findAndModify_total', 'findAndModify', 'incremental', 1, 1],
            ['insert_total', 'insert', 'incremental', 1, 1],
            ['update_total', 'update', 'incremental', 1, 1]
    'command_failed_rate': {
        'options': [None, 'Commands failed rate', 'commands/s', 'commands', 'mongodb.command_failed_rate', 'stacked'],
            ['count_failed', 'count', 'incremental', 1, 1],
            ['createIndexes_failed', 'createIndexes', 'incremental', 1, 1],
            ['delete_failed', 'delete', 'incremental', 1, 1],
            ['eval_failed', 'eval', 'incremental', 1, 1],
            ['findAndModify_failed', 'findAndModify', 'incremental', 1, 1],
            ['insert_failed', 'insert', 'incremental', 1, 1],
            ['update_failed', 'update', 'incremental', 1, 1]
    # lock charts: R = shared, W = exclusive, r = intent shared, w = intent exclusive
    'locks_collection': {
        'options': [None, 'Collection lock. Number of times the lock was acquired in the specified mode',
                    'locks', 'locks metrics', 'mongodb.locks_collection', 'stacked'],
            ['Collection_R', 'shared', 'incremental'],
            ['Collection_W', 'exclusive', 'incremental'],
            ['Collection_r', 'intent_shared', 'incremental'],
            ['Collection_w', 'intent_exclusive', 'incremental']
        'options': [None, 'Database lock. Number of times the lock was acquired in the specified mode',
                    'locks', 'locks metrics', 'mongodb.locks_database', 'stacked'],
            ['Database_R', 'shared', 'incremental'],
            ['Database_W', 'exclusive', 'incremental'],
            ['Database_r', 'intent_shared', 'incremental'],
            ['Database_w', 'intent_exclusive', 'incremental']
        'options': [None, 'Global lock. Number of times the lock was acquired in the specified mode',
                    'locks', 'locks metrics', 'mongodb.locks_global', 'stacked'],
            ['Global_R', 'shared', 'incremental'],
            ['Global_W', 'exclusive', 'incremental'],
            ['Global_r', 'intent_shared', 'incremental'],
            ['Global_w', 'intent_exclusive', 'incremental']
        'options': [None, 'Metadata lock. Number of times the lock was acquired in the specified mode',
                    'locks', 'locks metrics', 'mongodb.locks_metadata', 'stacked'],
            ['Metadata_R', 'shared', 'incremental'],
            ['Metadata_w', 'intent_exclusive', 'incremental']
        'options': [None, 'Lock on the oplog. Number of times the lock was acquired in the specified mode',
                    'locks', 'locks metrics', 'mongodb.locks_oplog', 'stacked'],
361 ['Metadata_r', 'intent_shared', 'incremental'],
362 ['Metadata_w', 'intent_exclusive', 'incremental']
class Service(SimpleService):
    def __init__(self, configuration=None, name=None):
        """Read the job configuration and set up per-job state (no I/O here)."""
        SimpleService.__init__(self, configuration=configuration, name=name)
        # Work on copies so per-job chart tweaks never leak into the module globals.
        self.order = list(ORDER)
        self.definitions = deepcopy(CHARTS)
        conf = self.configuration
        self.user = conf.get('user')
        self.password = conf.get('pass')
        self.host = conf.get('host', '127.0.0.1')
        self.port = conf.get('port', 27017)
        self.timeout = conf.get('timeout', 100)
        self.metrics_to_collect = deepcopy(DEFAULT_METRICS)
        self.connection = None   # established later via _create_connection()
        self.do_replica = None   # set when serverStatus reports a 'repl' section
        self.databases = []      # filled by _create_connection()
        # NOTE(review): check()'s def line, the pymongo-availability guard and the
        # try/return framing are elided in this view; statements reproduced as-is.
        self.error('Pymongo module is needed to use mongodb.chart.py')
        # Connect and probe serverStatus once; error is a string on failure.
        self.connection, server_status, error = self._create_connection()
        # Pick the optional metric groups this server actually exposes.
        self.build_metrics_to_collect_(server_status)
        except (LookupError, SyntaxError, AttributeError):
            self.error('Type: %s, error: %s' % (str(exc_info()[0]), str(exc_info()[1])))
        # Build the final chart set (prune unsupported, add per-db/replica charts).
        self.create_charts_(server_status)
402 def build_metrics_to_collect_(self, server_status):
404 self.do_replica = 'repl' in server_status
405 if 'dur' in server_status:
406 self.metrics_to_collect.extend(DUR)
407 if 'tcmalloc' in server_status:
408 self.metrics_to_collect.extend(TCMALLOC)
409 if 'commands' in server_status['metrics']:
410 self.metrics_to_collect.extend(COMMANDS)
411 if 'wiredTiger' in server_status:
412 self.metrics_to_collect.extend(WIREDTIGER)
413 if 'Collection' in server_status['locks']:
414 self.metrics_to_collect.extend(LOCKS)
    def create_charts_(self, server_status):
        # Prune charts whose backing serverStatus sections are absent, then add
        # per-database dbstats charts and (on replica sets) replication charts.
        # NOTE(review): several structural lines (blank lines, 'lines': [ / ]}
        # delimiters, the `if self.do_replica:` framing around the nested helpers,
        # and return/`lines = list()` lines) are elided in this view.
        if 'dur' not in server_status:
            self.order.remove('journaling_transactions')
            self.order.remove('journaling_volume')
        if 'backgroundFlushing' not in server_status:
            self.order.remove('background_flush_average')
            self.order.remove('background_flush_last')
            self.order.remove('background_flush_rate')
        if 'wiredTiger' not in server_status:
            # NOTE(review): 'wiredtiger_pages_evicted' is not removed here even though
            # its dimensions come from WIREDTIGER metrics -- likely an oversight; the
            # chart would stay empty on non-WiredTiger servers. TODO confirm and fix.
            self.order.remove('wiredtiger_write')
            self.order.remove('wiredtiger_read')
            self.order.remove('wiredtiger_cache')
        if 'tcmalloc' not in server_status:
            self.order.remove('tcmalloc_generic')
            self.order.remove('tcmalloc_metrics')
        if 'commands' not in server_status['metrics']:
            self.order.remove('command_total_rate')
            self.order.remove('command_failed_rate')
        if 'Collection' not in server_status['locks']:
            self.order.remove('locks_collection')
            self.order.remove('locks_database')
            self.order.remove('locks_global')
            self.order.remove('locks_metadata')
        if 'oplog' not in server_status['locks']:
            self.order.remove('locks_oplog')
        # One dbstats chart per database, plus one dimension per database on the
        # shared dbstats_objects chart.
        for dbase in self.databases:
            self.order.append('_'.join([dbase, 'dbstats']))
            self.definitions['_'.join([dbase, 'dbstats'])] = {
                'options': [None, '%s: size of all documents, indexes, extents' % dbase, 'KB',
                            'storage size metrics', 'mongodb.dbstats', 'line'],
                    ['_'.join([dbase, 'dataSize']), 'documents', 'absolute', 1, 1024],
                    ['_'.join([dbase, 'indexSize']), 'indexes', 'absolute', 1, 1024],
                    ['_'.join([dbase, 'storageSize']), 'extents', 'absolute', 1, 1024]
            self.definitions['dbstats_objects']['lines'].append(['_'.join([dbase, 'objects']), dbase, 'absolute'])

        # Helper: one dimension per host, values stored x1000 (divisor 1000).
        def create_lines(hosts, string):
                dim_id = '_'.join([host, string])
                lines.append([dim_id, host, 'absolute', 1, 1000])

        # Helper: one dimension per replica-set state.
        # NOTE(review): 'host' is not a parameter here -- it is resolved at call time
        # from the `for host in all_hosts:` loop below (fragile closure; works only
        # because the helper is called inside that loop).
        def create_state_lines(states):
            for state, description in states:
                dim_id = '_'.join([host, 'state', state])
                lines.append([dim_id, description, 'absolute', 1, 1])

        all_hosts = server_status['repl']['hosts']
        this_host = server_status['repl']['me']
        other_hosts = [host for host in all_hosts if host != this_host]

        # Create "oplog window" chart (needs read access to the 'local' database)
        if 'local' in self.databases:
            self.order.append('oplog_window')
            self.definitions['oplog_window'] = {
                'options': [None, 'Interval of time between the oldest and the latest entries in the oplog',
                            'seconds', 'replication and oplog', 'mongodb.oplog_window', 'line'],
                'lines': [['timeDiff', 'window', 'absolute', 1, 1000]]}
        # Create "heartbeat delay" chart
        self.order.append('heartbeat_delay')
        self.definitions['heartbeat_delay'] = {
            'options': [None, 'Time when last heartbeat was received'
                        ' from the replica set member (lastHeartbeatRecv)',
                        'seconds ago', 'replication and oplog', 'mongodb.replication_heartbeat_delay', 'stacked'],
            'lines': create_lines(other_hosts, 'heartbeat_lag')}
        # Create "optimedate delay" chart
        self.order.append('optimedate_delay')
        self.definitions['optimedate_delay'] = {
            'options': [None, 'Time when last entry from the oplog was applied (optimeDate)',
                        'seconds ago', 'replication and oplog', 'mongodb.replication_optimedate_delay', 'stacked'],
            'lines': create_lines(all_hosts, 'optimedate')}
        # Create "replica set members state" chart
        for host in all_hosts:
            chart_name = '_'.join([host, 'state'])
            self.order.append(chart_name)
            self.definitions[chart_name] = {
                'options': [None, 'Replica set member (%s) current state' % host, 'state',
                            'replication and oplog', 'mongodb.replication_state', 'line'],
                'lines': create_state_lines(REPLSET_STATES)}
    def _get_raw_data(self):
        # Aggregate the output of every mongo command into one dict; the
        # `or dict()` guards tolerate getters that return a falsy value on failure.
        # NOTE(review): the initialisation of raw_data is elided in this view.
        raw_data.update(self.get_serverstatus_() or dict())
        raw_data.update(self.get_dbstats_() or dict())
        raw_data.update(self.get_replsetgetstatus_() or dict())
        raw_data.update(self.get_getreplicationinfo_() or dict())
        # Empty dict is falsy: returns None when every getter failed.
        return raw_data or None
    def get_serverstatus_(self):
        # Run the serverStatus admin command; the surrounding try/except and the
        # return statements are elided in this view.
        raw_data['serverStatus'] = self.connection.admin.command('serverStatus')
    def get_dbstats_(self):
        # Nothing to collect when the database list could not be fetched at
        # check() time.  NOTE(review): the early return and try/except framing are
        # elided in this view.
        if not self.databases:
        raw_data['dbStats'] = dict()
        # One dbStats command per known database.
        for dbase in self.databases:
            raw_data['dbStats'][dbase] = self.connection[dbase].command('dbStats')
    def get_replsetgetstatus_(self):
        # Only meaningful on replica-set members (do_replica set in
        # build_metrics_to_collect_).  NOTE(review): early return and try/except
        # framing are elided in this view.
        if not self.do_replica:
        raw_data['replSetGetStatus'] = self.connection.admin.command('replSetGetStatus')
    def get_getreplicationinfo_(self):
        # Needs both replica membership and read access to the 'local' database.
        # NOTE(review): early return and try/except framing are elided in this view.
        if not (self.do_replica and 'local' in self.databases):
        raw_data['getReplicationInfo'] = dict()
        # Oldest and newest oplog entries (sorted on $natural) -- used by _get_data()
        # to compute the oplog window.
        raw_data['getReplicationInfo']['ASCENDING'] = self.connection.local.oplog.rs.find().sort(
            "$natural", ASCENDING).limit(1)[0]
        raw_data['getReplicationInfo']['DESCENDING'] = self.connection.local.oplog.rs.find().sort(
            "$natural", DESCENDING).limit(1)[0]
        # NOTE(review): interior of _get_data(); its def line, the raw_data guard,
        # to_netdata initialisation, try/except around the key walk, several
        # if-guards, continuation lines of the delta_calculation calls and the
        # final `return to_netdata` are elided in this view.
        raw_data = self._get_raw_data()
        serverStatus = raw_data['serverStatus']
        dbStats = raw_data.get('dbStats')
        replSetGetStatus = raw_data.get('replSetGetStatus')
        getReplicationInfo = raw_data.get('getReplicationInfo')
        # naive UTC timestamp, compared below against naive mongo datetimes
        utc_now = datetime.utcnow()

        # Walk each dotted path through the serverStatus document.
        for metric, new_name, function in self.metrics_to_collect:
            for key in metric.split('.'):
            # Only leaf values are stored; a dict here means the walk stopped early.
            if not isinstance(value, dict) and key:
                to_netdata[new_name or key] = value if not function else function(value)

        # Synthetic dimension for the memory chart.
        to_netdata['nonmapped'] = to_netdata['virtual'] - serverStatus['mem'].get('mappedWithJournal',
                                                                                  to_netdata['mapped'])
        # WiredTiger cache utilisation, stored as percent x1000 (chart divisor 1000).
        if to_netdata.get('maximum bytes configured'):
            maximum = to_netdata['maximum bytes configured']
            to_netdata['wiredTiger_percent_clean'] = int(to_netdata['bytes currently in the cache']
                                                         * 100 / maximum * 1000)
            to_netdata['wiredTiger_percent_dirty'] = int(to_netdata['tracked dirty bytes in the cache']
                                                         * 100 / maximum * 1000)

        # Per-database storage metrics (DBSTATS is a module constant not visible here).
        for dbase in dbStats:
            for metric in DBSTATS:
                key = '_'.join([dbase, metric])
                to_netdata[key] = dbStats[dbase][metric]

        # Replica-set member metrics.
        members = replSetGetStatus['members']
        # mongo reports the epoch for "never"; such timestamps are skipped below
        unix_epoch = datetime(1970, 1, 1, 0, 0)
        for member in members:
            if not member.get('self'):
                other_hosts.append(member)
            # Replica set time diff between current time and time when last entry from the oplog was applied
            if member['optimeDate'] != unix_epoch:
                member_optimedate = member['name'] + '_optimedate'
                to_netdata.update({member_optimedate: int(delta_calculation(delta=utc_now - member['optimeDate'],
            # Replica set members state
            member_state = member['name'] + '_state'
            # zero every state dimension, then set the active one to the state code
            for elem in REPLSET_STATES:
                to_netdata.update({'_'.join([member_state, state]): 0})
            to_netdata.update({'_'.join([member_state, str(member['state'])]): member['state']})
        # Heartbeat lag calculation
        for other in other_hosts:
            if other['lastHeartbeatRecv'] != unix_epoch:
                node = other['name'] + '_heartbeat_lag'
                to_netdata[node] = int(delta_calculation(delta=utc_now - other['lastHeartbeatRecv'],

        # Oplog window: time between the oldest and newest oplog entries, in ms.
        if getReplicationInfo:
            first_event = getReplicationInfo['ASCENDING']['ts'].as_datetime()
            last_event = getReplicationInfo['DESCENDING']['ts'].as_datetime()
            to_netdata['timeDiff'] = int(delta_calculation(delta=last_event - first_event, multiplier=1000))
    def _create_connection(self):
        """Connect to mongod/mongos and run an initial serverStatus probe.

        Returns (connection, server_status, None) on success, or
        (None, None, error_string) when the connection/probe fails.
        """
        conn_vars = {'host': self.host, 'port': self.port}
        # Only pass the timeout when the client supports server selection
        # (presumably pymongo 3+ -- verify against the pymongo version in use).
        if hasattr(MongoClient, 'server_selection_timeout'):
            conn_vars.update({'serverselectiontimeoutms': self.timeout})
        # NOTE(review): the try/else framing around the statements below is elided
        # in this view; reproduced as-is.
        connection = MongoClient(**conn_vars)
        if self.user and self.password:
            connection.admin.authenticate(name=self.user, password=self.password)
        # connection.admin.authenticate(name=self.user, mechanism='MONGODB-X509')
        server_status = connection.admin.command('serverStatus')
        except PyMongoError as error:
            return None, None, str(error)
        self.databases = connection.database_names()
        except PyMongoError as error:
            # Non-fatal: without the db list the dbStats charts are simply skipped.
            self.info('Can\'t collect databases: %s' % str(error))
        return connection, server_status, None
def delta_calculation(delta, multiplier=1):
    """Return *delta* (a timedelta) expressed in seconds, scaled by *multiplier*.

    Falls back to a manual computation when timedelta.total_seconds() is not
    available (Python < 2.7).
    """
    try:
        seconds = delta.total_seconds()
    except AttributeError:
        seconds = (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6
    return seconds * multiplier