1 # -*- coding: utf-8 -*-
2 # Description: mongodb netdata python.d module
5 from base import SimpleService
6 from copy import deepcopy
7 from datetime import datetime
8 from sys import exc_info
11 from pymongo import MongoClient, ASCENDING, DESCENDING
12 from pymongo.errors import PyMongoError
17 # default module values (can be overridden per job in `config`)
def multiply_by_100(value):
    """Scale a raw float metric by 100 before handing it to netdata.

    The matching chart dimensions use a divisor of 100 (see e.g. the
    'background_flush_average' chart), so two decimal places survive
    netdata's integer value pipeline.
    """
    return value * 100


# Metrics collected from the 'serverStatus' command on every run.
# Each entry is (dotted serverStatus path,
#                netdata dimension id or None to use the last path component,
#                optional transform applied to the raw value).
DEFAULT_METRICS = [
    ('opcounters.delete', None, None),
    ('opcounters.update', None, None),
    ('opcounters.insert', None, None),
    ('opcounters.query', None, None),
    ('opcounters.getmore', None, None),
    ('globalLock.activeClients.readers', 'activeClients_readers', None),
    ('globalLock.activeClients.writers', 'activeClients_writers', None),
    ('connections.available', 'connections_available', None),
    ('connections.current', 'connections_current', None),
    ('mem.mapped', None, None),
    ('mem.resident', None, None),
    ('mem.virtual', None, None),
    ('globalLock.currentQueue.readers', 'currentQueue_readers', None),
    ('globalLock.currentQueue.writers', 'currentQueue_writers', None),
    ('asserts.msg', None, None),
    ('asserts.regular', None, None),
    ('asserts.user', None, None),
    ('asserts.warning', None, None),
    ('extra_info.page_faults', None, None),
    ('metrics.record.moves', None, None),
    ('backgroundFlushing.average_ms', None, multiply_by_100),
    ('backgroundFlushing.last_ms', None, multiply_by_100),
    ('backgroundFlushing.flushes', None, multiply_by_100),
    ('metrics.cursor.timedOut', None, None),
    ('metrics.cursor.open.total', 'cursor_total', None),
    ('metrics.cursor.open.noTimeout', None, None),
    # Pre-2.6 servers expose cursor metrics under 'cursors' instead; both
    # paths deliberately map to the same 'cursor_total' dimension.
    ('cursors.timedOut', None, None),
    ('cursors.totalOpen', 'cursor_total', None)
]
# Journaling (durability) metrics; only collected when 'dur' is present in
# serverStatus (see build_metrics_to_collect_).
DUR = [
    ('dur.commits', None, None),
    ('dur.journaledMB', None, multiply_by_100)
]
# WiredTiger storage-engine metrics; only collected when 'wiredTiger' is
# present in serverStatus (see build_metrics_to_collect_).
WIREDTIGER = [
    ('wiredTiger.concurrentTransactions.read.available', 'wiredTigerRead_available', None),
    ('wiredTiger.concurrentTransactions.read.out', 'wiredTigerRead_out', None),
    ('wiredTiger.concurrentTransactions.write.available', 'wiredTigerWrite_available', None),
    ('wiredTiger.concurrentTransactions.write.out', 'wiredTigerWrite_out', None),
    ('wiredTiger.cache.bytes currently in the cache', None, None),
    ('wiredTiger.cache.tracked dirty bytes in the cache', None, None),
    ('wiredTiger.cache.maximum bytes configured', None, None),
    ('wiredTiger.cache.unmodified pages evicted', 'unmodified', None),
    ('wiredTiger.cache.modified pages evicted', 'modified', None)
]
# tcmalloc allocator metrics; only collected when 'tcmalloc' is present in
# serverStatus (see build_metrics_to_collect_).
TCMALLOC = [
    ('tcmalloc.generic.current_allocated_bytes', None, None),
    ('tcmalloc.generic.heap_size', None, None),
    ('tcmalloc.tcmalloc.central_cache_free_bytes', None, None),
    ('tcmalloc.tcmalloc.current_total_thread_cache_bytes', None, None),
    ('tcmalloc.tcmalloc.pageheap_free_bytes', None, None),
    ('tcmalloc.tcmalloc.pageheap_unmapped_bytes', None, None),
    ('tcmalloc.tcmalloc.thread_cache_free_bytes', None, None),
    ('tcmalloc.tcmalloc.transfer_cache_free_bytes', None, None)
]
# Per-command counters; only collected when 'commands' is present in
# serverStatus['metrics'] (see build_metrics_to_collect_).
# FIX: the 'delete' entries were duplicated and 'update' was never collected,
# leaving the 'update_total'/'update_failed' dimensions of the
# command_total_rate/command_failed_rate charts permanently empty.
COMMANDS = [
    ('metrics.commands.count.total', 'count_total', None),
    ('metrics.commands.createIndexes.total', 'createIndexes_total', None),
    ('metrics.commands.delete.total', 'delete_total', None),
    ('metrics.commands.eval.total', 'eval_total', None),
    ('metrics.commands.findAndModify.total', 'findAndModify_total', None),
    ('metrics.commands.insert.total', 'insert_total', None),
    ('metrics.commands.update.total', 'update_total', None),
    ('metrics.commands.count.failed', 'count_failed', None),
    ('metrics.commands.createIndexes.failed', 'createIndexes_failed', None),
    ('metrics.commands.delete.failed', 'delete_failed', None),
    ('metrics.commands.eval.failed', 'eval_failed', None),
    ('metrics.commands.findAndModify.failed', 'findAndModify_failed', None),
    ('metrics.commands.insert.failed', 'insert_failed', None),
    ('metrics.commands.update.failed', 'update_failed', None)
]
# Lock acquisition counters; only collected when 'Collection' is present in
# serverStatus['locks'] (see build_metrics_to_collect_).
# R/W = shared/exclusive, r/w = intent shared/intent exclusive.
LOCKS = [
    ('locks.Collection.acquireCount.R', 'Collection_R', None),
    ('locks.Collection.acquireCount.r', 'Collection_r', None),
    ('locks.Collection.acquireCount.W', 'Collection_W', None),
    ('locks.Collection.acquireCount.w', 'Collection_w', None),
    ('locks.Database.acquireCount.R', 'Database_R', None),
    ('locks.Database.acquireCount.r', 'Database_r', None),
    ('locks.Database.acquireCount.W', 'Database_W', None),
    ('locks.Database.acquireCount.w', 'Database_w', None),
    ('locks.Global.acquireCount.R', 'Global_R', None),
    ('locks.Global.acquireCount.r', 'Global_r', None),
    ('locks.Global.acquireCount.W', 'Global_W', None),
    ('locks.Global.acquireCount.w', 'Global_w', None),
    ('locks.Metadata.acquireCount.R', 'Metadata_R', None),
    ('locks.Metadata.acquireCount.w', 'Metadata_w', None),
    ('locks.oplog.acquireCount.r', 'oplog_r', None),
    ('locks.oplog.acquireCount.w', 'oplog_w', None)
]
# charts order (can be overridden if you want less charts, or different order)
ORDER = [
    # throughput
    'read_operations', 'write_operations', 'active_clients',
    # journaling and background flushing
    'journaling_transactions', 'journaling_volume',
    'background_flush_average', 'background_flush_last', 'background_flush_rate',
    # wiredTiger and resource utilization / saturation
    'wiredtiger_read', 'wiredtiger_write', 'cursors', 'connections', 'memory',
    'page_faults', 'queued_requests', 'record_moves',
    'wiredtiger_cache', 'wiredtiger_pages_evicted', 'asserts',
    # locks
    'locks_collection', 'locks_database', 'locks_global', 'locks_metadata',
    'locks_oplog',
    # storage, allocator and command rates
    'dbstats_objects', 'tcmalloc_generic', 'tcmalloc_metrics',
    'command_total_rate', 'command_failed_rate',
]
151 'options': [None, 'Received read requests', 'requests/s', 'throughput metrics',
152 'mongodb.read_operations', 'line'],
154 ['query', None, 'incremental'],
155 ['getmore', None, 'incremental']
157 'write_operations': {
158 'options': [None, 'Received write requests', 'requests/s', 'throughput metrics',
159 'mongodb.write_operations', 'line'],
161 ['insert', None, 'incremental'],
162 ['update', None, 'incremental'],
163 ['delete', None, 'incremental']
166 'options': [None, 'Clients with read or write operations in progress or queued', 'clients',
167 'throughput metrics', 'mongodb.active_clients', 'line'],
169 ['activeClients_readers', 'readers', 'absolute'],
170 ['activeClients_writers', 'writers', 'absolute']
172 'journaling_transactions': {
173 'options': [None, 'Transactions that have been written to the journal', 'commits',
174 'database performance', 'mongodb.journaling_transactions', 'line'],
176 ['commits', None, 'absolute']
178 'journaling_volume': {
179 'options': [None, 'Volume of data written to the journal', 'MB', 'database performance',
180 'mongodb.journaling_volume', 'line'],
182 ['journaledMB', 'volume', 'absolute', 1, 100]
184 'background_flush_average': {
185 'options': [None, 'Average time taken by flushes to execute', 'ms', 'database performance',
186 'mongodb.background_flush_average', 'line'],
188 ['average_ms', 'time', 'absolute', 1, 100]
190 'background_flush_last': {
191 'options': [None, 'Time taken by the last flush operation to execute', 'ms', 'database performance',
192 'mongodb.background_flush_last', 'line'],
194 ['last_ms', 'time', 'absolute', 1, 100]
196 'background_flush_rate': {
197 'options': [None, 'Flushes rate', 'flushes', 'database performance', 'mongodb.background_flush_rate', 'line'],
199 ['flushes', 'flushes', 'incremental', 1, 1]
202 'options': [None, 'Read tickets in use and remaining', 'tickets', 'database performance',
203 'mongodb.wiredtiger_read', 'stacked'],
205 ['wiredTigerRead_available', 'available', 'absolute', 1, 1],
206 ['wiredTigerRead_out', 'inuse', 'absolute', 1, 1]
208 'wiredtiger_write': {
209 'options': [None, 'Write tickets in use and remaining', 'tickets', 'database performance',
210 'mongodb.wiredtiger_write', 'stacked'],
212 ['wiredTigerWrite_available', 'available', 'absolute', 1, 1],
213 ['wiredTigerWrite_out', 'inuse', 'absolute', 1, 1]
216 'options': [None, 'Currently openned cursors, cursors with timeout disabled and timed out cursors',
217 'cursors', 'database performance', 'mongodb.cursors', 'stacked'],
219 ['cursor_total', 'openned', 'absolute', 1, 1],
220 ['noTimeout', None, 'absolute', 1, 1],
221 ['timedOut', None, 'incremental', 1, 1]
224 'options': [None, 'Currently connected clients and unused connections', 'connections',
225 'resource utilization', 'mongodb.connections', 'stacked'],
227 ['connections_available', 'unused', 'absolute', 1, 1],
228 ['connections_current', 'connected', 'absolute', 1, 1]
231 'options': [None, 'Memory metrics', 'MB', 'resource utilization', 'mongodb.memory', 'stacked'],
233 ['virtual', None, 'absolute', 1, 1],
234 ['resident', None, 'absolute', 1, 1],
235 ['nonmapped', None, 'absolute', 1, 1],
236 ['mapped', None, 'absolute', 1, 1]
239 'options': [None, 'Number of times MongoDB had to fetch data from disk', 'request/s',
240 'resource utilization', 'mongodb.page_faults', 'line'],
242 ['page_faults', None, 'incremental', 1, 1]
245 'options': [None, 'Currently queued read and wrire requests', 'requests', 'resource saturation',
246 'mongodb.queued_requests', 'line'],
248 ['currentQueue_readers', 'readers', 'absolute', 1, 1],
249 ['currentQueue_writers', 'writers', 'absolute', 1, 1]
252 'options': [None, 'Number of times documents had to be moved on-disk', 'number',
253 'resource saturation', 'mongodb.record_moves', 'line'],
255 ['moves', None, 'incremental', 1, 1]
258 'options': [None, 'Number of message, warning, regular, corresponding to errors generated'
259 ' by users assertions raised', 'number', 'errors (asserts)', 'mongodb.asserts', 'line'],
261 ['msg', None, 'incremental', 1, 1],
262 ['warning', None, 'incremental', 1, 1],
263 ['regular', None, 'incremental', 1, 1],
264 ['user', None, 'incremental', 1, 1]
266 'wiredtiger_cache': {
267 'options': [None, 'The percentage of the wiredTiger cache that is in use and cache with dirty bytes',
268 'percent', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
270 ['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000],
271 ['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000]
273 'wiredtiger_pages_evicted': {
274 'options': [None, 'Pages evicted from the cache',
275 'pages', 'resource utilization', 'mongodb.wiredtiger_pages_evicted', 'stacked'],
277 ['unmodified', None, 'absolute', 1, 1],
278 ['modified', None, 'absolute', 1, 1]
281 'options': [None, 'Number of documents in the database among all the collections', 'documents',
282 'storage size metrics', 'mongodb.dbstats_objects', 'stacked'],
285 'tcmalloc_generic': {
286 'options': [None, 'Tcmalloc generic metrics', 'MB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
288 ['current_allocated_bytes', 'allocated', 'absolute', 1, 1048576],
289 ['heap_size', 'heap_size', 'absolute', 1, 1048576]
291 'tcmalloc_metrics': {
292 'options': [None, 'Tcmalloc metrics', 'KB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
294 ['central_cache_free_bytes', 'central_cache_free', 'absolute', 1, 1024],
295 ['current_total_thread_cache_bytes', 'current_total_thread_cache', 'absolute', 1, 1024],
296 ['pageheap_free_bytes', 'pageheap_free', 'absolute', 1, 1024],
297 ['pageheap_unmapped_bytes', 'pageheap_unmapped', 'absolute', 1, 1024],
298 ['thread_cache_free_bytes', 'thread_cache_free', 'absolute', 1, 1024],
299 ['transfer_cache_free_bytes', 'transfer_cache_free', 'absolute', 1, 1024]
301 'command_total_rate': {
302 'options': [None, 'Commands total rate', 'commands/s', 'commands', 'mongodb.command_total_rate', 'stacked'],
304 ['count_total', 'count', 'incremental', 1, 1],
305 ['createIndexes_total', 'createIndexes', 'incremental', 1, 1],
306 ['delete_total', 'delete', 'incremental', 1, 1],
307 ['eval_total', 'eval', 'incremental', 1, 1],
308 ['findAndModify_total', 'findAndModify', 'incremental', 1, 1],
309 ['insert_total', 'insert', 'incremental', 1, 1],
310 ['update_total', 'update', 'incremental', 1, 1]
312 'command_failed_rate': {
313 'options': [None, 'Commands failed rate', 'commands/s', 'commands', 'mongodb.command_failed_rate', 'stacked'],
315 ['count_failed', 'count', 'incremental', 1, 1],
316 ['createIndexes_failed', 'createIndexes', 'incremental', 1, 1],
317 ['delete_failed', 'delete', 'incremental', 1, 1],
318 ['eval_failed', 'eval', 'incremental', 1, 1],
319 ['findAndModify_failed', 'findAndModify', 'incremental', 1, 1],
320 ['insert_failed', 'insert', 'incremental', 1, 1],
321 ['update_failed', 'update', 'incremental', 1, 1]
323 'locks_collection': {
324 'options': [None, 'Collection lock. Number of times the lock was acquired in the specified mode',
325 'locks', 'locks metrics', 'mongodb.locks_collection', 'stacked'],
327 ['Collection_R', 'shared', 'incremental'],
328 ['Collection_W', 'exclusive', 'incremental'],
329 ['Collection_r', 'intent_shared', 'incremental'],
330 ['Collection_w', 'intent_exclusive', 'incremental']
333 'options': [None, 'Database lock. Number of times the lock was acquired in the specified mode',
334 'locks', 'locks metrics', 'mongodb.locks_database', 'stacked'],
336 ['Database_R', 'shared', 'incremental'],
337 ['Database_W', 'exclusive', 'incremental'],
338 ['Database_r', 'intent_shared', 'incremental'],
339 ['Database_w', 'intent_exclusive', 'incremental']
342 'options': [None, 'Global lock. Number of times the lock was acquired in the specified mode',
343 'locks', 'locks metrics', 'mongodb.locks_global', 'stacked'],
345 ['Global_R', 'shared', 'incremental'],
346 ['Global_W', 'exclusive', 'incremental'],
347 ['Global_r', 'intent_shared', 'incremental'],
348 ['Global_w', 'intent_exclusive', 'incremental']
351 'options': [None, 'Metadata lock. Number of times the lock was acquired in the specified mode',
352 'locks', 'locks metrics', 'mongodb.locks_metadata', 'stacked'],
354 ['Metadata_R', 'shared', 'incremental'],
355 ['Metadata_w', 'intent_exclusive', 'incremental']
358 'options': [None, 'Lock on the oplog. Number of times the lock was acquired in the specified mode',
359 'locks', 'locks metrics', 'mongodb.locks_oplog', 'stacked'],
361 ['Metadata_r', 'intent_shared', 'incremental'],
362 ['Metadata_w', 'intent_exclusive', 'incremental']
367 class Service(SimpleService):
368 def __init__(self, configuration=None, name=None):
369 SimpleService.__init__(self, configuration=configuration, name=name)
370 self.order = ORDER[:]
371 self.definitions = deepcopy(CHARTS)
372 self.user = self.configuration.get('user')
373 self.password = self.configuration.get('pass')
374 self.host = self.configuration.get('host', '127.0.0.1')
375 self.port = self.configuration.get('port', 27017)
376 self.timeout = self.configuration.get('timeout', 100)
377 self.metrics_to_collect = deepcopy(DEFAULT_METRICS)
378 self.connection = None
379 self.do_replica = None
380 self.databases = list()
384 self.error('Pymongo module is needed to use mongodb.chart.py')
386 self.connection, server_status, error = self._create_connection()
391 self.build_metrics_to_collect_(server_status)
394 data = self._get_data()
395 except (LookupError, SyntaxError, AttributeError):
396 self.error('Type: %s, error: %s' % (str(exc_info()[0]), str(exc_info()[1])))
399 if isinstance(data, dict) and data:
400 self._data_from_check = data
401 self.create_charts_(server_status)
404 self.error('_get_data() returned no data or type is not <dict>')
407 def build_metrics_to_collect_(self, server_status):
409 self.do_replica = 'repl' in server_status
410 if 'dur' in server_status:
411 self.metrics_to_collect.extend(DUR)
412 if 'tcmalloc' in server_status:
413 self.metrics_to_collect.extend(TCMALLOC)
414 if 'commands' in server_status['metrics']:
415 self.metrics_to_collect.extend(COMMANDS)
416 if 'wiredTiger' in server_status:
417 self.metrics_to_collect.extend(WIREDTIGER)
418 if 'Collection' in server_status['locks']:
419 self.metrics_to_collect.extend(LOCKS)
421 def create_charts_(self, server_status):
423 if 'dur' not in server_status:
424 self.order.remove('journaling_transactions')
425 self.order.remove('journaling_volume')
427 if 'backgroundFlushing' not in server_status:
428 self.order.remove('background_flush_average')
429 self.order.remove('background_flush_last')
430 self.order.remove('background_flush_rate')
432 if 'wiredTiger' not in server_status:
433 self.order.remove('wiredtiger_write')
434 self.order.remove('wiredtiger_read')
435 self.order.remove('wiredtiger_cache')
437 if 'tcmalloc' not in server_status:
438 self.order.remove('tcmalloc_generic')
439 self.order.remove('tcmalloc_metrics')
441 if 'commands' not in server_status['metrics']:
442 self.order.remove('command_total_rate')
443 self.order.remove('command_failed_rate')
445 if 'Collection' not in server_status['locks']:
446 self.order.remove('locks_collection')
447 self.order.remove('locks_database')
448 self.order.remove('locks_global')
449 self.order.remove('locks_metadata')
451 if 'oplog' not in server_status['locks']:
452 self.order.remove('locks_oplog')
454 for dbase in self.databases:
455 self.order.append('_'.join([dbase, 'dbstats']))
456 self.definitions['_'.join([dbase, 'dbstats'])] = {
457 'options': [None, '%s: size of all documents, indexes, extents' % dbase, 'KB',
458 'storage size metrics', 'mongodb.dbstats', 'line'],
460 ['_'.join([dbase, 'dataSize']), 'documents', 'absolute', 1, 1024],
461 ['_'.join([dbase, 'indexSize']), 'indexes', 'absolute', 1, 1024],
462 ['_'.join([dbase, 'storageSize']), 'extents', 'absolute', 1, 1024]
464 self.definitions['dbstats_objects']['lines'].append(['_'.join([dbase, 'objects']), dbase, 'absolute'])
467 def create_lines(hosts, string):
470 dim_id = '_'.join([host, string])
471 lines.append([dim_id, host, 'absolute', 1, 1000])
474 def create_state_lines(states):
476 for state, description in states:
477 dim_id = '_'.join([host, 'state', state])
478 lines.append([dim_id, description, 'absolute', 1, 1])
481 all_hosts = server_status['repl']['hosts'] + server_status['repl'].get('arbiters', list())
482 this_host = server_status['repl']['me']
483 other_hosts = [host for host in all_hosts if host != this_host]
485 if 'local' in self.databases:
486 self.order.append('oplog_window')
487 self.definitions['oplog_window'] = {
488 'options': [None, 'Interval of time between the oldest and the latest entries in the oplog',
489 'seconds', 'replication and oplog', 'mongodb.oplog_window', 'line'],
490 'lines': [['timeDiff', 'window', 'absolute', 1, 1000]]}
491 # Create "heartbeat delay" chart
492 self.order.append('heartbeat_delay')
493 self.definitions['heartbeat_delay'] = {
494 'options': [None, 'Time when last heartbeat was received'
495 ' from the replica set member (lastHeartbeatRecv)',
496 'seconds ago', 'replication and oplog', 'mongodb.replication_heartbeat_delay', 'stacked'],
497 'lines': create_lines(other_hosts, 'heartbeat_lag')}
498 # Create "optimedate delay" chart
499 self.order.append('optimedate_delay')
500 self.definitions['optimedate_delay'] = {
501 'options': [None, 'Time when last entry from the oplog was applied (optimeDate)',
502 'seconds ago', 'replication and oplog', 'mongodb.replication_optimedate_delay', 'stacked'],
503 'lines': create_lines(all_hosts, 'optimedate')}
504 # Create "replica set members state" chart
505 for host in all_hosts:
506 chart_name = '_'.join([host, 'state'])
507 self.order.append(chart_name)
508 self.definitions[chart_name] = {
509 'options': [None, 'Replica set member (%s) current state' % host, 'state',
510 'replication and oplog', 'mongodb.replication_state', 'line'],
511 'lines': create_state_lines(REPLSET_STATES)}
513 def _get_raw_data(self):
516 raw_data.update(self.get_serverstatus_() or dict())
517 raw_data.update(self.get_dbstats_() or dict())
518 raw_data.update(self.get_replsetgetstatus_() or dict())
519 raw_data.update(self.get_getreplicationinfo_() or dict())
521 return raw_data or None
523 def get_serverstatus_(self):
526 raw_data['serverStatus'] = self.connection.admin.command('serverStatus')
532 def get_dbstats_(self):
533 if not self.databases:
537 raw_data['dbStats'] = dict()
539 for dbase in self.databases:
540 raw_data['dbStats'][dbase] = self.connection[dbase].command('dbStats')
546 def get_replsetgetstatus_(self):
547 if not self.do_replica:
552 raw_data['replSetGetStatus'] = self.connection.admin.command('replSetGetStatus')
558 def get_getreplicationinfo_(self):
559 if not (self.do_replica and 'local' in self.databases):
563 raw_data['getReplicationInfo'] = dict()
565 raw_data['getReplicationInfo']['ASCENDING'] = self.connection.local.oplog.rs.find().sort(
566 "$natural", ASCENDING).limit(1)[0]
567 raw_data['getReplicationInfo']['DESCENDING'] = self.connection.local.oplog.rs.find().sort(
568 "$natural", DESCENDING).limit(1)[0]
578 raw_data = self._get_raw_data()
584 serverStatus = raw_data['serverStatus']
585 dbStats = raw_data.get('dbStats')
586 replSetGetStatus = raw_data.get('replSetGetStatus')
587 getReplicationInfo = raw_data.get('getReplicationInfo')
588 utc_now = datetime.utcnow()
591 for metric, new_name, function in self.metrics_to_collect:
593 for key in metric.split('.'):
599 if not isinstance(value, dict) and key:
600 to_netdata[new_name or key] = value if not function else function(value)
602 to_netdata['nonmapped'] = to_netdata['virtual'] - serverStatus['mem'].get('mappedWithJournal',
603 to_netdata['mapped'])
604 if to_netdata.get('maximum bytes configured'):
605 maximum = to_netdata['maximum bytes configured']
606 to_netdata['wiredTiger_percent_clean'] = int(to_netdata['bytes currently in the cache']
607 * 100 / maximum * 1000)
608 to_netdata['wiredTiger_percent_dirty'] = int(to_netdata['tracked dirty bytes in the cache']
609 * 100 / maximum * 1000)
613 for dbase in dbStats:
614 for metric in DBSTATS:
615 key = '_'.join([dbase, metric])
616 to_netdata[key] = dbStats[dbase][metric]
621 members = replSetGetStatus['members']
622 unix_epoch = datetime(1970, 1, 1, 0, 0)
624 for member in members:
625 if not member.get('self'):
626 other_hosts.append(member)
627 # Replica set time diff between current time and time when last entry from the oplog was applied
628 if member.get('optimeDate', unix_epoch) != unix_epoch:
629 member_optimedate = member['name'] + '_optimedate'
630 to_netdata.update({member_optimedate: int(delta_calculation(delta=utc_now - member['optimeDate'],
632 # Replica set members state
633 member_state = member['name'] + '_state'
634 for elem in REPLSET_STATES:
636 to_netdata.update({'_'.join([member_state, state]): 0})
637 to_netdata.update({'_'.join([member_state, str(member['state'])]): member['state']})
638 # Heartbeat lag calculation
639 for other in other_hosts:
640 if other['lastHeartbeatRecv'] != unix_epoch:
641 node = other['name'] + '_heartbeat_lag'
642 to_netdata[node] = int(delta_calculation(delta=utc_now - other['lastHeartbeatRecv'],
645 if getReplicationInfo:
646 first_event = getReplicationInfo['ASCENDING']['ts'].as_datetime()
647 last_event = getReplicationInfo['DESCENDING']['ts'].as_datetime()
648 to_netdata['timeDiff'] = int(delta_calculation(delta=last_event - first_event, multiplier=1000))
652 def _create_connection(self):
653 conn_vars = {'host': self.host, 'port': self.port}
654 if hasattr(MongoClient, 'server_selection_timeout'):
655 conn_vars.update({'serverselectiontimeoutms': self.timeout})
657 connection = MongoClient(**conn_vars)
658 if self.user and self.password:
659 connection.admin.authenticate(name=self.user, password=self.password)
661 # connection.admin.authenticate(name=self.user, mechanism='MONGODB-X509')
662 server_status = connection.admin.command('serverStatus')
663 except PyMongoError as error:
664 return None, None, str(error)
667 self.databases = connection.database_names()
668 except PyMongoError as error:
669 self.info('Can\'t collect databases: %s' % str(error))
670 return connection, server_status, None
def delta_calculation(delta, multiplier=1):
    """Return a timedelta expressed in (fractional) seconds times *multiplier*.

    Falls back to manual arithmetic for interpreters whose timedelta lacks
    total_seconds() (added in Python 2.7).
    """
    try:
        seconds = delta.total_seconds()
    except AttributeError:
        seconds = (delta.microseconds
                   + (delta.seconds + delta.days * 86400) * 10 ** 6) / 10.0 ** 6
    return seconds * multiplier