Merge pull request #1887 from ktsaou/clock-fixes

diff --git a/python.d/postgres.chart.py b/python.d/postgres.chart.py
index 345a532245a14af75d29b7f14d9684be9edafc3f..eb3224bf0b3da471664c583e83052858ec7e1283 100644
--- a/python.d/postgres.chart.py
+++ b/python.d/postgres.chart.py
@@ -8,6 +8,7 @@ from copy import deepcopy
 import psycopg2
 from psycopg2 import extensions
 from psycopg2.extras import DictCursor
+from psycopg2 import OperationalError
 
 from base import SimpleService
 
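The removed default-config comment below is superseded by the defaults applied
in _connect(); the commit also adds per-job table_stats / index_stats switches,
handled in Service.__init__. A minimal job definition might look like the
following sketch; the key names come from the configuration handling in this
diff, while the YAML layout is assumed from python.d conventions:

    # python.d/postgres.conf (sketch)
    local:
      user        : 'postgres'
      database    : 'postgres'
      table_stats : false
      index_stats : false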
@@ -16,15 +17,6 @@ update_every = 1
 priority = 90000
 retries = 60
 
-# Default Config options.
-# {
-#    'database': None,
-#    'user': 'postgres',
-#    'password': None,
-#    'host': 'localhost',
-#    'port': 5432
-# }
-
 ARCHIVE = """
 SELECT
     CAST(COUNT(*) AS INT) AS file_count,
@@ -60,6 +52,7 @@ WHERE relkind = 'i';"""
 DATABASE = """
 SELECT
   datname AS database_name,
+  sum(numbackends) AS connections,
   sum(xact_commit) AS xact_commit,
   sum(xact_rollback) AS xact_rollback,
   sum(blks_read) AS blks_read,
@@ -75,21 +68,6 @@ WHERE NOT datname ~* '^template\d+'
 GROUP BY database_name;
 """
 
-STATIO = """
-SELECT
-    sum(heap_blks_read) AS heap_blocks_read,
-    sum(heap_blks_hit) AS heap_blocks_hit,
-    sum(idx_blks_read) AS index_blocks_read,
-    sum(idx_blks_hit) AS index_blocks_hit,
-    sum(toast_blks_read) AS toast_blocks_read,
-    sum(toast_blks_hit) AS toast_blocks_hit,
-    sum(tidx_blks_read) AS toastindex_blocks_read,
-    sum(tidx_blks_hit) AS toastindex_blocks_hit
-FROM
-    pg_statio_all_tables
-WHERE
-    schemaname <> 'pg_catalog';
-"""
 BGWRITER = 'SELECT * FROM pg_stat_bgwriter;'
 DATABASE_LOCKS = """
 SELECT
@@ -111,23 +89,14 @@ SELECT
 FROM (
     SELECT
         client_addr, client_hostname, state,
-        ('x' || lpad(split_part(sent_location,   '/', 1), 8, '0'))::bit(32)::bigint AS sent_xlog,
-        ('x' || lpad(split_part(replay_location, '/', 1), 8, '0'))::bit(32)::bigint AS replay_xlog,
-        ('x' || lpad(split_part(sent_location,   '/', 2), 8, '0'))::bit(32)::bigint AS sent_offset,
-        ('x' || lpad(split_part(replay_location, '/', 2), 8, '0'))::bit(32)::bigint AS replay_offset
+        ('x' || lpad(split_part(sent_location::text,   '/', 1), 8, '0'))::bit(32)::bigint AS sent_xlog,
+        ('x' || lpad(split_part(replay_location::text, '/', 1), 8, '0'))::bit(32)::bigint AS replay_xlog,
+        ('x' || lpad(split_part(sent_location::text,   '/', 2), 8, '0'))::bit(32)::bigint AS sent_offset,
+        ('x' || lpad(split_part(replay_location::text, '/', 2), 8, '0'))::bit(32)::bigint AS replay_offset
     FROM pg_stat_replication
 ) AS s;
 """
 
-# LOCK_MAP = {'AccessExclusiveLock': 'lock_access_exclusive',
-#             'AccessShareLock': 'lock_access_share',
-#             'ExclusiveLock': 'lock_exclusive',
-#             'RowExclusiveLock': 'lock_row_exclusive',
-#             'RowShareLock': 'lock_row_share',
-#             'ShareUpdateExclusiveLock': 'lock_update_exclusive_lock',
-#             'ShareLock': 'lock_share',
-#             'ShareRowExclusiveLock': 'lock_share_row_exclusive',
-#             'SIReadLock': 'lock_si_read'}
 LOCK_TYPES = [
     'ExclusiveLock',
     'RowShareLock',
@@ -141,98 +110,78 @@ LOCK_TYPES = [
 ]
 
 ORDER = ['db_stat_transactions', 'db_stat_tuple_read', 'db_stat_tuple_returned', 'db_stat_tuple_write',
-         'backend_process', 'index_count', 'index_size', 'table_count', 'table_size', 'wal', 'operations_heap',
-         'operations_index', 'operations_toast', 'operations_toast_index', 'background_writer']
+         'backend_process', 'index_count', 'index_size', 'table_count', 'table_size', 'wal', 'background_writer']
 
 CHARTS = {
     'db_stat_transactions': {
-        'options': [None, ' Transactions', 'Count', ' database statistics', '.db_stat_transactions', 'line'],
+        'options': [None, 'Transactions on db', 'transactions/s', 'db statistics', 'postgres.db_stat_transactions', 'line'],
+        'lines': [
+            ['db_stat_xact_commit',   'committed',   'incremental'],
+            ['db_stat_xact_rollback', 'rolled back', 'incremental']
+        ]},
+    'db_stat_connections': {
+        'options': [None, 'Current connections to db', 'count', 'db statistics', 'postgres.db_stat_connections', 'line'],
         'lines': [
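+            # numbackends is a gauge (a current value), hence 'absolute'
+            # rather than 'incremental'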
-            ['db_stat_xact_commit', 'Committed', 'absolute'],
-            ['db_stat_xact_rollback', 'Rolled Back', 'absolute']
+            ['db_stat_connections', 'connections', 'absolute']
         ]},
     'db_stat_tuple_read': {
-        'options': [None, ' Tuple read', 'Count', ' database statistics', '.db_stat_tuple_read', 'line'],
+        'options': [None, 'Tuple reads from db', 'reads/s', 'db statistics', 'postgres.db_stat_tuple_read', 'line'],
         'lines': [
-            ['db_stat_blks_read', 'Disk', 'absolute'],
-            ['db_stat_blks_hit', 'Cache', 'absolute']
+            ['db_stat_blks_read', 'disk',  'incremental'],
+            ['db_stat_blks_hit',  'cache', 'incremental']
         ]},
     'db_stat_tuple_returned': {
-        'options': [None, ' Tuple returned', 'Count', ' database statistics', '.db_stat_tuple_returned', 'line'],
+        'options': [None, 'Tuples returned from db', 'tuples/s', 'db statistics', 'postgres.db_stat_tuple_returned', 'line'],
         'lines': [
-            ['db_stat_tup_returned', 'Sequential', 'absolute'],
-            ['db_stat_tup_fetched', 'Bitmap', 'absolute']
+            ['db_stat_tup_returned', 'sequential', 'incremental'],
+            ['db_stat_tup_fetched',  'bitmap',     'incremental']
         ]},
     'db_stat_tuple_write': {
-        'options': [None, ' Tuple write', 'Count', ' database statistics', '.db_stat_tuple_write', 'line'],
+        'options': [None, 'Tuples written to db', 'writes/s', 'db statistics', 'postgres.db_stat_tuple_write', 'line'],
         'lines': [
-            ['db_stat_tup_inserted', 'Inserted', 'absolute'],
-            ['db_stat_tup_updated', 'Updated', 'absolute'],
-            ['db_stat_tup_deleted', 'Deleted', 'absolute'],
-            ['db_stat_conflicts', 'Conflicts', 'absolute']
+            ['db_stat_tup_inserted', 'inserted',  'incremental'],
+            ['db_stat_tup_updated',  'updated',   'incremental'],
+            ['db_stat_tup_deleted',  'deleted',   'incremental'],
+            ['db_stat_conflicts',    'conflicts', 'incremental']
         ]},
     'backend_process': {
-        'options': [None, 'Backend processes', 'Count', 'Backend processes', 'postgres.backend_process', 'line'],
+        'options': [None, 'Current Backend Processes', 'processes', 'backend processes', 'postgres.backend_process', 'line'],
         'lines': [
-            ['backend_process_active', 'Active', 'absolute'],
-            ['backend_process_idle', 'Idle', 'absolute']
+            ['backend_process_active', 'active', 'absolute'],
+            ['backend_process_idle',   'idle',   'absolute']
         ]},
     'index_count': {
-        'options': [None, 'Total index', 'Count', 'Index', 'postgres.index_count', 'line'],
+        'options': [None, 'Total indexes', 'index', 'indexes', 'postgres.index_count', 'line'],
         'lines': [
-            ['index_count', 'Total index', 'absolute']
+            ['index_count', 'total', 'absolute']
         ]},
     'index_size': {
-        'options': [None, 'Index size', 'MB', 'Index', 'postgres.index_size', 'line'],
+        'options': [None, 'Indexes size', 'MB', 'indexes', 'postgres.index_size', 'line'],
         'lines': [
-            ['index_size', 'Size', 'absolute', 1, 1024 * 1024]
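+            # size_indexes is reported in bytes; the 1024 * 1024 divisor
+            # converts it to the MB units declared above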
+            ['index_size', 'size', 'absolute', 1, 1024 * 1024]
         ]},
     'table_count': {
-        'options': [None, 'Total table', 'Count', 'Table', 'postgres.table_count', 'line'],
+        'options': [None, 'Total Tables', 'tables', 'tables', 'postgres.table_count', 'line'],
         'lines': [
-            ['table_count', 'Total table', 'absolute']
+            ['table_count', 'total', 'absolute']
         ]},
     'table_size': {
-        'options': [None, 'Table size', 'MB', 'Table', 'postgres.table_size', 'line'],
+        'options': [None, 'Tables size', 'MB', 'tables', 'postgres.table_size', 'line'],
         'lines': [
-            ['table_size', 'Size', 'absolute', 1, 1024 * 1024]
+            ['table_size', 'size', 'absolute', 1, 1024 * 1024]
         ]},
     'wal': {
-        'options': [None, 'WAL stats', 'Files', 'WAL', 'postgres.wal', 'line'],
-        'lines': [
-            ['wal_total', 'Total', 'absolute'],
-            ['wal_ready', 'Ready', 'absolute'],
-            ['wal_done', 'Done', 'absolute']
-        ]},
-    'operations_heap': {
-        'options': [None, 'Heap', 'iops', 'IO Operations', 'postgres.operations_heap', 'line'],
-        'lines': [
-            ['operations_heap_blocks_read', 'Read', 'absolute'],
-            ['operations_heap_blocks_hit', 'Hit', 'absolute']
-        ]},
-    'operations_index': {
-        'options': [None, 'Index', 'iops', 'IO Operations', 'postgres.operations_index', 'line'],
-        'lines': [
-            ['operations_index_blocks_read', 'Read', 'absolute'],
-            ['operations_index_blocks_hit', 'Hit', 'absolute']
-        ]},
-    'operations_toast': {
-        'options': [None, 'Toast', 'iops', 'IO Operations', 'postgres.operations_toast', 'line'],
+        'options': [None, 'Write-Ahead Logging Statistics', 'files/s', 'write ahead log', 'postgres.wal', 'line'],
         'lines': [
-            ['operations_toast_blocks_read', 'Read', 'absolute'],
-            ['operations_toast_blocks_hit', 'Hit', 'absolute']
-        ]},
-    'operations_toast_index': {
-        'options': [None, 'Toast index', 'iops', 'IO Operations', 'postgres.operations_toast_index', 'line'],
-        'lines': [
-            ['operations_toastindex_blocks_read', 'Read', 'absolute'],
-            ['operations_toastindex_blocks_hit', 'Hit', 'absolute']
+            ['wal_total', 'total', 'incremental'],
+            ['wal_ready', 'ready', 'incremental'],
+            ['wal_done',  'done',  'incremental']
         ]},
     'background_writer': {
-        'options': [None, 'Checkpoints', 'Count', 'Background Writer', 'postgres.background_writer', 'line'],
+        'options': [None, 'Checkpoints', 'writes/s', 'background writer', 'postgres.background_writer', 'line'],
         'lines': [
-            ['background_writer_scheduled', 'Scheduled', 'absolute'],
-            ['background_writer_requested', 'Requested', 'absolute']
+            ['background_writer_scheduled', 'scheduled', 'incremental'],
+            ['background_writer_requested', 'requested', 'incremental']
         ]}
 }
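
For reference, each template above follows the python.d chart format:
'options' is [name, title, units, family, context, chart_type], and every line
is [dimension_id, label, algorithm] with an optional multiplier and divisor
(as in index_size). Dimensions backed by pg_stat_* counters, which only ever
grow, use the 'incremental' algorithm so netdata derives per-second rates
itself; roughly, as a sketch of the idea rather than netdata's implementation:

    # 'incremental': the charted value is the per-second delta between
    # two consecutive polls of a cumulative counter
    def incremental(current, previous, elapsed=1):
        return (current - previous) / elapsed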
 
@@ -242,205 +191,202 @@ class Service(SimpleService):
         super(self.__class__, self).__init__(configuration=configuration, name=name)
         self.order = ORDER
         self.definitions = CHARTS
+        self.table_stats = configuration.pop('table_stats', True)
+        self.index_stats = configuration.pop('index_stats', True)
         self.configuration = configuration
-        self.connection = None
+        self.connection = False
+        self.is_superuser = False
         self.data = {}
-        self.old_data = {}
         self.databases = set()
 
-    def connect(self):
+    def _connect(self):
         params = dict(user='postgres',
                       database=None,
                       password=None,
-                      host='localhost',
+                      host=None,
                       port=5432)
         params.update(self.configuration)
-        self.connection = psycopg2.connect(**params)
-        self.connection.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
-        self.connection.set_session(readonly=True)
+
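+        # reuse an existing connection; _get_data() resets self.connection
+        # to False on OperationalError, forcing a fresh connect here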
+        if not self.connection:
+            try:
+                self.connection = psycopg2.connect(**params)
+                self.connection.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+                self.connection.set_session(readonly=True)
+            except OperationalError:
+                return False
+        return True
 
     def check(self):
         try:
-            self.connect()
-            self.discover_databases()
+            if not self._connect():
+                self.error('Can\'t connect to %s' % str(self.configuration))
+                return False
+            cursor = self.connection.cursor()
+            self._discover_databases(cursor)
+            self._check_if_superuser(cursor)
+            cursor.close()
+
             self._create_definitions()
             return True
         except Exception as e:
-            self.error(e)
+            self.error(str(e))
             return False
 
-    def _create_definitions(self):
-        for database_name in self.databases:
-            self.databases.add(database_name)
-            for chart_template_name in list(CHARTS):
-                if chart_template_name.startswith('db_stat'):
-                    self._add_database_stat_chart(chart_template_name, database_name)
-            self._add_database_lock_chart(database_name)
-
-    def discover_databases(self):
-        cursor = self.connection.cursor()
+    def _discover_databases(self, cursor):
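+        # one row per database, excluding the template databases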
         cursor.execute("""
             SELECT datname
             FROM pg_stat_database
             WHERE NOT datname ~* '^template\d+'
         """)
         self.databases = set(r[0] for r in cursor)
-        cursor.close()
+
+    def _check_if_superuser(self, cursor):
+        cursor.execute("""
+            SELECT current_setting('is_superuser') = 'on' AS is_superuser;
+        """)
+        self.is_superuser = cursor.fetchone()[0]
+
+    def _create_definitions(self):
+        for database_name in self.databases:
+            for chart_template_name in list(CHARTS):
+                if chart_template_name.startswith('db_stat'):
+                    self._add_database_stat_chart(chart_template_name, database_name)
+            self._add_database_lock_chart(database_name)
 
     def _add_database_stat_chart(self, chart_template_name, database_name):
         chart_template = CHARTS[chart_template_name]
-        chart_name = "{}_{}".format(database_name, chart_template_name)
+        chart_name = "{0}_{1}".format(database_name, chart_template_name)
         if chart_name not in self.order:
             self.order.insert(0, chart_name)
             name, title, units, family, context, chart_type = chart_template['options']
             self.definitions[chart_name] = {
                 'options': [
                     name,
-                    database_name + title,
+                    title + ': ' + database_name,
                     units,
-                    database_name + family,
-                    database_name + context,
+                    'db ' + database_name,
+                    context,
                     chart_type
                 ]
             }
 
             self.definitions[chart_name]['lines'] = []
             for line in deepcopy(chart_template['lines']):
-                line[0] = "{}_{}".format(database_name, line[0])
+                line[0] = "{0}_{1}".format(database_name, line[0])
                 self.definitions[chart_name]['lines'].append(line)
 
     def _add_database_lock_chart(self, database_name):
-        chart_name = "{}_locks".format(database_name)
+        chart_name = "{0}_locks".format(database_name)
         if chart_name not in self.order:
-            self.order.insert(0, chart_name)
+            self.order.insert(-1, chart_name)
             self.definitions[chart_name] = dict(
                 options=
                 [
                     None,
-                    database_name + ' locks',
-                    'Count',
-                    database_name + ' database statistics',
-                    database_name + '.locks',
+                    'Locks on db: ' + database_name,
+                    'locks',
+                    'db ' + database_name,
+                    'postgres.db_locks',
                     'line'
                 ],
                 lines=[]
             )
 
             for lock_type in LOCK_TYPES:
-                lock_id = "{}_{}".format(database_name, lock_type.lower())
+                lock_id = "{0}_{1}".format(database_name, lock_type)
                 label = re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", lock_type)  # 'RowShareLock' -> 'Row Share Lock'
                 self.definitions[chart_name]['lines'].append([lock_id, label, 'absolute'])
 
     def _get_data(self):
-        self.connect()
-
-        cursor = self.connection.cursor(cursor_factory=DictCursor)
-        self.add_stats(cursor)
-
-        cursor.close()
-        self.connection.close()
-
-        return self.data
+        if self._connect():
+            cursor = self.connection.cursor(cursor_factory=DictCursor)
+            try:
+                self.add_stats(cursor)
+            except OperationalError:
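+                # the server went away; drop the cached connection so the
+                # next poll reconnects instead of reusing a dead handle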
+                self.connection = False
+                cursor.close()
+                return None
+            else:
+                cursor.close()
+                return self.data
+        else:
+            return None
 
     def add_stats(self, cursor):
         self.add_database_stats(cursor)
         self.add_backend_stats(cursor)
-        self.add_index_stats(cursor)
-        self.add_table_stats(cursor)
+        if self.index_stats:
+            self.add_index_stats(cursor)
+        if self.table_stats:
+            self.add_table_stats(cursor)
         self.add_lock_stats(cursor)
-        self.add_statio_stats(cursor)
         self.add_bgwriter_stats(cursor)
 
         # self.add_replication_stats(cursor)
 
-        # add_wal_metrics needs superuser to get directory listings
-        # if self.config.get('superuser', True):
-        # self.add_wal_stats(cursor)
+        if self.is_superuser:
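+            # reading the WAL archive status directory requires superuser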
+            self.add_wal_stats(cursor)
 
     def add_database_stats(self, cursor):
         cursor.execute(DATABASE)
         for row in cursor:
             database_name = row.get('database_name')
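+            # keys carry the database name prefix so they match the
+            # per-database dimension ids built in _add_database_stat_chart()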
-            self.add_derive_value('db_stat_xact_commit', prefix=database_name, value=int(row.get('xact_commit', 0)))
-            self.add_derive_value('db_stat_xact_rollback', prefix=database_name, value=int(row.get('xact_rollback', 0)))
-            self.add_derive_value('db_stat_blks_read', prefix=database_name, value=int(row.get('blks_read', 0)))
-            self.add_derive_value('db_stat_blks_hit', prefix=database_name, value=int(row.get('blks_hit', 0)))
-            self.add_derive_value('db_stat_tup_returned', prefix=database_name, value=int(row.get('tup_returned', 0)))
-            self.add_derive_value('db_stat_tup_fetched', prefix=database_name, value=int(row.get('tup_fetched', 0)))
-            self.add_derive_value('db_stat_tup_inserted', prefix=database_name, value=int(row.get('tup_inserted', 0)))
-            self.add_derive_value('db_stat_tup_updated', prefix=database_name, value=int(row.get('tup_updated', 0)))
-            self.add_derive_value('db_stat_tup_deleted', prefix=database_name, value=int(row.get('tup_deleted', 0)))
-            self.add_derive_value('db_stat_conflicts', prefix=database_name, value=int(row.get('conflicts', 0)))
+            self.data["{0}_{1}".format(database_name, 'db_stat_xact_commit')]   = int(row.get('xact_commit',   0))
+            self.data["{0}_{1}".format(database_name, 'db_stat_xact_rollback')] = int(row.get('xact_rollback', 0))
+            self.data["{0}_{1}".format(database_name, 'db_stat_blks_read')]     = int(row.get('blks_read',     0))
+            self.data["{0}_{1}".format(database_name, 'db_stat_blks_hit')]      = int(row.get('blks_hit',      0))
+            self.data["{0}_{1}".format(database_name, 'db_stat_tup_returned')]  = int(row.get('tup_returned',  0))
+            self.data["{0}_{1}".format(database_name, 'db_stat_tup_fetched')]   = int(row.get('tup_fetched',   0))
+            self.data["{0}_{1}".format(database_name, 'db_stat_tup_inserted')]  = int(row.get('tup_inserted',  0))
+            self.data["{0}_{1}".format(database_name, 'db_stat_tup_updated')]   = int(row.get('tup_updated',   0))
+            self.data["{0}_{1}".format(database_name, 'db_stat_tup_deleted')]   = int(row.get('tup_deleted',   0))
+            self.data["{0}_{1}".format(database_name, 'db_stat_conflicts')]     = int(row.get('conflicts',     0))
+            self.data["{0}_{1}".format(database_name, 'db_stat_connections')]   = int(row.get('connections',   0))
 
     def add_backend_stats(self, cursor):
         cursor.execute(BACKENDS)
         temp = cursor.fetchone()
 
         self.data['backend_process_active'] = int(temp.get('backends_active', 0))
-        self.data['backend_process_idle'] = int(temp.get('backends_idle', 0))
+        self.data['backend_process_idle']   = int(temp.get('backends_idle',   0))
 
     def add_index_stats(self, cursor):
         cursor.execute(INDEX_STATS)
         temp = cursor.fetchone()
-        self.data['index_count'] = int(temp.get('indexes', 0))
-        self.data['index_size'] = int(temp.get('size_indexes', 0))
+        self.data['index_count'] = int(temp.get('indexes',      0))
+        self.data['index_size']  = int(temp.get('size_indexes', 0))
 
     def add_table_stats(self, cursor):
         cursor.execute(TABLE_STATS)
         temp = cursor.fetchone()
-        self.data['table_count'] = int(temp.get('relations', 0))
-        self.data['table_size'] = int(temp.get('size_relations', 0))
+        self.data['table_count'] = int(temp.get('relations',      0))
+        self.data['table_size']  = int(temp.get('size_relations', 0))
 
     def add_lock_stats(self, cursor):
         cursor.execute(DATABASE_LOCKS)
-        # First zero out all current lock values.
+
+        # zero out all current lock values
         for database_name in self.databases:
             for lock_type in LOCK_TYPES:
-                lock_id = "{}_{}".format(database_name, lock_type.lower())
-                self.data[lock_id] = 0
+                self.data["{0}_{1}".format(database_name, lock_type)] = 0
 
-        # Now populate those that have current locks
+        # populate those that have current locks
         for row in cursor:
             database_name, lock_type, lock_count = row
-            lock_id = "{}_{}".format(database_name, lock_type.lower())
-            self.data[lock_id] = lock_count
+            self.data["{0}_{1}".format(database_name, lock_type)] = lock_count
 
     def add_wal_stats(self, cursor):
         cursor.execute(ARCHIVE)
         temp = cursor.fetchone()
-        self.add_derive_value('wal_total', int(temp.get('file_count', 0)))
-        self.add_derive_value('wal_ready', int(temp.get('ready_count', 0)))
-        self.add_derive_value('wal_done', int(temp.get('done_count', 0)))
-
-    def add_statio_stats(self, cursor):
-        cursor.execute(STATIO)
-        temp = cursor.fetchone()
-        self.add_derive_value('operations_heap_blocks_read', int(temp.get('heap_blocks_read', 0)))
-        self.add_derive_value('operations_heap_blocks_hit', int(temp.get('heap_blocks_hit', 0)))
-        self.add_derive_value('operations_index_blocks_read', int(temp.get('index_blocks_read', 0)))
-        self.add_derive_value('operations_index_blocks_hit', int(temp.get('index_blocks_hit', 0)))
-        self.add_derive_value('operations_toast_blocks_read', int(temp.get('toast_blocks_read', 0)))
-        self.add_derive_value('operations_toast_blocks_hit', int(temp.get('toast_blocks_hit', 0)))
-        self.add_derive_value('operations_toastindex_blocks_read', int(temp.get('toastindex_blocks_read', 0)))
-        self.add_derive_value('operations_toastindex_blocks_hit', int(temp.get('toastindex_blocks_hit', 0)))
+        self.data['wal_total'] = int(temp.get('file_count',  0))
+        self.data['wal_ready'] = int(temp.get('ready_count', 0))
+        self.data['wal_done']  = int(temp.get('done_count',  0))
 
     def add_bgwriter_stats(self, cursor):
         cursor.execute(BGWRITER)
         temp = cursor.fetchone()
-
-        self.add_derive_value('background_writer_scheduled', temp.get('checkpoints_timed', 0))
-        self.add_derive_value('background_writer_requested', temp.get('checkpoints_requests', 0))
-
-    def add_derive_value(self, key, value, prefix=None):
-        if prefix:
-            key = "{}_{}".format(prefix, key)
-        if key not in self.old_data.keys():
-            self.data[key] = 0
-        else:
-            self.data[key] = value - self.old_data[key]
-
-        self.old_data[key] = value
-
+        # note: the pg_stat_bgwriter column is checkpoints_req, so the key
+        # 'checkpoints_requests' would never match and always fall back to 0
+        self.data['background_writer_scheduled'] = temp.get('checkpoints_timed', 0)
+        self.data['background_writer_requested'] = temp.get('checkpoints_req',   0)
 
 '''
     def add_replication_stats(self, cursor):