arthur.barton.de Git - netdata.git/commitdiff
Merge pull request #608 from candrews/patch-1
author Costa Tsaousis <costa@tsaousis.gr>
Tue, 9 Aug 2016 16:46:27 +0000 (19:46 +0300)
committer GitHub <noreply@github.com>
Tue, 9 Aug 2016 16:46:27 +0000 (19:46 +0300)
Harden the netdata systemd service

23 files changed:
README.md
conf.d/Makefile.am
conf.d/apps_groups.conf
conf.d/python.d.conf
conf.d/python.d/dovecot.conf [new file with mode: 0644]
conf.d/python.d/hddtemp.conf
conf.d/python.d/ipfs.conf [new file with mode: 0644]
conf.d/python.d/memcached.conf [new file with mode: 0644]
configs.signatures
plugins.d/python.d.plugin
python.d/Makefile.am
python.d/dovecot.chart.py [new file with mode: 0644]
python.d/hddtemp.chart.py
python.d/ipfs.chart.py [new file with mode: 0644]
python.d/memcached.chart.py [new file with mode: 0644]
python.d/mysql.chart.py
python.d/nginx.chart.py
python.d/python_modules/base.py
python.d/tomcat.chart.py
src/appconfig.c
src/apps_plugin.c
src/sys_fs_cgroup.c
web/index.html

index 049a133d524acad73142cbb93bc88e332ba9213e..47ed5bfecfe7880905da1f1ee16e9eeeca6a04aa 100644 (file)
--- a/README.md
+++ b/README.md
@@ -107,17 +107,25 @@ This is what it currently monitors (most with zero configuration):
 
 - **Users and User Groups resource usage**, by summarizing the process tree per user and group (CPU, memory, disk reads, disk writes, swap, threads, pipes, sockets, etc)
 
-- **Apache web server** mod-status (v2.2, v2.4)
+- **Apache web server** mod-status (v2.2, v2.4) and cache log statistics (multiple servers)
 
-- **Nginx web server** stub-status
+- **Nginx web server** stub-status (multiple servers)
 
 - **mySQL databases** (multiple servers, each showing: bandwidth, queries/s, handlers, locks, issues, tmp operations, connections, binlog metrics, threads, innodb metrics, etc)
 
+- **Redis databases** (multiple servers, each showing: operations, hit rate, memory, keys, clients, slaves)
+
+- **memcached databases** (multiple servers, each showing: bandwidth, connections, items, etc)
+
 - **ISC Bind name server** (multiple servers, each showing: clients, requests, queries, updates, failures and several per view metrics)
 
 - **Postfix email server** message queue (entries, size)
 
-- **Squid proxy server** (clients bandwidth and requests, servers bandwidth and requests) 
+- **exim email server** message queue (emails queued)
+
+- **IPFS** (bandwidth, peers)
+
+- **Squid proxy server** (multiple servers, each showing: clients bandwidth and requests, servers bandwidth and requests)
 
 - **Hardware sensors** (temperature, voltage, fans, power, humidity, etc)
 
@@ -127,6 +135,8 @@ This is what it currently monitors (most with zero configuration):
 
 - **PHP-FPM** (multiple instances, each reporting connections, requests, performance)
 
+- **hddtemp** (disk temperatures)
+
 - **SNMP devices** can be monitored too (although you will need to configure these)
 
 And you can extend it, by writing plugins that collect data from any source, using any computer language.
@@ -160,4 +170,3 @@ It should run on **any Linux** system. It has been tested on:
 ## Documentation
 
 Check the **[netdata wiki](https://github.com/firehol/netdata/wiki)**.
-
index 20fc706230797f4e10f01e8967670b508fc683aa..6be4945e6c440189345c98af54af5d5ba4d7568b 100644 (file)
@@ -22,9 +22,12 @@ dist_pythonconfig_DATA = \
        python.d/apache.conf \
        python.d/apache_cache.conf \
        python.d/cpufreq.conf \
+       python.d/dovecot.conf \
        python.d/example.conf \
        python.d/exim.conf \
        python.d/hddtemp.conf \
+       python.d/ipfs.conf \
+       python.d/memcached.conf \
        python.d/mysql.conf \
        python.d/nginx.conf \
        python.d/phpfpm.conf \
index 42bd58d267c051e1fbfd75596c84c0da9220b3ec..0a6f55cd790c89b286fcb4dce631f262deb43e81 100644 (file)
@@ -216,3 +216,4 @@ crsproxy: crsproxy
 sidekiq: *sidekiq*
 java: java
 chat: irssi
+ipfs: ipfs
index eebf241b0c303d0f67612361918b59569c667855..18558fbd2294f1ae33308ede95949d0a4583b319 100644 (file)
@@ -21,6 +21,7 @@ enabled: yes
 example: no
 # exim: yes
 # hddtemp: yes
+# ipfs: yes
 # mysql: yes
 # nginx: yes
 # phpfpm: yes
diff --git a/conf.d/python.d/dovecot.conf b/conf.d/python.d/dovecot.conf
new file mode 100644 (file)
index 0000000..917c527
--- /dev/null
@@ -0,0 +1,89 @@
+# netdata python.d.plugin configuration for dovecot
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs; however, each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname     # the JOB's name as it will appear at the
+#                      # dashboard (by default it is the job_name)
+#                      # JOBs sharing a name are mutually exclusive
+#     update_every: 1  # the JOB's data collection frequency
+#     priority: 60000  # the JOB's order on the dashboard
+#     retries: 5       # the JOB's number of restoration attempts
+#
+# In addition to the above, dovecot also supports the following:
+#
+#     socket: 'path/to/dovecot/stats'
+#
+#  or
+#     host: 'IP or HOSTNAME' # the host to connect to
+#     port: PORT             # the port to connect to
+#
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+  name     : 'local'
+  host     : 'localhost'
+  port     : 24242
+
+localipv4:
+  name     : 'local'
+  host     : '127.0.0.1'
+  port     : 24242
+
+localipv6:
+  name     : 'local'
+  host     : '::1'
+  port     : 24242
+
+localsocket:
+  name     : 'local'
+  socket   : '/var/run/dovecot/stats'
+
index 34790e7c6027753531ee5875bca6517463e2af59..f3cb667a5e5b0256b636d89dd003a7a475cabfd4 100644 (file)
 #     port: PORT             # the port to connect to
 #
 
+# By default this module will try to autodetect the number of disks.
+# However, this can be overridden by setting the variable `disk_count`
+# to the desired number of disks. Example for two disks:
+#
+# disk_count: 2
+#
+
 # ----------------------------------------------------------------------
 # AUTO-DETECTION JOBS
 # only one of them will run (they have the same name)
@@ -76,5 +83,5 @@ localipv4:
 
 localipv6:
   name: 'local'
-  host: '127.0.0.1'
+  host: '::1'
   port: 7634
diff --git a/conf.d/python.d/ipfs.conf b/conf.d/python.d/ipfs.conf
new file mode 100644 (file)
index 0000000..e039026
--- /dev/null
@@ -0,0 +1,67 @@
+# netdata python.d.plugin configuration for ipfs
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs; however, each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname     # the JOB's name as it will appear at the
+#                      # dashboard (by default it is the job_name)
+#                      # JOBs sharing a name are mutually exclusive
+#     update_every: 1  # the JOB's data collection frequency
+#     priority: 60000  # the JOB's order on the dashboard
+#     retries: 5       # the JOB's number of restoration attempts
+#
+# In addition to the above, ipfs also supports the following:
+#
+#     url: 'URL'       # URL to the IPFS API
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+  name : 'local'
+  url  : 'http://localhost:5001'
diff --git a/conf.d/python.d/memcached.conf b/conf.d/python.d/memcached.conf
new file mode 100644 (file)
index 0000000..f1723dc
--- /dev/null
@@ -0,0 +1,85 @@
+# netdata python.d.plugin configuration for memcached
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs; however, each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname     # the JOB's name as it will appear at the
+#                      # dashboard (by default it is the job_name)
+#                      # JOBs sharing a name are mutually exclusive
+#     update_every: 1  # the JOB's data collection frequency
+#     priority: 60000  # the JOB's order on the dashboard
+#     retries: 5       # the JOB's number of restoration attempts
+#
+# In addition to the above, memcached also supports the following:
+#
+#     socket: 'path/to/memcached.sock'
+#
+#  or
+#     host: 'IP or HOSTNAME' # the host to connect to
+#     port: PORT             # the port to connect to
+#
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+  name     : 'local'
+  host     : 'localhost'
+  port     : 11211
+
+localipv4:
+  name     : 'local'
+  host     : '127.0.0.1'
+  port     : 11211
+
+localipv6:
+  name     : 'local'
+  host     : '::1'
+  port     : 11211
+
index dc7e90996dc8a76e1bd54e1d1227e4891084ed6a..26d1a277de1f746602b92cc981a416f4d36d9f47 100644 (file)
@@ -3,6 +3,7 @@ declare -A configs_signatures=(
        ['18ee1c6197a4381b1c1631ef6129824f']='apps_groups.conf'
        ['2f4a85fedecce1bf425fa1039f6b021e']='apps_groups.conf'
        ['3af522d65b50a5e447607ffb28c81ff5']='apps_groups.conf'
+       ['3b1bfa40a4ff6a200bb2fc00bc51a664']='apps_groups.conf'
        ['4a448831776de8acf2e0bdc4cc994cb4']='apps_groups.conf'
        ['5bf51bb24fb41db9b1e448bd060d3f8c']='apps_groups.conf'
        ['636d032928ea0f4741eab264fb49c099']='apps_groups.conf'
@@ -42,6 +43,7 @@ declare -A configs_signatures=(
        ['b27f10a38a95edbbec20f44a4728b7c4']='python.d.conf'
        ['b32164929eda7449a9677044e11151bf']='python.d.conf'
        ['b8969be5b3ceb4a99477937119bd4323']='python.d.conf'
+       ['bba2f3886587f137ea08a6e63dd3d376']='python.d.conf'
        ['d55be5bb5e108da1e7645da007c53cd4']='python.d.conf'
        ['f82924563e41d99cdae5431f0af69155']='python.d.conf'
        ['7830066c46a7e5f9682b8d3f4566b4e5']='python.d/cpufreq.conf'
@@ -55,8 +57,12 @@ declare -A configs_signatures=(
        ['73125ae64d5c6e9361944cd9bd14844e']='python.d/exim.conf'
        ['a94af1c808aafdf00537d85ff2197ec8']='python.d/exim.conf'
        ['2a0794fd43eadf30a51805bc9ba2c64d']='python.d/hddtemp.conf'
+       ['47180421d580baeaedf8c0ef3d647fb5']='python.d/hddtemp.conf'
        ['731a1fcfe9b2da1b9d685056a59541b8']='python.d/hddtemp.conf'
        ['d74dc63fbe631dab9a2ff1b0f5d71719']='python.d/hddtemp.conf'
+       ['0e59bc11d0a869ea0247c04c08c8d72e']='python.d/ipfs.conf'
+       ['70105b1744a8e13f49083d7f1981aea2']='python.d/ipfs.conf'
+       ['97f337eb96213f3ede05e522e3743a6c']='python.d/memcached.conf'
        ['1ea8e8ef1fa8a3a0fcdfba236f4cb195']='python.d/mysql.conf'
        ['5379cdc26d7725e2b0d688d785816cef']='python.d/mysql.conf'
        ['7deb236ec68a512b9bdd18e6a51d76f7']='python.d/mysql.conf'
index 085d4fc7cfe9beb83a5c4fc6741c770957e3b448..1301777bee9debbd70108b093a4116297b8d459c 100755 (executable)
@@ -353,7 +353,7 @@ class PythonCharts(object):
                         pass
             except AttributeError as e:
                 self._stop(job)
-                msg.error(job.chart_name, "cannot find check() function.")
+                msg.error(job.chart_name, "cannot find check() function or it thrown unhandled exception.")
                 msg.debug(str(e))
             except (UnboundLocalError, Exception) as e:
                 msg.error(job.chart_name, str(e))
@@ -390,7 +390,7 @@ class PythonCharts(object):
                     # sys.stdout.flush()
                     i += 1
             except AttributeError:
-                msg.error(job.chart_name, "cannot find create() function.")
+                msg.error(job.chart_name, "cannot find create() function or it thrown unhandled exception.")
                 self._stop(job)
             except (UnboundLocalError, Exception) as e:
                 msg.error(job.chart_name, str(e))
index cdb19d2ddc7d0d4f6999d42481f3786ebf628c24..57d89bad251ed1510bcd6d573f83c92db5d68052 100644 (file)
@@ -11,9 +11,12 @@ dist_python_SCRIPTS = \
        apache.chart.py \
        apache_cache.chart.py \
        cpufreq.chart.py \
+       dovecot.chart.py \
        example.chart.py \
        exim.chart.py \
        hddtemp.chart.py \
+       ipfs.chart.py \
+       memcached.chart.py \
        mysql.chart.py \
        nginx.chart.py \
        phpfpm.chart.py \
@@ -77,4 +80,4 @@ dist_pythonyaml3_DATA = \
     python_modules/pyyaml3/scanner.py \
     python_modules/pyyaml3/serializer.py \
     python_modules/pyyaml3/tokens.py \
-    $(NULL)
\ No newline at end of file
+    $(NULL)
diff --git a/python.d/dovecot.chart.py b/python.d/dovecot.chart.py
new file mode 100644 (file)
index 0000000..4ec1acd
--- /dev/null
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+# Description: dovecot netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+
+from base import SocketService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# charts order (can be overridden if you want fewer charts, or a different order)
+ORDER = ['sessions', 'commands',
+         'faults',
+         'context_switches',
+         'disk', 'bytes', 'syscalls',
+         'lookup', 'cache',
+         'auth', 'auth_cache']
+
+CHARTS = {
+    'sessions': {
+        'options': [None, "logins and sessions", 'number', 'IMAP', 'dovecot.imap', 'line'],
+        'lines': [
+            ['num_logins', 'logins', 'absolute'],
+            ['num_connected_sessions', 'active sessions', 'absolute']
+        ]},
+    'commands': {
+        'options': [None, "commands", "commands", 'IMAP', 'dovecot.imap', 'line'],
+        'lines': [
+            ['num_cmds', 'commands', 'absolute']
+        ]},
+    'faults': {
+        'options': [None, "faults", "faults", 'Faults', 'dovecot.faults', 'line'],
+        'lines': [
+            ['min_faults', 'minor', 'absolute'],
+            ['maj_faults', 'major', 'absolute']
+        ]},
+    'context_switches': {
+        'options': [None, "context switches", '', 'Context Switches', 'dovecot.context_switches', 'line'],
+        'lines': [
+            ['vol_cs', 'voluntary', 'absolute'],
+            ['invol_cs', 'involuntary', 'absolute']
+        ]},
+    'disk': {
+        'options': [None, "disk", 'bytes/s', 'Reads and Writes', 'dovecot.read_write', 'line'],
+        'lines': [
+            ['disk_input', 'read', 'incremental'],
+            ['disk_output', 'write', 'incremental']
+        ]},
+    'bytes': {
+        'options': [None, "bytes", 'bytes/s', 'Reads and Writes', 'dovecot.read_write', 'line'],
+        'lines': [
+            ['read_bytes', 'read', 'incremental'],
+            ['write_bytes', 'write', 'incremental']
+        ]},
+    'syscalls': {
+        'options': [None, "number of syscalls", 'syscalls/s', 'Reads and Writes', 'dovecot.read_write', 'line'],
+        'lines': [
+            ['read_count', 'read', 'incremental'],
+            ['write_count', 'write', 'incremental']
+        ]},
+    'lookup': {
+        'options': [None, "lookups", 'number/s', 'Mail', 'dovecot.mail', 'line'],
+        'lines': [
+            ['mail_lookup_path', 'path', 'incremental'],
+            ['mail_lookup_attr', 'attr', 'incremental']
+        ]},
+    'cache': {
+        'options': [None, "hits", 'hits/s', 'Mail', 'dovecot.mail', 'line'],
+        'lines': [
+            ['mail_cache_hits', 'hits', 'incremental']
+        ]},
+    'auth': {
+        'options': [None, "attempts", 'attempts', 'Authentication', 'dovecot.auth', 'stacked'],
+        'lines': [
+            ['auth_successes', 'success', 'absolute'],
+            ['auth_failures', 'failure', 'absolute']
+        ]},
+    'auth_cache': {
+        'options': [None, "cache", 'number', 'Authentication', 'dovecot.auth', 'stacked'],
+        'lines': [
+            ['auth_cache_hits', 'hit', 'absolute'],
+            ['auth_cache_misses', 'miss', 'absolute']
+        ]}
+}
+
+
+class Service(SocketService):
+    def __init__(self, configuration=None, name=None):
+        SocketService.__init__(self, configuration=configuration, name=name)
+        self.request = "EXPORT\tglobal\r\n"
+        self.host = None  # localhost
+        self.port = None  # 24242
+        # self._keep_alive = True
+        self.unix_socket = "/var/run/dovecot/stats"
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def _get_data(self):
+        """
+        Format data received from socket
+        :return: dict
+        """
+        try:
+            raw = self._get_raw_data()
+        except (ValueError, AttributeError):
+            return None
+
+        data = raw.split('\n')[:2]
+        desc = data[0].split('\t')
+        vals = data[1].split('\t')
+        # ret = dict(zip(desc, vals))
+        ret = {}
+        for i in range(len(desc)):
+            try:
+                #d = str(desc[i])
+                #if d in ('user_cpu', 'sys_cpu', 'clock_time'):
+                #    val = float(vals[i])
+                #else:
+                #    val = int(vals[i])
+                #ret[d] = val
+                ret[str(desc[i])] = int(vals[i])
+            except ValueError:
+                pass
+        if len(ret) == 0:
+            return None
+        return ret
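
A note on the dovecot module above: the stats socket speaks a simple tab-separated protocol — one header row and one value row per `EXPORT` request — which is what `_get_data()` zips into a dict. A minimal standalone sketch of that exchange, assuming dovecot's stats plugin is listening on its default unix socket:

```python
import socket

def fetch_dovecot_stats(path='/var/run/dovecot/stats'):
    # Request the global stats row and pair header fields with values.
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(path)
    sock.sendall(b'EXPORT\tglobal\r\n')
    raw = sock.recv(65536).decode()
    sock.close()
    header, values = raw.split('\n')[:2]
    return dict(zip(header.split('\t'), values.split('\t')))
```
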
index ebbe1cea9b0b4a7e3aae7baed15ce70125465c16..beef64161645ff47363f3f4109ace960fe4e3514 100644 (file)
@@ -65,7 +65,6 @@ class Service(SocketService):
         Get data from TCP/IP socket
         :return: dict
         """
-        self.disk_count = self._get_disk_count()
         try:
             raw = self._get_raw_data().split("|")[:-1]
         except AttributeError:
@@ -96,6 +95,14 @@ class Service(SocketService):
         except (KeyError, TypeError) as e:
             self.info("No excluded disks")
             self.debug(str(e))
+
+        try:
+            self.disk_count = int(self.configuration['disk_count'])
+        except (KeyError, TypeError) as e:
+            self.info("Autodetecting number of disks")
+            self.disk_count = self._get_disk_count()
+            self.debug(str(e))
+
         data = self._get_data()
         if data is None:
             return False
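
The hddtemp change above moves disk counting out of `_get_data()` and lets a configured `disk_count` short-circuit autodetection. The pattern in isolation (a sketch; `autodetect` stands in for the module's `_get_disk_count()` prober):

```python
def resolve_disk_count(configuration, autodetect):
    # An explicit disk_count in the job configuration wins; otherwise
    # fall back to probing, as check() now does.
    try:
        return int(configuration['disk_count'])
    except (KeyError, TypeError):
        return autodetect()
```
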
diff --git a/python.d/ipfs.chart.py b/python.d/ipfs.chart.py
new file mode 100644 (file)
index 0000000..b0b2a96
--- /dev/null
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+# Description: IPFS netdata python.d module
+# Authors: Pawel Krupa (paulfantom), davidak
+
+from base import UrlService
+import json
+
+# default module values (can be overridden per job in `config`)
+# update_every = 2
+priority = 60000
+retries = 60
+
+# default job configuration (overridden by python.d.plugin)
+# config = {'local': {
+#     'update_every': update_every,
+#     'retries': retries,
+#     'priority': priority,
+#     'url': 'http://localhost:5001'
+# }}
+
+# charts order (can be overridden if you want less charts, or different order)
+ORDER = ['bandwidth', 'peers']
+
+CHARTS = {
+    'bandwidth': {
+        'options': [None, 'IPFS Bandwidth', 'kbits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
+        'lines': [
+            ["in", None, "absolute", 8, 1000],
+            ["out", None, "absolute", -8, 1000]
+        ]},
+    'peers': {
+        'options': [None, 'IPFS Peers', 'peers', 'Peers', 'ipfs.peers', 'line'],
+        'lines': [
+            ["peers", None, 'absolute']
+        ]}
+}
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        try:
+            self.baseurl = str(self.configuration['url'])
+        except (KeyError, TypeError):
+            self.baseurl = "http://localhost:5001"
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def _get_bandwidth(self):
+        """
+        Format data received from http request
+        :return: int, int
+        """
+        self.url = self.baseurl + "/api/v0/stats/bw"
+        try:
+            raw = self._get_raw_data()
+        except AttributeError:
+            return None
+
+        try:
+            parsed = json.loads(raw)
+            bw_in = int(parsed['RateIn'])
+            bw_out = int(parsed['RateOut'])
+        except:
+            return None
+
+        return bw_in, bw_out
+
+    def _get_peers(self):
+        """
+        Format data received from http request
+        :return: int
+        """
+        self.url = self.baseurl + "/api/v0/swarm/peers"
+        try:
+            raw = self._get_raw_data()
+        except AttributeError:
+            return None
+
+        try:
+            parsed = json.loads(raw)
+            peers = len(parsed['Strings'])
+        except:
+            return None
+
+        return peers
+
+    def _get_data(self):
+        """
+        Get data from API
+        :return: dict
+        """
+        try:
+            peers = self._get_peers()
+            bandwidth_in, bandwidth_out = self._get_bandwidth()
+        except:
+            return None
+        data = {}
+        if peers is not None:
+            data['peers'] = peers
+        if bandwidth_in is not None and bandwidth_out is not None:
+            data['in'] = bandwidth_in
+            data['out'] = bandwidth_out
+
+        if len(data) == 0:
+            return None
+        return data
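
The ipfs module above polls two HTTP endpoints per update. A hedged standalone sketch of one sample, assuming a local IPFS daemon with the 2016-era API (which accepted plain GET requests):

```python
import json
try:
    from urllib.request import urlopen   # Python 3
except ImportError:
    from urllib2 import urlopen          # Python 2, as python.d.plugin uses

def ipfs_sample(base='http://localhost:5001'):
    # Bandwidth rates come from /stats/bw; the peer count is the length
    # of the Strings list returned by /swarm/peers.
    bw = json.loads(urlopen(base + '/api/v0/stats/bw').read().decode())
    peers = json.loads(urlopen(base + '/api/v0/swarm/peers').read().decode())
    return {'in': int(bw['RateIn']), 'out': int(bw['RateOut']),
            'peers': len(peers['Strings'])}
```
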
diff --git a/python.d/memcached.chart.py b/python.d/memcached.chart.py
new file mode 100644 (file)
index 0000000..e99186f
--- /dev/null
@@ -0,0 +1,167 @@
+# -*- coding: utf-8 -*-
+# Description: memcached netdata python.d module
+# Author: Pawel Krupa (paulfantom)
+
+from base import SocketService
+
+# default module values (can be overridden per job in `config`)
+#update_every = 2
+priority = 60000
+retries = 60
+
+# default job configuration (overridden by python.d.plugin)
+# config = {'local': {
+#             'update_every': update_every,
+#             'retries': retries,
+#             'priority': priority,
+#             'host': 'localhost',
+#             'port': 11211,
+#             'unix_socket': None
+#          }}
+
+ORDER = ['net', 'connections', 'items', 'evicted_reclaimed', 'get', 'get_rate', 'set_rate', 'delete', 'cas', 'increment', 'decrement', 'touch', 'touch_rate']
+
+CHARTS = {
+    'net': {
+        'options': [None, 'Network', 'kilobytes/s', 'Network', 'memcached.net', 'line'],
+        'lines': [
+            ['bytes_read', 'read', 'incremental', 1, 1024],
+            ['bytes_written', 'written', 'incremental', 1, 1024]
+        ]},
+    'connections': {
+        'options': [None, 'Connections', 'connections/s', 'Cluster', 'memcached.cluster', 'line'],
+        'lines': [
+            ['curr_connections', 'current', 'incremental'],
+            ['rejected_connections', 'rejected', 'incremental'],
+            ['total_connections', 'total', 'incremental']
+        ]},
+    'items': {
+        'options': [None, 'Items', 'items', 'Cluster', 'memcached.cluster', 'line'],
+        'lines': [
+            ['curr_items', 'current', 'absolute'],
+            ['total_items', 'total', 'absolute']
+        ]},
+    'evicted_reclaimed': {
+        'options': [None, 'Items', 'items', 'Evicted and Reclaimed', 'memcached.evicted_reclaimed', 'line'],
+        'lines': [
+            ['evictions', 'evicted', 'absolute'],
+            ['reclaimed', 'reclaimed', 'absolute']
+        ]},
+    'get': {
+        'options': [None, 'Requests', 'requests', 'GET', 'memcached.get', 'stacked'],
+        'lines': [
+            ['get_hits', 'hits', 'percent-of-absolute-row'],
+            ['get_misses', 'misses', 'percent-of-absolute-row']
+        ]},
+    'get_rate': {
+        'options': [None, 'Rate', 'requests/s', 'GET', 'memcached.get', 'line'],
+        'lines': [
+            ['cmd_get', 'rate', 'incremental']
+        ]},
+    'set_rate': {
+        'options': [None, 'Rate', 'requests/s', 'SET', 'memcached.set', 'line'],
+        'lines': [
+            ['cmd_set', 'rate', 'incremental']
+        ]},
+    'delete': {
+        'options': [None, 'Requests', 'requests', 'DELETE', 'memcached.delete', 'stacked'],
+        'lines': [
+            ['delete_hits', 'hits', 'percent-of-absolute-row'],
+            ['delete_misses', 'misses', 'percent-of-absolute-row'],
+        ]},
+    'cas': {
+        'options': [None, 'Requests', 'requests', 'CAS', 'memcached.cas', 'stacked'],
+        'lines': [
+            ['cas_hits', 'hits', 'percent-of-absolute-row'],
+            ['cas_misses', 'misses', 'percent-of-absolute-row'],
+            ['cas_badval', 'bad value', 'percent-of-absolute-row']
+        ]},
+    'increment': {
+        'options': [None, 'Requests', 'requests', 'Increment', 'memcached.incr', 'stacked'],
+        'lines': [
+            ['incr_hits', 'hits', 'percent-of-absolute-row'],
+            ['incr_misses', 'misses', 'percent-of-absolute-row']
+        ]},
+    'decrement': {
+        'options': [None, 'Requests', 'requests', 'Decrement', 'memcached.decr', 'stacked'],
+        'lines': [
+            ['decr_hits', 'hits', 'percent-of-absolute-row'],
+            ['decr_misses', 'misses', 'percent-of-absolute-row']
+        ]},
+    'touch': {
+        'options': [None, 'Requests', 'requests', 'Touch', 'memcached.touch', 'stacked'],
+        'lines': [
+            ['touch_hits', 'hits', 'percent-of-absolute-row'],
+            ['touch_misses', 'misses', 'percent-of-absolute-row']
+        ]},
+    'touch_rate': {
+        'options': [None, 'Rate', 'requests/s', 'Touch', 'memcached.touch', 'line'],
+        'lines': [
+            ['cmd_touch', 'rate', 'incremental']
+        ]}
+}
+
+
+class Service(SocketService):
+    def __init__(self, configuration=None, name=None):
+        SocketService.__init__(self, configuration=configuration, name=name)
+        self.request = "stats\r\n"
+        self.host = "localhost"
+        self.port = 11211
+        self._keep_alive = True
+        self.unix_socket = None
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def _get_data(self):
+        """
+        Get data from socket
+        :return: dict
+        """
+        try:
+            raw = self._get_raw_data().split("\n")
+        except AttributeError:
+            self.error("no data received")
+            return None
+        if raw[0].startswith('ERROR'):
+            self.error("Memcached returned ERROR")
+            return None
+        data = {}
+        for line in raw:
+            if line.startswith('STAT'):
+                try:
+                    t = line[5:].split(' ')
+                    data[t[0]] = int(t[1])
+                except (IndexError, ValueError):
+                    pass
+        try:
+            data['hit_rate'] = int((data['keyspace_hits'] / float(data['keyspace_hits'] + data['keyspace_misses'])) * 100)
+        except:
+            data['hit_rate'] = 0
+
+        if len(data) == 0:
+            self.error("received data doesn't have needed records")
+            return None
+        else:
+            return data
+
+    def _check_raw_data(self, data):
+        if data.endswith('END\r\n'):
+            return True
+        else:
+            return False
+
+    def check(self):
+        """
+        Parse configuration, check if memcached is available
+        :return: boolean
+        """
+        self._parse_config()
+        if self.name == "":
+            self.name = "local"
+        self.chart_name += "_" + self.name
+        data = self._get_data()
+        if data is None:
+            return False
+
+        return True
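
The memcached module above relies on the text protocol's `stats` command, which returns `STAT <name> <value>` lines terminated by `END`. A minimal sketch of that exchange, assuming a local memcached on the default port:

```python
import socket

def memcached_stats(host='localhost', port=11211):
    sock = socket.create_connection((host, port))
    sock.sendall(b'stats\r\n')
    raw = b''
    while not raw.endswith(b'END\r\n'):   # same end marker _check_raw_data tests
        chunk = sock.recv(4096)
        if not chunk:
            break
        raw += chunk
    sock.close()
    stats = {}
    for line in raw.decode().splitlines():
        if line.startswith('STAT'):
            _, key, value = line.split(' ', 2)
            try:
                stats[key] = int(value)   # non-integer stats are skipped
            except ValueError:
                pass
    return stats
```
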
index 5c996d7768d2545308ede774884c8287f6137d0c..d84f322429b5234e923b96c1a3a0e47558e9f8a0 100644 (file)
@@ -39,7 +39,7 @@ retries = 60
 #}
 
 # query executed on MySQL server
-QUERY = "SHOW GLOBAL STATUS"
+QUERY = "SHOW GLOBAL STATUS;"
 
 ORDER = ['net',
          'queries',
@@ -351,6 +351,10 @@ class Service(SimpleService):
                                               host=self.configuration['host'],
                                               port=self.configuration['port'],
                                               connect_timeout=self.update_every)
+        except MySQLdb.OperationalError as e:
+            self.error("Cannot establish connection to MySQL.")
+            self.debug(str(e))
+            raise RuntimeError
         except Exception as e:
             self.error("problem connecting to server:", e)
             raise RuntimeError
@@ -369,11 +373,18 @@ class Service(SimpleService):
             cursor = self.connection.cursor()
             cursor.execute(QUERY)
             raw_data = cursor.fetchall()
+        except MySQLdb.OperationalError as e:
+            self.debug("Reconnecting due to", str(e))
+            self._connect()
+            cursor = self.connection.cursor()
+            cursor.execute(QUERY)
+            raw_data = cursor.fetchall()
         except Exception as e:
             self.error("cannot execute query.", e)
             self.connection.close()
             self.connection = None
             return None
+
         data = dict(raw_data)
         try:
             data["Thread_cache_misses"] = int(data["Threads_created"] * 10000 / float(data["Connections"]))
index 83b1892a9ea9e2117f184eb221661196f603f310..07196173871821c511d6dd51e587a59a94a99608 100644 (file)
@@ -63,11 +63,11 @@ class Service(UrlService):
         try:
             raw = self._get_raw_data().split(" ")
             return {'active': int(raw[2]),
-                    'requests': int(raw[7]),
+                    'requests': int(raw[9]),
                     'reading': int(raw[11]),
                     'writing': int(raw[13]),
                     'waiting': int(raw[15]),
-                    'accepts': int(raw[8]),
-                    'handled': int(raw[9])}
+                    'accepts': int(raw[7]),
+                    'handled': int(raw[8])}
         except (ValueError, AttributeError):
             return None
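
The nginx fix above corrects the field offsets read from stub_status output. Splitting a sample payload on single spaces shows why: accepts and handled are tokens 7 and 8, and requests is token 9 (the earlier code had them rotated).

```python
# Token positions after .split(" ") on a typical stub_status response:
sample = ("Active connections: 291 \n"
          "server accepts handled requests\n"
          " 16630948 16630948 31070465 \n"
          "Reading: 6 Writing: 179 Waiting: 106 ")
raw = sample.split(" ")
assert (raw[2], raw[7], raw[8], raw[9]) == \
    ('291', '16630948', '16630948', '31070465')
```
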
index 0eaf5cd613b98211752e54d0aa5fa81ab09cdda4..87c55830c32e3293d0ae1639149c815cb1a0c790 100644 (file)
@@ -412,7 +412,7 @@ class UrlService(SimpleService):
 
     def __add_openers(self):
         # TODO add error handling
-        opener = urllib2.build_opener()
+        self.opener = urllib2.build_opener()
 
         # Proxy handling
         # TODO currently self.proxies isn't parsed from configuration file
@@ -439,9 +439,10 @@ class UrlService(SimpleService):
         if self.user is not None and self.password is not None:
             passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
             passman.add_password(None, self.url, self.user, self.password)
-            opener.add_handler(urllib2.HTTPBasicAuthHandler(passman))
+            self.opener.add_handler(urllib2.HTTPBasicAuthHandler(passman))
+            self.debug("Enabling HTTP basic auth")
 
-        urllib2.install_opener(opener)
+        #urllib2.install_opener(opener)
 
     def _get_raw_data(self):
         """
@@ -450,7 +451,8 @@ class UrlService(SimpleService):
         """
         raw = None
         try:
-            f = urllib2.urlopen(self.url, timeout=self.update_every * 2)
+            f = self.opener.open(self.url, timeout=self.update_every * 2)
+            # f = urllib2.urlopen(self.url, timeout=self.update_every * 2)
         except Exception as e:
             self.error(str(e))
             return None
@@ -488,7 +490,8 @@ class UrlService(SimpleService):
 
         self.__add_openers()
 
-        if self._get_data() is None or len(self._get_data()) == 0:
+        test = self._get_data()
+        if test is None or len(test) == 0:
             return False
         else:
             return True
@@ -543,8 +546,13 @@ class SocketService(SimpleService):
                         self._disconnect()
             else:
                 # connect to unix socket
-                self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
-                self._sock.connect(self.unix_socket)
+                try:
+                    self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+                    self._sock.connect(self.unix_socket)
+                except socket.error:
+                    self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+                    self._sock.connect(self.unix_socket)
+
         except Exception as e:
             self.error(str(e),
                        "Cannot create socket with following configuration: host:", str(self.host),
index 24e12c6e8106b33fcffc7bea195c9c5a4e8e3267..266f2737648fac46bedc4d32049ff3125663d254 100644 (file)
@@ -6,6 +6,7 @@
 
 from base import UrlService
 import xml.etree.ElementTree as ET  # phone home...
+#from xml.parsers.expat import errors
 
 # default module values (can be overridden per job in `config`)
 # update_every = 2
@@ -19,12 +20,12 @@ CHARTS = {
     'accesses': {
         'options': [None, "tomcat requests", "requests/s", "statistics", "tomcat.accesses", "area"],
         'lines': [
-            ["accesses"]
+            ["accesses", None, 'incremental']
         ]},
     'volume': {
         'options': [None, "tomcat volume", "KB/s", "volume", "tomcat.volume", "area"],
         'lines': [
-            ["volume", None, 'incremental']
+            ["volume", None, 'incremental', 1, 1024]
         ]},
     'threads': {
         'options': [None, "tomcat threads", "current threads", "statistics", "tomcat.threads", "line"],
@@ -35,7 +36,7 @@ CHARTS = {
     'jvm': {
         'options': [None, "JVM Free Memory", "MB", "statistics", "tomcat.jvm", "area"],
         'lines': [
-            ["jvm", None, "absolute"]
+            ["jvm", None, "absolute", 1, 1048576]
         ]}
 }
 
@@ -47,6 +48,12 @@ class Service(UrlService):
             self.url = "http://localhost:8080/manager/status?XML=true"
         self.order = ORDER
         self.definitions = CHARTS
+        self.port = 8080
+
+    def check(self):
+        if UrlService.check(self):
+            return True
+
         # get port from url
         self.port = 0
         for i in self.url.split('/'):
@@ -59,6 +66,12 @@ class Service(UrlService):
         if self.port == 0:
             self.port = 80
 
+        test = self._get_data()
+        if test is None or len(test) == 0:
+            return False
+        else:
+            return True
+
     def _get_data(self):
         """
         Format data received from http request
@@ -66,7 +79,19 @@ class Service(UrlService):
         """
         try:
             raw = self._get_raw_data()
-            data = ET.fromstring(raw)
+            try:
+                data = ET.fromstring(raw)
+            except ET.ParseError as e:
+                # if e.code == errors.codes[errors.XML_ERROR_JUNK_AFTER_DOC_ELEMENT]:
+                if e.code == 9:
+                    end = raw.find('</status>')
+                    end += 9
+                    raw = raw[:end]
+                    self.debug(raw)
+                    data = ET.fromstring(raw)
+                else:
+                    raise Exception(e)
+
             memory = data.find('./jvm/memory')
             threads = data.find("./connector[@name='\"http-bio-" + str(self.port) + "\"']/threadInfo")
             requests = data.find("./connector[@name='\"http-bio-" + str(self.port) + "\"']/requestInfo")
@@ -76,7 +101,8 @@ class Service(UrlService):
                     'current': threads.attrib['currentThreadCount'],
                     'busy': threads.attrib['currentThreadsBusy'],
                     'jvm': memory.attrib['free']}
-        except (ValueError, AttributeError):
+        except (ValueError, AttributeError) as e:
+            self.debug(str(e))
             return None
         except SyntaxError as e:
             self.error("Tomcat module needs python 2.7 at least. Stopping")
index a7b68846d50baa31b7dea772acfad02cb31f9e52..748c6eff179a10ec1bb85d6afbb266c9073c43d1 100644 (file)
@@ -230,6 +230,8 @@ int config_rename(const char *section, const char *old, const char *new) {
        cv->name = strdup(new);
        if(!cv->name) fatal("Cannot allocate memory for config_rename()");
 
+       cv->hash = simple_hash(cv->name);
+
        config_value_index_add(co, cv);
        config_section_unlock(co);
 
index 185998153463ac5c0652535d59b0db2e724a2482..6b43216cb71a23b0b466e4f215109c20dd56555c 100644 (file)
@@ -1654,7 +1654,10 @@ void collect_data_for_pid(pid_t pid) {
        // /proc/<pid>/stat
 
        if(unlikely(read_proc_pid_stat(p))) {
-               error("Cannot process %s/proc/%d/stat", host_prefix, pid);
+               if(errno != ENOENT || debug)
+                       error("Cannot process %s/proc/%d/stat (command '%s')", host_prefix, pid, p->comm);
+               else
+                       errno = 0;
                // there is no reason to proceed if we cannot get its status
                return;
        }
@@ -1663,21 +1666,28 @@ void collect_data_for_pid(pid_t pid) {
 
        // check its parent pid
        if(unlikely(p->ppid < 0 || p->ppid > pid_max)) {
-               error("Pid %d states invalid parent pid %d. Using 0.", pid, p->ppid);
+               error("Pid %d (command '%s') states invalid parent pid %d. Using 0.", pid, p->comm, p->ppid);
                p->ppid = 0;
        }
 
        // --------------------------------------------------------------------
        // /proc/<pid>/io
 
-       if(unlikely(read_proc_pid_io(p)))
-               error("Cannot process %s/proc/%d/io", host_prefix, pid);
+       if(unlikely(read_proc_pid_io(p))) {
+               if(errno != ENOENT || debug)
+                       error("Cannot process %s/proc/%d/io (command '%s')", host_prefix, pid, p->comm);
+               else
+                       errno = 0;
+       }
 
        // --------------------------------------------------------------------
        // /proc/<pid>/statm
 
        if(unlikely(read_proc_pid_statm(p))) {
-               error("Cannot process %s/proc/%d/statm", host_prefix, pid);
+               if(errno != ENOENT || debug)
+                       error("Cannot process %s/proc/%d/statm (command '%s')", host_prefix, pid, p->comm);
+               else
+                       errno = 0;
                // there is no reason to proceed if we cannot get its memory status
                return;
        }
@@ -1690,8 +1700,12 @@ void collect_data_for_pid(pid_t pid) {
        if(unlikely(p->new_entry)) {
                // /proc/<pid>/cmdline
                if(likely(proc_pid_cmdline_is_needed)) {
-                       if(unlikely(read_proc_pid_cmdline(p)))
-                               error("Cannot process %s/proc/%d/cmdline", host_prefix, pid);
+                       if(unlikely(read_proc_pid_cmdline(p))) {
+                               if(errno != ENOENT || debug)
+                                       error("Cannot process %s/proc/%d/cmdline (command '%s')", host_prefix, pid, p->comm);
+                               else
+                                       errno = 0;
+                       }
                }
 
                if(unlikely(debug))
@@ -1729,7 +1743,10 @@ void collect_data_for_pid(pid_t pid) {
        // /proc/<pid>/fd
 
        if(unlikely(read_pid_file_descriptors(p))) {
-               error("Cannot process entries in %s/proc/%d/fd", host_prefix, pid);
+               if(errno != ENOENT || debug)
+                       error("Cannot process entries in %s/proc/%d/fd (command '%s')", host_prefix, pid, p->comm);
+               else
+                       errno = 0;
        }
 
        // --------------------------------------------------------------------
index 2c1e9b1a39a199b646bfee9874dd853a7ab9df85..5bc408c95a58ae79afbbb876a2fb03a48e0254d2 100644 (file)
@@ -8,6 +8,7 @@
 #include <sys/types.h>
 #include <dirent.h>
 #include <string.h>
+#include <sys/stat.h>
 
 #include "common.h"
 #include "appconfig.h"
@@ -923,6 +924,7 @@ void find_all_cgroups() {
        cleanup_all_cgroups();
 
        struct cgroup *cg;
+       struct stat buf;
        for(cg = cgroup_root; cg ; cg = cg->next) {
                // fprintf(stderr, " >>> CGROUP '%s' (%u - %s) with name '%s'\n", cg->id, cg->hash, cg->available?"available":"stopped", cg->name);
 
@@ -936,49 +938,76 @@ void find_all_cgroups() {
                char filename[FILENAME_MAX + 1];
                if(cgroup_enable_cpuacct_stat && !cg->cpuacct_stat.filename) {
                        snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.stat", cgroup_cpuacct_base, cg->id);
-                       cg->cpuacct_stat.filename = strdup(filename);
-                       debug(D_CGROUP, "cpuacct.stat filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_stat.filename);
+                       if(stat(filename, &buf) != -1) {
+                               cg->cpuacct_stat.filename = strdup(filename);
+                               debug(D_CGROUP, "cpuacct.stat filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_stat.filename);
+                       }
+                       else debug(D_CGROUP, "cpuacct.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename);
                }
                if(cgroup_enable_cpuacct_usage && !cg->cpuacct_usage.filename) {
                        snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.usage_percpu", cgroup_cpuacct_base, cg->id);
-                       cg->cpuacct_usage.filename = strdup(filename);
-                       debug(D_CGROUP, "cpuacct.usage_percpu filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_usage.filename);
+                       if(stat(filename, &buf) != -1) {
+                               cg->cpuacct_usage.filename = strdup(filename);
+                               debug(D_CGROUP, "cpuacct.usage_percpu filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_usage.filename);
+                       }
+                       else debug(D_CGROUP, "cpuacct.usage_percpu file for cgroup '%s': '%s' does not exist.", cg->id, filename);
                }
                if(cgroup_enable_memory && !cg->memory.filename) {
                        snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_memory_base, cg->id);
-                       cg->memory.filename = strdup(filename);
-                       debug(D_CGROUP, "memory.stat filename for cgroup '%s': '%s'", cg->id, cg->memory.filename);
+                       if(stat(filename, &buf) != -1) {
+                               cg->memory.filename = strdup(filename);
+                               debug(D_CGROUP, "memory.stat filename for cgroup '%s': '%s'", cg->id, cg->memory.filename);
+                       }
+                       else debug(D_CGROUP, "memory.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename);
                }
                if(cgroup_enable_blkio) {
                        if(!cg->io_service_bytes.filename) {
                                snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_service_bytes", cgroup_blkio_base, cg->id);
-                               cg->io_service_bytes.filename = strdup(filename);
-                               debug(D_CGROUP, "io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->io_service_bytes.filename);
+                               if(stat(filename, &buf) != -1) {
+                                       cg->io_service_bytes.filename = strdup(filename);
+                                       debug(D_CGROUP, "io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->io_service_bytes.filename);
+                               }
+                               else debug(D_CGROUP, "io_service_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
                        }
                        if(!cg->io_serviced.filename) {
                                snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_serviced", cgroup_blkio_base, cg->id);
-                               cg->io_serviced.filename = strdup(filename);
-                               debug(D_CGROUP, "io_serviced filename for cgroup '%s': '%s'", cg->id, cg->io_serviced.filename);
+                               if(stat(filename, &buf) != -1) {
+                                       cg->io_serviced.filename = strdup(filename);
+                                       debug(D_CGROUP, "io_serviced filename for cgroup '%s': '%s'", cg->id, cg->io_serviced.filename);
+                               }
+                               else debug(D_CGROUP, "io_serviced file for cgroup '%s': '%s' does not exist.", cg->id, filename);
                        }
                        if(!cg->throttle_io_service_bytes.filename) {
                                snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_service_bytes", cgroup_blkio_base, cg->id);
-                               cg->throttle_io_service_bytes.filename = strdup(filename);
-                               debug(D_CGROUP, "throttle_io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_service_bytes.filename);
+                               if(stat(filename, &buf) != -1) {
+                                       cg->throttle_io_service_bytes.filename = strdup(filename);
+                                       debug(D_CGROUP, "throttle_io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_service_bytes.filename);
+                               }
+                               else debug(D_CGROUP, "throttle_io_service_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
                        }
                        if(!cg->throttle_io_serviced.filename) {
                                snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_serviced", cgroup_blkio_base, cg->id);
-                               cg->throttle_io_serviced.filename = strdup(filename);
-                               debug(D_CGROUP, "throttle_io_serviced filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_serviced.filename);
+                               if(stat(filename, &buf) != -1) {
+                                       cg->throttle_io_serviced.filename = strdup(filename);
+                                       debug(D_CGROUP, "throttle_io_serviced filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_serviced.filename);
+                               }
+                               else debug(D_CGROUP, "throttle_io_serviced file for cgroup '%s': '%s' does not exist.", cg->id, filename);
                        }
                        if(!cg->io_merged.filename) {
                                snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_merged", cgroup_blkio_base, cg->id);
-                               cg->io_merged.filename = strdup(filename);
-                               debug(D_CGROUP, "io_merged filename for cgroup '%s': '%s'", cg->id, cg->io_merged.filename);
+                               if(stat(filename, &buf) != -1) {
+                                       cg->io_merged.filename = strdup(filename);
+                                       debug(D_CGROUP, "io_merged filename for cgroup '%s': '%s'", cg->id, cg->io_merged.filename);
+                               }
+                               else debug(D_CGROUP, "io_merged file for cgroup '%s': '%s' does not exist.", cg->id, filename);
                        }
                        if(!cg->io_queued.filename) {
                                snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_queued", cgroup_blkio_base, cg->id);
-                               cg->io_queued.filename = strdup(filename);
-                               debug(D_CGROUP, "io_queued filename for cgroup '%s': '%s'", cg->id, cg->io_queued.filename);
+                               if(stat(filename, &buf) != -1) {
+                                       cg->io_queued.filename = strdup(filename);
+                                       debug(D_CGROUP, "io_queued filename for cgroup '%s': '%s'", cg->id, cg->io_queued.filename);
+                               }
+                               else debug(D_CGROUP, "io_queued file for cgroup '%s': '%s' does not exist.", cg->id, filename);
                        }
                }
        }
@@ -1242,12 +1271,12 @@ void update_cgroup_charts(int update_every) {
 // cgroups main
 
 int do_sys_fs_cgroup(int update_every, unsigned long long dt) {
+       (void)dt;
+
        static int cgroup_global_config_read = 0;
        static time_t last_run = 0;
        time_t now = time(NULL);
 
-       if(dt) {};
-
        if(unlikely(!cgroup_global_config_read)) {
                read_cgroup_plugin_configuration();
                cgroup_global_config_read = 1;
index f987c1aa1e5f9f4cc03bcddb95cd1b21889662be..89c55e228a10622bc6d06965069814ce6de7ba40 100644 (file)
@@ -1357,6 +1357,11 @@ var menuData = {
                info: undefined
        },
 
+       'memcached': {
+               title: 'memcached',
+               info: undefined
+       },
+
        'mysql': {
                title: 'MySQL',
                info: undefined
@@ -1367,6 +1372,11 @@ var menuData = {
                info: undefined
        },
 
+       'ipfs': {
+               title: 'IPFS',
+               info: undefined
+       },
+
        'phpfpm': {
                title: 'PHP-FPM',
                info: undefined,
@@ -1868,12 +1878,14 @@ function enrichChartData(chart) {
                case 'apache':
                case 'cgroup':
                case 'exim':
+               case 'memcached':
                case 'mysql':
                case 'named':
                case 'nginx':
                case 'phpfpm':
                case 'postfix':
                case 'redis':
+               case 'ipfs':
                case 'squid':
                case 'snmp':
                case 'tomcat':