-#!/bin/bash
+# no need for shebang - this file is loaded from charts.d.plugin
# _update_every is a special variable - it holds the number of seconds
# between the calls of the _update() function
# _check is called once, to find out if this chart should be enabled or not
ap_check() {
+ require_cmd iw || return 1
+
local ev=$(iw dev | awk '
BEGIN {
i = "";
-#!/bin/bash
+# no need for shebang - this file is loaded from charts.d.plugin
# the URL to download apache status info
apache_url="http://127.0.0.1:80/server-status?auto"
-#!/bin/sh
+# no need for shebang - this file is loaded from charts.d.plugin
# THIS PLUGIN IS OBSOLETE
# USE apps.plugin INSTEAD
-#!/bin/sh
+# no need for shebang - this file is loaded from charts.d.plugin
# if this chart is called X.chart.sh, then all functions and global variables
# must start with X_
-#!/bin/bash
+# no need for shebang - this file is loaded from charts.d.plugin
# if this chart is called X.chart.sh, then all functions and global variables
# must start with X_
# (just a demonstration for something that needs to be checked)
example_magic_number=
+# global variables to store our collected data
+# remember: they need to start with the module name example_
+example_value1=
+example_value2=
+example_value3=
+example_value4=
+example_last=0
+example_count=0
+
+example_get() {
+ # do all the work to collect / calculate the values
+ # for each dimension
+ #
+ # Remember:
+ # 1. KEEP IT SIMPLE AND SHORT
+ # 2. AVOID FORKS (avoid piping commands)
+ # 3. AVOID CALLING TOO MANY EXTERNAL PROGRAMS
+ # 4. USE LOCAL VARIABLES (global variables may overlap with other modules)
+
+ example_value1=$RANDOM
+ example_value2=$RANDOM
+ example_value3=$RANDOM
+ example_value4=$((8192 + (RANDOM * 16383 / 32767) ))
+
+ if [ $example_count -gt 0 ]
+ then
+ example_count=$((example_count - 1))
+
+ [ $example_last -gt 16383 ] && example_value4=$((example_last + (RANDOM * ( (32767 - example_last) / 2) / 32767)))
+ [ $example_last -le 16383 ] && example_value4=$((example_last - (RANDOM * (example_last / 2) / 32767)))
+ else
+ example_count=$((1 + (RANDOM * 5 / 32767) ))
+
+ [ $example_last -gt 16383 -a $example_value4 -gt 16383 ] && example_value4=$((example_value4 - 16383))
+ [ $example_last -le 16383 -a $example_value4 -lt 16383 ] && example_value4=$((example_value4 + 16383))
+ fi
+ example_last=$example_value4
+
+ # this should return:
+ # - 0 to send the data to netdata
+ # - 1 to report a failure to collect the data
+
+ return 0
+}
+
# _check is called once, to find out if this chart should be enabled or not
example_check() {
# this should return:
# - 0 to enable the chart
# - 1 to disable the chart
- [ "${example_magic_number}" != "12345" ] && return 1
+ # check something
+ [ "${example_magic_number}" != "12345" ] && echo >&2 "example: you have to set example_magic_number=$example_magic_number in example.conf to start example chart." && return 1
+
+ # check that we can collect data
+ example_get || return 1
+
return 0
}
}
# _update is called continuously, to collect the values
-example_last=0
-example_count=0
example_update() {
- local value1 value2 value3 value4 mode
-
# the first argument to this function is the microseconds since last update
# pass this parameter to the BEGIN statement (see below).
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- value1=$RANDOM
- value2=$RANDOM
- value3=$RANDOM
- value4=$((8192 + (RANDOM * 16383 / 32767) ))
-
- if [ $example_count -gt 0 ]
- then
- example_count=$((example_count - 1))
-
- [ $example_last -gt 16383 ] && value4=$((example_last + (RANDOM * ( (32767 - example_last) / 2) / 32767)))
- [ $example_last -le 16383 ] && value4=$((example_last - (RANDOM * (example_last / 2) / 32767)))
- else
- example_count=$((1 + (RANDOM * 5 / 32767) ))
-
- [ $example_last -gt 16383 -a $value4 -gt 16383 ] && value4=$((value4 - 16383))
- [ $example_last -le 16383 -a $value4 -lt 16383 ] && value4=$((value4 + 16383))
- fi
- example_last=$value4
+ example_get || return 1
# write the result of the work.
cat <<VALUESEOF
BEGIN example.random $1
-SET random1 = $value1
-SET random2 = $value2
-SET random3 = $value3
+SET random1 = $example_value1
+SET random2 = $example_value2
+SET random3 = $example_value3
END
BEGIN example.random2 $1
-SET random = $value4
+SET random = $example_value4
END
VALUESEOF
# echo >&2 "example_count = $example_count value = $value4"
-#!/bin/sh
+# no need for shebang - this file is loaded from charts.d.plugin
exim_command=
-#!/bin/bash
+# no need for shebang - this file is loaded from charts.d.plugin
# if this chart is called X.chart.sh, then all functions and global variables
# must start with X_
-#!/bin/sh
+# no need for shebang - this file is loaded from charts.d.plugin
load_average_update_every=5
load_priority=100
-#!/bin/sh
+# no need for shebang - this file is loaded from charts.d.plugin
mem_apps_apps=
-#!/bin/bash
+# no need for shebang - this file is loaded from charts.d.plugin
# http://dev.mysql.com/doc/refman/5.0/en/server-status-variables.html
#
-#!/bin/bash
+# no need for shebang - this file is loaded from charts.d.plugin
# if this chart is called X.chart.sh, then all functions and global variables
# must start with X_
-#!/bin/bash
+# no need for shebang - this file is loaded from charts.d.plugin
# a space separated list of UPS names
# if empty, the list returned by 'upsc -l' will be used
-#!/bin/sh
+# no need for shebang - this file is loaded from charts.d.plugin
opensips_opts="fifo get_statistics all"
opensips_cmd=
-#!/bin/bash
+# no need for shebang - this file is loaded from charts.d.plugin
# if this chart is called X.chart.sh, then all functions and global variables
# must start with X_
-#!/bin/sh
+# no need for shebang - this file is loaded from charts.d.plugin
# the postqueue command
# if empty, it will use the one found in the system path
-#!/bin/sh
+# no need for shebang - this file is loaded from charts.d.plugin
# sensors docs
# https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface
-#!/bin/sh
+# no need for shebang - this file is loaded from charts.d.plugin
squid_host=
squid_port=
-#!/bin/bash
+# no need for shebang - this file is loaded from charts.d.plugin
# Description: Tomcat netdata charts.d plugin
# Author: Jorge Romero
# -----------------------------------------------------------------------------
# the default enable/disable for all charts.d collectors
-#enable_all_charts="yes"
-
-# per charts.d collector enable/disable
-#nut=yes
-#squid=yes
-#postfix=yes
-#sensors=yes
-#cpufreq=yes
-#mysql=yes
-#example=yes
-#load_average=yes
+# the default is "yes"
+# enable_all_charts="yes"
+
+# BY DEFAULT ENABLED MODULES
+# ap=yes
+# nut=yes
+# opensips=yes
+
+# -----------------------------------------------------------------------------
+# THESE NEED TO BE SET TO "force" TO BE ENABLED
+
+# Nothing useful.
+# Just an example charts.d plugin you can use as a template.
+# example=force
+
+# OLD MODULES THAT ARE NOW SERVED BY python.d.plugin
+# apache=force
+# cpufreq=force
+# exim=force
+# hddtemp=force
+# mysql=force
+# nginx=force
+# phpfpm=force
+# postfix=force
+# sensors=force
+# squid=force
+# tomcat=force
+
+# OLD MODULES THAT ARE NOW SERVED BY NETDATA DAEMON
+# cpu_apps=force
+# mem_apps=force
+# load_average=force
-# This is the configuration for python.d.plugin
+# netdata python.d.plugin configuration
+#
+# This file is in YaML format.
+# Generally the format is:
+#
+# name: value
+#
-# Disable all python modules
-enabled: no
+# Enable / disable the whole python.d.plugin (all its modules)
+enabled: yes
-# By default python.d.plugin enables all modules stored in python.d
-# Modules can be disabled with setting "module_name = no"
-apache: yes
-apache_cache: yes
-cpufreq: yes
-example: yes
-exim: yes
-hddtemp: yes
-mysql: yes
-nginx: yes
-phpfpm: yes
-postfix: yes
-redis: yes
-sensors: yes
-squid: yes
-tomcat: yes
+# ----------------------------------------------------------------------
+# Enable / Disable python.d.plugin modules
+#
+# The default for all modules is enabled (yes).
+# Setting any of these to no will disable it.
+# apache: yes
+# apache_cache: yes
+# cpufreq: yes
+example: no
+# exim: yes
+# hddtemp: yes
+# mysql: yes
+# nginx: yes
+# phpfpm: yes
+# postfix: yes
+# redis: yes
+# sensors: yes
+# squid: yes
+# tomcat: yes
-# Example configuration of apache.chart.py
-# YAML format
+# netdata python.d.plugin configuration for apache
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
-local:
- url: "http://localhost/server-status?auto"
- retries: 10
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, apache also supports the following:
+#
+# url: 'URL' # the URL to fetch apache's mod_status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost/server-status?auto'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1/server-status?auto'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]/server-status?auto'
-# Example configuration of apache_cache.chart.py
-# YAML format
+# netdata python.d.plugin configuration for apache cache
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
-path: "/var/log/apache2/cache.log"
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, apache_cache also supports the following:
+#
+# path: 'PATH' # the path to apache's cache.log
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+apache:
+ name: 'local'
+ path: '/var/log/apache/cache.log'
+
+apache2:
+ name: 'local'
+ path: '/var/log/apache2/cache.log'
+
+httpd:
+ name: 'local'
+ path: '/var/log/httpd/cache.log'
-# Example configuration of cpufreq.chart.py
-# YAML format
+# netdata python.d.plugin configuration for cpufreq
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
-update_every : 2
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# The directory to search for the file scaling_cur_freq
sys_dir: "/sys/devices"
-update_every : 2
+# netdata python.d.plugin configuration for example
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, example also supports the following:
+#
+# - none
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
-# Example configuration of exim.chart.py
-# YAML format
+# netdata python.d.plugin configuration for exim
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
-update_every : 8 # executing `exim -bpc` can be very slow
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# exim is slow, so once every 10 seconds
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, exim also supports the following:
+#
+# command: 'exim -bpc' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ command: 'exim -bpc'
-# Example configuration of hddtemp.chart.py
-# YAML format
+# netdata python.d.plugin configuration for hddtemp
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
-update_every: 1
-retries: 10
-host: 'localhost'
-port: 7634
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, hddtemp also supports the following:
+#
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name: 'local'
+ host: 'localhost'
+ port: 7634
+
+localipv4:
+ name: 'local'
+ host: '127.0.0.1'
+ port: 7634
+
+localipv6:
+ name: 'local'
+ host: '::1'
+ port: 7634
-# Example configuration of mysql.chart.py
-# YAML format
+# netdata python.d.plugin configuration for mysql
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
-update_every: 1
-retries: 10
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
-mycnf:
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, mysql also supports the following:
+#
+# socket: 'path/to/mysql.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+# in all cases, the following can also be set:
+#
+# user: 'username' # the mysql username to use
+# pass: 'password' # the mysql password to use
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+mycnf1:
+ name : 'local'
+ 'my.cnf' : '/etc/my.cnf'
+
+mycnf2:
name : 'local'
'my.cnf' : '/etc/mysql/my.cnf'
socket1:
name : 'local'
- #user : 'root'
- #password : ''
+ # user : ''
+ # pass : ''
socket : '/var/run/mysqld/mysqld.sock'
socket2:
name : 'local'
- #user : 'root'
- #password : ''
+ # user : ''
+ # pass : ''
socket : '/var/lib/mysql/mysql.sock'
+socket3:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ socket : '/tmp/mysql.sock'
+
tcp:
name : 'local'
- #user : 'root'
- #password : ''
+ # user : ''
+ # pass : ''
host : 'localhost'
port : '3306'
+tcpipv4:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ host : '127.0.0.1'
+ port : '3306'
+
+tcpipv6:
+ name : 'local'
+ # user : ''
+ # pass : ''
+ host : '::1'
+ port : '3306'
+
+
+# Now we try the same as above with user: root
+# A few systems configure mysql to accept passwordless
+# root access.
+
+mycnf1_root:
+ name : 'local'
+ user : 'root'
+ 'my.cnf' : '/etc/my.cnf'
+
+mycnf2_root:
+ name : 'local'
+ user : 'root'
+ 'my.cnf' : '/etc/mysql/my.cnf'
+
socket1_root:
name : 'local'
user : 'root'
- #password : ''
+ # pass : ''
socket : '/var/run/mysqld/mysqld.sock'
socket2_root:
name : 'local'
user : 'root'
- #password : ''
+ # pass : ''
socket : '/var/lib/mysql/mysql.sock'
+socket3_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ socket : '/tmp/mysql.sock'
+
tcp_root:
name : 'local'
user : 'root'
- #password : ''
+ # pass : ''
host : 'localhost'
- port : '3306'
\ No newline at end of file
+ port : '3306'
+
+tcpipv4_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ host : '127.0.0.1'
+ port : '3306'
+
+tcpipv6_root:
+ name : 'local'
+ user : 'root'
+ # pass : ''
+ host : '::1'
+ port : '3306'
+
-# Example configuration of nginx.chart.py
-# YAML format
+# netdata python.d.plugin configuration for nginx
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
-retries: 10
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, nginx also supports the following:
+#
+# url: 'URL' # the URL to fetch nginx's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost/stub_status'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1/stub_status'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]/stub_status'
-local:
- url: "http://localhost/stub_status"
-# Example configuration of phpfpm.chart.py
-# YAML format
+# netdata python.d.plugin configuration for PHP-FPM
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
-retries: 10
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, PHP-FPM also supports the following:
+#
+# url: 'URL' # the URL to fetch PHP-FPM's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : "http://localhost/status"
+
+localipv4:
+ name : 'local'
+ url : "http://127.0.0.1/status"
+
+localipv6:
+ name : 'local'
+ url : "http://[::1]/status"
-local:
- url: "http://localhost/status"
-# Example configuration of postfix.chart.py
-# YAML format
+# netdata python.d.plugin configuration for postfix
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
-update_every : 2
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# postfix is slow, so once every 10 seconds
+update_every: 10
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, postfix also supports the following:
+#
+# command: 'postqueue -p' # the command to run
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+
+local:
+ command: 'postqueue -p'
-# Example configuration of redis.chart.py
-# YAML format
+# netdata python.d.plugin configuration for redis
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
-update_every: 1
-retries: 10
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
-socket:
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, redis also supports the following:
+#
+# socket: 'path/to/redis.sock'
+#
+# or
+# host: 'IP or HOSTNAME' # the host to connect to
+# port: PORT # the port to connect to
+#
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+socket1:
name : 'local'
socket : '/tmp/redis.sock'
-tcp:
+socket2:
+ name : 'local'
+ socket : '/var/run/redis/redis.sock'
+
+socket3:
+ name : 'local'
+ socket : '/var/lib/redis/redis.sock'
+
+localhost:
name : 'local'
host : 'localhost'
- port : '6379'
\ No newline at end of file
+ port : 6379
+
+localipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 6379
+
+localipv6:
+ name : 'local'
+ host : '::1'
+ port : 6379
+
-# Example configuration of sensors.chart.py
-# YAML format
-
-#update_every: 2
-#types:
-# - temperature
-# - fan
-# - voltage
-# - current
-# - power
-# - energy
-# - humidity
+# netdata python.d.plugin configuration for sensors
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# Limit the number of sensors types.
+# Comment the ones you want to disable.
+# Also, re-arranging this list controls the order of the charts at the
+# netdata dashboard.
+
+types:
+ - temperature
+ - fan
+ - voltage
+ - current
+ - power
+ - energy
+ - humidity
+
+# ----------------------------------------------------------------------
+# Limit the number of sensors chips.
+# Uncomment the first line (chips:) and add chip names below it.
+# Chip names that start like that will be matched.
+# You can find the chip names using the sensors command.
+
#chips:
# - i8k
+# - coretemp
+#
+# chip names can be found using the sensors shell command
+# the prefix is matched (anything that starts like that)
-# Example configuration of squid.chart.py
-# YAML format
+# netdata python.d.plugin configuration for squid
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
-update_every: 1
-retries: 10
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
-tcp1:
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, squid also supports the following:
+#
+# host : 'IP or HOSTNAME' # the host to connect to
+# port : PORT # the port to connect to
+# request: 'URL' # the URL to request from squid
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+tcp3128new:
name : 'local'
host : 'localhost'
port : 3128
request : 'cache_object://localhost:3128/counters'
-tcp2:
+tcp8080new:
name : 'local'
host : 'localhost'
port : 8080
request : 'cache_object://localhost:3128/counters'
-tcp3:
+tcp3128old:
name : 'local'
host : 'localhost'
port : 3128
request : '/squid-internal-mgr/counters'
-tcp4:
+tcp8080old:
name : 'local'
host : 'localhost'
port : 8080
request : '/squid-internal-mgr/counters'
+# IPv4
+
+tcp3128newipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 3128
+ request : 'cache_object://127.0.0.1:3128/counters'
+
+tcp8080newipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 8080
+ request : 'cache_object://127.0.0.1:8080/counters'
+
+tcp3128oldipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080oldipv4:
+ name : 'local'
+ host : '127.0.0.1'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
+# IPv6
+
+tcp3128newipv6:
+ name : 'local'
+ host : '::1'
+ port : 3128
+ request : 'cache_object://[::1]:3128/counters'
+
+tcp8080newipv6:
+ name : 'local'
+ host : '::1'
+ port : 8080
+ request : 'cache_object://[::1]:8080/counters'
+
+tcp3128oldipv6:
+ name : 'local'
+ host : '::1'
+ port : 3128
+ request : '/squid-internal-mgr/counters'
+
+tcp8080oldipv6:
+ name : 'local'
+ host : '::1'
+ port : 8080
+ request : '/squid-internal-mgr/counters'
+
-# Example configuration of tomcat.chart.py
-# YAML format
+# netdata python.d.plugin configuration for tomcat
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
-retries: 10
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
-local:
- url: "http://localhost:8080/manager/status?XML=true"
- user: ""
- password: ""
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# Additionally to the above, tomcat also supports the following:
+#
+# url: 'URL' # the URL to fetch tomcat's status stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost:8080/manager/status?XML=true'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1:8080/manager/status?XML=true'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]:8080/manager/status?XML=true'
--- /dev/null
+declare -A configs_signatures=(
+ ['0056936ce99788ed9ae1c611c87aa6d8']='apps_groups.conf'
+ ['18ee1c6197a4381b1c1631ef6129824f']='apps_groups.conf'
+ ['2f4a85fedecce1bf425fa1039f6b021e']='apps_groups.conf'
+ ['3af522d65b50a5e447607ffb28c81ff5']='apps_groups.conf'
+ ['4a448831776de8acf2e0bdc4cc994cb4']='apps_groups.conf'
+ ['5bf51bb24fb41db9b1e448bd060d3f8c']='apps_groups.conf'
+ ['636d032928ea0f4741eab264fb49c099']='apps_groups.conf'
+ ['647361e99b5f4e0d73470c569bb9461c']='apps_groups.conf'
+ ['6a47af861ad3dd112124c37fbf09672b']='apps_groups.conf'
+ ['79a37756869d9b4629285922572d6b9b']='apps_groups.conf'
+ ['99c1617448abbdc493976ab9bda5ce02']='apps_groups.conf'
+ ['9c0185ceff15415bc59b2ce2c1f04367']='apps_groups.conf'
+ ['a0ee8f351f213c0e8af9eb7a4a09cb95']='apps_groups.conf'
+ ['a7cceeafb1e6ef1ead503ab65f687902']='apps_groups.conf'
+ ['a837986be634fd7648bcdf939019424a']='apps_groups.conf'
+ ['a9cd91675467c5426f5b51c47602c889']='apps_groups.conf'
+ ['acaa6731a272f6d251afb357e99b518f']='apps_groups.conf'
+ ['bb51112d01ff20053196a57632df8962']='apps_groups.conf'
+ ['d9258e671d0d0b6498af1ce16ef030d2']='apps_groups.conf'
+ ['ebd0612ccc5807524ebb2b647e3e56c9']='apps_groups.conf'
+ ['f2f1b8656f5011e965ac45b818cf668d']='apps_groups.conf'
+ ['fdea185e0e52b459b48852aa37f20e0f']='apps_groups.conf'
+ ['4ccb06fff1ce06dc5bc80e0a9f568f6e']='charts.d.conf'
+ ['4e995acb0d6fd77403a2a9dca984b55b']='charts.d.conf'
+ ['535e5113b07b0fc6f3abd59546c276f6']='charts.d.conf'
+ ['a02d14124b19c635c1426cee2e98bac5']='charts.d.conf'
+ ['ca026d7c779f0a7cb7787713c5be5c47']='charts.d.conf'
+ ['6b917300747e7e8314844237e2462261']='python.d/apache_cache.conf'
+ ['e0e96cc47ed61d6492416be5236cd4d3']='python.d/apache_cache.conf'
+ ['5278ebbae19c60db600f0a119cb3664e']='python.d/apache.conf'
+ ['5829812db29598db5857c9f433e96fef']='python.d/apache.conf'
+ ['6bf0de6e3b251b765b10a71d8c5c319d']='python.d/apache.conf'
+ ['38d1bf04fe9901481dd6febcc0404a86']='python.d.conf'
+ ['4b775fb31342f1478b3773d041a72911']='python.d.conf'
+ ['99a3de85d1e7826ed64a5f8576712e5d']='python.d.conf'
+ ['9e0553ebdc21b64295873fc104cfa79d']='python.d.conf'
+ ['a2944a309f8ce1a3195451856478d6ae']='python.d.conf'
+ ['af44cc53aa2bc5cc8935667119567522']='python.d.conf'
+ ['b27f10a38a95edbbec20f44a4728b7c4']='python.d.conf'
+ ['b32164929eda7449a9677044e11151bf']='python.d.conf'
+ ['b8969be5b3ceb4a99477937119bd4323']='python.d.conf'
+ ['d55be5bb5e108da1e7645da007c53cd4']='python.d.conf'
+ ['f82924563e41d99cdae5431f0af69155']='python.d.conf'
+ ['7830066c46a7e5f9682b8d3f4566b4e5']='python.d/cpufreq.conf'
+ ['b5b5a8d6d991fb1cef8d80afa23ba114']='python.d/cpufreq.conf'
+ ['dc0d2b96378f290eec3fcf98b89ad824']='python.d/cpufreq.conf'
+ ['a8bb4e1d0525f59692778ad8f675a77a']='python.d/example.conf'
+ ['ae5ac0a3521e50aa6f6eda2a330b4075']='python.d/example.conf'
+ ['e5f32f54d6d6728f21f9ac26f37d6573']='python.d/example.conf'
+ ['15e32114994b92be7853b88091e7c6fb']='python.d/exim.conf'
+ ['73125ae64d5c6e9361944cd9bd14844e']='python.d/exim.conf'
+ ['a94af1c808aafdf00537d85ff2197ec8']='python.d/exim.conf'
+ ['2a0794fd43eadf30a51805bc9ba2c64d']='python.d/hddtemp.conf'
+ ['731a1fcfe9b2da1b9d685056a59541b8']='python.d/hddtemp.conf'
+ ['d74dc63fbe631dab9a2ff1b0f5d71719']='python.d/hddtemp.conf'
+ ['1ea8e8ef1fa8a3a0fcdfba236f4cb195']='python.d/mysql.conf'
+ ['5379cdc26d7725e2b0d688d785816cef']='python.d/mysql.conf'
+ ['7deb236ec68a512b9bdd18e6a51d76f7']='python.d/mysql.conf'
+ ['88f77865f75c9fb61c97d700bd4561ee']='python.d/mysql.conf'
+ ['b0f0a0ac415e4b1a82187b80d211e83b']='python.d/mysql.conf'
+ ['df381f3a7ca9fb2b4b43ae7cb7a4c492']='python.d/mysql.conf'
+ ['061c45b0e34170d357e47883166ecf40']='python.d/nginx.conf'
+ ['21924a6ab8008d16ffac340f226ebad9']='python.d/nginx.conf'
+ ['c61948101e0e6846679682794ee48c5b']='python.d/nginx.conf'
+ ['3ca696189911fb38a0319ddd71e9a395']='python.d/phpfpm.conf'
+ ['8c1d41e2c88aeca78bc319ed74c8748c']='python.d/phpfpm.conf'
+ ['b8b87574fd496a66ede884c5336493bd']='python.d/phpfpm.conf'
+ ['c88fb430f35b7d8f08775d84debffbd2']='python.d/phpfpm.conf'
+ ['d7e0bd12d4a60a761dcab3531a841711']='python.d/phpfpm.conf'
+ ['142a5b693d34b0308bb0b8aec71fad79']='python.d/postfix.conf'
+ ['ca249db7a0637d55abb938d969f9b486']='python.d/postfix.conf'
+ ['39571e9fad9b759200c5d5b2ee13feb4']='python.d/redis.conf'
+ ['b915126262d08aa9da81de539a58a3fb']='python.d/redis.conf'
+ ['837480f77ba1a85677a36747fbc2cd2e']='python.d/sensors.conf'
+ ['cfecf298bdafaa7e0a3a263548e82132']='python.d/sensors.conf'
+ ['64070d856ab1b47a18ec871e49bbc13b']='python.d/squid.conf'
+ ['78bb08809dffcb62e9bc493840f9c039']='python.d/squid.conf'
+ ['78e0065738394f5bf15023f41d66ed4b']='python.d/squid.conf'
+ ['7d8bd884ec26cb35d16c4fc05f969799']='python.d/squid.conf'
+ ['91cf3b3d42cac969b8b3fd4f531ecfb3']='python.d/squid.conf'
+ ['ade389c1b6efe0cff47c33e662731f0a']='python.d/squid.conf'
+ ['e3e5bc57335c489f01b8559f5c70e112']='python.d/squid.conf'
+ ['0388b873d0d7e47c19005b7241db77d8']='python.d/tomcat.conf'
+ ['f7a99e94231beda85c6254912d8d31c1']='python.d/tomcat.conf'
+)
-#!/bin/bash
+#!/usr/bin/env bash
# reload the user profile
[ -f /etc/profile ] && . /etc/profile
USAGE
}
+md5sum="$(which md5sum 2>/dev/null || command -v md5sum 2>/dev/null)"
+get_git_config_signatures() {
+ local x s file md5
+
+ [ ! -d "conf.d" ] && echo >&2 "Wrong directory." && return 1
+ [ -z "${md5sum}" -o ! -x "${md5sum}" ] && echo >&2 "No md5sum command." && return 1
+
+ echo >configs.signatures.tmp
+
+ for x in $(find conf.d -name \*.conf)
+ do
+ x="${x/conf.d\//}"
+ echo "${x}"
+ for c in $(git log --follow "conf.d/${x}" | grep ^commit | cut -d ' ' -f 2)
+ do
+ git checkout ${c} "conf.d/${x}" || continue
+ s="$(cat "conf.d/${x}" | md5sum | cut -d ' ' -f 1)"
+ echo >>configs.signatures.tmp "${x}:${s}"
+ echo " ${s}"
+ done
+ git checkout HEAD "conf.d/${x}" || break
+ done
+
+ cat configs.signatures.tmp |\
+ grep -v "^$" |\
+ sort -u |\
+ {
+ echo "declare -A configs_signatures=("
+ IFS=":"
+ while read file md5
+ do
+ echo " ['${md5}']='${file}'"
+ done
+ echo ")"
+ } >configs.signatures
+
+ rm configs.signatures.tmp
+
+ return 0
+}
+
+
while [ ! -z "${1}" ]
do
if [ "$1" = "--install" ]
then
usage
exit 1
+ elif [ "$1" = "get_git_config_signatures" ]
+ then
+ get_git_config_signatures && exit 0
+ exit 1
else
echo >&2
echo >&2 "ERROR:"
echo >&2 "Compiling netdata ..."
run make || exit 1
+declare -A configs_signatures=()
+if [ -f "configs.signatures" ]
+ then
+ source "configs.signatures" || echo >&2 "ERROR: Failed to load configs.signatures !"
+fi
+
+# migrate existing configuration files
+# for node.d and charts.d
+if [ -d "${NETDATA_PREFIX}/etc/netdata" ]
+ then
+ # the configuration directory exists
+
+ if [ ! -d "${NETDATA_PREFIX}/etc/netdata/charts.d" ]
+ then
+ run mkdir "${NETDATA_PREFIX}/etc/netdata/charts.d"
+ fi
+
+ # move the charts.d config files
+ for x in apache ap cpu_apps cpufreq example exim hddtemp load_average mem_apps mysql nginx nut opensips phpfpm postfix sensors squid tomcat
+ do
+ for y in "" ".old" ".orig"
+ do
+ if [ -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" ]
+ then
+ run mv -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" "${NETDATA_PREFIX}/etc/netdata/charts.d/${x}.conf${y}"
+ fi
+ done
+ done
+
+ if [ ! -d "${NETDATA_PREFIX}/etc/netdata/node.d" ]
+ then
+ run mkdir "${NETDATA_PREFIX}/etc/netdata/node.d"
+ fi
+
+ # move the node.d config files
+ for x in named sma_webbox snmp
+ do
+ for y in "" ".old" ".orig"
+ do
+ if [ -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" ]
+ then
+ run mv -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" "${NETDATA_PREFIX}/etc/netdata/node.d/${x}.conf${y}"
+ fi
+ done
+ done
+fi
+
# backup user configurations
installer_backup_suffix="${PID}.${RANDOM}"
for x in $(find "${NETDATA_PREFIX}/etc/netdata/" -name '*.conf' -type f)
do
if [ -f "${x}" ]
then
- cp -p "${x}" "${x}.installer_backup.${installer_backup_suffix}"
+ # make a backup of the configuration file
+ cp -p "${x}" "${x}.old"
+
+ if [ -z "${md5sum}" -o ! -x "${md5sum}" ]
+ then
+ # we don't have md5sum - keep it
+ cp -p "${x}" "${x}.installer_backup.${installer_backup_suffix}"
+ else
+ # find its relative filename
+ f="${x/*\/etc\/netdata\//}"
+
+ # find its checksum
+ md5="$(cat "${x}" | ${md5sum} | cut -d ' ' -f 1)"
+
+ # copy the original
+ if [ -f "conf.d/${f}" ]
+ then
+ cp "conf.d/${f}" "${x}.orig"
+ fi
+
+ if [ "${configs_signatures[${md5}]}" = "${f}" ]
+ then
+ # it is a stock version - don't keep it
+ echo >&2 "File '${x}' is stock version."
+ else
+ # edited by user - keep it
+ echo >&2 "File '${x}' has been edited by user."
+ cp -p "${x}" "${x}.installer_backup.${installer_backup_suffix}"
+ fi
+ fi
elif [ -f "${x}.installer_backup.${installer_backup_suffix}" ]
then
echo >&2
echo >&2 "Fixing directories (user: ${NETDATA_USER})..."
-for x in "${NETDATA_WEB_DIR}" "${NETDATA_CONF_DIR}" "${NETDATA_CACHE_DIR}" "${NETDATA_LOG_DIR}" "${NETDATA_LIB_DIR}"
+for x in "${NETDATA_WEB_DIR}" "${NETDATA_CONF_DIR}" "${NETDATA_CACHE_DIR}" "${NETDATA_LOG_DIR}" "${NETDATA_LIB_DIR}" "${NETDATA_CONF_DIR}/python.d" "${NETDATA_CONF_DIR}/charts.d" "${NETDATA_CONF_DIR}/node.d"
do
if [ ! -d "${x}" ]
then
-#!/bin/bash
+#!/usr/bin/env bash
export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
export LC_ALL=C
-#!/bin/sh
+#!/usr/bin/env bash
# will stop the script for any error
set -e
-#!/bin/bash
+#!/usr/bin/env bash
PROGRAM_FILE="$0"
PROGRAM_NAME="$(basename $0)"
ls *.chart.sh | sed "s/\.chart\.sh$//g"
}
+declare -A charts_enable_keyword=(
+ ['apache']="force"
+ ['cpu_apps']="force"
+ ['cpufreq']="force"
+ ['example']="force"
+ ['exim']="force"
+ ['hddtemp']="force"
+ ['load_average']="force"
+ ['mem_apps']="force"
+ ['mysql']="force"
+ ['nginx']="force"
+ ['phpfpm']="force"
+ ['postfix']="force"
+ ['sensors']="force"
+ ['squid']="force"
+ ['tomcat']="force"
+ )
+
all_enabled_charts() {
- local charts= enabled=
+ local charts= enabled= required=
# find all enabled charts
enabled="${enable_all_charts}"
fi
- if [ ! "${enabled}" = "yes" ]
+ required="${charts_enable_keyword[${chart}]}"
+ [ -z "${required}" ] && required="yes"
+
+ if [ ! "${enabled}" = "${required}" ]
then
- echo >&2 "$PROGRAM_NAME: '$chart' is NOT enabled. Add a line with $chart=yes in $myconfig to enable it (or remove the line that disables it)."
+ echo >&2 "$PROGRAM_NAME: '$chart' is NOT enabled. Add a line with $chart=$required in $myconfig to enable it (or remove the line that disables it)."
else
[ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: '$chart' is enabled."
local charts="$charts $chart"
[ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: loading chart: '$chartsd/$chart.chart.sh'"
. "$chartsd/$chart.chart.sh"
- if [ -f "$confd/$chart.conf" ]
+ if [ -f "$confd/charts.d/$chart.conf" ]
+ then
+ [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: loading chart options: '$confd/charts.d/$chart.conf'"
+ . "$confd/charts.d/$chart.conf"
+ elif [ -f "$confd/$chart.conf" ]
then
[ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: loading chart options: '$confd/$chart.conf'"
. "$confd/$chart.conf"
else
- echo >&2 "$PROGRAM_NAME: $chart: configuration file '$confd/$chart.conf' not found. Using defaults."
+ echo >&2 "$PROGRAM_NAME: $chart: configuration file '$confd/charts.d/$chart.conf' not found. Using defaults."
fi
eval "dt=\$$chart$suffix_update_every"
-#!/bin/bash
+# no need for shebang - this file is included from other scripts
# this function is used to sleep a fraction of a second
# it calculates the difference between every time is called
-#!/bin/sh
+#!/usr/bin/env bash
':' //; exec "$(command -v nodejs || command -v node || command -v js || echo "ERROR node.js IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@"
// shebang hack from:
function pluginConfig(filename) {
var f = path.basename(filename);
+ // node.d.plugin configuration
var m = f.match('.plugin' + '$');
- if(m === null) m = f.match('.node.js' + '$');
if(m !== null)
return netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf';
- return netdata.options.paths.config + '/' + f + '.conf';
+ // node.d modules configuration
+ m = f.match('.node.js' + '$');
+ if(m !== null)
+ return netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf';
+
+ return netdata.options.paths.config + '/node.d/' + f + '.conf';
}
// internal defaults
-#!/bin/bash
+#!/usr/bin/env bash
export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
-#!/bin/bash
+#!/usr/bin/env bash
umask 022
title: 'nginx',
info: undefined,
},
-/*
+
'apache': {
title: 'Apache',
info: undefined,
- },*/
+ },
'named': {
title: 'named',
}
};
+//
+// chartData works on the context of a chart
+// Its purpose is to set:
+//
+// info: the text above the charts
+// heads: the representation of the chart at the top the subsection (second level menu)
+// mainheads: the representation of the chart at the top of the section (first level menu)
+// colors: the dimension colors of the chart (the default colors are appended)
+// height: the ratio of the chart height relative to the default
+//
var chartData = {
- 'mysql.net': {
- info: 'The amount of data sent to mysql clients (<strong>out</strong>) and received from mysql clients (<strong>in</strong>).'
- },
-
- 'mysql.queries': {
- info: 'The number of statements executed by the server.<ul>' +
- '<li><strong>queries</strong> counts the statements executed within stored SQL programs.</li>' +
- '<li><strong>questions</strong> counts the statements sent to the mysql server by mysql clients.</li>' +
- '<li><strong>slow queries</strong> counts the number of statements that took more than <a href="http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_long_query_time" target="_blank">long_query_time</a> seconds to be executed.' +
- ' For more information about slow queries check the mysql <a href="http://dev.mysql.com/doc/refman/5.7/en/slow-query-log.html" target="_blank">slow query log</a>.</li>' +
- '</ul>'
- },
-
- 'mysql.handlers': {
- info: 'Usage of the internal handlers of mysql. This chart provides very good insights of what the mysql server is actually doing.' +
- ' (if the chart is not showing all these dimensions it is because they are zero - set <strong>Which dimensions to show?</strong> to <strong>All</strong> from the dashboard settings, to render even the zero values)<ul>' +
- '<li><strong>commit</strong>, the number of internal <a href="http://dev.mysql.com/doc/refman/5.7/en/commit.html" target="_blank">COMMIT</a> statements.</li>' +
- '<li><strong>delete</strong>, the number of times that rows have been deleted from tables.</li>' +
- '<li><strong>prepare</strong>, a counter for the prepare phase of two-phase commit operations.</li>' +
- '<li><strong>read first</strong>, the number of times the first entry in an index was read. A high value suggests that the server is doing a lot of full index scans; e.g. <strong>SELECT col1 FROM foo</strong>, with col1 indexed.</li>' +
- '<li><strong>read key</strong>, the number of requests to read a row based on a key. If this value is high, it is a good indication that your tables are properly indexed for your queries.</li>' +
- '<li><strong>read next</strong>, the number of requests to read the next row in key order. This value is incremented if you are querying an index column with a range constraint or if you are doing an index scan.</li>' +
- '<li><strong>read prev</strong>, the number of requests to read the previous row in key order. This read method is mainly used to optimize <strong>ORDER BY ... DESC</strong>.</li>' +
- '<li><strong>read rnd</strong>, the number of requests to read a row based on a fixed position. A high value indicates you are doing a lot of queries that require sorting of the result. You probably have a lot of queries that require MySQL to scan entire tables or you have joins that do not use keys properly.</li>' +
- '<li><strong>read rnd next</strong>, the number of requests to read the next row in the data file. This value is high if you are doing a lot of table scans. Generally this suggests that your tables are not properly indexed or that your queries are not written to take advantage of the indexes you have.</li>' +
- '<li><strong>rollback</strong>, the number of requests for a storage engine to perform a rollback operation.</li>' +
- '<li><strong>savepoint</strong>, the number of requests for a storage engine to place a savepoint.</li>' +
- '<li><strong>savepoint rollback</strong>, the number of requests for a storage engine to roll back to a savepoint.</li>' +
- '<li><strong>update</strong>, the number of requests to update a row in a table.</li>' +
- '<li><strong>write</strong>, the number of requests to insert a row in a table.</li>' +
- '</ul>'
- },
-
- 'mysql.table_locks': {
- info: 'MySQL table locks counters: <ul>' +
- '<li><strong>immediate</strong>, the number of times that a request for a table lock could be granted immediately.</li>' +
- '<li><strong>waited</strong>, the number of times that a request for a table lock could not be granted immediately and a wait was needed. If this is high and you have performance problems, you should first optimize your queries, and then either split your table or tables or use replication.</li>' +
- '</ul>'
- },
-
'system.cpu': {
info: 'Total CPU utilization (all cores). 100% here means there is no CPU idle time at all. You can get per core usage at the <a href="#cpu">CPUs</a> section and per application usage at the <a href="#apps">Applications Monitoring</a> section.<br/>Keep an eye on <b>iowait</b> ' + sparkline('system.cpu', 'iowait', '%') + '. If it is constantly high, your disks are a bottleneck and they slow your system down.<br/>Another important metric worth monitoring, is <b>softirq</b> ' + sparkline('system.cpu', 'softirq', '%') + '. A constantly high percentage of softirq may indicate network drivers issues.'
},
info: 'System swap memory, read from <code>/proc/meminfo</code>.'
},
+ // ------------------------------------------------------------------------
+ // MEMORY
+
'mem.ksm_savings': {
heads: [
gaugeChart('Saved', '12%', 'savings', '#0099CC')
colors: NETDATA.colors[3]
},
+ // ------------------------------------------------------------------------
+ // APPS
+
'apps.cpu': {
height: 2.0
},
height: 2.0
},
+ // ------------------------------------------------------------------------
+ // USERS
+
'users.cpu': {
height: 2.0
},
height: 2.0
},
+ // ------------------------------------------------------------------------
+ // GROUPS
+
'groups.cpu': {
height: 2.0
},
height: 2.0
},
+ // ------------------------------------------------------------------------
+ // NETWORK QoS
+
'tc.qos': {
heads: [
function(id) {
]
},
+ // ------------------------------------------------------------------------
+ // NETWORK INTERFACES
+
'net.net': {
heads: [
gaugeChart('Received', '12%', 'received'),
]
},
+ // ------------------------------------------------------------------------
+ // NETFILTER
+
+ 'netfilter.sockets': {
+ colors: '#88AA00',
+ heads: [
+ gaugeChart('Active Connections', '12%', '', '#88AA00')
+ ]
+ },
+
+ 'netfilter.new': {
+ heads: [
+ gaugeChart('New Connections', '12%', 'new', '#5555AA')
+ ]
+ },
+
+ // ------------------------------------------------------------------------
+ // DISKS
+
'disk.util': {
colors: '#FF5588',
heads: [
info: 'I/O operations currently in progress. This metric is a snapshot - it is not an average over the last interval.'
},
- 'netfilter.sockets': {
- colors: '#88AA00',
- heads: [
- gaugeChart('Active Connections', '12%', '', '#88AA00')
- ]
- },
-
- 'netfilter.new': {
- heads: [
- gaugeChart('New Connections', '12%', 'new', '#5555AA')
- ]
- },
-
'disk.iotime': {
height: 0.5,
info: 'The sum of the duration of all completed I/O operations. This number can exceed the interval if the disk is able to execute I/O operations in parallel.'
'disk.inodes': {
info: 'inodes (or index nodes) are filesystem objects (e.g. files and directories). On many types of file system implementations, the maximum number of inodes is fixed at filesystem creation, limiting the maximum number of files the filesystem can hold. It is possible for a device to run out of inodes. When this happens, new files cannot be created on the device, even though there may be free space available.'
},
+
+ // ------------------------------------------------------------------------
+ // MYSQL
+
+ 'mysql.net': {
+ info: 'The amount of data sent to mysql clients (<strong>out</strong>) and received from mysql clients (<strong>in</strong>).'
+ },
+
+ 'mysql.queries': {
+ info: 'The number of statements executed by the server.<ul>' +
+ '<li><strong>queries</strong> counts the statements executed within stored SQL programs.</li>' +
+ '<li><strong>questions</strong> counts the statements sent to the mysql server by mysql clients.</li>' +
+ '<li><strong>slow queries</strong> counts the number of statements that took more than <a href="http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_long_query_time" target="_blank">long_query_time</a> seconds to be executed.' +
+ ' For more information about slow queries check the mysql <a href="http://dev.mysql.com/doc/refman/5.7/en/slow-query-log.html" target="_blank">slow query log</a>.</li>' +
+ '</ul>'
+ },
+
+ 'mysql.handlers': {
+ info: 'Usage of the internal handlers of mysql. This chart provides very good insights of what the mysql server is actually doing.' +
+ ' (if the chart is not showing all these dimensions it is because they are zero - set <strong>Which dimensions to show?</strong> to <strong>All</strong> from the dashboard settings, to render even the zero values)<ul>' +
+ '<li><strong>commit</strong>, the number of internal <a href="http://dev.mysql.com/doc/refman/5.7/en/commit.html" target="_blank">COMMIT</a> statements.</li>' +
+ '<li><strong>delete</strong>, the number of times that rows have been deleted from tables.</li>' +
+ '<li><strong>prepare</strong>, a counter for the prepare phase of two-phase commit operations.</li>' +
+ '<li><strong>read first</strong>, the number of times the first entry in an index was read. A high value suggests that the server is doing a lot of full index scans; e.g. <strong>SELECT col1 FROM foo</strong>, with col1 indexed.</li>' +
+ '<li><strong>read key</strong>, the number of requests to read a row based on a key. If this value is high, it is a good indication that your tables are properly indexed for your queries.</li>' +
+ '<li><strong>read next</strong>, the number of requests to read the next row in key order. This value is incremented if you are querying an index column with a range constraint or if you are doing an index scan.</li>' +
+ '<li><strong>read prev</strong>, the number of requests to read the previous row in key order. This read method is mainly used to optimize <strong>ORDER BY ... DESC</strong>.</li>' +
+ '<li><strong>read rnd</strong>, the number of requests to read a row based on a fixed position. A high value indicates you are doing a lot of queries that require sorting of the result. You probably have a lot of queries that require MySQL to scan entire tables or you have joins that do not use keys properly.</li>' +
+ '<li><strong>read rnd next</strong>, the number of requests to read the next row in the data file. This value is high if you are doing a lot of table scans. Generally this suggests that your tables are not properly indexed or that your queries are not written to take advantage of the indexes you have.</li>' +
+ '<li><strong>rollback</strong>, the number of requests for a storage engine to perform a rollback operation.</li>' +
+ '<li><strong>savepoint</strong>, the number of requests for a storage engine to place a savepoint.</li>' +
+ '<li><strong>savepoint rollback</strong>, the number of requests for a storage engine to roll back to a savepoint.</li>' +
+ '<li><strong>update</strong>, the number of requests to update a row in a table.</li>' +
+ '<li><strong>write</strong>, the number of requests to insert a row in a table.</li>' +
+ '</ul>'
+ },
+
+ 'mysql.table_locks': {
+ info: 'MySQL table locks counters: <ul>' +
+ '<li><strong>immediate</strong>, the number of times that a request for a table lock could be granted immediately.</li>' +
+ '<li><strong>waited</strong>, the number of times that a request for a table lock could not be granted immediately and a wait was needed. If this is high and you have performance problems, you should first optimize your queries, and then either split your table or tables or use replication.</li>' +
+ '</ul>'
+ },
+
+ // ------------------------------------------------------------------------
+ // APACHE
+
'apache.connections': {
colors: NETDATA.colors[4],
mainheads: [
'apache.requests': {
colors: NETDATA.colors[0],
mainheads: [
- gaugeChart('Connections', '12%', '', NETDATA.colors[0])
+ gaugeChart('Requests', '12%', '', NETDATA.colors[0])
]
},
height: 0.5
},
- 'nginx_local.connections': {
+
+ // ------------------------------------------------------------------------
+ // NGINX
+
+ 'nginx.connections': {
colors: NETDATA.colors[4],
mainheads: [
gaugeChart('Connections', '12%', '', NETDATA.colors[4])
]
},
- 'nginx_local.requests': {
+ 'nginx.requests': {
colors: NETDATA.colors[0],
mainheads: [
gaugeChart('Requests', '12%', '', NETDATA.colors[0])
chart.menu = tmp;
break;
+ case 'apache':
+ case 'cgroup':
+ case 'exim':
case 'mysql':
- case 'redis':
- case 'phpfpm':
- case 'nginx':
-/* case 'apache':*/
case 'named':
- case 'cgroup':
+ case 'nginx':
+ case 'phpfpm':
+ case 'postfix':
+ case 'redis':
+ case 'snmp':
+ case 'squid':
+ case 'tomcat':
chart.menu = chart.type;
chart.menu_pattern = tmp;
break;
+ ' data-dimensions="in"'
+ ' data-chart-library="easypiechart"'
+ ' data-title="Disk Read"'
- + ' data-units="KB / s"'
+ ' data-width="10%"'
+ ' data-before="0"'
+ ' data-after="-' + duration.toString() + '"'
+ ' data-dimensions="out"'
+ ' data-chart-library="easypiechart"'
+ ' data-title="Disk Write"'
- + ' data-units="KB / s"'
+ ' data-width="10%"'
+ ' data-before="0"'
+ ' data-after="-' + duration.toString() + '"'
+ ' data-dimensions="received"'
+ ' data-chart-library="easypiechart"'
+ ' data-title="IPv4 Inbound"'
- + ' data-units="kbps"'
+ ' data-width="10%"'
+ ' data-before="0"'
+ ' data-after="-' + duration.toString() + '"'
+ ' data-dimensions="sent"'
+ ' data-chart-library="easypiechart"'
+ ' data-title="IPv4 Outbound"'
- + ' data-units="kbps"'
+ ' data-width="10%"'
+ ' data-before="0"'
+ ' data-after="-' + duration.toString() + '"'