netdata
apps.plugin
+freeipmi.plugin
netdata.spec
*.tar.*
src/web_client.h
src/web_server.c
src/web_server.h
- )
+ src/locks.h)
set(APPS_PLUGIN_SOURCE_FILES
src/appconfig.c
.eslintignore \
.eslintrc \
.travis \
- m4/ax_check_enable_debug.m4 \
- m4/ax_c_statement_expressions.m4 \
+ m4/jemalloc.m4 \
+ m4/ax_c___atomic.m4 \
+ m4/ax_check_enable_debug.m4 \
+ m4/ax_c_mallinfo.m4 \
+ m4/ax_gcc_func_attribute.m4 \
+ m4/ax_check_compile_flag.m4 \
+ m4/ax_c_statement_expressions.m4 \
+ m4/ax_pthread.m4 \
+ m4/ax_c_lto.m4 \
+ m4/ax_c_mallopt.m4 \
+ m4/tcmalloc.m4 \
+ m4/ax_c__generic.m4 \
autogen.sh \
README.md \
LICENSE.md \
dist_noinst_DATA= \
diagrams/config.puml \
diagrams/registry.puml \
+ diagrams/netdata-for-ephemeral-nodes.xml \
+ diagrams/netdata-proxies-example.xml \
configs.signatures \
Dockerfile \
netdata.spec \
,
[enable_x86_sse="yes"]
)
+AC_ARG_ENABLE(
+ [lto],
+ [AS_HELP_STRING([--disable-lto], [Link Time Optimizations @<:@default enabled@:>@])],
+ ,
+ [enable_lto="detect"]
+)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# operating system detection
+AC_MSG_CHECKING([operating system])
case "$host_os" in
freebsd*)
build_target=freebsd
AM_CONDITIONAL([FREEBSD], [test "${build_target}" = "freebsd"])
AM_CONDITIONAL([MACOS], [test "${build_target}" = "macos"])
AM_CONDITIONAL([LINUX], [test "${build_target}" = "linux"])
-
-
-# -----------------------------------------------------------------------------
-# compiler options
-
-AC_ARG_VAR([SSE_CANDIDATE], [C compiler flags for SSE])
-AS_CASE([$host_cpu],
- [i?86], [SSE_CANDIDATE="yes"]
-)
-AC_SUBST([SSE_CANDIDATE])
-if test "${SSE_CANDIDATE}" = "yes" -a "${enable_x86_sse}" = "yes"; then
- opt="-msse2 -mfpmath=sse"
- AX_CHECK_COMPILE_FLAG($opt, [CFLAGS="$CFLAGS $opt"], [])
-fi
-
-if test "${GCC}" = "yes"; then
- AC_DEFINE_UNQUOTED([likely(x)], [__builtin_expect(!!(x), 1)], [gcc branch optimization])
- AC_DEFINE_UNQUOTED([unlikely(x)], [__builtin_expect(!!(x), 0)], [gcc branch optimization])
-else
- AC_DEFINE_UNQUOTED([likely(x)], [(x)], [gcc branch optimization])
- AC_DEFINE_UNQUOTED([unlikely(x)], [(x)], [gcc branch optimization])
-fi
-
-if test "${enable_pedantic}" = "yes"; then
- enable_strict="yes"
- CFLAGS="${CFLAGS} -pedantic -Wall -Wextra -Wno-long-long"
-fi
+AC_MSG_RESULT([${build_target}])
# -----------------------------------------------------------------------------
CC="${PTHREAD_CC}"
-# -----------------------------------------------------------------------------
-# memory allocation library
-
-AC_MSG_CHECKING([for memory allocator])
-TS_CHECK_JEMALLOC
-if test "$has_jemalloc" = "1"; then
- AC_DEFINE([ENABLE_JEMALLOC], [1], [compile and link with jemalloc])
- AC_MSG_RESULT([jemalloc])
-else
- TS_CHECK_TCMALLOC
- if test "$has_tcmalloc" = "1"; then
- AC_DEFINE([ENABLE_TCMALLOC], [1], [compile and link with tcmalloc])
- AC_MSG_RESULT([tcmalloc])
- else
- AC_MSG_RESULT([system])
- AC_C_MALLOPT
- AC_C_MALLINFO
- fi
-fi
-
-
# -----------------------------------------------------------------------------
# libm
OPTIONAL_UUID_LIBS="${UUID_LIBS}"
+# -----------------------------------------------------------------------------
+# compiler options
+
+AC_ARG_VAR([SSE_CANDIDATE], [C compiler flags for SSE])
+AS_CASE([$host_cpu],
+ [i?86], [SSE_CANDIDATE="yes"]
+)
+AC_SUBST([SSE_CANDIDATE])
+if test "${SSE_CANDIDATE}" = "yes" -a "${enable_x86_sse}" = "yes"; then
+ opt="-msse2 -mfpmath=sse"
+ AX_CHECK_COMPILE_FLAG(${opt}, [CFLAGS="${CFLAGS} ${opt}"], [])
+fi
+
+if test "${GCC}" = "yes"; then
+ AC_DEFINE_UNQUOTED([likely(x)], [__builtin_expect(!!(x), 1)], [gcc branch optimization])
+ AC_DEFINE_UNQUOTED([unlikely(x)], [__builtin_expect(!!(x), 0)], [gcc branch optimization])
+else
+ AC_DEFINE_UNQUOTED([likely(x)], [(x)], [gcc branch optimization])
+ AC_DEFINE_UNQUOTED([unlikely(x)], [(x)], [gcc branch optimization])
+fi
+
+if test "${enable_pedantic}" = "yes"; then
+ enable_strict="yes"
+ CFLAGS="${CFLAGS} -pedantic -Wall -Wextra -Wno-long-long"
+fi
+
+
+# -----------------------------------------------------------------------------
+# memory allocation library
+
+AC_MSG_CHECKING([for memory allocator])
+TS_CHECK_JEMALLOC
+if test "$has_jemalloc" = "1"; then
+ AC_DEFINE([ENABLE_JEMALLOC], [1], [compile and link with jemalloc])
+ AC_MSG_RESULT([jemalloc])
+else
+ TS_CHECK_TCMALLOC
+ if test "$has_tcmalloc" = "1"; then
+ AC_DEFINE([ENABLE_TCMALLOC], [1], [compile and link with tcmalloc])
+ AC_MSG_RESULT([tcmalloc])
+ else
+ AC_MSG_RESULT([system])
+ AC_C_MALLOPT
+ AC_C_MALLINFO
+ fi
+fi
+
+
# -----------------------------------------------------------------------------
# libcap
AM_CONDITIONAL([ENABLE_PLUGIN_NFACCT], [test "${enable_plugin_nfacct}" = "yes"])
+# -----------------------------------------------------------------------------
+# Link-Time-Optimization
+
+if test "${enable_lto}" != "no"; then
+ opt="-flto"
+ AX_CHECK_COMPILE_FLAG(${opt}, [have_lto=yes], [have_lto=no])
+fi
+if test "${have_lto}" = "yes"; then
+ oCFLAGS="${CFLAGS}"
+ CFLAGS="${CFLAGS} -flto ${OPTIONAL_MATH_CLFAGS} ${OPTIONAL_NFACCT_CLFAGS} ${OPTIONAL_ZLIB_CLFAGS} ${OPTIONAL_UUID_CLFAGS} ${OPTIONAL_LIBCAP_CFLAGS} ${OPTIONAL_IPMIMONITORING_CFLAGS}"
+ ac_cv_c_lto_cross_compile="${enable_lto}"
+ test "${ac_cv_c_lto_cross_compile}" != "yes" && ac_cv_c_lto_cross_compile="no"
+ AC_C_LTO
+ CFLAGS="${oCFLAGS}"
+ test "${ac_cv_c_lto}" != "yes" && have_lto="no"
+fi
+test "${enable_lto}" = "yes" -a "${have_lto}" != "yes" && \
+ AC_MSG_ERROR([LTO is required but is not available.])
+AC_MSG_CHECKING([if LTO should be enabled])
+if test "${enable_lto}" != "no" -a "${have_lto}" = "yes"; then
+ enable_lto="yes"
+ CFLAGS="${CFLAGS} -flto"
+else
+ enable_lto="no"
+fi
+AC_MSG_RESULT([${enable_lto}])
+
+
# -----------------------------------------------------------------------------
AC_DEFINE_UNQUOTED([NETDATA_USER], ["${with_user}"], [use this user to drop privileged])
--- /dev/null
+# AC_C_LTO
+# -------------
+# Define HAVE_LTO if -flto works.
+#
+# Compiles and runs a trivial program with the current CFLAGS (the caller is
+# expected to have added -flto to CFLAGS before invoking this macro) and
+# caches the result in ac_cv_c_lto.  When cross-compiling the test program
+# cannot be run, so the result falls back to ${ac_cv_c_lto_cross_compile},
+# which the caller must set beforehand (see the LTO section of configure.ac).
+AN_IDENTIFIER([lto], [AC_C_LTO])
+AC_DEFUN([AC_C_LTO],
+[AC_CACHE_CHECK([if -flto builds executables], ac_cv_c_lto,
+[AC_RUN_IFELSE(
+  [AC_LANG_SOURCE(
+    [[#include <stdio.h>
+      int main(int argc, char **argv) {
+        return 0;
+      }
+    ]])],
+  [ac_cv_c_lto=yes],
+  [ac_cv_c_lto=no],
+  [ac_cv_c_lto=${ac_cv_c_lto_cross_compile}])])
+if test "${ac_cv_c_lto}" = "yes"; then
+  AC_DEFINE([HAVE_LTO], 1,
+    [Define to 1 if -flto works.])
+fi
+])# AC_C_LTO
Enable/disable the FreeIPMI plugin.
Default: enable it when libipmimonitoring is available.
+ --enable-lto
+ --disable-lto
+
+ Enable/disable Link-Time-Optimization
+ Default: enabled
+
--zlib-is-really-here
--libs-are-really-here
then
NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS} --disable-plugin-freeipmi"
shift 1
+ elif [ "$1" = "--enable-lto" ]
+ then
+ NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS} --enable-lto"
+ shift 1
+ elif [ "$1" = "--disable-lto" ]
+ then
+ NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS} --disable-lto"
+ shift 1
elif [ "$1" = "--help" -o "$1" = "-h" ]
then
usage
fi
fi
+# config_signature_matches MD5 FILE
+# Returns 0 when MD5 is a known stock-configuration signature for FILE
+# (i.e. the installed file is unmodified), 1 otherwise.
+# On bash >= 4 it uses the pre-loaded configs_signatures associative array;
+# older bash cannot declare associative arrays, so it falls back to grepping
+# the configs.signatures file in the current directory.
+config_signature_matches() {
+    local md5="${1}" file="${2}"
+
+    # bash 4+: direct associative-array lookup
+    if [ "${BASH_VERSINFO[0]}" -ge "4" ]
+    then
+        [ "${configs_signatures[${md5}]}" = "${file}" ] && return 0
+        return 1
+    fi
+
+    # bash 3 fallback: search the raw signatures file for the array entry.
+    # NOTE(review): md5/file are interpolated into a grep regex; regex
+    # metacharacters in a file name (e.g. '.') match loosely — confirm
+    # whether grep -F on the literal entry would be safer here.
+    if [ -f "configs.signatures" ]
+    then
+        grep "\['${md5}'\]='${file}'" "configs.signatures" >/dev/null
+        return $?
+    fi
+
+    # no signature data available: treat as "not a stock file"
+    return 1
+}
+
# backup user configurations
installer_backup_suffix="${PID}.${RANDOM}"
for x in $(find "${NETDATA_PREFIX}/etc/netdata/" -name '*.conf' -type f)
cp "conf.d/${f}" "${x}.orig"
fi
- if [ "${BASH_VERSINFO[0]}" -ge "4" ]
- then
- if [ "${configs_signatures[${md5}]}" = "${f}" ]
+ if config_signature_matches "${md5}" "${f}"
then
- # it is a stock version - don't keep it
- echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' is stock version."
- else
- # edited by user - keep it
- echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RED} has been edited by user${TPUT_RESET}. Keeping it."
- run cp -p "${x}" "${x}.installer_backup.${installer_backup_suffix}"
- fi
+ # it is a stock version - don't keep it
+ echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' is stock version."
else
- echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RET}cannot be checked for custom edits${TPUT_RESET}. Keeping it."
+ # edited by user - keep it
+ echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RED} has been edited by user${TPUT_RESET}. Keeping it."
run cp -p "${x}" "${x}.installer_backup.${installer_backup_suffix}"
fi
fi
if(__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + " = " + varbinds[i].value);
- if(varbinds[i].type === net_snmp.ObjectType.OctetString)
+ if(varbinds[i].type === net_snmp.ObjectType.OctetString && service.snmp_oids_index[varbinds[i].oid].type !== 'title')
value = parseFloat(varbinds[i].value) * 1000;
else
value = varbinds[i].value;
value_string="${19}" # friendly value (with units)
old_value_string="${20}" # friendly old value (with units)
+# -----------------------------------------------------------------------------
+# find a suitable hostname to use, if netdata did not supply a hostname
+
+[ -z "${host}" ] && host="${NETDATA_HOSTNAME}"
+[ -z "${host}" ] && host="${NETDATA_REGISTRY_HOSTNAME}"
+[ -z "${host}" ] && host="$(hostname 2>/dev/null)"
+
# -----------------------------------------------------------------------------
# screen statuses we don't need to send a notification
# don't do anything if this is not WARNING, CRITICAL or CLEAR
if [ "${status}" != "WARNING" -a "${status}" != "CRITICAL" -a "${status}" != "CLEAR" ]
then
- info "not sending notification for ${status} on '${chart}.${name}'"
+ info "not sending notification for ${status} of '${host}.${chart}.${name}'"
exit 1
fi
# don't do anything if this is CLEAR, but it was not WARNING or CRITICAL
if [ "${old_status}" != "WARNING" -a "${old_status}" != "CRITICAL" -a "${status}" = "CLEAR" ]
then
- info "not sending notification for ${status} on '${chart}.${name}' (last status was ${old_status})"
+ info "not sending notification for ${status} of '${host}.${chart}.${name}' (last status was ${old_status})"
exit 1
fi
-a "${SEND_PD}" != "YES" \
]
then
- fatal "All notification methods are disabled. Not sending notification to '${roles}' for '${name}' = '${value}' of chart '${chart}' for status '${status}'."
+ fatal "All notification methods are disabled. Not sending notification for host '${host}', chart '${chart}' to '${roles}' for '${name}' = '${value}' for status '${status}'."
fi
-# -----------------------------------------------------------------------------
-# find a suitable hostname to use, if netdata did not supply a hostname
-
-[ -z "${host}" ] && host="${NETDATA_HOSTNAME}"
-[ -z "${host}" ] && host="${NETDATA_REGISTRY_HOSTNAME}"
-[ -z "${host}" ] && host="$(hostname 2>/dev/null)"
-
# -----------------------------------------------------------------------------
# get the date the alarm happened
retval=$?
if [ ${retval} -eq 0 ]
then
- info "sent pagerduty.com notification using service key ${PD_SERVICE_KEY::-26}....: ${d}"
+ info "sent pagerduty.com notification for host ${host} ${chart}.${name} using service key ${PD_SERVICE_KEY::-26}....: ${d}"
sent=$((sent + 1))
else
- error "failed to send pagerduty.com notification using service key ${PD_SERVICE_KEY::-26}.... (error code ${retval}): ${d}"
+ error "failed to send pagerduty.com notification for ${host} ${chart}.${name} using service key ${PD_SERVICE_KEY::-26}.... (error code ${retval}): ${d}"
fi
done
then
if [[ "${CGROUP}" =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]
then
+ # docker containers
+
DOCKERID="$( echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|" )"
# echo "DOCKERID=${DOCKERID}"
info "docker container '${DOCKERID}' is named '${NAME}'"
fi
fi
+ elif [[ "${CGROUP}" =~ machine.slice_machine.*-qemu ]]
+ then
+ # libvirtd / qemu virtual machines
+
+ NAME="$(echo ${CGROUP} | sed 's/machine.slice_machine.*-qemu//; s/\/x2d//; s/\/x2d/\-/g; s/\.scope//g')"
fi
[ -z "${NAME}" ] && NAME="${CGROUP}"
Module monitors elasticsearch performance and health metrics
-**Requirements:**
- * python `requests` package.
-
-You need to install it manually. (python-requests or python3-requests depending on the version of python).
-
-
It produces:
1. **Search performance** charts:
---
-# nginx_log
-
-Module monitors nginx access log and produces only one chart:
-
-1. **nginx status codes** in requests/s
- * 2xx
- * 3xx
- * 4xx
- * 5xx
-
-### configuration
-
-Sample for two vhosts:
-
-```yaml
-site_A:
- path: '/var/log/nginx/access-A.log'
-
-site_B:
- name: 'local'
- path: '/var/log/nginx/access-B.log'
-```
-
-When no configuration file is found, module tries to parse `/var/log/nginx/access.log` file.
-
----
-
# nsd
Module uses the `nsd-control stats_noreset` command to provide `nsd` statistics.
---
+# postgres
+
+Module monitors one or more postgres servers.
+
+**Requirements:**
+
+ * `python-psycopg2` package. You have to install it manually.
+
+Following charts are drawn:
+
+1. **Database size** MB
+ * size
+
+2. **Current Backend Processes** processes
+ * active
+
+3. **Write-Ahead Logging Statistics** files/s
+ * total
+ * ready
+ * done
+
+4. **Checkpoints** writes/s
+ * scheduled
+ * requested
+
+5. **Current connections to db** count
+ * connections
+
+6. **Tuples returned from db** tuples/s
+ * sequential
+ * bitmap
+
+7. **Tuple reads from db** reads/s
+ * disk
+ * cache
+
+8. **Transactions on db** transactions/s
+ * committed
+ * rolled back
+
+9. **Tuples written to db** writes/s
+ * inserted
+ * updated
+ * deleted
+ * conflicts
+
+10. **Locks on db** count per type
+ * locks
+
+### configuration
+
+```yaml
+socket:
+ name : 'socket'
+ user : 'postgres'
+ database : 'postgres'
+
+tcp:
+ name : 'tcp'
+ user : 'postgres'
+ database : 'postgres'
+ host : 'localhost'
+ port : 5432
+```
+
+When no configuration file is found, module tries to connect to TCP/IP socket: `localhost:5432`.
+
+---
+
# redis
Get INFO data from redis instance.
---
+# smartd_log
+
+Module monitors `smartd` log files to collect HDD/SSD S.M.A.R.T attributes.
+
+It produces following charts (you can add additional attributes in the module configuration file):
+
+1. **Read Error Rate** attribute 1
+
+2. **Start/Stop Count** attribute 4
+
+3. **Reallocated Sectors Count** attribute 5
+
+4. **Seek Error Rate** attribute 7
+
+5. **Power-On Hours Count** attribute 9
+
+6. **Power Cycle Count** attribute 12
+
+7. **Load/Unload Cycles** attribute 193
+
+8. **Temperature** attribute 194
+
+9. **Current Pending Sectors** attribute 197
+
+10. **Off-Line Uncorrectable** attribute 198
+
+11. **Write Error Rate** attribute 200
+
+### configuration
+
+```yaml
+local:
+ log_path : '/var/log/smartd/'
+```
+
+If no configuration is given, module will attempt to read log files in /var/log/smartd/ directory.
+
+---
+
# tomcat
Present tomcat containers memory utilization.
No configuration is needed.
---
+
+# web_log
+
+Tails the apache/nginx/lighttpd/gunicorn log files to collect real-time web-server statistics.
+
+It produces following charts:
+
+1. **Response by type** requests/s
+ * success (1xx, 2xx, 304)
+ * error (5xx)
+ * redirect (3xx except 304)
+ * bad (4xx)
+ * other (all other responses)
+
+2. **Response by code family** requests/s
+ * 1xx (informational)
+ * 2xx (successful)
+ * 3xx (redirect)
+ * 4xx (bad)
+ * 5xx (internal server errors)
+ * other (non-standard responses)
+ * unmatched (the lines in the log file that are not matched)
+
+3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
+
+4. **Bandwidth** KB/s
+ * received (bandwidth of requests)
+ * sent (bandwidth of responses)
+
+5. **Timings** ms (request processing time)
+ * min (minimum request processing time)
+ * max (maximum request processing time)
+ * average (average request processing time)
+
+6. **Request per url** requests/s (configured by user)
+
+7. **Http Methods** requests/s (requests per http method)
+
+8. **Http Versions** requests/s (requests per http version)
+
+9. **IP protocols** requests/s (requests per ip protocol version)
+
+10. **Current Poll Unique Client IPs** unique ips/s (unique client IPs per data collection iteration)
+
+11. **All Time Unique Client IPs** unique ips/s (unique client IPs since the last restart of netdata)
+
+
+### configuration
+
+```yaml
+nginx_log:
+ name : 'nginx_log'
+ path : '/var/log/nginx/access.log'
+
+apache_log:
+ name : 'apache_log'
+ path : '/var/log/apache/other_vhosts_access.log'
+ categories:
+ cacti : 'cacti.*'
+ observium : 'observium'
+```
+
+Module has preconfigured jobs for nginx, apache and gunicorn on various distros.
+
+---
return data
else:
self.alert("accurate method failed, falling back")
+ self.accurate_exists = False
for name, paths in self.assignment.items():
# Author: l2isbad
from base import UrlService
-from requests import get
-from socket import gethostbyname
+from socket import gethostbyname, gaierror
try:
from queue import Queue
except ImportError:
from Queue import Queue
from threading import Thread
from collections import namedtuple
+from json import loads
# default module values (can be overridden per job in `config`)
# update_every = 2
self.order = ORDER
self.definitions = CHARTS
self.host = self.configuration.get('host')
- self.port = self.configuration.get('port')
- self.user = self.configuration.get('user')
- self.password = self.configuration.get('pass')
+ self.port = self.configuration.get('port', 9200)
+ self.scheme = self.configuration.get('scheme', 'http')
self.latency = dict()
self.methods = list()
- self.auth = self.user and self.password
def check(self):
# We can't start if <host> AND <port> not specified
- if not all([self.host, self.port]):
+ if not all([self.host, self.port, isinstance(self.host, str), isinstance(self.port, (str, int))]):
+ self.error('Host is not defined in the module configuration file')
return False
# It is a bad idea to use a hostname.
# Hostname -> ip address
try:
self.host = gethostbyname(self.host)
- except Exception as error:
+ except gaierror as error:
self.error(str(error))
return False
+ scheme = 'http' if self.scheme else 'https'
+ # Add handlers (auth, self signed cert accept)
+ self.url = '%s://%s:%s' % (scheme, self.host, self.port)
+ self._UrlService__add_openers()
# Create URL for every Elasticsearch API
- url_node_stats = 'http://%s:%s/_nodes/_local/stats' % (self.host, self.port)
- url_cluster_health = 'http://%s:%s/_cluster/health' % (self.host, self.port)
- url_cluster_stats = 'http://%s:%s/_cluster/stats' % (self.host, self.port)
+ url_node_stats = '%s://%s:%s/_nodes/_local/stats' % (scheme, self.host, self.port)
+ url_cluster_health = '%s://%s:%s/_cluster/health' % (scheme, self.host, self.port)
+ url_cluster_stats = '%s://%s:%s/_cluster/stats' % (scheme, self.host, self.port)
# Create list of enabled API calls
user_choice = [bool(self.configuration.get('node_stats', True)),
METHODS(get_data_function=self._get_cluster_stats_, url=url_cluster_stats)]
# Remove disabled API calls from 'avail methods'
- self.methods = [avail_methods[_] for _ in range(len(avail_methods)) if user_choice[_]]
+ self.methods = [avail_methods[e[0]] for e in enumerate(avail_methods) if user_choice[e[0]]]
# Run _get_data for ALL active API calls.
api_check_result = dict()
+ data_from_check = dict()
for method in self.methods:
try:
- api_check_result[method.url] = (bool(method.get_data_function(None, method.url)))
+ api_check_result[method.url] = method.get_data_function(None, method.url)
+ data_from_check.update(api_check_result[method.url] or dict())
except KeyError as error:
self.error('Failed to parse %s. Error: %s' % (method.url, str(error)))
return False
# We can start ONLY if all active API calls returned NOT None
if not all(api_check_result.values()):
self.error('Plugin could not get data from all APIs')
- self.error('%s' % api_check_result)
return False
else:
- self.info('%s' % api_check_result)
- self.info('Plugin was started successfully')
-
+ self._data_from_check = data_from_check
return True
- def _get_raw_data(self, url):
- try:
- if not self.auth:
- raw_data = get(url)
- else:
- raw_data = get(url, auth=(self.user, self.password))
- except Exception:
- return None
-
- return raw_data
-
def _get_data(self):
threads = list()
queue = Queue()
:return: dict
"""
- data = self._get_raw_data(url)
+ raw_data = self._get_raw_data(url)
- if not data:
+ if not raw_data:
return queue.put(dict()) if queue else None
else:
- data = data.json() if hasattr(data.json, '__call__') else data.json
+ data = loads(raw_data)
to_netdata = fetch_data_(raw_data=data, metrics_list=HEALTH_STATS)
:return: dict
"""
- data = self._get_raw_data(url)
+ raw_data = self._get_raw_data(url)
- if not data:
+ if not raw_data:
return queue.put(dict()) if queue else None
else:
- data = data.json() if hasattr(data.json, '__call__') else data.json
+ data = loads(raw_data)
to_netdata = fetch_data_(raw_data=data, metrics_list=CLUSTER_STATS)
:return: dict
"""
- data = self._get_raw_data(url)
+ raw_data = self._get_raw_data(url)
- if not data:
+ if not raw_data:
return queue.put(dict()) if queue else None
else:
- data = data.json() if hasattr(data.json, '__call__') else data.json
+ data = loads(raw_data)
node = list(data['nodes'].keys())[0]
to_netdata = fetch_data_(raw_data=data['nodes'][node], metrics_list=NODE_STATS)
# Description: MySQL netdata python.d module
# Author: Pawel Krupa (paulfantom)
-from base import SimpleService
-import msg
-
-# import 3rd party library to handle MySQL communication
-try:
- import MySQLdb
-
- # https://github.com/PyMySQL/mysqlclient-python
- msg.info("using MySQLdb")
-except ImportError:
- try:
- import pymysql as MySQLdb
-
- # https://github.com/PyMySQL/PyMySQL
- msg.info("using pymysql")
- except ImportError:
- msg.error("MySQLdb or PyMySQL module is needed to use mysql.chart.py plugin")
- raise ImportError
+from base import MySQLService
# default module values (can be overridden per job in `config`)
# update_every = 3
priority = 90000
retries = 60
-# default configuration (overridden by python.d.plugin)
-# config = {
-# 'local': {
-# 'user': 'root',
-# 'pass': '',
-# 'socket': '/var/run/mysqld/mysqld.sock',
-# 'update_every': update_every,
-# 'retries': retries,
-# 'priority': priority
-# }
-#}
-
# query executed on MySQL server
-QUERY = "SHOW GLOBAL STATUS;"
-QUERY_SLAVE = "SHOW SLAVE STATUS;"
+QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
+QUERY_SLAVE = 'SHOW SLAVE STATUS;'
+
+GLOBAL_STATS = [
+ 'Bytes_received',
+ 'Bytes_sent',
+ 'Queries',
+ 'Questions',
+ 'Slow_queries',
+ 'Handler_commit',
+ 'Handler_delete',
+ 'Handler_prepare',
+ 'Handler_read_first',
+ 'Handler_read_key',
+ 'Handler_read_next',
+ 'Handler_read_prev',
+ 'Handler_read_rnd',
+ 'Handler_read_rnd_next',
+ 'Handler_rollback',
+ 'Handler_savepoint',
+ 'Handler_savepoint_rollback',
+ 'Handler_update',
+ 'Handler_write',
+ 'Table_locks_immediate',
+ 'Table_locks_waited',
+ 'Select_full_join',
+ 'Select_full_range_join',
+ 'Select_range',
+ 'Select_range_check',
+ 'Select_scan',
+ 'Sort_merge_passes',
+ 'Sort_range',
+ 'Sort_scan',
+ 'Created_tmp_disk_tables',
+ 'Created_tmp_files',
+ 'Created_tmp_tables',
+ 'Connections',
+ 'Aborted_connects',
+ 'Binlog_cache_disk_use',
+ 'Binlog_cache_use',
+ 'Threads_connected',
+ 'Threads_created',
+ 'Threads_cached',
+ 'Threads_running',
+ 'Thread_cache_misses',
+ 'Innodb_data_read',
+ 'Innodb_data_written',
+ 'Innodb_data_reads',
+ 'Innodb_data_writes',
+ 'Innodb_data_fsyncs',
+ 'Innodb_data_pending_reads',
+ 'Innodb_data_pending_writes',
+ 'Innodb_data_pending_fsyncs',
+ 'Innodb_log_waits',
+ 'Innodb_log_write_requests',
+ 'Innodb_log_writes',
+ 'Innodb_os_log_fsyncs',
+ 'Innodb_os_log_pending_fsyncs',
+ 'Innodb_os_log_pending_writes',
+ 'Innodb_os_log_written',
+ 'Innodb_row_lock_current_waits',
+ 'Innodb_rows_inserted',
+ 'Innodb_rows_read',
+ 'Innodb_rows_updated',
+ 'Innodb_rows_deleted',
+ 'Innodb_buffer_pool_pages_data',
+ 'Innodb_buffer_pool_pages_dirty',
+ 'Innodb_buffer_pool_pages_free',
+ 'Innodb_buffer_pool_pages_flushed',
+ 'Innodb_buffer_pool_pages_misc',
+ 'Innodb_buffer_pool_pages_total',
+ 'Innodb_buffer_pool_bytes_data',
+ 'Innodb_buffer_pool_bytes_dirty',
+ 'Innodb_buffer_pool_read_ahead',
+ 'Innodb_buffer_pool_read_ahead_evicted',
+ 'Innodb_buffer_pool_read_ahead_rnd',
+ 'Innodb_buffer_pool_read_requests',
+ 'Innodb_buffer_pool_write_requests',
+ 'Innodb_buffer_pool_reads',
+ 'Innodb_buffer_pool_wait_free',
+ 'Qcache_hits',
+ 'Qcache_lowmem_prunes',
+ 'Qcache_inserts',
+ 'Qcache_not_cached',
+ 'Qcache_queries_in_cache',
+ 'Qcache_free_memory',
+ 'Qcache_free_blocks',
+ 'Qcache_total_blocks',
+ 'Key_blocks_unused',
+ 'Key_blocks_used',
+ 'Key_blocks_not_flushed',
+ 'Key_read_requests',
+ 'Key_write_requests',
+ 'Key_reads',
+ 'Key_writes',
+ 'Open_files',
+ 'Opened_files',
+ 'Binlog_stmt_cache_disk_use',
+ 'Binlog_stmt_cache_use',
+ 'Connection_errors_accept',
+ 'Connection_errors_internal',
+ 'Connection_errors_max_connections',
+ 'Connection_errors_peer_address',
+ 'Connection_errors_select',
+ 'Connection_errors_tcpwrap']
+
+def slave_seconds(value):
+    """Normalize 'Seconds_Behind_Master': pass the value through, or -1
+    when it is empty/None (replication not running).
+
+    NOTE(review): a legitimate value of 0 is also falsy and is mapped to
+    -1 — confirm this is intended."""
+    return value if value else -1
+
+def slave_running(value):
+    """Map a SHOW SLAVE STATUS 'Yes'/'No' flag to 1 (running) or -1."""
+    return 1 if value == 'Yes' else -1
+
+
+# (column name, transform) pairs read from the SHOW SLAVE STATUS result.
+SLAVE_STATS = [
+    ('Seconds_Behind_Master', slave_seconds),
+    ('Slave_SQL_Running', slave_running),
+    ('Slave_IO_Running', slave_running)
+]
ORDER = ['net',
'queries',
'net': {
'options': [None, 'mysql Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'],
'lines': [
- ["Bytes_received", "in", "incremental", 8, 1024],
- ["Bytes_sent", "out", "incremental", -8, 1024]
+ ['Bytes_received', 'in', 'incremental', 8, 1024],
+ ['Bytes_sent', 'out', 'incremental', -8, 1024]
]},
'queries': {
'options': [None, 'mysql Queries', 'queries/s', 'queries', 'mysql.queries', 'line'],
'lines': [
- ["Queries", "queries", "incremental"],
- ["Questions", "questions", "incremental"],
- ["Slow_queries", "slow_queries", "incremental"]
+ ['Queries', 'queries', 'incremental'],
+ ['Questions', 'questions', 'incremental'],
+ ['Slow_queries', 'slow_queries', 'incremental']
]},
'handlers': {
'options': [None, 'mysql Handlers', 'handlers/s', 'handlers', 'mysql.handlers', 'line'],
'lines': [
- ["Handler_commit", "commit", "incremental"],
- ["Handler_delete", "delete", "incremental"],
- ["Handler_prepare", "prepare", "incremental"],
- ["Handler_read_first", "read_first", "incremental"],
- ["Handler_read_key", "read_key", "incremental"],
- ["Handler_read_next", "read_next", "incremental"],
- ["Handler_read_prev", "read_prev", "incremental"],
- ["Handler_read_rnd", "read_rnd", "incremental"],
- ["Handler_read_rnd_next", "read_rnd_next", "incremental"],
- ["Handler_rollback", "rollback", "incremental"],
- ["Handler_savepoint", "savepoint", "incremental"],
- ["Handler_savepoint_rollback", "savepoint_rollback", "incremental"],
- ["Handler_update", "update", "incremental"],
- ["Handler_write", "write", "incremental"]
+ ['Handler_commit', 'commit', 'incremental'],
+ ['Handler_delete', 'delete', 'incremental'],
+ ['Handler_prepare', 'prepare', 'incremental'],
+ ['Handler_read_first', 'read_first', 'incremental'],
+ ['Handler_read_key', 'read_key', 'incremental'],
+ ['Handler_read_next', 'read_next', 'incremental'],
+ ['Handler_read_prev', 'read_prev', 'incremental'],
+ ['Handler_read_rnd', 'read_rnd', 'incremental'],
+ ['Handler_read_rnd_next', 'read_rnd_next', 'incremental'],
+ ['Handler_rollback', 'rollback', 'incremental'],
+ ['Handler_savepoint', 'savepoint', 'incremental'],
+ ['Handler_savepoint_rollback', 'savepoint_rollback', 'incremental'],
+ ['Handler_update', 'update', 'incremental'],
+ ['Handler_write', 'write', 'incremental']
]},
'table_locks': {
'options': [None, 'mysql Tables Locks', 'locks/s', 'locks', 'mysql.table_locks', 'line'],
'lines': [
- ["Table_locks_immediate", "immediate", "incremental"],
- ["Table_locks_waited", "waited", "incremental", -1, 1]
+ ['Table_locks_immediate', 'immediate', 'incremental'],
+ ['Table_locks_waited', 'waited', 'incremental', -1, 1]
]},
'join_issues': {
'options': [None, 'mysql Select Join Issues', 'joins/s', 'issues', 'mysql.join_issues', 'line'],
'lines': [
- ["Select_full_join", "full_join", "incremental"],
- ["Select_full_range_join", "full_range_join", "incremental"],
- ["Select_range", "range", "incremental"],
- ["Select_range_check", "range_check", "incremental"],
- ["Select_scan", "scan", "incremental"]
+ ['Select_full_join', 'full_join', 'incremental'],
+ ['Select_full_range_join', 'full_range_join', 'incremental'],
+ ['Select_range', 'range', 'incremental'],
+ ['Select_range_check', 'range_check', 'incremental'],
+ ['Select_scan', 'scan', 'incremental']
]},
'sort_issues': {
'options': [None, 'mysql Sort Issues', 'issues/s', 'issues', 'mysql.sort_issues', 'line'],
'lines': [
- ["Sort_merge_passes", "merge_passes", "incremental"],
- ["Sort_range", "range", "incremental"],
- ["Sort_scan", "scan", "incremental"]
+ ['Sort_merge_passes', 'merge_passes', 'incremental'],
+ ['Sort_range', 'range', 'incremental'],
+ ['Sort_scan', 'scan', 'incremental']
]},
'tmp': {
'options': [None, 'mysql Tmp Operations', 'counter', 'temporaries', 'mysql.tmp', 'line'],
'lines': [
- ["Created_tmp_disk_tables", "disk_tables", "incremental"],
- ["Created_tmp_files", "files", "incremental"],
- ["Created_tmp_tables", "tables", "incremental"]
+ ['Created_tmp_disk_tables', 'disk_tables', 'incremental'],
+ ['Created_tmp_files', 'files', 'incremental'],
+ ['Created_tmp_tables', 'tables', 'incremental']
]},
'connections': {
'options': [None, 'mysql Connections', 'connections/s', 'connections', 'mysql.connections', 'line'],
'lines': [
- ["Connections", "all", "incremental"],
- ["Aborted_connects", "aborted", "incremental"]
+ ['Connections', 'all', 'incremental'],
+ ['Aborted_connects', 'aborted', 'incremental']
]},
'binlog_cache': {
'options': [None, 'mysql Binlog Cache', 'transactions/s', 'binlog', 'mysql.binlog_cache', 'line'],
'lines': [
- ["Binlog_cache_disk_use", "disk", "incremental"],
- ["Binlog_cache_use", "all", "incremental"]
+ ['Binlog_cache_disk_use', 'disk', 'incremental'],
+ ['Binlog_cache_use', 'all', 'incremental']
]},
'threads': {
'options': [None, 'mysql Threads', 'threads', 'threads', 'mysql.threads', 'line'],
'lines': [
- ["Threads_connected", "connected", "absolute"],
- ["Threads_created", "created", "incremental"],
- ["Threads_cached", "cached", "absolute", -1, 1],
- ["Threads_running", "running", "absolute"],
+ ['Threads_connected', 'connected', 'absolute'],
+ ['Threads_created', 'created', 'incremental'],
+ ['Threads_cached', 'cached', 'absolute', -1, 1],
+ ['Threads_running', 'running', 'absolute'],
]},
'thread_cache_misses': {
'options': [None, 'mysql Threads Cache Misses', 'misses', 'threads', 'mysql.thread_cache_misses', 'area'],
'lines': [
- ["Thread_cache_misses", "misses", "absolute", 1, 100]
+ ['Thread_cache_misses', 'misses', 'absolute', 1, 100]
]},
'innodb_io': {
'options': [None, 'mysql InnoDB I/O Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_io', 'area'],
'lines': [
- ["Innodb_data_read", "read", "incremental", 1, 1024],
- ["Innodb_data_written", "write", "incremental", -1, 1024]
+ ['Innodb_data_read', 'read', 'incremental', 1, 1024],
+ ['Innodb_data_written', 'write', 'incremental', -1, 1024]
]},
'innodb_io_ops': {
'options': [None, 'mysql InnoDB I/O Operations', 'operations/s', 'innodb', 'mysql.innodb_io_ops', 'line'],
'lines': [
- ["Innodb_data_reads", "reads", "incremental"],
- ["Innodb_data_writes", "writes", "incremental", -1, 1],
- ["Innodb_data_fsyncs", "fsyncs", "incremental"]
+ ['Innodb_data_reads', 'reads', 'incremental'],
+ ['Innodb_data_writes', 'writes', 'incremental', -1, 1],
+ ['Innodb_data_fsyncs', 'fsyncs', 'incremental']
]},
'innodb_io_pending_ops': {
'options': [None, 'mysql InnoDB Pending I/O Operations', 'operations', 'innodb', 'mysql.innodb_io_pending_ops', 'line'],
'lines': [
- ["Innodb_data_pending_reads", "reads", "absolute"],
- ["Innodb_data_pending_writes", "writes", "absolute", -1, 1],
- ["Innodb_data_pending_fsyncs", "fsyncs", "absolute"]
+ ['Innodb_data_pending_reads', 'reads', 'absolute'],
+ ['Innodb_data_pending_writes', 'writes', 'absolute', -1, 1],
+ ['Innodb_data_pending_fsyncs', 'fsyncs', 'absolute']
]},
'innodb_log': {
'options': [None, 'mysql InnoDB Log Operations', 'operations/s', 'innodb', 'mysql.innodb_log', 'line'],
'lines': [
- ["Innodb_log_waits", "waits", "incremental"],
- ["Innodb_log_write_requests", "write_requests", "incremental", -1, 1],
- ["Innodb_log_writes", "writes", "incremental", -1, 1],
+ ['Innodb_log_waits', 'waits', 'incremental'],
+ ['Innodb_log_write_requests', 'write_requests', 'incremental', -1, 1],
+ ['Innodb_log_writes', 'writes', 'incremental', -1, 1],
]},
'innodb_os_log': {
'options': [None, 'mysql InnoDB OS Log Operations', 'operations', 'innodb', 'mysql.innodb_os_log', 'line'],
'lines': [
- ["Innodb_os_log_fsyncs", "fsyncs", "incremental"],
- ["Innodb_os_log_pending_fsyncs", "pending_fsyncs", "absolute"],
- ["Innodb_os_log_pending_writes", "pending_writes", "absolute", -1, 1],
+ ['Innodb_os_log_fsyncs', 'fsyncs', 'incremental'],
+ ['Innodb_os_log_pending_fsyncs', 'pending_fsyncs', 'absolute'],
+ ['Innodb_os_log_pending_writes', 'pending_writes', 'absolute', -1, 1],
]},
'innodb_os_log_io': {
'options': [None, 'mysql InnoDB OS Log Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_os_log_io', 'area'],
'lines': [
- ["Innodb_os_log_written", "write", "incremental", -1, 1024],
+ ['Innodb_os_log_written', 'write', 'incremental', -1, 1024],
]},
'innodb_cur_row_lock': {
'options': [None, 'mysql InnoDB Current Row Locks', 'operations', 'innodb', 'mysql.innodb_cur_row_lock', 'area'],
'lines': [
- ["Innodb_row_lock_current_waits", "current_waits", "absolute"]
+ ['Innodb_row_lock_current_waits', 'current_waits', 'absolute']
]},
'innodb_rows': {
'options': [None, 'mysql InnoDB Row Operations', 'operations/s', 'innodb', 'mysql.innodb_rows', 'area'],
'lines': [
- ["Innodb_rows_inserted", "read", "incremental"],
- ["Innodb_rows_read", "deleted", "incremental", -1, 1],
- ["Innodb_rows_updated", "inserted", "incremental", 1, 1],
- ["Innodb_rows_deleted", "updated", "incremental", -1, 1],
+ ['Innodb_rows_inserted', 'read', 'incremental'],
+ ['Innodb_rows_read', 'deleted', 'incremental', -1, 1],
+ ['Innodb_rows_updated', 'inserted', 'incremental', 1, 1],
+ ['Innodb_rows_deleted', 'updated', 'incremental', -1, 1],
]},
'innodb_buffer_pool_pages': {
'options': [None, 'mysql InnoDB Buffer Pool Pages', 'pages', 'innodb', 'mysql.innodb_buffer_pool_pages', 'line'],
'lines': [
- ["Innodb_buffer_pool_pages_data", "data", "absolute"],
- ["Innodb_buffer_pool_pages_dirty", "dirty", "absolute", -1, 1],
- ["Innodb_buffer_pool_pages_free", "free", "absolute"],
- ["Innodb_buffer_pool_pages_flushed", "flushed", "incremental", -1, 1],
- ["Innodb_buffer_pool_pages_misc", "misc", "absolute", -1, 1],
- ["Innodb_buffer_pool_pages_total", "total", "absolute"]
+ ['Innodb_buffer_pool_pages_data', 'data', 'absolute'],
+ ['Innodb_buffer_pool_pages_dirty', 'dirty', 'absolute', -1, 1],
+ ['Innodb_buffer_pool_pages_free', 'free', 'absolute'],
+ ['Innodb_buffer_pool_pages_flushed', 'flushed', 'incremental', -1, 1],
+ ['Innodb_buffer_pool_pages_misc', 'misc', 'absolute', -1, 1],
+ ['Innodb_buffer_pool_pages_total', 'total', 'absolute']
]},
'innodb_buffer_pool_bytes': {
'options': [None, 'mysql InnoDB Buffer Pool Bytes', 'MB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'],
'lines': [
- ["Innodb_buffer_pool_bytes_data", "data", "absolute", 1, 1024 * 1024],
- ["Innodb_buffer_pool_bytes_dirty", "dirty", "absolute", -1, 1024 * 1024]
+ ['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024],
+ ['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024]
]},
'innodb_buffer_pool_read_ahead': {
'options': [None, 'mysql InnoDB Buffer Pool Read Ahead', 'operations/s', 'innodb', 'mysql.innodb_buffer_pool_read_ahead', 'area'],
'lines': [
- ["Innodb_buffer_pool_read_ahead", "all", "incremental"],
- ["Innodb_buffer_pool_read_ahead_evicted", "evicted", "incremental", -1, 1],
- ["Innodb_buffer_pool_read_ahead_rnd", "random", "incremental"]
+ ['Innodb_buffer_pool_read_ahead', 'all', 'incremental'],
+ ['Innodb_buffer_pool_read_ahead_evicted', 'evicted', 'incremental', -1, 1],
+ ['Innodb_buffer_pool_read_ahead_rnd', 'random', 'incremental']
]},
'innodb_buffer_pool_reqs': {
'options': [None, 'mysql InnoDB Buffer Pool Requests', 'requests/s', 'innodb', 'mysql.innodb_buffer_pool_reqs', 'area'],
'lines': [
- ["Innodb_buffer_pool_read_requests", "reads", "incremental"],
- ["Innodb_buffer_pool_write_requests", "writes", "incremental", -1, 1]
+ ['Innodb_buffer_pool_read_requests', 'reads', 'incremental'],
+ ['Innodb_buffer_pool_write_requests', 'writes', 'incremental', -1, 1]
]},
'innodb_buffer_pool_ops': {
'options': [None, 'mysql InnoDB Buffer Pool Operations', 'operations/s', 'innodb', 'mysql.innodb_buffer_pool_ops', 'area'],
'lines': [
- ["Innodb_buffer_pool_reads", "disk reads", "incremental"],
- ["Innodb_buffer_pool_wait_free", "wait free", "incremental", -1, 1]
+ ['Innodb_buffer_pool_reads', 'disk reads', 'incremental'],
+ ['Innodb_buffer_pool_wait_free', 'wait free', 'incremental', -1, 1]
]},
'qcache_ops': {
'options': [None, 'mysql QCache Operations', 'queries/s', 'qcache', 'mysql.qcache_ops', 'line'],
'lines': [
- ["Qcache_hits", "hits", "incremental"],
- ["Qcache_lowmem_prunes", "lowmem prunes", "incremental", -1, 1],
- ["Qcache_inserts", "inserts", "incremental"],
- ["Qcache_not_cached", "not cached", "incremental", -1, 1]
+ ['Qcache_hits', 'hits', 'incremental'],
+ ['Qcache_lowmem_prunes', 'lowmem prunes', 'incremental', -1, 1],
+ ['Qcache_inserts', 'inserts', 'incremental'],
+ ['Qcache_not_cached', 'not cached', 'incremental', -1, 1]
]},
'qcache': {
'options': [None, 'mysql QCache Queries in Cache', 'queries', 'qcache', 'mysql.qcache', 'line'],
'lines': [
- ["Qcache_queries_in_cache", "queries", "absolute"]
+ ['Qcache_queries_in_cache', 'queries', 'absolute']
]},
'qcache_freemem': {
'options': [None, 'mysql QCache Free Memory', 'MB', 'qcache', 'mysql.qcache_freemem', 'area'],
'lines': [
- ["Qcache_free_memory", "free", "absolute", 1, 1024 * 1024]
+ ['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024]
]},
'qcache_memblocks': {
'options': [None, 'mysql QCache Memory Blocks', 'blocks', 'qcache', 'mysql.qcache_memblocks', 'line'],
'lines': [
- ["Qcache_free_blocks", "free", "absolute"],
- ["Qcache_total_blocks", "total", "absolute"]
+ ['Qcache_free_blocks', 'free', 'absolute'],
+ ['Qcache_total_blocks', 'total', 'absolute']
]},
'key_blocks': {
'options': [None, 'mysql MyISAM Key Cache Blocks', 'blocks', 'myisam', 'mysql.key_blocks', 'line'],
'lines': [
- ["Key_blocks_unused", "unused", "absolute"],
- ["Key_blocks_used", "used", "absolute", -1, 1],
- ["Key_blocks_not_flushed", "not flushed", "absolute"]
+ ['Key_blocks_unused', 'unused', 'absolute'],
+ ['Key_blocks_used', 'used', 'absolute', -1, 1],
+ ['Key_blocks_not_flushed', 'not flushed', 'absolute']
]},
'key_requests': {
'options': [None, 'mysql MyISAM Key Cache Requests', 'requests/s', 'myisam', 'mysql.key_requests', 'area'],
'lines': [
- ["Key_read_requests", "reads", "incremental"],
- ["Key_write_requests", "writes", "incremental", -1, 1]
+ ['Key_read_requests', 'reads', 'incremental'],
+ ['Key_write_requests', 'writes', 'incremental', -1, 1]
]},
'key_disk_ops': {
'options': [None, 'mysql MyISAM Key Cache Disk Operations', 'operations/s', 'myisam', 'mysql.key_disk_ops', 'area'],
'lines': [
- ["Key_reads", "reads", "incremental"],
- ["Key_writes", "writes", "incremental", -1, 1]
+ ['Key_reads', 'reads', 'incremental'],
+ ['Key_writes', 'writes', 'incremental', -1, 1]
]},
'files': {
'options': [None, 'mysql Open Files', 'files', 'files', 'mysql.files', 'line'],
'lines': [
- ["Open_files", "files", "absolute"]
+ ['Open_files', 'files', 'absolute']
]},
'files_rate': {
'options': [None, 'mysql Opened Files Rate', 'files/s', 'files', 'mysql.files_rate', 'line'],
'lines': [
- ["Opened_files", "files", "incremental"]
+ ['Opened_files', 'files', 'incremental']
]},
'binlog_stmt_cache': {
'options': [None, 'mysql Binlog Statement Cache', 'statements/s', 'binlog', 'mysql.binlog_stmt_cache', 'line'],
'lines': [
- ["Binlog_stmt_cache_disk_use", "disk", "incremental"],
- ["Binlog_stmt_cache_use", "all", "incremental"]
+ ['Binlog_stmt_cache_disk_use', 'disk', 'incremental'],
+ ['Binlog_stmt_cache_use', 'all', 'incremental']
]},
'connection_errors': {
'options': [None, 'mysql Connection Errors', 'connections/s', 'connections', 'mysql.connection_errors', 'line'],
'lines': [
- ["Connection_errors_accept", "accept", "incremental"],
- ["Connection_errors_internal", "internal", "incremental"],
- ["Connection_errors_max_connections", "max", "incremental"],
- ["Connection_errors_peer_address", "peer_addr", "incremental"],
- ["Connection_errors_select", "select", "incremental"],
- ["Connection_errors_tcpwrap", "tcpwrap", "incremental"]
+ ['Connection_errors_accept', 'accept', 'incremental'],
+ ['Connection_errors_internal', 'internal', 'incremental'],
+ ['Connection_errors_max_connections', 'max', 'incremental'],
+ ['Connection_errors_peer_address', 'peer_addr', 'incremental'],
+ ['Connection_errors_select', 'select', 'incremental'],
+ ['Connection_errors_tcpwrap', 'tcpwrap', 'incremental']
]},
'slave_behind': {
'options': [None, 'Slave Behind Seconds', 'seconds', 'slave', 'mysql.slave_behind', 'line'],
'lines': [
- ["slave_behind", "seconds", "absolute"]
+ ['Seconds_Behind_Master', 'seconds', 'absolute']
]},
'slave_status': {
'options': [None, 'Slave Status', 'status', 'slave', 'mysql.slave_status', 'line'],
'lines': [
- ["slave_sql", "sql_running", "absolute"],
- ["slave_io", "io_running", "absolute"]
+ ['Slave_SQL_Running', 'sql_running', 'absolute'],
+ ['Slave_IO_Running', 'io_running', 'absolute']
]}
}
-class Service(SimpleService):
+class Service(MySQLService):
def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self._parse_config(configuration)
+ MySQLService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
- self.connection = None
- self.do_slave = -1
-
- def _parse_config(self, configuration):
- """
- Parse configuration to collect data from MySQL server
- :param configuration: dict
- :return: dict
- """
- parameters = {}
- if self.name is None:
- self.name = 'local'
- if 'user' in configuration:
- parameters['user'] = self.configuration['user']
- if 'pass' in configuration:
- parameters['passwd'] = self.configuration['pass']
- if 'my.cnf' in configuration:
- parameters['read_default_file'] = self.configuration['my.cnf']
- elif 'socket' in configuration:
- parameters['unix_socket'] = self.configuration['socket']
- elif 'host' in configuration:
- parameters['host'] = self.configuration['host']
- if 'port' in configuration:
- parameters['port'] = int(self.configuration['port'])
- self.connection_parameters = parameters
-
- def _connect(self):
- """
- Try to connect to MySQL server
- """
- try:
- self.connection = MySQLdb.connect(connect_timeout=self.update_every, **self.connection_parameters)
- except MySQLdb.OperationalError as e:
- self.error("Cannot establish connection to MySQL.")
- self.debug(str(e))
- raise RuntimeError
- except Exception as e:
- self.error("problem connecting to server:", e)
- raise RuntimeError
-
- def _get_data_slave(self):
- """
- Get slave raw data from MySQL server
- :return: dict
- """
- if self.connection is None:
- try:
- self._connect()
- except RuntimeError:
- return None
-
- slave_data = None
- slave_raw_data = None
- try:
- cursor = self.connection.cursor()
- if cursor.execute(QUERY_SLAVE):
- slave_raw_data = dict(list(zip([elem[0] for elem in cursor.description], cursor.fetchone())))
-
- except MySQLdb.OperationalError as e:
- self.debug("Reconnecting for query", QUERY_SLAVE, ":", str(e))
- try:
- self._connect()
- cursor = self.connection.cursor()
- if cursor.execute(QUERY_SLAVE):
- slave_raw_data = dict(list(zip([elem[0] for elem in cursor.description], cursor.fetchone())))
- except Exception as e:
- self.error("retried, but cannot execute query", QUERY_SLAVE, ":", str(e))
- self.connection.close()
- self.connection = None
- return None
-
- except Exception as e:
- self.error("cannot execute query", QUERY_SLAVE, ":", str(e))
- self.connection.close()
- self.connection = None
- return None
-
- if slave_raw_data is not None:
- slave_data = {
- 'slave_behind': None,
- 'slave_sql': None,
- 'slave_io': None
- }
-
- try:
- slave_data['slave_behind'] = int(slave_raw_data.setdefault('Seconds_Behind_Master', -1))
- except:
- slave_data['slave_behind'] = None
-
- try:
- slave_data['slave_sql'] = 1 if slave_raw_data.get('Slave_SQL_Running') == 'Yes' else -1
- except:
- slave_data['slave_sql'] = None
-
- try:
- slave_data['slave_io'] = 1 if slave_raw_data.get('Slave_IO_Running') == 'Yes' else -1
- except:
- slave_data['slave_io'] = None
-
- return slave_data
+ self.queries = dict(global_status=QUERY_GLOBAL, slave_status=QUERY_SLAVE)
def _get_data(self):
- """
- Get raw data from MySQL server
- :return: dict
- """
- if self.connection is None:
- try:
- self._connect()
- except RuntimeError:
- return None
- try:
- cursor = self.connection.cursor()
- cursor.execute(QUERY)
- raw_data = cursor.fetchall()
- except MySQLdb.OperationalError as e:
- self.debug("Reconnecting for query", QUERY, ":", str(e))
- try:
- self._connect()
- cursor = self.connection.cursor()
- cursor.execute(QUERY)
- raw_data = cursor.fetchall()
- except Exception as e:
- self.error("retried, but cannot execute query", QUERY, ":", str(e))
- self.connection.close()
- self.connection = None
- return None
+ raw_data = self._get_raw_data(description=True)
- except Exception as e:
- self.error("cannot execute query", QUERY, ":", str(e))
- self.connection.close()
- self.connection = None
- return None
+ if not raw_data:
+ return None
- data = dict(raw_data)
+ to_netdata = dict()
- # check for slave data
- # the first time is -1 (so we do it)
- # then it is set to 1 or 0 and we keep it like that
- if self.do_slave != 0:
- slave_data = self._get_data_slave()
- if slave_data is not None:
- data.update(slave_data)
- if self.do_slave == -1:
- self.do_slave = 1
- else:
- if self.do_slave == -1:
- self.error("replication metrics will be disabled - please allow netdata to collect them.")
- self.do_slave = 0
+ if 'global_status' in raw_data:
+ global_status = dict(raw_data['global_status'][0])
+ for key in GLOBAL_STATS:
+ if key in global_status:
+ to_netdata[key] = global_status[key]
+ if 'Threads_created' in to_netdata and 'Connections' in to_netdata:
+ to_netdata['Thread_cache_misses'] = round(int(to_netdata['Threads_created']) / float(to_netdata['Connections']) * 10000)
- # do calculations
- try:
- data["Thread_cache_misses"] = round(float(data["Threads_created"]) / float(data["Connections"]) * 10000)
- except:
- data["Thread_cache_misses"] = None
+ if 'slave_status' in raw_data:
+ if raw_data['slave_status'][0]:
+ slave_raw_data = dict(zip([e[0] for e in raw_data['slave_status'][1]], raw_data['slave_status'][0][0]))
+ for key, function in SLAVE_STATS:
+ if key in slave_raw_data:
+ to_netdata[key] = function(slave_raw_data[key])
+ else:
+ self.queries.pop('slave_status')
- return data
+ return to_netdata or None
- def check(self):
- """
- Check if service is able to connect to server
- :return: boolean
- """
- try:
- self.connection = self._connect()
- return True
- except RuntimeError:
- self.connection = None
- return False
# using ".encode()" in one thread can block other threads as well (only in python2)
import time
-# import sys
import os
import socket
import select
+import threading
+import msg
+import ssl
+from subprocess import Popen, PIPE
+from sys import exc_info
+
+try:
+ from urlparse import urlparse
+except ImportError:
+ from urllib.parse import urlparse
+
try:
import urllib.request as urllib2
except ImportError:
import urllib2
-from subprocess import Popen, PIPE
-
-import threading
-import msg
-import ssl
+try:
+ import MySQLdb
+ PYMYSQL = True
+except ImportError:
+ try:
+ import pymysql as MySQLdb
+ PYMYSQL = True
+ except ImportError:
+ PYMYSQL = False
try:
PATH = os.getenv('PATH').split(':')
class UrlService(SimpleService):
- # TODO add support for https connections
def __init__(self, configuration=None, name=None):
- self.url = ""
- self.user = None
- self.password = None
- self.proxies = {}
SimpleService.__init__(self, configuration=configuration, name=name)
+ self.url = self.configuration.get('url')
+ self.user = self.configuration.get('user')
+ self.password = self.configuration.get('pass')
+ self.ss_cert = self.configuration.get('ss_cert')
def __add_openers(self):
- # TODO add error handling
- if self.ss_cert:
- try:
- ctx = ssl.create_default_context()
- ctx.check_hostname = False
- ctx.verify_mode = ssl.CERT_NONE
- self.opener = urllib2.build_opener(urllib2.HTTPSHandler(context=ctx))
- except Exception as error:
- self.error(str(error))
- self.opener = urllib2.build_opener()
- else:
- self.opener = urllib2.build_opener()
-
- # Proxy handling
- # TODO currently self.proxies isn't parsed from configuration file
- # if len(self.proxies) > 0:
- # for proxy in self.proxies:
- # url = proxy['url']
- # # TODO test this:
- # if "user" in proxy and "pass" in proxy:
- # if url.lower().startswith('https://'):
- # url = 'https://' + proxy['user'] + ':' + proxy['pass'] + '@' + url[8:]
- # else:
- # url = 'http://' + proxy['user'] + ':' + proxy['pass'] + '@' + url[7:]
- # # FIXME move proxy auth to sth like this:
- # # passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
- # # passman.add_password(None, url, proxy['user'], proxy['password'])
- # # opener.add_handler(urllib2.HTTPBasicAuthHandler(passman))
- #
- # if url.lower().startswith('https://'):
- # opener.add_handler(urllib2.ProxyHandler({'https': url}))
- # else:
- # opener.add_handler(urllib2.ProxyHandler({'https': url}))
+ def self_signed_cert(ss_cert):
+ if ss_cert:
+ try:
+ ctx = ssl.create_default_context()
+ ctx.check_hostname = False
+ ctx.verify_mode = ssl.CERT_NONE
+ return urllib2.build_opener(urllib2.HTTPSHandler(context=ctx))
+ except AttributeError:
+ return None
+ else:
+ return None
+
+ self.opener = self_signed_cert(self.ss_cert) or urllib2.build_opener()
# HTTP Basic Auth
- if self.user is not None and self.password is not None:
+ if self.user and self.password:
+ url_parse = urlparse(self.url)
+ top_level_url = '://'.join([url_parse.scheme, url_parse.netloc])
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
- passman.add_password(None, self.url, self.user, self.password)
+ passman.add_password(None, top_level_url, self.user, self.password)
self.opener.add_handler(urllib2.HTTPBasicAuthHandler(passman))
self.debug("Enabling HTTP basic auth")
- #urllib2.install_opener(opener)
-
- def _get_raw_data(self):
+ def _get_raw_data(self, custom_url=None):
"""
Get raw data from http request
:return: str
"""
- raw = None
+ raw_data = None
+ f = None
try:
- f = self.opener.open(self.url, timeout=self.update_every * 2)
- # f = urllib2.urlopen(self.url, timeout=self.update_every * 2)
- except Exception as e:
- self.error(str(e))
+ f = self.opener.open(custom_url or self.url, timeout=self.update_every * 2)
+ raw_data = f.read().decode('utf-8', 'ignore')
+ except Exception as error:
+ self.error('Url: %s. Error: %s' %(custom_url or self.url, str(error)))
return None
-
- try:
- raw = f.read().decode('utf-8', 'ignore')
- except Exception as e:
- self.error(str(e))
finally:
- f.close()
- return raw
+ if f is not None: f.close()
+
+ return raw_data or None
def check(self):
"""
Format configuration data and try to connect to server
:return: boolean
"""
- if self.name is None or self.name == str(None):
- self.name = 'local'
- self.chart_name += "_" + self.name
- else:
- self.name = str(self.name)
- try:
- self.url = str(self.configuration['url'])
- except (KeyError, TypeError):
- pass
- try:
- self.user = str(self.configuration['user'])
- except (KeyError, TypeError):
- pass
- try:
- self.password = str(self.configuration['pass'])
- except (KeyError, TypeError):
- pass
- self.ss_cert = self.configuration.get('ss_cert')
+ if not (self.url and isinstance(self.url, str)):
+ self.error('URL is not defined or type is not <str>')
+ return False
+
self.__add_openers()
- test = self._get_data()
- if test is None or len(test) == 0:
+ try:
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Url: %s. Error: %s' % (self.url, error))
return False
- else:
+
+ if isinstance(data, dict) and data:
+ self._data_from_check = data
return True
+ else:
+ self.error("_get_data() returned no data or type is not <dict>")
+ return False
class SocketService(SimpleService):
else:
self.error("Command", str(self.command), "returned no data")
return False
+
+
+class MySQLService(SimpleService):
+
+ def __init__(self, configuration=None, name=None):
+ SimpleService.__init__(self, configuration=configuration, name=name)
+ self.__connection = None
+ self.__conn_properties = dict()
+ self.extra_conn_properties = dict()
+ self.__queries = self.configuration.get('queries', dict())
+ self.queries = dict()
+
+ def __connect(self):
+ try:
+ connection = MySQLdb.connect(connect_timeout=self.update_every, **self.__conn_properties)
+ except (MySQLdb.MySQLError, TypeError, AttributeError) as error:
+ return None, str(error)
+ else:
+ return connection, None
+
+ def check(self):
+ def get_connection_properties(conf, extra_conf):
+ properties = dict()
+ if 'user' in conf and conf['user']:
+ properties['user'] = conf['user']
+ if 'pass' in conf and conf['pass']:
+ properties['passwd'] = conf['pass']
+ if 'socket' in conf and conf['socket']:
+ properties['unix_socket'] = conf['socket']
+ elif 'host' in conf and conf['host']:
+ properties['host'] = conf['host']
+ properties['port'] = int(conf.get('port', 3306))
+ elif 'my.cnf' in conf and conf['my.cnf']:
+ properties['read_default_file'] = conf['my.cnf']
+ if isinstance(extra_conf, dict) and extra_conf:
+ properties.update(extra_conf)
+
+ return properties or None
+
+ def is_valid_queries_dict(raw_queries, log_error):
+ """
+ :param raw_queries: dict:
+ :param log_error: function:
+ :return: dict or None
+
+ raw_queries is valid when: type <dict> and not empty after is_valid_query(for all queries)
+ """
+ def is_valid_query(query):
+ return all([isinstance(query, str),
+ query.startswith(('SELECT', 'select', 'SHOW', 'show'))])
+
+ if hasattr(raw_queries, 'keys') and raw_queries:
+ valid_queries = dict([(n, q) for n, q in raw_queries.items() if is_valid_query(q)])
+ bad_queries = set(raw_queries) - set(valid_queries)
+
+ if bad_queries:
+ log_error('Removed query(s): %s' % bad_queries)
+ return valid_queries
+ else:
+ log_error('Unsupported "queries" format. Must be not empty <dict>')
+ return None
+
+ if not PYMYSQL:
+ self.error('MySQLdb or PyMySQL module is needed to use mysql.chart.py plugin')
+ return False
+
+ # Preference: 1. "queries" from the configuration file 2. "queries" from the module
+ self.queries = self.__queries or self.queries
+ # Check if "self.queries" exist, not empty and all queries are in valid format
+ self.queries = is_valid_queries_dict(self.queries, self.error)
+ if not self.queries:
+ return None
+
+ # Get connection properties
+ self.__conn_properties = get_connection_properties(self.configuration, self.extra_conn_properties)
+ if not self.__conn_properties:
+ self.error('Connection properties are missing')
+ return False
+
+ # Create connection to the database
+ self.__connection, error = self.__connect()
+ if error:
+ self.error('Can\'t establish connection to MySQL: %s' % error)
+ return False
+
+ try:
+ data = self._get_data()
+ except Exception as error:
+ self.error('_get_data() failed. Error: %s' % error)
+ return False
+
+ if isinstance(data, dict) and data:
+ # We need this for create() method
+ self._data_from_check = data
+ return True
+ else:
+ self.error("_get_data() returned no data or type is not <dict>")
+ return False
+
+ def _get_raw_data(self, description=None):
+ """
+ Get raw data from MySQL server
+ :return: dict: fetchall() or (fetchall(), description)
+ """
+
+ if not self.__connection:
+ self.__connection, error = self.__connect()
+ if error:
+ return None
+
+ raw_data = dict()
+ queries = dict(self.queries)
+ try:
+ with self.__connection as cursor:
+ for name, query in queries.items():
+ try:
+ cursor.execute(query)
+ except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as error:
+ if self.__is_error_critical(err_class=exc_info()[0], err_text=str(error)):
+ raise RuntimeError
+ self.error('Removed query: %s[%s]. Error: %s'
+ % (name, query, error))
+ self.queries.pop(name)
+ continue
+ else:
+ raw_data[name] = (cursor.fetchall(), cursor.description) if description else cursor.fetchall()
+ self.__connection.commit()
+ except (MySQLdb.MySQLError, RuntimeError, TypeError, AttributeError):
+ self.__connection.close()
+ self.__connection = None
+ return None
+ else:
+ return raw_data or None
+
+ @staticmethod
+ def __is_error_critical(err_class, err_text):
+ return err_class == MySQLdb.OperationalError and all(['denied' not in err_text,
+ 'Unknown column' not in err_text])
endif
netdata_SOURCES = \
- appconfig.c appconfig.h \
- adaptive_resortable_list.c adaptive_resortable_list.h \
- avl.c avl.h \
- backends.c backends.h \
- clocks.c clocks.h \
- common.c common.h \
- daemon.c daemon.h \
- dictionary.c dictionary.h \
- eval.c eval.h \
- global_statistics.c global_statistics.h \
- health.c health.h health_log.c health_config.c health_json.c \
+ adaptive_resortable_list.c \
+ adaptive_resortable_list.h \
+ appconfig.c \
+ appconfig.h \
+ avl.c \
+ avl.h \
+ backends.c \
+ backends.h \
+ clocks.c \
+ clocks.h \
+ common.c \
+ common.h \
+ daemon.c \
+ daemon.h \
+ dictionary.c \
+ dictionary.h \
+ eval.c \
+ eval.h \
+ global_statistics.c \
+ global_statistics.h \
+ health.c \
+ health.h \
+ health_config.c \
+ health_json.c \
+ health_log.c \
inlined.h \
- log.c log.h \
- main.c main.h \
- plugin_checks.c plugin_checks.h \
- plugin_idlejitter.c plugin_idlejitter.h \
- plugin_nfacct.c plugin_nfacct.h \
- plugin_tc.c plugin_tc.h \
- plugins_d.c plugins_d.h \
- popen.c popen.h \
- socket.c socket.h \
- simple_pattern.c simple_pattern.h \
- sys_fs_cgroup.c \
- sys_devices_system_edac_mc.c \
- sys_devices_system_node.c \
- procfile.c procfile.h \
- proc_self_mountinfo.c proc_self_mountinfo.h \
- registry.c registry.h \
- registry_internals.c registry_internals.h \
- registry_url.c registry_url.h \
- registry_person.c registry_person.h \
- registry_machine.c registry_machine.h \
- registry_init.c \
+ locks.h \
+ log.c \
+ log.h \
+ main.c \
+ main.h \
+ plugin_checks.c \
+ plugin_checks.h \
+ plugin_idlejitter.c \
+ plugin_idlejitter.h \
+ plugin_nfacct.c \
+ plugin_nfacct.h \
+ plugin_tc.c \
+ plugin_tc.h \
+ plugins_d.c \
+ plugins_d.h \
+ popen.c \
+ popen.h \
+ proc_self_mountinfo.c \
+ proc_self_mountinfo.h \
+ procfile.c \
+ procfile.h \
+ registry.c \
+ registry.h \
registry_db.c \
+ registry_init.c \
+ registry_internals.c \
+ registry_internals.h \
registry_log.c \
- rrd.c rrd.h \
+ registry_machine.c \
+ registry_machine.h \
+ registry_person.c \
+ registry_person.h \
+ registry_url.c \
+ registry_url.h \
+ rrd.c \
+ rrd.h \
+ rrd2json.c \
+ rrd2json.h \
+ rrd2json_api_old.c \
+ rrd2json_api_old.h \
+ rrdcalc.c \
+ rrdcalctemplate.c \
rrddim.c \
+ rrddimvar.c \
rrdfamily.c \
rrdhost.c \
+ rrdpush.c \
+ rrdpush.h \
rrdset.c \
- rrdcalc.c \
- rrdcalctemplate.c \
- rrdvar.c \
- rrddimvar.c \
rrdsetvar.c \
- rrd2json.c rrd2json.h \
- rrd2json_api_old.c rrd2json_api_old.h \
- rrdpush.c rrdpush.h \
- storage_number.c storage_number.h \
- unit_test.c unit_test.h \
+ rrdvar.c \
+ simple_pattern.c \
+ simple_pattern.h \
+ socket.c \
+ socket.h \
+ storage_number.c \
+ storage_number.h \
+ sys_devices_system_edac_mc.c \
+ sys_devices_system_node.c \
+ sys_fs_cgroup.c \
+ unit_test.c \
+ unit_test.h \
url.c url.h \
- web_api_old.c web_api_old.h \
- web_api_v1.c web_api_v1.h \
- web_buffer.c web_buffer.h \
- web_buffer_svg.c web_buffer_svg.h \
- web_client.c web_client.h \
- web_server.c web_server.h \
+ web_api_old.c \
+ web_api_old.h \
+ web_api_v1.c \
+ web_api_v1.h \
+ web_buffer.c \
+ web_buffer.h \
+ web_buffer_svg.c \
+ web_buffer_svg.h \
+ web_client.c \
+ web_client.h \
+ web_server.c \
+ web_server.h \
$(NULL)
if FREEBSD
netdata_SOURCES += \
- plugin_freebsd.c plugin_freebsd.h \
+ plugin_freebsd.c \
+ plugin_freebsd.h \
freebsd_sysctl.c \
$(NULL)
else
if MACOS
netdata_SOURCES += \
- plugin_macos.c plugin_macos.h \
+ plugin_macos.c \
+ plugin_macos.h \
macos_sysctl.c \
macos_mach_smi.c \
macos_fw.c \
else
netdata_SOURCES += \
ipc.c ipc.h \
- plugin_proc.c plugin_proc.h \
- plugin_proc_diskspace.c plugin_proc_diskspace.h \
+ plugin_proc.c \
+ plugin_proc.h \
+ plugin_proc_diskspace.c \
+ plugin_proc_diskspace.h \
proc_diskstats.c \
proc_interrupts.c \
proc_softirqs.c \
struct config_option *values;
avl_tree_lock values_index;
- pthread_mutex_t mutex; // this locks only the writers, to ensure atomic updates
+ netdata_mutex_t mutex; // this locks only the writers, to ensure atomic updates
// readers are protected using the rwlock in avl_tree_lock
};
struct config netdata_config = {
.sections = NULL,
- .mutex = PTHREAD_MUTEX_INITIALIZER,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
.index = {
{ NULL, appconfig_section_compare },
AVL_LOCK_INITIALIZER
struct config stream_config = {
.sections = NULL,
- .mutex = PTHREAD_MUTEX_INITIALIZER,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
.index = {
{ NULL, appconfig_section_compare },
AVL_LOCK_INITIALIZER
// locking
static inline void appconfig_wrlock(struct config *root) {
- pthread_mutex_lock(&root->mutex);
+ netdata_mutex_lock(&root->mutex);
}
static inline void appconfig_unlock(struct config *root) {
- pthread_mutex_unlock(&root->mutex);
+ netdata_mutex_unlock(&root->mutex);
}
static inline void config_section_wrlock(struct section *co) {
- pthread_mutex_lock(&co->mutex);
+ netdata_mutex_lock(&co->mutex);
}
static inline void config_section_unlock(struct section *co) {
- pthread_mutex_unlock(&co->mutex);
+ netdata_mutex_unlock(&co->mutex);
}
struct config {
struct section *sections;
- pthread_mutex_t mutex;
+ netdata_mutex_t mutex;
avl_tree_lock index;
};
void avl_read_lock(avl_tree_lock *t) {
#ifndef AVL_WITHOUT_PTHREADS
#ifdef AVL_LOCK_WITH_MUTEX
- pthread_mutex_lock(&t->mutex);
+ netdata_mutex_lock(&t->mutex);
#else
- pthread_rwlock_rdlock(&t->rwlock);
+ netdata_rwlock_rdlock(&t->rwlock);
#endif
#endif /* AVL_WITHOUT_PTHREADS */
}
void avl_write_lock(avl_tree_lock *t) {
#ifndef AVL_WITHOUT_PTHREADS
#ifdef AVL_LOCK_WITH_MUTEX
- pthread_mutex_lock(&t->mutex);
+ netdata_mutex_lock(&t->mutex);
#else
- pthread_rwlock_wrlock(&t->rwlock);
+ netdata_rwlock_wrlock(&t->rwlock);
#endif
#endif /* AVL_WITHOUT_PTHREADS */
}
void avl_unlock(avl_tree_lock *t) {
#ifndef AVL_WITHOUT_PTHREADS
#ifdef AVL_LOCK_WITH_MUTEX
- pthread_mutex_unlock(&t->mutex);
+ netdata_mutex_unlock(&t->mutex);
#else
- pthread_rwlock_unlock(&t->rwlock);
+ netdata_rwlock_unlock(&t->rwlock);
#endif
#endif /* AVL_WITHOUT_PTHREADS */
}
int lock;
#ifdef AVL_LOCK_WITH_MUTEX
- lock = pthread_mutex_init(&t->mutex, NULL);
+ lock = netdata_mutex_init(&t->mutex, NULL);
#else
- lock = pthread_rwlock_init(&t->rwlock, NULL);
+ lock = netdata_rwlock_init(&t->rwlock);
#endif
if(lock != 0)
// #define AVL_LOCK_WITH_MUTEX 1
#ifdef AVL_LOCK_WITH_MUTEX
-#define AVL_LOCK_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+#define AVL_LOCK_INITIALIZER NETDATA_MUTEX_INITIALIZER
#else /* AVL_LOCK_WITH_MUTEX */
-#define AVL_LOCK_INITIALIZER PTHREAD_RWLOCK_INITIALIZER
+#define AVL_LOCK_INITIALIZER NETDATA_RWLOCK_INITIALIZER
#endif /* AVL_LOCK_WITH_MUTEX */
#else /* AVL_WITHOUT_PTHREADS */
#ifndef AVL_WITHOUT_PTHREADS
#ifdef AVL_LOCK_WITH_MUTEX
- pthread_mutex_t mutex;
+ netdata_mutex_t mutex;
#else /* AVL_LOCK_WITH_MUTEX */
- pthread_rwlock_t rwlock;
+ netdata_rwlock_t rwlock;
#endif /* AVL_LOCK_WITH_MUTEX */
#endif /* AVL_WITHOUT_PTHREADS */
} avl_tree_lock;
static inline int now_timeval(clockid_t clk_id, struct timeval *tv) {
struct timespec ts;
- if(unlikely(clock_gettime(clk_id, &ts) == -1))
+
+ if(unlikely(clock_gettime(clk_id, &ts) == -1)) {
+ tv->tv_sec = 0;
+ tv->tv_usec = 0;
return -1;
+ }
+
tv->tv_sec = ts.tv_sec;
tv->tv_usec = (suseconds_t)((ts.tv_nsec % NSEC_PER_SEC) / NSEC_PER_USEC);
return 0;
// ----------------------------------------------------------------------------
// netdata include files
-#include "simple_pattern.h"
-#include "avl.h"
#include "clocks.h"
#include "log.h"
+#include "locks.h"
+#include "simple_pattern.h"
+#include "avl.h"
#include "global_statistics.h"
#include "storage_number.h"
#include "web_buffer.h"
static inline void dictionary_read_lock(DICTIONARY *dict) {
if(likely(dict->rwlock)) {
// debug(D_DICTIONARY, "Dictionary READ lock");
- pthread_rwlock_rdlock(dict->rwlock);
+ netdata_rwlock_rdlock(dict->rwlock);
}
}
static inline void dictionary_write_lock(DICTIONARY *dict) {
if(likely(dict->rwlock)) {
// debug(D_DICTIONARY, "Dictionary WRITE lock");
- pthread_rwlock_wrlock(dict->rwlock);
+ netdata_rwlock_wrlock(dict->rwlock);
}
}
static inline void dictionary_unlock(DICTIONARY *dict) {
if(likely(dict->rwlock)) {
// debug(D_DICTIONARY, "Dictionary UNLOCK lock");
- pthread_rwlock_unlock(dict->rwlock);
+ netdata_rwlock_unlock(dict->rwlock);
}
}
dict->stats = callocz(1, sizeof(struct dictionary_stats));
if(!(flags & DICTIONARY_FLAG_SINGLE_THREADED)) {
- dict->rwlock = callocz(1, sizeof(pthread_rwlock_t));
- pthread_rwlock_init(dict->rwlock, NULL);
+ dict->rwlock = callocz(1, sizeof(netdata_rwlock_t));
+ netdata_rwlock_init(dict->rwlock);
}
avl_init(&dict->values_index, name_value_compare);
if(dict->stats)
freez(dict->stats);
- if(dict->rwlock)
+ if(dict->rwlock) {
+ netdata_rwlock_destroy(dict->rwlock);
freez(dict->rwlock);
+ }
freez(dict);
}
uint8_t flags;
struct dictionary_stats *stats;
- pthread_rwlock_t *rwlock;
+ netdata_rwlock_t *rwlock;
} DICTIONARY;
#define DICTIONARY_FLAG_DEFAULT 0x00000000
.compressed_content_size = 0
};
-pthread_mutex_t global_statistics_mutex = PTHREAD_MUTEX_INITIALIZER;
+netdata_mutex_t global_statistics_mutex = NETDATA_MUTEX_INITIALIZER;
inline void global_statistics_lock(void) {
- pthread_mutex_lock(&global_statistics_mutex);
+ netdata_mutex_lock(&global_statistics_mutex);
}
inline void global_statistics_unlock(void) {
- pthread_mutex_unlock(&global_statistics_mutex);
+ netdata_mutex_unlock(&global_statistics_mutex);
}
void finished_web_request_statistics(uint64_t dt,
uint32_t first_waiting = (host->health_log.alarms)?host->health_log.alarms->unique_id:0;
time_t now = now_realtime_sec();
- pthread_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
+ netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
ALARM_ENTRY *ae;
for(ae = host->health_log.alarms; ae && ae->unique_id >= stop_at_id ; ae = ae->next) {
// remember this for the next iteration
stop_at_id = first_waiting;
- pthread_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
if(host->health_log.count <= host->health_log.max)
return;
// cleanup excess entries in the log
- pthread_rwlock_wrlock(&host->health_log.alarm_log_rwlock);
+ netdata_rwlock_wrlock(&host->health_log.alarm_log_rwlock);
ALARM_ENTRY *last = NULL;
unsigned int count = host->health_log.max * 2 / 3;
host->health_log.count--;
}
- pthread_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
}
static inline int rrdcalc_isrunnable(RRDCALC *rc, time_t now, time_t *next_run) {
unsigned int count;
unsigned int max;
ALARM_ENTRY *alarms;
- pthread_rwlock_t alarm_log_rwlock;
+ netdata_rwlock_t alarm_log_rwlock;
} ALARM_LOG;
#include "rrd.h"
}
void health_alarm_log2json(RRDHOST *host, BUFFER *wb, uint32_t after) {
- pthread_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
+ netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
buffer_strcat(wb, "[");
buffer_strcat(wb, "\n]\n");
- pthread_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
}
static inline void health_rrdcalc2json_nolock(RRDHOST *host, BUFFER *wb, RRDCALC *rc) {
size_t line = 0, len = 0;
ssize_t loaded = 0, updated = 0, errored = 0, duplicate = 0;
- pthread_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
+ netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
while((s = fgets_trim_len(buf, 65536, fp, &len))) {
host->health_log_entries_written++;
}
}
- pthread_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
freez(buf);
ae->non_clear_duration += ae->duration;
// link it
- pthread_rwlock_wrlock(&host->health_log.alarm_log_rwlock);
+ netdata_rwlock_wrlock(&host->health_log.alarm_log_rwlock);
ae->next = host->health_log.alarms;
host->health_log.alarms = ae;
host->health_log.count++;
- pthread_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
// match previous alarms
- pthread_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
+ netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
ALARM_ENTRY *t;
for(t = host->health_log.alarms ; t ; t = t->next) {
if(t != ae && t->alarm_id == ae->alarm_id) {
break;
}
}
- pthread_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
health_alarm_log_save(host, ae);
}
inline void health_alarm_log_free(RRDHOST *host) {
rrdhost_check_wrlock(host);
- pthread_rwlock_wrlock(&host->health_log.alarm_log_rwlock);
+ netdata_rwlock_wrlock(&host->health_log.alarm_log_rwlock);
ALARM_ENTRY *ae;
while((ae = host->health_log.alarms)) {
health_alarm_log_free_one_nochecks_nounlink(ae);
}
- pthread_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+ netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
}
--- /dev/null
+#ifndef NETDATA_LOCKS_H
+#define NETDATA_LOCKS_H
+
+// ----------------------------------------------------------------------------
+// mutex
+
+typedef pthread_mutex_t netdata_mutex_t;
+
+#define NETDATA_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+
+static inline int __netdata_mutex_init(netdata_mutex_t *mutex) {
+ int ret = pthread_mutex_init(mutex, NULL);
+ if(unlikely(ret != 0))
+ error("MUTEX_LOCK: failed to initialize (code %d).", ret);
+ return ret;
+}
+
+static inline int __netdata_mutex_lock(netdata_mutex_t *mutex) {
+ int ret = pthread_mutex_lock(mutex);
+ if(unlikely(ret != 0))
+ error("MUTEX_LOCK: failed to get lock (code %d)", ret);
+ return ret;
+}
+
+static inline int __netdata_mutex_trylock(netdata_mutex_t *mutex) {
+ int ret = pthread_mutex_trylock(mutex);
+ return ret;
+}
+
+static inline int __netdata_mutex_unlock(netdata_mutex_t *mutex) {
+ int ret = pthread_mutex_unlock(mutex);
+ if(unlikely(ret != 0))
+ error("MUTEX_LOCK: failed to unlock (code %d).", ret);
+ return ret;
+}
+
+#ifdef NETDATA_INTERNAL_CHECKS
+
+static inline int netdata_mutex_init_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_init(0x%p) from %lu@%s, %s()", mutex, line, file, function);
+ }
+
+ int ret = __netdata_mutex_init(mutex);
+
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_init(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+static inline int netdata_mutex_lock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_lock(0x%p) from %lu@%s, %s()", mutex, line, file, function);
+ }
+
+ int ret = __netdata_mutex_lock(mutex);
+
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_lock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+static inline int netdata_mutex_trylock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_trylock(0x%p) from %lu@%s, %s()", mutex, line, file, function);
+ }
+
+ int ret = __netdata_mutex_trylock(mutex);
+
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_trylock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+static inline int netdata_mutex_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_mutex_t *mutex) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_unlock(0x%p) from %lu@%s, %s()", mutex, line, file, function);
+ }
+
+ int ret = __netdata_mutex_unlock(mutex);
+
+ debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_unlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", mutex, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+#define netdata_mutex_init(mutex) netdata_mutex_init_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
+#define netdata_mutex_lock(mutex) netdata_mutex_lock_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
+#define netdata_mutex_trylock(mutex) netdata_mutex_trylock_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
+#define netdata_mutex_unlock(mutex) netdata_mutex_unlock_debug(__FILE__, __FUNCTION__, __LINE__, mutex)
+
+#else // !NETDATA_INTERNAL_CHECKS
+
+#define netdata_mutex_init(mutex) __netdata_mutex_init(mutex)
+#define netdata_mutex_lock(mutex) __netdata_mutex_lock(mutex)
+#define netdata_mutex_trylock(mutex) __netdata_mutex_trylock(mutex)
+#define netdata_mutex_unlock(mutex) __netdata_mutex_unlock(mutex)
+
+#endif // NETDATA_INTERNAL_CHECKS
+
+
+// ----------------------------------------------------------------------------
+// r/w lock
+
+typedef pthread_rwlock_t netdata_rwlock_t;
+
+#define NETDATA_RWLOCK_INITIALIZER PTHREAD_RWLOCK_INITIALIZER
+
+static inline int __netdata_rwlock_destroy(netdata_rwlock_t *rwlock) {
+ int ret = pthread_rwlock_destroy(rwlock);
+ if(unlikely(ret != 0))
+ error("RW_LOCK: failed to destroy lock (code %d)", ret);
+ return ret;
+}
+
+static inline int __netdata_rwlock_init(netdata_rwlock_t *rwlock) {
+ int ret = pthread_rwlock_init(rwlock, NULL);
+ if(unlikely(ret != 0))
+ error("RW_LOCK: failed to initialize lock (code %d)", ret);
+ return ret;
+}
+
+static inline int __netdata_rwlock_rdlock(netdata_rwlock_t *rwlock) {
+ int ret = pthread_rwlock_rdlock(rwlock);
+ if(unlikely(ret != 0))
+ error("RW_LOCK: failed to obtain read lock (code %d)", ret);
+ return ret;
+}
+
+static inline int __netdata_rwlock_wrlock(netdata_rwlock_t *rwlock) {
+ int ret = pthread_rwlock_wrlock(rwlock);
+ if(unlikely(ret != 0))
+ error("RW_LOCK: failed to obtain write lock (code %d)", ret);
+ return ret;
+}
+
+static inline int __netdata_rwlock_unlock(netdata_rwlock_t *rwlock) {
+ int ret = pthread_rwlock_unlock(rwlock);
+ if(unlikely(ret != 0))
+ error("RW_LOCK: failed to release lock (code %d)", ret);
+ return ret;
+}
+
+static inline int __netdata_rwlock_tryrdlock(netdata_rwlock_t *rwlock) {
+ int ret = pthread_rwlock_tryrdlock(rwlock);
+ return ret;
+}
+
+static inline int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock) {
+ int ret = pthread_rwlock_trywrlock(rwlock);
+ return ret;
+}
+
+
+#ifdef NETDATA_INTERNAL_CHECKS
+
+static inline int netdata_rwlock_destroy_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_destroy(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_destroy(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_destroy(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+static inline int netdata_rwlock_init_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_init(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_init(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_init(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+static inline int netdata_rwlock_rdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_rdlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_rdlock(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_rdlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+static inline int netdata_rwlock_wrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_wrlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_wrlock(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_wrlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+static inline int netdata_rwlock_unlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_unlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_unlock(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_unlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+static inline int netdata_rwlock_tryrdlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_tryrdlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_tryrdlock(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_tryrdlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+static inline int netdata_rwlock_trywrlock_debug( const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock) {
+ usec_t start = 0;
+
+ if(unlikely(debug_flags & D_LOCKS)) {
+ start = now_boottime_usec();
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_trywrlock(0x%p) from %lu@%s, %s()", rwlock, line, file, function);
+ }
+
+ int ret = __netdata_rwlock_trywrlock(rwlock);
+
+ debug(D_LOCKS, "RW_LOCK: netdata_rwlock_trywrlock(0x%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, now_boottime_usec() - start, line, file, function);
+
+ return ret;
+}
+
+#define netdata_rwlock_destroy(rwlock) netdata_rwlock_destroy_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_init(rwlock) netdata_rwlock_init_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_rdlock(rwlock) netdata_rwlock_rdlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_wrlock(rwlock) netdata_rwlock_wrlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_unlock(rwlock) netdata_rwlock_unlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_tryrdlock(rwlock) netdata_rwlock_tryrdlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+#define netdata_rwlock_trywrlock(rwlock) netdata_rwlock_trywrlock_debug(__FILE__, __FUNCTION__, __LINE__, rwlock)
+
+#else // !NETDATA_INTERNAL_CHECKS
+
+#define netdata_rwlock_destroy(rwlock) __netdata_rwlock_destroy(rwlock)
+#define netdata_rwlock_init(rwlock) __netdata_rwlock_init(rwlock)
+#define netdata_rwlock_rdlock(rwlock) __netdata_rwlock_rdlock(rwlock)
+#define netdata_rwlock_wrlock(rwlock) __netdata_rwlock_wrlock(rwlock)
+#define netdata_rwlock_unlock(rwlock) __netdata_rwlock_unlock(rwlock)
+#define netdata_rwlock_tryrdlock(rwlock) __netdata_rwlock_tryrdlock(rwlock)
+#define netdata_rwlock_trywrlock(rwlock) __netdata_rwlock_trywrlock(rwlock)
+
+#endif // NETDATA_INTERNAL_CHECKS
+
+#endif //NETDATA_LOCKS_H
#define D_HEALTH 0x0000000000800000
#define D_CONNECT_TO 0x0000000001000000
#define D_RRDHOST 0x0000000002000000
+#define D_LOCKS 0x0000000004000000
#define D_SYSTEM 0x8000000000000000
//#define DEBUG (D_WEB_CLIENT_ACCESS|D_LISTENER|D_RRD_STATS)
web_server_mode = (mode)?WEB_SERVER_MODE_MULTI_THREADED:WEB_SERVER_MODE_SINGLE_THREADED;
}
- // move [global] options to the [api] section
+ // move [global] options to the [web] section
+ config_move(CONFIG_SECTION_GLOBAL, "http port listen backlog",
+ CONFIG_SECTION_WEB, "listen backlog");
+
config_move(CONFIG_SECTION_GLOBAL, "bind socket to IP",
CONFIG_SECTION_WEB, "bind to");
char buf[HOSTNAME_MAX + 1];
if(gethostname(buf, HOSTNAME_MAX) == -1)
- error("WARNING: Cannot get machine hostname.");
+ error("Cannot get machine hostname.");
netdata_configured_hostname = config_get(CONFIG_SECTION_GLOBAL, "hostname", buf);
debug(D_OPTIONS, "hostname set to '%s'", netdata_configured_hostname);
#include "common.h"
-#define DELAULT_EXLUDED_PATHS "/proc/* /sys/* /var/run/user/* /run/user/*"
+#define DELAULT_EXLUDED_PATHS "/proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/*"
#define DEFAULT_EXCLUDED_FILESYSTEMS ""
#define CONFIG_SECTION_DISKSPACE "plugin:proc:diskspace"
#define DISK_TYPE_PARTITION 2
#define DISK_TYPE_CONTAINER 3
+#define CONFIG_SECTION_DISKSTATS "plugin:proc:/proc/diskstats"
+#define DELAULT_EXLUDED_DISKS "loop* ram*"
+
static struct disk {
char *disk; // the name of the disk (sda, sdb, etc)
unsigned long major;
// get the default path for finding info about the block device
if(unlikely(!path_find_block_device[0])) {
snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/dev/block/%lu:%lu/%s");
- snprintfz(path_find_block_device, FILENAME_MAX, "%s", config_get("plugin:proc:/proc/diskstats", "path to get block device infos", buffer));
+ snprintfz(path_find_block_device, FILENAME_MAX, "%s", config_get(CONFIG_SECTION_DISKSTATS, "path to get block device infos", buffer));
}
// find if it is a partition
if(unlikely(!path_to_get_hw_sector_size[0])) {
snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/queue/hw_sector_size");
- snprintfz(path_to_get_hw_sector_size, FILENAME_MAX, "%s", config_get("plugin:proc:/proc/diskstats", "path to get h/w sector size", buffer));
+ snprintfz(path_to_get_hw_sector_size, FILENAME_MAX, "%s", config_get(CONFIG_SECTION_DISKSTATS, "path to get h/w sector size", buffer));
}
if(unlikely(!path_to_get_hw_sector_size_partitions[0])) {
snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/dev/block/%lu:%lu/subsystem/%s/../queue/hw_sector_size");
- snprintfz(path_to_get_hw_sector_size_partitions, FILENAME_MAX, "%s", config_get("plugin:proc:/proc/diskstats", "path to get h/w sector size for partitions", buffer));
+ snprintfz(path_to_get_hw_sector_size_partitions, FILENAME_MAX, "%s", config_get(CONFIG_SECTION_DISKSTATS, "path to get h/w sector size for partitions", buffer));
}
{
if(major_configs[major] == -1) {
char buffer[CONFIG_MAX_NAME + 1];
snprintfz(buffer, CONFIG_MAX_NAME, "performance metrics for disks with major %d", major);
- major_configs[major] = (char)config_get_boolean("plugin:proc:/proc/diskstats", buffer, 1);
+ major_configs[major] = (char)config_get_boolean(CONFIG_SECTION_DISKSTATS, buffer, 1);
}
return (int)major_configs[major];
globals_initialized = 0;
if(unlikely(!globals_initialized)) {
- global_enable_new_disks_detected_at_runtime = config_get_boolean("plugin:proc:/proc/diskstats", "enable new disks detected at runtime", global_enable_new_disks_detected_at_runtime);
-
- global_enable_performance_for_physical_disks = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "performance metrics for physical disks", global_enable_performance_for_physical_disks);
- global_enable_performance_for_virtual_disks = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "performance metrics for virtual disks", global_enable_performance_for_virtual_disks);
- global_enable_performance_for_partitions = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "performance metrics for partitions", global_enable_performance_for_partitions);
-
- global_do_io = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "bandwidth for all disks", global_do_io);
- global_do_ops = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "operations for all disks", global_do_ops);
- global_do_mops = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "merged operations for all disks", global_do_mops);
- global_do_iotime = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "i/o time for all disks", global_do_iotime);
- global_do_qops = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "queued operations for all disks", global_do_qops);
- global_do_util = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "utilization percentage for all disks", global_do_util);
- global_do_backlog = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "backlog for all disks", global_do_backlog);
+ global_enable_new_disks_detected_at_runtime = config_get_boolean(CONFIG_SECTION_DISKSTATS, "enable new disks detected at runtime", global_enable_new_disks_detected_at_runtime);
+ global_enable_performance_for_physical_disks = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "performance metrics for physical disks", global_enable_performance_for_physical_disks);
+ global_enable_performance_for_virtual_disks = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "performance metrics for virtual disks", global_enable_performance_for_virtual_disks);
+ global_enable_performance_for_partitions = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "performance metrics for partitions", global_enable_performance_for_partitions);
+
+ global_do_io = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "bandwidth for all disks", global_do_io);
+ global_do_ops = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "operations for all disks", global_do_ops);
+ global_do_mops = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "merged operations for all disks", global_do_mops);
+ global_do_iotime = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "i/o time for all disks", global_do_iotime);
+ global_do_qops = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "queued operations for all disks", global_do_qops);
+ global_do_util = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "utilization percentage for all disks", global_do_util);
+ global_do_backlog = config_get_boolean_ondemand(CONFIG_SECTION_DISKSTATS, "backlog for all disks", global_do_backlog);
globals_initialized = 1;
}
if(unlikely(!ff)) {
char filename[FILENAME_MAX + 1];
snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/diskstats");
- ff = procfile_open(config_get("plugin:proc:/proc/diskstats", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
+ ff = procfile_open(config_get(CONFIG_SECTION_DISKSTATS, "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
}
if(unlikely(!ff)) return 0;
// Check the configuration for the device
if(unlikely(!d->configured)) {
+ d->configured = 1;
+
+ static SIMPLE_PATTERN *excluded_disks = NULL;
+
+ if(unlikely(!excluded_disks)) {
+ excluded_disks = simple_pattern_create(
+ config_get(CONFIG_SECTION_DISKSTATS, "exclude disks", DELAULT_EXLUDED_DISKS),
+ SIMPLE_PATTERN_EXACT
+ );
+ }
+
+ int def_enable = global_enable_new_disks_detected_at_runtime;
+
+ if(def_enable != CONFIG_BOOLEAN_NO && simple_pattern_matches(excluded_disks, disk))
+ def_enable = CONFIG_BOOLEAN_NO;
+
char var_name[4096 + 1];
snprintfz(var_name, 4096, "plugin:proc:/proc/diskstats:%s", disk);
- int def_enable = config_get_boolean_ondemand(var_name, "enable", global_enable_new_disks_detected_at_runtime);
+ def_enable = config_get_boolean_ondemand(var_name, "enable", def_enable);
if(unlikely(def_enable == CONFIG_BOOLEAN_NO)) {
// the user does not want any metrics for this disk
d->do_io = CONFIG_BOOLEAN_NO;
d->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", ddo_util);
d->do_backlog = config_get_boolean_ondemand(var_name, "backlog", ddo_backlog);
}
-
- d->configured = 1;
}
// --------------------------------------------------------------------------
// REGISTRY concurrency locking
static inline void registry_lock(void) {
- pthread_mutex_lock(®istry.lock);
+ netdata_mutex_lock(®istry.lock);
}
static inline void registry_unlock(void) {
- pthread_mutex_unlock(®istry.lock);
+ netdata_mutex_unlock(®istry.lock);
}
registry.machines_urls_memory = 0;
// initialize locks
- pthread_mutex_init(®istry.lock, NULL);
+ netdata_mutex_init(®istry.lock);
// create dictionaries
registry.persons = dictionary_create(DICTIONARY_FLAGS);
avl_tree registry_urls_root_index;
- pthread_mutex_t lock;
+ netdata_mutex_t lock;
};
extern int regenerate_guid(const char *guid, char *result);
char *cache_dir; // the directory to store dimensions
char cache_filename[FILENAME_MAX+1]; // the filename to store this set
- pthread_rwlock_t rrdset_rwlock; // protects dimensions linked list
+ netdata_rwlock_t rrdset_rwlock; // protects dimensions linked list
size_t counter; // the number of times we added values to this database
size_t counter_done; // the number of times rrdset_done() has been called
};
typedef struct rrdset RRDSET;
-#define rrdset_rdlock(st) pthread_rwlock_rdlock(&((st)->rrdset_rwlock))
-#define rrdset_wrlock(st) pthread_rwlock_wrlock(&((st)->rrdset_rwlock))
-#define rrdset_unlock(st) pthread_rwlock_unlock(&((st)->rrdset_rwlock))
+#define rrdset_rdlock(st) netdata_rwlock_rdlock(&((st)->rrdset_rwlock))
+#define rrdset_wrlock(st) netdata_rwlock_wrlock(&((st)->rrdset_rwlock))
+#define rrdset_unlock(st) netdata_rwlock_unlock(&((st)->rrdset_rwlock))
+
// ----------------------------------------------------------------------------
// these loop macros make sure the linked list is accessed with the right lock
uint32_t flags; // flags about this RRDHOST
int rrd_update_every; // the update frequency of the host
- int rrd_history_entries; // the number of history entries for the host's charts
+ long rrd_history_entries; // the number of history entries for the host's charts
RRD_MEMORY_MODE rrd_memory_mode; // the memory more for the charts of this host
char *cache_dir; // the directory to save RRD cache files
volatile int rrdpush_error_shown:1; // 1 when we have logged a communication error
int rrdpush_socket; // the fd of the socket to the remote host, or -1
pthread_t rrdpush_thread; // the sender thread
- pthread_mutex_t rrdpush_mutex; // exclusive access to rrdpush_buffer
+ netdata_mutex_t rrdpush_mutex; // exclusive access to rrdpush_buffer
int rrdpush_pipe[2]; // collector to sender thread communication
BUFFER *rrdpush_buffer; // collector fills it, sender sends them
// ------------------------------------------------------------------------
// locks
- pthread_rwlock_t rrdhost_rwlock; // lock for this RRDHOST (protects rrdset_root linked list)
+ netdata_rwlock_t rrdhost_rwlock; // lock for this RRDHOST (protects rrdset_root linked list)
avl_tree_lock rrdset_root_index; // the host's charts index (by id)
avl_tree_lock rrdset_root_index_name; // the host's charts index (by name)
typedef struct rrdhost RRDHOST;
extern RRDHOST *localhost;
-#define rrdhost_rdlock(h) pthread_rwlock_rdlock(&((h)->rrdhost_rwlock))
-#define rrdhost_wrlock(h) pthread_rwlock_wrlock(&((h)->rrdhost_rwlock))
-#define rrdhost_unlock(h) pthread_rwlock_unlock(&((h)->rrdhost_rwlock))
+#define rrdhost_rdlock(host) netdata_rwlock_rdlock(&((host)->rrdhost_rwlock))
+#define rrdhost_wrlock(host) netdata_rwlock_wrlock(&((host)->rrdhost_rwlock))
+#define rrdhost_unlock(host) netdata_rwlock_unlock(&((host)->rrdhost_rwlock))
// ----------------------------------------------------------------------------
// these loop macros make sure the linked list is accessed with the right lock
// ----------------------------------------------------------------------------
// global lock for all RRDHOSTs
-extern pthread_rwlock_t rrd_rwlock;
-#define rrd_rdlock() pthread_rwlock_rdlock(&rrd_rwlock)
-#define rrd_wrlock() pthread_rwlock_wrlock(&rrd_rwlock)
-#define rrd_unlock() pthread_rwlock_unlock(&rrd_rwlock)
+extern netdata_rwlock_t rrd_rwlock;
+
+#define rrd_rdlock() netdata_rwlock_rdlock(&rrd_rwlock)
+#define rrd_wrlock() netdata_rwlock_wrlock(&rrd_rwlock)
+#define rrd_unlock() netdata_rwlock_unlock(&rrd_rwlock)
// ----------------------------------------------------------------------------
, const char *guid
, const char *os
, int update_every
- , int history
+ , long history
, RRD_MEMORY_MODE mode
, int health_enabled
, int rrdpush_enabled
, char *rrdpush_api_key
);
-#ifdef NETDATA_INTERNAL_CHECKS
-extern void rrdhost_check_wrlock_int(RRDHOST *host, const char *file, const char *function, const unsigned long line);
-extern void rrdhost_check_rdlock_int(RRDHOST *host, const char *file, const char *function, const unsigned long line);
-extern void rrdset_check_rdlock_int(RRDSET *st, const char *file, const char *function, const unsigned long line);
-extern void rrdset_check_wrlock_int(RRDSET *st, const char *file, const char *function, const unsigned long line);
-extern void rrd_check_rdlock_int(const char *file, const char *function, const unsigned long line);
-extern void rrd_check_wrlock_int(const char *file, const char *function, const unsigned long line);
-
-#define rrdhost_check_rdlock(host) rrdhost_check_rdlock_int(host, __FILE__, __FUNCTION__, __LINE__)
-#define rrdhost_check_wrlock(host) rrdhost_check_wrlock_int(host, __FILE__, __FUNCTION__, __LINE__)
-#define rrdset_check_rdlock(st) rrdset_check_rdlock_int(st, __FILE__, __FUNCTION__, __LINE__)
-#define rrdset_check_wrlock(st) rrdset_check_wrlock_int(st, __FILE__, __FUNCTION__, __LINE__)
-#define rrd_check_rdlock() rrd_check_rdlock_int(__FILE__, __FUNCTION__, __LINE__)
-#define rrd_check_wrlock() rrd_check_wrlock_int(__FILE__, __FUNCTION__, __LINE__)
+#if defined(NETDATA_INTERNAL_CHECKS) && defined(NETDATA_VERIFY_LOCKS)
+extern void __rrdhost_check_wrlock(RRDHOST *host, const char *file, const char *function, const unsigned long line);
+extern void __rrdhost_check_rdlock(RRDHOST *host, const char *file, const char *function, const unsigned long line);
+extern void __rrdset_check_rdlock(RRDSET *st, const char *file, const char *function, const unsigned long line);
+extern void __rrdset_check_wrlock(RRDSET *st, const char *file, const char *function, const unsigned long line);
+extern void __rrd_check_rdlock(const char *file, const char *function, const unsigned long line);
+extern void __rrd_check_wrlock(const char *file, const char *function, const unsigned long line);
+
+#define rrdhost_check_rdlock(host) __rrdhost_check_rdlock(host, __FILE__, __FUNCTION__, __LINE__)
+#define rrdhost_check_wrlock(host) __rrdhost_check_wrlock(host, __FILE__, __FUNCTION__, __LINE__)
+#define rrdset_check_rdlock(st) __rrdset_check_rdlock(st, __FILE__, __FUNCTION__, __LINE__)
+#define rrdset_check_wrlock(st) __rrdset_check_wrlock(st, __FILE__, __FUNCTION__, __LINE__)
+#define rrd_check_rdlock() __rrd_check_rdlock(__FILE__, __FUNCTION__, __LINE__)
+#define rrd_check_wrlock() __rrd_check_wrlock(__FILE__, __FUNCTION__, __LINE__)
#else
#define rrdhost_check_rdlock(host) (void)0
rrd_stats_api_v1_chart_with_data(st, wb, NULL, NULL);
}
-void rrd_stats_api_v1_charts(RRDHOST *host, BUFFER *wb)
-{
+void rrd_stats_api_v1_charts(RRDHOST *host, BUFFER *wb) {
+ static char *custom_dashboard_info_js_filename = NULL;
size_t c, dimensions = 0, memory = 0, alarms = 0;
RRDSET *st;
time_t now = now_realtime_sec();
+ if(unlikely(!custom_dashboard_info_js_filename))
+ custom_dashboard_info_js_filename = config_get(CONFIG_SECTION_WEB, "custom dashboard_info.js", "");
+
buffer_sprintf(wb, "{\n"
"\t\"hostname\": \"%s\""
",\n\t\"version\": \"%s\""
",\n\t\"os\": \"%s\""
",\n\t\"update_every\": %d"
- ",\n\t\"history\": %d"
+ ",\n\t\"history\": %ld"
+ ",\n\t\"custom_info\": \"%s\""
",\n\t\"charts\": {"
, host->hostname
, program_version
, host->os
, host->rrd_update_every
, host->rrd_history_entries
+ , custom_dashboard_info_js_filename
);
c = 0;
buffer_sprintf(wb, "\n\t],\n"
"\t\"hostname\": \"%s\",\n"
"\t\"update_every\": %d,\n"
- "\t\"history\": %d,\n"
+ "\t\"history\": %ld,\n"
"\t\"memory\": %lu\n"
"}\n"
, host->hostname
RRDHOST *localhost = NULL;
size_t rrd_hosts_available = 0;
-pthread_rwlock_t rrd_rwlock = PTHREAD_RWLOCK_INITIALIZER;
+netdata_rwlock_t rrd_rwlock = NETDATA_RWLOCK_INITIALIZER;
time_t rrdset_free_obsolete_time = 3600;
time_t rrdhost_free_orphan_time = 3600;
const char *guid,
const char *os,
int update_every,
- int entries,
+ long entries,
RRD_MEMORY_MODE memory_mode,
int health_enabled,
int rrdpush_enabled,
char *rrdpush_api_key,
int is_localhost
) {
-
debug(D_RRDHOST, "Host '%s': adding with guid '%s'", hostname, guid);
+ rrd_check_wrlock();
+
RRDHOST *host = callocz(1, sizeof(RRDHOST));
host->rrd_update_every = update_every;
- host->rrd_history_entries = entries;
+ host->rrd_history_entries = align_entries_to_pagesize(memory_mode, entries);
host->rrd_memory_mode = memory_mode;
host->health_enabled = (memory_mode == RRD_MEMORY_MODE_NONE)? 0 : health_enabled;
host->rrdpush_enabled = (rrdpush_enabled && rrdpush_destination && *rrdpush_destination && rrdpush_api_key && *rrdpush_api_key);
host->rrdpush_pipe[1] = -1;
host->rrdpush_socket = -1;
- pthread_mutex_init(&host->rrdpush_mutex, NULL);
- pthread_rwlock_init(&host->rrdhost_rwlock, NULL);
+ netdata_mutex_init(&host->rrdpush_mutex);
+ netdata_rwlock_init(&host->rrdhost_rwlock);
rrdhost_init_hostname(host, hostname);
rrdhost_init_machine_guid(host, guid);
else
host->health_log.max = (unsigned int)n;
- pthread_rwlock_init(&(host->health_log.alarm_log_rwlock), NULL);
+ netdata_rwlock_init(&host->health_log.alarm_log_rwlock);
char filename[FILENAME_MAX + 1];
// ------------------------------------------------------------------------
// link it and add it to the index
- rrd_wrlock();
-
if(is_localhost) {
host->next = localhost;
localhost = host;
", os %s"
", update every %d"
", memory mode %s"
- ", history entries %d"
+ ", history entries %ld"
", streaming %s"
" (to '%s' with api key '%s')"
", health %s"
}
rrd_hosts_available++;
- rrd_unlock();
return host;
}
, const char *guid
, const char *os
, int update_every
- , int history
+ , long history
, RRD_MEMORY_MODE mode
, int health_enabled
, int rrdpush_enabled
) {
debug(D_RRDHOST, "Searching for host '%s' with guid '%s'", hostname, guid);
+ rrd_wrlock();
RRDHOST *host = rrdhost_find_by_guid(guid, 0);
if(!host) {
host = rrdhost_create(
if(strcmp(host->hostname, hostname)) {
char *t = host->hostname;
- char *n = strdupz(hostname);
- host->hostname = n;
+ host->hostname = strdupz(hostname);
+ host->hash_hostname = simple_hash(host->hostname);
freez(t);
}
error("Host '%s' has an update frequency of %d seconds, but the wanted one is %d seconds.", host->hostname, host->rrd_update_every, update_every);
if(host->rrd_history_entries != history)
- error("Host '%s' has history of %d entries, but the wanted one is %d entries.", host->hostname, host->rrd_history_entries, history);
+ error("Host '%s' has history of %ld entries, but the wanted one is %ld entries.", host->hostname, host->rrd_history_entries, history);
if(host->rrd_memory_mode != mode)
error("Host '%s' has memory mode '%s', but the wanted one is '%s'.", host->hostname, rrd_memory_mode_name(host->rrd_memory_mode), rrd_memory_mode_name(mode));
}
+ rrd_unlock();
rrdhost_cleanup_orphan(host);
return host;
}
+static inline int rrdhost_should_be_deleted(RRDHOST *host, RRDHOST *protected, time_t now) {
+ if(host != protected
+ && host != localhost
+ && !host->connected_senders
+ && host->senders_disconnected_time
+ && host->senders_disconnected_time + rrdhost_free_orphan_time < now)
+ return 1;
+
+ return 0;
+}
+
void rrdhost_cleanup_orphan(RRDHOST *protected) {
time_t now = now_realtime_sec();
restart_after_removal:
rrdhost_foreach_write(host) {
- if(host != protected
- && host != localhost
- && !host->connected_senders
- && host->senders_disconnected_time + rrdhost_free_orphan_time < now) {
+ if(rrdhost_should_be_deleted(host, protected, now)) {
info("Host '%s' with machine guid '%s' is obsolete - cleaning up.", host->hostname, host->machine_guid);
if(rrdset_flag_check(host, RRDHOST_ORPHAN))
rrdpush_init();
debug(D_RRDHOST, "Initializing localhost with hostname '%s'", hostname);
+ rrd_wrlock();
localhost = rrdhost_create(
hostname
, registry_get_this_machine_guid()
, default_rrdpush_api_key
, 1
);
+ rrd_unlock();
}
// ----------------------------------------------------------------------------
// RRDHOST - lock validations
// there are only used when NETDATA_INTERNAL_CHECKS is set
-void rrdhost_check_rdlock_int(RRDHOST *host, const char *file, const char *function, const unsigned long line) {
+void __rrdhost_check_rdlock(RRDHOST *host, const char *file, const char *function, const unsigned long line) {
debug(D_RRDHOST, "Checking read lock on host '%s'", host->hostname);
- int ret = pthread_rwlock_trywrlock(&host->rrdhost_rwlock);
+ int ret = netdata_rwlock_trywrlock(&host->rrdhost_rwlock);
if(ret == 0)
fatal("RRDHOST '%s' should be read-locked, but it is not, at function %s() at line %lu of file '%s'", host->hostname, function, line, file);
}
-void rrdhost_check_wrlock_int(RRDHOST *host, const char *file, const char *function, const unsigned long line) {
+void __rrdhost_check_wrlock(RRDHOST *host, const char *file, const char *function, const unsigned long line) {
debug(D_RRDHOST, "Checking write lock on host '%s'", host->hostname);
- int ret = pthread_rwlock_tryrdlock(&host->rrdhost_rwlock);
+ int ret = netdata_rwlock_tryrdlock(&host->rrdhost_rwlock);
if(ret == 0)
fatal("RRDHOST '%s' should be write-locked, but it is not, at function %s() at line %lu of file '%s'", host->hostname, function, line, file);
}
-void rrd_check_rdlock_int(const char *file, const char *function, const unsigned long line) {
+void __rrd_check_rdlock(const char *file, const char *function, const unsigned long line) {
debug(D_RRDHOST, "Checking read lock on all RRDs");
- int ret = pthread_rwlock_trywrlock(&rrd_rwlock);
+ int ret = netdata_rwlock_trywrlock(&rrd_rwlock);
if(ret == 0)
fatal("RRDs should be read-locked, but it are not, at function %s() at line %lu of file '%s'", function, line, file);
}
-void rrd_check_wrlock_int(const char *file, const char *function, const unsigned long line) {
+void __rrd_check_wrlock(const char *file, const char *function, const unsigned long line) {
debug(D_RRDHOST, "Checking write lock on all RRDs");
- int ret = pthread_rwlock_tryrdlock(&rrd_rwlock);
+ int ret = netdata_rwlock_tryrdlock(&rrd_rwlock);
if(ret == 0)
fatal("RRDs should be write-locked, but it are not, at function %s() at line %lu of file '%s'", function, line, file);
}
freez(host->health_log_filename);
freez(host->hostname);
rrdhost_unlock(host);
+ netdata_rwlock_destroy(&host->health_log.alarm_log_rwlock);
+ netdata_rwlock_destroy(&host->rrdhost_rwlock);
freez(host);
rrd_hosts_available--;
// this is for the first iterations of each chart
static unsigned int remote_clock_resync_iterations = 60;
-#define rrdpush_lock(host) pthread_mutex_lock(&((host)->rrdpush_mutex))
-#define rrdpush_unlock(host) pthread_mutex_unlock(&((host)->rrdpush_mutex))
+#define rrdpush_lock(host) netdata_mutex_lock(&((host)->rrdpush_mutex))
+#define rrdpush_unlock(host) netdata_mutex_unlock(&((host)->rrdpush_mutex))
// checks if the current chart definition has been sent
static inline int need_to_send_chart_definition(RRDSET *st) {
}
#ifdef NETDATA_INTERNAL_CHECKS
- info("STREAM %s [receive from [%s]:%s]: client willing to stream metrics for host '%s' with machine_guid '%s': update every = %d, history = %d, memory mode = %s, health %s"
+ info("STREAM %s [receive from [%s]:%s]: client willing to stream metrics for host '%s' with machine_guid '%s': update every = %d, history = %ld, memory mode = %s, health %s"
, hostname
, client_ip
, client_port
error("STREAM %s [receive from [%s]:%s]: disconnected (completed updates %zu).", host->hostname, client_ip, client_port, count);
rrdhost_wrlock(host);
+ host->senders_disconnected_time = now_realtime_sec();
host->connected_senders--;
if(!host->connected_senders) {
if(health_enabled == CONFIG_BOOLEAN_AUTO)
host->health_enabled = 0;
-
- host->senders_disconnected_time = now_realtime_sec();
}
rrdhost_unlock(host);
#define RRD_DEFAULT_GAP_INTERPOLATIONS 1
-void rrdset_check_rdlock_int(RRDSET *st, const char *file, const char *function, const unsigned long line) {
+void __rrdset_check_rdlock(RRDSET *st, const char *file, const char *function, const unsigned long line) {
debug(D_RRD_CALLS, "Checking read lock on chart '%s'", st->id);
- int ret = pthread_rwlock_trywrlock(&st->rrdset_rwlock);
+ int ret = netdata_rwlock_trywrlock(&st->rrdset_rwlock);
if(ret == 0)
fatal("RRDSET '%s' should be read-locked, but it is not, at function %s() at line %lu of file '%s'", st->id, function, line, file);
}
-void rrdset_check_wrlock_int(RRDSET *st, const char *file, const char *function, const unsigned long line) {
+void __rrdset_check_wrlock(RRDSET *st, const char *file, const char *function, const unsigned long line) {
debug(D_RRD_CALLS, "Checking write lock on chart '%s'", st->id);
- int ret = pthread_rwlock_tryrdlock(&st->rrdset_rwlock);
+ int ret = netdata_rwlock_tryrdlock(&st->rrdset_rwlock);
if(ret == 0)
fatal("RRDSET '%s' should be write-locked, but it is not, at function %s() at line %lu of file '%s'", st->id, function, line, file);
}
// ------------------------------------------------------------------------
// free it
+ netdata_rwlock_destroy(&st->rrdset_rwlock);
+
// free directly allocated members
freez(st->config_section);
// ----------------------------------------------------------------------------
// RRDSET - create a chart
+static inline RRDSET *rrdset_find_on_create(RRDHOST *host, const char *fullid) {
+ RRDSET *st = rrdset_find(host, fullid);
+ if(unlikely(st)) {
+ rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE);
+ debug(D_RRD_CALLS, "RRDSET '%s', already exists.", fullid);
+ return st;
+ }
+
+ return NULL;
+}
+
RRDSET *rrdset_create(
RRDHOST *host
, const char *type
char fullid[RRD_ID_LENGTH_MAX + 1];
snprintfz(fullid, RRD_ID_LENGTH_MAX, "%s.%s", type, id);
- RRDSET *st = rrdset_find(host, fullid);
+ RRDSET *st = rrdset_find_on_create(host, fullid);
+ if(st) return st;
+
+ rrdhost_wrlock(host);
+
+ st = rrdset_find_on_create(host, fullid);
if(st) {
- rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE);
- debug(D_RRD_CALLS, "RRDSET '%s', already exists.", fullid);
+ rrdhost_unlock(host);
return st;
}
memset(&st->avlname, 0, sizeof(avl));
memset(&st->variables_root_index, 0, sizeof(avl_tree_lock));
memset(&st->dimensions_index, 0, sizeof(avl_tree_lock));
- memset(&st->rrdset_rwlock, 0, sizeof(pthread_rwlock_t));
+ memset(&st->rrdset_rwlock, 0, sizeof(netdata_rwlock_t));
st->name = NULL;
st->type = NULL;
avl_init_lock(&st->dimensions_index, rrddim_compare);
avl_init_lock(&st->variables_root_index, rrdvar_compare);
- pthread_rwlock_init(&st->rrdset_rwlock, NULL);
- rrdhost_wrlock(host);
+ netdata_rwlock_init(&st->rrdset_rwlock);
if(name && *name) rrdset_set_name(st, name);
else rrdset_set_name(st, id);
RRDDIM *last;
// there is dimension to free
// upgrade our read lock to a write lock
- pthread_rwlock_unlock(&st->rrdset_rwlock);
- pthread_rwlock_wrlock(&st->rrdset_rwlock);
+ rrdset_unlock(st);
+ rrdset_wrlock(st);
for( rd = st->dimensions, last = NULL ; likely(rd) ; ) {
// remove it only it is not updated in rrd_delete_unupdated_dimensions seconds
dashboard.html \
dashboard.js \
dashboard_info.js \
+ dashboard_info_custom_example.js \
dashboard.css \
dashboard.slate.css \
favicon.ico \
var len = generateGradient.length;
while(len--) {
var pcent = generateGradient[len];
- var color = self.data('gauge-gradient-percent-color-' + pcent.toString()) || false;
+ var color = self.attr('data-gauge-gradient-percent-color-' + pcent.toString()) || false;
if(color !== false) {
var a = [];
a[0] = pcent / 100;
--- /dev/null
+/*
+ * Custom netdata information file
+ * -------------------------------
+ *
+ * Use this file to add custom information to netdata dashboards:
+ *
+ * 1. Copy it to a new filename (so that it will not be overwritten with netdata updates)
+ * 2. Edit it to fit your needs
+ *    3. Set the following option in /etc/netdata/netdata.conf:
+ *
+ * [web]
+ * custom dashboard_info.js = your_filename.js
+ *
+ * Using this file you can:
+ *
+ * 1. Overwrite or add messages to menus, submenus and charts.
+ * Use dashboard_info.js to find out what you can define.
+ *
+ * 2. Inject javascript code into the default netdata dashboard.
+ *
+ */
+
+// ----------------------------------------------------------------------------
+// MENU
+//
+// - title      the menu title to be rendered at the charts menu
+// - icon html fragment of the icon to display
+// - info html fragment for the description above all the menu charts
+
+customDashboard.menu = {
+
+};
+
+
+// ----------------------------------------------------------------------------
+// SUBMENU
+//
+// - title      the submenu title to be rendered at the charts menu
+// - info html fragment for the description above all the submenu charts
+
+customDashboard.submenu = {
+
+};
+
+
+// ----------------------------------------------------------------------------
+// CONTEXT (the template each chart is based on)
+//
+// - info html fragment for the description above the chart
+// - height a ratio to the default as a decimal number: 1.0 = 100%
+// - colors a single color or an array of colors to use for the dimensions
+// - valuerange the y-range of the chart as an array [min, max]
+// - heads an array of gauge charts to render above the submenu section
+// - mainheads an array of gauge charts to render at the menu section
+
+customDashboard.context = {
+
+};
var len, i, url, hostname, icon;
if(options.hosts.length > 1) {
+ // there are mirrored hosts here
+
el += '<li><a href="#" onClick="return false;" style="color: #666;" target="_blank">databases available on this host</a></li>';
a1 += '<li><a href="#" onClick="return false;"><i class="fa fa-info-circle" aria-hidden="true" style="color: #666;"></i></a></li>';
if(base.endsWith("/"))
base = base.substring(0, base.length - 1);
+ var master = options.hosts[0].hostname;
+ var sorted = options.hosts.sort(function(a, b) {
+ if(a.hostname === master) return -1;
+ if(a.hostname === b.hostname) return 0;
+ else if(a.hostname > b.hostname) return 1;
+ return -1;
+ });
+
i = 0;
- len = options.hosts.length;
+ len = sorted.length;
while(len--) {
- hostname = options.hosts[i].hostname;
- if(i == 0) {
+ hostname = sorted[i].hostname;
+ if(hostname == master) {
url = base + "/";
icon = "home";
}
// ----------------------------------------------------------------------------
+ // user editable information
+ var customDashboard = {
+ menu: {},
+ submenu: {},
+ context: {}
+ };
+
+ // netdata standard information
var netdataDashboard = {
sparklines_registry: {},
os: 'unknown',
chart.menu = chart.type;
if(parts.length > 2 && parts[1] === 'cache')
chart.menu_pattern = tmp + '_' + parts[1];
+ else if(parts.length > 1)
+ chart.menu_pattern = tmp;
break;
case 'bind':
chart.menu = chart.type;
if(parts.length > 2 && parts[1] === 'rndc')
chart.menu_pattern = tmp + '_' + parts[1];
+ else if(parts.length > 1)
+ chart.menu_pattern = tmp;
break;
case 'cgroup':
chart.menu = chart.type;
if(parts.length > 2 && parts[1] === 'dhcpd')
chart.menu_pattern = tmp + '_' + parts[1];
+ else if(parts.length > 1)
+ chart.menu_pattern = tmp;
break;
case 'ovpn':
chart.menu = chart.type;
if(parts.length > 3 && parts[1] === 'status' && parts[2] === 'log')
chart.menu_pattern = tmp + '_' + parts[1];
+ else if(parts.length > 1)
+ chart.menu_pattern = tmp;
break;
case 'smartd':
chart.menu = chart.type;
if(parts.length > 2 && parts[1] === 'log')
chart.menu_pattern = tmp + '_' + parts[1];
- break;
-
- case 'dovecot':
- case 'exim':
- case 'hddtemp':
- case 'ipfs':
- case 'memcached':
- case 'mysql':
- case 'named':
- case 'nginx':
- case 'nut':
- case 'phpfpm':
- case 'postfix':
- case 'postgres':
- case 'redis':
- case 'retroshare':
- case 'smawebbox':
- case 'snmp':
- case 'squid':
- case 'tomcat':
- chart.menu = chart.type;
- chart.menu_pattern = tmp;
+ else if(parts.length > 1)
+ chart.menu_pattern = tmp;
break;
case 'tc':
default:
chart.menu = chart.type;
+ if(parts.length > 1)
+ chart.menu_pattern = tmp;
break;
}
document.getElementById('alarms_count_badge').innerHTML = '';
}
+ function initializeDynamicDashboardWithData(data) {
+ if(data !== null) {
+ options.hostname = data.hostname;
+ options.data = data;
+ options.version = data.version;
+ netdataDashboard.os = data.os;
+
+ if(typeof data.hosts != 'undefined')
+ options.hosts = data.hosts;
+
+ // update the dashboard hostname
+ document.getElementById('hostname').innerHTML = options.hostname;
+ document.getElementById('hostname').href = NETDATA.serverDefault;
+ document.getElementById('netdataVersion').innerHTML = options.version;
+
+ // update the dashboard title
+ document.title = options.hostname + ' netdata dashboard';
+
+ // close the splash screen
+ $("#loadOverlay").css("display","none");
+
+ // create a chart_by_name index
+ data.charts_by_name = {};
+ var charts = data.charts;
+ var x;
+ for(x in charts) {
+ if(!charts.hasOwnProperty(x)) continue;
+
+ var chart = charts[x];
+ data.charts_by_name[chart.name] = chart;
+ }
+
+ // render all charts
+ renderChartsAndMenu(data);
+ }
+ }
+
function initializeDynamicDashboard(netdata_url) {
if(typeof netdata_url === 'undefined' || netdata_url === null)
netdata_url = NETDATA.serverDefault;
// download all the charts the server knows
NETDATA.chartRegistry.downloadAll(netdata_url, function(data) {
- if(data !== null) {
- options.hostname = data.hostname;
- options.data = data;
- options.version = data.version;
- netdataDashboard.os = data.os;
-
- if(typeof data.hosts != 'undefined')
- options.hosts = data.hosts;
-
- // update the dashboard hostname
- document.getElementById('hostname').innerHTML = options.hostname;
- document.getElementById('hostname').href = NETDATA.serverDefault;
- document.getElementById('netdataVersion').innerHTML = options.version;
-
- // update the dashboard title
- document.title = options.hostname + ' netdata dashboard';
-
- // close the splash screen
- $("#loadOverlay").css("display","none");
-
- // create a chart_by_name index
- data.charts_by_name = {};
- var charts = data.charts;
- var x;
- for(x in charts) {
- if(!charts.hasOwnProperty(x)) continue;
-
- var chart = charts[x];
- data.charts_by_name[chart.name] = chart;
+ if(data != null) {
+ if(typeof data.custom_info !== 'undefined' && data.custom_info !== "") {
+ loadJs(data.custom_info, function () {
+ $.extend(true, netdataDashboard, customDashboard);
+ initializeDynamicDashboardWithData(data);
+ });
+ }
+ else {
+ initializeDynamicDashboardWithData(data);
}
-
- // render all charts
- renderChartsAndMenu(data);
}
});
});