// ----------------------------------------------------------------------------
// netdata cgroups plugin: configuration defaults and file-scope state.
// All of these are read/overridden from netdata.conf in
// read_cgroup_plugin_configuration() below.

// dashboard ordering priorities for the chart families this plugin creates
#define CHART_PRIORITY_SYSTEMD_SERVICES 19000
#define CHART_PRIORITY_CONTAINERS 40000

static long system_page_size = 4096; // system will be queried via sysconf() in configuration()

// per-metric switches; CONFIG_BOOLEAN_AUTO means "start charting only after a
// non-zero value is observed" (see the cgroup_read_*() functions below)
static int cgroup_enable_cpuacct_stat = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_memory = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_detailed_memory = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_swap = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_blkio_io = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_blkio_ops = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_blkio_throttle_io = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_blkio_throttle_ops = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_blkio_merged_ops = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_AUTO;

// systemd services get their own dedicated charts
static int cgroup_enable_systemd_services = CONFIG_BOOLEAN_YES;
static int cgroup_enable_systemd_services_detailed_memory = CONFIG_BOOLEAN_NO;
static int cgroup_used_memory_without_cache = CONFIG_BOOLEAN_YES;

static int cgroup_search_in_devices = 1;

static int cgroup_enable_new_cgroups_detected_at_runtime = 1;
static int cgroup_check_for_new_every = 10;  // rescaled against cgroup_update_every in configuration()
static int cgroup_update_every = 1;

// while an AUTO metric keeps reading zeros, skip it and re-check only every
// this many iterations (avoids re-reading files that never produce data)
static int cgroup_recheck_zero_blkio_every_iterations = 10;
static int cgroup_recheck_zero_mem_failcnt_every_iterations = 10;
static int cgroup_recheck_zero_mem_detailed_every_iterations = 10;

// base directories of the mounted cgroup v1 controllers,
// resolved from mountinfo (or defaults) in configuration()
static char *cgroup_cpuacct_base = NULL;
static char *cgroup_blkio_base = NULL;
static char *cgroup_memory_base = NULL;
static char *cgroup_devices_base = NULL;

static int cgroup_root_count = 0;  // cgroups currently in the cgroup_root list
static int cgroup_root_max = 500;  // hard cap on tracked cgroups
static int cgroup_max_depth = 0;   // directory depth limit for discovery (0 behavior: see found_subdir_in_dir)

// user-configurable pattern lists, compiled in configuration()
static SIMPLE_PATTERN *enabled_cgroup_patterns = NULL;
static SIMPLE_PATTERN *enabled_cgroup_paths = NULL;
static SIMPLE_PATTERN *enabled_cgroup_renames = NULL;
static SIMPLE_PATTERN *systemd_services_cgroups = NULL;

// external helper script used to resolve human-friendly cgroup names
static char *cgroups_rename_script = NULL;

static int cgroups_check = 0;

// pre-computed simple_hash() values for keywords matched in hot parsing loops
static uint32_t Read_hash = 0;
static uint32_t Write_hash = 0;
static uint32_t user_hash = 0;
static uint32_t system_hash = 0;
// Read every [plugin:cgroups] setting from the configuration, resolve the
// cgroup v1 controller mount points (cpuacct, blkio, memory, devices) from
// /proc mountinfo, and compile the user-configurable pattern lists.
// Called once at startup. NOTE(review): several guarding lines (e.g. the
// `if(unlikely(!mi)) {` before each error()/default-path pair) appear elided
// in this view of the file.
void read_cgroup_plugin_configuration() {
    system_page_size = sysconf(_SC_PAGESIZE);

    // pre-compute hashes for the keywords matched while parsing stat files
    Read_hash = simple_hash("Read");
    Write_hash = simple_hash("Write");
    user_hash = simple_hash("user");
    system_hash = simple_hash("system");

    // data collection frequency - never faster than the global update every
    cgroup_update_every = (int)config_get_number("plugin:cgroups", "update every", localhost->rrd_update_every);
    if(cgroup_update_every < localhost->rrd_update_every)
        cgroup_update_every = localhost->rrd_update_every;

    // cgroup discovery frequency - never faster than data collection
    cgroup_check_for_new_every = (int)config_get_number("plugin:cgroups", "check for new cgroups every", cgroup_check_for_new_every * cgroup_update_every);
    if(cgroup_check_for_new_every < cgroup_update_every)
        cgroup_check_for_new_every = cgroup_update_every;

    // per-metric yes/no/auto switches
    cgroup_enable_cpuacct_stat = config_get_boolean_ondemand("plugin:cgroups", "enable cpuacct stat (total CPU)", cgroup_enable_cpuacct_stat);
    cgroup_enable_cpuacct_usage = config_get_boolean_ondemand("plugin:cgroups", "enable cpuacct usage (per core CPU)", cgroup_enable_cpuacct_usage);

    cgroup_enable_memory = config_get_boolean_ondemand("plugin:cgroups", "enable memory (used mem including cache)", cgroup_enable_memory);
    cgroup_enable_detailed_memory = config_get_boolean_ondemand("plugin:cgroups", "enable detailed memory", cgroup_enable_detailed_memory);
    cgroup_enable_memory_failcnt = config_get_boolean_ondemand("plugin:cgroups", "enable memory limits fail count", cgroup_enable_memory_failcnt);
    cgroup_enable_swap = config_get_boolean_ondemand("plugin:cgroups", "enable swap memory", cgroup_enable_swap);

    cgroup_enable_blkio_io = config_get_boolean_ondemand("plugin:cgroups", "enable blkio bandwidth", cgroup_enable_blkio_io);
    cgroup_enable_blkio_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio operations", cgroup_enable_blkio_ops);
    cgroup_enable_blkio_throttle_io = config_get_boolean_ondemand("plugin:cgroups", "enable blkio throttle bandwidth", cgroup_enable_blkio_throttle_io);
    cgroup_enable_blkio_throttle_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio throttle operations", cgroup_enable_blkio_throttle_ops);
    cgroup_enable_blkio_queued_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio queued operations", cgroup_enable_blkio_queued_ops);
    cgroup_enable_blkio_merged_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio merged operations", cgroup_enable_blkio_merged_ops);

    // zero-value recheck intervals for AUTO metrics
    cgroup_recheck_zero_blkio_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero blkio every iterations", cgroup_recheck_zero_blkio_every_iterations);
    cgroup_recheck_zero_mem_failcnt_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero memory failcnt every iterations", cgroup_recheck_zero_mem_failcnt_every_iterations);
    cgroup_recheck_zero_mem_detailed_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero detailed memory every iterations", cgroup_recheck_zero_mem_detailed_every_iterations);

    cgroup_enable_systemd_services = config_get_boolean("plugin:cgroups", "enable systemd services", cgroup_enable_systemd_services);
    cgroup_enable_systemd_services_detailed_memory = config_get_boolean("plugin:cgroups", "enable systemd services detailed memory", cgroup_enable_systemd_services_detailed_memory);
    cgroup_used_memory_without_cache = config_get_boolean("plugin:cgroups", "report used memory without cache", cgroup_used_memory_without_cache);

    char filename[FILENAME_MAX + 1], *s;
    struct mountinfo *mi, *root = mountinfo_read(0);

    // --- cpuacct controller mount point ---
    mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "cpuacct");
    if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuacct");
    // fallback to the conventional path when mountinfo gives no answer
    error("Cannot find cgroup cpuacct mountinfo. Assuming default: /sys/fs/cgroup/cpuacct");
    s = "/sys/fs/cgroup/cpuacct";
    else s = mi->mount_point;
    snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s);
    cgroup_cpuacct_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/cpuacct", filename);

    // --- blkio controller mount point ---
    mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "blkio");
    if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "blkio");
    error("Cannot find cgroup blkio mountinfo. Assuming default: /sys/fs/cgroup/blkio");
    s = "/sys/fs/cgroup/blkio";
    else s = mi->mount_point;
    snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s);
    cgroup_blkio_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/blkio", filename);

    // --- memory controller mount point ---
    mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "memory");
    if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "memory");
    error("Cannot find cgroup memory mountinfo. Assuming default: /sys/fs/cgroup/memory");
    s = "/sys/fs/cgroup/memory";
    else s = mi->mount_point;
    snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s);
    cgroup_memory_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/memory", filename);

    // --- devices controller mount point ---
    mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "devices");
    if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "devices");
    error("Cannot find cgroup devices mountinfo. Assuming default: /sys/fs/cgroup/devices");
    s = "/sys/fs/cgroup/devices";
    else s = mi->mount_point;
    snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, s);
    cgroup_devices_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/devices", filename);

    cgroup_root_max = (int)config_get_number("plugin:cgroups", "max cgroups to allow", cgroup_root_max);
    cgroup_max_depth = (int)config_get_number("plugin:cgroups", "max cgroups depth to monitor", cgroup_max_depth);

    cgroup_enable_new_cgroups_detected_at_runtime = config_get_boolean("plugin:cgroups", "enable new cgroups detected at run time", cgroup_enable_new_cgroups_detected_at_runtime);

    // which cgroups are enabled by default (defaults list appears truncated here)
    enabled_cgroup_patterns = simple_pattern_create(
            config_get("plugin:cgroups", "enable by default cgroups matching",
                    " /system.slice/docker-*.scope "
                    " /qemu.slice/*.scope "                // #1949
                    " !/lxc/*/ns "                         // #1397
                    " * "                                  // enable anything else
            ), SIMPLE_PATTERN_EXACT);

    // which sub-directories are searched for cgroups
    enabled_cgroup_paths = simple_pattern_create(
            config_get("plugin:cgroups", "search for cgroups in subpaths matching",
            ), SIMPLE_PATTERN_EXACT);

    // the external script used to resolve readable cgroup names
    snprintfz(filename, FILENAME_MAX, "%s/cgroup-name.sh", netdata_configured_plugins_dir);
    cgroups_rename_script = config_get("plugin:cgroups", "script to get cgroup names", filename);

    // which cgroups get their name fixed by running the rename script
    enabled_cgroup_renames = simple_pattern_create(
            config_get("plugin:cgroups", "run script to rename cgroups matching",
                    " /qemu.slice/*.scope "                // #1949
            ), SIMPLE_PATTERN_EXACT);

    // which cgroups are charted as systemd services
    if(cgroup_enable_systemd_services) {
        systemd_services_cgroups = simple_pattern_create(
                config_get("plugin:cgroups", "cgroups to match as systemd services",
                        " !/system.slice/*/*.service "
                        " /system.slice/*.service "
                ), SIMPLE_PATTERN_EXACT);

    mountinfo_free(root);
213 // ----------------------------------------------------------------------------
    // (interior of struct blkio -- the struct opener is outside this view)
    int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO

    // counters accumulated per keyword while parsing the blkio files
    // (see cgroup_read_blkio below)
    unsigned long long Read;
    unsigned long long Write;

    unsigned long long Sync;
    unsigned long long Async;
    unsigned long long Total;
    // (interior of struct memory -- per-cgroup memory controller state)
    // https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt

    // ARL entry kept so we can later check whether "dirty" was actually found
    ARL_ENTRY *arl_dirty;

    // set to 1 when the corresponding file was read successfully this iteration
    int updated_detailed;
    int updated_usage_in_bytes;
    int updated_msw_usage_in_bytes;

    int enabled_detailed;           // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
    int enabled_usage_in_bytes;     // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
    int enabled_msw_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
    int enabled_failcnt;            // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO

    // countdown until zero-valued AUTO metrics are re-checked
    int delay_counter_detailed;
    int delay_counter_failcnt;

    // full paths of the memory controller files for this cgroup
    char *filename_detailed;
    char *filename_usage_in_bytes;
    char *filename_msw_usage_in_bytes;
    char *filename_failcnt;

    // whether memory.stat provided "dirty" / "swap" entries for this cgroup
    int detailed_has_dirty;
    int detailed_has_swap;

    // values parsed from memory.stat via the ARL below
    unsigned long long cache;
    unsigned long long rss;
    unsigned long long rss_huge;
    unsigned long long mapped_file;
    unsigned long long writeback;
    unsigned long long dirty;
    unsigned long long swap;
    unsigned long long pgpgin;
    unsigned long long pgpgout;
    unsigned long long pgfault;
    unsigned long long pgmajfault;

    unsigned long long inactive_anon;
    unsigned long long active_anon;
    unsigned long long inactive_file;
    unsigned long long active_file;
    unsigned long long unevictable;
    unsigned long long hierarchical_memory_limit;
    unsigned long long total_cache;
    unsigned long long total_rss;
    unsigned long long total_rss_huge;
    unsigned long long total_mapped_file;
    unsigned long long total_writeback;
    unsigned long long total_dirty;
    unsigned long long total_swap;
    unsigned long long total_pgpgin;
    unsigned long long total_pgpgout;
    unsigned long long total_pgfault;
    unsigned long long total_pgmajfault;
    unsigned long long total_inactive_anon;
    unsigned long long total_active_anon;
    unsigned long long total_inactive_file;
    unsigned long long total_active_file;
    unsigned long long total_unevictable;

    // single file metrics
    unsigned long long usage_in_bytes;     // memory.usage_in_bytes
    unsigned long long msw_usage_in_bytes; // memory.memsw.usage_in_bytes
    unsigned long long failcnt;            // memory.failcnt
// per-cgroup cpuacct.stat state: cumulative user/system CPU time
// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
struct cpuacct_stat {

    int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO

    unsigned long long user;   // value of the "user" line
    unsigned long long system; // value of the "system" line

// per-cgroup cpuacct.usage_percpu state: per-core cumulative CPU time
// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
struct cpuacct_usage {

    int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO

    // heap array, (re)allocated in cgroup_read_cpuacct_usage when the
    // number of reported CPUs changes
    unsigned long long *cpu_percpu;
// cg->options flag bits
#define CGROUP_OPTIONS_DISABLED_DUPLICATE   0x00000001 // disabled because another cgroup owns the same chart id
#define CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE 0x00000002 // charted as a systemd service

    // (interior of struct cgroup -- one node of the cgroup_root linked list)
    char available; // found in the filesystem
    char enabled;   // enabled in the config

    // per-controller collectors
    struct cpuacct_stat cpuacct_stat;
    struct cpuacct_usage cpuacct_usage;

    struct memory memory;

    struct blkio io_service_bytes;          // bytes
    struct blkio io_serviced;               // operations

    struct blkio throttle_io_service_bytes; // bytes
    struct blkio throttle_io_serviced;      // operations

    struct blkio io_merged;                 // operations
    struct blkio io_queued;                 // operations

    // chart handles (created lazily, flagged obsolete in cgroup_free)
    RRDSET *st_cpu_per_core;

    RRDSET *st_writeback;
    RRDSET *st_mem_activity;

    RRDSET *st_mem_usage;
    RRDSET *st_mem_failcnt;

    RRDSET *st_serviced_ops;
    RRDSET *st_throttle_io;
    RRDSET *st_throttle_serviced_ops;
    RRDSET *st_queued_ops;
    RRDSET *st_merged_ops;

    // dimension handles
    RRDDIM *rd_mem_usage;
    RRDDIM *rd_mem_failcnt;
    RRDDIM *rd_swap_usage;

    RRDDIM *rd_mem_detailed_cache;
    RRDDIM *rd_mem_detailed_rss;
    RRDDIM *rd_mem_detailed_mapped;
    RRDDIM *rd_mem_detailed_writeback;
    RRDDIM *rd_mem_detailed_pgpgin;
    RRDDIM *rd_mem_detailed_pgpgout;
    RRDDIM *rd_mem_detailed_pgfault;
    RRDDIM *rd_mem_detailed_pgmajfault;

    RRDDIM *rd_io_service_bytes_read;
    RRDDIM *rd_io_serviced_read;
    RRDDIM *rd_throttle_io_read;
    RRDDIM *rd_throttle_io_serviced_read;
    RRDDIM *rd_io_queued_read;
    RRDDIM *rd_io_merged_read;

    RRDDIM *rd_io_service_bytes_write;
    RRDDIM *rd_io_serviced_write;
    RRDDIM *rd_throttle_io_write;
    RRDDIM *rd_throttle_io_serviced_write;
    RRDDIM *rd_io_queued_write;
    RRDDIM *rd_io_merged_write;

} *cgroup_root = NULL; // head of the global list of discovered cgroups
404 // ----------------------------------------------------------------------------
405 // read values from /sys
// Parse a cpuacct.stat file ("user N" / "system N" lines) into cp->user and
// cp->system. In AUTO mode the metric is promoted to YES as soon as any
// non-zero value is observed.
static inline void cgroup_read_cpuacct_stat(struct cpuacct_stat *cp) {
    static procfile *ff = NULL; // reused across calls to avoid re-allocation

    if(likely(cp->filename)) {
        ff = procfile_reopen(ff, cp->filename, NULL, PROCFILE_FLAG_DEFAULT);

        ff = procfile_readall(ff);

        unsigned long i, lines = procfile_lines(ff);

        if(unlikely(lines < 1)) {
            error("File '%s' should have 1+ lines.", cp->filename);

        for(i = 0; i < lines ; i++) {
            char *s = procfile_lineword(ff, i, 0);
            // compare hashes first; strcmp only on a hash match
            uint32_t hash = simple_hash(s);

            if(unlikely(hash == user_hash && !strcmp(s, "user")))
                cp->user = str2ull(procfile_lineword(ff, i, 1));

            else if(unlikely(hash == system_hash && !strcmp(s, "system")))
                cp->system = str2ull(procfile_lineword(ff, i, 1));

        // AUTO -> YES once real data shows up
        if(unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO && (cp->user || cp->system)))
            cp->enabled = CONFIG_BOOLEAN_YES;
// Parse a cpuacct.usage_percpu file (one line of per-CPU cumulative counters)
// into ca->cpu_percpu, resizing the array if the CPU count changed.
// In AUTO mode the metric is promoted to YES once the total is non-zero.
static inline void cgroup_read_cpuacct_usage(struct cpuacct_usage *ca) {
    static procfile *ff = NULL; // reused across calls to avoid re-allocation

    if(likely(ca->filename)) {
        ff = procfile_reopen(ff, ca->filename, NULL, PROCFILE_FLAG_DEFAULT);

        ff = procfile_readall(ff);

        if(unlikely(procfile_lines(ff) < 1)) {
            error("File '%s' should have 1+ lines but has %zu.", ca->filename, procfile_lines(ff));

        unsigned long i = procfile_linewords(ff, 0); // words on line 0 = CPUs reported
        if(unlikely(i == 0)) {

        // we may have 1 more CPU reported
        char *s = procfile_lineword(ff, 0, i - 1);

        // CPU count changed (e.g. hotplug): resize the per-CPU array
        if(unlikely(i != ca->cpus)) {
            freez(ca->cpu_percpu);
            ca->cpu_percpu = mallocz(sizeof(unsigned long long) * i);
            ca->cpus = (unsigned int)i;

        unsigned long long total = 0;
        for(i = 0; i < ca->cpus ;i++) {
            unsigned long long n = str2ull(procfile_lineword(ff, 0, i));
            ca->cpu_percpu[i] = n;

        // AUTO -> YES once real data shows up
        if(unlikely(ca->enabled == CONFIG_BOOLEAN_AUTO && total))
            ca->enabled = CONFIG_BOOLEAN_YES;
// Parse a blkio stat file (lines of "major:minor Keyword value"), summing the
// values per keyword (Read/Write/Sync/Async/Total) across all devices.
// Zero-valued AUTO metrics are skipped for delay_counter iterations.
static inline void cgroup_read_blkio(struct blkio *io) {
    static procfile *ff = NULL; // reused across calls to avoid re-allocation

    // zero so far and in AUTO mode: wait out the delay counter before re-reading
    if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO && io->delay_counter > 0)) {

    if(likely(io->filename)) {
        ff = procfile_reopen(ff, io->filename, NULL, PROCFILE_FLAG_DEFAULT);

        ff = procfile_readall(ff);

        unsigned long i, lines = procfile_lines(ff);

        if(unlikely(lines < 1)) {
            error("File '%s' should have 1+ lines.", io->filename);

        for(i = 0; i < lines ; i++) {
            // word 1 is the keyword (word 0 is the device major:minor)
            char *s = procfile_lineword(ff, i, 1);
            uint32_t hash = simple_hash(s); // hash first, strcmp only on a match

            if(unlikely(hash == Read_hash && !strcmp(s, "Read")))
                io->Read += str2ull(procfile_lineword(ff, i, 2));

            else if(unlikely(hash == Write_hash && !strcmp(s, "Write")))
                io->Write += str2ull(procfile_lineword(ff, i, 2));

            else if(unlikely(hash == Sync_hash && !strcmp(s, "Sync")))
                io->Sync += str2ull(procfile_lineword(ff, i, 2));

            else if(unlikely(hash == Async_hash && !strcmp(s, "Async")))
                io->Async += str2ull(procfile_lineword(ff, i, 2));

            else if(unlikely(hash == Total_hash && !strcmp(s, "Total")))
                io->Total += str2ull(procfile_lineword(ff, i, 2));

        // AUTO mode: promote to YES on data, otherwise re-arm the delay counter
        if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO)) {
            if(unlikely(io->Read || io->Write))
                io->enabled = CONFIG_BOOLEAN_YES;

                io->delay_counter = cgroup_recheck_zero_blkio_every_iterations;
// Read all memory controller files for one cgroup:
//  - memory.stat (detailed metrics, parsed via an ARL for speed)
//  - memory.usage_in_bytes and memory.memsw.usage_in_bytes (single numbers)
//  - memory.failcnt (single number)
// AUTO metrics are promoted to YES on first non-zero data; zero-valued ones
// are re-checked only every N iterations (delay counters).
static inline void cgroup_read_memory(struct memory *mem) {
    static procfile *ff = NULL; // reused across calls to avoid re-allocation

    // read detailed ram usage
    if(likely(mem->filename_detailed)) {
        // zero so far and in AUTO mode: wait out the delay counter
        if(unlikely(mem->enabled_detailed == CONFIG_BOOLEAN_AUTO && mem->delay_counter_detailed > 0)) {
            mem->delay_counter_detailed--;

        ff = procfile_reopen(ff, mem->filename_detailed, NULL, PROCFILE_FLAG_DEFAULT);

            mem->updated_detailed = 0;

        ff = procfile_readall(ff);

            mem->updated_detailed = 0;

        unsigned long i, lines = procfile_lines(ff);

        if(unlikely(lines < 1)) {
            error("File '%s' should have 1+ lines.", mem->filename_detailed);
            mem->updated_detailed = 0;

        // build the ARL (keyword -> destination map) once, on first use
        if(unlikely(!mem->arl_base)) {
            mem->arl_base = arl_create("cgroup/memory", NULL, 60);

            arl_expect(mem->arl_base, "cache", &mem->cache);
            arl_expect(mem->arl_base, "rss", &mem->rss);
            arl_expect(mem->arl_base, "rss_huge", &mem->rss_huge);
            arl_expect(mem->arl_base, "mapped_file", &mem->mapped_file);
            arl_expect(mem->arl_base, "writeback", &mem->writeback);
            // keep these two entries so we can later tell whether the kernel
            // actually reported "dirty" and "swap"
            mem->arl_dirty = arl_expect(mem->arl_base, "dirty", &mem->dirty);
            mem->arl_swap  = arl_expect(mem->arl_base, "swap", &mem->swap);
            arl_expect(mem->arl_base, "pgpgin", &mem->pgpgin);
            arl_expect(mem->arl_base, "pgpgout", &mem->pgpgout);
            arl_expect(mem->arl_base, "pgfault", &mem->pgfault);
            arl_expect(mem->arl_base, "pgmajfault", &mem->pgmajfault);

        arl_begin(mem->arl_base);

        for(i = 0; i < lines ; i++) {
            // arl_check returns non-zero once all expected keywords were seen
            if(arl_check(mem->arl_base,
                    procfile_lineword(ff, i, 0),
                    procfile_lineword(ff, i, 1))) break;

        if(unlikely(mem->arl_dirty->flags & ARL_ENTRY_FLAG_FOUND))
            mem->detailed_has_dirty = 1;

        if(unlikely(mem->arl_swap->flags & ARL_ENTRY_FLAG_FOUND))
            mem->detailed_has_swap = 1;

        // fprintf(stderr, "READ: '%s', cache: %llu, rss: %llu, rss_huge: %llu, mapped_file: %llu, writeback: %llu, dirty: %llu, swap: %llu, pgpgin: %llu, pgpgout: %llu, pgfault: %llu, pgmajfault: %llu, inactive_anon: %llu, active_anon: %llu, inactive_file: %llu, active_file: %llu, unevictable: %llu, hierarchical_memory_limit: %llu, total_cache: %llu, total_rss: %llu, total_rss_huge: %llu, total_mapped_file: %llu, total_writeback: %llu, total_dirty: %llu, total_swap: %llu, total_pgpgin: %llu, total_pgpgout: %llu, total_pgfault: %llu, total_pgmajfault: %llu, total_inactive_anon: %llu, total_active_anon: %llu, total_inactive_file: %llu, total_active_file: %llu, total_unevictable: %llu\n", mem->filename, mem->cache, mem->rss, mem->rss_huge, mem->mapped_file, mem->writeback, mem->dirty, mem->swap, mem->pgpgin, mem->pgpgout, mem->pgfault, mem->pgmajfault, mem->inactive_anon, mem->active_anon, mem->inactive_file, mem->active_file, mem->unevictable, mem->hierarchical_memory_limit, mem->total_cache, mem->total_rss, mem->total_rss_huge, mem->total_mapped_file, mem->total_writeback, mem->total_dirty, mem->total_swap, mem->total_pgpgin, mem->total_pgpgout, mem->total_pgfault, mem->total_pgmajfault, mem->total_inactive_anon, mem->total_active_anon, mem->total_inactive_file, mem->total_active_file, mem->total_unevictable);

        mem->updated_detailed = 1;

        // AUTO mode: promote to YES on data, otherwise re-arm the delay counter
        if(unlikely(mem->enabled_detailed == CONFIG_BOOLEAN_AUTO)) {
            if(mem->cache || mem->dirty || mem->rss || mem->rss_huge || mem->mapped_file || mem->writeback || mem->swap || mem->pgpgin || mem->pgpgout || mem->pgfault || mem->pgmajfault)
                mem->enabled_detailed = CONFIG_BOOLEAN_YES;

                mem->delay_counter_detailed = cgroup_recheck_zero_mem_detailed_every_iterations;

    // read usage_in_bytes (read_single_number_file returns 0 on success)
    if(likely(mem->filename_usage_in_bytes)) {
        mem->updated_usage_in_bytes = !read_single_number_file(mem->filename_usage_in_bytes, &mem->usage_in_bytes);
        if(unlikely(mem->updated_usage_in_bytes && mem->enabled_usage_in_bytes == CONFIG_BOOLEAN_AUTO && mem->usage_in_bytes))
            mem->enabled_usage_in_bytes = CONFIG_BOOLEAN_YES;

    // read msw_usage_in_bytes
    if(likely(mem->filename_msw_usage_in_bytes)) {
        mem->updated_msw_usage_in_bytes = !read_single_number_file(mem->filename_msw_usage_in_bytes, &mem->msw_usage_in_bytes);
        if(unlikely(mem->updated_msw_usage_in_bytes && mem->enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_AUTO && mem->msw_usage_in_bytes))
            mem->enabled_msw_usage_in_bytes = CONFIG_BOOLEAN_YES;

    // read failcnt, with the same AUTO/delay-counter treatment
    if(likely(mem->filename_failcnt)) {
        if(unlikely(mem->enabled_failcnt == CONFIG_BOOLEAN_AUTO && mem->delay_counter_failcnt > 0)) {
            mem->updated_failcnt = 0;
            mem->delay_counter_failcnt--;

            mem->updated_failcnt = !read_single_number_file(mem->filename_failcnt, &mem->failcnt);
            if(unlikely(mem->updated_failcnt && mem->enabled_failcnt == CONFIG_BOOLEAN_AUTO)) {
                if(unlikely(!mem->failcnt))
                    mem->delay_counter_failcnt = cgroup_recheck_zero_mem_failcnt_every_iterations;

                    mem->enabled_failcnt = CONFIG_BOOLEAN_YES;
// Collect all metric families for one cgroup, in a fixed order.
static inline void cgroup_read(struct cgroup *cg) {
    debug(D_CGROUP, "reading metrics for cgroups '%s'", cg->id);

    cgroup_read_cpuacct_stat(&cg->cpuacct_stat);
    cgroup_read_cpuacct_usage(&cg->cpuacct_usage);
    cgroup_read_memory(&cg->memory);
    cgroup_read_blkio(&cg->io_service_bytes);
    cgroup_read_blkio(&cg->io_serviced);
    cgroup_read_blkio(&cg->throttle_io_service_bytes);
    cgroup_read_blkio(&cg->throttle_io_serviced);
    cgroup_read_blkio(&cg->io_merged);
    cgroup_read_blkio(&cg->io_queued);
// Walk the global cgroup list and collect metrics for every cgroup that is
// both enabled (by config) and available (still present in the filesystem).
static inline void read_all_cgroups(struct cgroup *root) {
    debug(D_CGROUP, "reading metrics for all cgroups");

    for(cg = root; cg ; cg = cg->next)
        if(cg->enabled && cg->available)
712 // ----------------------------------------------------------------------------
713 // add/remove/find cgroup objects
715 #define CGROUP_CHARTID_LINE_MAX 1024
// Duplicate a cgroup path into a chart title: empty/NULL becomes "/", a
// leading slash is stripped (unless the whole path is "/"), and the copy is
// sanitized with netdata_fix_chart_name(). Caller owns the returned string.
static inline char *cgroup_title_strdupz(const char *s) {
    if(!s || !*s) s = "/";

    if(*s == '/' && s[1] != '\0') s++;

    char *r = strdupz(s);
    netdata_fix_chart_name(r);
// Same as cgroup_title_strdupz() but sanitized with netdata_fix_chart_id(),
// producing a valid chart id. Caller owns the returned string.
static inline char *cgroup_chart_id_strdupz(const char *s) {
    if(!s || !*s) s = "/";

    if(*s == '/' && s[1] != '\0') s++;

    char *r = strdupz(s);
    netdata_fix_chart_id(r);
// Run the external rename script ("script to get cgroup names") with the
// current chart id as argument; if it prints a non-empty name, replace the
// cgroup's chart title and chart id with the sanitized result.
static inline void cgroup_get_chart_name(struct cgroup *cg) {
    debug(D_CGROUP, "looking for the name of cgroup '%s' with chart id '%s' and title '%s'", cg->id, cg->chart_id, cg->chart_title);

    char buffer[CGROUP_CHARTID_LINE_MAX + 1];

    snprintfz(buffer, CGROUP_CHARTID_LINE_MAX, "exec %s '%s'", cgroups_rename_script, cg->chart_id);

    debug(D_CGROUP, "executing command '%s' for cgroup '%s'", buffer, cg->id);
    FILE *fp = mypopen(buffer, &cgroup_pid);

        // debug(D_CGROUP, "reading from command '%s' for cgroup '%s'", buffer, cg->id);
        // read the first output line only; buffer is reused for it
        char *s = fgets(buffer, CGROUP_CHARTID_LINE_MAX, fp);
        // debug(D_CGROUP, "closing command for cgroup '%s'", cg->id);
        mypclose(fp, cgroup_pid);
        // debug(D_CGROUP, "closed command for cgroup '%s'", cg->id);

        if(s && *s && *s != '\n') {
            debug(D_CGROUP, "cgroup '%s' should be renamed to '%s'", cg->id, s);

            // replace title and id with the script's answer
            freez(cg->chart_title);
            cg->chart_title = cgroup_title_strdupz(s);

            cg->chart_id = cgroup_chart_id_strdupz(s);
            cg->hash_chart = simple_hash(cg->chart_id);

        error("CGROUP: Cannot popen(\"%s\", \"r\").", buffer);
// Create a new cgroup object for the given id, append it to the global list,
// resolve its chart name (rename script / systemd-service naming), apply the
// per-cgroup enable setting, and disable duplicates sharing the same chart id.
static inline struct cgroup *cgroup_add(const char *id) {
    if(!id || !*id) id = "/";
    debug(D_CGROUP, "adding to list, cgroup with id '%s'", id);

    // enforce the configured hard cap on tracked cgroups
    if(cgroup_root_count >= cgroup_root_max) {
        info("Maximum number of cgroups reached (%d). Not adding cgroup '%s'", cgroup_root_count, id);

    // default enabled state: pattern match AND runtime-detection allowed
    int def = simple_pattern_matches(enabled_cgroup_patterns, id)?cgroup_enable_new_cgroups_detected_at_runtime:0;
    struct cgroup *cg = callocz(1, sizeof(struct cgroup));

    cg->id = strdupz(id);
    cg->hash = simple_hash(cg->id);

    cg->chart_title = cgroup_title_strdupz(id);

    cg->chart_id = cgroup_chart_id_strdupz(id);
    cg->hash_chart = simple_hash(cg->chart_id);

    // append to the end of the global list
    for(e = cgroup_root; e->next ;e = e->next) ;

    // fix the chart_id and title by calling the external script
    if(simple_pattern_matches(enabled_cgroup_renames, cg->id)) {

        cgroup_get_chart_name(cg);

        debug(D_CGROUP, "cgroup '%s' renamed to '%s' (title: '%s')", cg->id, cg->chart_id, cg->chart_title);

        debug(D_CGROUP, "cgroup '%s' will not be renamed - it matches the list of disabled cgroup renames (will be shown as '%s')", cg->id, cg->chart_id);

    int user_configurable = 1;

    // check if this cgroup should be a systemd service
    if(cgroup_enable_systemd_services) {
        if(simple_pattern_matches(systemd_services_cgroups, cg->id) ||
                simple_pattern_matches(systemd_services_cgroups, cg->chart_id)) {
            debug(D_CGROUP, "cgroup '%s' with chart id '%s' (title: '%s') matches systemd services cgroups", cg->id, cg->chart_id, cg->chart_title);

            char buffer[CGROUP_CHARTID_LINE_MAX + 1];
            cg->options |= CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE;

            strncpy(buffer, cg->id, CGROUP_CHARTID_LINE_MAX);

            //freez(cg->chart_id);
            //cg->chart_id = cgroup_chart_id_strdupz(s);
            //cg->hash_chart = simple_hash(cg->chart_id);

            // skip to the last slash
            size_t len = strlen(s);
            while(len--) if(unlikely(s[len] == '/')) break;
            if(len) s = &s[len + 1];

            // strip the trailing extension (text after the last dot)
            while(len--) if(unlikely(s[len] == '.')) break;
            if(len) s[len] = '\0';

            freez(cg->chart_title);
            cg->chart_title = cgroup_title_strdupz(s);

            // systemd services are not individually configurable
            user_configurable = 0;

            debug(D_CGROUP, "cgroup '%s' renamed to '%s' (title: '%s')", cg->id, cg->chart_id, cg->chart_title);

            debug(D_CGROUP, "cgroup '%s' with chart id '%s' (title: '%s') does not match systemd services groups", cg->id, cg->chart_id, cg->chart_title);

    if(user_configurable) {
        // allow the user to enable/disable this individually
        char option[FILENAME_MAX + 1];
        snprintfz(option, FILENAME_MAX, "enable cgroup %s", cg->chart_title);
        cg->enabled = (char) config_get_boolean("plugin:cgroups", option, def);

    // detect duplicate cgroups (same chart id from a different cgroup id)
    for (t = cgroup_root; t; t = t->next) {
        if (t != cg && t->enabled && t->hash_chart == cg->hash_chart && !strcmp(t->chart_id, cg->chart_id)) {
            // special case: prefer the /init.scope/system.slice variant over
            // the plain /system.slice one by swapping which copy is enabled
            if (!strncmp(t->chart_id, "/system.slice/", 14) && !strncmp(cg->chart_id, "/init.scope/system.slice/", 25)) {
                error("Control group with chart id '%s' already exists with id '%s' and is enabled. Swapping them by enabling cgroup with id '%s' and disabling cgroup with id '%s'.",
                      cg->chart_id, t->id, cg->id, t->id);
                debug(D_CGROUP, "Control group with chart id '%s' already exists with id '%s' and is enabled. Swapping them by enabling cgroup with id '%s' and disabling cgroup with id '%s'.",
                      cg->chart_id, t->id, cg->id, t->id);

                t->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;

                error("Control group with chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
                      cg->chart_id, t->id, cg->id);
                debug(D_CGROUP, "Control group with chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
                      cg->chart_id, t->id, cg->id);

                cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;

    debug(D_CGROUP, "ADDED CGROUP: '%s' with chart id '%s' and title '%s' as %s (default was %s)", cg->id, cg->chart_id, cg->chart_title, (cg->enabled)?"enabled":"disabled", (def)?"enabled":"disabled");
// Release one cgroup object: mark all its charts obsolete (so the rrd layer
// can clean them up) and free all heap memory it owns.
static inline void cgroup_free(struct cgroup *cg) {
    debug(D_CGROUP, "Removing cgroup '%s' with chart id '%s' (was %s and %s)", cg->id, cg->chart_id, (cg->enabled)?"enabled":"disabled", (cg->available)?"available":"not available");

    // flag every chart obsolete; the rrd engine removes them later
    if(cg->st_cpu)                   rrdset_flag_set(cg->st_cpu,                   RRDSET_FLAG_OBSOLETE);
    if(cg->st_cpu_per_core)          rrdset_flag_set(cg->st_cpu_per_core,          RRDSET_FLAG_OBSOLETE);
    if(cg->st_mem)                   rrdset_flag_set(cg->st_mem,                   RRDSET_FLAG_OBSOLETE);
    if(cg->st_writeback)             rrdset_flag_set(cg->st_writeback,             RRDSET_FLAG_OBSOLETE);
    if(cg->st_mem_activity)          rrdset_flag_set(cg->st_mem_activity,          RRDSET_FLAG_OBSOLETE);
    if(cg->st_pgfaults)              rrdset_flag_set(cg->st_pgfaults,              RRDSET_FLAG_OBSOLETE);
    if(cg->st_mem_usage)             rrdset_flag_set(cg->st_mem_usage,             RRDSET_FLAG_OBSOLETE);
    if(cg->st_mem_failcnt)           rrdset_flag_set(cg->st_mem_failcnt,           RRDSET_FLAG_OBSOLETE);
    if(cg->st_io)                    rrdset_flag_set(cg->st_io,                    RRDSET_FLAG_OBSOLETE);
    if(cg->st_serviced_ops)          rrdset_flag_set(cg->st_serviced_ops,          RRDSET_FLAG_OBSOLETE);
    if(cg->st_throttle_io)           rrdset_flag_set(cg->st_throttle_io,           RRDSET_FLAG_OBSOLETE);
    if(cg->st_throttle_serviced_ops) rrdset_flag_set(cg->st_throttle_serviced_ops, RRDSET_FLAG_OBSOLETE);
    if(cg->st_queued_ops)            rrdset_flag_set(cg->st_queued_ops,            RRDSET_FLAG_OBSOLETE);
    if(cg->st_merged_ops)            rrdset_flag_set(cg->st_merged_ops,            RRDSET_FLAG_OBSOLETE);

    // free everything the collectors allocated
    freez(cg->cpuacct_usage.cpu_percpu);

    freez(cg->cpuacct_stat.filename);
    freez(cg->cpuacct_usage.filename);

    arl_free(cg->memory.arl_base);
    freez(cg->memory.filename_detailed);
    freez(cg->memory.filename_failcnt);
    freez(cg->memory.filename_usage_in_bytes);
    freez(cg->memory.filename_msw_usage_in_bytes);

    freez(cg->io_service_bytes.filename);
    freez(cg->io_serviced.filename);

    freez(cg->throttle_io_service_bytes.filename);
    freez(cg->throttle_io_serviced.filename);

    freez(cg->io_merged.filename);
    freez(cg->io_queued.filename);

    freez(cg->chart_title);
// find if a given cgroup exists
// Linear scan of the global list, comparing the pre-computed hash first and
// falling back to strcmp only on a hash match. Returns NULL when not found.
static inline struct cgroup *cgroup_find(const char *id) {
    debug(D_CGROUP, "searching for cgroup '%s'", id);

    uint32_t hash = simple_hash(id);

    for(cg = cgroup_root; cg ; cg = cg->next) {
        if(hash == cg->hash && strcmp(id, cg->id) == 0)

    debug(D_CGROUP, "cgroup '%s' %s in memory", id, (cg)?"found":"not found");
956 // ----------------------------------------------------------------------------
957 // detect running cgroups
959 // callback for find_file_in_subdirs()
// Callback invoked for every directory discovered under a cgroup base path:
// look it up in the global list, add it if new (honoring the max-depth
// limit by counting '/' in the relative path), and mark it available.
static inline void found_subdir_in_dir(const char *dir) {
    debug(D_CGROUP, "examining cgroup dir '%s'", dir);

    struct cgroup *cg = cgroup_find(dir);

    if(*dir && cgroup_max_depth > 0) {

        // depth = number of '/' separators in the relative path
        for(s = dir; *s ;s++)
            if(unlikely(*s == '/'))

        if(depth > cgroup_max_depth) {
            info("cgroup '%s' is too deep (%d, while max is %d)", dir, depth, cgroup_max_depth);

    // debug(D_CGROUP, "will add dir '%s' as cgroup", dir);
    cg = cgroup_add(dir);

    if(cg) cg->available = 1;
// Recursively walk 'this' (a directory under 'base'), invoking callback()
// with each directory's path relative to 'base'. Descent into a subtree is
// controlled by the "search for cgroups in subpaths matching" patterns and
// a per-path config override. Returns a count (negative on open error).
static inline int find_dir_in_subdirs(const char *base, const char *this, void (*callback)(const char *)) {
    if(!this) this = base; // top-level call starts at the base itself
    debug(D_CGROUP, "searching for directories in '%s' (base '%s')", this?this:"", base);

    size_t dirlen = strlen(this), baselen = strlen(base);

    // the part of 'this' below 'base'; "/" for the base itself
    const char *relative_path = &this[baselen];
    if(!*relative_path) relative_path = "/";

    DIR *dir = opendir(this);

        error("Cannot read cgroups directory '%s'", base);

    callback(relative_path);

    struct dirent *de = NULL;
    while((de = readdir(dir))) {
        // skip "." and ".."
        if(de->d_type == DT_DIR
            (de->d_name[0] == '.' && de->d_name[1] == '\0')
            || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')

        if(de->d_type == DT_DIR) {

            const char *r = relative_path;
            if(*r == '\0') r = "/";

            // do not descend into directories we are not interested in
            int def = simple_pattern_matches(enabled_cgroup_paths, r);

            // we check for this option here
            // so that the config will not have settings
            // for leaf directories
            char option[FILENAME_MAX + 1];
            snprintfz(option, FILENAME_MAX, "search for cgroups under %s", r);
            option[FILENAME_MAX] = '\0';
            enabled = config_get_boolean("plugin:cgroups", option, def);

                // recurse into "<this>/<name>"
                char *s = mallocz(dirlen + strlen(de->d_name) + 2);

                strcat(s, de->d_name);
                int ret2 = find_dir_in_subdirs(base, s, callback);
                if(ret2 > 0) ret += ret2;
// Reset the 'available' flag on every cgroup in the global list before a
// filesystem rescan; found_subdir_in_dir() re-sets the flag for every
// cgroup that still exists, so anything left unmarked afterwards can be
// removed by cleanup_all_cgroups().
1048 static inline void mark_all_cgroups_as_not_available() {
1049 debug(D_CGROUP, "marking all cgroups as not available");
1053 // mark all as not available
1054 for(cg = cgroup_root; cg ; cg = cg->next) {
// Remove from the global list every cgroup that the last rescan did not mark
// as available. Before unlinking a disappearing cgroup, scan the list for a
// still-available duplicate (same chart_id / hash_chart) that was previously
// disabled with CGROUP_OPTIONS_DISABLED_DUPLICATE, and re-enable it so its
// charts take over from the one that stopped. 'last' tracks the previous
// node so unlinking works both at the head (cgroup_root) and mid-list.
1059 static inline void cleanup_all_cgroups() {
1060 struct cgroup *cg = cgroup_root, *last = NULL;
1063 if(!cg->available) {
1064 // enable the first duplicate cgroup
1067 for(t = cgroup_root; t ; t = t->next) {
1068 if(t != cg && t->available && !t->enabled && t->options & CGROUP_OPTIONS_DISABLED_DUPLICATE && t->hash_chart == cg->hash_chart && !strcmp(t->chart_id, cg->chart_id)) {
1069 debug(D_CGROUP, "Enabling duplicate of cgroup '%s' with id '%s', because the original with id '%s' stopped.", t->chart_id, t->id, cg->id);
1071 t->options &= ~CGROUP_OPTIONS_DISABLED_DUPLICATE;
// unlink 'cg': head removal updates cgroup_root, otherwise bridge 'last'
1078 cgroup_root = cg->next;
1080 last->next = cg->next;
// Full discovery pass over all enabled cgroup controllers:
//   1. mark every known cgroup as not available;
//   2. walk each enabled controller hierarchy (cpuacct, blkio, memory,
//      devices) with find_dir_in_subdirs(); a walk failure (-1) permanently
//      disables that whole controller family;
//   3. remove cgroups that are no longer present (cleanup_all_cgroups);
//   4. for every remaining available cgroup, resolve and cache the filenames
//      of the metric files it exposes (stat() probe; the filename is only
//      stored the first time it is found).
1096 static inline void find_all_cgroups() {
1097 debug(D_CGROUP, "searching for cgroups");
1099 mark_all_cgroups_as_not_available();
// ---- step 2: walk each controller, disabling the family on failure ----
1101 if(cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_usage) {
1102 if(find_dir_in_subdirs(cgroup_cpuacct_base, NULL, found_subdir_in_dir) == -1) {
1103 cgroup_enable_cpuacct_stat =
1104 cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_NO;
1105 error("disabled CGROUP cpu statistics.");
1109 if(cgroup_enable_blkio_io || cgroup_enable_blkio_ops || cgroup_enable_blkio_throttle_io || cgroup_enable_blkio_throttle_ops || cgroup_enable_blkio_merged_ops || cgroup_enable_blkio_queued_ops) {
1110 if(find_dir_in_subdirs(cgroup_blkio_base, NULL, found_subdir_in_dir) == -1) {
1111 cgroup_enable_blkio_io =
1112 cgroup_enable_blkio_ops =
1113 cgroup_enable_blkio_throttle_io =
1114 cgroup_enable_blkio_throttle_ops =
1115 cgroup_enable_blkio_merged_ops =
1116 cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_NO;
1117 error("disabled CGROUP blkio statistics.");
1121 if(cgroup_enable_memory || cgroup_enable_detailed_memory || cgroup_enable_swap || cgroup_enable_memory_failcnt) {
1122 if(find_dir_in_subdirs(cgroup_memory_base, NULL, found_subdir_in_dir) == -1) {
1123 cgroup_enable_memory =
1124 cgroup_enable_detailed_memory =
1125 cgroup_enable_swap =
1126 cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_NO;
1127 error("disabled CGROUP memory statistics.");
1131 if(cgroup_search_in_devices) {
1132 if(find_dir_in_subdirs(cgroup_devices_base, NULL, found_subdir_in_dir) == -1) {
1133 cgroup_search_in_devices = 0;
1134 error("disabled CGROUP devices statistics.");
1138 // remove any non-existing cgroups
1139 cleanup_all_cgroups();
// ---- step 4: resolve metric filenames for each available cgroup ----
1143 for(cg = cgroup_root; cg ; cg = cg->next) {
1144 // fprintf(stderr, " >>> CGROUP '%s' (%u - %s) with name '%s'\n", cg->id, cg->hash, cg->available?"available":"stopped", cg->name);
1146 if(unlikely(!cg->available))
1149 debug(D_CGROUP, "checking paths for cgroup '%s'", cg->id);
1151 // check for newly added cgroups
1152 // and update the filenames they read
1153 char filename[FILENAME_MAX + 1];
// cpuacct.stat: per-cgroup user/system CPU time
1154 if(unlikely(cgroup_enable_cpuacct_stat && !cg->cpuacct_stat.filename)) {
1155 snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.stat", cgroup_cpuacct_base, cg->id);
1156 if(likely(stat(filename, &buf) != -1)) {
1157 cg->cpuacct_stat.filename = strdupz(filename);
1158 cg->cpuacct_stat.enabled = cgroup_enable_cpuacct_stat;
1159 debug(D_CGROUP, "cpuacct.stat filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_stat.filename);
1162 debug(D_CGROUP, "cpuacct.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename);
// cpuacct.usage_percpu: per-core usage; skipped for systemd slice services
1165 if(unlikely(cgroup_enable_cpuacct_usage && !cg->cpuacct_usage.filename && !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE))) {
1166 snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.usage_percpu", cgroup_cpuacct_base, cg->id);
1167 if(likely(stat(filename, &buf) != -1)) {
1168 cg->cpuacct_usage.filename = strdupz(filename);
1169 cg->cpuacct_usage.enabled = cgroup_enable_cpuacct_usage;
1170 debug(D_CGROUP, "cpuacct.usage_percpu filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_usage.filename);
1173 debug(D_CGROUP, "cpuacct.usage_percpu file for cgroup '%s': '%s' does not exist.", cg->id, filename);
// memory.stat: detailed memory breakdown — also needed when reporting
// "used memory without cache", since that requires the 'cache' value
1176 if(unlikely((cgroup_enable_detailed_memory || cgroup_used_memory_without_cache) && !cg->memory.filename_detailed && (cgroup_used_memory_without_cache || cgroup_enable_systemd_services_detailed_memory || !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)))) {
1177 snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_memory_base, cg->id);
1178 if(likely(stat(filename, &buf) != -1)) {
1179 cg->memory.filename_detailed = strdupz(filename);
// AUTO (not YES) when the file exists but detailed charts were not
// explicitly requested — the chart is created only if data warrants it
1180 cg->memory.enabled_detailed = (cgroup_enable_detailed_memory == CONFIG_BOOLEAN_YES)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_AUTO;
1181 debug(D_CGROUP, "memory.stat filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_detailed);
1184 debug(D_CGROUP, "memory.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename);
// memory.usage_in_bytes: total memory usage
1187 if(unlikely(cgroup_enable_memory && !cg->memory.filename_usage_in_bytes)) {
1188 snprintfz(filename, FILENAME_MAX, "%s%s/memory.usage_in_bytes", cgroup_memory_base, cg->id);
1189 if(likely(stat(filename, &buf) != -1)) {
1190 cg->memory.filename_usage_in_bytes = strdupz(filename);
1191 cg->memory.enabled_usage_in_bytes = cgroup_enable_memory;
1192 debug(D_CGROUP, "memory.usage_in_bytes filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_usage_in_bytes);
1195 debug(D_CGROUP, "memory.usage_in_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
// memory.msw_usage_in_bytes: memory+swap usage (swap charting)
1198 if(unlikely(cgroup_enable_swap && !cg->memory.filename_msw_usage_in_bytes)) {
1199 snprintfz(filename, FILENAME_MAX, "%s%s/memory.msw_usage_in_bytes", cgroup_memory_base, cg->id);
1200 if(likely(stat(filename, &buf) != -1)) {
1201 cg->memory.filename_msw_usage_in_bytes = strdupz(filename);
1202 cg->memory.enabled_msw_usage_in_bytes = cgroup_enable_swap;
1203 debug(D_CGROUP, "memory.msw_usage_in_bytes filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_msw_usage_in_bytes);
1206 debug(D_CGROUP, "memory.msw_usage_in_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
// memory.failcnt: count of memory limit hits
1209 if(unlikely(cgroup_enable_memory_failcnt && !cg->memory.filename_failcnt)) {
1210 snprintfz(filename, FILENAME_MAX, "%s%s/memory.failcnt", cgroup_memory_base, cg->id);
1211 if(likely(stat(filename, &buf) != -1)) {
1212 cg->memory.filename_failcnt = strdupz(filename);
1213 cg->memory.enabled_failcnt = cgroup_enable_memory_failcnt;
1214 debug(D_CGROUP, "memory.failcnt filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_failcnt);
1217 debug(D_CGROUP, "memory.failcnt file for cgroup '%s': '%s' does not exist.", cg->id, filename);
// blkio.* files: I/O bytes, operations, throttling, merges and queue depth
1220 if(unlikely(cgroup_enable_blkio_io && !cg->io_service_bytes.filename)) {
1221 snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_service_bytes", cgroup_blkio_base, cg->id);
1222 if(likely(stat(filename, &buf) != -1)) {
1223 cg->io_service_bytes.filename = strdupz(filename);
1224 cg->io_service_bytes.enabled = cgroup_enable_blkio_io;
1225 debug(D_CGROUP, "io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->io_service_bytes.filename);
1228 debug(D_CGROUP, "io_service_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
1231 if(unlikely(cgroup_enable_blkio_ops && !cg->io_serviced.filename)) {
1232 snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_serviced", cgroup_blkio_base, cg->id);
1233 if(likely(stat(filename, &buf) != -1)) {
1234 cg->io_serviced.filename = strdupz(filename);
1235 cg->io_serviced.enabled = cgroup_enable_blkio_ops;
1236 debug(D_CGROUP, "io_serviced filename for cgroup '%s': '%s'", cg->id, cg->io_serviced.filename);
1239 debug(D_CGROUP, "io_serviced file for cgroup '%s': '%s' does not exist.", cg->id, filename);
1242 if(unlikely(cgroup_enable_blkio_throttle_io && !cg->throttle_io_service_bytes.filename)) {
1243 snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_service_bytes", cgroup_blkio_base, cg->id);
1244 if(likely(stat(filename, &buf) != -1)) {
1245 cg->throttle_io_service_bytes.filename = strdupz(filename);
1246 cg->throttle_io_service_bytes.enabled = cgroup_enable_blkio_throttle_io;
1247 debug(D_CGROUP, "throttle_io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_service_bytes.filename);
1250 debug(D_CGROUP, "throttle_io_service_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
1253 if(unlikely(cgroup_enable_blkio_throttle_ops && !cg->throttle_io_serviced.filename)) {
1254 snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_serviced", cgroup_blkio_base, cg->id);
1255 if(likely(stat(filename, &buf) != -1)) {
1256 cg->throttle_io_serviced.filename = strdupz(filename);
1257 cg->throttle_io_serviced.enabled = cgroup_enable_blkio_throttle_ops;
1258 debug(D_CGROUP, "throttle_io_serviced filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_serviced.filename);
1261 debug(D_CGROUP, "throttle_io_serviced file for cgroup '%s': '%s' does not exist.", cg->id, filename);
1264 if(unlikely(cgroup_enable_blkio_merged_ops && !cg->io_merged.filename)) {
1265 snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_merged", cgroup_blkio_base, cg->id);
1266 if(likely(stat(filename, &buf) != -1)) {
1267 cg->io_merged.filename = strdupz(filename);
1268 cg->io_merged.enabled = cgroup_enable_blkio_merged_ops;
1269 debug(D_CGROUP, "io_merged filename for cgroup '%s': '%s'", cg->id, cg->io_merged.filename);
1272 debug(D_CGROUP, "io_merged file for cgroup '%s': '%s' does not exist.", cg->id, filename);
1275 if(unlikely(cgroup_enable_blkio_queued_ops && !cg->io_queued.filename)) {
1276 snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_queued", cgroup_blkio_base, cg->id);
1277 if(likely(stat(filename, &buf) != -1)) {
1278 cg->io_queued.filename = strdupz(filename);
1279 cg->io_queued.enabled = cgroup_enable_blkio_queued_ops;
1280 debug(D_CGROUP, "io_queued filename for cgroup '%s': '%s'", cg->id, cg->io_queued.filename);
1283 debug(D_CGROUP, "io_queued file for cgroup '%s': '%s' does not exist.", cg->id, filename);
1287 debug(D_CGROUP, "done searching for cgroups");
1291 // ----------------------------------------------------------------------------
1294 #define CHART_TITLE_MAX 300
// Render all "Systemd Services" charts (one chart per metric family, one
// dimension per service cgroup). The do_* flags select which families are
// active this cycle. Works in three phases:
//   1. lazily create each enabled chart on first use (RRDSET pointers are
//      static, so they persist across calls), otherwise advance it with
//      rrdset_next();
//   2. iterate all cgroups flagged CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE,
//      lazily adding a dimension per service and setting its value;
//   3. finalize every active chart with rrdset_done().
1296 void update_systemd_services_charts(
1300 , int do_mem_detailed
1301 , int do_mem_failcnt
1305 , int do_throttle_io
1306 , int do_throttle_ops
// static chart handles: created once, reused on every subsequent call
1312 *st_mem_usage = NULL,
1313 *st_mem_failcnt = NULL,
1314 *st_swap_usage = NULL,
1316 *st_mem_detailed_cache = NULL,
1317 *st_mem_detailed_rss = NULL,
1318 *st_mem_detailed_mapped = NULL,
1319 *st_mem_detailed_writeback = NULL,
1320 *st_mem_detailed_pgfault = NULL,
1321 *st_mem_detailed_pgmajfault = NULL,
1322 *st_mem_detailed_pgpgin = NULL,
1323 *st_mem_detailed_pgpgout = NULL,
1326 *st_io_serviced_read = NULL,
1327 *st_throttle_io_read = NULL,
1328 *st_throttle_ops_read = NULL,
1329 *st_queued_ops_read = NULL,
1330 *st_merged_ops_read = NULL,
1332 *st_io_write = NULL,
1333 *st_io_serviced_write = NULL,
1334 *st_throttle_io_write = NULL,
1335 *st_throttle_ops_write = NULL,
1336 *st_queued_ops_write = NULL,
1337 *st_merged_ops_write = NULL;
1339 // create the charts
1341 if(likely(do_cpu)) {
1342 if(unlikely(!st_cpu)) {
1343 char title[CHART_TITLE_MAX + 1];
1344 snprintfz(title, CHART_TITLE_MAX, "Systemd Services CPU utilization (%d%% = %d core%s)", (processors * 100), processors, (processors > 1) ? "s" : "");
1346 st_cpu = rrdset_create_localhost(
1354 , CHART_PRIORITY_SYSTEMD_SERVICES
1356 , RRDSET_TYPE_STACKED
1361 rrdset_next(st_cpu);
1364 if(likely(do_mem_usage)) {
1365 if(unlikely(!st_mem_usage)) {
1367 st_mem_usage = rrdset_create_localhost(
1372 , "services.mem_usage"
// title reflects whether the cache portion is subtracted from usage
1373 , (cgroup_used_memory_without_cache) ? "Systemd Services Used Memory without Cache"
1374 : "Systemd Services Used Memory"
1376 , CHART_PRIORITY_SYSTEMD_SERVICES + 10
1378 , RRDSET_TYPE_STACKED
1383 rrdset_next(st_mem_usage);
1386 if(likely(do_mem_detailed)) {
1387 if(unlikely(!st_mem_detailed_rss)) {
1389 st_mem_detailed_rss = rrdset_create_localhost(
1394 , "services.mem_rss"
1395 , "Systemd Services RSS Memory"
1397 , CHART_PRIORITY_SYSTEMD_SERVICES + 20
1399 , RRDSET_TYPE_STACKED
1404 rrdset_next(st_mem_detailed_rss);
1406 if(unlikely(!st_mem_detailed_mapped)) {
1408 st_mem_detailed_mapped = rrdset_create_localhost(
1413 , "services.mem_mapped"
1414 , "Systemd Services Mapped Memory"
1416 , CHART_PRIORITY_SYSTEMD_SERVICES + 30
1418 , RRDSET_TYPE_STACKED
1423 rrdset_next(st_mem_detailed_mapped);
1425 if(unlikely(!st_mem_detailed_cache)) {
1427 st_mem_detailed_cache = rrdset_create_localhost(
1432 , "services.mem_cache"
1433 , "Systemd Services Cache Memory"
1435 , CHART_PRIORITY_SYSTEMD_SERVICES + 40
1437 , RRDSET_TYPE_STACKED
1442 rrdset_next(st_mem_detailed_cache);
1444 if(unlikely(!st_mem_detailed_writeback)) {
1446 st_mem_detailed_writeback = rrdset_create_localhost(
1451 , "services.mem_writeback"
1452 , "Systemd Services Writeback Memory"
1454 , CHART_PRIORITY_SYSTEMD_SERVICES + 50
1456 , RRDSET_TYPE_STACKED
1461 rrdset_next(st_mem_detailed_writeback);
1463 if(unlikely(!st_mem_detailed_pgfault)) {
1465 st_mem_detailed_pgfault = rrdset_create_localhost(
1470 , "services.mem_pgfault"
1471 , "Systemd Services Memory Minor Page Faults"
1473 , CHART_PRIORITY_SYSTEMD_SERVICES + 60
1475 , RRDSET_TYPE_STACKED
1479 rrdset_next(st_mem_detailed_pgfault);
1481 if(unlikely(!st_mem_detailed_pgmajfault)) {
1483 st_mem_detailed_pgmajfault = rrdset_create_localhost(
1488 , "services.mem_pgmajfault"
1489 , "Systemd Services Memory Major Page Faults"
1491 , CHART_PRIORITY_SYSTEMD_SERVICES + 70
1493 , RRDSET_TYPE_STACKED
1498 rrdset_next(st_mem_detailed_pgmajfault);
1500 if(unlikely(!st_mem_detailed_pgpgin)) {
1502 st_mem_detailed_pgpgin = rrdset_create_localhost(
1507 , "services.mem_pgpgin"
1508 , "Systemd Services Memory Charging Activity"
1510 , CHART_PRIORITY_SYSTEMD_SERVICES + 80
1512 , RRDSET_TYPE_STACKED
1517 rrdset_next(st_mem_detailed_pgpgin);
1519 if(unlikely(!st_mem_detailed_pgpgout)) {
1521 st_mem_detailed_pgpgout = rrdset_create_localhost(
1526 , "services.mem_pgpgout"
1527 , "Systemd Services Memory Uncharging Activity"
1529 , CHART_PRIORITY_SYSTEMD_SERVICES + 90
1531 , RRDSET_TYPE_STACKED
1536 rrdset_next(st_mem_detailed_pgpgout);
1539 if(likely(do_mem_failcnt)) {
1540 if(unlikely(!st_mem_failcnt)) {
1542 st_mem_failcnt = rrdset_create_localhost(
1547 , "services.mem_failcnt"
1548 , "Systemd Services Memory Limit Failures"
1550 , CHART_PRIORITY_SYSTEMD_SERVICES + 110
1552 , RRDSET_TYPE_STACKED
1557 rrdset_next(st_mem_failcnt);
1560 if(likely(do_swap_usage)) {
1561 if(unlikely(!st_swap_usage)) {
1563 st_swap_usage = rrdset_create_localhost(
1568 , "services.swap_usage"
1569 , "Systemd Services Swap Memory Used"
1571 , CHART_PRIORITY_SYSTEMD_SERVICES + 100
1573 , RRDSET_TYPE_STACKED
1578 rrdset_next(st_swap_usage);
1582 if(unlikely(!st_io_read)) {
1584 st_io_read = rrdset_create_localhost(
1589 , "services.io_read"
1590 , "Systemd Services Disk Read Bandwidth"
1592 , CHART_PRIORITY_SYSTEMD_SERVICES + 120
1594 , RRDSET_TYPE_STACKED
1599 rrdset_next(st_io_read);
1601 if(unlikely(!st_io_write)) {
1603 st_io_write = rrdset_create_localhost(
1608 , "services.io_write"
1609 , "Systemd Services Disk Write Bandwidth"
1611 , CHART_PRIORITY_SYSTEMD_SERVICES + 130
1613 , RRDSET_TYPE_STACKED
1618 rrdset_next(st_io_write);
1621 if(likely(do_io_ops)) {
1622 if(unlikely(!st_io_serviced_read)) {
1624 st_io_serviced_read = rrdset_create_localhost(
1629 , "services.io_ops_read"
1630 , "Systemd Services Disk Read Operations"
1632 , CHART_PRIORITY_SYSTEMD_SERVICES + 140
1634 , RRDSET_TYPE_STACKED
1639 rrdset_next(st_io_serviced_read);
1641 if(unlikely(!st_io_serviced_write)) {
1643 st_io_serviced_write = rrdset_create_localhost(
1648 , "services.io_ops_write"
1649 , "Systemd Services Disk Write Operations"
1651 , CHART_PRIORITY_SYSTEMD_SERVICES + 150
1653 , RRDSET_TYPE_STACKED
1658 rrdset_next(st_io_serviced_write);
1661 if(likely(do_throttle_io)) {
1662 if(unlikely(!st_throttle_io_read)) {
1664 st_throttle_io_read = rrdset_create_localhost(
1666 , "throttle_io_read"
1669 , "services.throttle_io_read"
1670 , "Systemd Services Throttle Disk Read Bandwidth"
1672 , CHART_PRIORITY_SYSTEMD_SERVICES + 160
1674 , RRDSET_TYPE_STACKED
1679 rrdset_next(st_throttle_io_read);
1681 if(unlikely(!st_throttle_io_write)) {
1683 st_throttle_io_write = rrdset_create_localhost(
1685 , "throttle_io_write"
1688 , "services.throttle_io_write"
1689 , "Systemd Services Throttle Disk Write Bandwidth"
1691 , CHART_PRIORITY_SYSTEMD_SERVICES + 170
1693 , RRDSET_TYPE_STACKED
1698 rrdset_next(st_throttle_io_write);
1701 if(likely(do_throttle_ops)) {
1702 if(unlikely(!st_throttle_ops_read)) {
1704 st_throttle_ops_read = rrdset_create_localhost(
1706 , "throttle_io_ops_read"
1709 , "services.throttle_io_ops_read"
1710 , "Systemd Services Throttle Disk Read Operations"
1712 , CHART_PRIORITY_SYSTEMD_SERVICES + 180
1714 , RRDSET_TYPE_STACKED
1719 rrdset_next(st_throttle_ops_read);
1721 if(unlikely(!st_throttle_ops_write)) {
1723 st_throttle_ops_write = rrdset_create_localhost(
1725 , "throttle_io_ops_write"
1728 , "services.throttle_io_ops_write"
1729 , "Systemd Services Throttle Disk Write Operations"
1731 , CHART_PRIORITY_SYSTEMD_SERVICES + 190
1733 , RRDSET_TYPE_STACKED
1738 rrdset_next(st_throttle_ops_write);
1741 if(likely(do_queued_ops)) {
1742 if(unlikely(!st_queued_ops_read)) {
1744 st_queued_ops_read = rrdset_create_localhost(
1746 , "queued_io_ops_read"
1749 , "services.queued_io_ops_read"
1750 , "Systemd Services Queued Disk Read Operations"
1752 , CHART_PRIORITY_SYSTEMD_SERVICES + 200
1754 , RRDSET_TYPE_STACKED
1759 rrdset_next(st_queued_ops_read);
1761 if(unlikely(!st_queued_ops_write)) {
1763 st_queued_ops_write = rrdset_create_localhost(
1765 , "queued_io_ops_write"
1768 , "services.queued_io_ops_write"
1769 , "Systemd Services Queued Disk Write Operations"
1771 , CHART_PRIORITY_SYSTEMD_SERVICES + 210
1773 , RRDSET_TYPE_STACKED
1778 rrdset_next(st_queued_ops_write);
1781 if(likely(do_merged_ops)) {
1782 if(unlikely(!st_merged_ops_read)) {
1784 st_merged_ops_read = rrdset_create_localhost(
1786 , "merged_io_ops_read"
1789 , "services.merged_io_ops_read"
1790 , "Systemd Services Merged Disk Read Operations"
1792 , CHART_PRIORITY_SYSTEMD_SERVICES + 220
1794 , RRDSET_TYPE_STACKED
1799 rrdset_next(st_merged_ops_read);
1801 if(unlikely(!st_merged_ops_write)) {
1803 st_merged_ops_write = rrdset_create_localhost(
1805 , "merged_io_ops_write"
1808 , "services.merged_io_ops_write"
1809 , "Systemd Services Merged Disk Write Operations"
1811 , CHART_PRIORITY_SYSTEMD_SERVICES + 230
1813 , RRDSET_TYPE_STACKED
1818 rrdset_next(st_merged_ops_write);
1821 // update the values
// one dimension per service; dimensions (rd_*) are cached on the cgroup
// and created lazily the first time a service reports data
1823 for(cg = cgroup_root; cg ; cg = cg->next) {
1824 if(unlikely(!cg->available || !cg->enabled || !(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)))
1827 if(likely(do_cpu && cg->cpuacct_stat.updated)) {
1828 if(unlikely(!cg->rd_cpu))
1829 cg->rd_cpu = rrddim_add(st_cpu, cg->chart_id, cg->chart_title, 100, hz, RRD_ALGORITHM_INCREMENTAL);
1831 rrddim_set_by_pointer(st_cpu, cg->rd_cpu, cg->cpuacct_stat.user + cg->cpuacct_stat.system);
1834 if(likely(do_mem_usage && cg->memory.updated_usage_in_bytes)) {
1835 if(unlikely(!cg->rd_mem_usage))
1836 cg->rd_mem_usage = rrddim_add(st_mem_usage, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
// optionally subtract the page cache to report "really used" memory
1838 rrddim_set_by_pointer(st_mem_usage, cg->rd_mem_usage, cg->memory.usage_in_bytes - ((cgroup_used_memory_without_cache)?cg->memory.cache:0));
1841 if(likely(do_mem_detailed && cg->memory.updated_detailed)) {
1842 if(unlikely(!cg->rd_mem_detailed_rss))
1843 cg->rd_mem_detailed_rss = rrddim_add(st_mem_detailed_rss, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
// RSS chart includes transparent huge pages (rss + rss_huge)
1845 rrddim_set_by_pointer(st_mem_detailed_rss, cg->rd_mem_detailed_rss, cg->memory.rss + cg->memory.rss_huge);
1847 if(unlikely(!cg->rd_mem_detailed_mapped))
1848 cg->rd_mem_detailed_mapped = rrddim_add(st_mem_detailed_mapped, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
1850 rrddim_set_by_pointer(st_mem_detailed_mapped, cg->rd_mem_detailed_mapped, cg->memory.mapped_file);
1852 if(unlikely(!cg->rd_mem_detailed_cache))
1853 cg->rd_mem_detailed_cache = rrddim_add(st_mem_detailed_cache, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
1855 rrddim_set_by_pointer(st_mem_detailed_cache, cg->rd_mem_detailed_cache, cg->memory.cache);
1857 if(unlikely(!cg->rd_mem_detailed_writeback))
1858 cg->rd_mem_detailed_writeback = rrddim_add(st_mem_detailed_writeback, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
1860 rrddim_set_by_pointer(st_mem_detailed_writeback, cg->rd_mem_detailed_writeback, cg->memory.writeback);
// page-fault/paging counters are scaled by the system page size
1862 if(unlikely(!cg->rd_mem_detailed_pgfault))
1863 cg->rd_mem_detailed_pgfault = rrddim_add(st_mem_detailed_pgfault, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
1865 rrddim_set_by_pointer(st_mem_detailed_pgfault, cg->rd_mem_detailed_pgfault, cg->memory.pgfault);
1867 if(unlikely(!cg->rd_mem_detailed_pgmajfault))
1868 cg->rd_mem_detailed_pgmajfault = rrddim_add(st_mem_detailed_pgmajfault, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
1870 rrddim_set_by_pointer(st_mem_detailed_pgmajfault, cg->rd_mem_detailed_pgmajfault, cg->memory.pgmajfault);
1872 if(unlikely(!cg->rd_mem_detailed_pgpgin))
1873 cg->rd_mem_detailed_pgpgin = rrddim_add(st_mem_detailed_pgpgin, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
1875 rrddim_set_by_pointer(st_mem_detailed_pgpgin, cg->rd_mem_detailed_pgpgin, cg->memory.pgpgin);
1877 if(unlikely(!cg->rd_mem_detailed_pgpgout))
1878 cg->rd_mem_detailed_pgpgout = rrddim_add(st_mem_detailed_pgpgout, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
1880 rrddim_set_by_pointer(st_mem_detailed_pgpgout, cg->rd_mem_detailed_pgpgout, cg->memory.pgpgout);
1883 if(likely(do_mem_failcnt && cg->memory.updated_failcnt)) {
1884 if(unlikely(!cg->rd_mem_failcnt))
1885 cg->rd_mem_failcnt = rrddim_add(st_mem_failcnt, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
1887 rrddim_set_by_pointer(st_mem_failcnt, cg->rd_mem_failcnt, cg->memory.failcnt);
1890 if(likely(do_swap_usage && cg->memory.updated_msw_usage_in_bytes)) {
1891 if(unlikely(!cg->rd_swap_usage))
1892 cg->rd_swap_usage = rrddim_add(st_swap_usage, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
1894 rrddim_set_by_pointer(st_swap_usage, cg->rd_swap_usage, cg->memory.msw_usage_in_bytes);
1897 if(likely(do_io && cg->io_service_bytes.updated)) {
1898 if(unlikely(!cg->rd_io_service_bytes_read))
1899 cg->rd_io_service_bytes_read = rrddim_add(st_io_read, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
1901 rrddim_set_by_pointer(st_io_read, cg->rd_io_service_bytes_read, cg->io_service_bytes.Read);
1903 if(unlikely(!cg->rd_io_service_bytes_write))
1904 cg->rd_io_service_bytes_write = rrddim_add(st_io_write, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
1906 rrddim_set_by_pointer(st_io_write, cg->rd_io_service_bytes_write, cg->io_service_bytes.Write);
1909 if(likely(do_io_ops && cg->io_serviced.updated)) {
1910 if(unlikely(!cg->rd_io_serviced_read))
1911 cg->rd_io_serviced_read = rrddim_add(st_io_serviced_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
1913 rrddim_set_by_pointer(st_io_serviced_read, cg->rd_io_serviced_read, cg->io_serviced.Read);
1915 if(unlikely(!cg->rd_io_serviced_write))
1916 cg->rd_io_serviced_write = rrddim_add(st_io_serviced_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
1918 rrddim_set_by_pointer(st_io_serviced_write, cg->rd_io_serviced_write, cg->io_serviced.Write);
1921 if(likely(do_throttle_io && cg->throttle_io_service_bytes.updated)) {
1922 if(unlikely(!cg->rd_throttle_io_read))
1923 cg->rd_throttle_io_read = rrddim_add(st_throttle_io_read, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
1925 rrddim_set_by_pointer(st_throttle_io_read, cg->rd_throttle_io_read, cg->throttle_io_service_bytes.Read);
1927 if(unlikely(!cg->rd_throttle_io_write))
1928 cg->rd_throttle_io_write = rrddim_add(st_throttle_io_write, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
1930 rrddim_set_by_pointer(st_throttle_io_write, cg->rd_throttle_io_write, cg->throttle_io_service_bytes.Write);
1933 if(likely(do_throttle_ops && cg->throttle_io_serviced.updated)) {
1934 if(unlikely(!cg->rd_throttle_io_serviced_read))
1935 cg->rd_throttle_io_serviced_read = rrddim_add(st_throttle_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
1937 rrddim_set_by_pointer(st_throttle_ops_read, cg->rd_throttle_io_serviced_read, cg->throttle_io_serviced.Read);
1939 if(unlikely(!cg->rd_throttle_io_serviced_write))
1940 cg->rd_throttle_io_serviced_write = rrddim_add(st_throttle_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
1942 rrddim_set_by_pointer(st_throttle_ops_write, cg->rd_throttle_io_serviced_write, cg->throttle_io_serviced.Write);
1945 if(likely(do_queued_ops && cg->io_queued.updated)) {
1946 if(unlikely(!cg->rd_io_queued_read))
1947 cg->rd_io_queued_read = rrddim_add(st_queued_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
1949 rrddim_set_by_pointer(st_queued_ops_read, cg->rd_io_queued_read, cg->io_queued.Read);
1951 if(unlikely(!cg->rd_io_queued_write))
1952 cg->rd_io_queued_write = rrddim_add(st_queued_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
1954 rrddim_set_by_pointer(st_queued_ops_write, cg->rd_io_queued_write, cg->io_queued.Write);
1957 if(likely(do_merged_ops && cg->io_merged.updated)) {
1958 if(unlikely(!cg->rd_io_merged_read))
1959 cg->rd_io_merged_read = rrddim_add(st_merged_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
1961 rrddim_set_by_pointer(st_merged_ops_read, cg->rd_io_merged_read, cg->io_merged.Read);
1963 if(unlikely(!cg->rd_io_merged_write))
1964 cg->rd_io_merged_write = rrddim_add(st_merged_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
1966 rrddim_set_by_pointer(st_merged_ops_write, cg->rd_io_merged_write, cg->io_merged.Write);
1970 // complete the iteration
1972 rrdset_done(st_cpu);
1974 if(likely(do_mem_usage))
1975 rrdset_done(st_mem_usage);
// NOTE(review): this branch uses unlikely() while the matching chart
// creation above uses likely(do_mem_detailed) — branch hints are
// inconsistent for the same flag; harmless, but confirm intent
1977 if(unlikely(do_mem_detailed)) {
1978 rrdset_done(st_mem_detailed_cache);
1979 rrdset_done(st_mem_detailed_rss);
1980 rrdset_done(st_mem_detailed_mapped);
1981 rrdset_done(st_mem_detailed_writeback);
1982 rrdset_done(st_mem_detailed_pgfault);
1983 rrdset_done(st_mem_detailed_pgmajfault);
1984 rrdset_done(st_mem_detailed_pgpgin);
1985 rrdset_done(st_mem_detailed_pgpgout);
1988 if(likely(do_mem_failcnt))
1989 rrdset_done(st_mem_failcnt);
1991 if(likely(do_swap_usage))
1992 rrdset_done(st_swap_usage);
1995 rrdset_done(st_io_read);
1996 rrdset_done(st_io_write);
1999 if(likely(do_io_ops)) {
2000 rrdset_done(st_io_serviced_read);
2001 rrdset_done(st_io_serviced_write);
2004 if(likely(do_throttle_io)) {
2005 rrdset_done(st_throttle_io_read);
2006 rrdset_done(st_throttle_io_write);
2009 if(likely(do_throttle_ops)) {
2010 rrdset_done(st_throttle_ops_read);
2011 rrdset_done(st_throttle_ops_write);
2014 if(likely(do_queued_ops)) {
2015 rrdset_done(st_queued_ops_read);
2016 rrdset_done(st_queued_ops_write);
2019 if(likely(do_merged_ops)) {
2020 rrdset_done(st_merged_ops_read);
2021 rrdset_done(st_merged_ops_write);
// Derive the chart "type" (family prefix) for a cgroup id, caching the
// result in the caller-provided 'buffer': an empty or "/" id maps to
// "cgroup_root", anything else to "cgroup_<id>", then the result is
// sanitized by netdata_fix_chart_id(). If buffer[0] is already set, the
// cached value is returned unchanged.
// NOTE(review): strncpy() does not NUL-terminate when len <= the length of
// "cgroup_root" — presumably len is always large enough here (callers pass
// RRD_ID_LENGTH_MAX); confirm.
2025 static inline char *cgroup_chart_type(char *buffer, const char *id, size_t len) {
2026 if(buffer[0]) return buffer;
2028 if(id[0] == '\0' || (id[0] == '/' && id[1] == '\0'))
2029 strncpy(buffer, "cgroup_root", len);
2031 snprintfz(buffer, len, "cgroup_%s", id);
2033 netdata_fix_chart_id(buffer);
2037 void update_cgroup_charts(int update_every) {
2038 debug(D_CGROUP, "updating cgroups charts");
2040 char type[RRD_ID_LENGTH_MAX + 1];
2041 char title[CHART_TITLE_MAX + 1];
2043 int services_do_cpu = 0,
2044 services_do_mem_usage = 0,
2045 services_do_mem_detailed = 0,
2046 services_do_mem_failcnt = 0,
2047 services_do_swap_usage = 0,
2049 services_do_io_ops = 0,
2050 services_do_throttle_io = 0,
2051 services_do_throttle_ops = 0,
2052 services_do_queued_ops = 0,
2053 services_do_merged_ops = 0;
2056 for(cg = cgroup_root; cg ; cg = cg->next) {
2057 if(unlikely(!cg->available || !cg->enabled))
2060 if(likely(cgroup_enable_systemd_services && cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE)) {
2061 if(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES) services_do_cpu++;
2063 if(cgroup_enable_systemd_services_detailed_memory && cg->memory.updated_detailed && cg->memory.enabled_detailed) services_do_mem_detailed++;
2064 if(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES) services_do_mem_usage++;
2065 if(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES) services_do_mem_failcnt++;
2066 if(cg->memory.updated_msw_usage_in_bytes && cg->memory.enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_YES) services_do_swap_usage++;
2068 if(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES) services_do_io++;
2069 if(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES) services_do_io_ops++;
2070 if(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES) services_do_throttle_io++;
2071 if(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES) services_do_throttle_ops++;
2072 if(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES) services_do_queued_ops++;
2073 if(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES) services_do_merged_ops++;
2079 if(likely(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES)) {
2080 if(unlikely(!cg->st_cpu)) {
2081 snprintfz(title, CHART_TITLE_MAX, "CPU Usage (%d%% = %d core%s) for cgroup %s", (processors * 100), processors, (processors > 1) ? "s" : "", cg->chart_title);
2083 cg->st_cpu = rrdset_create_localhost(
2084 cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
2091 , CHART_PRIORITY_CONTAINERS
2093 , RRDSET_TYPE_STACKED
2096 rrddim_add(cg->st_cpu, "user", NULL, 100, hz, RRD_ALGORITHM_INCREMENTAL);
2097 rrddim_add(cg->st_cpu, "system", NULL, 100, hz, RRD_ALGORITHM_INCREMENTAL);
2100 rrdset_next(cg->st_cpu);
2102 rrddim_set(cg->st_cpu, "user", cg->cpuacct_stat.user);
2103 rrddim_set(cg->st_cpu, "system", cg->cpuacct_stat.system);
2104 rrdset_done(cg->st_cpu);
2107 if(likely(cg->cpuacct_usage.updated && cg->cpuacct_usage.enabled == CONFIG_BOOLEAN_YES)) {
2108 char id[RRD_ID_LENGTH_MAX + 1];
2111 if(unlikely(!cg->st_cpu_per_core)) {
2112 snprintfz(title, CHART_TITLE_MAX, "CPU Usage (%d%% = %d core%s) Per Core for cgroup %s", (processors * 100), processors, (processors > 1) ? "s" : "", cg->chart_title);
2114 cg->st_cpu_per_core = rrdset_create_localhost(
2115 cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
2119 , "cgroup.cpu_per_core"
2122 , CHART_PRIORITY_CONTAINERS + 100
2124 , RRDSET_TYPE_STACKED
2127 for(i = 0; i < cg->cpuacct_usage.cpus; i++) {
2128 snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i);
2129 rrddim_add(cg->st_cpu_per_core, id, NULL, 100, 1000000000, RRD_ALGORITHM_INCREMENTAL);
2133 rrdset_next(cg->st_cpu_per_core);
2135 for(i = 0; i < cg->cpuacct_usage.cpus ;i++) {
2136 snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i);
2137 rrddim_set(cg->st_cpu_per_core, id, cg->cpuacct_usage.cpu_percpu[i]);
2139 rrdset_done(cg->st_cpu_per_core);
2142 if(likely(cg->memory.updated_detailed && cg->memory.enabled_detailed == CONFIG_BOOLEAN_YES)) {
2143 if(unlikely(!cg->st_mem)) {
2144 snprintfz(title, CHART_TITLE_MAX, "Memory Usage for cgroup %s", cg->chart_title);
2146 cg->st_mem = rrdset_create_localhost(
2147 cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
2154 , CHART_PRIORITY_CONTAINERS + 210
2156 , RRDSET_TYPE_STACKED
2159 rrddim_add(cg->st_mem, "cache", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
2160 rrddim_add(cg->st_mem, "rss", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
2162 if(cg->memory.detailed_has_swap)
2163 rrddim_add(cg->st_mem, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
2165 rrddim_add(cg->st_mem, "rss_huge", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
2166 rrddim_add(cg->st_mem, "mapped_file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
2169 rrdset_next(cg->st_mem);
2171 rrddim_set(cg->st_mem, "cache", cg->memory.cache);
2172 rrddim_set(cg->st_mem, "rss", cg->memory.rss);
2174 if(cg->memory.detailed_has_swap)
2175 rrddim_set(cg->st_mem, "swap", cg->memory.swap);
2177 rrddim_set(cg->st_mem, "rss_huge", cg->memory.rss_huge);
2178 rrddim_set(cg->st_mem, "mapped_file", cg->memory.mapped_file);
2179 rrdset_done(cg->st_mem);
2181 if(unlikely(!cg->st_writeback)) {
2182 snprintfz(title, CHART_TITLE_MAX, "Writeback Memory for cgroup %s", cg->chart_title);
2184 cg->st_writeback = rrdset_create_localhost(
2185 cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
2189 , "cgroup.writeback"
2192 , CHART_PRIORITY_CONTAINERS + 300
2197 if(cg->memory.detailed_has_dirty)
2198 rrddim_add(cg->st_writeback, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
2200 rrddim_add(cg->st_writeback, "writeback", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
2203 rrdset_next(cg->st_writeback);
2205 if(cg->memory.detailed_has_dirty)
2206 rrddim_set(cg->st_writeback, "dirty", cg->memory.dirty);
2208 rrddim_set(cg->st_writeback, "writeback", cg->memory.writeback);
2209 rrdset_done(cg->st_writeback);
2211 if(unlikely(!cg->st_mem_activity)) {
2212 snprintfz(title, CHART_TITLE_MAX, "Memory Activity for cgroup %s", cg->chart_title);
2214 cg->st_mem_activity = rrdset_create_localhost(
2215 cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
2219 , "cgroup.mem_activity"
2222 , CHART_PRIORITY_CONTAINERS + 400
2227 rrddim_add(cg->st_mem_activity, "pgpgin", "in", system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
2228 rrddim_add(cg->st_mem_activity, "pgpgout", "out", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
2231 rrdset_next(cg->st_mem_activity);
2233 rrddim_set(cg->st_mem_activity, "pgpgin", cg->memory.pgpgin);
2234 rrddim_set(cg->st_mem_activity, "pgpgout", cg->memory.pgpgout);
2235 rrdset_done(cg->st_mem_activity);
2237 if(unlikely(!cg->st_pgfaults)) {
2238 snprintfz(title, CHART_TITLE_MAX, "Memory Page Faults for cgroup %s", cg->chart_title);
2240 cg->st_pgfaults = rrdset_create_localhost(
2241 cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
2248 , CHART_PRIORITY_CONTAINERS + 500
2253 rrddim_add(cg->st_pgfaults, "pgfault", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
2254 rrddim_add(cg->st_pgfaults, "pgmajfault", "swap", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
2257 rrdset_next(cg->st_pgfaults);
2259 rrddim_set(cg->st_pgfaults, "pgfault", cg->memory.pgfault);
2260 rrddim_set(cg->st_pgfaults, "pgmajfault", cg->memory.pgmajfault);
2261 rrdset_done(cg->st_pgfaults);
2264 if(likely(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES)) {
2265 if(unlikely(!cg->st_mem_usage)) {
2266 snprintfz(title, CHART_TITLE_MAX, "Used Memory %sfor cgroup %s", (cgroup_used_memory_without_cache && cg->memory.updated_detailed)?"without Cache ":"", cg->chart_title);
2268 cg->st_mem_usage = rrdset_create_localhost(
2269 cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
2273 , "cgroup.mem_usage"
2276 , CHART_PRIORITY_CONTAINERS + 200
2278 , RRDSET_TYPE_STACKED
2281 rrddim_add(cg->st_mem_usage, "ram", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
2282 rrddim_add(cg->st_mem_usage, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
2285 rrdset_next(cg->st_mem_usage);
2287 rrddim_set(cg->st_mem_usage, "ram", cg->memory.usage_in_bytes - ((cgroup_used_memory_without_cache)?cg->memory.cache:0));
2288 rrddim_set(cg->st_mem_usage, "swap", (cg->memory.msw_usage_in_bytes > cg->memory.usage_in_bytes)?cg->memory.msw_usage_in_bytes - cg->memory.usage_in_bytes:0);
2289 rrdset_done(cg->st_mem_usage);
2292 if(likely(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES)) {
2293 if(unlikely(!cg->st_mem_failcnt)) {
2294 snprintfz(title, CHART_TITLE_MAX, "Memory Limit Failures for cgroup %s", cg->chart_title);
2296 cg->st_mem_failcnt = rrdset_create_localhost(
2297 cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
2301 , "cgroup.mem_failcnt"
2304 , CHART_PRIORITY_CONTAINERS + 250
2309 rrddim_add(cg->st_mem_failcnt, "failures", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
2312 rrdset_next(cg->st_mem_failcnt);
2314 rrddim_set(cg->st_mem_failcnt, "failures", cg->memory.failcnt);
2315 rrdset_done(cg->st_mem_failcnt);
2318 if(likely(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) {
2319 if(unlikely(!cg->st_io)) {
2320 snprintfz(title, CHART_TITLE_MAX, "I/O Bandwidth (all disks) for cgroup %s", cg->chart_title);
2322 cg->st_io = rrdset_create_localhost(
2323 cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
2330 , CHART_PRIORITY_CONTAINERS + 1200
2335 rrddim_add(cg->st_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
2336 rrddim_add(cg->st_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
2339 rrdset_next(cg->st_io);
2341 rrddim_set(cg->st_io, "read", cg->io_service_bytes.Read);
2342 rrddim_set(cg->st_io, "write", cg->io_service_bytes.Write);
2343 rrdset_done(cg->st_io);
2346 if(likely(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES)) {
2347 if(unlikely(!cg->st_serviced_ops)) {
2348 snprintfz(title, CHART_TITLE_MAX, "Serviced I/O Operations (all disks) for cgroup %s", cg->chart_title);
2350 cg->st_serviced_ops = rrdset_create_localhost(
2351 cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
2355 , "cgroup.serviced_ops"
2358 , CHART_PRIORITY_CONTAINERS + 1200
2363 rrddim_add(cg->st_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
2364 rrddim_add(cg->st_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
2367 rrdset_next(cg->st_serviced_ops);
2369 rrddim_set(cg->st_serviced_ops, "read", cg->io_serviced.Read);
2370 rrddim_set(cg->st_serviced_ops, "write", cg->io_serviced.Write);
2371 rrdset_done(cg->st_serviced_ops);
2374 if(likely(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) {
2375 if(unlikely(!cg->st_throttle_io)) {
2376 snprintfz(title, CHART_TITLE_MAX, "Throttle I/O Bandwidth (all disks) for cgroup %s", cg->chart_title);
2378 cg->st_throttle_io = rrdset_create_localhost(
2379 cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
2383 , "cgroup.throttle_io"
2386 , CHART_PRIORITY_CONTAINERS + 1200
2391 rrddim_add(cg->st_throttle_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
2392 rrddim_add(cg->st_throttle_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
2395 rrdset_next(cg->st_throttle_io);
2397 rrddim_set(cg->st_throttle_io, "read", cg->throttle_io_service_bytes.Read);
2398 rrddim_set(cg->st_throttle_io, "write", cg->throttle_io_service_bytes.Write);
2399 rrdset_done(cg->st_throttle_io);
2402 if(likely(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES)) {
2403 if(unlikely(!cg->st_throttle_serviced_ops)) {
2404 snprintfz(title, CHART_TITLE_MAX, "Throttle Serviced I/O Operations (all disks) for cgroup %s", cg->chart_title);
2406 cg->st_throttle_serviced_ops = rrdset_create_localhost(
2407 cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
2408 , "throttle_serviced_ops"
2411 , "cgroup.throttle_serviced_ops"
2414 , CHART_PRIORITY_CONTAINERS + 1200
2419 rrddim_add(cg->st_throttle_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
2420 rrddim_add(cg->st_throttle_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
2423 rrdset_next(cg->st_throttle_serviced_ops);
2425 rrddim_set(cg->st_throttle_serviced_ops, "read", cg->throttle_io_serviced.Read);
2426 rrddim_set(cg->st_throttle_serviced_ops, "write", cg->throttle_io_serviced.Write);
2427 rrdset_done(cg->st_throttle_serviced_ops);
2430 if(likely(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES)) {
2431 if(unlikely(!cg->st_queued_ops)) {
2432 snprintfz(title, CHART_TITLE_MAX, "Queued I/O Operations (all disks) for cgroup %s", cg->chart_title);
2434 cg->st_queued_ops = rrdset_create_localhost(
2435 cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
2439 , "cgroup.queued_ops"
2442 , CHART_PRIORITY_CONTAINERS + 2000
2447 rrddim_add(cg->st_queued_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
2448 rrddim_add(cg->st_queued_ops, "write", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
2451 rrdset_next(cg->st_queued_ops);
2453 rrddim_set(cg->st_queued_ops, "read", cg->io_queued.Read);
2454 rrddim_set(cg->st_queued_ops, "write", cg->io_queued.Write);
2455 rrdset_done(cg->st_queued_ops);
2458 if(likely(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES)) {
2459 if(unlikely(!cg->st_merged_ops)) {
2460 snprintfz(title, CHART_TITLE_MAX, "Merged I/O Operations (all disks) for cgroup %s", cg->chart_title);
2462 cg->st_merged_ops = rrdset_create_localhost(
2463 cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
2467 , "cgroup.merged_ops"
2470 , CHART_PRIORITY_CONTAINERS + 2100
2475 rrddim_add(cg->st_merged_ops, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
2476 rrddim_add(cg->st_merged_ops, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
2479 rrdset_next(cg->st_merged_ops);
2481 rrddim_set(cg->st_merged_ops, "read", cg->io_merged.Read);
2482 rrddim_set(cg->st_merged_ops, "write", cg->io_merged.Write);
2483 rrdset_done(cg->st_merged_ops);
2487 if(likely(cgroup_enable_systemd_services))
2488 update_systemd_services_charts(update_every, services_do_cpu, services_do_mem_usage, services_do_mem_detailed
2489 , services_do_mem_failcnt, services_do_swap_usage, services_do_io
2490 , services_do_io_ops, services_do_throttle_io, services_do_throttle_ops
2491 , services_do_queued_ops, services_do_merged_ops
2494 debug(D_CGROUP, "done updating cgroups charts");
2497 // ----------------------------------------------------------------------------
2500 void *cgroups_main(void *ptr) {
2501 struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
2503 info("CGROUP Plugin thread created with task id %d", gettid());
2505 if(pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL) != 0)
2506 error("Cannot set pthread cancel type to DEFERRED.");
2508 if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
2509 error("Cannot set pthread cancel state to ENABLE.");
2511 struct rusage thread;
2513 // when ZERO, attempt to do it
2514 int vdo_cpu_netdata = config_get_boolean("plugin:cgroups", "cgroups plugin resource charts", 1);
2516 read_cgroup_plugin_configuration();
2518 RRDSET *stcpu_thread = NULL;
2521 heartbeat_init(&hb);
2522 usec_t step = cgroup_update_every * USEC_PER_SEC;
2523 usec_t find_every = cgroup_check_for_new_every * USEC_PER_SEC, find_dt = 0;
2525 usec_t hb_dt = heartbeat_next(&hb, step);
2526 if(unlikely(netdata_exit)) break;
2528 // BEGIN -- the job to be done
2531 if(unlikely(find_dt >= find_every || cgroups_check)) {
2537 read_all_cgroups(cgroup_root);
2538 update_cgroup_charts(cgroup_update_every);
2540 // END -- the job is done
2542 // --------------------------------------------------------------------
2544 if(vdo_cpu_netdata) {
2545 getrusage(RUSAGE_THREAD, &thread);
2547 if(unlikely(!stcpu_thread)) {
2549 stcpu_thread = rrdset_create_localhost(
2551 , "plugin_cgroups_cpu"
2555 , "NetData CGroups Plugin CPU usage"
2558 , cgroup_update_every
2559 , RRDSET_TYPE_STACKED
2562 rrddim_add(stcpu_thread, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
2563 rrddim_add(stcpu_thread, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
2566 rrdset_next(stcpu_thread);
2568 rrddim_set(stcpu_thread, "user" , thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
2569 rrddim_set(stcpu_thread, "system", thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
2570 rrdset_done(stcpu_thread);
2574 info("CGROUP thread exiting");
2576 static_thread->enabled = 0;