3 #define RRD_TYPE_DISK "disk"
5 #define DISK_TYPE_PHYSICAL 1
6 #define DISK_TYPE_PARTITION 2
7 #define DISK_TYPE_CONTAINER 3
10 char *disk; // the name of the disk (sda, sdb, etc)
17 // disk options caching
32 static struct mountinfo *disk_mountinfo_root = NULL;
34 static struct disk *get_disk(unsigned long major, unsigned long minor, char *disk) {
// Look up the cached `struct disk` entry for (major, minor); if none exists,
// allocate one, detect its type (physical / partition / container), resolve
// its mount point via mountinfo, read its hardware sector size from sysfs,
// and append it to the global disk_root list.  Returns the entry.
// NOTE(review): this chunk is an elided extraction — lines are missing
// between the numbered statements below (e.g. the early-return for a found
// disk and several closing braces), so parts of the control flow are not
// visible here.
35 static char path_to_get_hw_sector_size[FILENAME_MAX + 1] = "";
36 static char path_to_get_hw_sector_size_partitions[FILENAME_MAX + 1] = "";
37 static char path_find_block_device[FILENAME_MAX + 1] = "";
40 // search for it in our RAM list.
41 // this is sequential, but since we just walk through
42 // and the number of disks / partitions in a system
43 // should not be that many, it should be acceptable
44 for(d = disk_root; d ; d = d->next)
45 if(unlikely(d->major == major && d->minor == minor))
48 // if we found it, return it
53 // create a new disk structure
54 d = (struct disk *)mallocz(sizeof(struct disk));
// keep a private copy of the name; the caller's buffer may be reused
56 d->disk = strdupz(disk);
59 d->type = DISK_TYPE_PHYSICAL; // Default type. Changed later if not correct.
61 d->sector_size = 512; // the default, will be changed below
64 // append it to the list
69 for(last = disk_root; last->next ;last = last->next);
73 // ------------------------------------------------------------------------
74 // find the type of the device
76 char buffer[FILENAME_MAX + 1];
78 // get the default path for finding info about the block device
79 if(unlikely(!path_find_block_device[0])) {
// the configured value is itself a printf-style template:
// the %lu:%lu/%s placeholders are filled with major, minor and an
// attribute name by the snprintfz() calls below
80 snprintfz(buffer, FILENAME_MAX, "%s%s", global_host_prefix, "/sys/dev/block/%lu:%lu/%s");
81 snprintfz(path_find_block_device, FILENAME_MAX, "%s", config_get("plugin:proc:/proc/diskstats", "path to get block device infos", buffer));
84 // find if it is a partition
85 // by checking if /sys/dev/block/MAJOR:MINOR/partition is readable.
86 snprintfz(buffer, FILENAME_MAX, path_find_block_device, major, minor, "partition");
87 if(access(buffer, R_OK) == 0) {
88 d->type = DISK_TYPE_PARTITION;
90 // find if it is a container
91 // by checking if /sys/dev/block/MAJOR:MINOR/slaves has entries
92 snprintfz(buffer, FILENAME_MAX, path_find_block_device, major, minor, "slaves/");
93 DIR *dirp = opendir(buffer);
96 while( (dp = readdir(dirp)) ) {
97 // . and .. are also files in empty folders.
98 if(strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0) {
// any other entry in slaves/ means this device aggregates other
// block devices (e.g. md/dm), so treat it as a container
102 d->type = DISK_TYPE_CONTAINER;
104 // Stop the loop after we found one file.
107 if(closedir(dirp) == -1)
108 error("Unable to close dir %s", buffer);
112 // ------------------------------------------------------------------------
113 // check if we can find its mount point
115 // mountinfo_find() can be called with NULL disk_mountinfo_root
116 struct mountinfo *mi = mountinfo_find(disk_mountinfo_root, d->major, d->minor);
118 // mountinfo_free() can be called with NULL disk_mountinfo_root
119 mountinfo_free(disk_mountinfo_root);
121 // re-read mountinfo in case something changed
122 disk_mountinfo_root = mountinfo_read();
124 // search again for this disk
125 mi = mountinfo_find(disk_mountinfo_root, d->major, d->minor);
129 d->mount_point = strdupz(mi->mount_point);
130 // no need to check for NULL
132 d->mount_point = NULL;
134 // ------------------------------------------------------------------------
135 // find the disk sector size
137 if(!path_to_get_hw_sector_size[0]) {
138 snprintfz(buffer, FILENAME_MAX, "%s%s", global_host_prefix, "/sys/block/%s/queue/hw_sector_size");
139 snprintfz(path_to_get_hw_sector_size, FILENAME_MAX, "%s", config_get("plugin:proc:/proc/diskstats", "path to get h/w sector size", buffer));
141 if(!path_to_get_hw_sector_size_partitions[0]) {
142 snprintfz(buffer, FILENAME_MAX, "%s%s", global_host_prefix, "/sys/dev/block/%lu:%lu/subsystem/%s/../queue/hw_sector_size");
143 snprintfz(path_to_get_hw_sector_size_partitions, FILENAME_MAX, "%s", config_get("plugin:proc:/proc/diskstats", "path to get h/w sector size for partitions", buffer));
// sysfs encodes '/' in device names as '!' (e.g. cciss/c0d0 -> cciss!c0d0),
// so build a sysfs-safe copy of the name before formatting the path
147 char tf[FILENAME_MAX + 1], *t;
148 strncpyz(tf, d->disk, FILENAME_MAX);
150 // replace all / with !
152 if(*t == '/') *t = '!';
154 if(d->type == DISK_TYPE_PARTITION)
155 snprintfz(buffer, FILENAME_MAX, path_to_get_hw_sector_size_partitions, d->major, d->minor, tf);
157 snprintfz(buffer, FILENAME_MAX, path_to_get_hw_sector_size, tf);
159 FILE *fpss = fopen(buffer, "r");
161 char buffer2[1024 + 1];
162 char *tmp = fgets(buffer2, 1024, fpss);
// NOTE(review): atoi() gives no error reporting; the <= 0 check below is
// the only validation, falling back to the 512-byte default. strtol() with
// endptr/errno checking would be stricter — confirm before changing.
165 d->sector_size = atoi(tmp);
166 if(d->sector_size <= 0) {
167 error("Invalid sector size %d for device %s in %s. Assuming 512.", d->sector_size, d->disk, buffer);
168 d->sector_size = 512;
171 else error("Cannot read data for sector size for device %s from %s. Assuming 512.", d->disk, buffer);
175 else error("Cannot read sector size for device %s from %s. Assuming 512.", d->disk, buffer);
181 static inline int select_positive_option(int option1, int option2) {
// Merge two tri-state ("boolean ondemand") configuration values, keeping
// the most permissive of the two: YES beats ONDEMAND, ONDEMAND beats NO.
// (The closing brace of this function is elided from this extraction.)
182 if(option1 == CONFIG_ONDEMAND_YES || option2 == CONFIG_ONDEMAND_YES)
183 return CONFIG_ONDEMAND_YES;
184 else if(option1 == CONFIG_ONDEMAND_ONDEMAND || option2 == CONFIG_ONDEMAND_ONDEMAND)
185 return CONFIG_ONDEMAND_ONDEMAND;
187 return CONFIG_ONDEMAND_NO;
190 int do_proc_diskstats(int update_every, unsigned long long dt) {
191 static procfile *ff = NULL;
192 static struct statvfs buff_statvfs;
193 static struct stat buff_stat;
194 static int global_enable_new_disks_detected_at_runtime = CONFIG_ONDEMAND_YES,
195 global_enable_performance_for_physical_disks = CONFIG_ONDEMAND_ONDEMAND,
196 global_enable_performance_for_virtual_disks = CONFIG_ONDEMAND_NO,
197 global_enable_performance_for_partitions = CONFIG_ONDEMAND_NO,
198 global_enable_performance_for_mountpoints = CONFIG_ONDEMAND_NO,
199 global_enable_performance_for_virtual_mountpoints = CONFIG_ONDEMAND_ONDEMAND,
200 global_enable_space_for_mountpoints = CONFIG_ONDEMAND_ONDEMAND,
201 global_do_io = CONFIG_ONDEMAND_ONDEMAND,
202 global_do_ops = CONFIG_ONDEMAND_ONDEMAND,
203 global_do_mops = CONFIG_ONDEMAND_ONDEMAND,
204 global_do_iotime = CONFIG_ONDEMAND_ONDEMAND,
205 global_do_qops = CONFIG_ONDEMAND_ONDEMAND,
206 global_do_util = CONFIG_ONDEMAND_ONDEMAND,
207 global_do_backlog = CONFIG_ONDEMAND_ONDEMAND,
208 global_do_space = CONFIG_ONDEMAND_ONDEMAND,
209 global_do_inodes = CONFIG_ONDEMAND_ONDEMAND,
210 globals_initialized = 0;
212 if(unlikely(!globals_initialized)) {
213 global_enable_new_disks_detected_at_runtime = config_get_boolean("plugin:proc:/proc/diskstats", "enable new disks detected at runtime", global_enable_new_disks_detected_at_runtime);
215 global_enable_performance_for_physical_disks = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "performance metrics for physical disks", global_enable_performance_for_physical_disks);
216 global_enable_performance_for_virtual_disks = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "performance metrics for virtual disks", global_enable_performance_for_virtual_disks);
217 global_enable_performance_for_partitions = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "performance metrics for partitions", global_enable_performance_for_partitions);
218 global_enable_performance_for_mountpoints = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "performance metrics for mounted filesystems", global_enable_performance_for_mountpoints);
219 global_enable_performance_for_virtual_mountpoints = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "performance metrics for mounted virtual disks", global_enable_performance_for_virtual_mountpoints);
220 global_enable_space_for_mountpoints = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "space metrics for mounted filesystems", global_enable_space_for_mountpoints);
222 global_do_io = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "bandwidth for all disks", global_do_io);
223 global_do_ops = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "operations for all disks", global_do_ops);
224 global_do_mops = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "merged operations for all disks", global_do_mops);
225 global_do_iotime = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "i/o time for all disks", global_do_iotime);
226 global_do_qops = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "queued operations for all disks", global_do_qops);
227 global_do_util = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "utilization percentage for all disks", global_do_util);
228 global_do_backlog = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "backlog for all disks", global_do_backlog);
229 global_do_space = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "space usage for all disks", global_do_space);
230 global_do_inodes = config_get_boolean_ondemand("plugin:proc:/proc/diskstats", "inodes usage for all disks", global_do_inodes);
232 globals_initialized = 1;
236 char filename[FILENAME_MAX + 1];
237 snprintfz(filename, FILENAME_MAX, "%s%s", global_host_prefix, "/proc/diskstats");
238 ff = procfile_open(config_get("plugin:proc:/proc/diskstats", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
242 ff = procfile_readall(ff);
243 if(!ff) return 0; // we return 0, so that we will retry to open it next time
245 uint32_t lines = procfile_lines(ff), l;
248 for(l = 0; l < lines ;l++) {
249 // --------------------------------------------------------------------------
253 unsigned long long major = 0, minor = 0,
254 reads = 0, mreads = 0, readsectors = 0, readms = 0,
255 writes = 0, mwrites = 0, writesectors = 0, writems = 0,
256 queued_ios = 0, busy_ms = 0, backlog_ms = 0,
257 space_avail = 0, space_avail_root = 0, space_used = 0,
258 inodes_avail = 0, inodes_avail_root = 0, inodes_used = 0;
260 unsigned long long last_reads = 0, last_readsectors = 0, last_readms = 0,
261 last_writes = 0, last_writesectors = 0, last_writems = 0,
264 words = procfile_linewords(ff, l);
265 if(words < 14) continue;
267 major = strtoull(procfile_lineword(ff, l, 0), NULL, 10);
268 minor = strtoull(procfile_lineword(ff, l, 1), NULL, 10);
269 disk = procfile_lineword(ff, l, 2);
271 // # of reads completed # of writes completed
272 // This is the total number of reads or writes completed successfully.
273 reads = strtoull(procfile_lineword(ff, l, 3), NULL, 10); // rd_ios
274 writes = strtoull(procfile_lineword(ff, l, 7), NULL, 10); // wr_ios
276 // # of reads merged # of writes merged
277 // Reads and writes which are adjacent to each other may be merged for
278 // efficiency. Thus two 4K reads may become one 8K read before it is
279 // ultimately handed to the disk, and so it will be counted (and queued)
280 mreads = strtoull(procfile_lineword(ff, l, 4), NULL, 10); // rd_merges_or_rd_sec
281 mwrites = strtoull(procfile_lineword(ff, l, 8), NULL, 10); // wr_merges
283 // # of sectors read # of sectors written
284 // This is the total number of sectors read or written successfully.
285 readsectors = strtoull(procfile_lineword(ff, l, 5), NULL, 10); // rd_sec_or_wr_ios
286 writesectors = strtoull(procfile_lineword(ff, l, 9), NULL, 10); // wr_sec
288 // # of milliseconds spent reading # of milliseconds spent writing
289 // This is the total number of milliseconds spent by all reads or writes (as
290 // measured from __make_request() to end_that_request_last()).
291 readms = strtoull(procfile_lineword(ff, l, 6), NULL, 10); // rd_ticks_or_wr_sec
292 writems = strtoull(procfile_lineword(ff, l, 10), NULL, 10); // wr_ticks
294 // # of I/Os currently in progress
295 // The only field that should go to zero. Incremented as requests are
296 // given to appropriate struct request_queue and decremented as they finish.
297 queued_ios = strtoull(procfile_lineword(ff, l, 11), NULL, 10); // ios_pgr
299 // # of milliseconds spent doing I/Os
300 // This field increases so long as field queued_ios is nonzero.
301 busy_ms = strtoull(procfile_lineword(ff, l, 12), NULL, 10); // tot_ticks
303 // weighted # of milliseconds spent doing I/Os
304 // This field is incremented at each I/O start, I/O completion, I/O
305 // merge, or read of these stats by the number of I/Os in progress
306 // (field queued_ios) times the number of milliseconds spent doing I/O since the
307 // last update of this field. This can provide an easy measure of both
308 // I/O completion time and the backlog that may be accumulating.
309 backlog_ms = strtoull(procfile_lineword(ff, l, 13), NULL, 10); // rq_ticks
312 // --------------------------------------------------------------------------
313 // remove slashes from disk names
315 for(s = disk; *s ;s++)
316 if(*s == '/') *s = '_';
318 // --------------------------------------------------------------------------
319 // get a disk structure for the disk
321 struct disk *d = get_disk(major, minor, disk);
324 // --------------------------------------------------------------------------
325 // Set its family based on mount point
327 char *family = d->mount_point;
328 if(!family) family = disk;
331 // --------------------------------------------------------------------------
332 // Check the configuration for the device
334 if(unlikely(!d->configured)) {
335 char var_name[4096 + 1];
336 snprintfz(var_name, 4096, "plugin:proc:/proc/diskstats:%s", disk);
338 int def_enable = config_get_boolean_ondemand(var_name, "enable", global_enable_new_disks_detected_at_runtime);
339 if(def_enable == CONFIG_ONDEMAND_NO) {
340 // the user does not want any metrics for this disk
341 d->do_io = CONFIG_ONDEMAND_NO;
342 d->do_ops = CONFIG_ONDEMAND_NO;
343 d->do_mops = CONFIG_ONDEMAND_NO;
344 d->do_iotime = CONFIG_ONDEMAND_NO;
345 d->do_qops = CONFIG_ONDEMAND_NO;
346 d->do_util = CONFIG_ONDEMAND_NO;
347 d->do_backlog = CONFIG_ONDEMAND_NO;
348 d->do_space = CONFIG_ONDEMAND_NO;
349 d->do_inodes = CONFIG_ONDEMAND_NO;
352 // this disk is enabled
353 // check its direct settings
355 int def_performance = CONFIG_ONDEMAND_ONDEMAND;
356 int def_space = (d->mount_point)?CONFIG_ONDEMAND_ONDEMAND:CONFIG_ONDEMAND_NO;
358 // since this is 'on demand' we can figure the performance settings
359 // based on the type of disk
362 case DISK_TYPE_PHYSICAL:
363 def_performance = global_enable_performance_for_physical_disks;
366 case DISK_TYPE_PARTITION:
367 def_performance = global_enable_performance_for_partitions;
370 case DISK_TYPE_CONTAINER:
371 def_performance = global_enable_performance_for_virtual_disks;
374 def_performance = select_positive_option(def_performance, global_enable_performance_for_virtual_mountpoints);
380 def_performance = select_positive_option(def_performance, global_enable_performance_for_mountpoints);
382 // ------------------------------------------------------------
383 // now we have def_performance and def_space
387 // check the user configuration (this will also show our 'on demand' decision)
388 def_performance = config_get_boolean_ondemand(var_name, "enable performance metrics", def_performance);
390 int ddo_io = CONFIG_ONDEMAND_NO,
391 ddo_ops = CONFIG_ONDEMAND_NO,
392 ddo_mops = CONFIG_ONDEMAND_NO,
393 ddo_iotime = CONFIG_ONDEMAND_NO,
394 ddo_qops = CONFIG_ONDEMAND_NO,
395 ddo_util = CONFIG_ONDEMAND_NO,
396 ddo_backlog = CONFIG_ONDEMAND_NO;
398 // we enable individual performance charts only when def_performance is not disabled
399 if(def_performance != CONFIG_ONDEMAND_NO) {
400 ddo_io = global_do_io,
401 ddo_ops = global_do_ops,
402 ddo_mops = global_do_mops,
403 ddo_iotime = global_do_iotime,
404 ddo_qops = global_do_qops,
405 ddo_util = global_do_util,
406 ddo_backlog = global_do_backlog;
409 d->do_io = config_get_boolean_ondemand(var_name, "bandwidth", ddo_io);
410 d->do_ops = config_get_boolean_ondemand(var_name, "operations", ddo_ops);
411 d->do_mops = config_get_boolean_ondemand(var_name, "merged operations", ddo_mops);
412 d->do_iotime = config_get_boolean_ondemand(var_name, "i/o time", ddo_iotime);
413 d->do_qops = config_get_boolean_ondemand(var_name, "queued operations", ddo_qops);
414 d->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", ddo_util);
415 d->do_backlog = config_get_boolean_ondemand(var_name, "backlog", ddo_backlog);
419 // check the user configuration (this will also show our 'on demand' decision)
420 def_space = config_get_boolean_ondemand(var_name, "enable space metrics", def_space);
422 int ddo_space = def_space,
423 ddo_inodes = def_space;
425 d->do_space = config_get_boolean_ondemand(var_name, "space usage", ddo_space);
426 d->do_inodes = config_get_boolean_ondemand(var_name, "inodes usage", ddo_inodes);
429 // don't show settings for this disk
430 d->do_space = CONFIG_ONDEMAND_NO;
431 d->do_inodes = CONFIG_ONDEMAND_NO;
440 // --------------------------------------------------------------------------
441 // Do performance metrics
443 if(d->do_io == CONFIG_ONDEMAND_YES || (d->do_io == CONFIG_ONDEMAND_ONDEMAND && (readsectors || writesectors))) {
444 d->do_io = CONFIG_ONDEMAND_YES;
446 st = rrdset_find_bytype(RRD_TYPE_DISK, disk);
448 st = rrdset_create(RRD_TYPE_DISK, disk, NULL, family, "disk.io", "Disk I/O Bandwidth", "kilobytes/s", 2000, update_every, RRDSET_TYPE_AREA);
450 rrddim_add(st, "reads", NULL, d->sector_size, 1024, RRDDIM_INCREMENTAL);
451 rrddim_add(st, "writes", NULL, d->sector_size * -1, 1024, RRDDIM_INCREMENTAL);
453 else rrdset_next_usec(st, dt);
455 last_readsectors = rrddim_set(st, "reads", readsectors);
456 last_writesectors = rrddim_set(st, "writes", writesectors);
460 // --------------------------------------------------------------------
462 if(d->do_ops == CONFIG_ONDEMAND_YES || (d->do_ops == CONFIG_ONDEMAND_ONDEMAND && (reads || writes))) {
463 d->do_ops = CONFIG_ONDEMAND_YES;
465 st = rrdset_find_bytype("disk_ops", disk);
467 st = rrdset_create("disk_ops", disk, NULL, family, "disk.ops", "Disk Completed I/O Operations", "operations/s", 2001, update_every, RRDSET_TYPE_LINE);
470 rrddim_add(st, "reads", NULL, 1, 1, RRDDIM_INCREMENTAL);
471 rrddim_add(st, "writes", NULL, -1, 1, RRDDIM_INCREMENTAL);
473 else rrdset_next_usec(st, dt);
475 last_reads = rrddim_set(st, "reads", reads);
476 last_writes = rrddim_set(st, "writes", writes);
480 // --------------------------------------------------------------------
482 if(d->do_qops == CONFIG_ONDEMAND_YES || (d->do_qops == CONFIG_ONDEMAND_ONDEMAND && queued_ios)) {
483 d->do_qops = CONFIG_ONDEMAND_YES;
485 st = rrdset_find_bytype("disk_qops", disk);
487 st = rrdset_create("disk_qops", disk, NULL, family, "disk.qops", "Disk Current I/O Operations", "operations", 2002, update_every, RRDSET_TYPE_LINE);
490 rrddim_add(st, "operations", NULL, 1, 1, RRDDIM_ABSOLUTE);
492 else rrdset_next_usec(st, dt);
494 rrddim_set(st, "operations", queued_ios);
498 // --------------------------------------------------------------------
500 if(d->do_backlog == CONFIG_ONDEMAND_YES || (d->do_backlog == CONFIG_ONDEMAND_ONDEMAND && backlog_ms)) {
501 d->do_backlog = CONFIG_ONDEMAND_YES;
503 st = rrdset_find_bytype("disk_backlog", disk);
505 st = rrdset_create("disk_backlog", disk, NULL, family, "disk.backlog", "Disk Backlog", "backlog (ms)", 2003, update_every, RRDSET_TYPE_AREA);
508 rrddim_add(st, "backlog", NULL, 1, 10, RRDDIM_INCREMENTAL);
510 else rrdset_next_usec(st, dt);
512 rrddim_set(st, "backlog", backlog_ms);
516 // --------------------------------------------------------------------
518 if(d->do_util == CONFIG_ONDEMAND_YES || (d->do_util == CONFIG_ONDEMAND_ONDEMAND && busy_ms)) {
519 d->do_util = CONFIG_ONDEMAND_YES;
521 st = rrdset_find_bytype("disk_util", disk);
523 st = rrdset_create("disk_util", disk, NULL, family, "disk.util", "Disk Utilization Time", "% of time working", 2004, update_every, RRDSET_TYPE_AREA);
526 rrddim_add(st, "utilization", NULL, 1, 10, RRDDIM_INCREMENTAL);
528 else rrdset_next_usec(st, dt);
530 last_busy_ms = rrddim_set(st, "utilization", busy_ms);
534 // --------------------------------------------------------------------
536 if(d->do_mops == CONFIG_ONDEMAND_YES || (d->do_mops == CONFIG_ONDEMAND_ONDEMAND && (mreads || mwrites))) {
537 d->do_mops = CONFIG_ONDEMAND_YES;
539 st = rrdset_find_bytype("disk_mops", disk);
541 st = rrdset_create("disk_mops", disk, NULL, family, "disk.mops", "Disk Merged Operations", "merged operations/s", 2021, update_every, RRDSET_TYPE_LINE);
544 rrddim_add(st, "reads", NULL, 1, 1, RRDDIM_INCREMENTAL);
545 rrddim_add(st, "writes", NULL, -1, 1, RRDDIM_INCREMENTAL);
547 else rrdset_next_usec(st, dt);
549 rrddim_set(st, "reads", mreads);
550 rrddim_set(st, "writes", mwrites);
554 // --------------------------------------------------------------------
556 if(d->do_iotime == CONFIG_ONDEMAND_YES || (d->do_iotime == CONFIG_ONDEMAND_ONDEMAND && (readms || writems))) {
557 d->do_iotime = CONFIG_ONDEMAND_YES;
559 st = rrdset_find_bytype("disk_iotime", disk);
561 st = rrdset_create("disk_iotime", disk, NULL, family, "disk.iotime", "Disk Total I/O Time", "milliseconds/s", 2022, update_every, RRDSET_TYPE_LINE);
564 rrddim_add(st, "reads", NULL, 1, 1, RRDDIM_INCREMENTAL);
565 rrddim_add(st, "writes", NULL, -1, 1, RRDDIM_INCREMENTAL);
567 else rrdset_next_usec(st, dt);
569 last_readms = rrddim_set(st, "reads", readms);
570 last_writems = rrddim_set(st, "writes", writems);
574 // --------------------------------------------------------------------
575 // calculate differential charts
576 // only if this is not the first time we run
579 if( (d->do_iotime == CONFIG_ONDEMAND_YES || (d->do_iotime == CONFIG_ONDEMAND_ONDEMAND && (readms || writems))) &&
580 (d->do_ops == CONFIG_ONDEMAND_YES || (d->do_ops == CONFIG_ONDEMAND_ONDEMAND && (reads || writes)))) {
581 st = rrdset_find_bytype("disk_await", disk);
583 st = rrdset_create("disk_await", disk, NULL, family, "disk.await", "Average Completed I/O Operation Time", "ms per operation", 2005, update_every, RRDSET_TYPE_LINE);
586 rrddim_add(st, "reads", NULL, 1, 1, RRDDIM_ABSOLUTE);
587 rrddim_add(st, "writes", NULL, -1, 1, RRDDIM_ABSOLUTE);
589 else rrdset_next_usec(st, dt);
591 rrddim_set(st, "reads", (reads - last_reads) ? (readms - last_readms) / (reads - last_reads) : 0);
592 rrddim_set(st, "writes", (writes - last_writes) ? (writems - last_writems) / (writes - last_writes) : 0);
596 if( (d->do_io == CONFIG_ONDEMAND_YES || (d->do_io == CONFIG_ONDEMAND_ONDEMAND && (readsectors || writesectors))) &&
597 (d->do_ops == CONFIG_ONDEMAND_YES || (d->do_ops == CONFIG_ONDEMAND_ONDEMAND && (reads || writes)))) {
598 st = rrdset_find_bytype("disk_avgsz", disk);
600 st = rrdset_create("disk_avgsz", disk, NULL, family, "disk.avgsz", "Average Completed I/O Operation Bandwidth", "kilobytes per operation", 2006, update_every, RRDSET_TYPE_AREA);
603 rrddim_add(st, "reads", NULL, d->sector_size, 1024, RRDDIM_ABSOLUTE);
604 rrddim_add(st, "writes", NULL, d->sector_size * -1, 1024, RRDDIM_ABSOLUTE);
606 else rrdset_next_usec(st, dt);
608 rrddim_set(st, "reads", (reads - last_reads) ? (readsectors - last_readsectors) / (reads - last_reads) : 0);
609 rrddim_set(st, "writes", (writes - last_writes) ? (writesectors - last_writesectors) / (writes - last_writes) : 0);
613 if( (d->do_util == CONFIG_ONDEMAND_YES || (d->do_util == CONFIG_ONDEMAND_ONDEMAND && busy_ms)) &&
614 (d->do_ops == CONFIG_ONDEMAND_YES || (d->do_ops == CONFIG_ONDEMAND_ONDEMAND && (reads || writes)))) {
615 st = rrdset_find_bytype("disk_svctm", disk);
617 st = rrdset_create("disk_svctm", disk, NULL, family, "disk.svctm", "Average Service Time", "ms per operation", 2007, update_every, RRDSET_TYPE_LINE);
620 rrddim_add(st, "svctm", NULL, 1, 1, RRDDIM_ABSOLUTE);
622 else rrdset_next_usec(st, dt);
624 rrddim_set(st, "svctm", ((reads - last_reads) + (writes - last_writes)) ? (busy_ms - last_busy_ms) / ((reads - last_reads) + (writes - last_writes)) : 0);
629 // --------------------------------------------------------------------------
632 if(d->mount_point && (d->do_space || d->do_inodes) ) {
633 // collect space metrics using statvfs
635 if (statvfs(d->mount_point, &buff_statvfs) < 0)
636 error("Failed statvfs() for '%s' (disk '%s')", d->mount_point, d->disk);
638 space_avail = buff_statvfs.f_bavail * buff_statvfs.f_bsize;
639 space_avail_root = (buff_statvfs.f_bfree - buff_statvfs.f_bavail) * buff_statvfs.f_bsize;
640 space_used = (buff_statvfs.f_blocks - buff_statvfs.f_bfree) * buff_statvfs.f_bsize;
642 inodes_avail = buff_statvfs.f_favail;
643 inodes_avail_root = buff_statvfs.f_ffree - buff_statvfs.f_favail;
644 inodes_used = buff_statvfs.f_files - buff_statvfs.f_ffree;
646 // verify we collected the metrics for the right disk.
647 // if not the mountpoint has changed.
649 if(stat(d->mount_point, &buff_stat) == -1)
650 error("Failed to stat() for '%s' (disk '%s')", d->mount_point, d->disk);
652 if(major(buff_stat.st_dev) == major && minor(buff_stat.st_dev) == minor) {
654 // --------------------------------------------------------------------------
656 if(d->do_space == CONFIG_ONDEMAND_YES || (d->do_space == CONFIG_ONDEMAND_ONDEMAND && (space_avail || space_avail_root || space_used))) {
657 st = rrdset_find_bytype("disk_space", disk);
659 st = rrdset_create("disk_space", disk, NULL, family, "disk.space", "Disk Space Usage", "GB", 2023, update_every, RRDSET_TYPE_STACKED);
662 rrddim_add(st, "avail", NULL, 1, 1024*1024*1024, RRDDIM_ABSOLUTE);
663 rrddim_add(st, "used" , NULL, 1, 1024*1024*1024, RRDDIM_ABSOLUTE);
664 rrddim_add(st, "reserved_for_root", "reserved for root", 1, 1024*1024*1024, RRDDIM_ABSOLUTE);
666 else rrdset_next_usec(st, dt);
668 rrddim_set(st, "avail", space_avail);
669 rrddim_set(st, "used", space_used);
670 rrddim_set(st, "reserved_for_root", space_avail_root);
674 // --------------------------------------------------------------------------
676 if(d->do_inodes == CONFIG_ONDEMAND_YES || (d->do_inodes == CONFIG_ONDEMAND_ONDEMAND && (inodes_avail || inodes_avail_root || inodes_used))) {
677 st = rrdset_find_bytype("disk_inodes", disk);
679 st = rrdset_create("disk_inodes", disk, NULL, family, "disk.inodes", "Disk Inodes Usage", "Inodes", 2024, update_every, RRDSET_TYPE_STACKED);
682 rrddim_add(st, "avail", NULL, 1, 1, RRDDIM_ABSOLUTE);
683 rrddim_add(st, "used" , NULL, 1, 1, RRDDIM_ABSOLUTE);
684 rrddim_add(st, "reserved_for_root", "reserved for root", 1, 1, RRDDIM_ABSOLUTE);
686 else rrdset_next_usec(st, dt);
688 rrddim_set(st, "avail", inodes_avail);
689 rrddim_set(st, "used", inodes_used);
690 rrddim_set(st, "reserved_for_root", inodes_avail_root);