diff --git a/template/zbx_export_templates.yaml b/template/zbx_export_templates.yaml
new file mode 100644
index 0000000..b406bec
--- /dev/null
+++ b/template/zbx_export_templates.yaml
@@ -0,0 +1,1190 @@
+zabbix_export:
+ version: '5.4'
+ date: '2021-12-17T19:11:47Z'
+ groups:
+ -
+ uuid: 7df96b18c230490a9a0a9e2307226338
+ name: Templates
+ templates:
+ -
+ uuid: 47d3c2ff933947368d4bee4b1184d69b
+ template: 'ZFS on Linux'
+ name: 'ZFS on Linux'
+ description: |
+ OpenZFS (formerly ZFS on Linux) template.
+
+ Home of the project: https://github.com/Cosium/zabbix_zfs-on-linux
+ groups:
+ -
+ name: Templates
+ items:
+ -
+ uuid: 4ecabdcb2104460f83c2ad5f18fd98f9
+ name: 'OpenZFS version'
+ key: 'vfs.file.contents[/sys/module/zfs/version]'
+ delay: 1h
+ history: 30d
+ trends: '0'
+ value_type: TEXT
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ triggers:
+ -
+ uuid: 041efc8ff1ac40ed99953a8929ed3ff3
+ expression: '(last(/ZFS on Linux/vfs.file.contents[/sys/module/zfs/version],#1)<>last(/ZFS on Linux/vfs.file.contents[/sys/module/zfs/version],#2))>0'
+ name: 'Version of OpenZFS is now {ITEM.VALUE} on {HOST.NAME}'
+ priority: INFO
+ -
+ uuid: 6b5fc935fe194d30badea64eaf3f317f
+ name: 'ZFS ARC stat arc_dnode_limit'
+ key: 'zfs.arcstats[arc_dnode_limit]'
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: 0b7d673688e3429d92aa349762729f83
+ name: 'ZFS ARC stat arc_meta_limit'
+ key: 'zfs.arcstats[arc_meta_limit]'
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: b0b5004458494182bf874545f8eb4e41
+ name: 'ZFS ARC stat arc_meta_used'
+ key: 'zfs.arcstats[arc_meta_used]'
+ history: 30d
+ units: B
+ description: 'arc_meta_used = hdr_size + metadata_size + dbuf_size + dnode_size + bonus_size'
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: 795ab079ba13461c872ee1d5c0295704
+ name: 'ZFS ARC stat bonus_size'
+ key: 'zfs.arcstats[bonus_size]'
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: 34a1fb79b2b64ce08ec5b377211372d7
+ name: 'ZFS ARC max size'
+ key: 'zfs.arcstats[c_max]'
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: d60b8e4f7a3d4bea972e7fe04c3bb5ca
+ name: 'ZFS ARC minimum size'
+ key: 'zfs.arcstats[c_min]'
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: 5e12dd98f1644f5a87cc5ded5d2e55d8
+ name: 'ZFS ARC stat data_size'
+ key: 'zfs.arcstats[data_size]'
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: 522a0f33c90047bab4f55b7214f51dea
+ name: 'ZFS ARC stat dbuf_size'
+ key: 'zfs.arcstats[dbuf_size]'
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: a3d10ebb57984a829f780a229fc9617c
+ name: 'ZFS ARC stat dnode_size'
+ key: 'zfs.arcstats[dnode_size]'
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: 184eef57aa034cf8acaf6a8f0e02395b
+ name: 'ZFS ARC stat hdr_size'
+ key: 'zfs.arcstats[hdr_size]'
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: cb7bcc02dfc14329a361e194145871c0
+ name: 'ZFS ARC stat hits'
+ key: 'zfs.arcstats[hits]'
+ history: 30d
+ preprocessing:
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: 8df273b6e0904c9ab140f8f13f6ca973
+ name: 'ZFS ARC stat metadata_size'
+ key: 'zfs.arcstats[metadata_size]'
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: dcd96743ed984018bff5d16105693606
+ name: 'ZFS ARC stat mfu_hits'
+ key: 'zfs.arcstats[mfu_hits]'
+ history: 30d
+ preprocessing:
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: 1015ebe8ef6f4626ae7967bf6358f1b3
+ name: 'ZFS ARC stat mfu_size'
+ key: 'zfs.arcstats[mfu_size]'
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: 1298a265a6784e63a166b768e1faf67e
+ name: 'ZFS ARC stat misses'
+ key: 'zfs.arcstats[misses]'
+ history: 30d
+ preprocessing:
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: c85d0e9e1b464748a20148e2f2507609
+ name: 'ZFS ARC stat mru_hits'
+ key: 'zfs.arcstats[mru_hits]'
+ history: 30d
+ preprocessing:
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: 50954c7b43d745d09990011df4d7448c
+ name: 'ZFS ARC stat mru_size'
+ key: 'zfs.arcstats[mru_size]'
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: cd225da5a02346a58dbe0c9808a628eb
+ name: 'ZFS ARC current size'
+ key: 'zfs.arcstats[size]'
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: 8c8129f814fe47ae9c71e636599acd90
+ name: 'ZFS ARC Cache Hit Ratio'
+ type: CALCULATED
+ key: zfs.arcstats_hit_ratio
+ history: 30d
+ value_type: FLOAT
+ units: '%'
+ params: '100*(last(//zfs.arcstats[hits])/(last(//zfs.arcstats[hits])+count(//zfs.arcstats[hits],#1,,"0")+last(//zfs.arcstats[misses])))'
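+ # Reading of the formula above (a sketch, not authoritative): hit ratio = 100 * hits / (hits + misses);
+ # the count(//zfs.arcstats[hits],#1,,"0") term appears to add 1 to the denominator only when the last
+ # hits value is 0, so the expression never divides by zero on an idle cache.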
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: e644390a9c9743f2844dbc9ef8806a8f
+ name: 'ZFS ARC total read'
+ type: CALCULATED
+ key: zfs.arcstats_total_read
+ history: 30d
+ units: B
+ params: 'last(//zfs.arcstats[hits])+last(//zfs.arcstats[misses])'
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: ebfb742fb123451c9632d12bde0957c4
+ name: 'ZFS parameter zfs_arc_dnode_limit_percent'
+ key: 'zfs.get.param[zfs_arc_dnode_limit_percent]'
+ delay: 1h
+ history: 30d
+ units: '%'
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ -
+ uuid: 18d8b817852848929f4e0b421cb21532
+ name: 'ZFS parameter zfs_arc_meta_limit_percent'
+ key: 'zfs.get.param[zfs_arc_meta_limit_percent]'
+ delay: 1h
+ history: 30d
+ units: '%'
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS ARC'
+ discovery_rules:
+ -
+ uuid: a82a1b7067904fecb06bcf5b88457192
+ name: 'Zfs Dataset discovery'
+ key: zfs.fileset.discovery
+ delay: 30m
+ filter:
+ evaltype: AND
+ lifetime: 2d
+ description: 'Discovers ZFS datasets.'
+ item_prototypes:
+ -
+ uuid: 4d7c96bd10b44754b2c8790b90c12046
+ name: 'Zfs dataset {#FILESETNAME} compressratio'
+ key: 'zfs.get.compressratio[{#FILESETNAME}]'
+ delay: 30m
+ history: 30d
+ value_type: FLOAT
+ units: '%'
+ preprocessing:
+ -
+ type: MULTIPLIER
+ parameters:
+ - '100'
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS dataset'
+ -
+ uuid: e9df401ae71e45c8a3fdbbd146cdd57b
+ name: 'Zfs dataset {#FILESETNAME} available'
+ key: 'zfs.get.fsinfo[{#FILESETNAME},available]'
+ delay: 5m
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS dataset'
+ -
+ uuid: ed63bb6942364281bcea80c54b6f8fcc
+ name: 'Zfs dataset {#FILESETNAME} referenced'
+ key: 'zfs.get.fsinfo[{#FILESETNAME},referenced]'
+ delay: 5m
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS dataset'
+ -
+ uuid: 7ef4530ddf464defb2a64ce674a82c8c
+ name: 'Zfs dataset {#FILESETNAME} usedbychildren'
+ key: 'zfs.get.fsinfo[{#FILESETNAME},usedbychildren]'
+ delay: 5m
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS dataset'
+ -
+ uuid: 3c7f982147be49629c78aa67a1d8d56e
+ name: 'Zfs dataset {#FILESETNAME} usedbydataset'
+ key: 'zfs.get.fsinfo[{#FILESETNAME},usedbydataset]'
+ delay: 1h
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS dataset'
+ -
+ uuid: cc0e02c58b28443eb78eeacc81095966
+ name: 'Zfs dataset {#FILESETNAME} usedbysnapshots'
+ key: 'zfs.get.fsinfo[{#FILESETNAME},usedbysnapshots]'
+ delay: 5m
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS dataset'
+ -
+ uuid: a54feffafdb34ba08f1474ab4710088d
+ name: 'Zfs dataset {#FILESETNAME} used'
+ key: 'zfs.get.fsinfo[{#FILESETNAME},used]'
+ delay: 5m
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS dataset'
+ trigger_prototypes:
+ -
+ uuid: cc0b0756d2fe42779b62adf63e38681d
+ expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) ) ) > ({$ZFS_AVERAGE_ALERT}/100)'
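+ # used / (available + used) is the fill ratio of the dataset; the trigger fires when it exceeds
+ # the {$ZFS_AVERAGE_ALERT} macro, interpreted as a percentage.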
+ name: 'More than {$ZFS_AVERAGE_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}'
+ priority: AVERAGE
+ dependencies:
+ -
+ name: 'More than {$ZFS_HIGH_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}'
+ expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) ) ) > ({$ZFS_HIGH_ALERT}/100)'
+ -
+ uuid: 8bfb157ac42845c0b340e28ae510833c
+ expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) ) ) > ({$ZFS_DISASTER_ALERT}/100)'
+ name: 'More than {$ZFS_DISASTER_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}'
+ priority: DISASTER
+ -
+ uuid: 9b592a2cba084bec9ceb4f82367e758b
+ expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) ) ) > ({$ZFS_HIGH_ALERT}/100)'
+ name: 'More than {$ZFS_HIGH_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}'
+ priority: HIGH
+ dependencies:
+ -
+ name: 'More than {$ZFS_DISASTER_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}'
+ expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) ) ) > ({$ZFS_DISASTER_ALERT}/100)'
+ graph_prototypes:
+ -
+ uuid: 5213684719404718b8956d6faf0e6b71
+ name: 'ZFS dataset {#FILESETNAME} usage'
+ type: STACKED
+ ymin_type_1: FIXED
+ graph_items:
+ -
+ sortorder: '1'
+ color: 3333FF
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.get.fsinfo[{#FILESETNAME},usedbydataset]'
+ -
+ sortorder: '2'
+ color: FF33FF
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.get.fsinfo[{#FILESETNAME},usedbysnapshots]'
+ -
+ sortorder: '3'
+ color: FF3333
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.get.fsinfo[{#FILESETNAME},usedbychildren]'
+ -
+ sortorder: '4'
+ color: 33FF33
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.get.fsinfo[{#FILESETNAME},available]'
+ -
+ uuid: 08039e570bd7417294d043f4f7bf960f
+ name: 'Zfs Pool discovery'
+ key: zfs.pool.discovery
+ delay: 1h
+ lifetime: 3d
+ item_prototypes:
+ -
+ uuid: 9f889e9529934fdfbf47a29de32468f0
+ name: 'Zpool {#POOLNAME} available'
+ key: 'zfs.get.fsinfo[{#POOLNAME},available]'
+ delay: 5m
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS zpool'
+ -
+ uuid: 1993c04b00bc428bbdf43c909967afd2
+ name: 'Zpool {#POOLNAME} used'
+ key: 'zfs.get.fsinfo[{#POOLNAME},used]'
+ delay: 5m
+ history: 30d
+ units: B
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS zpool'
+ -
+ uuid: 472e21c79759476984cbf4ce9f12580a
+ name: 'Zpool {#POOLNAME} Health'
+ key: 'zfs.zpool.health[{#POOLNAME}]'
+ delay: 5m
+ history: 30d
+ trends: '0'
+ value_type: TEXT
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS zpool'
+ trigger_prototypes:
+ -
+ uuid: 4855fe0ed61b444daad73aa6090b46af
+ expression: 'find(/ZFS on Linux/zfs.zpool.health[{#POOLNAME}],,"like","ONLINE")=0'
+ name: 'Zpool {#POOLNAME} is {ITEM.VALUE} on {HOST.NAME}'
+ priority: HIGH
+ -
+ uuid: 3207e6ffd0fa40c4a1d6e607e4e12375
+ name: 'Zpool {#POOLNAME} read throughput'
+ key: 'zfs.zpool.iostat.nread[{#POOLNAME}]'
+ history: 30d
+ value_type: FLOAT
+ units: Bps
+ preprocessing:
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS zpool'
+ -
+ uuid: 78b418605f9b45b29bbd33b93a6b2e82
+ name: 'Zpool {#POOLNAME} write throughput'
+ key: 'zfs.zpool.iostat.nwritten[{#POOLNAME}]'
+ history: 30d
+ value_type: FLOAT
+ units: Bps
+ preprocessing:
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS zpool'
+ -
+ uuid: 6b35bf06bf4542318a7999ac4d7952f7
+ name: 'Zpool {#POOLNAME} IOPS: reads'
+ key: 'zfs.zpool.iostat.reads[{#POOLNAME}]'
+ history: 30d
+ value_type: FLOAT
+ units: iops
+ preprocessing:
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS zpool'
+ -
+ uuid: b99d5ab922324536bc2e013ac1fca306
+ name: 'Zpool {#POOLNAME} IOPS: writes'
+ key: 'zfs.zpool.iostat.writes[{#POOLNAME}]'
+ history: 30d
+ value_type: FLOAT
+ units: iops
+ preprocessing:
+ -
+ type: CHANGE_PER_SECOND
+ parameters:
+ - ''
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS zpool'
+ -
+ uuid: 867075d6eb1743069be868007472192b
+ name: 'Zpool {#POOLNAME} scrub status'
+ key: 'zfs.zpool.scrub[{#POOLNAME}]'
+ delay: 5m
+ history: 30d
+ description: |
+ Detects whether the pool is currently scrubbing itself.
+
+ A scrub is not a bad thing in itself, but it slows down the entire pool; on a production server it should be stopped during business hours if it causes a noticeable slowdown.
+ valuemap:
+ name: 'ZFS zpool scrub status'
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS zpool'
+ trigger_prototypes:
+ -
+ uuid: 792be07c555c4ae6a9819d69d332357b
+ expression: 'max(/ZFS on Linux/zfs.zpool.scrub[{#POOLNAME}],12h)=0'
+ name: 'Zpool {#POOLNAME} is scrubbing for more than 12h on {HOST.NAME}'
+ priority: AVERAGE
+ dependencies:
+ -
+ name: 'Zpool {#POOLNAME} is scrubbing for more than 24h on {HOST.NAME}'
+ expression: 'max(/ZFS on Linux/zfs.zpool.scrub[{#POOLNAME}],24h)=0'
+ -
+ uuid: 04cac9633f164227b1f9b2fe26923609
+ expression: 'max(/ZFS on Linux/zfs.zpool.scrub[{#POOLNAME}],24h)=0'
+ name: 'Zpool {#POOLNAME} is scrubbing for more than 24h on {HOST.NAME}'
+ priority: HIGH
+ trigger_prototypes:
+ -
+ uuid: 82fce07b30114c7e8645689317e2c1b4
+ expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) ) ) > ({$ZPOOL_AVERAGE_ALERT}/100)'
+ name: 'More than {$ZPOOL_AVERAGE_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}'
+ priority: AVERAGE
+ dependencies:
+ -
+ name: 'More than {$ZPOOL_HIGH_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}'
+ expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) ) ) > ({$ZPOOL_HIGH_ALERT}/100)'
+ -
+ uuid: ab56a2a8eb3d4b4294707e2a8aa94e22
+ expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) ) ) > ({$ZPOOL_DISASTER_ALERT}/100)'
+ name: 'More than {$ZPOOL_DISASTER_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}'
+ priority: DISASTER
+ -
+ uuid: c9c22e6617af4ad09970d2988c4a7fe7
+ expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) ) ) > ({$ZPOOL_HIGH_ALERT}/100)'
+ name: 'More than {$ZPOOL_HIGH_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}'
+ priority: HIGH
+ dependencies:
+ -
+ name: 'More than {$ZPOOL_DISASTER_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}'
+ expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) ) ) > ({$ZPOOL_DISASTER_ALERT}/100)'
+ graph_prototypes:
+ -
+ uuid: 926abae3e18144f0899711fdfd16e808
+ name: 'ZFS zpool {#POOLNAME} IOPS'
+ ymin_type_1: FIXED
+ graph_items:
+ -
+ sortorder: '1'
+ color: 5C6BC0
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.zpool.iostat.reads[{#POOLNAME}]'
+ -
+ sortorder: '2'
+ color: EF5350
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.zpool.iostat.writes[{#POOLNAME}]'
+ -
+ uuid: 63ae2d7acd4d4d15b4c5e7a5a90a063a
+ name: 'ZFS zpool {#POOLNAME} space usage'
+ type: STACKED
+ graph_items:
+ -
+ sortorder: '1'
+ color: 00EE00
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.get.fsinfo[{#POOLNAME},available]'
+ -
+ sortorder: '2'
+ color: EE0000
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.get.fsinfo[{#POOLNAME},used]'
+ -
+ uuid: aa35d164bacd45c5983fd2856781da88
+ name: 'ZFS zpool {#POOLNAME} throughput'
+ ymin_type_1: FIXED
+ graph_items:
+ -
+ sortorder: '1'
+ color: 5C6BC0
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.zpool.iostat.nread[{#POOLNAME}]'
+ -
+ sortorder: '2'
+ drawtype: BOLD_LINE
+ color: EF5350
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.zpool.iostat.nwritten[{#POOLNAME}]'
+ -
+ uuid: 6c96e092f08f4b98af9a377782180689
+ name: 'Zfs vdev discovery'
+ key: zfs.vdev.discovery
+ delay: 1h
+ lifetime: 3d
+ item_prototypes:
+ -
+ uuid: 9f63161726774a28905c87aac92cf1e9
+ name: 'vdev {#VDEV}: CHECKSUM error counter'
+ key: 'zfs.vdev.error_counter.cksum[{#VDEV}]'
+ delay: 5m
+ history: 30d
+ description: |
+ This device has experienced an unrecoverable error. Determine if the device needs to be replaced.
+
+ If yes, use 'zpool replace' to replace the device.
+
+ If not, clear the error with 'zpool clear'.
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS vdev'
+ -
+ uuid: 48a02eb060fd4b73bdde08a2795c4717
+ name: 'vdev {#VDEV}: READ error counter'
+ key: 'zfs.vdev.error_counter.read[{#VDEV}]'
+ delay: 5m
+ history: 30d
+ description: |
+ This device has experienced an unrecoverable error. Determine if the device needs to be replaced.
+
+ If yes, use 'zpool replace' to replace the device.
+
+ If not, clear the error with 'zpool clear'.
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS vdev'
+ -
+ uuid: 15953ba38fde4b8c8681955a27d9204a
+ name: 'vdev {#VDEV}: WRITE error counter'
+ key: 'zfs.vdev.error_counter.write[{#VDEV}]'
+ delay: 5m
+ history: 30d
+ description: |
+ This device has experienced an unrecoverable error. Determine if the device needs to be replaced.
+
+ If yes, use 'zpool replace' to replace the device.
+
+ If not, clear the error with 'zpool clear'.
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS vdev'
+ -
+ uuid: 3e64a59d2a154a89a3bc43483942302d
+ name: 'vdev {#VDEV}: total number of errors'
+ type: CALCULATED
+ key: 'zfs.vdev.error_total[{#VDEV}]'
+ delay: 5m
+ history: 30d
+ params: 'last(//zfs.vdev.error_counter.cksum[{#VDEV}])+last(//zfs.vdev.error_counter.read[{#VDEV}])+last(//zfs.vdev.error_counter.write[{#VDEV}])'
+ description: |
+ This device has experienced an unrecoverable error. Determine if the device needs to be replaced.
+
+ If yes, use 'zpool replace' to replace the device.
+
+ If not, clear the error with 'zpool clear'.
+ tags:
+ -
+ tag: Application
+ value: ZFS
+ -
+ tag: Application
+ value: 'ZFS vdev'
+ trigger_prototypes:
+ -
+ uuid: 44f7667c275d4a04891bc4f1d00e668b
+ expression: 'last(/ZFS on Linux/zfs.vdev.error_total[{#VDEV}])>0'
+ name: 'vdev {#VDEV} has encountered {ITEM.VALUE} errors on {HOST.NAME}'
+ priority: HIGH
+ description: |
+ This device has experienced an unrecoverable error. Determine if the device needs to be replaced.
+
+ If yes, use 'zpool replace' to replace the device.
+
+ If not, clear the error with 'zpool clear'.
+
+ You may also run 'zpool scrub' to check whether other undetected errors are present on this vdev.
+ graph_prototypes:
+ -
+ uuid: ab78dba991ba4311a04740fc69b30381
+ name: 'ZFS vdev {#VDEV} errors'
+ ymin_type_1: FIXED
+ graph_items:
+ -
+ color: CC00CC
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.vdev.error_counter.cksum[{#VDEV}]'
+ -
+ sortorder: '1'
+ color: F63100
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.vdev.error_counter.read[{#VDEV}]'
+ -
+ sortorder: '2'
+ color: BBBB00
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.vdev.error_counter.write[{#VDEV}]'
+ macros:
+ -
+ macro: '{$ZFS_ARC_META_ALERT}'
+ value: '90'
+ -
+ macro: '{$ZFS_AVERAGE_ALERT}'
+ value: '90'
+ -
+ macro: '{$ZFS_DISASTER_ALERT}'
+ value: '99'
+ -
+ macro: '{$ZFS_HIGH_ALERT}'
+ value: '95'
+ -
+ macro: '{$ZPOOL_AVERAGE_ALERT}'
+ value: '85'
+ -
+ macro: '{$ZPOOL_DISASTER_ALERT}'
+ value: '99'
+ -
+ macro: '{$ZPOOL_HIGH_ALERT}'
+ value: '90'
+ dashboards:
+ -
+ uuid: 180e8c0dc05946e4b8552e3a01df347f
+ name: 'ZFS ARC'
+ pages:
+ -
+ widgets:
+ -
+ type: GRAPH_CLASSIC
+ width: '24'
+ height: '5'
+ fields:
+ -
+ type: INTEGER
+ name: source_type
+ value: '0'
+ -
+ type: GRAPH
+ name: graphid
+ value:
+ name: 'ZFS ARC memory usage'
+ host: 'ZFS on Linux'
+ -
+ type: GRAPH_CLASSIC
+ 'y': '5'
+ width: '24'
+ height: '5'
+ fields:
+ -
+ type: INTEGER
+ name: source_type
+ value: '0'
+ -
+ type: GRAPH
+ name: graphid
+ value:
+ name: 'ZFS ARC Cache Hit Ratio'
+ host: 'ZFS on Linux'
+ -
+ type: GRAPH_CLASSIC
+ 'y': '10'
+ width: '24'
+ height: '5'
+ fields:
+ -
+ type: INTEGER
+ name: source_type
+ value: '0'
+ -
+ type: GRAPH
+ name: graphid
+ value:
+ name: 'ZFS ARC breakdown'
+ host: 'ZFS on Linux'
+ -
+ type: GRAPH_CLASSIC
+ 'y': '15'
+ width: '24'
+ height: '5'
+ fields:
+ -
+ type: INTEGER
+ name: source_type
+ value: '0'
+ -
+ type: GRAPH
+ name: graphid
+ value:
+ name: 'ZFS ARC arc_meta_used breakdown'
+ host: 'ZFS on Linux'
+ -
+ uuid: 442dda5c36c04fc78c3a73eacf26bc7f
+ name: 'ZFS zpools'
+ pages:
+ -
+ widgets:
+ -
+ type: GRAPH_PROTOTYPE
+ width: '8'
+ height: '5'
+ fields:
+ -
+ type: INTEGER
+ name: source_type
+ value: '2'
+ -
+ type: INTEGER
+ name: columns
+ value: '1'
+ -
+ type: INTEGER
+ name: rows
+ value: '1'
+ -
+ type: GRAPH_PROTOTYPE
+ name: graphid
+ value:
+ name: 'ZFS zpool {#POOLNAME} IOPS'
+ host: 'ZFS on Linux'
+ -
+ type: GRAPH_PROTOTYPE
+ x: '8'
+ width: '8'
+ height: '5'
+ fields:
+ -
+ type: INTEGER
+ name: source_type
+ value: '2'
+ -
+ type: INTEGER
+ name: columns
+ value: '1'
+ -
+ type: INTEGER
+ name: rows
+ value: '1'
+ -
+ type: GRAPH_PROTOTYPE
+ name: graphid
+ value:
+ name: 'ZFS zpool {#POOLNAME} throughput'
+ host: 'ZFS on Linux'
+ -
+ type: GRAPH_PROTOTYPE
+ x: '16'
+ width: '8'
+ height: '5'
+ fields:
+ -
+ type: INTEGER
+ name: source_type
+ value: '2'
+ -
+ type: INTEGER
+ name: columns
+ value: '1'
+ -
+ type: INTEGER
+ name: rows
+ value: '1'
+ -
+ type: GRAPH_PROTOTYPE
+ name: graphid
+ value:
+ name: 'ZFS zpool {#POOLNAME} space usage'
+ host: 'ZFS on Linux'
+ valuemaps:
+ -
+ uuid: d1d7b0898d06481dbcec8b02d915fb1c
+ name: 'ZFS zpool scrub status'
+ mappings:
+ -
+ value: '0'
+ newvalue: 'Scrub in progress'
+ -
+ value: '1'
+ newvalue: 'No scrub in progress'
+ triggers:
+ -
+ uuid: 1daac44b853b4b6da767c9c3af96b774
+ expression: 'last(/ZFS on Linux/zfs.arcstats[dnode_size])>(last(/ZFS on Linux/zfs.arcstats[arc_dnode_limit])*0.9)'
+ name: 'ZFS ARC dnode size > 90% dnode max size on {HOST.NAME}'
+ priority: HIGH
+ -
+ uuid: 69c18b7ceb3d4da2bda0e05f9a12453f
+ expression: 'last(/ZFS on Linux/zfs.arcstats[arc_meta_used])>(last(/ZFS on Linux/zfs.arcstats[arc_meta_limit])*0.01*{$ZFS_ARC_META_ALERT})'
+ name: 'ZFS ARC meta size > {$ZFS_ARC_META_ALERT}% meta max size on {HOST.NAME}'
+ priority: HIGH
+ graphs:
+ -
+ uuid: 1510111dc5414e6d80a5230ce6a81f1d
+ name: 'ZFS ARC arc_meta_used breakdown'
+ type: STACKED
+ ymin_type_1: FIXED
+ graph_items:
+ -
+ color: 3333FF
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.arcstats[metadata_size]'
+ -
+ sortorder: '1'
+ color: 00EE00
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.arcstats[dnode_size]'
+ -
+ sortorder: '2'
+ color: EE0000
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.arcstats[hdr_size]'
+ -
+ sortorder: '3'
+ color: EEEE00
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.arcstats[dbuf_size]'
+ -
+ sortorder: '4'
+ color: EE00EE
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.arcstats[bonus_size]'
+ -
+ uuid: 203eeeaadc9444ccbbc31cf043e836cb
+ name: 'ZFS ARC breakdown'
+ type: STACKED
+ ymin_type_1: FIXED
+ graph_items:
+ -
+ color: 3333FF
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.arcstats[data_size]'
+ -
+ sortorder: '1'
+ color: 00AA00
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.arcstats[metadata_size]'
+ -
+ sortorder: '2'
+ color: EE0000
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.arcstats[dnode_size]'
+ -
+ sortorder: '3'
+ color: CCCC00
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.arcstats[hdr_size]'
+ -
+ sortorder: '4'
+ color: A54F10
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.arcstats[dbuf_size]'
+ -
+ sortorder: '5'
+ color: '888888'
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.arcstats[bonus_size]'
+ -
+ uuid: 4c493303be4a45a7a96d3ef7246843c0
+ name: 'ZFS ARC Cache Hit Ratio'
+ ymin_type_1: FIXED
+ ymax_type_1: FIXED
+ graph_items:
+ -
+ color: 00CC00
+ item:
+ host: 'ZFS on Linux'
+ key: zfs.arcstats_hit_ratio
+ -
+ uuid: b2fce9515a7d4218a5e9015f212c2a60
+ name: 'ZFS ARC memory usage'
+ ymin_type_1: FIXED
+ ymax_type_1: ITEM
+ ymax_item_1:
+ host: 'ZFS on Linux'
+ key: 'zfs.arcstats[c_max]'
+ graph_items:
+ -
+ drawtype: GRADIENT_LINE
+ color: 0000EE
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.arcstats[size]'
+ -
+ sortorder: '1'
+ drawtype: BOLD_LINE
+ color: DD0000
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.arcstats[c_max]'
+ -
+ sortorder: '2'
+ color: 00BB00
+ item:
+ host: 'ZFS on Linux'
+ key: 'zfs.arcstats[c_min]'
diff --git a/template/zol_template.xml b/template/zol_template.xml
deleted file mode 100644
index 4ccfb26..0000000
--- a/template/zol_template.xml
+++ /dev/null
@@ -1,3978 +0,0 @@
- [deleted file body, ~3,978 lines: the legacy Zabbix 4.0 XML export (2021-01-04T21:27:59Z) of the 'ZFS on Linux' template, defining the template's applications, items, dataset/pool/vdev discovery rules, triggers, graphs, screens, macros and the 'ZFS zpool scrub status' value map in the older XML format, superseded by the YAML export above; the XML markup did not survive in this extract, so the body is not reproduced here]
diff --git a/userparameters/ZoL_with_sudo.conf b/userparameters/ZoL_with_sudo.conf
index 9cd46ec..606959b 100644
--- a/userparameters/ZoL_with_sudo.conf
+++ b/userparameters/ZoL_with_sudo.conf
@@ -1,9 +1,19 @@
# ZFS discovery and configuration
# original template from pbergbolt (source = https://www.zabbix.com/forum/showthread.php?t=43347), modified by Slash
-
# pool discovery
UserParameter=zfs.pool.discovery,/usr/bin/sudo /sbin/zpool list -H -o name | sed -e '$ ! s/\(.*\)/{"{#POOLNAME}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#POOLNAME}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
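+# Expected output (low-level discovery JSON) for two hypothetical pools named "tank" and "backup":
+# { "data":[{"{#POOLNAME}":"tank"},{"{#POOLNAME}":"backup"}]}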
+
+# pool IOPS reads
+UserParameter=zfs.zpool.iostat.reads[*],/usr/bin/sudo /sbin/zpool iostat $1 -H -p 1 2 | sed 's/\t/-/g' | cut -d '-' -f 4 | tail -n 1
+# pool IOPS writes
+UserParameter=zfs.zpool.iostat.writes[*],/usr/bin/sudo /sbin/zpool iostat $1 -H -p 1 2 | sed 's/\t/-/g' | cut -d '-' -f 5 | tail -n 1
+# pool read throughput
+UserParameter=zfs.zpool.iostat.nread[*],/usr/bin/sudo /sbin/zpool iostat $1 -H -p 1 2 | sed 's/\t/-/g' | cut -d '-' -f 6 | tail -n 1
+# pool write throughput
+UserParameter=zfs.zpool.iostat.nwritten[*],/usr/bin/sudo /sbin/zpool iostat $1 -H -p 1 2 | sed 's/\t/-/g' | cut -d '-' -f 7 | tail -n 1
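+#
+# Sketch of what the four pipelines above assume about `zpool iostat <pool> -H -p 1 2` output: two
+# tab-separated samples with the columns
+#   name  alloc  free  read-ops  write-ops  read-bandwidth  write-bandwidth
+# `tail -n 1` keeps only the second sample (the first is the since-boot average), and after
+# `sed 's/\t/-/g'` the `cut` fields 4, 5, 6 and 7 select read ops, write ops, read bandwidth and
+# write bandwidth respectively.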
+
# dataset discovery, called "fileset" in the zabbix template for legacy reasons
UserParameter=zfs.fileset.discovery,/usr/bin/sudo /sbin/zfs list -H -o name | sed -e '$ ! s/\(.*\)/{"{#FILESETNAME}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#FILESETNAME}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
# vdev discovery
diff --git a/userparameters/ZoL_without_sudo.conf b/userparameters/ZoL_without_sudo.conf
index 5611789..ba75d31 100644
--- a/userparameters/ZoL_without_sudo.conf
+++ b/userparameters/ZoL_without_sudo.conf
@@ -1,9 +1,18 @@
# ZFS discovery and configuration
# original template from pbergbolt (source = https://www.zabbix.com/forum/showthread.php?t=43347), modified by Slash
-
# pool discovery
UserParameter=zfs.pool.discovery,/sbin/zpool list -H -o name | sed -e '$ ! s/\(.*\)/{"{#POOLNAME}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#POOLNAME}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+
+# pool IOPS reads
+UserParameter=zfs.zpool.iostat.reads[*],/sbin/zpool iostat $1 -H -p 1 2 | sed 's/\t/-/g' | cut -d '-' -f 4 | tail -n 1
+# pool IOPS writes
+UserParameter=zfs.zpool.iostat.writes[*],/sbin/zpool iostat $1 -H -p 1 2 | sed 's/\t/-/g' | cut -d '-' -f 5 | tail -n 1
+# pool read throughput
+UserParameter=zfs.zpool.iostat.nread[*],/sbin/zpool iostat $1 -H -p 1 2 | sed 's/\t/-/g' | cut -d '-' -f 6 | tail -n 1
+# pool write throughput
+UserParameter=zfs.zpool.iostat.nwritten[*],/sbin/zpool iostat $1 -H -p 1 2 | sed 's/\t/-/g' | cut -d '-' -f 7 | tail -n 1
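+# As in ZoL_with_sudo.conf, fields 4-7 of the second `zpool iostat -H -p 1 2` sample are assumed to be
+# read ops, write ops, read bandwidth and write bandwidth; `tail -n 1` drops the since-boot average line.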
+
# dataset discovery, called "fileset" in the zabbix template for legacy reasons
UserParameter=zfs.fileset.discovery,/sbin/zfs list -H -o name | sed -e '$ ! s/\(.*\)/{"{#FILESETNAME}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#FILESETNAME}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
# vdev discovery