2020-07-12 02:35:58 +03:00
|
|
|
#!/usr/bin/env @PYTHON_SHEBANG@
|
2013-06-07 08:31:18 +04:00
|
|
|
#
|
|
|
|
# Print out ZFS ARC Statistics exported via kstat(1)
|
2020-05-04 11:00:59 +03:00
|
|
|
# For a definition of fields, or usage, use arcstat -v
|
2013-06-07 08:31:18 +04:00
|
|
|
#
|
2020-05-04 11:00:59 +03:00
|
|
|
# This script was originally a fork of the original arcstat.pl (0.1)
|
|
|
|
# by Neelakanth Nadgir, originally published on his Sun blog on
|
2013-06-07 08:31:18 +04:00
|
|
|
# 09/18/2007
|
|
|
|
# http://blogs.sun.com/realneel/entry/zfs_arc_statistics
|
|
|
|
#
|
2020-05-04 11:00:59 +03:00
|
|
|
# A new version aimed to improve upon the original by adding features
|
|
|
|
# and fixing bugs as needed. This version was maintained by Mike
|
|
|
|
# Harsch and was hosted in a public open source repository:
|
2013-06-07 08:31:18 +04:00
|
|
|
# http://github.com/mharsch/arcstat
|
|
|
|
#
|
2020-05-04 11:00:59 +03:00
|
|
|
# but has since moved to the illumos-gate repository.
|
|
|
|
#
|
|
|
|
# This Python port was written by John Hixson for FreeNAS, introduced
|
|
|
|
# in commit e2c29f:
|
|
|
|
# https://github.com/freenas/freenas
|
|
|
|
#
|
|
|
|
# and has been improved by many people since.
|
2013-06-07 08:31:18 +04:00
|
|
|
#
|
|
|
|
# CDDL HEADER START
|
|
|
|
#
|
|
|
|
# The contents of this file are subject to the terms of the
|
|
|
|
# Common Development and Distribution License, Version 1.0 only
|
|
|
|
# (the "License"). You may not use this file except in compliance
|
|
|
|
# with the License.
|
|
|
|
#
|
|
|
|
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
2022-07-12 00:16:13 +03:00
|
|
|
# or https://opensource.org/licenses/CDDL-1.0.
|
2013-06-07 08:31:18 +04:00
|
|
|
# See the License for the specific language governing permissions
|
|
|
|
# and limitations under the License.
|
|
|
|
#
|
|
|
|
# When distributing Covered Code, include this CDDL HEADER in each
|
|
|
|
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
|
|
|
# If applicable, add the following below this CDDL HEADER, with the
|
|
|
|
# fields enclosed by brackets "[]" replaced with your own identifying
|
|
|
|
# information: Portions Copyright [yyyy] [name of copyright owner]
|
|
|
|
#
|
|
|
|
# CDDL HEADER END
|
|
|
|
#
|
|
|
|
#
|
|
|
|
# Fields have a fixed width. Every interval, we fill the "v"
|
|
|
|
# hash with its corresponding value (v[field]=value) using calculate().
|
|
|
|
# @hdr is the array of fields that needs to be printed, so we
|
|
|
|
# just iterate over this array and print the values using our pretty printer.
|
|
|
|
#
|
2022-01-13 19:51:12 +03:00
|
|
|
# This script must remain compatible with Python 3.6+.
|
pyzfs: python3 support (build system)
Almost all of the Python code in the respository has been updated
to be compatibile with Python 2.6, Python 3.4, or newer. The only
exceptions are arc_summery3.py which requires Python 3, and pyzfs
which requires at least Python 2.7. This allows us to maintain a
single version of the code and support most default versions of
python. This change does the following:
* Sets the default shebang for all Python scripts to python3. If
only Python 2 is available, then at install time scripts which
are compatible with Python 2 will have their shebangs replaced
with /usr/bin/python. This is done for compatibility until
Python 2 goes end of life. Since only the installed versions
are changed this means Python 3 must be installed on the system
for test-runner when testing in-tree.
* Added --with-python=<2|3|3.4,etc> configure option which sets
the PYTHON environment variable to target a specific python
version. By default the newest installed version of Python
will be used or the preferred distribution version when
creating pacakges.
* Fixed --enable-pyzfs configure checks so they are run when
--enable-pyzfs=check and --enable-pyzfs=yes.
* Enabled pyzfs for Python 3.4 and newer, which is now supported.
* Renamed pyzfs package to python<VERSION>-pyzfs and updated to
install in the appropriate site location. For example, when
building with --with-python=3.4 a python34-pyzfs will be
created which installs in /usr/lib/python3.4/site-packages/.
* Renamed the following python scripts according to the Fedora
guidance for packaging utilities in /bin
- dbufstat.py -> dbufstat
- arcstat.py -> arcstat
- arc_summary.py -> arc_summary
- arc_summary3.py -> arc_summary3
* Updated python-cffi package name. On CentOS 6, CentOS 7, and
Amazon Linux it's called python-cffi, not python2-cffi. For
Python3 it's called python3-cffi or python3x-cffi.
* Install one version of arc_summary. Depending on the version
of Python available install either arc_summary2 or arc_summary3
as arc_summary. The user output is only slightly different.
Reviewed-by: John Ramsden <johnramsden@riseup.net>
Reviewed-by: Neal Gompa <ngompa@datto.com>
Reviewed-by: loli10K <ezomori.nozomu@gmail.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #8096
2018-10-31 19:22:59 +03:00
|
|
|
#
|
2013-06-07 08:31:18 +04:00
|
|
|
|
|
|
|
import sys
|
|
|
|
import time
|
|
|
|
import getopt
|
|
|
|
import re
|
|
|
|
import copy
|
|
|
|
|
2014-10-29 06:35:10 +03:00
|
|
|
from signal import signal, SIGINT, SIGWINCH, SIG_DFL
|
2013-06-07 08:31:18 +04:00
|
|
|
|
2019-11-11 20:24:04 +03:00
|
|
|
|
2013-06-07 08:31:18 +04:00
|
|
|
# Master table of every printable statistic.
# Maps column header -> [field width, scale factor, human description].
# A scale of -1 marks the time column (printed verbatim); 100 marks a
# percentage; 1000/1024 select decimal vs binary suffix scaling in
# prettynum().  Descriptions are what "arcstat -v" prints.
cols = {
    # HDR: [Size, Scale, Description]
    "time": [8, -1, "Time"],
    "hits": [4, 1000, "ARC hits per second"],
    "iohs": [4, 1000, "ARC I/O hits per second"],
    "miss": [4, 1000, "ARC misses per second"],
    "read": [4, 1000, "Total ARC accesses per second"],
    "hit%": [4, 100, "ARC hit percentage"],
    "ioh%": [4, 100, "ARC I/O hit percentage"],
    "miss%": [5, 100, "ARC miss percentage"],
    "dhit": [4, 1000, "Demand hits per second"],
    "dioh": [4, 1000, "Demand I/O hits per second"],
    "dmis": [4, 1000, "Demand misses per second"],
    "dh%": [3, 100, "Demand hit percentage"],
    "di%": [3, 100, "Demand I/O hit percentage"],
    "dm%": [3, 100, "Demand miss percentage"],
    "ddhit": [5, 1000, "Demand data hits per second"],
    "ddioh": [5, 1000, "Demand data I/O hits per second"],
    "ddmis": [5, 1000, "Demand data misses per second"],
    "ddh%": [4, 100, "Demand data hit percentage"],
    "ddi%": [4, 100, "Demand data I/O hit percentage"],
    "ddm%": [4, 100, "Demand data miss percentage"],
    "dmhit": [5, 1000, "Demand metadata hits per second"],
    "dmioh": [5, 1000, "Demand metadata I/O hits per second"],
    "dmmis": [5, 1000, "Demand metadata misses per second"],
    "dmh%": [4, 100, "Demand metadata hit percentage"],
    "dmi%": [4, 100, "Demand metadata I/O hit percentage"],
    "dmm%": [4, 100, "Demand metadata miss percentage"],
    "phit": [4, 1000, "Prefetch hits per second"],
    "pioh": [4, 1000, "Prefetch I/O hits per second"],
    "pmis": [4, 1000, "Prefetch misses per second"],
    "ph%": [3, 100, "Prefetch hits percentage"],
    "pi%": [3, 100, "Prefetch I/O hits percentage"],
    "pm%": [3, 100, "Prefetch miss percentage"],
    "pdhit": [5, 1000, "Prefetch data hits per second"],
    "pdioh": [5, 1000, "Prefetch data I/O hits per second"],
    "pdmis": [5, 1000, "Prefetch data misses per second"],
    "pdh%": [4, 100, "Prefetch data hits percentage"],
    "pdi%": [4, 100, "Prefetch data I/O hits percentage"],
    "pdm%": [4, 100, "Prefetch data miss percentage"],
    "pmhit": [5, 1000, "Prefetch metadata hits per second"],
    "pmioh": [5, 1000, "Prefetch metadata I/O hits per second"],
    "pmmis": [5, 1000, "Prefetch metadata misses per second"],
    "pmh%": [4, 100, "Prefetch metadata hits percentage"],
    "pmi%": [4, 100, "Prefetch metadata I/O hits percentage"],
    "pmm%": [4, 100, "Prefetch metadata miss percentage"],
    "mhit": [4, 1000, "Metadata hits per second"],
    "mioh": [4, 1000, "Metadata I/O hits per second"],
    "mmis": [4, 1000, "Metadata misses per second"],
    "mread": [5, 1000, "Metadata accesses per second"],
    "mh%": [3, 100, "Metadata hit percentage"],
    "mi%": [3, 100, "Metadata I/O hit percentage"],
    "mm%": [3, 100, "Metadata miss percentage"],
    "arcsz": [5, 1024, "ARC size"],
    "size": [5, 1024, "ARC size"],
    "c": [5, 1024, "ARC target size"],
    "mfu": [4, 1000, "MFU list hits per second"],
    "mru": [4, 1000, "MRU list hits per second"],
    "mfug": [4, 1000, "MFU ghost list hits per second"],
    "mrug": [4, 1000, "MRU ghost list hits per second"],
    "unc": [4, 1000, "Uncached list hits per second"],
    "eskip": [5, 1000, "evict_skip per second"],
    "el2skip": [7, 1000, "evict skip, due to l2 writes, per second"],
    "el2cach": [7, 1024, "Size of L2 cached evictions per second"],
    "el2el": [5, 1024, "Size of L2 eligible evictions per second"],
    "el2mfu": [6, 1024, "Size of L2 eligible MFU evictions per second"],
    "el2mru": [6, 1024, "Size of L2 eligible MRU evictions per second"],
    "el2inel": [7, 1024, "Size of L2 ineligible evictions per second"],
    "mtxmis": [6, 1000, "mutex_miss per second"],
    "dread": [5, 1000, "Demand accesses per second"],
    "ddread": [6, 1000, "Demand data accesses per second"],
    "dmread": [6, 1000, "Demand metadata accesses per second"],
    "pread": [5, 1000, "Prefetch accesses per second"],
    "pdread": [6, 1000, "Prefetch data accesses per second"],
    "pmread": [6, 1000, "Prefetch metadata accesses per second"],
    "l2hits": [6, 1000, "L2ARC hits per second"],
    "l2miss": [6, 1000, "L2ARC misses per second"],
    "l2read": [6, 1000, "Total L2ARC accesses per second"],
    "l2hit%": [6, 100, "L2ARC access hit percentage"],
    "l2miss%": [7, 100, "L2ARC access miss percentage"],
    "l2pref": [6, 1024, "L2ARC prefetch allocated size"],
    "l2mfu": [5, 1024, "L2ARC MFU allocated size"],
    "l2mru": [5, 1024, "L2ARC MRU allocated size"],
    "l2data": [6, 1024, "L2ARC data allocated size"],
    "l2meta": [6, 1024, "L2ARC metadata allocated size"],
    "l2pref%": [7, 100, "L2ARC prefetch percentage"],
    "l2mfu%": [6, 100, "L2ARC MFU percentage"],
    "l2mru%": [6, 100, "L2ARC MRU percentage"],
    "l2data%": [7, 100, "L2ARC data percentage"],
    "l2meta%": [7, 100, "L2ARC metadata percentage"],
    "l2asize": [7, 1024, "Actual (compressed) size of the L2ARC"],
    "l2size": [6, 1024, "Size of the L2ARC"],
    "l2bytes": [7, 1024, "Bytes read per second from the L2ARC"],
    "grow": [4, 1000, "ARC grow disabled"],
    "need": [5, 1024, "ARC reclaim need"],
    "free": [5, 1024, "ARC free memory"],
    "avail": [5, 1024, "ARC available memory"],
    "waste": [5, 1024, "Wasted memory due to round up to pagesize"],
    "ztotal": [6, 1000, "zfetch total prefetcher calls per second"],
    "zhits": [5, 1000, "zfetch stream hits per second"],
    "zahead": [6, 1000, "zfetch hits ahead of streams per second"],
    "zpast": [5, 1000, "zfetch hits behind streams per second"],
    "zmisses": [7, 1000, "zfetch stream misses per second"],
    "zmax": [4, 1000, "zfetch limit reached per second"],
    "zfuture": [7, 1000, "zfetch stream future per second"],
    "zstride": [7, 1000, "zfetch stream strides per second"],
    "zissued": [7, 1000, "zfetch prefetches issued per second"],
    "zactive": [7, 1000, "zfetch prefetches active per second"],
}
|
|
|
|
|
|
|
|
v = {}  # Current interval's computed values, keyed by column header

# Default column set, extended (-x) column set, and zfetch (-z) column set
hdr = ["time", "read", "ddread", "ddh%", "dmread", "dmh%", "pread", "ph%",
       "size", "c", "avail"]
xhdr = ["time", "mfu", "mru", "mfug", "mrug", "unc", "eskip", "mtxmis",
        "dread", "pread", "read"]
zhdr = ["time", "ztotal", "zhits", "zahead", "zpast", "zmisses", "zmax",
        "zfuture", "zstride", "zissued", "zactive"]

sint = 1               # Default interval is 1 second
count = 1              # Default count is 1
hdr_intr = 20          # Print header every 20 lines of output
opfile = None          # Output file path given with -o, if any
sep = "  "             # Default separator is 2 spaces
l2exist = False        # True once an L2ARC device is detected in the kstats
cmd = ("Usage: arcstat [-havxp] [-f fields] [-o file] [-s string] [interval "
       "[count]]\n")
cur = {}               # Most recent raw kstat snapshot
d = {}                 # Per-interval deltas between snapshots
out = None             # Open file object for -o output
kstat = None           # Raw kstats as read by kstat_update()
pretty_print = True    # Cleared by -p to disable auto-scaling of numbers
|
2013-06-07 08:31:18 +04:00
|
|
|
|
|
|
|
|
2019-12-01 02:43:23 +03:00
|
|
|
if sys.platform.startswith('freebsd'):
    # Requires py-sysctl on FreeBSD
    import sysctl

    def kstat_update():
        """Refresh the global 'kstat' dict from the FreeBSD sysctl tree.

        Reads every leaf under kstat.zfs.misc.arcstats and stores it,
        with that prefix stripped, as an integer keyed by stat name.
        Exits with status 1 if no arcstats are available.
        """
        global kstat

        leaves = []
        for ctl in sysctl.filter('kstat.zfs.misc.arcstats'):
            if ctl.type != sysctl.CTLTYPE_NODE:
                leaves.append(ctl)

        if not leaves:
            sys.exit(1)

        kstat = {}

        for entry in leaves:
            if not entry:
                continue

            # Trims 'kstat.zfs.misc.arcstats' from the name
            kstat[entry.name[24:]] = int(entry.value)
|
elif sys.platform.startswith('linux'):
|
2019-11-11 20:24:04 +03:00
|
|
|
def kstat_update():
|
|
|
|
global kstat
|
|
|
|
|
2024-04-19 20:19:12 +03:00
|
|
|
k1 = [line.strip() for line in open('/proc/spl/kstat/zfs/arcstats')]
|
2019-11-11 20:24:04 +03:00
|
|
|
|
2024-04-19 20:19:12 +03:00
|
|
|
k2 = ["zfetch_" + line.strip() for line in
|
|
|
|
open('/proc/spl/kstat/zfs/zfetchstats')]
|
|
|
|
|
|
|
|
if k1 is None or k2 is None:
|
2019-11-11 20:24:04 +03:00
|
|
|
sys.exit(1)
|
|
|
|
|
2024-04-19 20:19:12 +03:00
|
|
|
del k1[0:2]
|
|
|
|
del k2[0:2]
|
|
|
|
k = k1 + k2
|
2019-11-11 20:24:04 +03:00
|
|
|
kstat = {}
|
|
|
|
|
|
|
|
for s in k:
|
|
|
|
if not s:
|
|
|
|
continue
|
|
|
|
|
|
|
|
name, unused, value = s.split()
|
2020-03-18 21:50:45 +03:00
|
|
|
kstat[name] = int(value)
|
2019-11-11 20:24:04 +03:00
|
|
|
|
|
|
|
|
2013-06-07 08:31:18 +04:00
|
|
|
def detailed_usage():
    """Print the usage line and a description of every field, then exit 0.

    This backs the -v option: each entry of the global 'cols' table is
    listed with its human-readable description.
    """
    sys.stderr.write("%s\n" % cmd)
    sys.stderr.write("Field definitions are as follows:\n")
    for key, spec in cols.items():
        sys.stderr.write("%11s : %s\n" % (key, spec[2]))
    sys.stderr.write("\n")

    sys.exit(0)
|
2013-06-07 08:31:18 +04:00
|
|
|
|
|
|
|
|
|
|
|
def usage():
    """Print the usage message, option summary and examples to stderr,
    then exit with status 1 (invoked on -h or on a bad command line)."""
    help_text = [
        "%s\n" % cmd,
        "\t -h : Print this help message\n",
        "\t -a : Print all possible stats\n",
        "\t -v : List all possible field headers and definitions"
        "\n",
        "\t -x : Print extended stats\n",
        "\t -z : Print zfetch stats\n",
        "\t -f : Specify specific fields to print (see -v)\n",
        "\t -o : Redirect output to the specified file\n",
        "\t -s : Override default field separator with custom "
        "character or string\n",
        "\t -p : Disable auto-scaling of numerical fields\n",
        "\nExamples:\n",
        "\tarcstat -o /tmp/a.log 2 10\n",
        "\tarcstat -s \",\" -o /tmp/a.log 2 10\n",
        "\tarcstat -v\n",
        "\tarcstat -f time,hit%,dh%,ph%,mh% 1\n",
        "\n",
    ]
    for chunk in help_text:
        sys.stderr.write(chunk)

    sys.exit(1)
|
|
|
|
|
|
|
|
|
|
|
|
def snap_stats():
    """Take a fresh kstat snapshot and compute per-interval deltas.

    Saves the previous snapshot, refreshes the globals via
    kstat_update(), and fills the global delta dict 'd' with
    cur - prev for every counter (or the raw value on the first pass,
    when a counter has no previous sample).
    """
    global cur
    global kstat

    prev = copy.deepcopy(cur)
    kstat_update()

    cur = kstat
    for key in cur:
        # Skip the kstat "class" metadata entry.  The original test,
        # re.match(key, "class"), had the pattern/string arguments
        # reversed, which also silently dropped any key that is a
        # regex prefix of "class" -- notably the "c" (ARC target
        # size) counter.
        if key == "class":
            continue
        if key in prev:
            d[key] = cur[key] - prev[key]
        else:
            d[key] = cur[key]
|
|
|
|
|
|
|
|
|
|
|
|
def prettynum(sz, scale, num=0):
    """Format *num* to fit in *sz* columns, auto-scaling with a
    K/M/G/T/P suffix once it exceeds *scale*.

    A scale of -1 marks the time column: the value is returned as-is.
    Values scaled at least once are printed against a width of sz - 1
    to leave room for the suffix character; one decimal place is kept
    while the pre-scaled magnitude is still below 10 * scale.
    """
    units = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']

    # Special case for date field
    if scale == -1:
        return "%s" % num

    # A positive value below 1 is a rounding artifact; report it as 0
    if 0 < num < 1:
        num = 0

    idx = 0
    prescaled = 0
    while abs(num) > scale and idx < 5:
        prescaled = num
        num /= scale
        idx += 1

    if idx == 0:
        return "%*d" % (sz, num)

    if abs(prescaled / scale) < 10:
        return "%*.1f%s" % (sz - 1, num, units[idx])
    return "%*d%s" % (sz - 1, num, units[idx])
|
|
|
|
|
|
|
|
|
|
|
|
def print_values():
    """Write one line of statistics for the currently selected columns."""
    global hdr
    global sep
    global v
    global pretty_print

    if pretty_print:
        def render(col):
            # Width and divisor for each column come from the cols table.
            return prettynum(cols[col][0], cols[col][1], v[col])
    else:
        def render(col):
            # Parsable mode: raw values, no padding or unit scaling.
            return str(v[col])

    fields = [render(col) for col in hdr]
    sys.stdout.write(sep.join(fields))
    sys.stdout.write("\n")
    sys.stdout.flush()
|
2013-06-07 08:31:18 +04:00
|
|
|
|
|
|
|
|
|
|
|
def print_header():
    """Write the column-name header row."""
    global hdr
    global sep
    global pretty_print

    if pretty_print:
        def render(col):
            # Right-justify each name to its column width from the cols table.
            return "%*s" % (cols[col][0], col)
    else:
        def render(col):
            # Parsable mode: emit the bare column name.
            return col

    sys.stdout.write(sep.join(render(col) for col in hdr))
    sys.stdout.write("\n")
|
|
|
|
|
2016-10-06 20:04:54 +03:00
|
|
|
|
2014-03-11 09:27:56 +04:00
|
|
|
def get_terminal_lines():
    """Return the number of rows of the terminal on stdout, or None.

    Returns None when stdout is not a terminal, or on platforms where
    the TIOCGWINSZ ioctl (or the fcntl/termios modules) is unavailable.
    """
    try:
        import fcntl
        import termios
        import struct

        winsz = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, '1234')
        rows, _columns = struct.unpack('hh', winsz)
        return rows
    except Exception:
        # Not a tty (or no ioctl support); fall through to the
        # implicit None return.
        pass
|
2013-06-07 08:31:18 +04:00
|
|
|
|
2016-10-06 20:04:54 +03:00
|
|
|
|
2014-10-29 06:35:10 +03:00
|
|
|
def update_hdr_intr():
    """Recompute how often the header is reprinted from the terminal height."""
    global hdr_intr

    rows = get_terminal_lines()
    # Leave room for the header itself; ignore unknown sizes (None) and
    # terminals too short to matter.
    if rows is not None and rows > 3:
        hdr_intr = rows - 3
|
|
|
|
|
2016-10-06 20:04:54 +03:00
|
|
|
|
2014-10-29 06:35:10 +03:00
|
|
|
def resize_handler(signum, frame):
    # SIGWINCH handler: re-derive the header reprint interval after a
    # terminal resize.  The signal arguments are unused.
    update_hdr_intr()
|
|
|
|
|
|
|
|
|
2013-06-07 08:31:18 +04:00
|
|
|
def init():
    """Parse command-line options and configure global state.

    Sets the sampling interval (sint), the iteration count, the selected
    columns (hdr), the output file and separator, and detects whether an
    L2ARC device is present so l2* columns can be validated.  Exits via
    usage()/detailed_usage() for help requests or invalid combinations.
    """
    global sint
    global count
    global hdr
    global xhdr
    global zhdr
    global opfile
    global sep
    global out
    global l2exist
    global pretty_print

    desired_cols = None
    aflag = False
    xflag = False
    hflag = False
    vflag = False
    zflag = False

    try:
        opts, args = getopt.getopt(
            sys.argv[1:],
            "axzo:hvs:f:p",
            [
                "all",
                "extended",
                "zfetch",
                # Long options that consume a value need the trailing "=";
                # without it getopt treats them as plain flags and their
                # argument is silently left in the positional list.
                "outfile=",
                "help",
                "verbose",
                "separator=",
                "columns=",
                "parsable"
            ]
        )
    except getopt.error as msg:
        sys.stderr.write("Error: %s\n" % str(msg))
        usage()
        opts = None

    for opt, arg in opts:
        if opt in ('-a', '--all'):
            aflag = True
        if opt in ('-x', '--extended'):
            xflag = True
        if opt in ('-o', '--outfile'):
            opfile = arg
        if opt in ('-h', '--help'):
            hflag = True
        if opt in ('-v', '--verbose'):
            vflag = True
        if opt in ('-s', '--separator'):
            sep = arg
        if opt in ('-f', '--columns'):
            desired_cols = arg
        if opt in ('-p', '--parsable'):
            pretty_print = False
        if opt in ('-z', '--zfetch'):
            zflag = True

    # getopt already separates the trailing positional arguments for us;
    # counting consumed sys.argv entries by hand breaks with grouped
    # short options (e.g. "-xp 1").
    argv = args
    sint = int(argv[0]) if argv else sint
    # One positional arg: run forever (count 0); none: a single sample.
    count = int(argv[1]) if len(argv) > 1 else (0 if len(argv) > 0 else 1)

    if hflag or (xflag and zflag) or ((zflag or xflag) and desired_cols):
        usage()

    if vflag:
        detailed_usage()

    if xflag:
        hdr = xhdr

    if zflag:
        hdr = zhdr

    update_hdr_intr()

    # check if L2ARC exists
    snap_stats()
    l2_size = cur.get("l2_size")
    if l2_size:
        l2exist = True

    if desired_cols:
        hdr = desired_cols.split(",")

        invalid = []
        incompat = []
        for ele in hdr:
            if ele not in cols:
                invalid.append(ele)
            elif not l2exist and ele.startswith("l2"):
                sys.stdout.write("No L2ARC Here\n%s\n" % ele)
                incompat.append(ele)

        if len(invalid) > 0:
            sys.stderr.write("Invalid column definition! -- %s\n" % invalid)
            usage()

        if len(incompat) > 0:
            sys.stderr.write("Incompatible field specified! -- %s\n" %
                             incompat)
            usage()

    if aflag:
        if l2exist:
            hdr = cols.keys()
        else:
            hdr = [col for col in cols.keys() if not col.startswith("l2")]

    if opfile:
        try:
            out = open(opfile, "w")
            sys.stdout = out
        except IOError:
            sys.stderr.write("Cannot open %s for writing\n" % opfile)
            sys.exit(1)
|
|
|
|
|
|
|
|
|
|
|
|
def calculate():
    """Compute per-interval statistics from the current kstat snapshot.

    Fills the global dict v: per-second rates are derived from the delta
    dict d, instantaneous values are taken straight from the snapshot cur.
    """
    global d
    global v
    global l2exist

    def rate(total):
        # Convert an interval delta into a per-second rate.
        return total // sint

    def pct(part, whole):
        # Integer percentage; 0 when there were no events.
        return 100 * part // whole if whole > 0 else 0

    def rest(whole, *parts):
        # Remaining percentage after the listed parts; 0 when idle.
        return 100 - sum(parts) if whole > 0 else 0

    v = dict()
    v["time"] = time.strftime("%H:%M:%S", time.localtime())
    v["hits"] = rate(d["hits"])
    v["iohs"] = rate(d["iohits"])
    v["miss"] = rate(d["misses"])
    v["read"] = v["hits"] + v["iohs"] + v["miss"]
    v["hit%"] = pct(v["hits"], v["read"])
    v["ioh%"] = pct(v["iohs"], v["read"])
    v["miss%"] = rest(v["read"], v["hit%"], v["ioh%"])

    # Demand (data + metadata combined).
    v["dhit"] = rate(d["demand_data_hits"] + d["demand_metadata_hits"])
    v["dioh"] = rate(d["demand_data_iohits"] + d["demand_metadata_iohits"])
    v["dmis"] = rate(d["demand_data_misses"] + d["demand_metadata_misses"])

    v["dread"] = v["dhit"] + v["dioh"] + v["dmis"]
    v["dh%"] = pct(v["dhit"], v["dread"])
    v["di%"] = pct(v["dioh"], v["dread"])
    v["dm%"] = rest(v["dread"], v["dh%"], v["di%"])

    # Demand data only.
    v["ddhit"] = rate(d["demand_data_hits"])
    v["ddioh"] = rate(d["demand_data_iohits"])
    v["ddmis"] = rate(d["demand_data_misses"])

    v["ddread"] = v["ddhit"] + v["ddioh"] + v["ddmis"]
    v["ddh%"] = pct(v["ddhit"], v["ddread"])
    v["ddi%"] = pct(v["ddioh"], v["ddread"])
    v["ddm%"] = rest(v["ddread"], v["ddh%"], v["ddi%"])

    # Demand metadata only.
    v["dmhit"] = rate(d["demand_metadata_hits"])
    v["dmioh"] = rate(d["demand_metadata_iohits"])
    v["dmmis"] = rate(d["demand_metadata_misses"])

    v["dmread"] = v["dmhit"] + v["dmioh"] + v["dmmis"]
    v["dmh%"] = pct(v["dmhit"], v["dmread"])
    v["dmi%"] = pct(v["dmioh"], v["dmread"])
    v["dmm%"] = rest(v["dmread"], v["dmh%"], v["dmi%"])

    # Prefetch (data + metadata combined).
    v["phit"] = rate(d["prefetch_data_hits"] + d["prefetch_metadata_hits"])
    v["pioh"] = rate(d["prefetch_data_iohits"] +
                     d["prefetch_metadata_iohits"])
    v["pmis"] = rate(d["prefetch_data_misses"] +
                     d["prefetch_metadata_misses"])

    v["pread"] = v["phit"] + v["pioh"] + v["pmis"]
    v["ph%"] = pct(v["phit"], v["pread"])
    v["pi%"] = pct(v["pioh"], v["pread"])
    v["pm%"] = rest(v["pread"], v["ph%"], v["pi%"])

    # Prefetch data only.
    v["pdhit"] = rate(d["prefetch_data_hits"])
    v["pdioh"] = rate(d["prefetch_data_iohits"])
    v["pdmis"] = rate(d["prefetch_data_misses"])

    v["pdread"] = v["pdhit"] + v["pdioh"] + v["pdmis"]
    v["pdh%"] = pct(v["pdhit"], v["pdread"])
    v["pdi%"] = pct(v["pdioh"], v["pdread"])
    v["pdm%"] = rest(v["pdread"], v["pdh%"], v["pdi%"])

    # Prefetch metadata only.
    v["pmhit"] = rate(d["prefetch_metadata_hits"])
    v["pmioh"] = rate(d["prefetch_metadata_iohits"])
    v["pmmis"] = rate(d["prefetch_metadata_misses"])

    v["pmread"] = v["pmhit"] + v["pmioh"] + v["pmmis"]
    v["pmh%"] = pct(v["pmhit"], v["pmread"])
    v["pmi%"] = pct(v["pmioh"], v["pmread"])
    v["pmm%"] = rest(v["pmread"], v["pmh%"], v["pmi%"])

    # Metadata (demand + prefetch combined).
    v["mhit"] = rate(d["prefetch_metadata_hits"] +
                     d["demand_metadata_hits"])
    v["mioh"] = rate(d["prefetch_metadata_iohits"] +
                     d["demand_metadata_iohits"])
    v["mmis"] = rate(d["prefetch_metadata_misses"] +
                     d["demand_metadata_misses"])

    v["mread"] = v["mhit"] + v["mioh"] + v["mmis"]
    v["mh%"] = pct(v["mhit"], v["mread"])
    v["mi%"] = pct(v["mioh"], v["mread"])
    v["mm%"] = rest(v["mread"], v["mh%"], v["mi%"])

    v["arcsz"] = cur["size"]
    v["size"] = cur["size"]
    v["c"] = cur["c"]
    v["mfu"] = rate(d["mfu_hits"])
    v["mru"] = rate(d["mru_hits"])
    v["mrug"] = rate(d["mru_ghost_hits"])
    v["mfug"] = rate(d["mfu_ghost_hits"])
    v["unc"] = rate(d["uncached_hits"])
    v["eskip"] = rate(d["evict_skip"])
    v["el2skip"] = rate(d["evict_l2_skip"])
    v["el2cach"] = rate(d["evict_l2_cached"])
    v["el2el"] = rate(d["evict_l2_eligible"])
    v["el2mfu"] = rate(d["evict_l2_eligible_mfu"])
    v["el2mru"] = rate(d["evict_l2_eligible_mru"])
    v["el2inel"] = rate(d["evict_l2_ineligible"])
    v["mtxmis"] = rate(d["mutex_miss"])

    # zfetch (prefetcher) statistics.
    v["ztotal"] = rate(d["zfetch_hits"] + d["zfetch_future"] +
                       d["zfetch_stride"] + d["zfetch_past"] +
                       d["zfetch_misses"])
    v["zhits"] = rate(d["zfetch_hits"])
    v["zahead"] = rate(d["zfetch_future"] + d["zfetch_stride"])
    v["zpast"] = rate(d["zfetch_past"])
    v["zmisses"] = rate(d["zfetch_misses"])
    v["zmax"] = rate(d["zfetch_max_streams"])
    v["zfuture"] = rate(d["zfetch_future"])
    v["zstride"] = rate(d["zfetch_stride"])
    v["zissued"] = rate(d["zfetch_io_issued"])
    v["zactive"] = rate(d["zfetch_io_active"])

    if l2exist:
        v["l2hits"] = rate(d["l2_hits"])
        v["l2miss"] = rate(d["l2_misses"])
        v["l2read"] = v["l2hits"] + v["l2miss"]
        v["l2hit%"] = pct(v["l2hits"], v["l2read"])
        v["l2miss%"] = rest(v["l2read"], v["l2hit%"])
        v["l2asize"] = cur["l2_asize"]
        v["l2size"] = cur["l2_size"]
        v["l2bytes"] = rate(d["l2_read_bytes"])

        v["l2pref"] = cur["l2_prefetch_asize"]
        v["l2mfu"] = cur["l2_mfu_asize"]
        v["l2mru"] = cur["l2_mru_asize"]
        v["l2data"] = cur["l2_bufc_data_asize"]
        v["l2meta"] = cur["l2_bufc_metadata_asize"]
        # Composition percentages are relative to the allocated L2ARC size.
        v["l2pref%"] = 100 * v["l2pref"] // v["l2asize"]
        v["l2mfu%"] = 100 * v["l2mfu"] // v["l2asize"]
        v["l2mru%"] = 100 * v["l2mru"] // v["l2asize"]
        v["l2data%"] = 100 * v["l2data"] // v["l2asize"]
        v["l2meta%"] = 100 * v["l2meta"] // v["l2asize"]

    v["grow"] = 0 if cur["arc_no_grow"] else 1
    v["need"] = cur["arc_need_free"]
    v["free"] = cur["memory_free_bytes"]
    v["avail"] = cur["memory_available_bytes"]
    v["waste"] = cur["abd_chunk_waste_size"]
|
2018-10-30 02:18:20 +03:00
|
|
|
|
2013-06-07 08:31:18 +04:00
|
|
|
|
|
|
|
def main():
    """Sample and print ARC statistics until the count is exhausted."""
    global sint
    global count
    global hdr_intr

    init()
    # A positive count limits the number of samples; otherwise run forever.
    finite = count > 0

    # Restore default Ctrl-C behavior and track terminal resizes.
    signal(SIGINT, SIG_DFL)
    signal(SIGWINCH, resize_handler)

    lines_since_header = 0
    while True:
        if lines_since_header == 0:
            print_header()

        snap_stats()
        calculate()
        print_values()

        if finite:
            if count <= 1:
                break
            count -= 1

        # Reprint the header every hdr_intr lines.
        if lines_since_header >= hdr_intr:
            lines_since_header = 0
        else:
            lines_since_header += 1
        time.sleep(sint)

    if out:
        out.close()
|
|
|
|
|
|
|
|
|
|
|
|
# Script entry point: run the sampling loop when executed directly.
if __name__ == '__main__':
    main()
|