Add visibility into dmu_tx_assign times

This change adds a new kstat to gain some visibility into the
amount of time spent in each call to dmu_tx_assign. A histogram
is exported via the new dmu_tx_assign file. The information
contained in this histogram is how frequently dmu_tx_assign
calls completed within each latency interval range.

Signed-off-by: Prakash Surya <surya1@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
This commit is contained in:
Brian Behlendorf 2013-10-02 11:43:52 -07:00
parent 0b1401ee91
commit 2d37239a28
3 changed files with 113 additions and 0 deletions

View File

@ -549,6 +549,7 @@ typedef struct spa_stats_history {
typedef struct spa_stats { typedef struct spa_stats {
spa_stats_history_t read_history; spa_stats_history_t read_history;
spa_stats_history_t txg_history; spa_stats_history_t txg_history;
spa_stats_history_t tx_assign_histogram;
} spa_stats_t; } spa_stats_t;
typedef enum txg_state { typedef enum txg_state {
@ -568,6 +569,7 @@ extern int spa_txg_history_set(spa_t *spa, uint64_t txg,
txg_state_t completed_state, hrtime_t completed_time); txg_state_t completed_state, hrtime_t completed_time);
extern int spa_txg_history_set_io(spa_t *spa, uint64_t txg, uint64_t nread, extern int spa_txg_history_set_io(spa_t *spa, uint64_t txg, uint64_t nread,
uint64_t nwritten, uint64_t reads, uint64_t writes, uint64_t nreserved); uint64_t nwritten, uint64_t reads, uint64_t writes, uint64_t nreserved);
extern void spa_tx_assign_add_nsecs(spa_t *spa, uint64_t nsecs);
/* Pool configuration locks */ /* Pool configuration locks */
extern int spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw); extern int spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw);

View File

@ -1077,12 +1077,15 @@ dmu_tx_unassign(dmu_tx_t *tx)
int int
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how) dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
{ {
hrtime_t before;
int err; int err;
ASSERT(tx->tx_txg == 0); ASSERT(tx->tx_txg == 0);
ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT); ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT);
ASSERT(!dsl_pool_sync_context(tx->tx_pool)); ASSERT(!dsl_pool_sync_context(tx->tx_pool));
before = gethrtime();
/* If we might wait, we must not hold the config lock. */ /* If we might wait, we must not hold the config lock. */
ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool)); ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));
@ -1097,6 +1100,8 @@ dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
txg_rele_to_quiesce(&tx->tx_txgh); txg_rele_to_quiesce(&tx->tx_txgh);
spa_tx_assign_add_nsecs(tx->tx_pool->dp_spa, gethrtime() - before);
return (0); return (0);
} }

View File

@ -504,16 +504,122 @@ spa_txg_history_set_io(spa_t *spa, uint64_t txg, uint64_t nread,
return (error); return (error);
} }
/*
 * ==========================================================================
 * SPA TX Assign Histogram Routines
 * ==========================================================================
 */

/*
 * Tx statistics - Information exported regarding dmu_tx_assign time.
 */

/*
 * kstat update callback for the dmu_tx_assign histogram.
 *
 * On KSTAT_WRITE every bucket is reset to zero.  On any access the
 * trailing run of all-zero buckets is trimmed from the reported data by
 * shrinking ks_ndata/ks_data_size, so empty high-latency buckets are
 * not emitted.  Always returns 0.
 */
static int
spa_tx_assign_update(kstat_t *ksp, int rw)
{
	spa_t *spa = ksp->ks_private;
	spa_stats_history_t *ssh = &spa->spa_stats.tx_assign_histogram;
	kstat_named_t *buckets = ssh->private;
	int last;

	/* A write request clears the entire histogram. */
	if (rw == KSTAT_WRITE) {
		for (last = 0; last < ssh->count; last++)
			buckets[last].value.ui64 = 0;
	}

	/* Walk back over trailing zero buckets to find the last used one. */
	last = ssh->count;
	while (last > 0 && buckets[last - 1].value.ui64 == 0)
		last--;

	ksp->ks_ndata = last;
	ksp->ks_data_size = last * sizeof (kstat_named_t);

	return (0);
}
/*
 * Create and install the per-pool "dmu_tx_assign" kstat: a histogram of
 * power-of-two latency buckets, one kstat_named_t entry per bucket,
 * named after the bucket's upper bound in nanoseconds.
 */
static void
spa_tx_assign_init(spa_t *spa)
{
	spa_stats_history_t *ssh = &spa->spa_stats.tx_assign_histogram;
	char name[KSTAT_STRLEN];
	kstat_named_t *buckets;
	kstat_t *ksp;
	int i;

	mutex_init(&ssh->lock, NULL, MUTEX_DEFAULT, NULL);

	/* 42 power-of-two buckets span latencies from 1ns to ~2,199s. */
	ssh->count = 42;
	ssh->size = ssh->count * sizeof (kstat_named_t);
	ssh->private = kmem_alloc(ssh->size, KM_SLEEP);

	/* kstat module name is "zfs/<pool>"; ensure NUL termination. */
	(void) snprintf(name, KSTAT_STRLEN, "zfs/%s", spa_name(spa));
	name[KSTAT_STRLEN-1] = '\0';

	buckets = ssh->private;
	for (i = 0; i < ssh->count; i++) {
		buckets[i].data_type = KSTAT_DATA_UINT64;
		buckets[i].value.ui64 = 0;
		(void) snprintf(buckets[i].name, KSTAT_STRLEN, "%llu ns",
		    (u_longlong_t)1 << i);
	}

	ksp = kstat_create(name, 0, "dmu_tx_assign", "misc",
	    KSTAT_TYPE_NAMED, 0, KSTAT_FLAG_VIRTUAL);
	ssh->kstat = ksp;

	/* kstat_create() may fail; only wire it up when it succeeded. */
	if (ksp != NULL) {
		ksp->ks_lock = &ssh->lock;
		ksp->ks_data = ssh->private;
		ksp->ks_ndata = ssh->count;
		ksp->ks_data_size = ssh->size;
		ksp->ks_private = spa;
		ksp->ks_update = spa_tx_assign_update;
		kstat_install(ksp);
	}
}
/*
 * Tear down the dmu_tx_assign kstat: delete the kstat (if it was ever
 * created), then release the bucket array and the lock.
 */
static void
spa_tx_assign_destroy(spa_t *spa)
{
	spa_stats_history_t *ssh = &spa->spa_stats.tx_assign_histogram;
	kstat_t *ksp = ssh->kstat;

	/* The kstat may be NULL if kstat_create() failed during init. */
	if (ksp != NULL)
		kstat_delete(ksp);

	kmem_free(ssh->private, ssh->size);
	mutex_destroy(&ssh->lock);
}
/*
 * Record one dmu_tx_assign() latency sample, in nanoseconds, by bumping
 * the power-of-two histogram bucket that covers it.  Samples larger than
 * the last bucket are clamped into it.  Lock-free: uses atomic_inc_64.
 */
void
spa_tx_assign_add_nsecs(spa_t *spa, uint64_t nsecs)
{
	spa_stats_history_t *ssh = &spa->spa_stats.tx_assign_histogram;
	uint64_t idx = 0;

	/*
	 * Find the first bucket whose bound (2^idx ns) covers the sample.
	 * The clamp must use ssh->count (number of buckets), not
	 * ssh->size (bytes, count * sizeof (kstat_named_t)); bounding by
	 * size lets idx run past the array and corrupt adjacent memory.
	 * The shift must be 64-bit: "1 << idx" is an int shift, which is
	 * undefined for idx >= 31 and cannot represent ns values above
	 * 2^30 correctly in the comparison.
	 */
	while (((1ULL << idx) < nsecs) && (idx < ssh->count - 1))
		idx++;

	atomic_inc_64(&((kstat_named_t *)ssh->private)[idx].value.ui64);
}
/*
 * Initialize all per-pool statistics subsystems, including the
 * dmu_tx_assign latency histogram.
 * (De-duplicated from side-by-side diff rendering residue.)
 */
void
spa_stats_init(spa_t *spa)
{
	spa_read_history_init(spa);
	spa_txg_history_init(spa);
	spa_tx_assign_init(spa);
}
/*
 * Destroy all per-pool statistics subsystems in the reverse order of
 * their initialization.
 * (De-duplicated from side-by-side diff rendering residue.)
 */
void
spa_stats_destroy(spa_t *spa)
{
	spa_tx_assign_destroy(spa);
	spa_txg_history_destroy(spa);
	spa_read_history_destroy(spa);
}