mirror of
https://git.proxmox.com/git/mirror_zfs.git
synced 2024-12-25 02:49:32 +03:00
Improve reporting of tx assignment wait times
Some callers of dmu_tx_assign() use the TXG_NOWAIT flag and call dmu_tx_wait() themselves before retrying if the assignment fails. The wait times for such callers are not accounted for in the dmu_tx_assign kstat histogram, because the histogram only records time spent in dmu_tx_assign(). This change moves the histogram update to dmu_tx_wait() to properly account for all time spent there. One downside of this approach is that it is possible to call dmu_tx_wait() multiple times before successfully assigning a transaction, in which case the cumulative wait time would not be recorded. However, this case should not often arise in practice, because most callers currently use one of these forms: dmu_tx_assign(tx, TXG_WAIT); dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT); The first form should make just one call to dmu_tx_delay() inside of dmu_tx_assign(). The second form retries with TXG_WAITED if the first assignment fails and incurs a delay, in which case no further waiting is performed. Therefore transaction delays normally occur in one call to dmu_tx_wait() so the histogram should be fairly accurate. Another possible downside of this approach is that the histogram will no longer record overhead outside of dmu_tx_wait() such as in dmu_tx_try_assign(). While I'm not aware of any reason for concern on this point, it is conceivable that lock contention, long list traversal, etc. could cause assignment delays that would not be reflected in the histogram. Therefore the histogram should strictly be used for visibility in to the normal delay mechanisms and not as a profiling tool for code performance. Signed-off-by: Ned Bass <bass6@llnl.gov> Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov> Closes #1915
This commit is contained in:
parent
3ccab25205
commit
a77c4c8332
@@ -1251,7 +1251,6 @@ dmu_tx_unassign(dmu_tx_t *tx)
|
||||
int
|
||||
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
|
||||
{
|
||||
hrtime_t before;
|
||||
int err;
|
||||
|
||||
ASSERT(tx->tx_txg == 0);
|
||||
@@ -1259,8 +1258,6 @@ dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
|
||||
txg_how == TXG_WAITED);
|
||||
ASSERT(!dsl_pool_sync_context(tx->tx_pool));
|
||||
|
||||
before = gethrtime();
|
||||
|
||||
if (txg_how == TXG_WAITED)
|
||||
tx->tx_waited = B_TRUE;
|
||||
|
||||
@@ -1278,8 +1275,6 @@ dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
|
||||
|
||||
txg_rele_to_quiesce(&tx->tx_txgh);
|
||||
|
||||
spa_tx_assign_add_nsecs(tx->tx_pool->dp_spa, gethrtime() - before);
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
@@ -1288,10 +1283,13 @@ dmu_tx_wait(dmu_tx_t *tx)
|
||||
{
|
||||
spa_t *spa = tx->tx_pool->dp_spa;
|
||||
dsl_pool_t *dp = tx->tx_pool;
|
||||
hrtime_t before;
|
||||
|
||||
ASSERT(tx->tx_txg == 0);
|
||||
ASSERT(!dsl_pool_config_held(tx->tx_pool));
|
||||
|
||||
before = gethrtime();
|
||||
|
||||
if (tx->tx_wait_dirty) {
|
||||
uint64_t dirty;
|
||||
|
||||
@@ -1343,6 +1341,8 @@ dmu_tx_wait(dmu_tx_t *tx)
|
||||
*/
|
||||
txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
|
||||
}
|
||||
|
||||
spa_tx_assign_add_nsecs(spa, gethrtime() - before);
|
||||
}
|
||||
|
||||
void
|
||||
|
Loading…
Reference in New Issue
Block a user