Annotate KM_PUSHPAGE call paths with PF_NOFS

The txg_sync(), zfs_putpage(), zvol_write(), and zvol_discard()
call paths must only use KM_PUSHPAGE to avoid potential deadlocks
during direct reclaim.
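
As an illustration only, here is a minimal sketch (not part of this
commit) of the kind of allocation these paths perform; the function
sync_context_helper() is hypothetical, but kmem_alloc(), kmem_free(),
and KM_PUSHPAGE are the SPL interfaces involved:

#include <sys/kmem.h>	/* SPL kmem_alloc()/kmem_free(), KM_PUSHPAGE */

static void
sync_context_helper(size_t size)
{
	void *buf;

	/*
	 * KM_SLEEP here could enter direct reclaim, which may try to
	 * write dirty pages back through ZFS and then block on the very
	 * txg this thread is responsible for syncing -- a deadlock.
	 * KM_PUSHPAGE satisfies the allocation without performing
	 * filesystem reclaim.
	 */
	buf = kmem_alloc(size, KM_PUSHPAGE);

	/* ... use buf ... */

	kmem_free(buf, size);
}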

This patch annotates these call paths so any accidental use of
KM_SLEEP will be quickly detected.  In the interest of stability,
if debugging is disabled, the offending allocation will have its
GFP flags automatically corrected.  When debugging is enabled,
any misuse will be treated as a fatal error.
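
The enforcement described above might look roughly like the following
sketch; sanitize_gfp() is a hypothetical helper rather than the SPL's
actual implementation, and DEBUG_KMEM stands in for whichever debug
conditional is used, while PF_NOFS, __GFP_IO, and __GFP_FS are the
real flags involved:

#include <linux/bug.h>
#include <linux/gfp.h>
#include <linux/sched.h>

static gfp_t
sanitize_gfp(gfp_t flags)
{
	/* Only contexts annotated with PF_NOFS are affected. */
	if (!(current->flags & PF_NOFS))
		return (flags);

#ifdef DEBUG_KMEM
	/*
	 * Debugging enabled: an allocation that may recurse into the
	 * filesystem is treated as a fatal error.
	 */
	BUG_ON(flags & (__GFP_IO | __GFP_FS));
#else
	/*
	 * Debugging disabled: quietly correct the offending GFP flags
	 * so the allocation cannot trigger filesystem reclaim.
	 */
	flags &= ~(__GFP_IO | __GFP_FS);
#endif

	return (flags);
}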

This patch is entirely for debugging.  We should be careful to
NOT become dependent on it fixing up the incorrect allocations.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Author: Brian Behlendorf <behlendorf1@llnl.gov>
Date:   2012-08-18 11:17:23 -07:00
Commit: 8630650a8d
Parent: 86dd0fd922
3 changed files with 41 additions and 4 deletions

@@ -382,6 +382,15 @@ txg_sync_thread(dsl_pool_t *dp)
callb_cpr_t cpr;
uint64_t start, delta;
#ifdef _KERNEL
/*
* Annotate this process with a flag that indicates that it is
* unsafe to use KM_SLEEP during memory allocations due to the
* potential for a deadlock. KM_PUSHPAGE should be used instead.
*/
current->flags |= PF_NOFS;
#endif /* _KERNEL */
txg_thread_enter(tx, &cpr);
start = delta = 0;

@@ -357,8 +357,16 @@ zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
ASSERT(PageLocked(pp));
ASSERT(!PageWriteback(pp));
ASSERT(!(current->flags & PF_NOFS));
/*
* Annotate this call path with a flag that indicates that it is
* unsafe to use KM_SLEEP during memory allocations due to the
* potential for a deadlock. KM_PUSHPAGE should be used instead.
*/
current->flags |= PF_NOFS;
(void) zfs_putpage(mapping->host, pp, wbc);
current->flags &= ~PF_NOFS;
return (0);
}

@@ -540,6 +540,14 @@ zvol_write(void *arg)
dmu_tx_t *tx;
rl_t *rl;
/*
* Annotate this call path with a flag that indicates that it is
* unsafe to use KM_SLEEP during memory allocations due to the
* potential for a deadlock. KM_PUSHPAGE should be used instead.
*/
ASSERT(!(current->flags & PF_NOFS));
current->flags |= PF_NOFS;
if (req->cmd_flags & VDEV_REQ_FLUSH)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
@@ -548,7 +556,7 @@ zvol_write(void *arg)
*/
if (size == 0) {
blk_end_request(req, 0, size);
return;
goto out;
}
rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);
@@ -562,7 +570,7 @@ zvol_write(void *arg)
dmu_tx_abort(tx);
zfs_range_unlock(rl);
blk_end_request(req, -error, size);
return;
goto out;
}
error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
@@ -578,6 +586,8 @@ zvol_write(void *arg)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
blk_end_request(req, -error, size);
out:
current->flags &= ~PF_NOFS;
}
#ifdef HAVE_BLK_QUEUE_DISCARD
@@ -592,14 +602,22 @@ zvol_discard(void *arg)
int error;
rl_t *rl;
/*
* Annotate this call path with a flag that indicates that it is
* unsafe to use KM_SLEEP during memory allocations due to the
* potential for a deadlock. KM_PUSHPAGE should be used instead.
*/
ASSERT(!(current->flags & PF_NOFS));
current->flags |= PF_NOFS;
if (offset + size > zv->zv_volsize) {
blk_end_request(req, -EIO, size);
return;
goto out;
}
if (size == 0) {
blk_end_request(req, 0, size);
return;
goto out;
}
rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);
@@ -613,6 +631,8 @@ zvol_discard(void *arg)
zfs_range_unlock(rl);
blk_end_request(req, -error, size);
out:
current->flags &= ~PF_NOFS;
}
#endif /* HAVE_BLK_QUEUE_DISCARD */