Revert "arc_evict, arc_evict_ghost: reduce stack usage using kmem_zalloc"

This reverts commit 16fcdea363 in preparation for the illumos 5497
"lock contention on arcs_mtx" patch, which eliminates the use of
"marker" within the ARC code.

Signed-off-by: Tim Chase <tim@chase2k.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Authored by Tim Chase on 2015-05-10 18:34:05 -05:00; committed by Brian Behlendorf
parent 44de2f02d6
commit 7807028ccd

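The diff below swaps the kmem_zalloc()'d marker header back to a zero-filled marker on the caller's stack; in both functions that marker is parked in the ARC list to remember the walker's position while the arcs_mtx locks are dropped, then looked up and unlinked again. The following is a rough userland sketch of that parking pattern, not the ZFS code: the BSD TAILQ macros stand in for the illumos list_t API, and the node type, the small batch limit, and the printf() are made up for the illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

struct node {
	int is_marker;			/* stand-in for however real headers and markers are told apart */
	int id;
	TAILQ_ENTRY(node) link;
};
TAILQ_HEAD(nodelist, node);

static void
walk_with_marker(struct nodelist *list)
{
	struct node marker;		/* lives on the stack, like "arc_buf_hdr_t marker" */
	struct node *n, *prev;
	int count = 0;

	memset(&marker, 0, sizeof (marker));	/* the bzero()/{{{ 0 }}} step */
	marker.is_marker = 1;

	/* Walk tail-to-head, the way arc_evict()/arc_evict_ghost() walk the ARC lists. */
	for (n = TAILQ_LAST(list, nodelist); n != NULL; n = prev) {
		prev = TAILQ_PREV(n, nodelist, link);

		if (n->is_marker)	/* skip placeholders left by other walkers */
			continue;

		if (count++ > 3) {	/* analogous to the arc_evict_iterations check */
			/* Park the marker so our position survives dropping the lock. */
			TAILQ_INSERT_AFTER(list, n, &marker, link);
			/* ... mutex_exit()s, kpreempt(), mutex_enter()s would go here ... */
			prev = TAILQ_PREV(&marker, nodelist, link);
			TAILQ_REMOVE(list, &marker, link);
			count = 0;
			continue;
		}

		/* "Evict" the entry; arc_evict() does the real work at this point. */
		printf("evicting %d\n", n->id);
		TAILQ_REMOVE(list, n, link);
		free(n);
	}
}

int
main(void)
{
	struct nodelist list;
	int i;

	TAILQ_INIT(&list);
	for (i = 0; i < 10; i++) {
		struct node *n = calloc(1, sizeof (*n));
		n->id = i;
		TAILQ_INSERT_HEAD(&list, n, link);
	}
	walk_with_marker(&list);
	return (0);
}

With the marker on the stack, the kmem_zalloc()/kmem_free() pair at the top and bottom of each function disappears, which is exactly what the hunks below remove.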

@@ -1825,15 +1825,13 @@ arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
 	kmutex_t *hash_lock;
 	boolean_t have_lock;
 	void *stolen = NULL;
-	arc_buf_hdr_t *marker;
+	arc_buf_hdr_t marker = {{{ 0 }}};
 	int count = 0;
 
 	ASSERT(state == arc_mru || state == arc_mfu);
 
 	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
 
-	marker = kmem_zalloc(sizeof (arc_buf_hdr_t), KM_SLEEP);
-
 top:
 	mutex_enter(&state->arcs_mtx);
 	mutex_enter(&evicted_state->arcs_mtx);
@@ -1868,14 +1866,14 @@ top:
 		 * the hot code path, so don't sleep.
 		 */
 		if (!recycle && count++ > arc_evict_iterations) {
-			list_insert_after(list, ab, marker);
+			list_insert_after(list, ab, &marker);
 			mutex_exit(&evicted_state->arcs_mtx);
 			mutex_exit(&state->arcs_mtx);
 			kpreempt(KPREEMPT_SYNC);
 			mutex_enter(&state->arcs_mtx);
 			mutex_enter(&evicted_state->arcs_mtx);
-			ab_prev = list_prev(list, marker);
-			list_remove(list, marker);
+			ab_prev = list_prev(list, &marker);
+			list_remove(list, &marker);
 			count = 0;
 			continue;
 		}
@@ -1959,8 +1957,6 @@ top:
 		goto top;
 	}
 
-	kmem_free(marker, sizeof (arc_buf_hdr_t));
-
 	if (bytes_evicted < bytes)
 		dprintf("only evicted %lld bytes from %x\n",
 		    (longlong_t)bytes_evicted, state->arcs_state);
@@ -1990,7 +1986,7 @@ arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes,
     arc_buf_contents_t type)
 {
 	arc_buf_hdr_t *ab, *ab_prev;
-	arc_buf_hdr_t *marker;
+	arc_buf_hdr_t marker;
 	list_t *list = &state->arcs_list[type];
 	kmutex_t *hash_lock;
 	uint64_t bytes_deleted = 0;
@@ -1998,9 +1994,7 @@ arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes,
 	int count = 0;
 
 	ASSERT(GHOST_STATE(state));
-
-	marker = kmem_zalloc(sizeof (arc_buf_hdr_t), KM_SLEEP);
-
+	bzero(&marker, sizeof (marker));
 top:
 	mutex_enter(&state->arcs_mtx);
 	for (ab = list_tail(list); ab; ab = ab_prev) {
@@ -2026,12 +2020,12 @@ top:
 		 * before reacquiring the lock.
 		 */
 		if (count++ > arc_evict_iterations) {
-			list_insert_after(list, ab, marker);
+			list_insert_after(list, ab, &marker);
 			mutex_exit(&state->arcs_mtx);
 			kpreempt(KPREEMPT_SYNC);
 			mutex_enter(&state->arcs_mtx);
-			ab_prev = list_prev(list, marker);
-			list_remove(list, marker);
+			ab_prev = list_prev(list, &marker);
+			list_remove(list, &marker);
 			count = 0;
 			continue;
 		}
@@ -2063,13 +2057,13 @@ top:
 				 * hash lock to become available. Once its
 				 * available, restart from where we left off.
 				 */
-				list_insert_after(list, ab, marker);
+				list_insert_after(list, ab, &marker);
 				mutex_exit(&state->arcs_mtx);
 				mutex_enter(hash_lock);
 				mutex_exit(hash_lock);
 				mutex_enter(&state->arcs_mtx);
-				ab_prev = list_prev(list, marker);
-				list_remove(list, marker);
+				ab_prev = list_prev(list, &marker);
+				list_remove(list, &marker);
 			} else {
 				bufs_skipped += 1;
 			}
@@ -2082,8 +2076,6 @@ top:
 		goto top;
 	}
 
-	kmem_free(marker, sizeof (arc_buf_hdr_t));
-
 	if (bufs_skipped) {
 		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
 		ASSERT(bytes >= 0);