Refactor dmu_req_copy for immutable biovec changes

Originally, dmu_req_copy modified bv_len and bv_offset in the bio_vec so that
it could pick up where it left off on subsequent passes. However, after the
immutable biovec changes in Linux 3.14, modifying a bio_vec is no longer
allowed. Instead, we now tell dmu_req_copy how many bytes have already been
copied, and it skips ahead to the right spot accordingly.

Signed-off-by: Chunwei Chen <tuxoko@gmail.com>
Signed-off-by: Richard Yao <ryao@gentoo.org>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #2124
Authored by Chunwei Chen on 2014-03-29 19:49:55 +08:00, committed by Brian Behlendorf
parent d4541210f3
commit 215b4634c7

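Before reading the diff, here is a small standalone sketch (user-space C, not
the kernel code itself) of the resume logic the commit message describes:
whole segments that were already copied are skipped, a partially copied
segment is entered part-way through, and the segment descriptors are never
written. The names struct seg and copy_segments() are illustrative stand-ins
for struct bio_vec and dmu_req_copy().

/*
 * Standalone illustration of the req_offset skip arithmetic; "struct seg"
 * stands in for struct bio_vec and only segment lengths are modelled.
 */
#include <stdio.h>
#include <stddef.h>

struct seg {
	size_t len;		/* stand-in for bio_vec.bv_len */
};

/*
 * Copy up to 'size' bytes, skipping the first 'req_offset' bytes of the
 * request, and return how many bytes were consumed on this pass.  The
 * segment descriptors are never modified.
 */
static size_t
copy_segments(const struct seg *segs, int nsegs, size_t size, size_t req_offset)
{
	size_t offset = 0;
	int i;

	for (i = 0; i < nsegs; i++) {
		size_t seg_len, tocpy;

		if (offset == size)
			break;

		/* Segment was fully copied on an earlier pass; skip it. */
		if (req_offset >= segs[i].len) {
			req_offset -= segs[i].len;
			continue;
		}

		/* Resume part-way into this segment. */
		seg_len = segs[i].len - req_offset;
		req_offset = 0;

		tocpy = (seg_len < size - offset) ? seg_len : size - offset;
		/* The real code memcpy()s tocpy bytes at
		   page_address(bv->bv_page) + bv_offset here. */
		offset += tocpy;
	}
	return (offset);
}

int
main(void)
{
	struct seg segs[] = { { 4096 }, { 4096 }, { 4096 } };

	/* A first pass copied 6000 bytes; this pass asks for 4000 more. */
	printf("copied %zu bytes\n", copy_segments(segs, 3, 4000, 6000));
	return (0);
}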

@@ -1004,48 +1004,53 @@ xuio_stat_wbuf_nocopy()
 /*
  * Copy up to size bytes between arg_buf and req based on the data direction
- * described by the req. If an entire req's data cannot be transfered the
- * req's is updated such that it's current index and bv offsets correctly
- * reference any residual data which could not be copied. The return value
- * is the number of bytes successfully copied to arg_buf.
+ * described by the req. If an entire req's data cannot be transfered in one
+ * pass, you should pass in @req_offset to indicate where to continue. The
+ * return value is the number of bytes successfully copied to arg_buf.
  */
 static int
-dmu_req_copy(void *arg_buf, int size, int *offset, struct request *req)
+dmu_req_copy(void *arg_buf, int size, struct request *req, size_t req_offset)
 {
 	struct bio_vec *bv;
 	struct req_iterator iter;
 	char *bv_buf;
-	int tocpy;
+	int tocpy, bv_len, bv_offset;
+	int offset = 0;
 
-	*offset = 0;
 	rq_for_each_segment(bv, req, iter) {
-		/* Fully consumed the passed arg_buf */
-		ASSERT3S(*offset, <=, size);
-		if (size == *offset)
-			break;
+		/*
+		 * Fully consumed the passed arg_buf. We use goto here because
+		 * rq_for_each_segment is a double loop
+		 */
+		ASSERT3S(offset, <=, size);
+		if (size == offset)
+			goto out;
 
-		/* Skip fully consumed bv's */
-		if (bv->bv_len == 0)
+		/* Skip already copied bv */
+		if (req_offset >= bv->bv_len) {
+			req_offset -= bv->bv_len;
 			continue;
+		}
+
+		bv_len = bv->bv_len - req_offset;
+		bv_offset = bv->bv_offset + req_offset;
+		req_offset = 0;
 
-		tocpy = MIN(bv->bv_len, size - *offset);
+		tocpy = MIN(bv_len, size - offset);
 		ASSERT3S(tocpy, >=, 0);
 
-		bv_buf = page_address(bv->bv_page) + bv->bv_offset;
+		bv_buf = page_address(bv->bv_page) + bv_offset;
 		ASSERT3P(bv_buf, !=, NULL);
 
 		if (rq_data_dir(req) == WRITE)
-			memcpy(arg_buf + *offset, bv_buf, tocpy);
+			memcpy(arg_buf + offset, bv_buf, tocpy);
 		else
-			memcpy(bv_buf, arg_buf + *offset, tocpy);
+			memcpy(bv_buf, arg_buf + offset, tocpy);
 
-		*offset += tocpy;
-		bv->bv_offset += tocpy;
-		bv->bv_len -= tocpy;
+		offset += tocpy;
 	}
-
-	return (0);
+out:
+	return (offset);
 }
 
 static void

@@ -1101,6 +1106,7 @@ dmu_read_req(objset_t *os, uint64_t object, struct request *req)
 	struct bio *bio_saved = req->bio;
 	dmu_buf_t **dbp;
 	int numbufs, i, err;
+	size_t req_offset;
 
 	/*
 	 * NB: we could do this block-at-a-time, but it's nice

@@ -1122,6 +1128,7 @@ dmu_read_req(objset_t *os, uint64_t object, struct request *req)
 	if (err)
 		goto error;
 
+	req_offset = 0;
 	for (i = 0; i < numbufs; i++) {
 		int tocpy, didcpy, bufoff;
 		dmu_buf_t *db = dbp[i];

@@ -1133,7 +1140,8 @@ dmu_read_req(objset_t *os, uint64_t object, struct request *req)
 		if (tocpy == 0)
 			break;
 
-		err = dmu_req_copy(db->db_data + bufoff, tocpy, &didcpy, req);
+		didcpy = dmu_req_copy(db->db_data + bufoff, tocpy, req,
+		    req_offset);
 
 		if (didcpy < tocpy)
 			err = EIO;

@@ -1143,6 +1151,7 @@ dmu_read_req(objset_t *os, uint64_t object, struct request *req)
 		size -= tocpy;
 		offset += didcpy;
+		req_offset += didcpy;
 		err = 0;
 	}

@@ -1161,9 +1170,8 @@ dmu_write_req(objset_t *os, uint64_t object, struct request *req, dmu_tx_t *tx)
 	uint64_t offset = blk_rq_pos(req) << 9;
 	struct bio *bio_saved = req->bio;
 	dmu_buf_t **dbp;
-	int numbufs;
-	int err = 0;
-	int i;
+	int numbufs, i, err;
+	size_t req_offset;
 
 	if (size == 0)
 		return (0);

@@ -1184,6 +1192,7 @@ dmu_write_req(objset_t *os, uint64_t object, struct request *req, dmu_tx_t *tx)
 	if (err)
 		goto error;
 
+	req_offset = 0;
 	for (i = 0; i < numbufs; i++) {
 		int tocpy, didcpy, bufoff;
 		dmu_buf_t *db = dbp[i];

@@ -1202,7 +1211,8 @@ dmu_write_req(objset_t *os, uint64_t object, struct request *req, dmu_tx_t *tx)
 		else
 			dmu_buf_will_dirty(db, tx);
 
-		err = dmu_req_copy(db->db_data + bufoff, tocpy, &didcpy, req);
+		didcpy = dmu_req_copy(db->db_data + bufoff, tocpy, req,
+		    req_offset);
 
 		if (tocpy == db->db_size)
 			dmu_buf_fill_done(db, tx);

@@ -1215,6 +1225,7 @@ dmu_write_req(objset_t *os, uint64_t object, struct request *req, dmu_tx_t *tx)
 		size -= tocpy;
 		offset += didcpy;
+		req_offset += didcpy;
 		err = 0;
 	}