Use uio for zvol_{read,write}

Now that uio supports bvec, a bio can be mapped directly onto a uio and passed
to the existing dmu_{read,write}_uio routines. This lets us drop the duplicate
dmu_{read,write}_bio and dmu_bio_copy code paths in dmu.c.

Signed-off-by: Chunwei Chen <david.chen@osnexus.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #4078
Chunwei Chen authored 2015-12-08 12:37:24 -08:00; committed by Brian Behlendorf
commit 2727b9d3b6 (parent 502923bb44)
4 changed files with 24 additions and 172 deletions
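
For context, the heart of this change is describing a bio's unconsumed bvec
segments as a uio_t and handing that to the existing dmu_{read,write}_uio()
routines. Below is a minimal sketch of that mapping, mirroring the zvol_write()
and zvol_read() hunks further down; the helper name bio_to_uio() and the
include lines are illustrative, not part of the commit.

#include <sys/uio.h>			/* uio_t, UIO_BVEC (SPL) */
#include <linux/blkdev_compat.h>	/* BIO_BI_* compatibility macros */

/* Describe the unconsumed portion of a bio as a uio_t. */
static void
bio_to_uio(struct bio *bio, uio_t *uio)
{
	uio->uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
	uio->uio_skip = BIO_BI_SKIP(bio);	/* bytes already consumed in the first bvec */
	uio->uio_resid = BIO_BI_SIZE(bio);	/* bytes left to transfer */
	uio->uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
	uio->uio_loffset = BIO_BI_SECTOR(bio) << 9;	/* byte offset on the volume */
	uio->uio_limit = MAXOFFSET_T;
	uio->uio_segflg = UIO_BVEC;	/* walk bvecs rather than iovecs */
}

zvol_write() then hands the resulting uio and the bio's size to dmu_write_uio(),
and zvol_read() to dmu_read_uio(), exactly as the hunks below show.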

blkdev_compat.h

@@ -118,6 +118,7 @@ get_disk_ro(struct gendisk *disk)
#define BIO_BI_SECTOR(bio) (bio)->bi_iter.bi_sector
#define BIO_BI_SIZE(bio) (bio)->bi_iter.bi_size
#define BIO_BI_IDX(bio) (bio)->bi_iter.bi_idx
#define BIO_BI_SKIP(bio) (bio)->bi_iter.bi_bvec_done
#define bio_for_each_segment4(bv, bvp, b, i) \
bio_for_each_segment((bv), (b), (i))
typedef struct bvec_iter bvec_iterator_t;
@@ -125,6 +126,7 @@ typedef struct bvec_iter bvec_iterator_t;
#define BIO_BI_SECTOR(bio) (bio)->bi_sector
#define BIO_BI_SIZE(bio) (bio)->bi_size
#define BIO_BI_IDX(bio) (bio)->bi_idx
#define BIO_BI_SKIP(bio) (0)
#define bio_for_each_segment4(bv, bvp, b, i) \
bio_for_each_segment((bvp), (b), (i))
typedef int bvec_iterator_t;

dmu.h

@@ -710,9 +710,6 @@ void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx);
#ifdef _KERNEL
#include <linux/blkdev_compat.h>
int dmu_read_bio(objset_t *os, uint64_t object, struct bio *bio);
int dmu_write_bio(objset_t *os, uint64_t object, struct bio *bio,
dmu_tx_t *tx);
int dmu_read_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size);
int dmu_read_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size);
int dmu_write_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size,

dmu.c

@@ -1041,170 +1041,6 @@ xuio_stat_wbuf_nocopy()
}
#ifdef _KERNEL
/*
* Copy up to size bytes between arg_buf and the bio, in the direction
* given by bio_data_dir(bio). If an entire bio's data cannot be
* transferred in one pass, pass @bio_offset to indicate where to
* continue. The return value is the number of bytes actually copied.
*/
static int
dmu_bio_copy(void *arg_buf, int size, struct bio *bio, size_t bio_offset)
{
struct bio_vec bv, *bvp = &bv;
bvec_iterator_t iter;
char *bv_buf;
int tocpy, bv_len, bv_offset;
int offset = 0;
bio_for_each_segment4(bv, bvp, bio, iter) {
/*
* The passed arg_buf has been fully consumed; stop copying.
*/
ASSERT3S(offset, <=, size);
if (size == offset)
goto out;
/* Skip already copied bvp */
if (bio_offset >= bvp->bv_len) {
bio_offset -= bvp->bv_len;
continue;
}
bv_len = bvp->bv_len - bio_offset;
bv_offset = bvp->bv_offset + bio_offset;
bio_offset = 0;
tocpy = MIN(bv_len, size - offset);
ASSERT3S(tocpy, >=, 0);
bv_buf = page_address(bvp->bv_page) + bv_offset;
ASSERT3P(bv_buf, !=, NULL);
if (bio_data_dir(bio) == WRITE)
memcpy(arg_buf + offset, bv_buf, tocpy);
else
memcpy(bv_buf, arg_buf + offset, tocpy);
offset += tocpy;
}
out:
return (offset);
}
int
dmu_read_bio(objset_t *os, uint64_t object, struct bio *bio)
{
uint64_t offset = BIO_BI_SECTOR(bio) << 9;
uint64_t size = BIO_BI_SIZE(bio);
dmu_buf_t **dbp;
int numbufs, i, err;
size_t bio_offset;
/*
* NB: we could do this block-at-a-time, but it's nice
* to be reading in parallel.
*/
err = dmu_buf_hold_array(os, object, offset, size, TRUE, FTAG,
&numbufs, &dbp);
if (err)
return (err);
bio_offset = 0;
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
int didcpy;
dmu_buf_t *db = dbp[i];
bufoff = offset - db->db_offset;
ASSERT3S(bufoff, >=, 0);
tocpy = MIN(db->db_size - bufoff, size);
if (tocpy == 0)
break;
didcpy = dmu_bio_copy(db->db_data + bufoff, tocpy, bio,
bio_offset);
if (didcpy < tocpy)
err = EIO;
if (err)
break;
size -= tocpy;
offset += didcpy;
bio_offset += didcpy;
err = 0;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (err);
}
int
dmu_write_bio(objset_t *os, uint64_t object, struct bio *bio, dmu_tx_t *tx)
{
uint64_t offset = BIO_BI_SECTOR(bio) << 9;
uint64_t size = BIO_BI_SIZE(bio);
dmu_buf_t **dbp;
int numbufs, i, err;
size_t bio_offset;
if (size == 0)
return (0);
err = dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG,
&numbufs, &dbp);
if (err)
return (err);
bio_offset = 0;
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
int didcpy;
dmu_buf_t *db = dbp[i];
bufoff = offset - db->db_offset;
ASSERT3S(bufoff, >=, 0);
tocpy = MIN(db->db_size - bufoff, size);
if (tocpy == 0)
break;
ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
if (tocpy == db->db_size)
dmu_buf_will_fill(db, tx);
else
dmu_buf_will_dirty(db, tx);
didcpy = dmu_bio_copy(db->db_data + bufoff, tocpy, bio,
bio_offset);
if (tocpy == db->db_size)
dmu_buf_fill_done(db, tx);
if (didcpy < tocpy)
err = EIO;
if (err)
break;
size -= tocpy;
offset += didcpy;
bio_offset += didcpy;
err = 0;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (err);
}
static int
dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size)
{

zvol.c

@@ -597,6 +597,7 @@ zvol_write(struct bio *bio)
int error = 0;
dmu_tx_t *tx;
rl_t *rl;
uio_t uio;
if (bio->bi_rw & VDEV_REQ_FLUSH)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
@@ -607,6 +608,14 @@ zvol_write(struct bio *bio)
if (size == 0)
goto out;
uio.uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
uio.uio_skip = BIO_BI_SKIP(bio);
uio.uio_resid = size;
uio.uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
uio.uio_loffset = offset;
uio.uio_limit = MAXOFFSET_T;
uio.uio_segflg = UIO_BVEC;
rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);
tx = dmu_tx_create(zv->zv_objset);
@@ -620,7 +629,7 @@ zvol_write(struct bio *bio)
goto out;
}
error = dmu_write_bio(zv->zv_objset, ZVOL_OBJ, bio, tx);
error = dmu_write_uio(zv->zv_objset, ZVOL_OBJ, &uio, size, tx);
if (error == 0)
zvol_log_write(zv, tx, offset, size,
!!(bio->bi_rw & VDEV_REQ_FUA));
@@ -686,17 +695,25 @@ zvol_read(struct bio *bio)
{
zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
uint64_t offset = BIO_BI_SECTOR(bio) << 9;
uint64_t len = BIO_BI_SIZE(bio);
uint64_t size = BIO_BI_SIZE(bio);
int error;
rl_t *rl;
uio_t uio;
if (len == 0)
if (size == 0)
return (0);
uio.uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
uio.uio_skip = BIO_BI_SKIP(bio);
uio.uio_resid = size;
uio.uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
uio.uio_loffset = offset;
uio.uio_limit = MAXOFFSET_T;
uio.uio_segflg = UIO_BVEC;
rl = zfs_range_lock(&zv->zv_znode, offset, len, RL_READER);
rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
error = dmu_read_bio(zv->zv_objset, ZVOL_OBJ, bio);
error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, &uio, size);
zfs_range_unlock(rl);