pve-qemu-kvm/debian/patches/pve/0047-glusterfs-allow-partial-reads.patch

79 lines
2.8 KiB
Diff
Raw Normal View History

From 2cebda37c624832599906df01f540fdc76ecac50 Mon Sep 17 00:00:00 2001
From: Wolfgang Bumiller <w.bumiller@proxmox.com>
Date: Wed, 30 Nov 2016 10:27:47 +0100
Subject: [PATCH 47/48] glusterfs: allow partial reads

This should deal with qemu bug #1644754 until upstream
decides which way to go. The general direction seems to be
away from sector based block APIs and with that in mind, and
when comparing to other network block backends (eg. nfs)
treating partial reads as errors doesn't seem to make much
sense.
---
 block/gluster.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/block/gluster.c b/block/gluster.c
index e712dc7..daf6cec 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -42,6 +42,7 @@ typedef struct GlusterAIOCB {
     int ret;
     Coroutine *coroutine;
     AioContext *aio_context;
+    bool is_write;
 } GlusterAIOCB;
 
 typedef struct BDRVGlusterState {
@@ -705,8 +706,10 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
         acb->ret = 0; /* Success */
     } else if (ret < 0) {
         acb->ret = -errno; /* Read/Write failed */
+    } else if (acb->is_write) {
+        acb->ret = -EIO; /* Partial write - fail it */
     } else {
-        acb->ret = -EIO; /* Partial read/write - fail it */
+        acb->ret = 0; /* Success */
     }
 
     aio_co_schedule(acb->aio_context, acb->coroutine);
@@ -954,6 +957,7 @@ static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
     if (ret < 0) {
@@ -1076,9 +1080,11 @@ static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
     acb.aio_context = bdrv_get_aio_context(bs);
 
     if (write) {
+        acb.is_write = true;
         ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                  gluster_finish_aiocb, &acb);
     } else {
+        acb.is_write = false;
         ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                 gluster_finish_aiocb, &acb);
     }
@@ -1142,6 +1148,7 @@ static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
     if (ret < 0) {
@@ -1188,6 +1195,7 @@ static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs,
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
     if (ret < 0) {
-- 
2.1.4