a816d2969e
There doesn't seem to be any Proxmox VE code using this.

Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
78 lines | 2.9 KiB
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Wolfgang Bumiller <w.bumiller@proxmox.com>
Date: Mon, 6 Apr 2020 12:16:38 +0200
Subject: [PATCH] PVE: [Up] glusterfs: allow partial reads

This should deal with qemu bug #1644754 until upstream
decides which way to go. The general direction seems to be
away from sector based block APIs and with that in mind, and
when comparing to other network block backends (eg. nfs)
treating partial reads as errors doesn't seem to make much
sense.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
 block/gluster.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/block/gluster.c b/block/gluster.c
index f11a40aa9e..6756e6b886 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -58,6 +58,7 @@ typedef struct GlusterAIOCB {
     int ret;
     Coroutine *coroutine;
     AioContext *aio_context;
+    bool is_write;
 } GlusterAIOCB;
 
 typedef struct BDRVGlusterState {
@@ -753,8 +754,10 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret,
         acb->ret = 0; /* Success */
     } else if (ret < 0) {
         acb->ret = -errno; /* Read/Write failed */
+    } else if (acb->is_write) {
+        acb->ret = -EIO; /* Partial write - fail it */
     } else {
-        acb->ret = -EIO; /* Partial read/write - fail it */
+        acb->ret = 0; /* Success */
     }
 
     aio_co_schedule(acb->aio_context, acb->coroutine);
@@ -1021,6 +1024,7 @@ static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_zerofill_async(s->fd, offset, bytes, gluster_finish_aiocb, &acb);
     if (ret < 0) {
@@ -1201,9 +1205,11 @@ static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
     acb.aio_context = bdrv_get_aio_context(bs);
 
     if (write) {
+        acb.is_write = true;
         ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                  gluster_finish_aiocb, &acb);
     } else {
+        acb.is_write = false;
         ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                 gluster_finish_aiocb, &acb);
     }
@@ -1266,6 +1272,7 @@ static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
     if (ret < 0) {
@@ -1314,6 +1321,7 @@ static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs,
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
     acb.aio_context = bdrv_get_aio_context(bs);
+    acb.is_write = true;
 
     ret = glfs_discard_async(s->fd, offset, bytes, gluster_finish_aiocb, &acb);
     if (ret < 0) {
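
A minimal, self-contained sketch of the completion logic that results from the hunks above (not part of the patch itself): the new is_write flag splits the old "partial transfer is always -EIO" branch, so short reads now complete successfully while writes, zero-fill, flush and discard still fail on a partial result. DemoAIOCB and demo_finish are hypothetical stand-ins for GlusterAIOCB and gluster_finish_aiocb, which carry more state and hand the result back to a coroutine via an AioContext in the real block/gluster.c.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
        long size;      /* requested transfer size */
        int ret;        /* result reported back to the caller */
        bool is_write;  /* set by the submit paths patched above */
    } DemoAIOCB;

    /* Mirrors the post-patch branch structure of gluster_finish_aiocb. */
    static void demo_finish(DemoAIOCB *acb, long ret)
    {
        if (!ret || ret == acb->size) {
            acb->ret = 0;              /* full transfer: success */
        } else if (ret < 0) {
            acb->ret = -errno;         /* I/O error reported by libgfapi */
        } else if (acb->is_write) {
            acb->ret = -EIO;           /* partial write: still an error */
        } else {
            acb->ret = 0;              /* partial read: treated as success */
        }
    }

    int main(void)
    {
        DemoAIOCB read_acb  = { .size = 4096, .is_write = false };
        DemoAIOCB write_acb = { .size = 4096, .is_write = true };

        demo_finish(&read_acb, 2048);  /* short read */
        demo_finish(&write_acb, 2048); /* short write */
        printf("short read -> %d, short write -> %d\n",
               read_acb.ret, write_acb.ret);
        return 0;
    }

On Linux, where EIO is 5, this prints "short read -> 0, short write -> -5", matching the behaviour the patch introduces.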