From c9e3efdb3a6111b9795becc6594b3c52ba004522 Mon Sep 17 00:00:00 2001
From: Fabio Scaccabarozzi
Date: Wed, 1 Apr 2020 18:48:54 +0200
Subject: [PATCH] Bugfix/fix uio partial copies

In zfs_write(), the loop continues to the next iteration without
accounting for partial copies occurring in uiomove_iov() when
copy_from_user/__copy_from_user_inatomic return a non-zero status.
This results in "zfs: accessing past end of object..." in the kernel
log, and the write failing.

Account for partial copies and update the uio struct before returning
EFAULT, and leave a comment explaining why this is done.

Reviewed-by: Brian Behlendorf
Reviewed-by: ilbsmart
Signed-off-by: Fabio Scaccabarozzi
Closes #8673
Closes #10148
---
 module/os/linux/zfs/zfs_vnops.c |  9 +++++++++
 module/zcommon/zfs_uio.c        | 25 +++++++++++++++++--------
 2 files changed, 26 insertions(+), 8 deletions(-)

diff --git a/module/os/linux/zfs/zfs_vnops.c b/module/os/linux/zfs/zfs_vnops.c
index 4929c97e9..aba125f3b 100644
--- a/module/os/linux/zfs/zfs_vnops.c
+++ b/module/os/linux/zfs/zfs_vnops.c
@@ -829,6 +829,15 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 			uio->uio_fault_disable = B_FALSE;
 			if (error == EFAULT) {
 				dmu_tx_commit(tx);
+				/*
+				 * Account for partial writes before
+				 * continuing the loop.
+				 * Update needs to occur before the next
+				 * uio_prefaultpages, or prefaultpages may
+				 * error, and we may break the loop early.
+				 */
+				if (tx_bytes != uio->uio_resid)
+					n -= tx_bytes - uio->uio_resid;
 				if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
 					break;
 				}
diff --git a/module/zcommon/zfs_uio.c b/module/zcommon/zfs_uio.c
index c1e31f51b..d586e0a12 100644
--- a/module/zcommon/zfs_uio.c
+++ b/module/zcommon/zfs_uio.c
@@ -80,22 +80,31 @@ uiomove_iov(void *p, size_t n, enum uio_rw rw, struct uio *uio)
 			if (copy_to_user(iov->iov_base+skip, p, cnt))
 				return (EFAULT);
 		} else {
+			unsigned long b_left = 0;
 			if (uio->uio_fault_disable) {
 				if (!zfs_access_ok(VERIFY_READ,
 				    (iov->iov_base + skip), cnt)) {
 					return (EFAULT);
 				}
 				pagefault_disable();
-				if (__copy_from_user_inatomic(p,
-				    (iov->iov_base + skip), cnt)) {
-					pagefault_enable();
-					return (EFAULT);
-				}
+				b_left =
+				    __copy_from_user_inatomic(p,
+				    (iov->iov_base + skip), cnt);
 				pagefault_enable();
 			} else {
-				if (copy_from_user(p,
-				    (iov->iov_base + skip), cnt))
-					return (EFAULT);
+				b_left =
+				    copy_from_user(p,
+				    (iov->iov_base + skip), cnt);
+			}
+			if (b_left > 0) {
+				unsigned long c_bytes =
+				    cnt - b_left;
+				uio->uio_skip += c_bytes;
+				ASSERT3U(uio->uio_skip, <,
+				    iov->iov_len);
+				uio->uio_resid -= c_bytes;
+				uio->uio_loffset += c_bytes;
+				return (EFAULT);
 			}
 		}
 		break;
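
Note on the accounting above: the fix relies on the Linux convention that
copy_from_user() and __copy_from_user_inatomic() return the number of bytes
they could NOT copy, so "cnt - b_left" is the number of bytes that actually
landed in the kernel buffer. Below is a minimal userspace sketch of that
bookkeeping; fake_copy_from_user() and struct mini_uio are hypothetical
stand-ins for the kernel helper and the uio fields the patch touches, not
part of ZFS.

/*
 * Minimal userspace model of the partial-copy accounting added to
 * uiomove_iov(). fake_copy_from_user() mimics the kernel convention
 * of returning the number of bytes left uncopied; it is a hypothetical
 * stand-in, as is struct mini_uio.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Only the uio fields the patch updates on a partial copy. */
struct mini_uio {
	size_t uio_skip;	/* offset into the current iovec */
	size_t uio_resid;	/* bytes remaining in the request */
	long long uio_loffset;	/* file offset */
};

/*
 * Copy at most 'limit' bytes, then pretend to fault. Returns the
 * number of bytes NOT copied, like copy_from_user().
 */
static size_t
fake_copy_from_user(void *dst, const void *src, size_t cnt, size_t limit)
{
	size_t done = (cnt < limit) ? cnt : limit;

	memcpy(dst, src, done);
	return (cnt - done);
}

int
main(void)
{
	char src[32] = "0123456789abcdefghijklmnopqrstu";
	char dst[32] = { 0 };
	struct mini_uio uio = { 0, sizeof (src), 0 };
	size_t cnt = sizeof (src);

	/* Simulate a fault after 20 of the 32 requested bytes. */
	size_t b_left = fake_copy_from_user(dst, src, cnt, 20);

	if (b_left > 0) {
		/* The same bookkeeping the patch performs before EFAULT. */
		size_t c_bytes = cnt - b_left;

		uio.uio_skip += c_bytes;
		uio.uio_resid -= c_bytes;
		uio.uio_loffset += c_bytes;
		printf("copied %zu, resid %zu, loffset %lld\n",
		    c_bytes, uio.uio_resid, uio.uio_loffset);
		/* A real caller would now return (EFAULT). */
	}
	assert(uio.uio_resid == 12);
	return (0);
}

Compiled and run, this prints "copied 20, resid 12, loffset 20". In the
patch, the updated uio_resid is what lets zfs_write() detect the short
copy (tx_bytes != uio->uio_resid) and shrink n before the next
uio_prefaultpages() call, instead of walking past the end of the object.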