/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <linux/mod_compat.h>
#include <linux/msdos_fs.h>

char *zfs_vdev_scheduler = VDEV_SCHEDULER;
static void *zfs_vdev_holder = VDEV_HOLDER;

/* size of the "reserved" partition, in blocks */
#define	EFI_MIN_RESV_SIZE	(16 * 1024)

/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
	zio_t		*dr_zio;	/* Parent ZIO */
	atomic_t	dr_ref;		/* References */
	int		dr_error;	/* Bio error */
	int		dr_bio_count;	/* Count of bio's */
	struct bio	*dr_bio[0];	/* Attached bio's */
} dio_request_t;
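
/*
 * Illustrative sketch (assumed helper, not part of this excerpt): since
 * dr_bio[0] above is a zero-length (flexible) array, a dio_request_t
 * and its table of bio pointers can be sized and allocated as a single
 * kmem allocation based on the number of attached bio's.
 */
#if 0	/* example only */
static dio_request_t *
dio_request_alloc_example(zio_t *zio, int bio_count)
{
	dio_request_t *dr;

	/* One allocation covers the struct plus bio_count pointers. */
	dr = kmem_zalloc(sizeof (dio_request_t) +
	    sizeof (struct bio *) * bio_count, KM_SLEEP);

	dr->dr_zio = zio;
	dr->dr_bio_count = bio_count;
	atomic_set(&dr->dr_ref, 0);

	return (dr);
}
#endif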

#ifdef HAVE_OPEN_BDEV_EXCLUSIVE
static fmode_t
vdev_bdev_mode(int smode)
{
	fmode_t mode = 0;

	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if (smode & FREAD)
		mode |= FMODE_READ;

	if (smode & FWRITE)
		mode |= FMODE_WRITE;

	return (mode);
}
#else
static int
vdev_bdev_mode(int smode)
{
	int mode = 0;

	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if ((smode & FREAD) && !(smode & FWRITE))
		mode = MS_RDONLY;

	return (mode);
}
#endif /* HAVE_OPEN_BDEV_EXCLUSIVE */
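
/*
 * Illustrative sketch (example only, mirroring the usage in
 * vdev_disk_open() below): vdev_bdev_mode() translates the
 * Solaris-style FREAD/FWRITE flags into the mode value expected by the
 * kernel's block device open interface, e.g. FREAD | FWRITE becomes
 * FMODE_READ | FMODE_WRITE on kernels with open_bdev_exclusive().
 */
#if 0	/* example only */
static struct block_device *
open_vdev_example(vdev_t *v)
{
	/* The pool's spa_mode() supplies the FREAD/FWRITE flags. */
	fmode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa));

	return (vdev_bdev_open(v->vdev_path, mode, zfs_vdev_holder));
}
#endif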

/*
 * Returns the usable capacity (in bytes) for the partition or disk.
 */
static uint64_t
bdev_capacity(struct block_device *bdev)
{
	return (i_size_read(bdev->bd_inode));
}

/*
 * Returns the maximum expansion capacity of the block device (in bytes).
 *
 * It is possible to expand a vdev when it has been created as a wholedisk
 * and the containing block device has increased in capacity, or when the
 * partition containing the pool has been manually increased in size.
 *
 * This function is only responsible for calculating the potential expansion
 * size so it can be reported by 'zpool list'. The efi_use_whole_disk()
 * function is responsible for verifying the expected partition layout in
 * the wholedisk case, and updating the partition table if appropriate.
 * Once the partition size has been increased the additional capacity will
 * be visible using bdev_capacity().
 */
static uint64_t
bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
{
	uint64_t psize;
	int64_t available;

	if (wholedisk && bdev->bd_part != NULL && bdev != bdev->bd_contains) {
		/*
		 * When reporting maximum expansion capacity for a wholedisk
		 * deduct any capacity which is expected to be lost due to
		 * alignment restrictions. Over-reporting this value isn't
		 * harmful and would only result in slightly less capacity
		 * than expected post expansion.
		 */
		available = i_size_read(bdev->bd_contains->bd_inode) -
		    ((EFI_MIN_RESV_SIZE + NEW_START_BLOCK +
		    PARTITION_END_ALIGNMENT) << SECTOR_BITS);
		if (available > 0)
			psize = available;
		else
			psize = bdev_capacity(bdev);
	} else {
		psize = bdev_capacity(bdev);
	}

	return (psize);
}
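
/*
 * Worked example (sector and alignment values assumed, not taken from
 * this excerpt: NEW_START_BLOCK = 2048, PARTITION_END_ALIGNMENT = 2048,
 * SECTOR_BITS = 9 for 512-byte sectors): the wholedisk deduction above
 * would reserve
 *
 *	(16384 + 2048 + 2048) << 9 = 20480 * 512 = 10,485,760 bytes
 *
 * so roughly 10 MiB is excluded from the reported maximum expansion
 * size to cover the reserved partition and alignment padding.
 */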

static void
vdev_disk_error(zio_t *zio)
{
	/*
	 * This function can be called in interrupt context, for instance while
	 * handling IRQs coming from a misbehaving disk device; use printk()
	 * which is safe from any context.
	 */
	printk(KERN_WARNING "zio pool=%s vdev=%s error=%d type=%d "
	    "offset=%llu size=%llu flags=%x\n", spa_name(zio->io_spa),
	    zio->io_vd->vdev_path, zio->io_error, zio->io_type,
	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
	    zio->io_flags);
}

/*
 * Use the Linux 'noop' elevator for zfs managed block devices. This
 * strikes the ideal balance by allowing the zfs elevator to do all
 * request ordering and prioritization, while allowing the Linux
 * elevator to do the maximum front/back merging allowed by the
 * physical device. This yields the largest possible requests for
 * the device with the lowest total overhead.
 */
static void
vdev_elevator_switch(vdev_t *v, char *elevator)
{
	vdev_disk_t *vd = v->vdev_tsd;
	struct request_queue *q;
	char *device;
	int error;

	for (int c = 0; c < v->vdev_children; c++)
		vdev_elevator_switch(v->vdev_child[c], elevator);

	if (!v->vdev_ops->vdev_op_leaf || vd->vd_bdev == NULL)
		return;

	q = bdev_get_queue(vd->vd_bdev);
	device = vd->vd_bdev->bd_disk->disk_name;

	/*
	 * Skip devices which are not whole disks (partitions).
	 * Device-mapper devices are excepted since they may be whole
	 * disks despite the vdev_wholedisk flag, in which case we can
	 * and should switch the elevator. If the device-mapper device
	 * does not have an elevator (i.e. dm-raid, dm-crypt, etc.) the
	 * "Skip devices without schedulers" check below will fail.
	 */
	if (!v->vdev_wholedisk && strncmp(device, "dm-", 3) != 0)
		return;

	/* Leave existing scheduler when set to "none" */
	if ((strncmp(elevator, "none", 4) == 0) && (strlen(elevator) == 4))
		return;

	/*
	 * The elevator_change() function was available in kernels from
	 * 2.6.36 to 4.11. When not available fall back to using the user
	 * mode helper functionality to set the elevator via sysfs. This
	 * requires /bin/echo and sysfs to be mounted which may not be true
	 * early in the boot process.
	 */
#ifdef HAVE_ELEVATOR_CHANGE
	error = elevator_change(q, elevator);
#else
#define	SET_SCHEDULER_CMD \
	"exec 0</dev/null " \
	"     1>/sys/block/%s/queue/scheduler " \
	"     2>/dev/null; " \
	"echo %s"

	char *argv[] = { "/bin/sh", "-c", NULL, NULL };
	char *envp[] = { NULL };

	argv[2] = kmem_asprintf(SET_SCHEDULER_CMD, device, elevator);
	error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	strfree(argv[2]);
#endif /* HAVE_ELEVATOR_CHANGE */
	if (error) {
		zfs_dbgmsg("Unable to set \"%s\" scheduler for %s (%s): %d\n",
		    elevator, v->vdev_path, device, error);
	}
}
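
/*
 * Illustrative sketch (example only, not part of this excerpt): a
 * module parameter handler could apply a new zfs_vdev_scheduler value
 * to every imported pool by walking the spa namespace and reusing
 * vdev_elevator_switch() above. The spa_next()/spa_open_ref()
 * iteration pattern and the zfs_kernel_param_t typedef (from
 * linux/mod_compat.h) are assumptions; treat this as a hedged sketch
 * rather than the shipped handler.
 */
#if 0	/* example only */
static int
param_set_vdev_scheduler_example(const char *val, zfs_kernel_param_t *kp)
{
	spa_t *spa = NULL;

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		/* Only touch pools which are active and writeable. */
		if (spa_state(spa) != POOL_STATE_ACTIVE ||
		    !spa_writeable(spa) || spa_suspended(spa))
			continue;

		/* Hold a reference so the pool cannot be exported. */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		vdev_elevator_switch(spa->spa_root_vdev, (char *)val);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);

	/* Record the new default for subsequently opened devices. */
	return (param_set_charp(val, kp));
}
#endif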

static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	struct block_device *bdev;
	fmode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa));
	int count = 0, block_size;
	int bdev_retry_count = 50;
	vdev_disk_t *vd;

	/* Must have a pathname and it must be absolute. */
	if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
		v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		vdev_dbgmsg(v, "invalid vdev_path");
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Reopen the device if it is currently open. When expanding a
	 * partition, force re-scanning the partition table while closed
	 * in order to get an accurate updated block device size. Then,
	 * since udev may need to recreate the device links, increase the
	 * open retry count before reporting the device as unavailable.
	 */
	vd = v->vdev_tsd;
	if (vd) {
		char disk_name[BDEVNAME_SIZE + 6] = "/dev/";
		boolean_t reread_part = B_FALSE;

		rw_enter(&vd->vd_lock, RW_WRITER);
		bdev = vd->vd_bdev;
		vd->vd_bdev = NULL;

		if (bdev) {
			if (v->vdev_expanding && bdev != bdev->bd_contains) {
				bdevname(bdev->bd_contains, disk_name + 5);
				reread_part = B_TRUE;
			}

			vdev_bdev_close(bdev, mode);
		}

		if (reread_part) {
			bdev = vdev_bdev_open(disk_name, mode,
			    zfs_vdev_holder);
			if (!IS_ERR(bdev)) {
				int error = vdev_bdev_reread_part(bdev);
				vdev_bdev_close(bdev, mode);
				if (error == 0)
					bdev_retry_count = 100;
			}
		}
	} else {
		vd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);

		rw_init(&vd->vd_lock, NULL, RW_DEFAULT, NULL);
		rw_enter(&vd->vd_lock, RW_WRITER);
	}

	/*
	 * Devices are always opened by the path provided at configuration
	 * time. This means that if the provided path is a udev by-id path
	 * then drives may be re-cabled without an issue. If the provided
	 * path is a udev by-path path, then the physical location information
	 * will be preserved. This can be critical for more complicated
	 * configurations where drives are located in specific physical
Add support for autoexpand property
2018-07-24 01:40:15 +03:00
|
|
|
* locations to maximize the system's tolerance to component failure.
|
|
|
|
*
|
2013-03-30 06:27:50 +04:00
|
|
|
* Alternatively, you can provide your own udev rule to flexibly map
|
2010-08-26 22:45:02 +04:00
|
|
|
* the drives as you see fit. It is not advised that you use the
|
2013-03-30 06:27:50 +04:00
|
|
|
* /dev/[hd]d devices which may be reordered due to probing order.
|
2010-08-26 22:45:02 +04:00
|
|
|
* Devices in the wrong locations will be detected by the higher
|
|
|
|
* level vdev validation.
|
2016-04-19 21:19:12 +03:00
|
|
|
*
|
|
|
|
* The specified paths may be briefly removed and recreated in
|
|
|
|
* response to udev events. This should be exceptionally unlikely
|
|
|
|
* because the zpool command makes every effort to verify these paths
|
|
|
|
* have already settled prior to reaching this point. Therefore,
|
|
|
|
* an ENOENT failure at this point is highly likely to be transient
|
|
|
|
* and it is reasonable to sleep and retry before giving up. In
|
|
|
|
* practice delays have been observed to be on the order of 100ms.
|
2010-08-26 22:45:02 +04:00
|
|
|
*/
|
Add support for autoexpand property
2018-07-24 01:40:15 +03:00
|
|
|
bdev = ERR_PTR(-ENXIO);
|
|
|
|
while (IS_ERR(bdev) && count < bdev_retry_count) {
|
|
|
|
bdev = vdev_bdev_open(v->vdev_path, mode, zfs_vdev_holder);
|
2016-04-19 21:19:12 +03:00
|
|
|
if (unlikely(PTR_ERR(bdev) == -ENOENT)) {
|
Add support for autoexpand property
2018-07-24 01:40:15 +03:00
|
|
|
schedule_timeout(MSEC_TO_TICK(10));
|
2016-04-19 21:19:12 +03:00
|
|
|
count++;
|
|
|
|
} else if (IS_ERR(bdev)) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-08-26 22:45:02 +04:00
|
|
|
if (IS_ERR(bdev)) {
|
Add support for autoexpand property
2018-07-24 01:40:15 +03:00
|
|
|
int error = -PTR_ERR(bdev);
|
|
|
|
vdev_dbgmsg(v, "open error=%d count=%d\n", error, count);
|
|
|
|
vd->vd_bdev = NULL;
|
|
|
|
v->vdev_tsd = vd;
|
|
|
|
rw_exit(&vd->vd_lock);
|
|
|
|
return (SET_ERROR(error));
|
|
|
|
} else {
|
|
|
|
vd->vd_bdev = bdev;
|
|
|
|
v->vdev_tsd = vd;
|
|
|
|
rw_exit(&vd->vd_lock);
|
2010-08-26 22:45:02 +04:00
|
|
|
}
|
|
|
|
|
2013-02-26 23:25:55 +04:00
|
|
|
/* Determine the physical block size */
|
|
|
|
block_size = vdev_bdev_block_size(vd->vd_bdev);
|
2010-08-26 22:45:02 +04:00
|
|
|
|
|
|
|
/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
|
|
|
|
v->vdev_nowritecache = B_FALSE;
|
|
|
|
|
2015-08-29 19:01:07 +03:00
|
|
|
/* Inform the ZIO pipeline that we are non-rotational */
|
|
|
|
v->vdev_nonrot = blk_queue_nonrot(bdev_get_queue(vd->vd_bdev));
|
|
|
|
|
Add support for autoexpand property
2018-07-24 01:40:15 +03:00
|
|
|
/* Physical volume size in bytes for the partition */
|
|
|
|
*psize = bdev_capacity(vd->vd_bdev);
|
|
|
|
|
|
|
|
/* Physical volume size in bytes including possible expansion space */
|
|
|
|
*max_psize = bdev_max_capacity(vd->vd_bdev, v->vdev_wholedisk);
|
2012-01-24 06:43:32 +04:00
|
|
|
|
2010-08-26 22:45:02 +04:00
|
|
|
/* Based on the minimum sector size, set the block size */
|
2014-04-16 07:40:22 +04:00
|
|
|
*ashift = highbit64(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;
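/*
 * Worked example: a 4096-byte physical block size gives
 * highbit64(4096) - 1 = 13 - 1 = 12, i.e. ashift=12; a 512-byte
 * device (SPA_MINBLOCKSIZE) gives highbit64(512) - 1 = 9.
 */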
|
2010-08-26 22:45:02 +04:00
|
|
|
|
2011-02-08 00:54:59 +03:00
|
|
|
/* Try to set the io scheduler elevator algorithm */
|
2011-02-25 22:26:41 +03:00
|
|
|
(void) vdev_elevator_switch(v, zfs_vdev_scheduler);
|
2011-02-08 00:54:59 +03:00
|
|
|
|
2013-11-01 23:26:11 +04:00
|
|
|
return (0);
|
2010-08-26 22:45:02 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
vdev_disk_close(vdev_t *v)
|
|
|
|
{
|
|
|
|
vdev_disk_t *vd = v->vdev_tsd;
|
|
|
|
|
2013-02-26 23:25:55 +04:00
|
|
|
if (v->vdev_reopening || vd == NULL)
|
2010-08-26 22:45:02 +04:00
|
|
|
return;
|
|
|
|
|
Add support for autoexpand property
2018-07-24 01:40:15 +03:00
|
|
|
if (vd->vd_bdev != NULL) {
|
2010-08-26 22:45:02 +04:00
|
|
|
vdev_bdev_close(vd->vd_bdev,
|
2013-11-01 23:26:11 +04:00
|
|
|
vdev_bdev_mode(spa_mode(v->vdev_spa)));
|
Add support for autoexpand property
2018-07-24 01:40:15 +03:00
|
|
|
}
|
2010-08-26 22:45:02 +04:00
|
|
|
|
Add support for autoexpand property
2018-07-24 01:40:15 +03:00
|
|
|
rw_destroy(&vd->vd_lock);
|
2013-11-01 23:26:11 +04:00
|
|
|
kmem_free(vd, sizeof (vdev_disk_t));
|
2010-08-26 22:45:02 +04:00
|
|
|
v->vdev_tsd = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static dio_request_t *
|
|
|
|
vdev_disk_dio_alloc(int bio_count)
|
|
|
|
{
|
|
|
|
dio_request_t *dr;
|
|
|
|
int i;
|
|
|
|
|
2013-11-01 23:26:11 +04:00
|
|
|
dr = kmem_zalloc(sizeof (dio_request_t) +
|
2014-11-21 03:09:39 +03:00
|
|
|
sizeof (struct bio *) * bio_count, KM_SLEEP);
|
2010-08-26 22:45:02 +04:00
|
|
|
if (dr) {
|
|
|
|
atomic_set(&dr->dr_ref, 0);
|
|
|
|
dr->dr_bio_count = bio_count;
|
|
|
|
dr->dr_error = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < dr->dr_bio_count; i++)
|
|
|
|
dr->dr_bio[i] = NULL;
|
|
|
|
}
|
|
|
|
|
2013-11-01 23:26:11 +04:00
|
|
|
return (dr);
|
2010-08-26 22:45:02 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
vdev_disk_dio_free(dio_request_t *dr)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < dr->dr_bio_count; i++)
|
|
|
|
if (dr->dr_bio[i])
|
|
|
|
bio_put(dr->dr_bio[i]);
|
|
|
|
|
2013-11-01 23:26:11 +04:00
|
|
|
kmem_free(dr, sizeof (dio_request_t) +
|
|
|
|
sizeof (struct bio *) * dr->dr_bio_count);
|
2010-08-26 22:45:02 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
vdev_disk_dio_get(dio_request_t *dr)
|
|
|
|
{
|
|
|
|
atomic_inc(&dr->dr_ref);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
vdev_disk_dio_put(dio_request_t *dr)
|
|
|
|
{
|
|
|
|
int rc = atomic_dec_return(&dr->dr_ref);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free the dio_request when the last reference is dropped and
|
|
|
|
* ensure zio_interrupt is called only once with the correct zio
|
|
|
|
*/
|
|
|
|
if (rc == 0) {
|
|
|
|
zio_t *zio = dr->dr_zio;
|
|
|
|
int error = dr->dr_error;
|
|
|
|
|
|
|
|
vdev_disk_dio_free(dr);
|
|
|
|
|
|
|
|
if (zio) {
|
|
|
|
zio->io_error = error;
|
2010-09-28 02:30:14 +04:00
|
|
|
ASSERT3S(zio->io_error, >=, 0);
|
|
|
|
if (zio->io_error)
|
|
|
|
vdev_disk_error(zio);
|
2016-07-22 18:52:49 +03:00
|
|
|
|
2016-05-23 20:41:29 +03:00
|
|
|
zio_delay_interrupt(zio);
|
2010-08-26 22:45:02 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-11-01 23:26:11 +04:00
|
|
|
return (rc);
|
2010-08-26 22:45:02 +04:00
|
|
|
}
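/*
 * Note (editorial): the expected reference pairing for the helpers
 * above, as used by __vdev_disk_physio() below: one reference is taken
 * per bio before submission and dropped by its completion callback,
 * plus one extra reference held across the submission loop so the
 * dio_request_t cannot reach zero and be freed while bios are still
 * being issued.
 */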
|
|
|
|
|
2015-09-23 18:55:15 +03:00
|
|
|
BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error)
|
2010-08-26 22:45:02 +04:00
|
|
|
{
|
|
|
|
dio_request_t *dr = bio->bi_private;
|
|
|
|
|
|
|
|
|
2015-09-23 18:55:15 +03:00
|
|
|
if (dr->dr_error == 0) {
|
|
|
|
#ifdef HAVE_1ARG_BIO_END_IO_T
|
2017-07-24 05:37:12 +03:00
|
|
|
dr->dr_error = BIO_END_IO_ERROR(bio);
|
2015-09-23 18:55:15 +03:00
|
|
|
#else
|
|
|
|
if (error)
|
|
|
|
dr->dr_error = -(error);
|
|
|
|
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
|
|
|
|
dr->dr_error = EIO;
|
|
|
|
#endif
|
|
|
|
}
|
2010-08-26 22:45:02 +04:00
|
|
|
|
2016-08-31 09:26:43 +03:00
|
|
|
/* Drop reference acquired by __vdev_disk_physio */
|
2010-08-26 22:45:02 +04:00
|
|
|
(void) vdev_disk_dio_put(dr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned int
|
|
|
|
bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
|
|
|
|
{
|
|
|
|
unsigned int offset, size, i;
|
|
|
|
struct page *page;
|
|
|
|
|
|
|
|
offset = offset_in_page(bio_ptr);
|
|
|
|
for (i = 0; i < bio->bi_max_vecs; i++) {
|
|
|
|
size = PAGE_SIZE - offset;
|
|
|
|
|
|
|
|
if (bio_size <= 0)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (size > bio_size)
|
|
|
|
size = bio_size;
|
|
|
|
|
2014-11-03 17:42:44 +03:00
|
|
|
if (is_vmalloc_addr(bio_ptr))
|
2010-08-26 22:45:02 +04:00
|
|
|
page = vmalloc_to_page(bio_ptr);
|
|
|
|
else
|
|
|
|
page = virt_to_page(bio_ptr);
|
|
|
|
|
2014-04-24 07:11:02 +04:00
|
|
|
/*
|
|
|
|
* Some network related block devices use tcp_sendpage, which
|
|
|
|
* doesn't behave well when given a 0-count page; this is a
|
|
|
|
* safety net to catch them.
|
|
|
|
*/
|
|
|
|
ASSERT3S(page_count(page), >, 0);
|
|
|
|
|
2010-08-26 22:45:02 +04:00
|
|
|
if (bio_add_page(bio, page, size, offset) != size)
|
|
|
|
break;
|
|
|
|
|
|
|
|
bio_ptr += size;
|
|
|
|
bio_size -= size;
|
|
|
|
offset = 0;
|
|
|
|
}
|
|
|
|
|
2013-11-01 23:26:11 +04:00
|
|
|
return (bio_size);
|
2010-08-26 22:45:02 +04:00
|
|
|
}
|
|
|
|
|
2016-08-31 09:26:43 +03:00
|
|
|
static unsigned int
|
|
|
|
bio_map_abd_off(struct bio *bio, abd_t *abd, unsigned int size, size_t off)
|
|
|
|
{
|
|
|
|
if (abd_is_linear(abd))
|
|
|
|
return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, size));
|
|
|
|
|
|
|
|
return (abd_scatter_bio_map_off(bio, abd, size, off));
|
|
|
|
}
|
|
|
|
|
2016-07-27 05:23:53 +03:00
|
|
|
static inline void
|
2016-07-27 20:55:32 +03:00
|
|
|
vdev_submit_bio_impl(struct bio *bio)
|
2016-07-27 05:23:53 +03:00
|
|
|
{
|
|
|
|
#ifdef HAVE_1ARG_SUBMIT_BIO
|
|
|
|
submit_bio(bio);
|
|
|
|
#else
|
2016-07-27 20:55:32 +03:00
|
|
|
submit_bio(0, bio);
|
2016-07-27 05:23:53 +03:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2017-09-16 21:00:19 +03:00
|
|
|
#ifndef HAVE_BIO_SET_DEV
|
|
|
|
static inline void
|
|
|
|
bio_set_dev(struct bio *bio, struct block_device *bdev)
|
|
|
|
{
|
|
|
|
bio->bi_bdev = bdev;
|
|
|
|
}
|
|
|
|
#endif /* !HAVE_BIO_SET_DEV */
|
|
|
|
|
zvol processing should use struct bio
Internally, zvols are files exposed through the block device API. This
is intended to reduce overhead when things require block devices.
However, the ZoL zvol code emulates a traditional block device in that
it has a top half and a bottom half. This is an unnecessary source of
overhead that does not exist on any other OpenZFS platform.
This patch removes it. Early users of this patch reported double digit
performance gains in IOPS on zvols in the range of 50% to 80%.
Comments in the code suggest that the current implementation was done to
obtain IO merging from Linux's IO elevator. However, the DMU already
does write merging while arc_read() should implicitly merge read IOs
because only 1 thread is permitted to fetch the buffer into ARC. In
addition, commercial ZFSOnLinux distributions report that regular files
are more performant than zvols under the current implementation, and the
main consumers of zvols are VMs and iSCSI targets, which have their own
elevators to merge IOs.
Some minor refactoring allows us to register zfs_request() as our
->make_request() handler in place of the generic_make_request()
function. This eliminates the layer of code that broke IO requests on
zvols into a top half and a bottom half. This has several benefits:
1. No per zvol spinlocks.
2. No redundant IO elevator processing.
3. Interrupts are disabled only when actually necessary.
4. No redispatching of IOs when all taskq threads are busy.
5. Linux's page out routines will properly block.
6. Many autotools checks become obsolete.
An unfortunate consequence of eliminating the generic_make_request()
layer is that we no longer call the instrumentation
hooks for block IO accounting. Those hooks are GPL-exported, so we
cannot call them ourselves and consequently, we lose the ability to do
IO monitoring via iostat. Since zvols are internally files mapped as
block devices, this should be okay. Anyone who is willing to accept the
performance penalty for the block IO layer's accounting could use the
loop device in between the zvol and its consumer. Alternatively, perf
and ftrace likely could be used. Also, tools like latencytop will
still work, and they sometimes provide a better view of performance
bottlenecks than the traditional block IO accounting tools do.
Lastly, if direct reclaim occurs during spacemap loading and swap is on
a zvol, this code will deadlock. That deadlock could already occur with
sync=always on zvols. Given that swap on zvols is not yet production
ready, this is not a blocker.
Signed-off-by: Richard Yao <ryao@gentoo.org>
2014-07-05 02:43:47 +04:00
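/*
 * Illustrative sketch (not part of this file): registering a bio-based
 * ->make_request handler, as the zvol change described above does with
 * zfs_request(). The handler name is hypothetical; the compat macros
 * mirror those used by zvol.c.
 */
static MAKE_REQUEST_FN_RET
sketch_request(struct request_queue *q, struct bio *bio)
{
	/* Service the bio directly; no top-half/bottom-half split */
	BIO_END_IO(bio, 0);
#ifdef HAVE_MAKE_REQUEST_FN_RET_INT
	return (0);
#elif defined(HAVE_MAKE_REQUEST_FN_RET_QC)
	return (BLK_QC_T_NONE);
#endif
}
/* At device setup: blk_queue_make_request(q, sketch_request); */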
|
|
|
static inline void
|
2016-07-27 20:55:32 +03:00
|
|
|
vdev_submit_bio(struct bio *bio)
|
zvol processing should use struct bio
2014-07-05 02:43:47 +04:00
|
|
|
{
|
|
|
|
#ifdef HAVE_CURRENT_BIO_TAIL
|
|
|
|
struct bio **bio_tail = current->bio_tail;
|
|
|
|
current->bio_tail = NULL;
|
2016-07-27 20:55:32 +03:00
|
|
|
vdev_submit_bio_impl(bio);
|
zvol processing should use struct bio
2014-07-05 02:43:47 +04:00
|
|
|
current->bio_tail = bio_tail;
|
|
|
|
#else
|
|
|
|
struct bio_list *bio_list = current->bio_list;
|
|
|
|
current->bio_list = NULL;
|
2016-07-27 20:55:32 +03:00
|
|
|
vdev_submit_bio_impl(bio);
|
zvol processing should use struct bio
2014-07-05 02:43:47 +04:00
|
|
|
current->bio_list = bio_list;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2010-08-26 22:45:02 +04:00
|
|
|
static int
|
2016-08-31 09:26:43 +03:00
|
|
|
__vdev_disk_physio(struct block_device *bdev, zio_t *zio,
|
|
|
|
size_t io_size, uint64_t io_offset, int rw, int flags)
|
2010-08-26 22:45:02 +04:00
|
|
|
{
|
2013-11-01 23:26:11 +04:00
|
|
|
dio_request_t *dr;
|
2016-08-31 09:26:43 +03:00
|
|
|
uint64_t abd_offset;
|
2010-08-26 22:45:02 +04:00
|
|
|
uint64_t bio_offset;
|
2016-07-27 20:55:32 +03:00
|
|
|
int bio_size, bio_count = 16;
|
2011-05-27 03:48:16 +04:00
|
|
|
int i = 0, error = 0;
|
2016-09-29 23:13:31 +03:00
|
|
|
#if defined(HAVE_BLK_QUEUE_HAVE_BLK_PLUG)
|
|
|
|
struct blk_plug plug;
|
|
|
|
#endif
|
Add support for autoexpand property
2018-07-24 01:40:15 +03:00
|
|
|
/*
|
|
|
|
* Accessing outside the block device is never allowed.
|
|
|
|
*/
|
|
|
|
if (io_offset + io_size > i_size_read(bdev->bd_inode)) {
|
|
|
|
vdev_dbgmsg(zio->io_vd,
|
|
|
|
"Illegal access %llu size %llu, device size %llu",
|
|
|
|
io_offset, io_size, i_size_read(bdev->bd_inode));
|
|
|
|
return (SET_ERROR(EIO));
|
|
|
|
}
|
2010-11-11 00:36:18 +03:00
|
|
|
|
2010-08-26 22:45:02 +04:00
|
|
|
retry:
|
|
|
|
dr = vdev_disk_dio_alloc(bio_count);
|
|
|
|
if (dr == NULL)
|
2017-08-03 07:16:12 +03:00
|
|
|
return (SET_ERROR(ENOMEM));
|
2010-08-26 22:45:02 +04:00
|
|
|
|
2010-10-01 21:57:56 +04:00
|
|
|
if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
|
2014-10-21 22:20:10 +04:00
|
|
|
bio_set_flags_failfast(bdev, &flags);
|
2010-10-01 21:57:56 +04:00
|
|
|
|
2010-08-26 22:45:02 +04:00
|
|
|
dr->dr_zio = zio;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* When the IO size exceeds the maximum bio size for the request
|
|
|
|
* queue we are forced to break the IO in multiple bio's and wait
|
|
|
|
* for them all to complete. Ideally, all pool users will set
|
|
|
|
* their volume block size to match the maximum request size and
|
|
|
|
* the common case will be one bio per vdev IO request.
|
|
|
|
*/
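/*
 * Worked example: with 4 KiB pages and BIO_MAX_PAGES = 256, a single
 * bio maps at most 1 MiB, so a 4 MiB request needs at least four
 * bios (per-queue limits may require even more).
 */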
|
2016-07-22 18:52:49 +03:00
|
|
|
|
2016-08-31 09:26:43 +03:00
|
|
|
abd_offset = 0;
|
|
|
|
bio_offset = io_offset;
|
|
|
|
bio_size = io_size;
|
2010-08-26 22:45:02 +04:00
|
|
|
for (i = 0; i <= dr->dr_bio_count; i++) {
|
|
|
|
|
|
|
|
/* Finished constructing bio's for given buffer */
|
|
|
|
if (bio_size <= 0)
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* By default only 'bio_count' bio's per dio are allowed.
|
|
|
|
* However, if we find ourselves in a situation where more
|
|
|
|
* are needed we allocate a larger dio and warn the user.
|
|
|
|
*/
|
|
|
|
if (dr->dr_bio_count == i) {
|
|
|
|
vdev_disk_dio_free(dr);
|
|
|
|
bio_count *= 2;
|
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
|
2014-10-21 22:20:10 +04:00
|
|
|
/* bio_alloc() with __GFP_WAIT never returns NULL */
|
2014-11-03 23:15:08 +03:00
|
|
|
dr->dr_bio[i] = bio_alloc(GFP_NOIO,
|
2016-08-31 09:26:43 +03:00
|
|
|
MIN(abd_nr_pages_off(zio->io_abd, bio_size, abd_offset),
|
2016-12-12 21:46:26 +03:00
|
|
|
BIO_MAX_PAGES));
|
2014-10-21 22:20:10 +04:00
|
|
|
if (unlikely(dr->dr_bio[i] == NULL)) {
|
2010-08-26 22:45:02 +04:00
|
|
|
vdev_disk_dio_free(dr);
|
2017-08-03 07:16:12 +03:00
|
|
|
return (SET_ERROR(ENOMEM));
|
2010-08-26 22:45:02 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Matching put called by vdev_disk_physio_completion */
|
|
|
|
vdev_disk_dio_get(dr);
|
|
|
|
|
2017-09-16 21:00:19 +03:00
|
|
|
bio_set_dev(dr->dr_bio[i], bdev);
|
2014-03-28 11:08:21 +04:00
|
|
|
BIO_BI_SECTOR(dr->dr_bio[i]) = bio_offset >> 9;
|
2010-08-26 22:45:02 +04:00
|
|
|
dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
|
|
|
|
dr->dr_bio[i]->bi_private = dr;
|
2016-07-27 20:55:32 +03:00
|
|
|
bio_set_op_attrs(dr->dr_bio[i], rw, flags);
|
2010-08-26 22:45:02 +04:00
|
|
|
|
|
|
|
/* Remaining size is returned to become the new size */
|
2016-08-31 09:26:43 +03:00
|
|
|
bio_size = bio_map_abd_off(dr->dr_bio[i], zio->io_abd,
|
2016-12-12 21:46:26 +03:00
|
|
|
bio_size, abd_offset);
|
2010-08-26 22:45:02 +04:00
|
|
|
|
|
|
|
/* Advance in buffer and construct another bio if needed */
|
2016-08-31 09:26:43 +03:00
|
|
|
abd_offset += BIO_BI_SIZE(dr->dr_bio[i]);
|
2014-03-28 11:08:21 +04:00
|
|
|
bio_offset += BIO_BI_SIZE(dr->dr_bio[i]);
|
2010-08-26 22:45:02 +04:00
|
|
|
}
|
|
|
|
|
zvol processing should use struct bio
Internally, zvols are files exposed through the block device API. This
is intended to reduce overhead when things require block devices.
However, the ZoL zvol code emulates a traditional block device in that
it has a top half and a bottom half. This is an unnecessary source of
overhead that does not exist on any other OpenZFS platform does this.
This patch removes it. Early users of this patch reported double digit
performance gains in IOPS on zvols in the range of 50% to 80%.
Comments in the code suggest that the current implementation was done to
obtain IO merging from Linux's IO elevator. However, the DMU already
does write merging while arc_read() should implicitly merge read IOs
because only 1 thread is permitted to fetch the buffer into ARC. In
addition, commercial ZFSOnLinux distributions report that regular files
are more performant than zvols under the current implementation, and the
main consumers of zvols are VMs and iSCSI targets, which have their own
elevators to merge IOs.
Some minor refactoring allows us to register zfs_request() as our
->make_request() handler in place of the generic_make_request()
function. This eliminates the layer of code that broke IO requests on
zvols into a top half and a bottom half. This has several benefits:
1. No per zvol spinlocks.
2. No redundant IO elevator processing.
3. Interrupts are disabled only when actually necessary.
4. No redispatching of IOs when all taskq threads are busy.
5. Linux's page out routines will properly block.
6. Many autotools checks become obsolete.
An unfortunate consequence of eliminating the layer that called
generic_make_request() is that we no longer call the instrumentation
hooks for block IO accounting. Those hooks are GPL-exported, so we
cannot call them ourselves and consequently, we lose the ability to do
IO monitoring via iostat. Since zvols are internally files mapped as
block devices, this should be okay. Anyone who is willing to accept the
performance penalty for the block IO layer's accounting could use the
loop device in between the zvol and its consumer. Alternatively, perf
and ftrace likely could be used. Also, tools like latencytop will still
work, and they sometimes provide a better view of performance
bottlenecks than the traditional block IO accounting tools do.
Lastly, if direct reclaim occurs during spacemap loading and swap is on
a zvol, this code will deadlock. That deadlock could already occur with
sync=always on zvols. Given that swap on zvols is not yet production
ready, this is not a blocker.
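For context, a minimal sketch of what that registration amounts to is
shown below. This assumes a 3.x-era kernel where the handler returns
void; zv_queue and the handler body are illustrative, not the actual
zvol implementation.

	#include <linux/blkdev.h>

	/*
	 * Illustrative handler: every bio submitted to the zvol's queue
	 * is handed straight to ZFS, with no elevator or request queue
	 * involved.
	 */
	static void
	zfs_request(struct request_queue *q, struct bio *bio)
	{
		/* ... translate the bio into DMU reads/writes ... */
		bio_endio(bio, 0);	/* two-argument form on 3.x kernels */
	}

	/* At zvol creation time, install the handler on the queue. */
	blk_queue_make_request(zv->zv_queue, zfs_request);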
Signed-off-by: Richard Yao <ryao@gentoo.org>
2014-07-05 02:43:47 +04:00
|
|
|
/* Extra reference to protect dio_request during vdev_submit_bio */
|
2010-08-26 22:45:02 +04:00
|
|
|
vdev_disk_dio_get(dr);
|
|
|
|
|
2016-09-29 23:13:31 +03:00
|
|
|
#if defined(HAVE_BLK_QUEUE_HAVE_BLK_PLUG)
|
|
|
|
if (dr->dr_bio_count > 1)
|
|
|
|
blk_start_plug(&plug);
|
|
|
|
#endif
|
|
|
|
|
2010-08-26 22:45:02 +04:00
|
|
|
/* Submit all bio's associated with this dio */
|
|
|
|
for (i = 0; i < dr->dr_bio_count; i++)
|
|
|
|
if (dr->dr_bio[i])
|
2016-07-27 20:55:32 +03:00
|
|
|
vdev_submit_bio(dr->dr_bio[i]);
|
2010-08-26 22:45:02 +04:00
|
|
|
|
2016-09-29 23:13:31 +03:00
|
|
|
#if defined(HAVE_BLK_QUEUE_HAVE_BLK_PLUG)
|
|
|
|
if (dr->dr_bio_count > 1)
|
|
|
|
blk_finish_plug(&plug);
|
|
|
|
#endif
|
|
|
|
|
2013-11-01 23:26:11 +04:00
|
|
|
(void) vdev_disk_dio_put(dr);
|
2010-08-26 22:45:02 +04:00
|
|
|
|
2013-11-01 23:26:11 +04:00
|
|
|
return (error);
|
2010-08-26 22:45:02 +04:00
|
|
|
}
|
|
|
|
|
2017-07-24 05:37:12 +03:00
|
|
|
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, error)
|
2010-08-26 22:45:02 +04:00
|
|
|
{
|
|
|
|
zio_t *zio = bio->bi_private;
|
2015-09-23 18:55:15 +03:00
|
|
|
#ifdef HAVE_1ARG_BIO_END_IO_T
|
2017-07-24 05:37:12 +03:00
|
|
|
zio->io_error = BIO_END_IO_ERROR(bio);
|
|
|
|
#else
|
|
|
|
zio->io_error = -error;
|
2015-09-23 18:55:15 +03:00
|
|
|
#endif
|
2010-08-26 22:45:02 +04:00
|
|
|
|
2017-07-24 05:37:12 +03:00
|
|
|
if (zio->io_error == EOPNOTSUPP)
|
2010-08-26 22:45:02 +04:00
|
|
|
zio->io_vd->vdev_nowritecache = B_TRUE;
|
|
|
|
|
|
|
|
bio_put(bio);
|
2010-09-28 02:30:14 +04:00
|
|
|
ASSERT3S(zio->io_error, >=, 0);
|
|
|
|
if (zio->io_error)
|
|
|
|
vdev_disk_error(zio);
|
2010-08-26 22:45:02 +04:00
|
|
|
zio_interrupt(zio);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
|
|
|
|
{
|
|
|
|
struct request_queue *q;
|
|
|
|
struct bio *bio;
|
|
|
|
|
|
|
|
q = bdev_get_queue(bdev);
|
|
|
|
if (!q)
|
2017-08-03 07:16:12 +03:00
|
|
|
return (SET_ERROR(ENXIO));
|
2010-08-26 22:45:02 +04:00
|
|
|
|
2013-07-11 01:09:08 +04:00
|
|
|
bio = bio_alloc(GFP_NOIO, 0);
|
2014-10-21 22:20:10 +04:00
|
|
|
/* bio_alloc() with __GFP_WAIT never returns NULL */
|
|
|
|
if (unlikely(bio == NULL))
|
2017-08-03 07:16:12 +03:00
|
|
|
return (SET_ERROR(ENOMEM));
|
2010-08-26 22:45:02 +04:00
|
|
|
|
|
|
|
bio->bi_end_io = vdev_disk_io_flush_completion;
|
|
|
|
bio->bi_private = zio;
|
2017-09-16 21:00:19 +03:00
|
|
|
bio_set_dev(bio, bdev);
|
2016-12-31 01:03:59 +03:00
|
|
|
bio_set_flush(bio);
|
2016-07-27 20:55:32 +03:00
|
|
|
vdev_submit_bio(bio);
|
Invalidate Linux buffer cache on vdevs upon each flush
Userland tools such as blkid, grub2-probe and zdb will go through the
buffer cache. However, ZFS uses submit_bio() to bypass the buffer
cache when performing IO operations on vdevs for efficiency purposes.
This permits the on-disk state and buffer cache to fall out of
synchronization. That causes seemingly random failures when tools
reading stale metadata from the buffer cache try to access references to
data that is no longer there.
A particularly bad failure this causes involves grub2-probe, which is
used by grub2-mkconfig. Ordinarily, a rootfs might be called
rpool/ROOT/gentoo. However, when a failure occurs in grub2-probe,
grub2-mkconfig will generate a configuration file containing
/ROOT/gentoo, which omits the pool name and causes a boot failure.
This is avoidable by calling invalidate_bdev() on each flush, which is a
simple way to ensure that all non-dirty pages are wiped. Since userland
tools rarely access vdevs directly, this should be a fancy noop >99.999%
of the time and have little impact on IO. We could have tried a finer
grained approach for the rare instances in which the vdevs are accessed
frequently by userland. However, that would require consideration of
corner cases and it is not worth the effort.
Memory-wise, it would have been better to use a Linux kernel API hook to
disable the buffer cache on such devices, but the kernel provides us no way of
doing that, so we opt for this approach instead. We should revisit that
idea in the future when higher priority issues have been tackled.
Signed-off-by: Richard Yao <ryao@gentoo.org>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #2150
2014-02-27 23:03:39 +04:00
|
|
|
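	/* Drop clean cached pages so userland tools re-read fresh metadata */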
invalidate_bdev(bdev);
|
2010-08-26 22:45:02 +04:00
|
|
|
|
2013-11-01 23:26:11 +04:00
|
|
|
return (0);
|
2010-08-26 22:45:02 +04:00
|
|
|
}
|
|
|
|
|
2014-10-21 02:07:45 +04:00
|
|
|
static void
|
2010-08-26 22:45:02 +04:00
|
|
|
vdev_disk_io_start(zio_t *zio)
|
|
|
|
{
|
|
|
|
vdev_t *v = zio->io_vd;
|
|
|
|
vdev_disk_t *vd = v->vdev_tsd;
|
2016-07-27 20:55:32 +03:00
|
|
|
int rw, flags, error;
|
2010-08-26 22:45:02 +04:00
|
|
|
|
Add support for autoexpand property
While the autoexpand property may seem like a small feature it
depends on a significant amount of system infrastructure. Enough
of that infrastructure is now in place that with a few modifications
for Linux it can be supported.
Auto-expand works as follows; when a block device is modified
(re-sized, closed after being open r/w, etc) a change uevent is
generated for udev. The ZED, which is monitoring udev events,
passes the change event along to zfs_deliver_dle() if the disk
or partition contains a zfs_member as identified by blkid.
From here the device is matched against all imported pool vdevs
using the vdev_guid which was read from the label by blkid. If
a match is found the ZED reopens the pool vdev. This re-opening
is important because it allows the vdev to be briefly closed so
the disk partition table can be re-read. Otherwise, it wouldn't
be possible to report the maximum possible expansion size.
Finally, if the property autoexpand=on a vdev expansion will be
attempted. After performing some sanity checks on the disk to
verify that it is safe to expand, the primary partition (-part1)
will be expanded and the partition table updated. The partition
is then re-opened (again) to detect the updated size which allows
the new capacity to be used.
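Conceptually, the ZED-side matching step looks roughly like the sketch
below; the helper names are hypothetical and the real logic spans
zfs_deliver_dle() and libzfs:

	/* Hypothetical helpers; illustrative control flow only. */
	static void
	zed_on_change_event(const char *devpath)
	{
		uint64_t guid;

		if (!dev_is_zfs_member(devpath))	/* blkid check */
			return;

		guid = dev_read_vdev_guid(devpath);	/* from the label */

		/* Match the guid against every imported pool's vdevs. */
		for_each_imported_pool(pool) {
			if (pool_reopen_vdev_by_guid(pool, guid) == 0)
				break;	/* reopen re-reads the partition table */
		}
	}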
In order to make all of the above possible the following changes
were required:
* Updated the zpool_expand_001_pos and zpool_expand_003_pos tests.
These tests now create a pool which is layered on a loopback,
scsi_debug, and file vdev. This allows for testing of a non-
partitioned block device (loopback), a partitioned block device
(scsi_debug), and a file which does not receive udev change
events. This provided for better test coverage, and by removing
the layering on ZFS volumes the issues surrounding layering
one pool on another are avoided.
* zpool_find_vdev_by_physpath() updated to accept a vdev guid.
This allows for matching by guid rather than path which is a
more reliable way for the ZED to reference a vdev.
* Fixed zfs_zevent_wait() signal handling which could result
in the ZED spinning when a signal was not handled.
* Removed vdev_disk_rrpart() functionality which can be abandoned
in favor of kernel provided blkdev_reread_part() function.
* Added a rwlock which is held as a writer while a disk is being
reopened. This is important to prevent errors from occurring
for any configuration related IOs which bypass the SCL_ZIO lock.
The zpool_reopen_007_pos.ksh test case was added to verify IO
errors are never observed when reopening. This is not expected
to impact IO performance.
Additional fixes which aren't critical but were discovered and
resolved in the course of developing this functionality.
* Added PHYS_PATH="/dev/zvol/dataset" to the vdev configuration for
ZFS volumes. This is as good as a unique physical path; while the
volumes are no longer used in the test cases for other reasons,
this improvement was included.
Reviewed by: Richard Elling <Richard.Elling@RichardElling.com>
Signed-off-by: Sara Hartse <sara.hartse@delphix.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #120
Closes #2437
Closes #5771
Closes #7366
Closes #7582
Closes #7629
2018-07-24 01:40:15 +03:00
|
|
|
/*
|
|
|
|
* If the vdev is closed, it's likely in the REMOVED or FAULTED state.
|
|
|
|
* Nothing to be done here but return failure.
|
|
|
|
*/
|
|
|
|
if (vd == NULL) {
|
|
|
|
zio->io_error = ENXIO;
|
|
|
|
zio_interrupt(zio);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
rw_enter(&vd->vd_lock, RW_READER);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the vdev is closed, it's likely due to a failed reopen and is
|
|
|
|
* in the UNAVAIL state. Nothing to be done here but return failure.
|
|
|
|
*/
|
|
|
|
if (vd->vd_bdev == NULL) {
|
|
|
|
rw_exit(&vd->vd_lock);
|
|
|
|
zio->io_error = ENXIO;
|
|
|
|
zio_interrupt(zio);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-08-26 22:45:02 +04:00
|
|
|
switch (zio->io_type) {
|
|
|
|
case ZIO_TYPE_IOCTL:
|
|
|
|
|
|
|
|
if (!vdev_readable(v)) {
|
Add support for autoexpand property
2018-07-24 01:40:15 +03:00
|
|
|
rw_exit(&vd->vd_lock);
|
2013-03-08 22:41:28 +04:00
|
|
|
zio->io_error = SET_ERROR(ENXIO);
|
2014-10-21 02:07:45 +04:00
|
|
|
zio_interrupt(zio);
|
|
|
|
return;
|
2010-08-26 22:45:02 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
switch (zio->io_cmd) {
|
|
|
|
case DKIOCFLUSHWRITECACHE:
|
|
|
|
|
|
|
|
if (zfs_nocacheflush)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (v->vdev_nowritecache) {
|
2013-03-08 22:41:28 +04:00
|
|
|
zio->io_error = SET_ERROR(ENOTSUP);
|
2010-08-26 22:45:02 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
error = vdev_disk_io_flush(vd->vd_bdev, zio);
|
Add support for autoexpand property
2018-07-24 01:40:15 +03:00
|
|
|
if (error == 0) {
|
|
|
|
rw_exit(&vd->vd_lock);
|
2014-10-21 02:07:45 +04:00
|
|
|
return;
|
Add support for autoexpand property
2018-07-24 01:40:15 +03:00
|
|
|
}
|
2010-08-26 22:45:02 +04:00
|
|
|
|
|
|
|
zio->io_error = error;
|
|
|
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
2013-03-08 22:41:28 +04:00
|
|
|
zio->io_error = SET_ERROR(ENOTSUP);
|
2010-08-26 22:45:02 +04:00
|
|
|
}
|
|
|
|
|
Add support for autoexpand property
2018-07-24 01:40:15 +03:00
|
|
|
rw_exit(&vd->vd_lock);
|
2014-10-21 02:07:45 +04:00
|
|
|
zio_execute(zio);
|
|
|
|
return;
|
2010-08-26 22:45:02 +04:00
|
|
|
case ZIO_TYPE_WRITE:
|
2016-07-27 20:55:32 +03:00
|
|
|
rw = WRITE;
|
Fix sync behavior for disk vdevs
Prior to b39c22b, which was first generally available in the 0.6.5
release, ZoL never actually submitted synchronous read or write
requests to the Linux block layer. This means the vdev_disk_dio_is_sync()
function had always returned false and, therefore, the completion in
dio_request_t.dr_comp was never actually used.
In b39c22b, synchronous ZIO operations were translated to synchronous
BIO requests in vdev_disk_io_start(). The follow-on commits 5592404 and
aa159af fixed several problems introduced by b39c22b. In particular,
5592404 introduced the new flag parameter "wait" to __vdev_disk_physio()
but under ZoL, since vdev_disk_physio() is never actually used, the wait
flag was always zero so the new code had no effect other than to cause
a bug in the use of the dio_request_t.dr_comp which was fixed by aa159af.
The original rationale for introducing synchronous operations in b39c22b
was to hurry certain requests through the BIO layer which would have
otherwise been subject to its unplug timer which would increase the
latency. This behavior of the unplug timer, however, went away during the
transition of the plug/unplug system between kernels 2.6.32 and 2.6.39.
To handle the unplug timer behavior on 2.6.32-2.6.35 kernels the
BIO_RW_UNPLUG flag is used as a hint to suppress the plugging behavior.
For kernels 2.6.36-2.6.38, the REQ_UNPLUG macro will be available and
is used for the same purpose.
Signed-off-by: Tim Chase <tim@chase2k.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #4858
2016-07-08 18:33:01 +03:00
|
|
|
#if defined(HAVE_BLK_QUEUE_HAVE_BIO_RW_UNPLUG)
|
2016-07-27 20:55:32 +03:00
|
|
|
flags = (1 << BIO_RW_UNPLUG);
|
Fix sync behavior for disk vdevs
2016-07-08 18:33:01 +03:00
|
|
|
#elif defined(REQ_UNPLUG)
|
2016-07-27 20:55:32 +03:00
|
|
|
flags = REQ_UNPLUG;
|
Fix sync behavior for disk vdevs
2016-07-08 18:33:01 +03:00
|
|
|
#else
|
2016-07-27 20:55:32 +03:00
|
|
|
flags = 0;
|
Fix sync behavior for disk vdevs
2016-07-08 18:33:01 +03:00
|
|
|
#endif
|
2010-08-26 22:45:02 +04:00
|
|
|
break;
|
|
|
|
|
|
|
|
case ZIO_TYPE_READ:
|
2016-07-27 20:55:32 +03:00
|
|
|
rw = READ;
|
Fix sync behavior for disk vdevs
2016-07-08 18:33:01 +03:00
|
|
|
#if defined(HAVE_BLK_QUEUE_HAVE_BIO_RW_UNPLUG)
|
2016-07-27 20:55:32 +03:00
|
|
|
flags = (1 << BIO_RW_UNPLUG);
|
Fix sync behavior for disk vdevs
2016-07-08 18:33:01 +03:00
|
|
|
#elif defined(REQ_UNPLUG)
|
2016-07-27 20:55:32 +03:00
|
|
|
flags = REQ_UNPLUG;
|
Fix sync behavior for disk vdevs
2016-07-08 18:33:01 +03:00
|
|
|
#else
|
2016-07-27 20:55:32 +03:00
|
|
|
flags = 0;
|
Fix sync behavior for disk vdevs
2016-07-08 18:33:01 +03:00
|
|
|
#endif
|
2010-08-26 22:45:02 +04:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
Add support for autoexpand property
2018-07-24 01:40:15 +03:00
|
|
|
rw_exit(&vd->vd_lock);
|
2013-03-08 22:41:28 +04:00
|
|
|
zio->io_error = SET_ERROR(ENOTSUP);
|
2014-10-21 02:07:45 +04:00
|
|
|
zio_interrupt(zio);
|
|
|
|
return;
|
2010-08-26 22:45:02 +04:00
|
|
|
}
|
|
|
|
|
2016-05-23 20:41:29 +03:00
|
|
|
zio->io_target_timestamp = zio_handle_io_delay(zio);
|
2016-08-31 09:26:43 +03:00
|
|
|
error = __vdev_disk_physio(vd->vd_bdev, zio,
|
2016-07-27 20:55:32 +03:00
|
|
|
zio->io_size, zio->io_offset, rw, flags);
|
Add support for autoexpand property
2018-07-24 01:40:15 +03:00
|
|
|
rw_exit(&vd->vd_lock);
|
|
|
|
|
2010-08-26 22:45:02 +04:00
|
|
|
if (error) {
|
|
|
|
zio->io_error = error;
|
2014-10-21 02:07:45 +04:00
|
|
|
zio_interrupt(zio);
|
|
|
|
return;
|
2010-08-26 22:45:02 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
vdev_disk_io_done(zio_t *zio)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* If the device returned EIO, we revalidate the media. If it is
|
|
|
|
* determined the media has changed this triggers the asynchronous
|
|
|
|
* removal of the device from the configuration.
|
|
|
|
*/
|
|
|
|
if (zio->io_error == EIO) {
|
2013-11-01 23:26:11 +04:00
|
|
|
vdev_t *v = zio->io_vd;
|
2010-08-26 22:45:02 +04:00
|
|
|
vdev_disk_t *vd = v->vdev_tsd;
|
|
|
|
|
|
|
|
if (check_disk_change(vd->vd_bdev)) {
|
|
|
|
vdev_bdev_invalidate(vd->vd_bdev);
|
|
|
|
v->vdev_remove_wanted = B_TRUE;
|
|
|
|
spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
vdev_disk_hold(vdev_t *vd)
|
|
|
|
{
|
|
|
|
ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));
|
|
|
|
|
|
|
|
/* We must have a pathname, and it must be absolute. */
|
|
|
|
if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Only prefetch path and devid info if the device has
|
|
|
|
* never been opened.
|
|
|
|
*/
|
|
|
|
if (vd->vdev_tsd != NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* XXX: Implement me as a vnode lookup for the device */
|
|
|
|
vd->vdev_name_vp = NULL;
|
|
|
|
vd->vdev_devid_vp = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
vdev_disk_rele(vdev_t *vd)
|
|
|
|
{
|
|
|
|
ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));
|
|
|
|
|
|
|
|
/* XXX: Implement me as a vnode rele for the device */
|
|
|
|
}
|
|
|
|
|
2017-09-05 23:41:32 +03:00
|
|
|
static int
|
|
|
|
param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
|
|
|
|
{
|
|
|
|
spa_t *spa = NULL;
|
|
|
|
char *p;
|
|
|
|
|
|
|
|
if (val == NULL)
|
|
|
|
return (SET_ERROR(-EINVAL));
|
|
|
|
|
|
|
|
if ((p = strchr(val, '\n')) != NULL)
|
|
|
|
*p = '\0';
|
|
|
|
|
2018-05-11 22:46:07 +03:00
|
|
|
if (spa_mode_global != 0) {
|
2017-09-05 23:41:32 +03:00
|
|
|
mutex_enter(&spa_namespace_lock);
|
2018-05-11 22:46:07 +03:00
|
|
|
while ((spa = spa_next(spa)) != NULL) {
|
|
|
|
if (spa_state(spa) != POOL_STATE_ACTIVE ||
|
|
|
|
!spa_writeable(spa) || spa_suspended(spa))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
spa_open_ref(spa, FTAG);
|
|
|
|
mutex_exit(&spa_namespace_lock);
|
|
|
|
vdev_elevator_switch(spa->spa_root_vdev, (char *)val);
|
|
|
|
mutex_enter(&spa_namespace_lock);
|
|
|
|
spa_close(spa, FTAG);
|
|
|
|
}
|
|
|
|
mutex_exit(&spa_namespace_lock);
|
2017-09-05 23:41:32 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return (param_set_charp(val, kp));
|
|
|
|
}
|
|
|
|
|
2010-08-26 22:45:02 +04:00
|
|
|
vdev_ops_t vdev_disk_ops = {
|
|
|
|
vdev_disk_open,
|
|
|
|
vdev_disk_close,
|
|
|
|
vdev_default_asize,
|
|
|
|
vdev_disk_io_start,
|
|
|
|
vdev_disk_io_done,
|
|
|
|
NULL,
|
2017-05-13 03:28:03 +03:00
|
|
|
NULL,
|
2010-08-26 22:45:02 +04:00
|
|
|
vdev_disk_hold,
|
|
|
|
vdev_disk_rele,
|
OpenZFS 7614, 9064 - zfs device evacuation/removal
OpenZFS 7614 - zfs device evacuation/removal
OpenZFS 9064 - remove_mirror should wait for device removal to complete
This project allows top-level vdevs to be removed from the storage pool
with "zpool remove", reducing the total amount of storage in the pool.
This operation copies all allocated regions of the device to be removed
onto other devices, recording the mapping from old to new location.
After the removal is complete, read and free operations to the removed
(now "indirect") vdev must be remapped and performed at the new location
on disk. The indirect mapping table is kept in memory whenever the pool
is loaded, so there is minimal performance overhead when doing operations
on the indirect vdev.
The size of the in-memory mapping table will be reduced when its entries
become "obsolete" because they are no longer used by any block pointers
in the pool. An entry becomes obsolete when all the blocks that use
it are freed. An entry can also become obsolete when all the snapshots
that reference it are deleted, and the block pointers that reference it
have been "remapped" in all filesystems/zvols (and clones). Whenever an
indirect block is written, all the block pointers in it will be "remapped"
to their new (concrete) locations if possible. This process can be
accelerated by using the "zfs remap" command to proactively rewrite all
indirect blocks that reference indirect (removed) vdevs.
Note that when a device is removed, we do not verify the checksum of
the data that is copied. This makes the process much faster, but if it
were used on redundant vdevs (i.e. mirror or raidz vdevs), it would be
possible to copy the wrong data, when we have the correct data on e.g.
the other side of the mirror.
At the moment, only mirrors and simple top-level vdevs can be removed
and no removal is allowed if any of the top-level vdevs are raidz.
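To make the remapping concrete, one entry of the in-memory mapping can
be pictured as below; this is an illustrative shape only, not the
on-disk format:

	/*
	 * Reads and frees addressed to [re_old_offset, re_old_offset +
	 * re_size) on the removed vdev are redirected to the same-sized
	 * region at re_new_offset on vdev re_new_vdev.
	 */
	typedef struct remap_entry {
		uint64_t re_old_offset;	/* offset on the removed vdev */
		uint64_t re_new_vdev;	/* destination top-level vdev id */
		uint64_t re_new_offset;	/* offset on the destination vdev */
		uint64_t re_size;	/* length of the copied region */
	} remap_entry_t;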
Porting Notes:
* Avoid zero-sized kmem_alloc() in vdev_compact_children().
The device evacuation code adds a dependency that
vdev_compact_children() be able to properly empty the vdev_child
array by setting it to NULL and zeroing vdev_children. Under Linux,
kmem_alloc() and related functions return a sentinel pointer rather
than NULL for zero-sized allocations.
* Remove comment regarding "mpt" driver where zfs_remove_max_segment
is initialized to SPA_MAXBLOCKSIZE.
Change zfs_condense_indirect_commit_entry_delay_ticks to
zfs_condense_indirect_commit_entry_delay_ms for consistency with
most other tunables in which delays are specified in ms.
* ZTS changes:
Use set_tunable rather than mdb
Use zpool sync as appropriate
Use sync_pool instead of sync
Kill jobs during test_removal_with_operation to allow unmount/export
Don't add non-disk names such as "mirror" or "raidz" to $DISKS
Use $TEST_BASE_DIR instead of /tmp
Increase HZ from 100 to 1000 which is more common on Linux
removal_multiple_indirection.ksh
Reduce iterations in order to not time out on the code
coverage builders.
removal_resume_export:
Functionally, the test case is correct but there exists a race
where the kernel thread hasn't been fully started yet and is
not visible. Wait for up to 1 second for the removal thread
to be started before giving up on it. Also, increase the
amount of data copied in order that the removal not finish
before the export has a chance to fail.
* MMP compatibility, the concept of concrete versus non-concrete devices
has slightly changed the semantics of vdev_writeable(). Update
mmp_random_leaf_impl() accordingly.
* Updated dbuf_remap() to handle the org.zfsonlinux:large_dnode pool
feature which is not supported by OpenZFS.
* Added support for new vdev removal tracepoints.
* Test cases removal_with_zdb and removal_condense_export have been
intentionally disabled. When run manually they pass as intended,
but when running in the automated test environment they produce
unreliable results on the latest Fedora release.
They may work better once the upstream pool import refactoring is
merged into ZoL at which point they will be re-enabled.
Authored by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Alex Reece <alex@delphix.com>
Reviewed-by: George Wilson <george.wilson@delphix.com>
Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: Prakash Surya <prakash.surya@delphix.com>
Reviewed by: Richard Laager <rlaager@wiktel.com>
Reviewed by: Tim Chase <tim@chase2k.com>
Reviewed by: Brian Behlendorf <behlendorf1@llnl.gov>
Approved by: Garrett D'Amore <garrett@damore.org>
Ported-by: Tim Chase <tim@chase2k.com>
Signed-off-by: Tim Chase <tim@chase2k.com>
OpenZFS-issue: https://www.illumos.org/issues/7614
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/f539f1eb
Closes #6900
2016-09-22 19:30:13 +03:00
|
|
|
NULL,
|
OpenZFS 9102 - zfs should be able to initialize storage devices
PROBLEM
========
The first access to a block incurs a performance penalty on some platforms
(e.g. AWS's EBS, VMware VMDKs). Therefore we recommend that volumes are
"thick provisioned", where supported by the platform (VMware). This can
create a large delay in getting new virtual machines up and running (or
adding storage to an existing Engine). If the thick provision step is
omitted, write performance will be suboptimal until all blocks on the LUN
have been written.
SOLUTION
=========
This feature introduces a way to 'initialize' the disks at install or in the
background to make sure we don't incur this first read penalty.
When an entire LUN is added to ZFS, we make all space available immediately,
and allow ZFS to find unallocated space and zero it out. This works with
concurrent writes to arbitrary offsets, ensuring that we don't zero out
something that has been (or is in the middle of being) written. This scheme
can also be applied to existing pools (affecting only free regions on the
vdev). Detailed design (a brief code sketch follows this list):
- new subcommand: zpool initialize [-cs] <pool> [<vdev> ...]
- start, suspend, or cancel initialization
- Creates new open-context thread for each vdev
- Thread iterates through all metaslabs in this vdev
- Each metaslab:
- select a metaslab
- load the metaslab
- mark the metaslab as being zeroed
- walk all free ranges within that metaslab and translate
them to ranges on the leaf vdev
- issue a "zeroing" I/O on the leaf vdev that corresponds to
a free range on the metaslab we're working on
- continue until all free ranges for this metaslab have been
"zeroed"
- reset/unmark the metaslab being zeroed
- if more metaslabs exist, then repeat above tasks.
- if no more metaslabs, then we're done.
- progress for the initialization is stored on-disk in the vdev’s
leaf zap object. The following information is stored:
- the last offset that has been initialized
- the state of the initialization process (i.e. active,
suspended, or canceled)
- the start time for the initialization
- progress is reported via the zpool status command and shows
information for each of the vdevs that are initializing
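The per-vdev thread therefore reduces to a loop of the following
shape; the helpers here are hypothetical and the progress bookkeeping
is elided:

	static void
	vdev_initialize_thread_sketch(vdev_t *vd)
	{
		for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
			metaslab_t *msp = vd->vdev_ms[m];

			metaslab_load(msp);	/* and mark it being zeroed */

			/* Zero each free range, translated to the leaf. */
			for_each_free_range(msp, start, size)
				write_init_pattern(vd, start, size);

			/* Record the last initialized offset on-disk. */
			save_initialize_progress(vd, m);
		}
	}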
Porting notes:
- Added zfs_initialize_value module parameter to set the pattern
written by "zpool initialize".
- Added zfs_vdev_{initializing,removal}_{min,max}_active module options.
Authored by: George Wilson <george.wilson@delphix.com>
Reviewed by: John Wren Kennedy <john.kennedy@delphix.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed by: Prakash Surya <prakash.surya@delphix.com>
Reviewed by: loli10K <ezomori.nozomu@gmail.com>
Reviewed by: Brian Behlendorf <behlendorf1@llnl.gov>
Approved by: Richard Lowe <richlowe@richlowe.net>
Signed-off-by: Tim Chase <tim@chase2k.com>
Ported-by: Tim Chase <tim@chase2k.com>
OpenZFS-issue: https://www.illumos.org/issues/9102
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/c3963210eb
Closes #8230
2018-12-19 17:54:59 +03:00
|
|
|
vdev_default_xlate,
|
2010-08-26 22:45:02 +04:00
|
|
|
VDEV_TYPE_DISK, /* name of this vdev type */
|
|
|
|
B_TRUE /* leaf vdev */
|
|
|
|
};
|
|
|
|
|
2017-09-05 23:41:32 +03:00
|
|
|
module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
|
|
|
|
param_get_charp, &zfs_vdev_scheduler, 0644);
|
2011-05-04 02:09:28 +04:00
|
|
|
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
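/*
 * Usage note (illustrative, not from the original source): the
 * scheduler may be changed at runtime through the standard module
 * parameter path, e.g.
 *
 *	echo noop > /sys/module/zfs/parameters/zfs_vdev_scheduler
 *
 * param_set_vdev_scheduler() above then applies the new elevator to
 * the whole-disk vdevs of every active, writeable pool.
 */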
|