mirror of https://git.proxmox.com/git/mirror_zfs.git (synced 2024-11-17 10:01:01 +03:00)
FreeBSD: Touch up comments in zvol_os
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Signed-off-by: Ryan Moeller <ryan@iXsystems.com>
Closes #12934
parent 020545a95d
commit 5a57d6f73b
@@ -213,12 +213,12 @@ zvol_geom_open(struct g_provider *pp, int flag, int count)
 
 	if (!zpool_on_zvol && tsd_get(zfs_geom_probe_vdev_key) != NULL) {
 		/*
-		 * if zfs_geom_probe_vdev_key is set, that means that zfs is
+		 * If zfs_geom_probe_vdev_key is set, that means that zfs is
 		 * attempting to probe geom providers while looking for a
 		 * replacement for a missing VDEV. In this case, the
 		 * spa_namespace_lock will not be held, but it is still illegal
 		 * to use a zvol as a vdev. Deadlocks can result if another
-		 * thread has spa_namespace_lock
+		 * thread has spa_namespace_lock.
 		 */
 		return (SET_ERROR(EOPNOTSUPP));
 	}
@@ -247,9 +247,9 @@ retry:
 	ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
 
 	/*
-	 * make sure zvol is not suspended during first open
+	 * Make sure zvol is not suspended during first open
 	 * (hold zv_suspend_lock) and respect proper lock acquisition
-	 * ordering - zv_suspend_lock before zv_state_lock
+	 * ordering - zv_suspend_lock before zv_state_lock.
 	 */
 	if (zv->zv_open_count == 0) {
 		drop_suspend = B_TRUE;
@@ -257,7 +257,7 @@ retry:
 			mutex_exit(&zv->zv_state_lock);
 			rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
 			mutex_enter(&zv->zv_state_lock);
-			/* check to see if zv_suspend_lock is needed */
+			/* Check to see if zv_suspend_lock is needed. */
 			if (zv->zv_open_count != 0) {
 				rw_exit(&zv->zv_suspend_lock);
 				drop_suspend = B_FALSE;
@@ -366,9 +366,9 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
 	ASSERT3U(zv->zv_open_count, >, 0);
 
 	/*
-	 * make sure zvol is not suspended during last close
+	 * Make sure zvol is not suspended during last close
 	 * (hold zv_suspend_lock) and respect proper lock acquisition
-	 * ordering - zv_suspend_lock before zv_state_lock
+	 * ordering - zv_suspend_lock before zv_state_lock.
 	 */
 	new_open_count = zv->zv_open_count - count;
 	if (new_open_count == 0) {
@@ -376,7 +376,7 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
 			mutex_exit(&zv->zv_state_lock);
 			rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
 			mutex_enter(&zv->zv_state_lock);
-			/* check to see if zv_suspend_lock is needed */
+			/* Check to see if zv_suspend_lock is needed. */
 			new_open_count = zv->zv_open_count - count;
 			if (new_open_count != 0) {
 				rw_exit(&zv->zv_suspend_lock);
@@ -707,7 +707,7 @@ zvol_geom_bio_strategy(struct bio *bp)
 			}
 		}
 		if (error) {
-			/* convert checksum errors into IO errors */
+			/* Convert checksum errors into IO errors. */
 			if (error == ECKSUM)
 				error = SET_ERROR(EIO);
 			break;
@@ -785,13 +785,13 @@ zvol_cdev_read(struct cdev *dev, struct uio *uio_s, int ioflag)
 	while (zfs_uio_resid(&uio) > 0 && zfs_uio_offset(&uio) < volsize) {
 		uint64_t bytes = MIN(zfs_uio_resid(&uio), DMU_MAX_ACCESS >> 1);
 
-		/* don't read past the end */
+		/* Don't read past the end. */
 		if (bytes > volsize - zfs_uio_offset(&uio))
 			bytes = volsize - zfs_uio_offset(&uio);
 
 		error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
 		if (error) {
-			/* convert checksum errors into IO errors */
+			/* Convert checksum errors into IO errors. */
 			if (error == ECKSUM)
 				error = SET_ERROR(EIO);
 			break;
@@ -838,7 +838,7 @@ zvol_cdev_write(struct cdev *dev, struct uio *uio_s, int ioflag)
 		uint64_t off = zfs_uio_offset(&uio);
 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
 
-		if (bytes > volsize - off)	/* don't write past the end */
+		if (bytes > volsize - off)	/* Don't write past the end. */
 			bytes = volsize - off;
 
 		dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
@@ -896,9 +896,9 @@ retry:
 	ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);
 
 	/*
-	 * make sure zvol is not suspended during first open
+	 * Make sure zvol is not suspended during first open
 	 * (hold zv_suspend_lock) and respect proper lock acquisition
-	 * ordering - zv_suspend_lock before zv_state_lock
+	 * ordering - zv_suspend_lock before zv_state_lock.
 	 */
 	if (zv->zv_open_count == 0) {
 		drop_suspend = B_TRUE;
@@ -906,7 +906,7 @@ retry:
 			mutex_exit(&zv->zv_state_lock);
 			rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
 			mutex_enter(&zv->zv_state_lock);
-			/* check to see if zv_suspend_lock is needed */
+			/* Check to see if zv_suspend_lock is needed. */
 			if (zv->zv_open_count != 0) {
 				rw_exit(&zv->zv_suspend_lock);
 				drop_suspend = B_FALSE;
@@ -1012,16 +1012,16 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
 	 */
 	ASSERT3U(zv->zv_open_count, >, 0);
 	/*
-	 * make sure zvol is not suspended during last close
+	 * Make sure zvol is not suspended during last close
 	 * (hold zv_suspend_lock) and respect proper lock acquisition
-	 * ordering - zv_suspend_lock before zv_state_lock
+	 * ordering - zv_suspend_lock before zv_state_lock.
 	 */
 	if (zv->zv_open_count == 1) {
 		if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
 			mutex_exit(&zv->zv_state_lock);
 			rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
 			mutex_enter(&zv->zv_state_lock);
-			/* check to see if zv_suspend_lock is needed */
+			/* Check to see if zv_suspend_lock is needed. */
 			if (zv->zv_open_count != 1) {
 				rw_exit(&zv->zv_suspend_lock);
 				drop_suspend = B_FALSE;
@@ -1216,7 +1216,7 @@ zvol_rename_minor(zvol_state_t *zv, const char *newname)
 	ASSERT(RW_LOCK_HELD(&zvol_state_lock));
 	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
 
-	/* move to new hashtable entry */
+	/* Move to a new hashtable entry. */
 	zv->zv_hash = zvol_name_hash(zv->zv_name);
 	hlist_del(&zv->zv_hlink);
 	hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
@@ -1346,7 +1346,7 @@ zvol_create_minor_impl(const char *name)
 
 	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
 
-	/* lie and say we're read-only */
+	/* Lie and say we're read-only. */
 	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
 	if (error)
 		goto out_doi;
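
The comments retouched in the open/close hunks above describe a lock-ordering rule: zv_suspend_lock must be taken before zv_state_lock, so a thread that already holds the state lock try-locks the suspend lock and, on failure, drops the state lock, blocks on the suspend lock, retakes the state lock, and re-checks its condition. The sketch below is only an illustration of that pattern, not the OpenZFS code itself: it is a user-space analogue with POSIX pthread locks standing in for the kernel rwlock/mutex, the struct and function names are hypothetical, and it omits the extra re-validation (the retry path) the real driver performs.

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical stand-in for the zvol state touched by the diff. */
struct zvol_sketch {
	pthread_rwlock_t suspend_lock;	/* plays the role of zv_suspend_lock */
	pthread_mutex_t  state_lock;	/* plays the role of zv_state_lock */
	unsigned int     open_count;	/* plays the role of zv_open_count */
};

/*
 * First-open path: the suspend lock is only needed when open_count is 0,
 * and it must never be acquired while state_lock is already held.  If the
 * try-lock fails, drop state_lock, block on suspend_lock, retake
 * state_lock, and re-check, since another opener may have raced in.
 * Returns true if the caller must drop suspend_lock when it is done.
 */
static bool
first_open_sketch(struct zvol_sketch *zv)
{
	bool drop_suspend = false;

	pthread_mutex_lock(&zv->state_lock);
	if (zv->open_count == 0) {
		drop_suspend = true;
		if (pthread_rwlock_tryrdlock(&zv->suspend_lock) != 0) {
			/* Respect the ordering: suspend_lock before state_lock. */
			pthread_mutex_unlock(&zv->state_lock);
			pthread_rwlock_rdlock(&zv->suspend_lock);
			pthread_mutex_lock(&zv->state_lock);
			/* Check to see if suspend_lock is still needed. */
			if (zv->open_count != 0) {
				pthread_rwlock_unlock(&zv->suspend_lock);
				drop_suspend = false;
			}
		}
	}
	zv->open_count++;
	pthread_mutex_unlock(&zv->state_lock);
	return (drop_suspend);
}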