Mirror of https://git.proxmox.com/git/mirror_zfs.git (synced 2024-12-25 18:59:33 +03:00)
Fix typos in module/zfs/
Reviewed-by: Matt Ahrens <matt@delphix.com>
Reviewed-by: Ryan Moeller <ryan@ixsystems.com>
Reviewed-by: Richard Laager <rlaager@wiktel.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Andrea Gelmini <andrea.gelmini@gelma.net>
Closes #9240
parent 7859537768
commit e1cfd73f7f
@@ -62,7 +62,7 @@
 * elements of the cache are therefore exactly the same size. So
 * when adjusting the cache size following a cache miss, its simply
 * a matter of choosing a single page to evict. In our model, we
-* have variable sized cache blocks (rangeing from 512 bytes to
+* have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes). We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
@@ -262,7 +262,7 @@
 * The L1ARC has a slightly different system for storing encrypted data.
 * Raw (encrypted + possibly compressed) data has a few subtle differences from
 * data that is just compressed. The biggest difference is that it is not
-* possible to decrypt encrypted data (or visa versa) if the keys aren't loaded.
+* possible to decrypt encrypted data (or vice-versa) if the keys aren't loaded.
 * The other difference is that encryption cannot be treated as a suggestion.
 * If a caller would prefer compressed data, but they actually wind up with
 * uncompressed data the worst thing that could happen is there might be a
@@ -2151,7 +2151,7 @@ arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
 }

 /*
-* Adjust encrypted and authenticated headers to accomodate
+* Adjust encrypted and authenticated headers to accommodate
 * the request if needed. Dnode blocks (ARC_FILL_IN_PLACE) are
 * allowed to fail decryption due to keys not being loaded
 * without being marked as an IO error.
@@ -2220,7 +2220,7 @@ arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
 if (arc_buf_is_shared(buf)) {
 ASSERT(ARC_BUF_COMPRESSED(buf));

-/* We need to give the buf it's own b_data */
+/* We need to give the buf its own b_data */
 buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
 buf->b_data =
 arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
@@ -2836,7 +2836,7 @@ arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
 * sufficient to make this guarantee, however it's possible
 * (specifically in the rare L2ARC write race mentioned in
 * arc_buf_alloc_impl()) there will be an existing uncompressed buf that
-* is sharable, but wasn't at the time of its allocation. Rather than
+* is shareable, but wasn't at the time of its allocation. Rather than
 * allow a new shared uncompressed buf to be created and then shuffle
 * the list around to make it the last element, this simply disallows
 * sharing if the new buf isn't the first to be added.
@@ -2895,7 +2895,7 @@ arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb,

 /*
 * Only honor requests for compressed bufs if the hdr is actually
-* compressed. This must be overriden if the buffer is encrypted since
+* compressed. This must be overridden if the buffer is encrypted since
 * encrypted buffers cannot be decompressed.
 */
 if (encrypted) {
@@ -3199,7 +3199,7 @@ arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
 }

 /*
-* Free up buf->b_data and pull the arc_buf_t off of the the arc_buf_hdr_t's
+* Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
 * list and free it.
 */
 static void
@@ -3658,7 +3658,7 @@ arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
 /*
 * This function is used by the send / receive code to convert a newly
 * allocated arc_buf_t to one that is suitable for a raw encrypted write. It
-* is also used to allow the root objset block to be uupdated without altering
+* is also used to allow the root objset block to be updated without altering
 * its embedded MACs. Both block types will always be uncompressed so we do not
 * have to worry about compression type or psize.
 */
@@ -6189,7 +6189,7 @@ top:

 /*
 * Determine if we have an L1 cache hit or a cache miss. For simplicity
-* we maintain encrypted data seperately from compressed / uncompressed
+* we maintain encrypted data separately from compressed / uncompressed
 * data. If the user is requesting raw encrypted data and we don't have
 * that in the header we will read from disk to guarantee that we can
 * get it even if the encryption keys aren't loaded.
@@ -2337,7 +2337,7 @@ dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
 ASSERT(!zfs_refcount_is_zero(&db->db_holds));

 /*
-* Quick check for dirtyness. For already dirty blocks, this
+* Quick check for dirtiness. For already dirty blocks, this
 * reduces runtime of this function by >90%, and overall performance
 * by 50% for some workloads (e.g. file deletion with indirect blocks
 * cached).
@@ -2892,7 +2892,7 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
 * Hold the dn_dbufs_mtx while we get the new dbuf
 * in the hash table *and* added to the dbufs list.
 * This prevents a possible deadlock with someone
-* trying to look up this dbuf before its added to the
+* trying to look up this dbuf before it's added to the
 * dn_dbufs list.
 */
 mutex_enter(&dn->dn_dbufs_mtx);
@@ -3337,7 +3337,7 @@ dbuf_hold_impl_arg(struct dbuf_hold_arg *dh)
 ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf));

 /*
-* If this buffer is currently syncing out, and we are are
+* If this buffer is currently syncing out, and we are
 * still referencing it from db_data, we need to make a copy
 * of it in case we decide we want to dirty it again in this txg.
 */
@@ -3812,7 +3812,7 @@ dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
 /*
 * This buffer was allocated at a time when there was
 * no available blkptrs from the dnode, or it was
-* inappropriate to hook it in (i.e., nlevels mis-match).
+* inappropriate to hook it in (i.e., nlevels mismatch).
 */
 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
 ASSERT(db->db_parent == NULL);
@@ -639,11 +639,11 @@ dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)

 /*
 * Issue prefetch i/os for the given blocks. If level is greater than 0, the
-* indirect blocks prefeteched will be those that point to the blocks containing
+* indirect blocks prefetched will be those that point to the blocks containing
 * the data starting at offset, and continuing to offset + len.
 *
 * Note that if the indirect blocks above the blocks being prefetched are not
-* in cache, they will be asychronously read in.
+* in cache, they will be asynchronously read in.
 */
 void
 dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
@@ -2176,7 +2176,7 @@ dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
 * Determine dedup setting. If we are in dmu_sync(),
 * we won't actually dedup now because that's all
 * done in syncing context; but we do want to use the
-* dedup checkum. If the checksum is not strong
+* dedup checksum. If the checksum is not strong
 * enough to ensure unique signatures, force
 * dedup_verify.
 */
@@ -1028,7 +1028,7 @@ dmu_objset_create_impl_dnstats(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,

 /*
 * We don't want to have to increase the meta-dnode's nlevels
-* later, because then we could do it in quescing context while
+* later, because then we could do it in quiescing context while
 * we are also accessing it in open context.
 *
 * This precaution is not necessary for the MOS (ds == NULL),
@@ -2648,7 +2648,7 @@ dmu_objset_find_dp_cb(void *arg)

 /*
 * We need to get a pool_config_lock here, as there are several
-* asssert(pool_config_held) down the stack. Getting a lock via
+* assert(pool_config_held) down the stack. Getting a lock via
 * dsl_pool_config_enter is risky, as it might be stalled by a
 * pending writer. This would deadlock, as the write lock can
 * only be granted when our parent thread gives up the lock.
@@ -548,7 +548,7 @@ dump_write(dmu_send_cookie_t *dscp, dmu_object_type_t type, uint64_t object,
 /*
 * There's no pre-computed checksum for partial-block writes,
 * embedded BP's, or encrypted BP's that are being sent as
-* plaintext, so (like fletcher4-checkummed blocks) userland
+* plaintext, so (like fletcher4-checksummed blocks) userland
 * will have to compute a dedup-capable checksum itself.
 */
 drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
@@ -2262,7 +2262,7 @@ setup_send_progress(struct dmu_send_params *dspp)
 *
 * The final case is a simple zfs full or incremental send. The to_ds traversal
 * thread behaves the same as always. The redact list thread is never started.
-* The send merge thread takes all the blocks that the to_ds traveral thread
+* The send merge thread takes all the blocks that the to_ds traversal thread
 * sends it, prefetches the data, and sends the blocks on to the main thread.
 * The main thread sends the data over the wire.
 *
@@ -221,7 +221,7 @@ dmu_zfetch(zfetch_t *zf, uint64_t blkid, uint64_t nblks, boolean_t fetch_data,
 * can only read from blocks that we carefully ensure are on
 * concrete vdevs (or previously-loaded indirect vdevs). So we
 * can't allow the predictive prefetcher to attempt reads of other
-* blocks (e.g. of the MOS's dnode obejct).
+* blocks (e.g. of the MOS's dnode object).
 */
 if (!spa_indirect_vdevs_loaded(spa))
 return;
@@ -1787,7 +1787,7 @@ dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
 dn->dn_indblkshift = ibs;
 dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;
 }
-/* rele after we have fixed the blocksize in the dnode */
+/* release after we have fixed the blocksize in the dnode */
 if (db)
 dbuf_rele(db, FTAG);

@@ -88,7 +88,7 @@ dsl_bookmark_lookup_impl(dsl_dataset_t *ds, const char *shortname,
 }

 /*
-* If later_ds is non-NULL, this will return EXDEV if the the specified bookmark
+* If later_ds is non-NULL, this will return EXDEV if the specified bookmark
 * does not represents an earlier point in later_ds's timeline. However,
 * bmp will still be filled in if we return EXDEV.
 *
@@ -227,7 +227,7 @@ dsl_crypto_params_create_nvlist(dcp_cmd_t cmd, nvlist_t *props,
 goto error;
 }

-/* if the user asked for the deault crypt, determine that now */
+/* if the user asked for the default crypt, determine that now */
 if (dcp->cp_crypt == ZIO_CRYPT_ON)
 dcp->cp_crypt = ZIO_CRYPT_ON_VALUE;

@@ -1596,7 +1596,7 @@ spa_keystore_change_key(const char *dsname, dsl_crypto_params_t *dcp)
 /*
 * Perform the actual work in syncing context. The blocks modified
 * here could be calculated but it would require holding the pool
-* lock and tarversing all of the datasets that will have their keys
+* lock and traversing all of the datasets that will have their keys
 * changed.
 */
 return (dsl_sync_task(dsname, spa_keystore_change_key_check,
@@ -1714,7 +1714,7 @@ dsl_dataset_promote_crypt_sync(dsl_dir_t *target, dsl_dir_t *origin,
 return;

 /*
-* If the target is being promoted to the encyrption root update the
+* If the target is being promoted to the encryption root update the
 * DSL Crypto Key and keylocation to reflect that. We also need to
 * update the DSL Crypto Keys of all children inheritting their
 * encryption root to point to the new target. Otherwise, the check
@@ -393,7 +393,7 @@ load_zfeature(objset_t *mos, dsl_dataset_t *ds, spa_feature_t f)
 }

 /*
-* We have to release the fsid syncronously or we risk that a subsequent
+* We have to release the fsid synchronously or we risk that a subsequent
 * mount of the same dataset will fail to unique_insert the fsid. This
 * failure would manifest itself as the fsid of this dataset changing
 * between mounts which makes NFS clients quite unhappy.
@@ -2308,7 +2308,7 @@ get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
 * We use nvlist_alloc() instead of fnvlist_alloc() because the
 * latter would allocate the list with NV_UNIQUE_NAME flag.
 * As a result, every time a clone name is appended to the list
-* it would be (linearly) searched for for a duplicate name.
+* it would be (linearly) searched for a duplicate name.
 * We already know that all clone names must be unique and we
 * want avoid the quadratic complexity of double-checking that
 * because we can have a large number of clones.
@@ -2683,7 +2683,7 @@ dsl_get_mountpoint(dsl_dataset_t *ds, const char *dsname, char *value,
 int error;
 dsl_pool_t *dp = ds->ds_dir->dd_pool;

-/* Retrieve the mountpoint value stored in the zap opbject */
+/* Retrieve the mountpoint value stored in the zap object */
 error = dsl_prop_get_ds(ds, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 1,
 ZAP_MAXVALUELEN, value, source);
 if (error != 0) {
@@ -3961,7 +3961,7 @@ dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
 * The clone can't be too much over the head's refquota.
 *
 * To ensure that the entire refquota can be used, we allow one
-* transaction to exceed the the refquota. Therefore, this check
+* transaction to exceed the refquota. Therefore, this check
 * needs to also allow for the space referenced to be more than the
 * refquota. The maximum amount of space that one transaction can use
 * on disk is DMU_MAX_ACCESS * spa_asize_inflation. Allowing this
@@ -667,7 +667,7 @@ dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,

 /*
 * lzc_destroy_snaps() is documented to fill the errlist with
-* int32 values, so we need to covert the int64 values that are
+* int32 values, so we need to convert the int64 values that are
 * returned from LUA.
 */
 int rv = 0;
@@ -97,7 +97,7 @@
 * limit set. If there is a limit at any initialized level up the tree, the
 * check must pass or the creation will fail. Likewise, when a filesystem or
 * snapshot is destroyed, the counts are recursively adjusted all the way up
-* the initizized nodes in the tree. Renaming a filesystem into different point
+* the initialized nodes in the tree. Renaming a filesystem into different point
 * in the tree will first validate, then update the counts on each branch up to
 * the common ancestor. A receive will also validate the counts and then update
 * them.
@@ -1467,7 +1467,7 @@ dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
 * less than the amount specified.
 *
 * NOTE: The behavior of this function is identical to the Illumos / FreeBSD
-* version however it has been adjusted to use an iterative rather then
+* version however it has been adjusted to use an iterative rather than
 * recursive algorithm to minimize stack usage.
 */
 void
@@ -1912,7 +1912,7 @@ dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,

 /*
 * This debugging is commented out to conserve stack space. This
-* function is called recursively and the debugging addes several
+* function is called recursively and the debugging adds several
 * bytes to the stack for each call. It can be commented back in
 * if required to debug an issue in dsl_scan_visitbp().
 *
@@ -3373,7 +3373,7 @@ dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
 /*
 * This is the primary entry point for scans that is called from syncing
 * context. Scans must happen entirely during syncing context so that we
-* cna guarantee that blocks we are currently scanning will not change out
+* can guarantee that blocks we are currently scanning will not change out
 * from under us. While a scan is active, this function controls how quickly
 * transaction groups proceed, instead of the normal handling provided by
 * txg_sync_thread().
@@ -3977,7 +3977,7 @@ scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
 * As can be seen, at fill_ratio=3, the algorithm is slightly biased towards
 * extents that are more completely filled (in a 3:2 ratio) vs just larger.
 * Note that as an optimization, we replace multiplication and division by
-* 100 with bitshifting by 7 (which effecitvely multiplies and divides by 128).
+* 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
 */
 static int
 ext_size_compare(const void *x, const void *y)
@@ -143,7 +143,7 @@ dsl_sync_task(const char *pool, dsl_checkfunc_t *checkfunc,
 * For that reason, early synctasks can affect the process of writing dirty
 * changes to disk for the txg that they run and should be used with caution.
 * In addition, early synctasks should not dirty any metaslabs as this would
-* invalidate the precodition/invariant for subsequent early synctasks.
+* invalidate the precondition/invariant for subsequent early synctasks.
 * [see dsl_pool_sync() and dsl_early_sync_task_verify()]
 */
 int
@@ -302,7 +302,7 @@ dsl_dataset_user_hold_sync(void *arg, dmu_tx_t *tx)
 * holds is nvl of snapname -> holdname
 * errlist will be filled in with snapname -> error
 *
-* The snaphosts must all be in the same pool.
+* The snapshots must all be in the same pool.
 *
 * Holds for snapshots that don't exist will be skipped.
 *
@@ -556,9 +556,9 @@ dsl_dataset_user_release_sync(void *arg, dmu_tx_t *tx)
 * errlist will be filled in with snapname -> error
 *
 * If tmpdp is not NULL the names for holds should be the dsobj's of snapshots,
-* otherwise they should be the names of shapshots.
+* otherwise they should be the names of snapshots.
 *
-* As a release may cause snapshots to be destroyed this trys to ensure they
+* As a release may cause snapshots to be destroyed this tries to ensure they
 * aren't mounted.
 *
 * The release of non-existent holds are skipped.
@@ -31,7 +31,7 @@
 * Name-Value Pair Lists
 *
 * The embodiment of an FMA protocol element (event, fmri or authority) is a
-* name-value pair list (nvlist_t). FMA-specific nvlist construtor and
+* name-value pair list (nvlist_t). FMA-specific nvlist constructor and
 * destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
 * to create an nvpair list using custom allocators. Callers may choose to
 * allocate either from the kernel memory allocator, or from a preallocated
@@ -784,7 +784,7 @@ zfs_zevent_destroy(zfs_zevent_t *ze)
 #endif /* _KERNEL */

 /*
-* Wrapppers for FM nvlist allocators
+* Wrappers for FM nvlist allocators
 */
 /* ARGSUSED */
 static void *
@@ -112,7 +112,7 @@ int zfs_mg_noalloc_threshold = 0;

 /*
 * Metaslab groups are considered eligible for allocations if their
-* fragmenation metric (measured as a percentage) is less than or
+* fragmentation metric (measured as a percentage) is less than or
 * equal to zfs_mg_fragmentation_threshold. If a metaslab group
 * exceeds this threshold then it will be skipped unless all metaslab
 * groups within the metaslab class have also crossed this threshold.
@@ -1285,7 +1285,7 @@ metaslab_largest_unflushed_free(metaslab_t *msp)
 * deferred. Similar logic applies to the ms_freed tree. See
 * metaslab_load() for more details.
 *
-* There are two primary sources of innacuracy in this estimate. Both
+* There are two primary sources of inaccuracy in this estimate. Both
 * are tolerated for performance reasons. The first source is that we
 * only check the largest segment for overlaps. Smaller segments may
 * have more favorable overlaps with the other trees, resulting in
@@ -1874,7 +1874,7 @@ metaslab_verify_weight_and_frag(metaslab_t *msp)
 * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
 * this class that was used longest ago, and attempt to unload it. We don't
 * want to spend too much time in this loop to prevent performance
-* degredation, and we expect that most of the time this operation will
+* degradation, and we expect that most of the time this operation will
 * succeed. Between that and the normal unloading processing during txg sync,
 * we expect this to keep the metaslab memory usage under control.
 */
@@ -3060,7 +3060,7 @@ metaslab_passivate(metaslab_t *msp, uint64_t weight)
 * we either fail an allocation attempt (similar to space-based metaslabs)
 * or have exhausted the free space in zfs_metaslab_switch_threshold
 * buckets since the metaslab was activated. This function checks to see
-* if we've exhaused the zfs_metaslab_switch_threshold buckets in the
+* if we've exhausted the zfs_metaslab_switch_threshold buckets in the
 * metaslab and passivates it proactively. This will allow us to select a
 * metaslab with a larger contiguous region, if any, remaining within this
 * metaslab group. If we're in sync pass > 1, then we continue using this
@@ -4294,7 +4294,7 @@ metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
 * have selected, we may not try the newly-activated metaslab, and instead
 * activate another metaslab. This is not optimal, but generally does not cause
 * any problems (a possible exception being if every metaslab is completely full
-* except for the the newly-activated metaslab which we fail to examine).
+* except for the newly-activated metaslab which we fail to examine).
 */
 static metaslab_t *
 find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
@@ -4441,7 +4441,7 @@ metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
 /*
 * Even though we don't hold the ms_lock for the
 * primary metaslab, those fields should not
-* change while we hold the mg_lock. Thus is is
+* change while we hold the mg_lock. Thus it is
 * safe to make assertions on them.
 */
 ASSERT(msp->ms_primary);
@@ -4879,7 +4879,7 @@ top:

 /*
 * If we don't need to try hard, then require that the
-* block be on an different metaslab from any other DVAs
+* block be on a different metaslab from any other DVAs
 * in this BP (unique=true). If we are trying hard, then
 * allow any metaslab to be used (unique=false).
 */
@@ -5685,7 +5685,7 @@ metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
 *
 * It would intuitively make sense to also check the current allocating
 * tree since metaslab_unalloc_dva() exists for extents that are
-* allocated and freed in the same sync pass withing the same txg.
+* allocated and freed in the same sync pass within the same txg.
 * Unfortunately there are places (e.g. the ZIL) where we allocate a
 * segment but then we free part of it within the same txg
 * [see zil_sync()]. Thus, we don't call range_tree_verify() in the
@@ -87,12 +87,12 @@
 *
 * In this case, a weak guarantee is provided. Since the host which last had
 * the pool imported will suspend the pool if no mmp writes land within
-* fail_intervals * multihost_interval ms, the absense of writes during that
+* fail_intervals * multihost_interval ms, the absence of writes during that
 * time means either the pool is not imported, or it is imported but the pool
 * is suspended and no further writes will occur.
 *
 * Note that resuming the suspended pool on the remote host would invalidate
-* this gurantee, and so it is not allowed.
+* this guarantee, and so it is not allowed.
 *
 * The factor of 2 provides a conservative safety factor and derives from
 * MMP_IMPORT_SAFETY_FACTOR;
@@ -70,7 +70,7 @@ static int
 priv_policy_user(const cred_t *cr, int capability, boolean_t all, int err)
 {
 /*
-* All priv_policy_user checks are preceeded by kuid/kgid_has_mapping()
+* All priv_policy_user checks are preceded by kuid/kgid_has_mapping()
 * checks. If we cannot do them, we shouldn't be using ns_capable()
 * since we don't know whether the affected files are valid in our
 * namespace. Note that kuid_has_mapping() came after cred->user_ns, so
@@ -85,7 +85,7 @@ typedef struct qat_stats {
 * Number of fails in the QAT compression / decompression engine.
 * Note: when a QAT error happens, it doesn't necessarily indicate a
 * critical hardware issue. Sometimes it is because the output buffer
-* is not big enough. The compression job will be transfered to the
+* is not big enough. The compression job will be transferred to the
 * gzip software implementation so the functionality of ZFS is not
 * impacted.
 */
@@ -118,7 +118,7 @@ typedef struct qat_stats {
 /*
 * Number of fails in the QAT encryption / decryption engine.
 * Note: when a QAT error happens, it doesn't necessarily indicate a
-* critical hardware issue. The encryption job will be transfered
+* critical hardware issue. The encryption job will be transferred
 * to the software implementation so the functionality of ZFS is
 * not impacted.
 */
@@ -135,7 +135,7 @@ typedef struct qat_stats {
 /*
 * Number of fails in the QAT checksum engine.
 * Note: when a QAT error happens, it doesn't necessarily indicate a
-* critical hardware issue. The checksum job will be transfered to the
+* critical hardware issue. The checksum job will be transferred to the
 * software implementation so the functionality of ZFS is not impacted.
 */
 kstat_named_t cksum_fails;
@@ -83,7 +83,7 @@
 * Layouts are simply an array of the attributes and their
 * ordering i.e. [0, 1, 4, 5, 2]
 *
-* Each distinct layout is given a unique layout number and that is whats
+* Each distinct layout is given a unique layout number and that is what's
 * stored in the header at the beginning of the SA data buffer.
 *
 * A layout only covers a single dbuf (bonus or spill). If a set of
@@ -95,7 +95,7 @@
 * Adding a single attribute will cause the entire set of attributes to
 * be rewritten and could result in a new layout number being constructed
 * as part of the rewrite if no such layout exists for the new set of
-* attribues. The new attribute will be appended to the end of the already
+* attributes. The new attribute will be appended to the end of the already
 * existing attributes.
 *
 * Both the attribute registration and attribute layout information are
@@ -2204,7 +2204,7 @@ spa_load_verify_done(zio_t *zio)
 }

 /*
-* Maximum number of inflight bytes is the log2 faction of the arc size.
+* Maximum number of inflight bytes is the log2 fraction of the arc size.
 * By default, we set it to 1/16th of the arc.
 */
 int spa_load_verify_shift = 4;
@@ -3030,7 +3030,7 @@ spa_activity_check_duration(spa_t *spa, uberblock_t *ub)

 } else if (MMP_VALID(ub)) {
 /*
-* zfs-0.7 compatability case
+* zfs-0.7 compatibility case
 */

 import_delay = MAX(import_delay, (multihost_interval +
@@ -4339,7 +4339,7 @@ spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
 need_update = B_TRUE;

 /*
-* Update the config cache asychronously in case we're the
+* Update the config cache asynchronously in case we're the
 * root pool, in which case the config cache isn't writable yet.
 */
 if (need_update)
@@ -4652,7 +4652,7 @@ spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport)
 return (error);

 /*
-* Redo the loading process process again with the
+* Redo the loading process again with the
 * checkpointed uberblock.
 */
 spa_ld_prepare_for_reload(spa);
@@ -8441,7 +8441,7 @@ spa_sync_props(void *arg, dmu_tx_t *tx)
 case ZPOOL_PROP_READONLY:
 case ZPOOL_PROP_CACHEFILE:
 /*
-* 'readonly' and 'cachefile' are also non-persisitent
+* 'readonly' and 'cachefile' are also non-persistent
 * properties.
 */
 break;
@@ -9278,7 +9278,7 @@ EXPORT_SYMBOL(spa_inject_delref);
 EXPORT_SYMBOL(spa_scan_stat_init);
 EXPORT_SYMBOL(spa_scan_get_stats);

-/* device maniion */
+/* device manipulation */
 EXPORT_SYMBOL(spa_vdev_add);
 EXPORT_SYMBOL(spa_vdev_attach);
 EXPORT_SYMBOL(spa_vdev_detach);
@@ -102,7 +102,7 @@
 * Once the synctask is done and the discarding zthr is awake, we discard
 * the checkpointed data over multiple TXGs by having the zthr prefetching
 * entries from vdev_checkpoint_sm and then starting a synctask that places
-* them as free blocks in to their respective ms_allocatable and ms_sm
+* them as free blocks into their respective ms_allocatable and ms_sm
 * structures.
 * [see spa_checkpoint_discard_thread()]
 *
@@ -31,7 +31,7 @@
 * and the current log. All errors seen are logged to the current log. When a
 * scrub completes, the current log becomes the last log, the last log is thrown
 * out, and the current log is reinitialized. This way, if an error is somehow
-* corrected, a new scrub will show that that it no longer exists, and will be
+* corrected, a new scrub will show that it no longer exists, and will be
 * deleted from the log when the scrub completes.
 *
 * The log is stored using a ZAP object whose key is a string form of the
@@ -63,7 +63,7 @@
 * overwrite the original creation of the pool. 'sh_phys_max_off' is the
 * physical ending offset in bytes of the log. This tells you the length of
 * the buffer. 'sh_eof' is the logical EOF (in bytes). Whenever a record
-* is added, 'sh_eof' is incremented by the the size of the record.
+* is added, 'sh_eof' is incremented by the size of the record.
 * 'sh_eof' is never decremented. 'sh_bof' is the logical BOF (in bytes).
 * This is where the consumer should start reading from after reading in
 * the 'zpool create' portion of the log.
@@ -180,7 +180,7 @@
 unsigned long zfs_log_sm_blksz = 1ULL << 17;

 /*
-* Percentage of the overall system’s memory that ZFS allows to be
+* Percentage of the overall system's memory that ZFS allows to be
 * used for unflushed changes (e.g. the sum of size of all the nodes
 * in the unflushed trees).
 *
@@ -392,7 +392,7 @@ summary_entry_is_full(spa_t *spa, log_summary_entry_t *e)
 * Update the log summary information to reflect the fact that a metaslab
 * was flushed or destroyed (e.g due to device removal or pool export/destroy).
 *
-* We typically flush the oldest flushed metaslab so the first (and olderst)
+* We typically flush the oldest flushed metaslab so the first (and oldest)
 * entry of the summary is updated. However if that metaslab is getting loaded
 * we may flush the second oldest one which may be part of an entry later in
 * the summary. Moreover, if we call into this function from metaslab_fini()
@@ -838,7 +838,7 @@ spa_flush_metaslabs(spa_t *spa, dmu_tx_t *tx)

 /*
 * Close the log space map for this TXG and update the block counts
-* for the the log's in-memory structure and the summary.
+* for the log's in-memory structure and the summary.
 */
 void
 spa_sync_close_syncing_log_sm(spa_t *spa)
@@ -644,8 +644,8 @@ txg_quiesce_thread(void *arg)

 /*
 * Delay this thread by delay nanoseconds if we are still in the open
-* transaction group and there is already a waiting txg quiesing or quiesced.
-* Abort the delay if this txg stalls or enters the quiesing state.
+* transaction group and there is already a waiting txg quiescing or quiesced.
+* Abort the delay if this txg stalls or enters the quiescing state.
 */
 void
 txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
@@ -768,7 +768,7 @@ txg_wait_open(dsl_pool_t *dp, uint64_t txg, boolean_t should_quiesce)

 /*
 * If there isn't a txg syncing or in the pipeline, push another txg through
-* the pipeline by queiscing the open txg.
+* the pipeline by quiescing the open txg.
 */
 void
 txg_kick(dsl_pool_t *dp)
@@ -223,7 +223,7 @@ vdev_default_xlate(vdev_t *vd, const range_seg_t *in, range_seg_t *res)
 }

 /*
-* Derive the enumerated alloction bias from string input.
+* Derive the enumerated allocation bias from string input.
 * String origin is either the per-vdev zap or zpool(1M).
 */
 static vdev_alloc_bias_t
@@ -1321,7 +1321,7 @@ vdev_metaslab_init(vdev_t *vd, uint64_t txg)

 #ifndef _KERNEL
 /*
-* To accomodate zdb_leak_init() fake indirect
+* To accommodate zdb_leak_init() fake indirect
 * metaslabs, we allocate a metaslab group for
 * indirect vdevs which normally don't have one.
 */
@@ -4191,7 +4191,7 @@ vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
 * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion
 * factor. We must calculate this here and not at the root vdev
 * because the root vdev's psize-to-asize is simply the max of its
-* childrens', thus not accurate enough for us.
+* children's, thus not accurate enough for us.
 */
 dspace_delta = vdev_deflated_space(vd, space_delta);

@@ -46,7 +46,7 @@
 * terribly wasteful of bandwidth. A more intelligent version of the cache
 * could keep track of access patterns and not do read-ahead unless it sees
 * at least two temporally close I/Os to the same region. Currently, only
-* metadata I/O is inflated. A futher enhancement could take advantage of
+* metadata I/O is inflated. A further enhancement could take advantage of
 * more semantic information about the I/O. And it could use something
 * faster than an AVL tree; that was chosen solely for convenience.
 *
@@ -602,7 +602,7 @@ vdev_initialize_stop_wait(spa_t *spa, list_t *vd_list)
 }

 /*
-* Stop initializing a device, with the resultant initialing state being
+* Stop initializing a device, with the resultant initializing state being
 * tgt_state. For blocking behavior pass NULL for vd_list. Otherwise, when
 * a list_t is provided the stopping vdev is inserted in to the list. Callers
 * are then required to call vdev_initialize_stop_wait() to block for all the
@@ -485,7 +485,7 @@ vdev_mirror_preferred_child_randomize(zio_t *zio)

 /*
 * Try to find a vdev whose DTL doesn't contain the block we want to read
-* prefering vdevs based on determined load.
+* preferring vdevs based on determined load.
 *
 * Try to find a child whose DTL doesn't contain the block we want to read.
 * If we can't, try the read on any vdev we haven't already tried.
@@ -893,7 +893,7 @@ vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority)
 * ZIO_PRIORITY_NOW is used by the vdev cache code and the aggregate zio
 * code to issue IOs without adding them to the vdev queue. In this
 * case, the zio is already going to be issued as quickly as possible
-* and so it doesn't need any reprioitization to help.
+* and so it doesn't need any reprioritization to help.
 */
 if (zio->io_priority == ZIO_PRIORITY_NOW)
 return;
@@ -98,7 +98,7 @@
 * R = 4^n-1 * D_0 + 4^n-2 * D_1 + ... + 4^1 * D_n-2 + 4^0 * D_n-1
 * = ((...((D_0) * 4 + D_1) * 4 + ...) * 4 + D_n-2) * 4 + D_n-1
 *
-* We chose 1, 2, and 4 as our generators because 1 corresponds to the trival
+* We chose 1, 2, and 4 as our generators because 1 corresponds to the trivial
 * XOR operation, and 2 and 4 can be computed quickly and generate linearly-
 * independent coefficients. (There are no additional coefficients that have
 * this property which is why the uncorrected Plank method breaks down.)
@@ -447,7 +447,7 @@ vdev_raidz_map_alloc(zio_t *zio, uint64_t ashift, uint64_t dcols,
 /*
 * If all data stored spans all columns, there's a danger that parity
 * will always be on the same device and, since parity isn't read
-* during normal operation, that that device's I/O bandwidth won't be
+* during normal operation, that device's I/O bandwidth won't be
 * used effectively. We therefore switch the parity every 1MB.
 *
 * ... at least that was, ostensibly, the theory. As a practical
@@ -2336,7 +2336,7 @@ vdev_raidz_state_change(vdev_t *vd, int faulted, int degraded)
 /*
 * Determine if any portion of the provided block resides on a child vdev
 * with a dirty DTL and therefore needs to be resilvered. The function
-* assumes that at least one DTL is dirty which imples that full stripe
+* assumes that at least one DTL is dirty which implies that full stripe
 * width blocks must be resilvered.
 */
 static boolean_t
@@ -42,7 +42,7 @@
 /*
 * Here we need registers not used otherwise.
 * They will be used in unused ASM for the case
-* with more registers than required... but GGC
+* with more registers than required... but GCC
 * will still need to make sure the constraints
 * are correct, and duplicate constraints are illegal
 * ... and we use the "register" number as a name
@@ -66,7 +66,7 @@
 * consuming excessive system or running forever. If one of these limits is
 * hit, the channel program will be stopped immediately and return from
 * zcp_eval() with an error code. No attempt will be made to roll back or undo
-* any changes made by the channel program before the error occured.
+* any changes made by the channel program before the error occurred.
 * Consumers invoking zcp_eval() from elsewhere in the kernel may pass a time
 * limit of 0, disabling the time limit.
 *
@@ -77,7 +77,7 @@
 * In place of a return value, an error message will also be returned in the
 * 'result' nvlist containing information about the error. No attempt will be
 * made to roll back or undo any changes made by the channel program before the
-* error occured.
+* error occurred.
 *
 * 3. If an error occurs inside a ZFS library call which returns an error code,
 * the error is returned to the Lua script to be handled as desired.
@@ -160,7 +160,7 @@ zcp_argerror(lua_State *state, int narg, const char *msg, ...)
 * of a function call.
 *
 * If an error occurs, the cleanup function will be invoked exactly once and
-* then unreigstered.
+* then unregistered.
 *
 * Returns the registered cleanup handler so the caller can deregister it
 * if no error occurs.
@@ -547,7 +547,7 @@ get_zap_prop(lua_State *state, dsl_dataset_t *ds, zfs_prop_t zfs_prop)
 error = dsl_prop_get_ds(ds, prop_name, sizeof (numval),
 1, &numval, setpoint);

-/* Fill in temorary value for prop, if applicable */
+/* Fill in temporary value for prop, if applicable */
 (void) get_temporary_prop(ds, zfs_prop, &numval, setpoint);

 /* Push value to lua stack */
@@ -678,7 +678,7 @@ parse_userquota_prop(const char *prop_name, zfs_userquota_prop_t *type,
 if (strncmp(cp, "S-1-", 4) == 0) {
 /*
 * It's a numeric SID (eg "S-1-234-567-89") and we want to
-* seperate the domain id and the rid
+* separate the domain id and the rid
 */
 int domain_len = strrchr(cp, '-') - cp;
 domain_val = kmem_alloc(domain_len + 1, KM_SLEEP);
@@ -457,7 +457,7 @@ static zcp_list_info_t zcp_system_props_list_info = {
 };

 /*
-* Get a list of all visble system properties and their values for a given
+* Get a list of all visible system properties and their values for a given
 * dataset. Returned on the stack as a Lua table.
 */
 static int
@@ -810,7 +810,7 @@ zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp, cred_t *cr)
 * for zfs_copy_ace_2_fuid().
 *
 * We only convert an ACL once, so this won't happen
-* everytime.
+* every time.
 */
 oldaclp = kmem_alloc(sizeof (zfs_oldace_t) * aclp->z_acl_count,
 KM_SLEEP);
@@ -44,7 +44,7 @@ zfs_oldace_byteswap(ace_t *ace, int ace_cnt)
 }

 /*
-* swap ace_t and ace_oject_t
+* swap ace_t and ace_object_t
 */
 void
 zfs_ace_byteswap(void *buf, size_t size, boolean_t zfs_layout)
@@ -70,7 +70,7 @@ zfs_ace_byteswap(void *buf, size_t size, boolean_t zfs_layout)
 * larger than needed to hold the aces
 * present. As long as we do not do any
 * swapping beyond the end of our block we are
-* okay. It it safe to swap any non-ace data
+* okay. It is safe to swap any non-ace data
 * within the block since it is just zeros.
 */
 if (ptr + sizeof (zfs_ace_hdr_t) > end) {
@@ -596,7 +596,7 @@ zfsctl_root(znode_t *zp)

 /*
 * Generate a long fid to indicate a snapdir. We encode whether snapdir is
-* already monunted in gen field. We do this because nfsd lookup will not
+* already mounted in gen field. We do this because nfsd lookup will not
 * trigger automount. Next time the nfsd does fh_to_dentry, we will notice
 * this and do automount and return ESTALE to force nfsd revalidate and follow
 * mount.
@@ -55,7 +55,7 @@
 #include <sys/zfs_sa.h>

 /*
-* zfs_match_find() is used by zfs_dirent_lock() to peform zap lookups
+* zfs_match_find() is used by zfs_dirent_lock() to perform zap lookups
 * of names after deciding which is the appropriate lookup interface.
 */
 static int
@@ -232,7 +232,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
 /*
 * Wait until there are no locks on this name.
 *
-* Don't grab the the lock if it is already held. However, cannot
+* Don't grab the lock if it is already held. However, cannot
 * have both ZSHARED and ZHAVELOCK together.
 */
 ASSERT(!(flag & ZSHARED) || !(flag & ZHAVELOCK));
@@ -2106,7 +2106,7 @@ zfs_ioc_objset_stats_impl(zfs_cmd_t *zc, objset_t *os)
 * which we aren't supposed to do with a
 * DS_MODE_USER hold, because it could be
 * inconsistent. So this is a bit of a workaround...
-* XXX reading with out owning
+* XXX reading without owning
 */
 if (!zc->zc_objset_stats.dds_inconsistent &&
 dmu_objset_type(os) == DMU_OST_ZVOL) {
@@ -7097,7 +7097,7 @@ zfs_check_input_nvpairs(nvlist_t *innvl, const zfs_ioc_vec_t *vec)
 continue;

 if (nvl_keys[k].zkey_flags & ZK_WILDCARDLIST) {
-/* at least one non-optionial key is expected here */
+/* at least one non-optional key is expected here */
 if (!required_keys_found)
 return (SET_ERROR(ZFS_ERR_IOC_ARG_REQUIRED));
 continue;
@@ -1477,7 +1477,7 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
 * "preferred" size.
 */

-/* Round up so we never have a filesytem using 0 blocks. */
+/* Round up so we never have a filesystem using 0 blocks. */
 refdbytes = P2ROUNDUP(refdbytes, statp->f_bsize);
 statp->f_blocks = (refdbytes + availbytes) >> bshift;
 statp->f_bfree = availbytes >> bshift;
@@ -2431,7 +2431,7 @@ zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
 }

 /*
-* Return true if the coresponding vfs's unmounted flag is set.
+* Return true if the corresponding vfs's unmounted flag is set.
 * Otherwise return false.
 * If this function returns true we know VFS unmount has been initiated.
 */
@@ -889,7 +889,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 * Clear Set-UID/Set-GID bits on successful write if not
 * privileged and at least one of the execute bits is set.
 *
-* It would be nice to to this after all writes have
+* It would be nice to do this after all writes have
 * been done, but that would still expose the ISUID/ISGID
 * to another app after the partial write is committed.
 *
@@ -4378,7 +4378,7 @@ top:
 uint64_t txtype = TX_LINK;
 /*
 * tmpfile is created to be in z_unlinkedobj, so remove it.
-* Also, we don't log in ZIL, be cause all previous file
+* Also, we don't log in ZIL, because all previous file
 * operation on the tmpfile are ignored by ZIL. Instead we
 * always wait for txg to sync to make sure all previous
 * operation are sync safe.
@@ -4638,7 +4638,7 @@ zfs_dirty_inode(struct inode *ip, int flags)

 #ifdef I_DIRTY_TIME
 /*
-* This is the lazytime semantic indroduced in Linux 4.0
+* This is the lazytime semantic introduced in Linux 4.0
 * This flag will only be called from update_time when lazytime is set.
 * (Note, I_DIRTY_SYNC will also set if not lazytime)
 * Fortunately mtime and ctime are managed within ZFS itself, so we
@@ -788,7 +788,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
 }

 /*
-* No execs denied will be deterimed when zfs_mode_compute() is called.
+* No execs denied will be determined when zfs_mode_compute() is called.
 */
 pflags |= acl_ids->z_aclp->z_hints &
 (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
@@ -1271,7 +1271,7 @@ zfs_rezget(znode_t *zp)
 * If the file has zero links, then it has been unlinked on the send
 * side and it must be in the received unlinked set.
 * We call zfs_znode_dmu_fini() now to prevent any accesses to the
-* stale data and to prevent automatical removal of the file in
+* stale data and to prevent automatic removal of the file in
 * zfs_zinactive(). The file will be removed either when it is removed
 * on the send side and the next incremental stream is received or
 * when the unlinked set gets processed.
@@ -58,7 +58,7 @@
 *
 * In the event of a crash or power loss, the itxs contained by each
 * dataset's on-disk ZIL will be replayed when that dataset is first
-* instantiated (e.g. if the dataset is a normal fileystem, when it is
+* instantiated (e.g. if the dataset is a normal filesystem, when it is
 * first mounted).
 *
 * As hinted at above, there is one ZIL per dataset (both the in-memory
@@ -2002,7 +2002,7 @@ zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
 /*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them. We should only do this after we
-* have written out the uberblocks (i.e. txg has been comitted) so that
+* have written out the uberblocks (i.e. txg has been committed) so that
 * don't inadvertently clean out in-memory log records that would be required
 * by zil_commit().
 */
@@ -308,7 +308,7 @@ zio_checksum_template_init(enum zio_checksum checksum, spa_t *spa)
 mutex_exit(&spa->spa_cksum_tmpls_lock);
 }

-/* convenience function to update a checksum to accomodate an encryption MAC */
+/* convenience function to update a checksum to accommodate an encryption MAC */
 static void
 zio_checksum_handle_crypt(zio_cksum_t *cksum, zio_cksum_t *saved, boolean_t xor)
 {
@@ -155,7 +155,7 @@ zio_decompress_data(enum zio_compress c, abd_t *src, void *dst,
 abd_return_buf(src, tmp, s_len);

 /*
-* Decompression shouldn't fail, because we've already verifyied
+* Decompression shouldn't fail, because we've already verified
 * the checksum. However, for extra protection (e.g. against bitflips
 * in non-ECC RAM), we handle this error (and test it).
 */
@@ -369,7 +369,7 @@ error:
 /*
 * This function handles all encryption and decryption in zfs. When
 * encrypting it expects puio to reference the plaintext and cuio to
-* reference the cphertext. cuio must have enough space for the
+* reference the ciphertext. cuio must have enough space for the
 * ciphertext + room for a MAC. datalen should be the length of the
 * plaintext / ciphertext alone.
 */
@@ -934,7 +934,7 @@ zio_crypt_bp_zero_nonportable_blkprop(blkptr_t *bp, uint64_t version)

 /*
 * At L0 we want to verify these fields to ensure that data blocks
-* can not be reinterpretted. For instance, we do not want an attacker
+* can not be reinterpreted. For instance, we do not want an attacker
 * to trick us into returning raw lz4 compressed data to the user
 * by modifying the compression bits. At higher levels, we cannot
 * enforce this policy since raw sends do not convey any information
@@ -113,7 +113,7 @@ freq_triggered(uint32_t frequency)
 return (B_TRUE);

 /*
-* Note: we still handle legacy (unscaled) frequecy values
+* Note: we still handle legacy (unscaled) frequency values
 */
 uint32_t maximum = (frequency <= 100) ? 100 : ZI_PERCENTAGE_MAX;

@@ -297,7 +297,7 @@ zpl_mount_impl(struct file_system_type *fs_type, int flags, zfs_mnt_t *zm)
 * The dsl pool lock must be released prior to calling sget().
 * It is possible sget() may block on the lock in grab_super()
 * while deactivate_super() holds that same lock and waits for
-* a txg sync. If the dsl_pool lock is held over over sget()
+* a txg sync. If the dsl_pool lock is held over sget()
 * this can prevent the pool sync and cause a deadlock.
 */
 dsl_pool_rele(dmu_objset_pool(os), FTAG);
@@ -1997,7 +1997,7 @@ zvol_create_snap_minor_cb(const char *dsname, void *arg)
 /* at this point, the dsname should name a snapshot */
 if (strchr(dsname, '@') == 0) {
 dprintf("zvol_create_snap_minor_cb(): "
-"%s is not a shapshot name\n", dsname);
+"%s is not a snapshot name\n", dsname);
 } else {
 minors_job_t *job;
 char *n = strdup(dsname);