mirror of https://git.proxmox.com/git/mirror_zfs.git
synced 2024-12-26 03:09:34 +03:00
55d7afa4ad
Both Clang's Static Analyzer and Synopsys' Coverity would ignore assertions. Following Clang's advice, we annotate our assertions:

https://clang-analyzer.llvm.org/annotations.html#custom_assertions

This makes both Clang's Static Analyzer and Coverity properly identify assertions. This change reduced Clang's reported defects from 246 to 180. It also reduced the false positives reported by Coverity by 10, while enabling Coverity to find 9 more defects that previously were false negatives.

A couple of examples of this would be CID-1524417 and CID-1524423. After submitting a build to Coverity with the modified assertions, CID-1524417 disappeared, while the report for CID-1524423 no longer claimed that the assertion tripped.

Coincidentally, it turns out that it is possible to annotate our headers more accurately than the Coverity modeling file permits in the case of format strings. Since we can do that, and this patch annotates headers wherever `__coverity_panic__()` would have been used in the model file, we drop all models that use `__coverity_panic__()` from the model file.

Upon seeing the success in eliminating false positives involving assertions, it occurred to me that we could also modify our headers to eliminate Coverity's false positives involving byte swaps. We now have Coverity-specific byteswap macros, which do nothing, to disable Coverity's false positives when we do byte swaps. This allowed us to also drop the byteswap definitions from the model file.

Lastly, the model file has been updated beyond the mentioned deletions:

* The definitions of `umem_alloc_aligned()`, `umem_alloc()` and `umem_zalloc()` were originally implemented in a way that was intended to inform Coverity that when KM_SLEEP has been passed to these functions, they do not return NULL. A small error in how this was done was found, so we correct it.

* Definitions for `umem_cache_alloc()` and `umem_cache_free()` have been added.

In practice, no false positives were avoided by making these changes, but in the interest of correctness of future Coverity builds, we make them anyway.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Signed-off-by: Richard Yao <richard.yao@alumni.stonybrook.edu>
Closes #13902
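
As a minimal sketch of the annotation approach described above (illustrative only: `my_assert`, `assert_fail` and `my_bswap32` are hypothetical names, not the macros the patch actually touches), an assertion and a byteswap can be made analyzer-friendly along these lines:

#include <stdio.h>
#include <stdlib.h>

/* Feature test taken from the clang-analyzer documentation linked above. */
#ifndef __has_feature
#define	__has_feature(x) 0
#endif

#if __has_feature(attribute_analyzer_noreturn)
#define	ANALYZER_NORETURN	__attribute__((analyzer_noreturn))
#else
#define	ANALYZER_NORETURN
#endif

/*
 * Hypothetical assertion-failure handler. Marking it analyzer_noreturn lets
 * Clang's Static Analyzer treat a failed assertion as terminating the path.
 */
static void
assert_fail(const char *expr, const char *file, int line) ANALYZER_NORETURN;

static void
assert_fail(const char *expr, const char *file, int line)
{
	(void) fprintf(stderr, "%s:%d: assertion `%s' failed\n",
	    file, line, expr);
	abort();
}

#if defined(__COVERITY__)
/*
 * Coverity defines __COVERITY__, so headers can invoke __coverity_panic__()
 * directly instead of relying on a model in the model file.
 */
#define	my_assert(x)	((x) ? (void) 0 : __coverity_panic__())
/* No-op byteswap: stops Coverity from flagging byte-swapped values. */
#define	my_bswap32(x)	(x)
#else
#define	my_assert(x)	\
	((void) ((x) || (assert_fail(#x, __FILE__, __LINE__), 0)))
#define	my_bswap32(x)	__builtin_bswap32(x)
#endif

Under `__COVERITY__` the failure branch panics, so the analyzer treats the asserted condition as established in all following code; this is what allows the `__coverity_panic__()` models to move out of the model file and into the headers.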
360 lines
5.7 KiB
C
/*
 * Coverity Scan model
 * https://scan.coverity.com/models
 *
 * This is a modeling file for Coverity Scan.
 * Modeling helps to avoid false positives.
 *
 * - Modeling doesn't need full structs and typedefs. Rudimentary structs
 *   and similar types are sufficient.
 * - An uninitialized local pointer is not an error. It signifies that the
 *   variable could be either NULL or have some data.
 *
 * Coverity Scan doesn't pick up modifications automatically. The model file
 * must be uploaded by an admin in the analysis settings.
 *
 * Some of this initially cribbed from:
 *
 * https://github.com/kees/coverity-linux/blob/trunk/model.c
 *
 * The below model was based on the original model by Brian Behlendorf for the
 * original zfsonlinux/zfs repository. Some inspiration was taken from
 * kees/coverity-linux, specifically involving memory copies.
 */

#include <stdarg.h>

#define UMEM_DEFAULT 0x0000 /* normal -- may fail */
#define UMEM_NOFAIL 0x0100 /* Never fails */

#define NULL (0)

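/*
 * The analyzer knows nothing about the values of these globals, so branching
 * on them is the usual Coverity modeling idiom for nondeterminism: both
 * outcomes (e.g. "this allocation may or may not fail") get explored.
 */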
int condition0, condition1;

int
ddi_copyin(const void *from, void *to, size_t len, int flags)
{
	__coverity_tainted_data_argument__(from);
	__coverity_tainted_data_argument__(to);
	__coverity_writeall__(to);
}

void *
memset(void *dst, int c, size_t len)
{
	__coverity_writeall__(dst);
	return (dst);
}

void *
memmove(void *dst, void *src, size_t len)
{
	__coverity_writeall__(dst);
	return (dst);
}

void *
memcpy(void *dst, void *src, size_t len)
{
	__coverity_writeall__(dst);
	return (dst);
}

/*
 * UMEM_NOFAIL allocations never fail, so the model never returns NULL for
 * them; any other allocation may nondeterministically fail.
 */
void *
umem_alloc_aligned(size_t size, size_t align, int kmflags)
{
	(void) align;

	if ((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL)
		return (__coverity_alloc__(size));
	else if (condition0)
		return (__coverity_alloc__(size));
	else
		return (NULL);
}

void *
umem_alloc(size_t size, int kmflags)
{
	if ((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL)
		return (__coverity_alloc__(size));
	else if (condition0)
		return (__coverity_alloc__(size));
	else
		return (NULL);
}

void *
umem_zalloc(size_t size, int kmflags)
{
	if ((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL)
		return (__coverity_alloc__(size));
	else if (condition0)
		return (__coverity_alloc__(size));
	else
		return (NULL);
}

void
umem_free(void *buf, size_t size)
{
	(void) size;

	__coverity_free__(buf);
}

typedef struct {} umem_cache_t;

void *
umem_cache_alloc(umem_cache_t *skc, int flags)
{
	(void) skc;

	/* A cache allocation may sleep before returning. */
	if (condition1)
		__coverity_sleep__();

	if ((UMEM_NOFAIL & flags) == UMEM_NOFAIL)
		return (__coverity_alloc_nosize__());
	else if (condition0)
		return (__coverity_alloc_nosize__());
	else
		return (NULL);
}

void
umem_cache_free(umem_cache_t *skc, void *obj)
{
	(void) skc;

	__coverity_free__(obj);
}

void *
spl_kmem_alloc(size_t sz, int fl, const char *func, int line)
{
	(void) func;
	(void) line;

	if (condition1)
		__coverity_sleep__();

	/* A flag value of 0 is KM_SLEEP, which may block but does not fail. */
	if (fl == 0)
		return (__coverity_alloc__(sz));
	else if (condition0)
		return (__coverity_alloc__(sz));
	else
		return (NULL);
}

void *
spl_kmem_zalloc(size_t sz, int fl, const char *func, int line)
{
	(void) func;
	(void) line;

	if (condition1)
		__coverity_sleep__();

	if (fl == 0)
		return (__coverity_alloc__(sz));
	else if (condition0)
		return (__coverity_alloc__(sz));
	else
		return (NULL);
}

void
spl_kmem_free(const void *ptr, size_t sz)
{
	(void) sz;

	__coverity_free__(ptr);
}

typedef struct {} spl_kmem_cache_t;

void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	(void) skc;

	if (condition1)
		__coverity_sleep__();

	if (flags == 0)
		return (__coverity_alloc_nosize__());
	else if (condition0)
		return (__coverity_alloc_nosize__());
	else
		return (NULL);
}

void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	(void) skc;

	__coverity_free__(obj);
}

void *
malloc(size_t size)
{
	/* Return the allocation so the analyzer can track it in callers. */
	return (__coverity_alloc__(size));
}

void
free(void *buf)
{
	__coverity_free__(buf);
}

int
sched_yield(void)
{
	__coverity_sleep__();

	return (0);
}

typedef struct {} kmutex_t;
typedef struct {} krwlock_t;
typedef int krw_t;

/*
 * Coverity reportedly does not support macros, so this only works for
 * userspace.
 */
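
/*
 * In kernel builds these primitives are typically macros over the Linux
 * locking API, and the analyzer matches functions by name only, so the
 * userspace definitions below are the only ones it can intercept; the Linux
 * mutex and rw_semaphore models further down cover the kernel side instead.
 */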

void
mutex_enter(kmutex_t *mp)
{
	if (condition0)
		__coverity_sleep__();

	__coverity_exclusive_lock_acquire__(mp);
}

int
mutex_tryenter(kmutex_t *mp)
{
	if (condition0) {
		__coverity_exclusive_lock_acquire__(mp);
		return (1);
	}

	return (0);
}

void
mutex_exit(kmutex_t *mp)
{
	__coverity_exclusive_lock_release__(mp);
}

void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
	(void) rw;

	if (condition0)
		__coverity_sleep__();

	__coverity_recursive_lock_acquire__(rwlp);
}

void
rw_exit(krwlock_t *rwlp)
{
	__coverity_recursive_lock_release__(rwlp);
}

int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
	(void) rw;

	if (condition0) {
		__coverity_recursive_lock_acquire__(rwlp);
		return (1);
	}

	return (0);
}

/* Thus, we fall back to the Linux kernel locks */
struct {} mutex;
struct {} rw_semaphore;

void
mutex_lock(struct mutex *lock)
{
	if (condition0)
		__coverity_sleep__();

	__coverity_exclusive_lock_acquire__(lock);
}

void
mutex_unlock(struct mutex *lock)
{
	__coverity_exclusive_lock_release__(lock);
}

void
down_read(struct rw_semaphore *sem)
{
	if (condition0)
		__coverity_sleep__();

	__coverity_recursive_lock_acquire__(sem);
}

void
down_write(struct rw_semaphore *sem)
{
	if (condition0)
		__coverity_sleep__();

	__coverity_recursive_lock_acquire__(sem);
}

int
down_read_trylock(struct rw_semaphore *sem)
{
	if (condition0) {
		__coverity_recursive_lock_acquire__(sem);
		return (1);
	}

	return (0);
}

int
down_write_trylock(struct rw_semaphore *sem)
{
	if (condition0) {
		__coverity_recursive_lock_acquire__(sem);
		return (1);
	}

	return (0);
}

void
up_read(struct rw_semaphore *sem)
{
	__coverity_recursive_lock_release__(sem);
}

void
up_write(struct rw_semaphore *sem)
{
	__coverity_recursive_lock_release__(sem);
}

int
__cond_resched(void)
{
	if (condition0)
		__coverity_sleep__();
}