mirror of https://git.proxmox.com/git/mirror_zfs.git (synced 2025-10-26 18:05:04 +03:00)
	Use KM_NODEBUG macro in preference to __GFP_NOWARN.
commit 23d91792ef
parent 3626ae6a70
author Brian Behlendorf
@@ -49,6 +49,7 @@
 #define KM_PUSHPAGE                     (KM_SLEEP | __GFP_HIGH)
 #define KM_VMFLAGS                      GFP_LEVEL_MASK
 #define KM_FLAGS                        __GFP_BITS_MASK
+#define KM_NODEBUG                      __GFP_NOWARN
 
 /*
  * Used internally, the kernel does not need to support this flag

@@ -390,7 +390,7 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line,
 	} else {
 		/* Marked unlikely because we should never be doing this,
 		 * we tolerate to up 2 pages but a single page is best.   */
-		if (unlikely((size > PAGE_SIZE*2) && !(flags & __GFP_NOWARN))) {
+		if (unlikely((size > PAGE_SIZE*2) && !(flags & KM_NODEBUG))) {
 			CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
 			    (unsigned long long) size, flags,
 			    kmem_alloc_used_read(), kmem_alloc_max);
@@ -605,7 +605,7 @@ kmem_alloc_debug(size_t size, int flags, const char *func, int line,
 
 	/* Marked unlikely because we should never be doing this,
 	 * we tolerate to up 2 pages but a single page is best.   */
-	if (unlikely((size > PAGE_SIZE * 2) && !(flags & __GFP_NOWARN))) {
+	if (unlikely((size > PAGE_SIZE * 2) && !(flags & KM_NODEBUG))) {
 		CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
 		    (unsigned long long) size, flags,
 		    kmem_alloc_used_read(), kmem_alloc_max);
@@ -1243,9 +1243,9 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
 	 * this usually ends up being a large allocation of ~32k because
 	 * we need to allocate enough memory for the worst case number of
 	 * cpus in the magazine, skc_mag[NR_CPUS].  Because of this we
-	 * explicitly pass __GFP_NOWARN to suppress the kmem warning */
+	 * explicitly pass KM_NODEBUG to suppress the kmem warning */
 	skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc),
-	                                      kmem_flags | __GFP_NOWARN);
+	                                      kmem_flags | KM_NODEBUG);
 	if (skc == NULL)
 		RETURN(NULL);
 
@@ -1438,7 +1438,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags)
 	}
 
 	/* Allocate a new slab for the cache */
-	sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | __GFP_NOWARN);
+	sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | KM_NODEBUG);
 	if (sks == NULL)
 		GOTO(out, sks = NULL);
 

@@ -94,7 +94,7 @@ splat_kmem_test1(struct file *file, void *arg)
 		count = 0;
 
 		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
-			ptr[i] = kmem_alloc(size, KM_SLEEP | __GFP_NOWARN);
+			ptr[i] = kmem_alloc(size, KM_SLEEP | KM_NODEBUG);
 			if (ptr[i])
 				count++;
 		}
@@ -126,7 +126,7 @@ splat_kmem_test2(struct file *file, void *arg)
 		count = 0;
 
 		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
-			ptr[i] = kmem_zalloc(size, KM_SLEEP | __GFP_NOWARN);
+			ptr[i] = kmem_zalloc(size, KM_SLEEP | KM_NODEBUG);
 			if (ptr[i])
 				count++;
 		}
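For context, the pattern being standardized above is a debug-build check that warns whenever an allocation exceeds two pages unless the caller sets the no-warn bit, and KM_NODEBUG simply gives that bit a kmem-style name. The standalone C sketch below mimics the idea outside the kernel; the flag values, the simplified kmem_alloc_debug(), and the main() harness are illustrative stand-ins, not the actual SPL definitions.

#include <stdio.h>
#include <stdlib.h>

/* Mock flag values for illustration only; the real definitions live in the
 * SPL headers and alias Linux GFP bits. */
#define __GFP_NOWARN   0x200u
#define KM_SLEEP       0x10u
#define KM_NODEBUG     __GFP_NOWARN   /* the alias introduced by this commit */

#define PAGE_SIZE      4096u

/* Simplified stand-in for the SPL debug allocator: warn about allocations
 * larger than two pages unless the caller explicitly opted out. */
static void *kmem_alloc_debug(size_t size, unsigned int flags)
{
	if (size > PAGE_SIZE * 2 && !(flags & KM_NODEBUG))
		fprintf(stderr, "Large kmem_alloc(%zu, 0x%x)\n", size, flags);

	return malloc(size);
}

int main(void)
{
	/* Warns: more than two pages and no KM_NODEBUG set. */
	void *a = kmem_alloc_debug(32 * 1024, KM_SLEEP);

	/* Silent: the caller knows the allocation is intentionally large,
	 * so it passes KM_NODEBUG (formerly spelled __GFP_NOWARN). */
	void *b = kmem_alloc_debug(32 * 1024, KM_SLEEP | KM_NODEBUG);

	free(a);
	free(b);
	return 0;
}

Built with an ordinary userspace compiler, the first call prints the large-allocation warning while the second stays quiet, which is the same opt-out the cache-create and splat call sites in the diff rely on.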