Mirror of https://git.proxmox.com/git/mirror_zfs.git
Fix stack lzjb
Reduce the kernel stack usage of lzjb_compress() by moving the uint16_t array off the stack and onto the heap. I have not measured the exact performance impact of this change, but we absolutely need to keep stack usage to a minimum. If and when this becomes an issue, we can optimize.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
This commit is contained in:
parent bf701a83c5
commit 18a89ba43d
@@ -36,7 +36,7 @@
  * source length if compression would overflow the destination buffer.
  */
 
-#include <sys/types.h>
+#include <sys/zfs_context.h>
 
 #define MATCH_BITS 6
 #define MATCH_MIN 3
@@ -54,12 +54,15 @@ lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
         int copymask = 1 << (NBBY - 1);
         int mlen, offset, hash;
         uint16_t *hp;
-        uint16_t lempel[LEMPEL_SIZE] = { 0 };
+        uint16_t *lempel;
 
+        lempel = kmem_zalloc(LEMPEL_SIZE * sizeof (uint16_t), KM_SLEEP);
         while (src < (uchar_t *)s_start + s_len) {
                 if ((copymask <<= 1) == (1 << NBBY)) {
-                        if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY)
+                        if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY) {
+                                kmem_free(lempel, LEMPEL_SIZE*sizeof(uint16_t));
                                 return (s_len);
+                        }
                         copymask = 1;
                         copymap = dst;
                         *dst++ = 0;
@@ -89,6 +92,8 @@ lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
                         *dst++ = *src++;
                 }
         }
+
+        kmem_free(lempel, LEMPEL_SIZE * sizeof (uint16_t));
         return (dst - (uchar_t *)d_start);
 }
 
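For readers less familiar with the pattern, the change boils down to: allocate the lempel hash table from the heap when lzjb_compress() is entered, and release it on every exit path, both the early return taken when the output would overflow the destination buffer and the normal return at the end. The sketch below is a userspace analogue of that discipline, not the kernel code itself: it uses calloc()/free() in place of the kmem_zalloc()/kmem_free() calls shown in the diff, and the names scratch_compress and TABLE_SIZE (and its value) are made up for illustration. Unlike kmem_zalloc(..., KM_SLEEP), calloc() can fail, so the sketch also has to handle a NULL return.

#include <stdint.h>
#include <stdlib.h>

#define TABLE_SIZE 1024   /* stands in for LEMPEL_SIZE; value is illustrative */

/*
 * Userspace analogue of the pattern applied by this commit: the scratch
 * table lives on the heap instead of the stack and is freed on every
 * return path.  scratch_compress() is not part of lzjb.c.
 */
static size_t
scratch_compress(const uint8_t *src, size_t s_len, uint8_t *dst, size_t d_len)
{
        /* Replaces the equivalent of: uint16_t table[TABLE_SIZE] = { 0 }; */
        uint16_t *table = calloc(TABLE_SIZE, sizeof (uint16_t));
        size_t i;

        if (table == NULL)
                return (s_len);   /* calloc() can fail; KM_SLEEP allocations cannot */

        for (i = 0; i < s_len; i++) {
                if (i >= d_len) {
                        /* Early exit: free before bailing out, mirroring the */
                        /* kmem_free() added in front of return (s_len).      */
                        free(table);
                        return (s_len);
                }
                /* Stand-in for the real compression work that uses the table. */
                table[src[i] % TABLE_SIZE] = (uint16_t)i;
                dst[i] = src[i];
        }

        free(table);   /* normal exit: release the table, then return the length */
        return (i);
}

Note that kmem_free() in the actual diff takes the allocation size as a second argument, so the size expression must match the one passed to kmem_zalloc(). Allocating on every call does add overhead on a hot compression path; the commit message explicitly leaves measuring and optimizing that for later, if it ever proves to be a problem.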