mirror_zfs/tests/zfs-tests/cmd/randwritecomp/randwritecomp.c
Serapheim Dimitropoulos d2734cce68 OpenZFS 9166 - zfs storage pool checkpoint
Details about the motivation of this feature and its usage can
be found in this blogpost:

    https://sdimitro.github.io/post/zpool-checkpoint/

A lightning talk of this feature can be found here:
https://www.youtube.com/watch?v=fPQA8K40jAM

Implementation details can be found in big block comment of
spa_checkpoint.c

Side-changes that are relevant to this commit but not explained
elsewhere:

* renames the tree members of "struct metaslab" to be shorter without
  losing meaning

* space_map_{alloc,truncate}() accept a block size as a
  parameter. The reason is that in the current state all space
  maps that we allocate through the DMU use a global tunable
  (space_map_blksz) which defaults to 4KB. This is ok for metaslab
  space maps in terms of bandwidth since they are scattered all
  over the disk. But for other space maps this default is probably
  not what we want. Examples are device removal's vdev_obsolete_sm
  or vdev_checkpoint_sm from this review. Both of these have a
  1:1 relationship with each vdev and could benefit from a bigger
  block size.
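
  To make the new calling pattern concrete, here is a minimal,
  self-contained mock (not the actual space_map_alloc()/
  space_map_truncate() code; the function name and block-size values
  below are illustrative assumptions): callers now pass the block size
  explicitly instead of always inheriting the global space_map_blksz
  default.

    #include <stdio.h>
    #include <stdint.h>

    #define DEFAULT_SM_BLKSZ  (4ULL << 10)    /* the global space_map_blksz default (4KB) */
    #define PER_VDEV_SM_BLKSZ (128ULL << 10)  /* hypothetical larger size for per-vdev maps */

    /* Stand-in for space_map_alloc(): the block size is now an explicit argument. */
    static void
    mock_space_map_alloc(const char *purpose, uint64_t blksz)
    {
        (void) printf("%s space map: %llu-byte blocks\n",
            purpose, (unsigned long long)blksz);
    }

    int
    main(void)
    {
        /* Metaslab space maps are many and scattered; the default is fine. */
        mock_space_map_alloc("metaslab", DEFAULT_SM_BLKSZ);

        /* vdev_obsolete_sm / vdev_checkpoint_sm are 1:1 with a vdev; bigger blocks help. */
        mock_space_map_alloc("per-vdev checkpoint", PER_VDEV_SM_BLKSZ);
        return (0);
    }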

Porting notes:

* The part of dsl_scan_sync() which handles async destroys has
  been moved into the new dsl_process_async_destroys() function.

* Remove "VERIFY(!(flags & FWRITE))" in "kernel.c" so zhack can write
  to block device backed pools.

* ZTS:
  * Fix get_txg() in zpool_sync_001_pos due to "checkpoint_txg".

  * Don't use large dd block sizes on /dev/urandom under Linux in
    checkpoint_capacity.

  * Adopt Delphix-OS's setting of 4 (spa_asize_inflation =
    SPA_DVAS_PER_BP + 1) for the checkpoint_capacity test to speed
    its attempts to fill the pool.

  * Create the base and nested pools with sync=disabled to speed up
    the "setup" phase.

  * Clear labels in test pool between checkpoint tests to avoid
    duplicate pool issues.

  * The import_rewind_device_replaced test has been marked as "known
    to fail" for the reasons listed in its DISCLAIMER.

  * New module parameters:

      zfs_spa_discard_memory_limit,
      zfs_remove_max_bytes_pause (not documented - debugging only)
      vdev_max_ms_count (formerly metaslabs_per_vdev)
      vdev_min_ms_count
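
      For reference, one quick way to inspect these at runtime on Linux
      (an illustrative sketch only: it assumes the zfs module is loaded
      and that the tunables are exported under
      /sys/module/zfs/parameters with exactly the names listed above,
      which may vary by platform and version):

        #include <stdio.h>

        /* Print one zfs module parameter from sysfs, or note that it is absent. */
        static void
        print_param(const char *name)
        {
            char path[256], val[64];
            FILE *fp;

            (void) snprintf(path, sizeof (path),
                "/sys/module/zfs/parameters/%s", name);
            if ((fp = fopen(path, "r")) == NULL) {
                (void) printf("%s: <not available>\n", name);
                return;
            }
            if (fgets(val, sizeof (val), fp) != NULL)
                (void) printf("%s: %s", name, val);
            (void) fclose(fp);
        }

        int
        main(void)
        {
            print_param("zfs_spa_discard_memory_limit");
            print_param("vdev_max_ms_count");
            print_param("vdev_min_ms_count");
            return (0);
        }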

Authored by: Serapheim Dimitropoulos <serapheim.dimitro@delphix.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: John Kennedy <john.kennedy@delphix.com>
Reviewed by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed by: Brian Behlendorf <behlendorf1@llnl.gov>
Approved by: Richard Lowe <richlowe@richlowe.net>
Ported-by: Tim Chase <tim@chase2k.com>
Signed-off-by: Tim Chase <tim@chase2k.com>

OpenZFS-issue: https://illumos.org/issues/9166
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/7159fdb8
Closes #7570
2018-06-26 10:07:42 -07:00


/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2017 by Delphix. All rights reserved.
 */

/*
 * The following is defined so the source can use
 * lrand48() and srand48().
 */
#define	__EXTENSIONS__

#include <stdint.h>
#include <string.h>
#include "../file_common.h"
/*
 * The following sample was derived from real-world data
 * of a production Oracle database.
 */
static uint64_t size_distribution[] = {
	0,
	1499018,
	352084,
	1503485,
	4206227,
	5626657,
	5387001,
	3733756,
	2233094,
	874652,
	238635,
	81434,
	33357,
	13106,
	2009,
	1,
	23660,
};

static uint64_t distribution_n;

static uint8_t randbuf[BLOCKSZ];
/*
 * pwrite(2) wrapper: exit on failure and warn if fewer than nbytes
 * bytes were written.
 */
static void
rwc_pwrite(int fd, const void *buf, size_t nbytes, off_t offset)
{
	size_t nleft = nbytes;
	ssize_t nwrite = 0;

	nwrite = pwrite(fd, buf, nbytes, offset);
	if (nwrite < 0) {
		perror("pwrite");
		exit(EXIT_FAILURE);
	}

	nleft -= nwrite;
	if (nleft != 0) {
		(void) fprintf(stderr, "warning: pwrite: "
		    "wrote %zu out of %zu bytes\n",
		    (nbytes - nleft), nbytes);
	}
}
/*
 * Fill buf with BLOCKSZ bytes of pre-generated random data, then
 * possibly zero a prefix whose length depends on a bucket drawn from
 * size_distribution (weighted by its counts), so blocks compress by
 * varying amounts. The first 32-bit word is always re-randomized.
 */
static void
fillbuf(char *buf)
{
	uint64_t rv = lrand48() % distribution_n;
	uint64_t sum = 0;

	uint64_t i;
	for (i = 0;
	    i < sizeof (size_distribution) / sizeof (size_distribution[0]);
	    i++) {
		sum += size_distribution[i];
		if (rv < sum)
			break;
	}

	bcopy(randbuf, buf, BLOCKSZ);
	if (i == 0)
		bzero(buf, BLOCKSZ - 10);
	else if (i < 16)
		bzero(buf, BLOCKSZ - i * 512 + 256);
	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	((uint32_t *)buf)[0] = lrand48();
}
static void
exit_usage(void)
{
	(void) printf("usage: ");
	(void) printf("randwritecomp [-s] <file> [nwrites]\n");
	exit(EXIT_FAILURE);
}
/*
 * Write n blocks (or forever if n == -1) sequentially, starting at a
 * random block; after the last block of the file is written, a new
 * random starting block is chosen.
 */
static void
sequential_writes(int fd, char *buf, uint64_t nblocks, int64_t n)
{
	for (int64_t i = 0; n == -1 || i < n; i++) {
		fillbuf(buf);

		static uint64_t j = 0;
		if (j == 0)
			j = lrand48() % nblocks;
		rwc_pwrite(fd, buf, BLOCKSZ, j * BLOCKSZ);
		j++;
		if (j >= nblocks)
			j = 0;
	}
}
/*
 * Write n blocks (or forever if n == -1) at random block offsets.
 */
static void
random_writes(int fd, char *buf, uint64_t nblocks, int64_t n)
{
	for (int64_t i = 0; n == -1 || i < n; i++) {
		fillbuf(buf);
		rwc_pwrite(fd, buf, BLOCKSZ, (lrand48() % nblocks) * BLOCKSZ);
	}
}
int
main(int argc, char *argv[])
{
	int fd, err;
	char *filename = NULL;
	char buf[BLOCKSZ];
	struct stat ss;
	uint64_t nblocks;
	int64_t n = -1;
	int sequential = 0;

	if (argc < 2)
		exit_usage();

	argv++;
	if (strcmp("-s", argv[0]) == 0) {
		sequential = 1;
		argv++;
	}

	if (argv[0] == NULL)
		exit_usage();
	else
		filename = argv[0];

	argv++;
	if (argv[0] != NULL)
		n = strtoull(argv[0], NULL, 0);

	fd = open(filename, O_RDWR|O_CREAT, 0666);
	if (fd < 0) {
		perror("open");
		exit(EXIT_FAILURE);
	}

	err = fstat(fd, &ss);
	if (err != 0) {
		(void) fprintf(stderr,
		    "error: fstat returned error code %d\n", err);
		exit(EXIT_FAILURE);
	}

	nblocks = ss.st_size / BLOCKSZ;
	if (nblocks == 0) {
		(void) fprintf(stderr, "error: "
		    "file is too small (min allowed size is %d bytes)\n",
		    BLOCKSZ);
		exit(EXIT_FAILURE);
	}

	/* Seed the PRNG and pre-generate the random source block. */
	srand48(getpid());
	for (int i = 0; i < BLOCKSZ; i++)
		randbuf[i] = lrand48();

	distribution_n = 0;
	for (uint64_t i = 0;
	    i < sizeof (size_distribution) / sizeof (size_distribution[0]);
	    i++) {
		distribution_n += size_distribution[i];
	}

	if (sequential)
		sequential_writes(fd, buf, nblocks, n);
	else
		random_writes(fd, buf, nblocks, n);

	return (0);
}
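
For reference, a typical invocation against a pre-created file looks like the
following (paths and counts are arbitrary examples, not taken from the test
suite):

    # 1000 writes of compressible data at random block offsets
    ./randwritecomp /tank/fs/testfile 1000

    # same workload, but walking the file sequentially from a random start
    ./randwritecomp -s /tank/fs/testfile 1000

The target file must already be at least one block long (the nblocks == 0
check rejects anything smaller), and omitting the write count leaves n at -1,
so the program keeps writing until it is killed.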