Mirror of https://git.proxmox.com/git/mirror_zfs.git
Only commit the ZIL once in zpl_writepages() (msync() case).
Currently, using msync() results in the following code path:

    sys_msync -> zpl_fsync -> filemap_write_and_wait_range ->
    zpl_writepages -> write_cache_pages -> zpl_putpage

In such a code path, zil_commit() is called as part of zpl_putpage(). This means that for each page, the write is handed to the DMU, the ZIL is committed, and only then do we move on to the next page. As one might imagine, this results in atrocious performance where there is a large number of pages to write: instead of committing a batch of N writes, we do N commits containing one page each. In some extreme cases this can result in msync() being ~700 times slower than it should be, as well as very inefficient use of ZIL resources.

This patch fixes the issue by making sure that the requested writes are batched and then committed only once. Unfortunately, the implementation is somewhat non-trivial because there is no way to run write_cache_pages() in SYNC mode (so that we get all pages) without making it wait on the writeback tag for each page.

The solution implemented here is composed of two parts:

- A new callback system was added to the ZIL, which allows the caller to be notified when its ITX gets written to stable storage. One nice thing is that the callback is called not only in zil_commit() but in zil_sync() as well, which means that the caller doesn't have to care whether the write ended up in the ZIL or the DMU: it will get notified as soon as it's safe, period. This is an improvement over dmu_tx_callback_register(), which was used previously and only supports DMU writes. The rationale for this change is to allow zpl_putpage() to be notified when a ZIL commit is completed without having to block on zil_commit() itself.

- zpl_writepages() now calls write_cache_pages() in non-SYNC mode, which prevents (1) write_cache_pages() from blocking, and (2) zpl_putpage() from issuing ZIL commits. zpl_writepages() issues the commit itself instead of relying on zpl_putpage() to do it, thus nicely batching the writes. Note, however, that we still have to call write_cache_pages() again in SYNC mode because of an edge case documented in the implementation of write_cache_pages() whereby it will not give us all dirty pages when running in non-SYNC mode. Thus we need to run it at least once in SYNC mode to make sure we honor persistency guarantees. This only happens when the pages are modified at the same time msync() is running, which should be rare. In most cases there won't be any additional pages and this second call will do nothing.

Note that this change also fixes a bug related to #907, whereby calling msync() on pages that were already handed over to the DMU in a previous writepages() call would make msync() block until the next TXG sync instead of returning as soon as the ZIL commit is complete. The new callback system fixes that problem.

Signed-off-by: Richard Yao <ryao@gentoo.org>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #1849
Closes #907
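For illustration, the following is a minimal userspace sketch (not part of the patch; all names are toy analogs of the ZIL structures in the diff below) of the batching pattern this change introduces: each write registers a completion callback instead of blocking on a commit, and a single commit at the end fires every pending callback.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef void (*zil_callback_t)(void *data);

    struct itx {                    /* toy analog of the ZIL itx */
    	zil_callback_t	itx_callback;
    	void		*itx_callback_data;
    	struct itx	*next;
    };

    static struct itx *commit_list; /* toy analog of zl_itx_commit_list */

    /* Analog of zfs_log_write(): queue the write, remember the callback. */
    static void
    log_write(zil_callback_t cb, void *data)
    {
    	struct itx *itx = malloc(sizeof (*itx));

    	itx->itx_callback = cb;
    	itx->itx_callback_data = data;
    	itx->next = commit_list;
    	commit_list = itx;
    }

    /* Analog of zil_commit(): one flush, then notify every pending itx. */
    static void
    commit(void)
    {
    	while (commit_list != NULL) {
    		struct itx *itx = commit_list;

    		commit_list = itx->next;
    		if (itx->itx_callback != NULL)
    			itx->itx_callback(itx->itx_callback_data);
    		free(itx);
    	}
    }

    /* Analog of zfs_putpage_commit_cb(): safe to end writeback here. */
    static void
    page_done(void *data)
    {
    	printf("page %ld is now on stable storage\n", (long)(intptr_t)data);
    }

    int
    main(void)
    {
    	/* N writes are queued without blocking ... */
    	for (long i = 0; i < 4; i++)
    		log_write(page_done, (void *)(intptr_t)i);
    	/* ... and a single commit makes them all persistent. */
    	commit();
    	return (0);
    }

In the real patch, zfs_putpage() plays the role of log_write() (registering zfs_putpage_commit_cb) and zpl_writepages() issues the single zil_commit().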
parent 14cecbb159
commit 119a394ab0
@@ -353,7 +353,8 @@ extern void zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
 extern void zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
     znode_t *sdzp, char *sname, znode_t *tdzp, char *dname, znode_t *szp);
 extern void zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
-    znode_t *zp, offset_t off, ssize_t len, int ioflag);
+    znode_t *zp, offset_t off, ssize_t len, int ioflag,
+    zil_callback_t callback, void *callback_data);
 extern void zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype,
     znode_t *zp, uint64_t off, uint64_t len);
 extern void zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
@@ -361,11 +361,15 @@ typedef enum {
 	WR_NUM_STATES	/* number of states */
 } itx_wr_state_t;
 
+typedef void (*zil_callback_t)(void *data);
+
 typedef struct itx {
 	list_node_t	itx_node;	/* linkage on zl_itx_list */
 	void		*itx_private;	/* type-specific opaque data */
 	itx_wr_state_t	itx_wr_state;	/* write state */
 	uint8_t		itx_sync;	/* synchronous transaction */
+	zil_callback_t	itx_callback;	/* Called when the itx is persistent */
+	void		*itx_callback_data; /* User data for the callback */
 	uint64_t	itx_sod;	/* record size on disk */
 	uint64_t	itx_oid;	/* object id */
 	lr_t		itx_lr;		/* common part of log record */
@@ -445,21 +445,27 @@ zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
 }
 
 /*
- * Handles TX_WRITE transactions.
+ * zfs_log_write() handles TX_WRITE transactions. The specified callback is
+ * called as soon as the write is on stable storage (be it via a DMU sync or a
+ * ZIL commit).
  */
 long zfs_immediate_write_sz = 32768;
 
 void
 zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
-    znode_t *zp, offset_t off, ssize_t resid, int ioflag)
+    znode_t *zp, offset_t off, ssize_t resid, int ioflag,
+    zil_callback_t callback, void *callback_data)
 {
 	itx_wr_state_t write_state;
 	boolean_t slogging;
 	uintptr_t fsync_cnt;
 	ssize_t immediate_write_sz;
 
-	if (zil_replaying(zilog, tx) || zp->z_unlinked)
+	if (zil_replaying(zilog, tx) || zp->z_unlinked) {
+		if (callback != NULL)
+			callback(callback_data);
 		return;
+	}
 
 	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
 	    ? 0 : (ssize_t)zfs_immediate_write_sz;
@@ -516,6 +522,8 @@ zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
 		    (fsync_cnt == 0))
 			itx->itx_sync = B_FALSE;
 
+		itx->itx_callback = callback;
+		itx->itx_callback_data = callback_data;
 		zil_itx_assign(zilog, itx, tx);
 
 		off += len;
@@ -892,7 +892,8 @@ again:
 
 	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
 
-	zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);
+	zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
+	    NULL, NULL);
 	dmu_tx_commit(tx);
 
 	if (error != 0)
@@ -3822,19 +3823,11 @@ top:
 EXPORT_SYMBOL(zfs_link);
 
 static void
-zfs_putpage_commit_cb(void *arg, int error)
+zfs_putpage_commit_cb(void *arg)
 {
 	struct page *pp = arg;
 
-	if (error) {
-		__set_page_dirty_nobuffers(pp);
-
-		if (error != ECANCELED)
-			SetPageError(pp);
-	} else {
-		ClearPageError(pp);
-	}
-
+	ClearPageError(pp);
 	end_page_writeback(pp);
 }
 
@@ -3868,7 +3861,6 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	uint64_t mtime[2], ctime[2];
 	sa_bulk_attr_t bulk[3];
 	int cnt = 0;
-	int sync;
 
 	ZFS_ENTER(zsb);
 	ZFS_VERIFY_ZP(zp);
@@ -3909,11 +3901,6 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	rl = zfs_range_lock(zp, pgoff, pglen, RL_WRITER);
 	tx = dmu_tx_create(zsb->z_os);
 
-	sync = ((zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) ||
-	    (wbc->sync_mode == WB_SYNC_ALL));
-	if (!sync)
-		dmu_tx_callback_register(tx, zfs_putpage_commit_cb, pp);
-
 	dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
 
 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
@@ -3923,16 +3910,10 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 		if (err == ERESTART)
 			dmu_tx_wait(tx);
 
-		/* Will call all registered commit callbacks */
 		dmu_tx_abort(tx);
-
-		/*
-		 * For the synchronous case the commit callback must be
-		 * explicitly called because there is no registered callback.
-		 */
-		if (sync)
-			zfs_putpage_commit_cb(pp, ECANCELED);
-
+		__set_page_dirty_nobuffers(pp);
+		ClearPageError(pp);
+		end_page_writeback(pp);
 		zfs_range_unlock(rl);
 		ZFS_EXIT(zsb);
 		return (err);
@@ -3955,14 +3936,19 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 
 	err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
 
-	zfs_log_write(zsb->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0);
+	zfs_log_write(zsb->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0,
+	    zfs_putpage_commit_cb, pp);
 	dmu_tx_commit(tx);
 
 	zfs_range_unlock(rl);
 
-	if (sync) {
+	if (wbc->sync_mode != WB_SYNC_NONE) {
+		/*
+		 * Note that this is rarely called under writepages(), because
+		 * writepages() normally handles the entire commit for
+		 * performance reasons.
+		 */
 		zil_commit(zsb->z_log, zp->z_id);
-		zfs_putpage_commit_cb(pp, err);
 	}
 
 	ZFS_EXIT(zsb);
@@ -1184,6 +1184,8 @@ zil_itx_create(uint64_t txtype, size_t lrsize)
 	itx->itx_sod = lrsize;	/* if write & WR_NEED_COPY will be increased */
 	itx->itx_lr.lrc_seq = 0;	/* defensive */
 	itx->itx_sync = B_TRUE;	/* default is synchronous */
+	itx->itx_callback = NULL;
+	itx->itx_callback_data = NULL;
 
 	return (itx);
 }
@@ -1209,6 +1211,8 @@ zil_itxg_clean(itxs_t *itxs)
 
 	list = &itxs->i_sync_list;
 	while ((itx = list_head(list)) != NULL) {
+		if (itx->itx_callback != NULL)
+			itx->itx_callback(itx->itx_callback_data);
 		list_remove(list, itx);
 		kmem_free(itx, offsetof(itx_t, itx_lr) +
 		    itx->itx_lr.lrc_reclen);
@@ -1219,6 +1223,8 @@ zil_itxg_clean(itxs_t *itxs)
 	while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
 		list = &ian->ia_list;
 		while ((itx = list_head(list)) != NULL) {
+			if (itx->itx_callback != NULL)
+				itx->itx_callback(itx->itx_callback_data);
 			list_remove(list, itx);
 			kmem_free(itx, offsetof(itx_t, itx_lr) +
 			    itx->itx_lr.lrc_reclen);
@@ -1285,6 +1291,8 @@ zil_remove_async(zilog_t *zilog, uint64_t oid)
 		mutex_exit(&itxg->itxg_lock);
 	}
 	while ((itx = list_head(&clean_list)) != NULL) {
+		if (itx->itx_callback != NULL)
+			itx->itx_callback(itx->itx_callback_data);
 		list_remove(&clean_list, itx);
 		kmem_free(itx, offsetof(itx_t, itx_lr) +
 		    itx->itx_lr.lrc_reclen);
@@ -1530,15 +1538,13 @@ zil_commit_writer(zilog_t *zilog)
 	}
 
 	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
-	while ((itx = list_head(&zilog->zl_itx_commit_list))) {
+	for (itx = list_head(&zilog->zl_itx_commit_list); itx != NULL;
+	    itx = list_next(&zilog->zl_itx_commit_list, itx)) {
 		txg = itx->itx_lr.lrc_txg;
 		ASSERT(txg);
 
 		if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa))
 			lwb = zil_lwb_commit(zilog, itx, lwb);
-		list_remove(&zilog->zl_itx_commit_list, itx);
-		kmem_free(itx, offsetof(itx_t, itx_lr)
-		    + itx->itx_lr.lrc_reclen);
 	}
 	DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
 
@@ -1560,6 +1566,17 @@ zil_commit_writer(zilog_t *zilog)
 	if (error || lwb == NULL)
 		txg_wait_synced(zilog->zl_dmu_pool, 0);
 
+	while ((itx = list_head(&zilog->zl_itx_commit_list))) {
+		txg = itx->itx_lr.lrc_txg;
+		ASSERT(txg);
+
+		if (itx->itx_callback != NULL)
+			itx->itx_callback(itx->itx_callback_data);
+		list_remove(&zilog->zl_itx_commit_list, itx);
+		kmem_free(itx, offsetof(itx_t, itx_lr)
+		    + itx->itx_lr.lrc_reclen);
+	}
+
 	mutex_enter(&zilog->zl_lock);
 
 	/*
@@ -23,6 +23,7 @@
  */
 
 
+#include <sys/dmu_objset.h>
 #include <sys/zfs_vfsops.h>
 #include <sys/zfs_vnops.h>
 #include <sys/zfs_znode.h>
@@ -420,7 +421,43 @@ zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
 static int
 zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
 {
-	return write_cache_pages(mapping, wbc, zpl_putpage, mapping);
+	znode_t		*zp = ITOZ(mapping->host);
+	zfs_sb_t	*zsb = ITOZSB(mapping->host);
+	enum writeback_sync_modes sync_mode;
+	int result;
+
+	ZFS_ENTER(zsb);
+	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
+		wbc->sync_mode = WB_SYNC_ALL;
+	ZFS_EXIT(zsb);
+	sync_mode = wbc->sync_mode;
+
+	/*
+	 * We don't want to run write_cache_pages() in SYNC mode here, because
+	 * that would make putpage() wait for a single page to be committed to
+	 * disk every single time, resulting in atrocious performance. Instead
+	 * we run it once in non-SYNC mode so that the ZIL gets all the data,
+	 * and then we commit it all in one go.
+	 */
+	wbc->sync_mode = WB_SYNC_NONE;
+	result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
+	if (sync_mode != wbc->sync_mode) {
+		ZFS_ENTER(zsb);
+		ZFS_VERIFY_ZP(zp);
+		zil_commit(zsb->z_log, zp->z_id);
+		ZFS_EXIT(zsb);
+
+		/*
+		 * We need to call write_cache_pages() again (we can't just
+		 * return after the commit) because the previous call in
+		 * non-SYNC mode does not guarantee that we got all the dirty
+		 * pages (see the implementation of write_cache_pages() for
+		 * details). That being said, this is a no-op in most cases.
+		 */
+		wbc->sync_mode = sync_mode;
+		result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
+	}
+	return (result);
 }
 
 /*
@@ -432,7 +469,10 @@ zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
 static int
 zpl_writepage(struct page *pp, struct writeback_control *wbc)
 {
-	return zpl_putpage(pp, wbc, pp->mapping);
+	if (ITOZSB(pp->mapping->host)->z_os->os_sync == ZFS_SYNC_ALWAYS)
+		wbc->sync_mode = WB_SYNC_ALL;
+
+	return (zpl_putpage(pp, wbc, pp->mapping));
 }
 
 /*