
NFSD: Implement NFSD_IO_DIRECT for NFS READ

Add an experimental option that forces NFS READ operations to use
direct I/O instead of reading through the NFS server's page cache.

There is already at least one other layer of read caching: the page
cache on NFS clients.

The server's page cache, in many cases, is unlikely to provide
additional benefit. Some benchmarks have demonstrated that the
server's page cache is actively detrimental for workloads whose
working set is larger than the server's available physical memory.

For instance, on small NFS servers, cached NFS file content can
squeeze out local memory consumers. For large sequential workloads,
an enormous amount of data flows into and out of the page cache
and is consumed by NFS clients exactly once -- caching that data
is expensive to do and totally valueless.

For now this is a hidden option that can be enabled on test
systems for benchmarking. In the longer term, this option might
be enabled persistently or per-export. When the exported file
system does not support direct I/O, NFSD falls back to using
either DONTCACHE or buffered I/O to fulfill NFS READ requests.

Suggested-by: Mike Snitzer <snitzer@kernel.org>
Reviewed-by: Mike Snitzer <snitzer@kernel.org>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: NeilBrown <neil@brown.name>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Author: Chuck Lever <chuck.lever@oracle.com>
Date:   2025-10-08 09:52:30 -04:00
Commit: d686e64e93
Parent: d7de37d6d7

4 changed files with 87 additions and 0 deletions


@@ -44,6 +44,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(nfsd_dsr_fops, nfsd_dsr_get, nfsd_dsr_set, "%llu\n");
 * Contents:
 *   %0: NFS READ will use buffered IO
 *   %1: NFS READ will use dontcache (buffered IO w/ dropbehind)
 *   %2: NFS READ will use direct IO
 *
 * This setting takes immediate effect for all NFS versions,
 * all exports, and in all NFSD net namespaces.
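Since the knob is debugfs-only for now, flipping it from a test harness is a single write. The sketch below shows one way to do that; the path /sys/kernel/debug/nfsd/io_cache_read is an assumption about where NFSD exposes this attribute, so verify it on your kernel before relying on it.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Select direct I/O for NFS READ on a test server. The debugfs
 * path below is an assumption, not confirmed by this diff.
 */
int main(void)
{
        int fd = open("/sys/kernel/debug/nfsd/io_cache_read", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "2", 1) != 1)     /* %2: NFS READ will use direct IO */
                perror("write");
        close(fd);
        return 0;
}

Writing "0" or "1" instead selects buffered or dontcache reads, per the comment above.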
@@ -64,6 +65,7 @@ static int nfsd_io_cache_read_set(void *data, u64 val)
                nfsd_io_cache_read = NFSD_IO_BUFFERED;
                break;
        case NFSD_IO_DONTCACHE:
        case NFSD_IO_DIRECT:
                /*
                 * Must disable splice_read when enabling
                 * NFSD_IO_DONTCACHE.
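The hunk cuts off mid-comment, so for orientation here is a hedged sketch of how the complete setter plausibly reads. The nfsd_disable_splice_read symbol is an assumption (the diff confirms only that splice_read must be disabled); treat this as a reading aid, not the upstream code.

/* Sketch only; reconstructed from the truncated hunk above. */
static int nfsd_io_cache_read_set(void *data, u64 val)
{
        int ret = 0;

        switch (val) {
        case NFSD_IO_BUFFERED:
                nfsd_io_cache_read = NFSD_IO_BUFFERED;
                break;
        case NFSD_IO_DONTCACHE:
        case NFSD_IO_DIRECT:
                /*
                 * Must disable splice_read for these modes: splice
                 * hands page-cache pages straight to the transport,
                 * which bypasses both dontcache and direct reads.
                 */
                nfsd_disable_splice_read = true;        /* assumed symbol */
                nfsd_io_cache_read = val;
                break;
        default:
                ret = -EINVAL;
        }
        return ret;
}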


@@ -160,6 +160,7 @@ enum {
        /* Any new NFSD_IO enum value must be added at the end */
        NFSD_IO_BUFFERED,
        NFSD_IO_DONTCACHE,
        NFSD_IO_DIRECT,
};
extern u64 nfsd_io_cache_read __read_mostly;


@@ -464,6 +464,7 @@ DEFINE_EVENT(nfsd_io_class, nfsd_##name, \
DEFINE_NFSD_IO_EVENT(read_start);
DEFINE_NFSD_IO_EVENT(read_splice);
DEFINE_NFSD_IO_EVENT(read_vector);
DEFINE_NFSD_IO_EVENT(read_direct);
DEFINE_NFSD_IO_EVENT(read_io_done);
DEFINE_NFSD_IO_EVENT(read_done);
DEFINE_NFSD_IO_EVENT(write_start);


@@ -1074,6 +1074,83 @@ __be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
        return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
}

/*
 * The byte range of the client's READ request is expanded on both ends
 * until it meets the underlying file system's direct I/O alignment
 * requirements. After the internal read is complete, the byte range of
 * the NFS READ payload is reduced to the byte range that was originally
 * requested.
 *
 * Note that a direct read can be done only when the xdr_buf containing
 * the NFS READ reply does not already have contents in its .pages array.
 * This is due to potentially restrictive alignment requirements on the
 * read buffer. When .page_len and @base are zero, the .pages array is
 * guaranteed to be page-aligned.
 */
static noinline_for_stack __be32
nfsd_direct_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
                 struct nfsd_file *nf, loff_t offset, unsigned long *count,
                 u32 *eof)
{
        u64 dio_start, dio_end;
        unsigned long v, total;
        struct iov_iter iter;
        struct kiocb kiocb;
        ssize_t host_err;
        size_t len;

        init_sync_kiocb(&kiocb, nf->nf_file);
        kiocb.ki_flags |= IOCB_DIRECT;

        /* Read a properly-aligned region of bytes into rq_bvec */
        dio_start = round_down(offset, nf->nf_dio_read_offset_align);
        dio_end = round_up((u64)offset + *count, nf->nf_dio_read_offset_align);
        kiocb.ki_pos = dio_start;

        v = 0;
        total = dio_end - dio_start;
        while (total && v < rqstp->rq_maxpages &&
               rqstp->rq_next_page < rqstp->rq_page_end) {
                len = min_t(size_t, total, PAGE_SIZE);
                bvec_set_page(&rqstp->rq_bvec[v], *rqstp->rq_next_page,
                              len, 0);
                total -= len;
                ++rqstp->rq_next_page;
                ++v;
        }

        trace_nfsd_read_direct(rqstp, fhp, offset, *count - total);
        iov_iter_bvec(&iter, ITER_DEST, rqstp->rq_bvec, v,
                      dio_end - dio_start - total);
        host_err = vfs_iocb_iter_read(nf->nf_file, &kiocb, &iter);
        if (host_err >= 0) {
                unsigned int pad = offset - dio_start;

                /* The returned payload starts after the pad */
                rqstp->rq_res.page_base = pad;

                /* Compute the count of bytes to be returned */
                if (host_err > pad + *count)
                        host_err = *count;
                else if (host_err > pad)
                        host_err -= pad;
                else
                        host_err = 0;
        } else if (unlikely(host_err == -EINVAL)) {
                struct inode *inode = d_inode(fhp->fh_dentry);

                pr_info_ratelimited("nfsd: Direct I/O alignment failure on %s/%ld\n",
                                    inode->i_sb->s_id, inode->i_ino);
                host_err = -ESERVERFAULT;
        }

        return nfsd_finish_read(rqstp, fhp, nf->nf_file, offset, count,
                                eof, host_err);
}
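To see the expand-then-trim arithmetic with concrete numbers, the standalone sketch below (plain userspace C, with the kernel's power-of-two round_down/round_up macros inlined) walks a hypothetical READ of 3000 bytes at offset 1000 against an assumed 512-byte nf_dio_read_offset_align.

#include <stdio.h>

/* Same semantics as the kernel's macros for power-of-two alignment */
#define round_down(x, a) ((x) & ~((unsigned long long)(a) - 1))
#define round_up(x, a)   round_down((x) + (a) - 1, (a))

int main(void)
{
        /* Hypothetical NFS READ from the client */
        unsigned long long offset = 1000, count = 3000;
        unsigned long long align = 512; /* assumed nf_dio_read_offset_align */

        unsigned long long dio_start = round_down(offset, align);      /* 512  */
        unsigned long long dio_end = round_up(offset + count, align);  /* 4096 */
        unsigned long long pad = offset - dio_start;                   /* 488  */

        printf("aligned read: [%llu, %llu), %llu bytes; pad=%llu\n",
               dio_start, dio_end, dio_end - dio_start, pad);
        return 0;
}

In this example the server issues a 3584-byte aligned read starting at byte 512, points rq_res.page_base at the 488-byte pad, and trims the payload back to exactly the 3000 bytes the client requested.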
/**
 * nfsd_iter_read - Perform a VFS read using an iterator
 * @rqstp: RPC transaction context
@@ -1106,6 +1183,12 @@ __be32 nfsd_iter_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
        switch (nfsd_io_cache_read) {
        case NFSD_IO_BUFFERED:
                break;
        case NFSD_IO_DIRECT:
                /* When dio_read_offset_align is zero, dio is not supported */
                if (nf->nf_dio_read_offset_align && !rqstp->rq_res.page_len)
                        return nfsd_direct_read(rqstp, fhp, nf, offset,
                                                count, eof);
                fallthrough;
        case NFSD_IO_DONTCACHE:
                if (file->f_op->fop_flags & FOP_DONTCACHE)
                        kiocb.ki_flags = IOCB_DONTCACHE;
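Putting the dispatch together: direct I/O is strictly opportunistic. The helper below is a hypothetical distillation of the decision, not upstream code; the name choose_read_path and its parameters are illustrative only.

#include <stdbool.h>

enum nfsd_io { NFSD_IO_BUFFERED, NFSD_IO_DONTCACHE, NFSD_IO_DIRECT };
enum read_path { READ_BUFFERED, READ_DONTCACHE, READ_DIRECT };

/* Hypothetical distillation of the switch in nfsd_iter_read() */
static enum read_path choose_read_path(enum nfsd_io setting,
                                       unsigned int dio_align,
                                       unsigned int page_len,
                                       bool fs_supports_dontcache)
{
        /* DIO requires a known alignment and an empty .pages array */
        if (setting == NFSD_IO_DIRECT && dio_align && !page_len)
                return READ_DIRECT;             /* nfsd_direct_read() */
        /* An unsatisfiable DIRECT request falls through to DONTCACHE */
        if ((setting == NFSD_IO_DIRECT || setting == NFSD_IO_DONTCACHE) &&
            fs_supports_dontcache)
                return READ_DONTCACHE;          /* IOCB_DONTCACHE */
        return READ_BUFFERED;
}

This mirrors the commit message: when the exported file system cannot satisfy the alignment precondition, or the reply buffer already carries page data, the request quietly degrades to DONTCACHE (if the file system sets FOP_DONTCACHE) and finally to plain buffered I/O.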