netfs: Use new folio_queue data type and iterator instead of xarray iter
Make the netfs write-side routines use the new folio_queue struct to hold a
rolling buffer of folios, with the issuer adding folios at the tail and the
collector removing them from the head as they're processed, instead of
using an xarray.

This will allow a subsequent patch to simplify the write collector.

The primary mark (as tested by folioq_is_marked()) is used to note if the
corresponding folio needs putting.
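To make the shape of the rolling buffer concrete, here is a minimal sketch (not part of the patch) of how its two ends are driven, condensed from the helpers added in fs/netfs/misc.c and the collector changes in fs/netfs/write_collect.c; the example_* function names are placeholders and the loop is simplified (the real collector only advances as far as the collected-to position):

	/* Issuer side: attach a folio to the request's rolling buffer.  A new
	 * folio_queue segment is allocated and chained onto the tail when the
	 * current one is full.
	 */
	static int example_issue_folio(struct netfs_io_request *wreq, struct folio *folio)
	{
		return netfs_buffer_append_folio(wreq, folio, false);
	}

	/* Collector side: consume slots from the head, freeing each segment
	 * once all of its slots have been cleared -- unless it is also the
	 * tail segment that the issuer is still filling.
	 */
	static void example_collect(struct netfs_io_request *wreq)
	{
		struct folio_queue *folioq = wreq->buffer;
		unsigned int slot = wreq->buffer_head_slot;

		while (folioq) {
			struct folio *folio = folioq_folio(folioq, slot);

			if (!folio)
				break;
			/* ... end writeback / unlock the folio here ... */
			folioq_clear(folioq, slot);
			if (++slot >= folioq_nr_slots(folioq)) {
				if (READ_ONCE(wreq->buffer_tail) == folioq)
					break;
				folioq = netfs_delete_buffer_head(wreq);
				slot = 0;
			}
		}
		wreq->buffer_head_slot = slot;
	}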
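Similarly, a minimal sketch of the put-mark convention, assuming folioq_mark() from the folio_queue API introduced earlier in this series is used to set the primary mark on a slot; this patch itself only tests the mark via folioq_is_marked(), and netfs_clear_buffer() (added below) performs the matching folio_put() on marked slots. The example_* names are again placeholders:

	/* Sketch: a caller that wants the buffer to own a reference marks the
	 * slot; teardown then knows to drop that reference.
	 */
	static void example_append_with_ref(struct folio_queue *fq, struct folio *folio)
	{
		unsigned int slot = folioq_append(fq, folio);

		folio_get(folio);	/* the buffer keeps its own reference */
		folioq_mark(fq, slot);	/* note that this slot needs putting */
	}

	static void example_drop_slot(struct folio_queue *fq, unsigned int slot)
	{
		struct folio *folio = folioq_folio(fq, slot);

		if (folio && folioq_is_marked(fq, slot))
			folio_put(folio);	/* matches the folio_get() above */
	}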

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20240814203850.2240469-16-dhowells@redhat.com/ # v2
Signed-off-by: Christian Brauner <brauner@kernel.org>
David Howells authored and Christian Brauner committed Sep 12, 2024
1 parent c45ebd6 commit cd0277e
Showing 8 changed files with 150 additions and 61 deletions.
9 changes: 8 additions & 1 deletion fs/netfs/internal.h
@@ -7,6 +7,7 @@

#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/folio_queue.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include <linux/fscache-cache.h>
@@ -64,6 +65,10 @@ static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
/*
* misc.c
*/
int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
bool needs_put);
struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq);
void netfs_clear_buffer(struct netfs_io_request *rreq);

/*
* objects.c
@@ -120,6 +125,7 @@ extern atomic_t netfs_n_wh_write_done;
extern atomic_t netfs_n_wh_write_failed;
extern atomic_t netfs_n_wb_lock_skip;
extern atomic_t netfs_n_wb_lock_wait;
extern atomic_t netfs_n_folioq;

int netfs_stats_show(struct seq_file *m, void *v);

@@ -153,7 +159,8 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
loff_t start,
enum netfs_io_origin origin);
void netfs_reissue_write(struct netfs_io_stream *stream,
struct netfs_io_subrequest *subreq);
struct netfs_io_subrequest *subreq,
struct iov_iter *source);
int netfs_advance_write(struct netfs_io_request *wreq,
struct netfs_io_stream *stream,
loff_t start, size_t len, bool to_eof);
76 changes: 76 additions & 0 deletions fs/netfs/misc.c
@@ -8,6 +8,82 @@
#include <linux/swap.h>
#include "internal.h"

/*
* Append a folio to the rolling queue.
*/
int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
bool needs_put)
{
struct folio_queue *tail = rreq->buffer_tail;
unsigned int slot, order = folio_order(folio);

if (WARN_ON_ONCE(!rreq->buffer && tail) ||
WARN_ON_ONCE(rreq->buffer && !tail))
return -EIO;

if (!tail || folioq_full(tail)) {
tail = kmalloc(sizeof(*tail), GFP_NOFS);
if (!tail)
return -ENOMEM;
netfs_stat(&netfs_n_folioq);
folioq_init(tail);
tail->prev = rreq->buffer_tail;
if (tail->prev)
tail->prev->next = tail;
rreq->buffer_tail = tail;
if (!rreq->buffer) {
rreq->buffer = tail;
iov_iter_folio_queue(&rreq->io_iter, ITER_SOURCE, tail, 0, 0, 0);
}
rreq->buffer_tail_slot = 0;
}

rreq->io_iter.count += PAGE_SIZE << order;

slot = folioq_append(tail, folio);
/* Store the counter after setting the slot. */
smp_store_release(&rreq->buffer_tail_slot, slot);
return 0;
}

/*
* Delete the head of a rolling queue.
*/
struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq)
{
struct folio_queue *head = wreq->buffer, *next = head->next;

if (next)
next->prev = NULL;
netfs_stat_d(&netfs_n_folioq);
kfree(head);
wreq->buffer = next;
return next;
}

/*
* Clear out a rolling queue.
*/
void netfs_clear_buffer(struct netfs_io_request *rreq)
{
struct folio_queue *p;

while ((p = rreq->buffer)) {
rreq->buffer = p->next;
for (int slot = 0; slot < folioq_nr_slots(p); slot++) {
struct folio *folio = folioq_folio(p, slot);
if (!folio)
continue;
if (folioq_is_marked(p, slot)) {
trace_netfs_folio(folio, netfs_folio_trace_put);
folio_put(folio);
}
}
netfs_stat_d(&netfs_n_folioq);
kfree(p);
}
}

/**
* netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
* @mapping: The mapping the folio belongs to.
1 change: 1 addition & 0 deletions fs/netfs/objects.c
@@ -141,6 +141,7 @@ static void netfs_free_request(struct work_struct *work)
}
kvfree(rreq->direct_bv);
}
netfs_clear_buffer(rreq);

if (atomic_dec_and_test(&ictx->io_count))
wake_up_var(&ictx->io_count);
4 changes: 3 additions & 1 deletion fs/netfs/stats.c
@@ -41,6 +41,7 @@ atomic_t netfs_n_wh_write_done;
atomic_t netfs_n_wh_write_failed;
atomic_t netfs_n_wb_lock_skip;
atomic_t netfs_n_wb_lock_wait;
atomic_t netfs_n_folioq;

int netfs_stats_show(struct seq_file *m, void *v)
{
@@ -76,9 +77,10 @@ int netfs_stats_show(struct seq_file *m, void *v)
atomic_read(&netfs_n_wh_write),
atomic_read(&netfs_n_wh_write_done),
atomic_read(&netfs_n_wh_write_failed));
seq_printf(m, "Objs : rr=%u sr=%u wsc=%u\n",
seq_printf(m, "Objs : rr=%u sr=%u foq=%u wsc=%u\n",
atomic_read(&netfs_n_rh_rreq),
atomic_read(&netfs_n_rh_sreq),
atomic_read(&netfs_n_folioq),
atomic_read(&netfs_n_wh_wstream_conflict));
seq_printf(m, "WbLock : skip=%u wait=%u\n",
atomic_read(&netfs_n_wb_lock_skip),
84 changes: 44 additions & 40 deletions fs/netfs/write_collect.c
@@ -81,56 +81,32 @@ int netfs_folio_written_back(struct folio *folio)
return gcount;
}

/*
* Get hold of a folio we have under writeback. We don't want to get the
* refcount on it.
*/
static struct folio *netfs_writeback_lookup_folio(struct netfs_io_request *wreq, loff_t pos)
{
XA_STATE(xas, &wreq->mapping->i_pages, pos / PAGE_SIZE);
struct folio *folio;

rcu_read_lock();

for (;;) {
xas_reset(&xas);
folio = xas_load(&xas);
if (xas_retry(&xas, folio))
continue;

if (!folio || xa_is_value(folio))
kdebug("R=%08x: folio %lx (%llx) not present",
wreq->debug_id, xas.xa_index, pos / PAGE_SIZE);
BUG_ON(!folio || xa_is_value(folio));

if (folio == xas_reload(&xas))
break;
}

rcu_read_unlock();

if (WARN_ONCE(!folio_test_writeback(folio),
"R=%08x: folio %lx is not under writeback\n",
wreq->debug_id, folio->index)) {
trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);
}
return folio;
}

/*
* Unlock any folios we've finished with.
*/
static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
unsigned long long collected_to,
unsigned int *notes)
{
struct folio_queue *folioq = wreq->buffer;
unsigned int slot = wreq->buffer_head_slot;

if (slot >= folioq_nr_slots(folioq)) {
folioq = netfs_delete_buffer_head(wreq);
slot = 0;
}

for (;;) {
struct folio *folio;
struct netfs_folio *finfo;
unsigned long long fpos, fend;
size_t fsize, flen;

folio = netfs_writeback_lookup_folio(wreq, wreq->cleaned_to);
folio = folioq_folio(folioq, slot);
if (WARN_ONCE(!folio_test_writeback(folio),
"R=%08x: folio %lx is not under writeback\n",
wreq->debug_id, folio->index))
trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);

fpos = folio_pos(folio);
fsize = folio_size(folio);
@@ -155,9 +131,25 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
wreq->cleaned_to = fpos + fsize;
*notes |= MADE_PROGRESS;

/* Clean up the head folioq. If we clear an entire folioq, then
* we can get rid of it provided it's not also the tail folioq
* being filled by the issuer.
*/
folioq_clear(folioq, slot);
slot++;
if (slot >= folioq_nr_slots(folioq)) {
if (READ_ONCE(wreq->buffer_tail) == folioq)
break;
folioq = netfs_delete_buffer_head(wreq);
slot = 0;
}

if (fpos + fsize >= collected_to)
break;
}

wreq->buffer = folioq;
wreq->buffer_head_slot = slot;
}

/*
@@ -188,9 +180,12 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
break;
if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
struct iov_iter source = subreq->io_iter;

iov_iter_revert(&source, subreq->len - source.count);
__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
netfs_reissue_write(stream, subreq);
netfs_reissue_write(stream, subreq, &source);
}
}
return;
@@ -200,6 +195,7 @@

do {
struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
struct iov_iter source;
unsigned long long start, len;
size_t part;
bool boundary = false;
@@ -227,6 +223,14 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
len += to->len;
}

/* Determine the set of buffers we're going to use. Each
* subreq gets a subset of a single overall contiguous buffer.
*/
source = from->io_iter;
iov_iter_revert(&source, subreq->len - source.count);
iov_iter_advance(&source, from->transferred);
source.count = len;

/* Work through the sublist. */
subreq = from;
list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
@@ -249,7 +253,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
boundary = true;

netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
netfs_reissue_write(stream, subreq);
netfs_reissue_write(stream, subreq, &source);
if (subreq == to)
break;
}
@@ -316,7 +320,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
boundary = false;
}

netfs_reissue_write(stream, subreq);
netfs_reissue_write(stream, subreq, &source);
if (!len)
break;

28 changes: 13 additions & 15 deletions fs/netfs/write_issue.c
@@ -213,37 +213,32 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
* netfs_write_subrequest_terminated() when complete.
*/
static void netfs_do_issue_write(struct netfs_io_stream *stream,
struct netfs_io_subrequest *subreq)
struct netfs_io_subrequest *subreq,
struct iov_iter *source)
{
struct netfs_io_request *wreq = subreq->rreq;
size_t size = subreq->len - subreq->transferred;

_enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);

if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
return netfs_write_subrequest_terminated(subreq, subreq->error, false);

// TODO: Use encrypted buffer
if (test_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags)) {
subreq->io_iter = wreq->io_iter;
iov_iter_advance(&subreq->io_iter,
subreq->start + subreq->transferred - wreq->start);
iov_iter_truncate(&subreq->io_iter,
subreq->len - subreq->transferred);
} else {
iov_iter_xarray(&subreq->io_iter, ITER_SOURCE, &wreq->mapping->i_pages,
subreq->start + subreq->transferred,
subreq->len - subreq->transferred);
}
subreq->io_iter = *source;
iov_iter_advance(source, size);
iov_iter_truncate(&subreq->io_iter, size);

trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
stream->issue_write(subreq);
}

void netfs_reissue_write(struct netfs_io_stream *stream,
struct netfs_io_subrequest *subreq)
struct netfs_io_subrequest *subreq,
struct iov_iter *source)
{
__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
netfs_do_issue_write(stream, subreq);
netfs_do_issue_write(stream, subreq, source);
}

static void netfs_issue_write(struct netfs_io_request *wreq,
Expand All @@ -257,7 +252,7 @@ static void netfs_issue_write(struct netfs_io_request *wreq,

if (subreq->start + subreq->len > wreq->start + wreq->submitted)
WRITE_ONCE(wreq->submitted, subreq->start + subreq->len - wreq->start);
netfs_do_issue_write(stream, subreq);
netfs_do_issue_write(stream, subreq, &wreq->io_iter);
}

/*
@@ -422,6 +417,9 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
trace_netfs_folio(folio, netfs_folio_trace_store_plus);
}

/* Attach the folio to the rolling buffer. */
netfs_buffer_append_folio(wreq, folio, false);

/* Move the submission point forward to allow for write-streaming data
* not starting at the front of the page. We don't do write-streaming
* with the cache as the cache requires DIO alignment.
8 changes: 4 additions & 4 deletions include/linux/netfs.h
@@ -38,10 +38,6 @@ static inline void folio_start_private_2(struct folio *folio)
folio_set_private_2(folio);
}

/* Marks used on xarray-based buffers */
#define NETFS_BUF_PUT_MARK XA_MARK_0 /* - Page needs putting */
#define NETFS_BUF_PAGECACHE_MARK XA_MARK_1 /* - Page needs wb/dirty flag wrangling */

enum netfs_io_source {
NETFS_SOURCE_UNKNOWN,
NETFS_FILL_WITH_ZEROES,
@@ -233,6 +229,8 @@ struct netfs_io_request {
struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */
#define NR_IO_STREAMS 2 //wreq->nr_io_streams
struct netfs_group *group; /* Writeback group being written back */
struct folio_queue *buffer; /* Head of I/O buffer */
struct folio_queue *buffer_tail; /* Tail of I/O buffer */
struct iov_iter iter; /* Unencrypted-side iterator */
struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */
void *netfs_priv; /* Private data for the netfs */
@@ -254,6 +252,8 @@ struct netfs_io_request {
short error; /* 0 or error that occurred */
enum netfs_io_origin origin; /* Origin of the request */
bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
u8 buffer_head_slot; /* First slot in ->buffer */
u8 buffer_tail_slot; /* Next slot in ->buffer_tail */
unsigned long long i_size; /* Size of the file */
unsigned long long start; /* Start position */
atomic64_t issued_to; /* Write issuer folio cursor */