afs: Don't use folio->private to record partial modification
AFS currently uses folio->private to store the range of bytes within a
folio that have been modified - the idea being that if we have, say, a 2MiB
folio and someone writes a single byte, we only have to write back that
single page and not the whole 2MiB folio - thereby saving on network
bandwidth.

Remove this, at least for now, and accept the extra network load (which
doesn't matter in the common case of writing a whole file at a time from
beginning to end).

This makes folio->private available for netfslib to use.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
David Howells committed Dec 24, 2023
1 parent 5f5ce7b commit a34847d
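For context on what is being removed: the old scheme packed the dirty byte range into the single pointer-sized folio->private field, one half holding the inclusive lower bound and the other the exclusive upper bound. Below is a minimal user-space sketch of that encoding, mirroring the helpers deleted from fs/afs/internal.h further down. folio_shift_stub(), dirty_resolution() and pack_dirty() are illustrative stand-ins of mine, and the 64-bit field sizes are assumed throughout.

/* Stand-alone sketch of the encoding this commit removes; it mirrors the
 * helpers deleted from fs/afs/internal.h below. folio_shift() is stubbed
 * out (a 2MiB folio); in the kernel it comes from the folio itself.
 * Assumes 64-bit, where unsigned long can hold two 31-bit bounds.
 */
#include <stdio.h>

#define __AFS_FOLIO_PRIV_MASK	0x7fffffffUL	/* 64-bit values */
#define __AFS_FOLIO_PRIV_SHIFT	32

static int folio_shift_stub(void) { return 21; }	/* log2(2MiB) */

/* Resolution only drops if the folio outgrows the field (not here). */
static unsigned int dirty_resolution(void)
{
	int shift = folio_shift_stub() - (__AFS_FOLIO_PRIV_SHIFT - 1);
	return shift > 0 ? shift : 0;
}

static unsigned long pack_dirty(size_t from, size_t to)
{
	unsigned int res = dirty_resolution();
	from >>= res;
	to = (to - 1) >> res;	/* store the last covered unit */
	return ((unsigned long)to << __AFS_FOLIO_PRIV_SHIFT) | from;
}

int main(void)
{
	/* One dirty byte at offset 100 of a 2MiB folio. */
	unsigned long priv = pack_dirty(100, 101);
	size_t from = (priv & __AFS_FOLIO_PRIV_MASK) << dirty_resolution();
	size_t to = (((priv >> __AFS_FOLIO_PRIV_SHIFT) & __AFS_FOLIO_PRIV_MASK) + 1)
		<< dirty_resolution();

	printf("write back [%zu, %zu), not the whole 2MiB\n", from, to);
	return 0;
}

Recording just the modified range let writeback trim the store RPC to those bytes; after this change the whole folio is written back instead, as the message above accepts.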
Showing 4 changed files with 42 additions and 285 deletions.
67 changes: 0 additions & 67 deletions fs/afs/file.c
@@ -386,63 +386,6 @@ const struct netfs_request_ops afs_req_ops = {
	.issue_read	= afs_issue_read,
};

/*
 * Adjust the dirty region of the page on truncation or full invalidation,
 * getting rid of the markers altogether if the region is entirely invalidated.
 */
static void afs_invalidate_dirty(struct folio *folio, size_t offset,
				 size_t length)
{
	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
	unsigned long priv;
	unsigned int f, t, end = offset + length;

	priv = (unsigned long)folio_get_private(folio);

	/* we clean up only if the entire page is being invalidated */
	if (offset == 0 && length == folio_size(folio))
		goto full_invalidate;

	/* If the page was dirtied by page_mkwrite(), the PTE stays writable
	 * and we don't get another notification to tell us to expand it
	 * again.
	 */
	if (afs_is_folio_dirty_mmapped(priv))
		return;

	/* We may need to shorten the dirty region */
	f = afs_folio_dirty_from(folio, priv);
	t = afs_folio_dirty_to(folio, priv);

	if (t <= offset || f >= end)
		return; /* Doesn't overlap */

	if (f < offset && t > end)
		return; /* Splits the dirty region - just absorb it */

	if (f >= offset && t <= end)
		goto undirty;

	if (f < offset)
		t = offset;
	else
		f = end;
	if (f == t)
		goto undirty;

	priv = afs_folio_dirty(folio, f, t);
	folio_change_private(folio, (void *)priv);
	trace_afs_folio_dirty(vnode, tracepoint_string("trunc"), folio);
	return;

undirty:
	trace_afs_folio_dirty(vnode, tracepoint_string("undirty"), folio);
	folio_clear_dirty_for_io(folio);
full_invalidate:
	trace_afs_folio_dirty(vnode, tracepoint_string("inval"), folio);
	folio_detach_private(folio);
}

/*
 * invalidate part or all of a page
 * - release a page and clean up its private data if offset is 0 (indicating
@@ -453,11 +396,6 @@ static void afs_invalidate_folio(struct folio *folio, size_t offset,
{
	_enter("{%lu},%zu,%zu", folio->index, offset, length);

	BUG_ON(!folio_test_locked(folio));

	if (folio_get_private(folio))
		afs_invalidate_dirty(folio, offset, length);

	folio_wait_fscache(folio);
	_leave("");
}
@@ -485,11 +423,6 @@ static bool afs_release_folio(struct folio *folio, gfp_t gfp)
	fscache_note_page_release(afs_vnode_cache(vnode));
#endif

	if (folio_test_private(folio)) {
		trace_afs_folio_dirty(vnode, tracepoint_string("rel"), folio);
		folio_detach_private(folio);
	}

	/* Indicate that the folio can be released */
	_leave(" = T");
	return true;
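The deleted afs_invalidate_dirty() above boils down to interval arithmetic on the dirty range [f, t) against the invalidated range [offset, end). Here is a compact stand-alone rendering of just that case analysis; shrink_dirty() is my naming for illustration, not something from the patch.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the case analysis in the deleted afs_invalidate_dirty():
 * returns false if nothing remains dirty, else narrows [*f, *t) in place.
 */
static bool shrink_dirty(size_t *f, size_t *t, size_t offset, size_t end)
{
	if (*t <= offset || *f >= end)
		return true;		/* doesn't overlap: unchanged */
	if (*f < offset && *t > end)
		return true;		/* splits the dirty region: absorb it */
	if (*f >= offset && *t <= end)
		return false;		/* entirely invalidated: undirty */
	if (*f < offset)
		*t = offset;		/* overlap at the end: trim the tail */
	else
		*f = end;		/* overlap at the start: trim the head */
	return *f != *t;		/* empty after trimming: undirty too */
}

int main(void)
{
	size_t f = 100, t = 300;

	if (shrink_dirty(&f, &t, 200, 400))	/* invalidate bytes 200-399 */
		printf("still dirty: [%zu, %zu)\n", f, t);	/* [100, 200) */
	return 0;
}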
56 changes: 0 additions & 56 deletions fs/afs/internal.h
@@ -894,62 +894,6 @@ static inline void afs_invalidate_cache(struct afs_vnode *vnode, unsigned int fl
			   i_size_read(&vnode->netfs.inode), flags);
}

/*
 * We use folio->private to hold the amount of the folio that we've written to,
 * splitting the field into two parts. However, we need to represent a range
 * 0...FOLIO_SIZE, so we reduce the resolution if the size of the folio
 * exceeds what we can encode.
 */
#ifdef CONFIG_64BIT
#define __AFS_FOLIO_PRIV_MASK		0x7fffffffUL
#define __AFS_FOLIO_PRIV_SHIFT		32
#define __AFS_FOLIO_PRIV_MMAPPED	0x80000000UL
#else
#define __AFS_FOLIO_PRIV_MASK		0x7fffUL
#define __AFS_FOLIO_PRIV_SHIFT		16
#define __AFS_FOLIO_PRIV_MMAPPED	0x8000UL
#endif

static inline unsigned int afs_folio_dirty_resolution(struct folio *folio)
{
	int shift = folio_shift(folio) - (__AFS_FOLIO_PRIV_SHIFT - 1);
	return (shift > 0) ? shift : 0;
}

static inline size_t afs_folio_dirty_from(struct folio *folio, unsigned long priv)
{
	unsigned long x = priv & __AFS_FOLIO_PRIV_MASK;

	/* The lower bound is inclusive */
	return x << afs_folio_dirty_resolution(folio);
}

static inline size_t afs_folio_dirty_to(struct folio *folio, unsigned long priv)
{
	unsigned long x = (priv >> __AFS_FOLIO_PRIV_SHIFT) & __AFS_FOLIO_PRIV_MASK;

	/* The upper bound is immediately beyond the region */
	return (x + 1) << afs_folio_dirty_resolution(folio);
}

static inline unsigned long afs_folio_dirty(struct folio *folio, size_t from, size_t to)
{
	unsigned int res = afs_folio_dirty_resolution(folio);
	from >>= res;
	to = (to - 1) >> res;
	return (to << __AFS_FOLIO_PRIV_SHIFT) | from;
}

static inline unsigned long afs_folio_dirty_mmapped(unsigned long priv)
{
	return priv | __AFS_FOLIO_PRIV_MMAPPED;
}

static inline bool afs_is_folio_dirty_mmapped(unsigned long priv)
{
	return priv & __AFS_FOLIO_PRIV_MMAPPED;
}

#include <trace/events/afs.h>

/*****************************************************************************/
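To make the "reduce the resolution" comment above concrete: on 32-bit, each bound gets only 15 bits, so a 2MiB folio (folio_shift() == 21) has to be tracked in 64-byte units. A small worked check under those assumptions, with MASK and SHIFT standing in for the 32-bit values of the deleted macros:

#include <assert.h>

#define MASK	0x7fffUL	/* __AFS_FOLIO_PRIV_MASK on 32-bit */
#define SHIFT	16		/* __AFS_FOLIO_PRIV_SHIFT on 32-bit */

int main(void)
{
	int res = 21 - (SHIFT - 1);		/* 21 - 15 = 6: 64-byte units */
	unsigned long from = 100 >> res;	/* dirty byte 100 -> unit 1 */
	unsigned long to = (101 - 1) >> res;	/* last dirty byte -> unit 1 */
	unsigned long priv = (to << SHIFT) | from;

	/* Decoding rounds outwards: the single dirty byte at 100 comes back
	 * as [64, 128), coarser but always covering the real range.
	 */
	assert(((priv & MASK) << res) == 64);
	assert(((((priv >> SHIFT) & MASK) + 1) << res) == 128);
	return 0;
}

The flag bit left over, __AFS_FOLIO_PRIV_MMAPPED, marked folios dirtied through page_mkwrite(), which the deleted afs_invalidate_dirty() in fs/afs/file.c declined to shrink.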
(Diffs for the remaining two changed files not shown.)
