Skip to content

Commit

Permalink
dax: Convert dax_lock_mapping_entry to XArray
Browse files Browse the repository at this point in the history
Instead of always retrying when we slept, only retry if the page has
moved.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
  • Loading branch information
Matthew Wilcox committed Oct 21, 2018
1 parent 9fc747f commit 9f32d22
Showing 1 changed file with 35 additions and 48 deletions.
83 changes: 35 additions & 48 deletions fs/dax.c
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,17 @@ static void *dax_make_locked(unsigned long pfn, unsigned long flags)
DAX_LOCKED);
}

/*
 * Pack a pfn and its DAX flag bits (DAX_PMD etc.) into an XArray
 * value entry.
 */
static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	unsigned long v = flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT);

	return xa_mk_value(v);
}

/*
 * Build the XArray entry describing @page.  A head page is taken to
 * represent a PMD-sized mapping and gets the DAX_PMD flag.
 */
static void *dax_make_page_entry(struct page *page)
{
	unsigned long flags = PageHead(page) ? DAX_PMD : 0;

	return dax_make_entry(page_to_pfn_t(page), flags);
}

static bool dax_is_locked(void *entry)
{
return xa_to_value(entry) & DAX_LOCKED;
Expand Down Expand Up @@ -487,33 +498,16 @@ static struct page *dax_busy_page(void *entry)
return NULL;
}

/*
 * Wait callback used by dax_lock_mapping_entry(): drop the RCU read
 * lock around the sleep, then re-acquire it before returning to the
 * lookup loop.  Always returns true so the caller knows it must
 * revalidate its state.
 */
static bool entry_wait_revalidate(void)
{
	/* The lock is dropped before schedule() so the sleep happens
	 * outside the RCU read-side critical section. */
	rcu_read_unlock();
	schedule();
	rcu_read_lock();

	/*
	 * Tell __get_unlocked_mapping_entry() to take a break, we need
	 * to revalidate page->mapping after dropping locks
	 */
	return true;
}

/*
 * NOTE(review): this span is GitHub diff residue — the +/- markers were
 * stripped, so the OLD (slot/__get_unlocked_mapping_entry based) and
 * NEW (XA_STATE/xas based) versions of dax_lock_mapping_entry() are
 * interleaved below and the span is not compilable as-is.  Lines are
 * annotated old:/new: where the diff context makes that identifiable;
 * treat the annotations as reviewer inference, not fact.
 *
 * Contract (both versions): take the DAX entry lock for @page's entry
 * in its mapping; return true if the entry was locked, false if @page
 * no longer belongs to a DAX mapping.
 */
bool dax_lock_mapping_entry(struct page *page)
{
	/* old: locals for the slot-based implementation */
pgoff_t index;
struct inode *inode;
bool did_lock = false;
void *entry = NULL, **slot;
struct address_space *mapping;
	/* new: xas state; the array pointer is filled in inside the loop
	 * once page->mapping has been sampled */
XA_STATE(xas, NULL, 0);
void *entry;

rcu_read_lock();
for (;;) {
	/* old: assignment form / new: declaration form of the same read */
mapping = READ_ONCE(page->mapping);
struct address_space *mapping = READ_ONCE(page->mapping);

if (!dax_mapping(mapping))
	/* old: break (reports did_lock == false) / new: return false */
break;
return false;

/*
 * In the device-dax case there's no need to lock, a
Expand All @@ -522,47 +516,40 @@ bool dax_lock_mapping_entry(struct page *page)
 * otherwise we would not have a valid pfn_to_page()
 * translation.
 */
	/* old: cache the inode and report success via did_lock */
inode = mapping->host;
if (S_ISCHR(inode->i_mode)) {
did_lock = true;
break;
}
	/* new: same S_ISCHR check, returning directly */
if (S_ISCHR(mapping->host->i_mode))
return true;

	/* old: lock i_pages directly / new: lock through the xa_state */
xa_lock_irq(&mapping->i_pages);
xas.xa = &mapping->i_pages;
xas_lock_irq(&xas);
	/* mapping may have changed while unlocked; retry if so */
if (mapping != page->mapping) {
xa_unlock_irq(&mapping->i_pages);
xas_unlock_irq(&xas);
continue;
}
	/* old: slot-based lookup that always retried after sleeping */
index = page->index;

entry = __get_unlocked_mapping_entry(mapping, index, &slot,
entry_wait_revalidate);
if (!entry) {
xa_unlock_irq(&mapping->i_pages);
break;
} else if (IS_ERR(entry)) {
WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
continue;
	/* new: xas lookup; per the commit message, only retry if the
	 * page moved (pfn mismatch) while we slept */
xas_set(&xas, page->index);
entry = xas_load(&xas);
if (dax_is_locked(entry)) {
entry = get_unlocked_entry(&xas);
/* Did the page move while we slept? */
if (dax_to_pfn(entry) != page_to_pfn(page)) {
xas_unlock_irq(&xas);
continue;
}
}
	/* old: mark the slot locked and fall out of the loop */
lock_slot(mapping, slot);
did_lock = true;
xa_unlock_irq(&mapping->i_pages);
break;
	/* new: lock the entry and return success directly */
dax_lock_entry(&xas, entry);
xas_unlock_irq(&xas);
return true;
}
rcu_read_unlock();

	/* old: report whether the loop managed to lock the entry */
return did_lock;
}

/*
 * NOTE(review): diff residue — old and new lines of
 * dax_unlock_mapping_entry() are interleaved below (markers stripped);
 * not compilable as-is.  Contract: drop the entry lock taken by
 * dax_lock_mapping_entry(); a no-op for device-dax (S_ISCHR) inodes,
 * which were never locked.
 */
void dax_unlock_mapping_entry(struct page *page)
{
struct address_space *mapping = page->mapping;
	/* old: cached inode / new: xas state addressing this page's entry */
struct inode *inode = mapping->host;
XA_STATE(xas, &mapping->i_pages, page->index);

	/* old and new forms of the same device-dax early-out */
if (S_ISCHR(inode->i_mode))
if (S_ISCHR(mapping->host->i_mode))
return;

	/* old: helper unlock by index / new: xas unlock, storing back a
	 * freshly built (unlocked) entry for the page */
unlock_mapping_entry(mapping, page->index);
dax_unlock_entry(&xas, dax_make_page_entry(page));
}

/*
Expand Down

0 comments on commit 9f32d22

Please sign in to comment.