btrfs: access eb::blocking_writers according to ACCESS_ONCE policies
A nice writeup of the LKMM (Linux Kernel Memory Model) rules for
access-once marking policies can be found at
https://lwn.net/Articles/799218/#Access-Marking%20Policies .

The locked and unlocked accesses to eb::blocking_writers should be
annotated accordingly, following these rules (a sketch follows the
list):

Writes:

- locked write must use ONCE for the store; reads under the same lock may stay plain
- unlocked write must use ONCE

Reads:

- unlocked read must use ONCE
- locked read may use plain read iff not mixed with unlocked read
- an unlocked read followed by a locked read must use ONCE for both
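
As a rough sketch of how these rules combine (hypothetical demo names,
not the btrfs code): a field that is written under a rwlock but also
read locklessly must mark the store and every read that can race with
it; only a purely locked read could stay plain:

  #include <linux/spinlock.h>	/* rwlock_t */
  #include <linux/compiler.h>	/* READ_ONCE, WRITE_ONCE */

  struct demo {
  	rwlock_t lock;
  	int flag;	/* written under ->lock, also read without it */
  };

  /* Returns 1 with ->lock held for reading, 0 otherwise. */
  static int demo_try_read_lock(struct demo *d)
  {
  	/* unlocked read: must use ONCE */
  	if (READ_ONCE(d->flag))
  		return 0;

  	read_lock(&d->lock);
  	/* unlocked read then locked read: must use ONCE */
  	if (READ_ONCE(d->flag)) {
  		read_unlock(&d->lock);
  		return 0;
  	}
  	return 1;
  }

  static void demo_set_flag(struct demo *d)
  {
  	write_lock(&d->lock);
  	/* locked write, mixed with unlocked readers: must use ONCE */
  	WRITE_ONCE(d->flag, 1);
  	write_unlock(&d->lock);
  }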

There's one difference at the assembly level: btrfs_tree_read_lock_atomic
and btrfs_try_tree_read_lock used the cached value and did not
re-evaluate it after taking the lock. This could have missed some
opportunities to take the lock when the blocking writers changed between
the two checks, but the window is just a few instructions long. As these
are try-locks, the callers handle that. (A condensed before/after sketch
follows.)
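
Schematically, the difference described above comes down to load
fusing: plain reads allow the compiler to reuse the first load for the
check made after the lock is taken, while READ_ONCE() guarantees a
second load. A condensed before/after of the pattern from the hunks
below (not the verbatim function bodies):

  /* before: plain reads, the compiler may fuse the two loads */
  if (eb->blocking_writers)
  	return 0;
  read_lock(&eb->lock);
  if (eb->blocking_writers) {	/* may test the cached value */
  	read_unlock(&eb->lock);
  	return 0;
  }

  /* after: READ_ONCE() forces a refetch after taking the lock */
  if (READ_ONCE(eb->blocking_writers))
  	return 0;
  read_lock(&eb->lock);
  if (READ_ONCE(eb->blocking_writers)) {
  	read_unlock(&eb->lock);
  	return 0;
  }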

Signed-off-by: David Sterba <dsterba@suse.com>
David Sterba committed Nov 18, 2019
1 parent 40d38f5 commit a447798
Showing 1 changed file with 21 additions and 11 deletions: fs/btrfs/locking.c
@@ -109,7 +109,7 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
 	if (eb->blocking_writers == 0) {
 		btrfs_assert_spinning_writers_put(eb);
 		btrfs_assert_tree_locked(eb);
-		eb->blocking_writers = 1;
+		WRITE_ONCE(eb->blocking_writers, 1);
 		write_unlock(&eb->lock);
 	}
 }
@@ -145,7 +145,7 @@ void btrfs_tree_read_lock(struct extent_buffer *eb)
 		}
 		read_unlock(&eb->lock);
 		wait_event(eb->write_lock_wq,
-			   eb->blocking_writers == 0);
+			   READ_ONCE(eb->blocking_writers) == 0);
 		goto again;
 	}
 	btrfs_assert_tree_read_locks_get(eb);
@@ -160,11 +160,12 @@ void btrfs_tree_read_lock(struct extent_buffer *eb)
  */
 int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
 {
-	if (eb->blocking_writers)
+	if (READ_ONCE(eb->blocking_writers))
 		return 0;
 
 	read_lock(&eb->lock);
-	if (eb->blocking_writers) {
+	/* Refetch value after lock */
+	if (READ_ONCE(eb->blocking_writers)) {
 		read_unlock(&eb->lock);
 		return 0;
 	}
@@ -180,13 +181,14 @@ int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
  */
 int btrfs_try_tree_read_lock(struct extent_buffer *eb)
 {
-	if (eb->blocking_writers)
+	if (READ_ONCE(eb->blocking_writers))
 		return 0;
 
 	if (!read_trylock(&eb->lock))
 		return 0;
 
-	if (eb->blocking_writers) {
+	/* Refetch value after lock */
+	if (READ_ONCE(eb->blocking_writers)) {
 		read_unlock(&eb->lock);
 		return 0;
 	}
@@ -202,11 +204,12 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
  */
 int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 {
-	if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
+	if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers))
 		return 0;
 
 	write_lock(&eb->lock);
-	if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
+	/* Refetch value after lock */
+	if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) {
 		write_unlock(&eb->lock);
 		return 0;
 	}
@@ -277,9 +280,11 @@ void btrfs_tree_lock(struct extent_buffer *eb)
 	WARN_ON(eb->lock_owner == current->pid);
 again:
 	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
-	wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
+	wait_event(eb->write_lock_wq, READ_ONCE(eb->blocking_writers) == 0);
 	write_lock(&eb->lock);
-	if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
+	/* Refetch value after lock */
+	if (atomic_read(&eb->blocking_readers) ||
+	    READ_ONCE(eb->blocking_writers)) {
 		write_unlock(&eb->lock);
 		goto again;
 	}
@@ -294,6 +299,10 @@ void btrfs_tree_lock(struct extent_buffer *eb)
  */
 void btrfs_tree_unlock(struct extent_buffer *eb)
 {
+	/*
+	 * This is read both locked and unlocked but always by the same thread
+	 * that already owns the lock so we don't need to use READ_ONCE
+	 */
 	int blockers = eb->blocking_writers;
 
 	BUG_ON(blockers > 1);
@@ -305,7 +314,8 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
 
 	if (blockers) {
 		btrfs_assert_no_spinning_writers(eb);
-		eb->blocking_writers = 0;
+		/* Unlocked write */
+		WRITE_ONCE(eb->blocking_writers, 0);
 		/*
 		 * We need to order modifying blocking_writers above with
 		 * actually waking up the sleepers to ensure they see the
