Merge tag 'for-6.15-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

 - subpage mode fixes:
     - access correct object (folio) when looking up bit offset
     - fix assertion condition for number of blocks per folio
     - fix upper boundary of locking range in hole punch

 - zoned fixes:
     - fix potential deadlock caught by lockdep when zone reporting and
       device freeze run in parallel
     - fix zone write pointer mismatch and NULL pointer dereference when
       metadata are converted from DUP to RAID1

 - fix error handling when reloc inode creation fails

 - in tree-checker, unify error code for header level check

 - block layer: add helpers to read zone capacity

* tag 'for-6.15-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: zoned: skip reporting zone for new block group
  block: introduce zone capacity helper
  btrfs: tree-checker: adjust error code for header level check
  btrfs: fix invalid inode pointer after failure to create reloc inode
  btrfs: zoned: return EIO on RAID1 block group write pointer mismatch
  btrfs: fix the ASSERT() inside GET_SUBPAGE_BITMAP()
  btrfs: avoid page_lockend underflow in btrfs_punch_hole_lock_range()
  btrfs: subpage: access correct object when reading bitmap start in subpage_calc_start_bit()
Linus Torvalds committed Apr 22, 2025
2 parents e4b51cb + 866bafa commit bc33723
Showing 6 changed files with 72 additions and 31 deletions.
9 changes: 7 additions & 2 deletions fs/btrfs/file.c
@@ -2104,15 +2104,20 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
 	 * will always return true.
 	 * So here we need to do extra page alignment for
 	 * filemap_range_has_page().
+	 *
+	 * And do not decrease page_lockend right now, as it can be 0.
 	 */
 	const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
-	const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
+	const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE);
 
 	while (1) {
 		truncate_pagecache_range(inode, lockstart, lockend);
 
 		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 			    cached_state);
+		/* The same page or adjacent pages. */
+		if (page_lockend <= page_lockstart)
+			break;
 		/*
 		 * We can't have ordered extents in the range, nor dirty/writeback
 		 * pages, because we have locked the inode's VFS lock in exclusive
@@ -2124,7 +2129,7 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
 		 * we do, unlock the range and retry.
 		 */
 		if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
-					    page_lockend))
+					    page_lockend - 1))
 			break;
 
 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
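The underflow these two hunks fix is easy to reproduce: page_lockend is a u64, so when the punched hole lies entirely inside the first page, round_down(lockend + 1, PAGE_SIZE) is 0 and the old "- 1" wrapped it to U64_MAX, making filemap_range_has_page() inspect essentially the whole mapping. A minimal userspace sketch, assuming 4 KiB pages and modelling the kernel's round_up()/round_down() macros:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
/* Modelled after the kernel's power-of-two alignment macros. */
#define round_up(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define round_down(x, a) ((x) & ~((a) - 1))

int main(void)
{
	/* Punch a hole that lies entirely inside the first page. */
	uint64_t lockstart = 0, lockend = 2047;

	uint64_t page_lockstart = round_up(lockstart, PAGE_SIZE);
	uint64_t old_page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1; /* wraps */
	uint64_t new_page_lockend = round_down(lockend + 1, PAGE_SIZE);

	printf("old page_lockend = %llu (U64_MAX)\n",
	       (unsigned long long)old_page_lockend);
	printf("new page_lockend = %llu, page_lockstart = %llu: break early\n",
	       (unsigned long long)new_page_lockend,
	       (unsigned long long)page_lockstart);
	return 0;
}

With the fix, the "page_lockend <= page_lockstart" check breaks out before filemap_range_has_page() is ever called for such in-page ranges.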
2 changes: 1 addition & 1 deletion fs/btrfs/relocation.c
@@ -3803,7 +3803,7 @@ static noinline_for_stack struct inode *create_reloc_inode(
 	if (ret) {
 		if (inode)
 			iput(&inode->vfs_inode);
-		inode = ERR_PTR(ret);
+		return ERR_PTR(ret);
 	}
 	return &inode->vfs_inode;
 }
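Why returning early matters: the function hands back &inode->vfs_inode, and vfs_inode is embedded at a nonzero offset inside struct btrfs_inode. Applying that offset to an ERR_PTR-encoded value pushes it out of the error range, so the caller's IS_ERR() check passes and a bogus pointer gets dereferenced. A userspace sketch of the pointer arithmetic (stand-in struct layout; the real offset differs but is likewise nonzero, and taking the member address of an invalid pointer is formally UB but models what the old code did):

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define ERR_PTR(e)	((void *)(long)(e))
#define ENOMEM		12

struct inode { int i_mode; };
struct btrfs_inode {
	char before[512];	/* stand-in for the fields preceding vfs_inode */
	struct inode vfs_inode;
};

int main(void)
{
	/* Old code: inode = ERR_PTR(-ENOMEM); ... return &inode->vfs_inode; */
	struct btrfs_inode *inode = ERR_PTR(-ENOMEM);
	struct inode *ret = &inode->vfs_inode;	/* adds offsetof(..., vfs_inode) */

	printf("IS_ERR(inode) = %d\n", IS_ERR(inode));	/* 1 */
	printf("IS_ERR(ret)   = %d\n", IS_ERR(ret));	/* 0: the error is lost */
	return 0;
}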
4 changes: 2 additions & 2 deletions fs/btrfs/subpage.c
@@ -204,7 +204,7 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
 			btrfs_blocks_per_folio(fs_info, folio);		\
 									\
 	btrfs_subpage_assert(fs_info, folio, start, len);		\
-	__start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
+	__start_bit = offset_in_folio(folio, start) >> fs_info->sectorsize_bits; \
 	__start_bit += blocks_per_folio * btrfs_bitmap_nr_##name;	\
 	__start_bit;							\
 })
@@ -666,7 +666,7 @@ IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
 			btrfs_blocks_per_folio(fs_info, folio);		\
 	const struct btrfs_subpage *subpage = folio_get_private(folio); \
 									\
-	ASSERT(blocks_per_folio < BITS_PER_LONG);			\
+	ASSERT(blocks_per_folio <= BITS_PER_LONG);			\
 	*dst = bitmap_read(subpage->bitmaps,				\
 			   blocks_per_folio * btrfs_bitmap_nr_##name,	\
 			   blocks_per_folio);				\
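Both hunks only bite with large folios. For the first: with 4 KiB pages and 4 KiB blocks, offset_in_page(start) reduces every block-aligned start modulo PAGE_SIZE, so any block past the folio's first page computes bit 0, while offset_in_folio() measures from the folio start. For the second: a 256 KiB folio of 4 KiB blocks holds exactly 64 blocks, equal to BITS_PER_LONG on 64-bit, which the old strict "<" wrongly rejected even though bitmap_read() can return a full word. A sketch of the first bug, using stand-in helpers and illustrative offsets:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define SECTORSIZE_BITS	12	/* 4 KiB blocks */

/* Stand-ins for the kernel helpers; folio_pos is the folio's start offset. */
static unsigned long offset_in_page(unsigned long pos)
{
	return pos & (PAGE_SIZE - 1);
}
static unsigned long offset_in_folio(unsigned long folio_pos, unsigned long pos)
{
	return pos - folio_pos;
}

int main(void)
{
	unsigned long folio_pos = 0x100000;		 /* 64 KiB folio at 1 MiB */
	unsigned long start = folio_pos + 5 * PAGE_SIZE; /* 6th block of the folio */

	/* Old code: a page-aligned start always yields bit 0, the wrong block. */
	printf("offset_in_page  -> bit %lu\n",
	       offset_in_page(start) >> SECTORSIZE_BITS);
	/* Fixed code: bit 5, the block's actual position inside the folio. */
	printf("offset_in_folio -> bit %lu\n",
	       offset_in_folio(folio_pos, start) >> SECTORSIZE_BITS);
	return 0;
}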
2 changes: 1 addition & 1 deletion fs/btrfs/tree-checker.c
@@ -2235,7 +2235,7 @@ int btrfs_verify_level_key(struct extent_buffer *eb,
 		btrfs_err(fs_info,
 "tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
 			  eb->start, check->level, found_level);
-		return -EIO;
+		return -EUCLEAN;
 	}
 
 	if (!check->has_first_key)
19 changes: 16 additions & 3 deletions fs/btrfs/zoned.c
@@ -1277,7 +1277,7 @@ struct zone_info {
 
 static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
 				struct zone_info *info, unsigned long *active,
-				struct btrfs_chunk_map *map)
+				struct btrfs_chunk_map *map, bool new)
 {
 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
 	struct btrfs_device *device;
@@ -1307,6 +1307,8 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
 		return 0;
 	}
 
+	ASSERT(!new || btrfs_dev_is_empty_zone(device, info->physical));
+
 	/* This zone will be used for allocation, so mark this zone non-empty. */
 	btrfs_dev_clear_zone_empty(device, info->physical);
 
@@ -1319,6 +1321,18 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
 	 * to determine the allocation offset within the zone.
 	 */
 	WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
+
+	if (new) {
+		sector_t capacity;
+
+		capacity = bdev_zone_capacity(device->bdev, info->physical >> SECTOR_SHIFT);
+		up_read(&dev_replace->rwsem);
+		info->alloc_offset = 0;
+		info->capacity = capacity << SECTOR_SHIFT;
+
+		return 0;
+	}
+
 	nofs_flag = memalloc_nofs_save();
 	ret = btrfs_get_dev_zone(device, info->physical, &zone);
 	memalloc_nofs_restore(nofs_flag);
@@ -1588,7 +1602,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 	}
 
 	for (i = 0; i < map->num_stripes; i++) {
-		ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
+		ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map, new);
 		if (ret)
 			goto out;
 
@@ -1659,7 +1673,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		 * stripe.
 		 */
 		cache->alloc_offset = cache->zone_capacity;
-		ret = 0;
 	}
 
 out:
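The new "if (new)" path avoids btrfs_get_dev_zone(), i.e. a zone report, entirely: per the merge summary that report could deadlock with a device freeze, and for a just-created block group it is also unnecessary, since its zones are known empty (hence the ASSERT), the write pointer sits at the zone start (alloc_offset = 0), and the capacity can be read from cached queue limits. Note the unit conversions around bdev_zone_capacity(): btrfs tracks physical in bytes while the block layer speaks 512-byte sectors. A sketch with illustrative numbers only:

#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors, as in the kernel */

int main(void)
{
	unsigned long long physical = 2ULL << 30;	/* zone start: 2 GiB, in bytes */
	unsigned long long cap_sectors = 196608;	/* hypothetical helper result */

	/* bdev_zone_capacity() takes sectors; btrfs stores bytes. */
	unsigned long long sector = physical >> SECTOR_SHIFT;	     /* 4194304 */
	unsigned long long cap_bytes = cap_sectors << SECTOR_SHIFT;  /* 96 MiB */

	printf("query sector %llu -> capacity %llu bytes\n", sector, cap_bytes);
	return 0;
}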
67 changes: 45 additions & 22 deletions include/linux/blkdev.h
@@ -712,35 +712,13 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
 		(q->limits.features & BLK_FEAT_ZONED);
 }
 
-#ifdef CONFIG_BLK_DEV_ZONED
-static inline unsigned int disk_nr_zones(struct gendisk *disk)
-{
-	return disk->nr_zones;
-}
-bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
-#else /* CONFIG_BLK_DEV_ZONED */
-static inline unsigned int disk_nr_zones(struct gendisk *disk)
-{
-	return 0;
-}
-static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
-{
-	return false;
-}
-#endif /* CONFIG_BLK_DEV_ZONED */
-
 static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
 {
 	if (!blk_queue_is_zoned(disk->queue))
 		return 0;
 	return sector >> ilog2(disk->queue->limits.chunk_sectors);
 }
 
-static inline unsigned int bdev_nr_zones(struct block_device *bdev)
-{
-	return disk_nr_zones(bdev->bd_disk);
-}
-
 static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
 {
 	return bdev->bd_disk->queue->limits.max_open_zones;
@@ -847,6 +825,51 @@ static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
 	       (sb->s_blocksize_bits - SECTOR_SHIFT);
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+	return disk->nr_zones;
+}
+bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
+
+/**
+ * disk_zone_capacity - returns the zone capacity of zone containing @sector
+ * @disk:	disk to work with
+ * @sector:	sector number within the querying zone
+ *
+ * Returns the zone capacity of a zone containing @sector. @sector can be any
+ * sector in the zone.
+ */
+static inline unsigned int disk_zone_capacity(struct gendisk *disk,
+					      sector_t sector)
+{
+	sector_t zone_sectors = disk->queue->limits.chunk_sectors;
+
+	if (sector + zone_sectors >= get_capacity(disk))
+		return disk->last_zone_capacity;
+	return disk->zone_capacity;
+}
+static inline unsigned int bdev_zone_capacity(struct block_device *bdev,
+					      sector_t pos)
+{
+	return disk_zone_capacity(bdev->bd_disk, pos);
+}
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+	return 0;
+}
+static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+{
+	return false;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
+
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
+{
+	return disk_nr_zones(bdev->bd_disk);
+}
+
 int bdev_disk_changed(struct gendisk *disk, bool invalidate);
 
 void put_disk(struct gendisk *disk);
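The doc comment above is the contract btrfs relies on: the gendisk caches two values because every zone of a zoned device shares one capacity except possibly the last, which is a runt when the disk size is not a multiple of the zone size. The "sector + zone_sectors >= get_capacity(disk)" test is how the helper decides the queried sector falls in that last zone; the btrfs caller always passes a zone-start sector, which the test handles exactly. A standalone sketch of the test with made-up geometry:

#include <stdio.h>
#include <stdbool.h>

/* Mirrors disk_zone_capacity()'s last-zone test, outside the kernel. */
static bool in_last_zone(unsigned long long sector,
			 unsigned long long zone_sectors,
			 unsigned long long disk_capacity)
{
	return sector + zone_sectors >= disk_capacity;
}

int main(void)
{
	unsigned long long zone_sectors = 524288;   /* 256 MiB zones */
	unsigned long long capacity = 10000000;     /* 20th zone is a runt */

	/* Zone starts: 0 is a full zone; 19 * 524288 = 9961472 starts the last. */
	printf("zone at 0:       last=%d\n",
	       in_last_zone(0, zone_sectors, capacity));
	printf("zone at 9961472: last=%d\n",
	       in_last_zone(9961472, zone_sectors, capacity));
	return 0;
}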
