Skip to content

Commit

Permalink
writeback: remove bdi->congested_fn
Browse files Browse the repository at this point in the history
Except for pktdvd, the only places setting congested bits are file
systems that allocate their own backing_dev_info structures.  And
pktdvd is a deprecated driver that isn't useful in stacked setups
either.  So remove the dead congested_fn stacking infrastructure.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Song Liu <song@kernel.org>
Acked-by: David Sterba <dsterba@suse.com>
[axboe: fixup unused variables in bcache/request.c]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
  • Loading branch information
Christoph Hellwig authored and Jens Axboe committed Jul 8, 2020
1 parent 8c911f3 commit 21cf866
Show file tree
Hide file tree
Showing 23 changed files with 1 addition and 468 deletions.
59 changes: 0 additions & 59 deletions drivers/block/drbd/drbd_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -2415,62 +2415,6 @@ static void drbd_cleanup(void)
pr_info("module cleanup done.\n");
}

/**
 * drbd_congested() - Callback for the flusher thread
 * @congested_data: User data (the struct drbd_device this callback was
 *                  registered for via congested_data)
 * @bdi_bits: Bits the BDI flusher thread is currently interested in
 *
 * Returns 1<<WB_async_congested and/or 1<<WB_sync_congested if we are congested.
 * Also records a one-character diagnostic code in device->congestion_reason:
 * 'd' = frozen IO, 'c' = callback pending, 'b' = backing device congested,
 * 'a' = backing device congested and network congested, 'n' = network
 * congested, '-' = not congested.
 */
static int drbd_congested(void *congested_data, int bdi_bits)
{
struct drbd_device *device = congested_data;
struct request_queue *q;
char reason = '-';
int r = 0;

/* IO is frozen: report every bit the flusher asked about as congested. */
if (!may_inc_ap_bio(device)) {
/* DRBD has frozen IO */
r = bdi_bits;
reason = 'd';
goto out;
}

/* A usermode helper is still running; the worker thread is blocked
 * waiting for it, so treat async writeback as congested. */
if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
r |= (1 << WB_async_congested);
/* Without good local data, we would need to read from remote,
 * and that would need the worker thread as well, which is
 * currently blocked waiting for that usermode helper to
 * finish.
 */
if (!get_ldev_if_state(device, D_UP_TO_DATE))
r |= (1 << WB_sync_congested);
else
put_ldev(device);
r &= bdi_bits;
reason = 'c';
goto out;
}

/* Pass the query through to the local backing device, if attached. */
if (get_ldev(device)) {
q = bdev_get_queue(device->ldev->backing_bdev);
r = bdi_congested(q->backing_dev_info, bdi_bits);
put_ldev(device);
if (r)
reason = 'b';
}

/* Network congestion also makes async writeback congested. */
if (bdi_bits & (1 << WB_async_congested) &&
test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
r |= (1 << WB_async_congested);
reason = reason == 'b' ? 'a' : 'n';
}

out:
device->congestion_reason = reason;
return r;
}

static void drbd_init_workqueue(struct drbd_work_queue* wq)
{
spin_lock_init(&wq->q_lock);
Expand Down Expand Up @@ -2825,9 +2769,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
/* we have no partitions. we contain only ourselves. */
device->this_bdev->bd_contains = device->this_bdev;

q->backing_dev_info->congested_fn = drbd_congested;
q->backing_dev_info->congested_data = device;

blk_queue_write_cache(q, true, true);
/* Setting the max_hw_sectors to an odd value of 8kibyte here
This triggers a max_bio_size message upon first attach or connect */
Expand Down
47 changes: 0 additions & 47 deletions drivers/md/bcache/request.c
Original file line number Diff line number Diff line change
Expand Up @@ -1228,36 +1228,8 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
struct bcache_device *d = data;
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
struct request_queue *q = bdev_get_queue(dc->bdev);
int ret = 0;

if (bdi_congested(q->backing_dev_info, bits))
return 1;

if (cached_dev_get(dc)) {
unsigned int i;
struct cache *ca;

for_each_cache(ca, d->c, i) {
q = bdev_get_queue(ca->bdev);
ret |= bdi_congested(q->backing_dev_info, bits);
}

cached_dev_put(dc);
}

return ret;
}

/* Wire up the request-path hooks for a cached bcache device. */
void bch_cached_dev_request_init(struct cached_dev *dc)
{
	dc->disk.cache_miss = cached_dev_cache_miss;
	dc->disk.ioctl = cached_dev_ioctl;
	dc->disk.disk->queue->backing_dev_info->congested_fn =
		cached_dev_congested;
}
Expand Down Expand Up @@ -1341,27 +1313,8 @@ static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
struct bcache_device *d = data;
struct request_queue *q;
struct cache *ca;
unsigned int i;
int ret = 0;

for_each_cache(ca, d->c, i) {
q = bdev_get_queue(ca->bdev);
ret |= bdi_congested(q->backing_dev_info, bits);
}

return ret;
}

/* Wire up the request-path hooks for a flash-only bcache volume. */
void bch_flash_dev_request_init(struct bcache_device *d)
{
	d->cache_miss = flash_dev_cache_miss;
	d->ioctl = flash_dev_ioctl;
	d->disk->queue->backing_dev_info->congested_fn = flash_dev_congested;
}
Expand Down
1 change: 0 additions & 1 deletion drivers/md/bcache/super.c
Original file line number Diff line number Diff line change
Expand Up @@ -885,7 +885,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
return -ENOMEM;

d->disk->queue = q;
q->backing_dev_info->congested_data = d;
q->limits.max_hw_sectors = UINT_MAX;
q->limits.max_sectors = UINT_MAX;
q->limits.max_segment_size = UINT_MAX;
Expand Down
19 changes: 0 additions & 19 deletions drivers/md/dm-cache-target.c
Original file line number Diff line number Diff line change
Expand Up @@ -421,8 +421,6 @@ struct cache {

struct rw_semaphore quiesce_lock;

struct dm_target_callbacks callbacks;

/*
* origin_blocks entries, discarded if set.
*/
Expand Down Expand Up @@ -2423,20 +2421,6 @@ static void set_cache_size(struct cache *cache, dm_cblock_t size)
cache->cache_size = size;
}

/* Ask @dev's request queue whether any of @bdi_bits are congested. */
static int is_congested(struct dm_dev *dev, int bdi_bits)
{
	return bdi_congested(bdev_get_queue(dev->bdev)->backing_dev_info,
			     bdi_bits);
}

/*
 * Congestion callback for dm-cache: the target is congested when either
 * the origin device or the cache device is.  Returns 0 or 1, and checks
 * the cache device only if the origin is not congested (short-circuit).
 */
static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	struct cache *cache = container_of(cb, struct cache, callbacks);

	if (is_congested(cache->origin_dev, bdi_bits))
		return 1;
	if (is_congested(cache->cache_dev, bdi_bits))
		return 1;
	return 0;
}

#define DEFAULT_MIGRATION_THRESHOLD 2048

static int cache_create(struct cache_args *ca, struct cache **result)
Expand Down Expand Up @@ -2471,9 +2455,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
goto bad;
}

cache->callbacks.congested_fn = cache_is_congested;
dm_table_add_target_callbacks(ti->table, &cache->callbacks);

cache->metadata_dev = ca->metadata_dev;
cache->origin_dev = ca->origin_dev;
cache->cache_dev = ca->cache_dev;
Expand Down
15 changes: 0 additions & 15 deletions drivers/md/dm-clone-target.c
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,6 @@ struct hash_table_bucket;

struct clone {
struct dm_target *ti;
struct dm_target_callbacks callbacks;

struct dm_dev *metadata_dev;
struct dm_dev *dest_dev;
Expand Down Expand Up @@ -1518,18 +1517,6 @@ static void clone_status(struct dm_target *ti, status_type_t type,
DMEMIT("Error");
}

/*
 * Congestion callback for dm-clone.  Both the source and the destination
 * device are always queried (bitwise OR, no short-circuit) and their
 * congestion states are combined.
 */
static int clone_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	struct clone *clone = container_of(cb, struct clone, callbacks);
	int congested;

	congested = bdi_congested(
		bdev_get_queue(clone->dest_dev->bdev)->backing_dev_info,
		bdi_bits);
	congested |= bdi_congested(
		bdev_get_queue(clone->source_dev->bdev)->backing_dev_info,
		bdi_bits);

	return congested;
}

static sector_t get_dev_size(struct dm_dev *dev)
{
return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
Expand Down Expand Up @@ -1930,8 +1917,6 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto out_with_mempool;

mutex_init(&clone->commit_lock);
clone->callbacks.congested_fn = clone_is_congested;
dm_table_add_target_callbacks(ti->table, &clone->callbacks);

/* Enable flushes */
ti->num_flush_bios = 1;
Expand Down
15 changes: 0 additions & 15 deletions drivers/md/dm-era-target.c
Original file line number Diff line number Diff line change
Expand Up @@ -1137,7 +1137,6 @@ static int metadata_get_stats(struct era_metadata *md, void *ptr)

struct era {
struct dm_target *ti;
struct dm_target_callbacks callbacks;

struct dm_dev *metadata_dev;
struct dm_dev *origin_dev;
Expand Down Expand Up @@ -1375,18 +1374,6 @@ static void stop_worker(struct era *era)
/*----------------------------------------------------------------
* Target methods
*--------------------------------------------------------------*/
/* Ask @dev's request queue whether any of @bdi_bits are congested. */
static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
{
	return bdi_congested(bdev_get_queue(dev->bdev)->backing_dev_info,
			     bdi_bits);
}

/* Congestion callback for dm-era: delegate to the origin device. */
static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	return dev_is_congested(
		container_of(cb, struct era, callbacks)->origin_dev, bdi_bits);
}

static void era_destroy(struct era *era)
{
if (era->md)
Expand Down Expand Up @@ -1514,8 +1501,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->flush_supported = true;

ti->num_discard_bios = 1;
era->callbacks.congested_fn = era_is_congested;
dm_table_add_target_callbacks(ti->table, &era->callbacks);

return 0;
}
Expand Down
12 changes: 0 additions & 12 deletions drivers/md/dm-raid.c
Original file line number Diff line number Diff line change
Expand Up @@ -242,7 +242,6 @@ struct raid_set {

struct mddev md;
struct raid_type *raid_type;
struct dm_target_callbacks callbacks;

sector_t array_sectors;
sector_t dev_sectors;
Expand Down Expand Up @@ -1705,13 +1704,6 @@ static void do_table_event(struct work_struct *ws)
dm_table_event(rs->ti->table);
}

/* Congestion callback for dm-raid: delegate to the embedded MD device. */
static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
	return mddev_congested(
		&container_of(cb, struct raid_set, callbacks)->md, bits);
}

/*
* Make sure a valid takover (level switch) is being requested on @rs
*
Expand Down Expand Up @@ -3248,9 +3240,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_md_start;
}

rs->callbacks.congested_fn = raid_is_congested;
dm_table_add_target_callbacks(ti->table, &rs->callbacks);

/* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
Expand Down Expand Up @@ -3310,7 +3299,6 @@ static void raid_dtr(struct dm_target *ti)
{
struct raid_set *rs = ti->private;

list_del_init(&rs->callbacks.list);
md_stop(&rs->md);
raid_set_free(rs);
}
Expand Down
37 changes: 1 addition & 36 deletions drivers/md/dm-table.c
Original file line number Diff line number Diff line change
Expand Up @@ -64,8 +64,6 @@ struct dm_table {
void *event_context;

struct dm_md_mempools *mempools;

struct list_head target_callbacks;
};

/*
Expand Down Expand Up @@ -190,7 +188,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
return -ENOMEM;

INIT_LIST_HEAD(&t->devices);
INIT_LIST_HEAD(&t->target_callbacks);

if (!num_targets)
num_targets = KEYS_PER_NODE;
Expand Down Expand Up @@ -361,7 +358,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
* This upgrades the mode on an already open dm_dev, being
* careful to leave things as they were if we fail to reopen the
* device and not to touch the existing bdev field in case
* it is accessed concurrently inside dm_table_any_congested().
* it is accessed concurrently.
*/
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
struct mapped_device *md)
Expand Down Expand Up @@ -2052,38 +2049,6 @@ int dm_table_resume_targets(struct dm_table *t)
return 0;
}

/**
 * dm_table_add_target_callbacks - register per-target callbacks with a table
 * @t: table the target belongs to
 * @cb: callbacks to link onto the table's list (caller retains ownership;
 *      it is removed again with list_del_init() in the target's dtr)
 *
 * The registered callbacks are consulted by dm_table_any_congested().
 * Called from target constructors (e.g. cache_create(), clone_ctr(),
 * era_ctr(), raid_ctr(), pool_ctr()); no locking is taken here.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
{
list_add(&cb->list, &t->target_callbacks);
}
EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);

/**
 * dm_table_any_congested - congestion state of all devices under a table
 * @t: the device-mapper table
 * @bdi_bits: WB_* congestion bits the caller is interested in
 *
 * Returns the OR of the congestion state of every underlying block device
 * plus whatever each registered target callback reports.  Nonzero means
 * congested.
 */
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
struct dm_target_callbacks *cb;
int r = 0;

/* Fold in the congestion state of every underlying device. */
list_for_each_entry(dd, devices, list) {
struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
char b[BDEVNAME_SIZE];

if (likely(q))
r |= bdi_congested(q->backing_dev_info, bdi_bits);
else
/* No queue: warn (rate-limited) but keep checking the rest. */
DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
dm_device_name(t->md),
bdevname(dd->dm_dev->bdev, b));
}

/* Let each target contribute via its optional congested_fn callback. */
list_for_each_entry(cb, &t->target_callbacks, list)
if (cb->congested_fn)
r |= cb->congested_fn(cb, bdi_bits);

return r;
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
return t->md;
Expand Down
16 changes: 0 additions & 16 deletions drivers/md/dm-thin.c
Original file line number Diff line number Diff line change
Expand Up @@ -326,7 +326,6 @@ struct pool_c {
struct pool *pool;
struct dm_dev *data_dev;
struct dm_dev *metadata_dev;
struct dm_target_callbacks callbacks;

dm_block_t low_water_blocks;
struct pool_features requested_pf; /* Features requested during table load */
Expand Down Expand Up @@ -2796,18 +2795,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
}
}

/*
 * Congestion callback for a dm-thin pool.  A pool that has run out of
 * data space always reports itself congested; otherwise the query is
 * passed through to the underlying data device.
 */
static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	struct pool_c *pt = container_of(cb, struct pool_c, callbacks);

	if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
		return 1;

	return bdi_congested(
		bdev_get_queue(pt->data_dev->bdev)->backing_dev_info,
		bdi_bits);
}

static void requeue_bios(struct pool *pool)
{
struct thin_c *tc;
Expand Down Expand Up @@ -3420,9 +3407,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
dm_pool_register_pre_commit_callback(pool->pmd,
metadata_pre_commit_callback, pool);

pt->callbacks.congested_fn = pool_is_congested;
dm_table_add_target_callbacks(ti->table, &pt->callbacks);

mutex_unlock(&dm_thin_pool_table.mutex);

return 0;
Expand Down
Loading

0 comments on commit 21cf866

Please sign in to comment.