blk-cgroup: pass a gendisk to blkcg_schedule_throttle
Pass the gendisk to blkcg_schedule_throttle as part of moving the
blk-cgroup infrastructure to be gendisk based.  Remove the unused
!BLK_CGROUP stub while we're at it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Andreas Herrmann <aherrmann@suse.de>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20220921180501.1539876-17-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Christoph Hellwig authored and Jens Axboe committed Sep 27, 2022
1 parent 00ad699 commit de185b5
Showing 5 changed files with 11 additions and 10 deletions.
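
Before the per-file hunks, here is the shape of the interface change: callers now hand blkcg_schedule_throttle() the gendisk rather than the request_queue, and the function derives the queue internally via disk->queue. A minimal sketch under kernel-build assumptions; the wrapper example_throttle_swap_bio() is purely illustrative and not part of the commit:

#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/blk-cgroup.h>

/*
 * Old interface (before this commit):
 *	blkcg_schedule_throttle(rqos->q, use_memdelay);
 *
 * New interface: pass the gendisk that owns the queue instead;
 * blkcg_schedule_throttle() now resolves disk->queue itself.
 */
static void example_throttle_swap_bio(struct gendisk *disk, struct bio *bio)
{
	/*
	 * Charge the delay to memory PSI only for swap I/O, mirroring
	 * the iocost call sites in the hunks below.
	 */
	bool use_memdelay = (bio->bi_opf & REQ_SWAP) == REQ_SWAP;

	blkcg_schedule_throttle(disk, use_memdelay);
}

Every caller already has a disk at hand: the rq_qos paths reach it through rqos->q->disk and the swap path through si->bdev->bd_disk, which is exactly what the hunks below switch to.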
block/blk-cgroup.c (5 additions, 3 deletions)
@@ -1792,13 +1792,13 @@ void blkcg_maybe_throttle_current(void)
 
 /**
  * blkcg_schedule_throttle - this task needs to check for throttling
- * @q: the request queue IO was submitted on
+ * @gendisk: disk to throttle
  * @use_memdelay: do we charge this to memory delay for PSI
  *
  * This is called by the IO controller when we know there's delay accumulated
  * for the blkg for this task. We do not pass the blkg because there are places
  * we call this that may not have that information, the swapping code for
- * instance will only have a request_queue at that point. This set's the
+ * instance will only have a block_device at that point. This set's the
  * notify_resume for the task to check and see if it requires throttling before
  * returning to user space.
  *
@@ -1807,8 +1807,10 @@ void blkcg_maybe_throttle_current(void)
  * throttle once. If the task needs to be throttled again it'll need to be
  * re-set at the next time we see the task.
  */
-void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
+void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay)
 {
+	struct request_queue *q = disk->queue;
+
 	if (unlikely(current->flags & PF_KTHREAD))
 		return;
 
block/blk-iocost.c (2 additions, 2 deletions)
@@ -2636,7 +2636,7 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
 	if (use_debt) {
 		iocg_incur_debt(iocg, abs_cost, &now);
 		if (iocg_kick_delay(iocg, &now))
-			blkcg_schedule_throttle(rqos->q,
+			blkcg_schedule_throttle(rqos->q->disk,
 					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
 		iocg_unlock(iocg, ioc_locked, &flags);
 		return;
@@ -2737,7 +2737,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
 	if (likely(!list_empty(&iocg->active_list))) {
 		iocg_incur_debt(iocg, abs_cost, &now);
 		if (iocg_kick_delay(iocg, &now))
-			blkcg_schedule_throttle(rqos->q,
+			blkcg_schedule_throttle(rqos->q->disk,
 					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
 	} else {
 		iocg_commit_bio(iocg, bio, abs_cost, cost);
block/blk-iolatency.c (1 addition, 1 deletion)
@@ -292,7 +292,7 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
 	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
 
 	if (use_delay)
-		blkcg_schedule_throttle(rqos->q, use_memdelay);
+		blkcg_schedule_throttle(rqos->q->disk, use_memdelay);
 
 	/*
 	 * To avoid priority inversions we want to just take a slot if we are
include/linux/blk-cgroup.h (2 additions, 3 deletions)
@@ -18,14 +18,14 @@
 
 struct bio;
 struct cgroup_subsys_state;
-struct request_queue;
+struct gendisk;
 
 #define FC_APPID_LEN 129
 
 #ifdef CONFIG_BLK_CGROUP
 extern struct cgroup_subsys_state * const blkcg_root_css;
 
-void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
+void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay);
 void blkcg_maybe_throttle_current(void);
 bool blk_cgroup_congested(void);
 void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css);
@@ -39,7 +39,6 @@ struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio);
 
 static inline void blkcg_maybe_throttle_current(void) { }
 static inline bool blk_cgroup_congested(void) { return false; }
-static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
 static inline struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
 {
 	return NULL;
mm/swapfile.c (1 addition, 1 deletion)
@@ -3655,7 +3655,7 @@ void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
 	plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
 				  avail_lists[nid]) {
 		if (si->bdev) {
-			blkcg_schedule_throttle(bdev_get_queue(si->bdev), true);
+			blkcg_schedule_throttle(si->bdev->bd_disk, true);
 			break;
 		}
 	}
