Merge branch 'for-5.10/block' into for-5.10/drivers
* for-5.10/block: (140 commits)
  bdi: replace BDI_CAP_NO_{WRITEBACK,ACCT_DIRTY} with a single flag
  bdi: invert BDI_CAP_NO_ACCT_WB
  bdi: replace BDI_CAP_STABLE_WRITES with a queue and a sb flag
  mm: use SWP_SYNCHRONOUS_IO more intelligently
  bdi: remove BDI_CAP_SYNCHRONOUS_IO
  bdi: remove BDI_CAP_CGROUP_WRITEBACK
  block: lift setting the readahead size into the block layer
  md: update the optimal I/O size on reshape
  bdi: initialize ->ra_pages and ->io_pages in bdi_init
  aoe: set an optimal I/O size
  bcache: inherit the optimal I/O size
  drbd: remove dead code in device_to_statistics
  fs: remove the unused SB_I_MULTIROOT flag
  block: mark blkdev_get static
  PM: mm: cleanup swsusp_swap_check
  mm: split swap_type_of
  PM: rewrite is_hibernate_resume_dev to not require an inode
  mm: cleanup claim_swapfile
  ocfs2: cleanup o2hb_region_dev_store
  dasd: cleanup dasd_scan_partitions
  ...
Jens Axboe committed Sep 24, 2020
2 parents 805c6d3 + f56753a commit ac8f7a0
Showing 117 changed files with 2,663 additions and 2,094 deletions.
3 changes: 0 additions & 3 deletions Documentation/filesystems/locking.rst
@@ -488,9 +488,6 @@ getgeo: no
 swap_slot_free_notify:	no	(see below)
 ======================= ===================
 
-unlock_native_capacity and revalidate_disk are called only from
-check_disk_change().
-
 swap_slot_free_notify is called with swap_lock and sometimes the page lock
 held.
 
2 changes: 0 additions & 2 deletions block/Kconfig
@@ -161,8 +161,6 @@ config BLK_WBT_MQ
 	depends on BLK_WBT
 	help
 	Enable writeback throttling by default on multiqueue devices.
-	Multiqueue currently doesn't have support for IO scheduling,
-	enabling this option is recommended.
 
 config BLK_DEBUG_FS
 	bool "Block layer debugging information in debugfs"
9 changes: 7 additions & 2 deletions block/bfq-iosched.c
@@ -4640,6 +4640,9 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
 {
 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
 
+	if (!atomic_read(&hctx->elevator_queued))
+		return false;
+
 	/*
 	 * Avoiding lock: a race on bfqd->busy_queues should cause at
 	 * most a call to dispatch for nothing
@@ -5554,6 +5557,7 @@ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
 		rq = list_first_entry(list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
 		bfq_insert_request(hctx, rq, at_head);
+		atomic_inc(&hctx->elevator_queued);
 	}
 }
 
@@ -5921,6 +5925,7 @@ static void bfq_finish_requeue_request(struct request *rq)
 
 		bfq_completed_request(bfqq, bfqd);
 		bfq_finish_requeue_request_body(bfqq);
+		atomic_dec(&rq->mq_hctx->elevator_queued);
 
 		spin_unlock_irqrestore(&bfqd->lock, flags);
 	} else {
@@ -6360,8 +6365,8 @@ static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
 	struct blk_mq_tags *tags = hctx->sched_tags;
 	unsigned int min_shallow;
 
-	min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
-	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
+	min_shallow = bfq_update_depths(bfqd, tags->bitmap_tags);
+	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, min_shallow);
 }
 
 static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
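The bfq hunks above add a per-hctx atomic counter, elevator_queued, which is incremented when a request is handed to the scheduler and decremented when it completes, so bfq_has_work() can bail out without touching any scheduler state. The following is a minimal userspace sketch of that pattern, not kernel code; the struct and function names are illustrative only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct hw_ctx {
	atomic_int elevator_queued;	/* requests currently owned by the scheduler */
};

static void insert_request(struct hw_ctx *hctx)
{
	/* ...queue the request on the scheduler's lists under its lock... */
	atomic_fetch_add(&hctx->elevator_queued, 1);
}

static void complete_request(struct hw_ctx *hctx)
{
	/* ...per-request completion work... */
	atomic_fetch_sub(&hctx->elevator_queued, 1);
}

static bool has_work(struct hw_ctx *hctx)
{
	/* cheap early exit: nothing has been queued on this context */
	if (atomic_load(&hctx->elevator_queued) == 0)
		return false;
	/* ...otherwise fall through to the existing, more expensive check... */
	return true;
}

int main(void)
{
	struct hw_ctx hctx = { .elevator_queued = 0 };

	insert_request(&hctx);
	printf("has_work after insert: %d\n", has_work(&hctx));   /* prints 1 */
	complete_request(&hctx);
	printf("has_work after complete: %d\n", has_work(&hctx)); /* prints 0 */
	return 0;
}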
32 changes: 26 additions & 6 deletions block/blk-cgroup.c
@@ -119,15 +119,24 @@ static void blkg_async_bio_workfn(struct work_struct *work)
 					     async_bio_work);
 	struct bio_list bios = BIO_EMPTY_LIST;
 	struct bio *bio;
+	struct blk_plug plug;
+	bool need_plug = false;
 
 	/* as long as there are pending bios, @blkg can't go away */
 	spin_lock_bh(&blkg->async_bio_lock);
 	bio_list_merge(&bios, &blkg->async_bios);
 	bio_list_init(&blkg->async_bios);
 	spin_unlock_bh(&blkg->async_bio_lock);
 
+	/* start plug only when bio_list contains at least 2 bios */
+	if (bios.head && bios.head->bi_next) {
+		need_plug = true;
+		blk_start_plug(&plug);
+	}
 	while ((bio = bio_list_pop(&bios)))
 		submit_bio(bio);
+	if (need_plug)
+		blk_finish_plug(&plug);
 }
 
 /**
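The blkg_async_bio_workfn() hunk above wraps the submission loop in a plug, but only when the popped list holds at least two bios (head and head->bi_next both set), so a lone bio avoids the plug setup and teardown cost. Below is a small userspace sketch of that "batch only when it pays off" check; it assumes nothing about the kernel API and every name in it is made up for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct item {
	int id;
	struct item *next;
};

static void submit(struct item *it)
{
	printf("submit %d\n", it->id);
}

static void flush_list(struct item *head)
{
	/* batch only when the list contains at least two items */
	bool batched = head && head->next;

	if (batched)
		printf("start batch\n");	/* stands in for blk_start_plug() */

	for (struct item *it = head; it; it = it->next)
		submit(it);

	if (batched)
		printf("finish batch\n");	/* stands in for blk_finish_plug() */
}

int main(void)
{
	struct item second = { .id = 2, .next = NULL };
	struct item first = { .id = 1, .next = &second };

	flush_list(&first);	/* two items: batched */
	flush_list(&second);	/* one item: submitted directly */
	return 0;
}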
@@ -1613,16 +1622,24 @@ static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
 static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
 {
 	unsigned long pflags;
+	bool clamp;
 	u64 now = ktime_to_ns(ktime_get());
 	u64 exp;
 	u64 delay_nsec = 0;
 	int tok;
 
 	while (blkg->parent) {
-		if (atomic_read(&blkg->use_delay)) {
+		int use_delay = atomic_read(&blkg->use_delay);
+
+		if (use_delay) {
+			u64 this_delay;
+
 			blkcg_scale_delay(blkg, now);
-			delay_nsec = max_t(u64, delay_nsec,
-					   atomic64_read(&blkg->delay_nsec));
+			this_delay = atomic64_read(&blkg->delay_nsec);
+			if (this_delay > delay_nsec) {
+				delay_nsec = this_delay;
+				clamp = use_delay > 0;
+			}
 		}
 		blkg = blkg->parent;
 	}
@@ -1634,10 +1651,13 @@ static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
 	 * Let's not sleep for all eternity if we've amassed a huge delay.
 	 * Swapping or metadata IO can accumulate 10's of seconds worth of
 	 * delay, and we want userspace to be able to do _something_ so cap the
-	 * delays at 1 second. If there's 10's of seconds worth of delay then
-	 * the tasks will be delayed for 1 second for every syscall.
+	 * delays at 0.25s. If there's 10's of seconds worth of delay then the
+	 * tasks will be delayed for 0.25 second for every syscall. If
+	 * blkcg_set_delay() was used as indicated by negative use_delay, the
+	 * caller is responsible for regulating the range.
 	 */
-	delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
+	if (clamp)
+		delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
 
 	if (use_memdelay)
 		psi_memstall_enter(&pflags);
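The blkcg_maybe_throttle_blkg() hunks above remember which source produced the largest pending delay: only delays produced by the controller's own scaling (use_delay > 0) are clamped to 250ms, while delays set explicitly via blkcg_set_delay() (negative use_delay) are left for the caller to bound. A rough userspace sketch of that rule follows; the names, structure layout, and values are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL

struct group {
	int use_delay;		/* >0: scaled delay, <0: explicitly set */
	uint64_t delay_nsec;	/* accumulated delay for this group */
	struct group *parent;
};

static uint64_t effective_delay(struct group *g)
{
	uint64_t delay_nsec = 0;
	int clamp = 0;

	/* walk up the hierarchy and keep the largest delay, noting its source */
	for (; g->parent; g = g->parent) {
		if (g->use_delay && g->delay_nsec > delay_nsec) {
			delay_nsec = g->delay_nsec;
			clamp = g->use_delay > 0;
		}
	}

	/* cap runaway scaled delays at 0.25s; explicit delays pass through */
	if (clamp && delay_nsec > 250 * NSEC_PER_MSEC)
		delay_nsec = 250 * NSEC_PER_MSEC;
	return delay_nsec;
}

int main(void)
{
	struct group root = { 0 };
	struct group scaled = { .use_delay = 1,
				.delay_nsec = 3000 * NSEC_PER_MSEC,
				.parent = &root };
	struct group fixed = { .use_delay = -1,
			       .delay_nsec = 3000 * NSEC_PER_MSEC,
			       .parent = &root };

	printf("scaled: %llu ms\n",
	       (unsigned long long)(effective_delay(&scaled) / NSEC_PER_MSEC));	/* 250 */
	printf("fixed:  %llu ms\n",
	       (unsigned long long)(effective_delay(&fixed) / NSEC_PER_MSEC));		/* 3000 */
	return 0;
}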