Merge tag 'block-6.13-20242901' of git://git.kernel.dk/linux
Pull more block updates from Jens Axboe:

 - NVMe pull request via Keith:
      - Use correct srcu list traversal (Breno)
      - Scatter-gather support for metadata (Keith)
      - Fabrics shutdown race condition fix (Nilay)
      - Persistent reservations updates (Guixin)

 - Add the required bits for MD atomic write support for raid0/1/10

 - Correct return value for unknown opcode in ublk

 - Fix deadlock with zone revalidation

 - Fix for the io priority request vs bio cleanups

 - Use the correct unsigned int type for various limit helpers

 - Fix for a race in loop

 - Cleanup blk_rq_prep_clone() to prevent uninit-value warning and make
   it easier for actual humans to read

 - Fix potential UAF when iterating tags

 - A few fixes for bfq-iosched UAF issues

 - Fix for brd discard not decrementing the allocated page count

 - Various little fixes and cleanups

* tag 'block-6.13-20242901' of git://git.kernel.dk/linux: (36 commits)
  brd: decrease the number of allocated pages which discarded
  block, bfq: fix bfqq uaf in bfq_limit_depth()
  block: Don't allow an atomic write be truncated in blkdev_write_iter()
  mq-deadline: don't call req_get_ioprio from the I/O completion handler
  block: Prevent potential deadlock in blk_revalidate_disk_zones()
  block: Remove extra part pointer NULLify in blk_rq_init()
  nvme: tuning pr code by using defined structs and macros
  nvme: introduce change ptpl and iekey definition
  block: return bool from get_disk_ro and bdev_read_only
  block: remove a duplicate definition for bdev_read_only
  block: return bool from blk_rq_aligned
  block: return unsigned int from blk_lim_dma_alignment_and_pad
  block: return unsigned int from queue_dma_alignment
  block: return unsigned int from bdev_io_opt
  block: req->bio is always set in the merge code
  block: don't bother checking the data direction for merges
  block: blk-mq: fix uninit-value in blk_rq_prep_clone and refactor
  Revert "block, bfq: merge bfq_release_process_ref() into bfq_put_cooperator()"
  md/raid10: Atomic write support
  md/raid1: Atomic write support
  ...
Linus Torvalds committed Nov 30, 2024
2 parents dd54fcc + 8273420 commit cfd4730
Showing 27 changed files with 547 additions and 192 deletions.
1 change: 1 addition & 0 deletions block/bfq-cgroup.c
@@ -736,6 +736,7 @@ static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
                  */
                 bfq_put_cooperator(sync_bfqq);
                 bic_set_bfqq(bic, NULL, true, act_idx);
+                bfq_release_process_ref(bfqd, sync_bfqq);
         }
 }

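This hunk completes the revert listed in the shortlog: bfq_put_cooperator() no longer drops the process reference itself, so bfq_sync_bfqq_move() must call bfq_release_process_ref() explicitly once the bic no longer points at the queue.
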
43 changes: 28 additions & 15 deletions block/bfq-iosched.c
@@ -582,23 +582,31 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd,
 #define BFQ_LIMIT_INLINE_DEPTH 16
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
-static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
+static bool bfqq_request_over_limit(struct bfq_data *bfqd,
+                                    struct bfq_io_cq *bic, blk_opf_t opf,
+                                    unsigned int act_idx, int limit)
 {
-        struct bfq_data *bfqd = bfqq->bfqd;
-        struct bfq_entity *entity = &bfqq->entity;
         struct bfq_entity *inline_entities[BFQ_LIMIT_INLINE_DEPTH];
         struct bfq_entity **entities = inline_entities;
-        int depth, level, alloc_depth = BFQ_LIMIT_INLINE_DEPTH;
-        int class_idx = bfqq->ioprio_class - 1;
+        int alloc_depth = BFQ_LIMIT_INLINE_DEPTH;
         struct bfq_sched_data *sched_data;
+        struct bfq_entity *entity;
+        struct bfq_queue *bfqq;
         unsigned long wsum;
         bool ret = false;
-
-        if (!entity->on_st_or_in_serv)
-                return false;
+        int depth;
+        int level;
 
 retry:
         spin_lock_irq(&bfqd->lock);
+        bfqq = bic_to_bfqq(bic, op_is_sync(opf), act_idx);
+        if (!bfqq)
+                goto out;
+
+        entity = &bfqq->entity;
+        if (!entity->on_st_or_in_serv)
+                goto out;
+
         /* +1 for bfqq entity, root cgroup not included */
         depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1;
         if (depth > alloc_depth) {
@@ -643,7 +651,7 @@ static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
          * class.
          */
         wsum = 0;
-        for (i = 0; i <= class_idx; i++) {
+        for (i = 0; i <= bfqq->ioprio_class - 1; i++) {
                 wsum = wsum * IOPRIO_BE_NR +
                         sched_data->service_tree[i].wsum;
         }
@@ -666,7 +674,9 @@ static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
         return ret;
 }
 #else
-static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
+static bool bfqq_request_over_limit(struct bfq_data *bfqd,
+                                    struct bfq_io_cq *bic, blk_opf_t opf,
+                                    unsigned int act_idx, int limit)
 {
         return false;
 }
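
The !CONFIG_BFQ_GROUP_IOSCHED stub changes signature in lockstep with the real implementation above, so both configurations compile against the updated call site in bfq_limit_depth() below.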
@@ -704,16 +714,17 @@ static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
         }
 
         for (act_idx = 0; bic && act_idx < bfqd->num_actuators; act_idx++) {
-                struct bfq_queue *bfqq =
-                        bic_to_bfqq(bic, op_is_sync(opf), act_idx);
+                /* Fast path to check if bfqq is already allocated. */
+                if (!bic_to_bfqq(bic, op_is_sync(opf), act_idx))
+                        continue;
 
                 /*
                  * Does queue (or any parent entity) exceed number of
                  * requests that should be available to it? Heavily
                  * limit depth so that it cannot consume more
                  * available requests and thus starve other entities.
                  */
-                if (bfqq && bfqq_request_over_limit(bfqq, limit)) {
+                if (bfqq_request_over_limit(bfqd, bic, opf, act_idx, limit)) {
                         depth = 1;
                         break;
                 }
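
The shape of this fix — resolve the pointer only after taking the lock that protects its lifetime — generalizes beyond bfq. Below is a minimal userspace sketch of the pattern, not kernel code: the registry array, struct queue and pthread mutex are invented stand-ins for the bic's bfqq slots and bfqd->lock. The point is that the lookup happens inside the critical section, so the object cannot be freed between lookup and use.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
        int nr_requests;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct queue *registry[4];       /* slots cleared under 'lock' on teardown */

/*
 * Old shape (racy in spirit): the caller resolved the queue pointer
 * before calling in, without holding 'lock', so a concurrent teardown
 * could free the queue first. New shape: take the lock, then resolve
 * the pointer, so teardown cannot race with the inspection below.
 */
static bool queue_over_limit(unsigned int idx, int limit)
{
        bool ret = false;
        struct queue *q;

        pthread_mutex_lock(&lock);
        q = registry[idx];              /* lookup under the lock */
        if (q)
                ret = q->nr_requests > limit;
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        static struct queue q = { .nr_requests = 8 };

        registry[0] = &q;
        printf("over limit: %d\n", queue_over_limit(0, 4));     /* prints 1 */
        return 0;
}

Build with cc -pthread. The same discipline is why bfqq_request_over_limit() now takes bfqd, bic, opf and act_idx instead of a pre-resolved bfqq pointer.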
@@ -5434,8 +5445,6 @@ void bfq_put_cooperator(struct bfq_queue *bfqq)
                 bfq_put_queue(__bfqq);
                 __bfqq = next;
         }
-
-        bfq_release_process_ref(bfqq->bfqd, bfqq);
 }
 
 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
@@ -5448,6 +5457,8 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
         bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
 
         bfq_put_cooperator(bfqq);
+
+        bfq_release_process_ref(bfqd, bfqq);
 }
 
 static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync,
@@ -6734,6 +6745,8 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
         bic_set_bfqq(bic, NULL, true, bfqq->actuator_idx);
 
         bfq_put_cooperator(bfqq);
+
+        bfq_release_process_ref(bfqq->bfqd, bfqq);
         return NULL;
 }
 
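Taken together with the bfq-cgroup.c hunk above, the revert leaves all three bfq_put_cooperator() callers — bfq_sync_bfqq_move(), bfq_exit_bfqq() and bfq_split_bfqq() — pairing it with an explicit bfq_release_process_ref() again.
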
35 changes: 7 additions & 28 deletions block/blk-merge.c
@@ -864,17 +864,10 @@ static struct request *attempt_merge(struct request_queue *q,
         if (req_op(req) != req_op(next))
                 return NULL;
 
-        if (rq_data_dir(req) != rq_data_dir(next))
+        if (req->bio->bi_write_hint != next->bio->bi_write_hint)
                 return NULL;
+        if (req->bio->bi_ioprio != next->bio->bi_ioprio)
+                return NULL;
 
-        if (req->bio && next->bio) {
-                /* Don't merge requests with different write hints. */
-                if (req->bio->bi_write_hint != next->bio->bi_write_hint)
-                        return NULL;
-                if (req->bio->bi_ioprio != next->bio->bi_ioprio)
-                        return NULL;
-        }
-
         if (!blk_atomic_write_mergeable_rqs(req, next))
                 return NULL;
 
@@ -986,30 +979,16 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
         if (req_op(rq) != bio_op(bio))
                 return false;
 
-        /* different data direction or already started, don't merge */
-        if (bio_data_dir(bio) != rq_data_dir(rq))
-                return false;
-
         /* don't merge across cgroup boundaries */
         if (!blk_cgroup_mergeable(rq, bio))
                 return false;
 
         /* only merge integrity protected bio into ditto rq */
         if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
                 return false;
 
         /* Only merge if the crypt contexts are compatible */
         if (!bio_crypt_rq_ctx_compatible(rq, bio))
                 return false;
 
-        if (rq->bio) {
-                /* Don't merge requests with different write hints. */
-                if (rq->bio->bi_write_hint != bio->bi_write_hint)
-                        return false;
-                if (rq->bio->bi_ioprio != bio->bi_ioprio)
-                        return false;
-        }
-
+        if (rq->bio->bi_write_hint != bio->bi_write_hint)
+                return false;
+        if (rq->bio->bi_ioprio != bio->bi_ioprio)
+                return false;
         if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false)
                 return false;
 
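Both hunks rest on two observations from the shortlog: a request in the merge path always has a bio attached, so the rq->bio NULL guards were dead code; and the data direction is derived from the opcode, so once the req_op() values match, the directions match too. A minimal sketch of the second point — the enum below is an illustrative subset, but the convention that write-type opcodes carry the low bit matches the kernel's op_is_write():

#include <stdbool.h>
#include <stdio.h>

/* Illustrative subset of request opcodes; writes have the low bit set. */
enum req_op {
        REQ_OP_READ     = 0,
        REQ_OP_WRITE    = 1,
        REQ_OP_FLUSH    = 2,
};

static bool op_is_write(enum req_op op)
{
        return op & 1;
}

int main(void)
{
        enum req_op a = REQ_OP_WRITE, b = REQ_OP_WRITE;

        /* Equal opcodes imply equal directions, so no separate check. */
        if (a == b)
                printf("same op => same direction (%d == %d)\n",
                       op_is_write(a), op_is_write(b));
        return 0;
}
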
14 changes: 6 additions & 8 deletions block/blk-mq.c
@@ -388,7 +388,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
         rq->tag = BLK_MQ_NO_TAG;
         rq->internal_tag = BLK_MQ_NO_TAG;
         rq->start_time_ns = blk_time_get_ns();
-        rq->part = NULL;
         blk_crypto_rq_set_defaults(rq);
 }
 EXPORT_SYMBOL(blk_rq_init);
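
The dropped assignment was redundant: blk_rq_init() zeroes the whole request with memset() a few lines earlier, so rq->part is already NULL by the time this line ran.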
@@ -3273,27 +3272,28 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
                       int (*bio_ctr)(struct bio *, struct bio *, void *),
                       void *data)
 {
-        struct bio *bio, *bio_src;
+        struct bio *bio_src;
 
         if (!bs)
                 bs = &fs_bio_set;
 
         __rq_for_each_bio(bio_src, rq_src) {
-                bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
-                                      bs);
+                struct bio *bio = bio_alloc_clone(rq->q->disk->part0, bio_src,
+                                                  gfp_mask, bs);
                 if (!bio)
                         goto free_and_out;
 
-                if (bio_ctr && bio_ctr(bio, bio_src, data))
+                if (bio_ctr && bio_ctr(bio, bio_src, data)) {
+                        bio_put(bio);
                         goto free_and_out;
+                }
 
                 if (rq->bio) {
                         rq->biotail->bi_next = bio;
                         rq->biotail = bio;
                 } else {
                         rq->bio = rq->biotail = bio;
                 }
-                bio = NULL;
         }
 
         /* Copy attributes of the original request to the clone request. */
@@ -3311,8 +3311,6 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
         return 0;
 
 free_and_out:
-        if (bio)
-                bio_put(bio);
         blk_rq_unprep_clone(rq);
 
         return -ENOMEM;
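
The refactor is easiest to read as a change in cleanup discipline: the clone bio is now scoped to a single loop iteration and released at the point of failure, so the free_and_out label no longer has to reason about a maybe-initialized variable — the pattern the uninit-value report complained about. Below is a minimal userspace sketch of the same before/after shape; clone_one(), setup_fails(), put_one() and unlink_all() are invented stand-ins for bio_alloc_clone(), bio_ctr(), bio_put() and blk_rq_unprep_clone():

#include <stdlib.h>

struct bio {
        struct bio *next;
};

static struct bio *clone_one(int fail)          /* bio_alloc_clone() stand-in */
{
        return fail ? NULL : calloc(1, sizeof(struct bio));
}

static int setup_fails(int i)                   /* bio_ctr() failure stand-in */
{
        return 0;       /* never fails in this sketch */
}

static void put_one(struct bio *b)              /* bio_put() stand-in */
{
        free(b);
}

static void unlink_all(struct bio **head)       /* blk_rq_unprep_clone() stand-in */
{
        while (*head) {
                struct bio *b = *head;

                *head = b->next;
                put_one(b);
        }
}

static int clone_all(int n, int fail_at)
{
        struct bio *head = NULL;

        for (int i = 0; i < n; i++) {
                /* scoped per iteration: no stale value can reach the label */
                struct bio *bio = clone_one(i == fail_at);

                if (!bio)
                        goto unwind;    /* nothing from this iteration to free */
                if (setup_fails(i)) {
                        put_one(bio);   /* freed here, not at the label */
                        goto unwind;
                }
                bio->next = head;
                head = bio;
        }
        unlink_all(&head);
        return 0;

unwind:
        unlink_all(&head);              /* undo only fully-linked clones */
        return -1;
}

int main(void)
{
        return clone_all(4, 2) == -1 ? 0 : 1;   /* exercises the unwind path */
}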
…
