linux/drivers/mmc/core/queue.c (commit 031b814, 318 lines, 265 loc, 7.44 KB)
/*
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->qcnt)
				cntx->is_waiting_last_req = true;
			else
				mq->asleep = true;
		}
		spin_unlock_irq(q->queue_lock);

		if (req || mq->qcnt) {
			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, BLK_STS_IOERR);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;

	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
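/*
 * Illustrative sketch (not part of the original file): the arithmetic
 * behind the discard granularity set above. card->pref_erase is a count
 * of 512-byte sectors, so shifting left by 9 converts it to bytes. The
 * pref_erase value below is hypothetical, chosen only to show the math.
 */
#if 0
	unsigned int pref_erase = 8192;			/* sectors (hypothetical) */
	unsigned int granularity = pref_erase << 9;	/* 8192 * 512 = 4 MiB */
#endif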
/**
 * mmc_init_request() - initialize the MMC-specific per-request data
 * @q: the request queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int mmc_init_request(struct request_queue *q, struct request *req,
			    gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queue_lock = lock;
	mq->queue->request_fn = mmc_request_fn;
	mq->queue->init_rq_fn = mmc_init_request;
	mq->queue->exit_rq_fn = mmc_exit_request;
	mq->queue->cmd_size = sizeof(struct mmc_queue_req);
	mq->queue->queuedata = mq;
	mq->qcnt = 0;
	ret = blk_init_allocated_queue(mq->queue);
	if (ret) {
		blk_cleanup_queue(mq->queue);
		return ret;
	}

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	blk_queue_max_segments(mq->queue, host->max_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
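/*
 * Illustrative sketch (not part of the original file): how a caller such
 * as the mmc_block driver might pair mmc_init_queue() with
 * mmc_cleanup_queue(). Only those two symbols come from this file; the
 * function name, the lock, and the kzalloc'd wrapper are hypothetical
 * (the real driver embeds struct mmc_queue in its own per-device data).
 */
#if 0
static int example_attach_queue(struct mmc_card *card)
{
	static DEFINE_SPINLOCK(example_lock);	/* hypothetical queue lock */
	struct mmc_queue *mq;
	int ret;

	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		return -ENOMEM;

	/* Spawns the "mmcqd/<host>" thread and configures queue limits. */
	ret = mmc_init_queue(mq, card, &example_lock, NULL);
	if (ret) {
		kfree(mq);
		return ret;
	}

	/* ... submit I/O through the block layer here ... */

	mmc_cleanup_queue(mq);	/* resumes if suspended, stops mmcqd */
	kfree(mq);
	return 0;
}
#endif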
/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!mq->suspended) {
		mq->suspended |= true;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->suspended) {
		mq->suspended = false;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
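/*
 * Illustrative sketch (not part of the original file): the suspend/resume
 * pair above is intended to bracket power-management transitions, so a
 * caller's PM path might look like this. The function names below are
 * hypothetical; only mmc_queue_suspend() and mmc_queue_resume() are real
 * symbols defined in this file.
 */
#if 0
static int example_pm_suspend(struct mmc_queue *mq)
{
	mmc_queue_suspend(mq);	/* stops the block queue, parks mmcqd */
	return 0;
}

static void example_pm_resume(struct mmc_queue *mq)
{
	mmc_queue_resume(mq);	/* restarts dispatch, wakes mmcqd */
}
#endif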