---
---
r: 142035
b: refs/heads/master
c: a7d878a
h: refs/heads/master
i:
  142033: 2bc013e
  142031: 5f64dda
v: v3
Trent Piepho authored and Richard Purdie committed Apr 6, 2009
1 parent a386f95 commit 5424ba3
Showing 17 changed files with 328 additions and 225 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 1aa2a7cc6fd7b5c86681a6ae9dfd1072c261a435
+refs/heads/master: a7d878af94b223013a48078e0c8c0a654c24a057
46 changes: 37 additions & 9 deletions trunk/Documentation/powerpc/dts-bindings/gpio/led.txt
@@ -1,15 +1,43 @@
-LED connected to GPIO
+LEDs connected to GPIO lines
 
 Required properties:
-- compatible : should be "gpio-led".
-- label : (optional) the label for this LED. If omitted, the label is
+- compatible : should be "gpio-leds".
+
+Each LED is represented as a sub-node of the gpio-leds device. Each
+node's name represents the name of the corresponding LED.
+
+LED sub-node properties:
+- gpios : Should specify the LED's GPIO, see "Specifying GPIO information
+  for devices" in Documentation/powerpc/booting-without-of.txt. Active
+  low LEDs should be indicated using flags in the GPIO specifier.
+- label : (optional) The label for this LED. If omitted, the label is
   taken from the node name (excluding the unit address).
-- gpios : should specify LED GPIO.
+- linux,default-trigger : (optional) This parameter, if present, is a
+  string defining the trigger assigned to the LED. Current triggers are:
+    "backlight" - LED will act as a back-light, controlled by the framebuffer
+                  system
+    "default-on" - LED will turn on
+    "heartbeat" - LED "double" flashes at a load average based rate
+    "ide-disk" - LED indicates disk activity
+    "timer" - LED flashes at a fixed, configurable rate
 
-Example:
+Examples:
 
-led@0 {
-	compatible = "gpio-led";
-	label = "hdd";
-	gpios = <&mcu_pio 0 1>;
+leds {
+	compatible = "gpio-leds";
+	hdd {
+		label = "IDE Activity";
+		gpios = <&mcu_pio 0 1>; /* Active low */
+		linux,default-trigger = "ide-disk";
+	};
 };
+
+run-control {
+	compatible = "gpio-leds";
+	red {
+		gpios = <&mpc8572 6 0>;
+	};
+	green {
+		gpios = <&mpc8572 7 0>;
+	};
+}
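As background for readers of this binding, here is a minimal sketch of how an of_platform driver could walk the gpio-leds sub-nodes described above. It is not code from this commit; the function name of_gpio_leds_parse() is hypothetical, and error handling is reduced to skipping malformed nodes. It assumes the of_get_gpio_flags()/of_get_property() APIs available in this era's kernels.

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

/* Hypothetical helper: walk each sub-node of a gpio-leds device node
 * and report the properties defined by the binding above. */
static int of_gpio_leds_parse(struct device_node *np)
{
	struct device_node *child;

	for_each_child_of_node(np, child) {
		enum of_gpio_flags flags;
		/* "gpios" is required; flags carry the active-low bit */
		int gpio = of_get_gpio_flags(child, 0, &flags);
		/* "label" and "linux,default-trigger" are optional */
		const char *label = of_get_property(child, "label", NULL);
		const char *trig =
			of_get_property(child, "linux,default-trigger", NULL);

		if (gpio < 0)
			continue;	/* skip nodes without a usable GPIO */
		if (!label)
			label = child->name;	/* fall back to node name */

		pr_info("LED %s: gpio %d%s, trigger %s\n", label, gpio,
			(flags & OF_GPIO_ACTIVE_LOW) ? " (active low)" : "",
			trig ? trig : "none");
	}
	return 0;
}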
85 changes: 37 additions & 48 deletions trunk/block/blk-core.c
@@ -484,11 +484,11 @@ static int blk_init_free_list(struct request_queue *q)
 {
 	struct request_list *rl = &q->rq;
 
-	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
-	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
+	rl->count[READ] = rl->count[WRITE] = 0;
+	rl->starved[READ] = rl->starved[WRITE] = 0;
 	rl->elvpriv = 0;
-	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
-	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
+	init_waitqueue_head(&rl->wait[READ]);
+	init_waitqueue_head(&rl->wait[WRITE]);
 
 	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
 				mempool_free_slab, request_cachep, q->node);
@@ -699,37 +699,37 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
 	ioc->last_waited = jiffies;
 }
 
-static void __freed_request(struct request_queue *q, int sync)
+static void __freed_request(struct request_queue *q, int rw)
 {
 	struct request_list *rl = &q->rq;
 
-	if (rl->count[sync] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, sync);
+	if (rl->count[rw] < queue_congestion_off_threshold(q))
+		blk_clear_queue_congested(q, rw);
 
-	if (rl->count[sync] + 1 <= q->nr_requests) {
-		if (waitqueue_active(&rl->wait[sync]))
-			wake_up(&rl->wait[sync]);
+	if (rl->count[rw] + 1 <= q->nr_requests) {
+		if (waitqueue_active(&rl->wait[rw]))
+			wake_up(&rl->wait[rw]);
 
-		blk_clear_queue_full(q, sync);
+		blk_clear_queue_full(q, rw);
 	}
 }
 
 /*
  * A request has just been released. Account for it, update the full and
  * congestion status, wake up any waiters. Called under q->queue_lock.
  */
-static void freed_request(struct request_queue *q, int sync, int priv)
+static void freed_request(struct request_queue *q, int rw, int priv)
 {
 	struct request_list *rl = &q->rq;
 
-	rl->count[sync]--;
+	rl->count[rw]--;
 	if (priv)
 		rl->elvpriv--;
 
-	__freed_request(q, sync);
+	__freed_request(q, rw);
 
-	if (unlikely(rl->starved[sync ^ 1]))
-		__freed_request(q, sync ^ 1);
+	if (unlikely(rl->starved[rw ^ 1]))
+		__freed_request(q, rw ^ 1);
 }
 
 /*
@@ -743,25 +743,25 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
-	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	const int rw = rw_flags & 0x01;
 	int may_queue, priv;
 
 	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
-	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
-		if (rl->count[is_sync]+1 >= q->nr_requests) {
+	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
+		if (rl->count[rw]+1 >= q->nr_requests) {
 			ioc = current_io_context(GFP_ATOMIC, q->node);
 			/*
 			 * The queue will fill after this allocation, so set
 			 * it as full, and mark this process as "batching".
 			 * This process will be allowed to complete a batch of
 			 * requests, others will be blocked.
 			 */
-			if (!blk_queue_full(q, is_sync)) {
+			if (!blk_queue_full(q, rw)) {
 				ioc_set_batching(q, ioc);
-				blk_set_queue_full(q, is_sync);
+				blk_set_queue_full(q, rw);
 			} else {
 				if (may_queue != ELV_MQUEUE_MUST
 						&& !ioc_batching(q, ioc)) {
@@ -774,19 +774,19 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 				}
 			}
 		}
-		blk_set_queue_congested(q, is_sync);
+		blk_set_queue_congested(q, rw);
 	}
 
 	/*
 	 * Only allow batching queuers to allocate up to 50% over the defined
 	 * limit of requests, otherwise we could have thousands of requests
 	 * allocated with any setting of ->nr_requests
 	 */
-	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
+	if (rl->count[rw] >= (3 * q->nr_requests / 2))
 		goto out;
 
-	rl->count[is_sync]++;
-	rl->starved[is_sync] = 0;
+	rl->count[rw]++;
+	rl->starved[rw] = 0;
 
 	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
 	if (priv)
@@ -804,7 +804,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 		 * wait queue, but this is pretty rare.
 		 */
 		spin_lock_irq(q->queue_lock);
-		freed_request(q, is_sync, priv);
+		freed_request(q, rw, priv);
 
 		/*
 		 * in the very unlikely event that allocation failed and no
@@ -814,8 +814,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 		 * rq mempool into READ and WRITE
 		 */
 rq_starved:
-		if (unlikely(rl->count[is_sync] == 0))
-			rl->starved[is_sync] = 1;
+		if (unlikely(rl->count[rw] == 0))
+			rl->starved[rw] = 1;
 
 		goto out;
 	}
@@ -829,7 +829,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	trace_block_getrq(q, bio, rw_flags & 1);
+	trace_block_getrq(q, bio, rw);
 out:
 	return rq;
 }
@@ -843,7 +843,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 					struct bio *bio)
 {
-	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	const int rw = rw_flags & 0x01;
 	struct request *rq;
 
 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
@@ -852,10 +852,10 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		struct io_context *ioc;
 		struct request_list *rl = &q->rq;
 
-		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
+		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		trace_block_sleeprq(q, bio, rw_flags & 1);
+		trace_block_sleeprq(q, bio, rw);
 
 		__generic_unplug_device(q);
 		spin_unlock_irq(q->queue_lock);
@@ -871,7 +871,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 			ioc_set_batching(q, ioc);
 
 		spin_lock_irq(q->queue_lock);
-		finish_wait(&rl->wait[is_sync], &wait);
+		finish_wait(&rl->wait[rw], &wait);
 
 		rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	};
@@ -1070,14 +1070,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 	 * it didn't come out of our reserved rq pools
 	 */
 	if (req->cmd_flags & REQ_ALLOCED) {
-		int is_sync = rq_is_sync(req) != 0;
+		int rw = rq_data_dir(req);
 		int priv = req->cmd_flags & REQ_ELVPRIV;
 
 		BUG_ON(!list_empty(&req->queuelist));
 		BUG_ON(!hlist_unhashed(&req->hash));
 
 		blk_free_request(q, req);
-		freed_request(q, is_sync, priv);
+		freed_request(q, rw, priv);
 	}
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
@@ -1128,8 +1128,6 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 		req->cmd_flags |= REQ_UNPLUG;
 	if (bio_rw_meta(bio))
 		req->cmd_flags |= REQ_RW_META;
-	if (bio_noidle(bio))
-		req->cmd_flags |= REQ_NOIDLE;
 
 	req->errors = 0;
 	req->hard_sector = req->sector = bio->bi_sector;
@@ -1138,15 +1136,6 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	blk_rq_bio_prep(req->q, req, bio);
 }
 
-/*
- * Only disabling plugging for non-rotational devices if it does tagging
- * as well, otherwise we do need the proper merging
- */
-static inline bool queue_should_plug(struct request_queue *q)
-{
-	return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
-}
-
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
 	struct request *req;
@@ -1253,11 +1242,11 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
 	    bio_flagged(bio, BIO_CPU_AFFINE))
 		req->cpu = blk_cpu_to_group(smp_processor_id());
-	if (queue_should_plug(q) && elv_queue_empty(q))
+	if (!blk_queue_nonrot(q) && elv_queue_empty(q))
 		blk_plug_device(q);
 	add_request(q, req);
 out:
-	if (unplug || !queue_should_plug(q))
+	if (unplug || blk_queue_nonrot(q))
 		__generic_unplug_device(q);
 	spin_unlock_irq(q->queue_lock);
 	return 0;
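The subtle part of the freed_request() logic above is the rw ^ 1 step: when a request goes back to the mempool, the opposite direction may have been starved while this one held all the requests, so its waiters get a wakeup too. Below is a user-space toy that mirrors just that accounting; the names, pool size, and printf stand-ins for wakeups are illustrative, not the kernel's, and the congestion/full bookkeeping is omitted.

#include <stdio.h>

enum { READ = 0, WRITE = 1 };	/* list indices, as in struct request_list */
#define NR_REQUESTS 128		/* stand-in for q->nr_requests */

struct toy_request_list {
	int count[2];
	int starved[2];
};

/* Stand-in for __freed_request(): wake waiters if there is room again. */
static void toy_freed_one(struct toy_request_list *rl, int rw)
{
	if (rl->count[rw] + 1 <= NR_REQUESTS)
		printf("wake %s waiters\n", rw == WRITE ? "WRITE" : "READ");
}

/* Mirrors freed_request(): account, wake this list, then check whether
 * the *other* direction starved while this one drained the pool. */
static void toy_freed_request(struct toy_request_list *rl, int rw)
{
	rl->count[rw]--;
	toy_freed_one(rl, rw);
	if (rl->starved[rw ^ 1])	/* rw ^ 1 flips READ <-> WRITE */
		toy_freed_one(rl, rw ^ 1);
}

int main(void)
{
	struct toy_request_list rl = {
		.count = { 1, 64 },
		.starved = { 0, 1 },	/* a WRITE allocation failed earlier */
	};

	toy_freed_request(&rl, READ);	/* frees a READ, also wakes WRITE */
	return 0;
}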
40 changes: 20 additions & 20 deletions trunk/block/blk-sysfs.c
@@ -48,28 +48,28 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	q->nr_requests = nr;
 	blk_queue_congestion_threshold(q);
 
-	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
-		blk_set_queue_congested(q, BLK_RW_SYNC);
-	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, BLK_RW_SYNC);
-
-	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
-		blk_set_queue_congested(q, BLK_RW_ASYNC);
-	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, BLK_RW_ASYNC);
-
-	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
-		blk_set_queue_full(q, BLK_RW_SYNC);
-	} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
-		blk_clear_queue_full(q, BLK_RW_SYNC);
-		wake_up(&rl->wait[BLK_RW_SYNC]);
+	if (rl->count[READ] >= queue_congestion_on_threshold(q))
+		blk_set_queue_congested(q, READ);
+	else if (rl->count[READ] < queue_congestion_off_threshold(q))
+		blk_clear_queue_congested(q, READ);
+
+	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
+		blk_set_queue_congested(q, WRITE);
+	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
+		blk_clear_queue_congested(q, WRITE);
+
+	if (rl->count[READ] >= q->nr_requests) {
+		blk_set_queue_full(q, READ);
+	} else if (rl->count[READ]+1 <= q->nr_requests) {
+		blk_clear_queue_full(q, READ);
+		wake_up(&rl->wait[READ]);
 	}
 
-	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
-		blk_set_queue_full(q, BLK_RW_ASYNC);
-	} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
-		blk_clear_queue_full(q, BLK_RW_ASYNC);
-		wake_up(&rl->wait[BLK_RW_ASYNC]);
+	if (rl->count[WRITE] >= q->nr_requests) {
+		blk_set_queue_full(q, WRITE);
+	} else if (rl->count[WRITE]+1 <= q->nr_requests) {
+		blk_clear_queue_full(q, WRITE);
+		wake_up(&rl->wait[WRITE]);
 	}
 	spin_unlock_irq(q->queue_lock);
 	return ret;
4 changes: 1 addition & 3 deletions trunk/block/cfq-iosched.c
@@ -1992,10 +1992,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 		}
 		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
 			cfq_slice_expired(cfqd, 1);
-		else if (sync && !rq_noidle(rq) &&
-			 RB_EMPTY_ROOT(&cfqq->sort_list)) {
+		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
 			cfq_arm_slice_timer(cfqd);
-		}
 	}
 
 	if (!cfqd->rq_in_driver)
2 changes: 1 addition & 1 deletion trunk/block/elevator.c
@@ -677,7 +677,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 	}
 
 	if (unplug_it && blk_queue_plugged(q)) {
-		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
+		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
 			  - q->in_flight;
 
 		if (nrq >= q->unplug_thresh)
21 changes: 20 additions & 1 deletion trunk/drivers/leds/Kconfig
@@ -117,7 +117,26 @@ config LEDS_GPIO
 	help
 	  This option enables support for the LEDs connected to GPIO
 	  outputs. To be useful the particular board must have LEDs
-	  and they must be connected to the GPIO lines.
+	  and they must be connected to the GPIO lines. The LEDs must be
+	  defined as platform devices and/or OpenFirmware platform devices.
+	  The code to use these bindings can be selected below.
+
+config LEDS_GPIO_PLATFORM
+	bool "Platform device bindings for GPIO LEDs"
+	depends on LEDS_GPIO
+	default y
+	help
+	  Let the leds-gpio driver drive LEDs which have been defined as
+	  platform devices. If you don't know what this means, say yes.
+
+config LEDS_GPIO_OF
+	bool "OpenFirmware platform device bindings for GPIO LEDs"
+	depends on LEDS_GPIO && OF_DEVICE
+	default y
+	help
+	  Let the leds-gpio driver drive LEDs which have been defined as
+	  of_platform devices. For instance, LEDs which are listed in a "dts"
+	  file.
 
 config LEDS_CLEVO_MAIL
 	tristate "Mail LED on Clevo notebook (EXPERIMENTAL)"
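As a companion to the LEDS_GPIO_PLATFORM help text, this is a sketch of what "defined as platform devices" means in a board file. The GPIO numbers, device names, and triggers are made up for illustration; the structures are the gpio_led / gpio_led_platform_data types from include/linux/leds.h that the leds-gpio driver binds against.

#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/platform_device.h>

/* Illustrative board-file definition; GPIO numbers and names are made up. */
static struct gpio_led board_leds[] = {
	{
		.name		= "board:red:status",
		.gpio		= 6,
		.default_trigger = "heartbeat",
	},
	{
		.name		= "board:green:disk",
		.gpio		= 7,
		.active_low	= 1,
		.default_trigger = "ide-disk",
	},
};

static struct gpio_led_platform_data board_led_data = {
	.num_leds	= ARRAY_SIZE(board_leds),
	.leds		= board_leds,
};

static struct platform_device board_led_device = {
	.name	= "leds-gpio",	/* matches the leds-gpio platform driver */
	.id	= -1,
	.dev	= {
		.platform_data = &board_led_data,
	},
};

Registering board_led_device with platform_device_register() during board init is what the LEDS_GPIO_PLATFORM path picks up; the LEDS_GPIO_OF path instead drives of_platform devices created from compatible = "gpio-leds" nodes like the ones documented earlier in this commit.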