From 5424ba35b9b6000cea7190969aabd4970986b1e5 Mon Sep 17 00:00:00 2001
From: Trent Piepho
Date: Sat, 10 Jan 2009 17:26:01 +0000
Subject: [PATCH]

--- yaml ---
r: 142035
b: refs/heads/master
c: a7d878af94b223013a48078e0c8c0a654c24a057
h: refs/heads/master
i:
  142033: 2bc013e1e65928e27bb833f2c2c4258e2230a732
  142031: 5f64dda832d5aadf16da45d0f58c204ad08753d2
v: v3
---
 [refs]                                 |   2 +-
 .../powerpc/dts-bindings/gpio/led.txt  |  46 +++-
 trunk/block/blk-core.c                 |  85 ++++----
 trunk/block/blk-sysfs.c                |  40 ++--
 trunk/block/cfq-iosched.c              |   4 +-
 trunk/block/elevator.c                 |   2 +-
 trunk/drivers/leds/Kconfig             |  21 +-
 trunk/drivers/leds/leds-gpio.c         | 205 ++++++++++++++----
 trunk/fs/buffer.c                      |  22 +-
 trunk/fs/direct-io.c                   |   2 +-
 trunk/fs/jbd/commit.c                  |   7 +-
 trunk/fs/jbd2/commit.c                 |  13 +-
 trunk/include/linux/backing-dev.h      |  12 +-
 trunk/include/linux/bio.h              |  19 +-
 trunk/include/linux/blkdev.h           |  55 ++---
 trunk/include/linux/fs.h               |   8 +-
 trunk/mm/backing-dev.c                 |  10 +-
 17 files changed, 328 insertions(+), 225 deletions(-)

diff --git a/[refs] b/[refs]
index c99aa4f2f6fa..4294bd40e359 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 1aa2a7cc6fd7b5c86681a6ae9dfd1072c261a435
+refs/heads/master: a7d878af94b223013a48078e0c8c0a654c24a057

diff --git a/trunk/Documentation/powerpc/dts-bindings/gpio/led.txt b/trunk/Documentation/powerpc/dts-bindings/gpio/led.txt
index ff51f4c0fa9d..4fe14deedc0a 100644
--- a/trunk/Documentation/powerpc/dts-bindings/gpio/led.txt
+++ b/trunk/Documentation/powerpc/dts-bindings/gpio/led.txt
@@ -1,15 +1,43 @@
-LED connected to GPIO
+LEDs connected to GPIO lines
 
 Required properties:
-- compatible : should be "gpio-led".
-- label : (optional) the label for this LED. If omitted, the label is
+- compatible : should be "gpio-leds".
+
+Each LED is represented as a sub-node of the gpio-leds device. Each
+node's name represents the name of the corresponding LED.
+
+LED sub-node properties:
+- gpios : Should specify the LED's GPIO, see "Specifying GPIO information
+  for devices" in Documentation/powerpc/booting-without-of.txt. Active
+  low LEDs should be indicated using flags in the GPIO specifier.
+- label : (optional) The label for this LED. If omitted, the label is
   taken from the node name (excluding the unit address).
-- gpios : should specify LED GPIO.
+- linux,default-trigger : (optional) This parameter, if present, is a
+  string defining the trigger assigned to the LED. Current triggers are:
+    "backlight" - LED will act as a back-light, controlled by the framebuffer
+      system
+    "default-on" - LED will turn on
+    "heartbeat" - LED "double" flashes at a load average based rate
+    "ide-disk" - LED indicates disk activity
+    "timer" - LED flashes at a fixed, configurable rate
 
-Example:
+Examples:
 
-led@0 {
-	compatible = "gpio-led";
-	label = "hdd";
-	gpios = <&mcu_pio 0 1>;
+leds {
+	compatible = "gpio-leds";
+	hdd {
+		label = "IDE Activity";
+		gpios = <&mcu_pio 0 1>; /* Active low */
+		linux,default-trigger = "ide-disk";
+	};
 };
+
+run-control {
+	compatible = "gpio-leds";
+	red {
+		gpios = <&mpc8572 6 0>;
+	};
+	green {
+		gpios = <&mpc8572 7 0>;
+	};
+};
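The DTS examples above have a non-OpenFirmware counterpart: the same LED can
be handed to leds-gpio as platform data. A minimal board-file sketch against
the platform binding follows; the GPIO number is invented for illustration,
everything else uses the gpio_led structures this patch works with.

#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/platform_device.h>

/* Hypothetical board file: the equivalent of the "hdd" node above,
 * expressed as platform data.  GPIO 0 on the example controller is
 * assumed to be active low, matching the DTS flags cell. */
static struct gpio_led board_leds[] = {
	{
		.name		= "hdd",
		.default_trigger = "ide-disk",
		.gpio		= 0,	/* assumed GPIO number */
		.active_low	= 1,
	},
};

static struct gpio_led_platform_data board_led_data = {
	.num_leds	= ARRAY_SIZE(board_leds),
	.leds		= board_leds,
};

static struct platform_device board_led_device = {
	.name	= "leds-gpio",	/* matches the leds-gpio platform driver */
	.id	= -1,
	.dev	= {
		.platform_data = &board_led_data,
	},
};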
diff --git a/trunk/block/blk-core.c b/trunk/block/blk-core.c
index 25572802dac2..996ed906d8ca 100644
--- a/trunk/block/blk-core.c
+++ b/trunk/block/blk-core.c
@@ -484,11 +484,11 @@ static int blk_init_free_list(struct request_queue *q)
 {
 	struct request_list *rl = &q->rq;
 
-	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
-	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
+	rl->count[READ] = rl->count[WRITE] = 0;
+	rl->starved[READ] = rl->starved[WRITE] = 0;
 	rl->elvpriv = 0;
-	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
-	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
+	init_waitqueue_head(&rl->wait[READ]);
+	init_waitqueue_head(&rl->wait[WRITE]);
 
 	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
 				mempool_free_slab, request_cachep, q->node);
@@ -699,18 +699,18 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
 	ioc->last_waited = jiffies;
 }
 
-static void __freed_request(struct request_queue *q, int sync)
+static void __freed_request(struct request_queue *q, int rw)
 {
 	struct request_list *rl = &q->rq;
 
-	if (rl->count[sync] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, sync);
+	if (rl->count[rw] < queue_congestion_off_threshold(q))
+		blk_clear_queue_congested(q, rw);
 
-	if (rl->count[sync] + 1 <= q->nr_requests) {
-		if (waitqueue_active(&rl->wait[sync]))
-			wake_up(&rl->wait[sync]);
+	if (rl->count[rw] + 1 <= q->nr_requests) {
+		if (waitqueue_active(&rl->wait[rw]))
+			wake_up(&rl->wait[rw]);
 
-		blk_clear_queue_full(q, sync);
+		blk_clear_queue_full(q, rw);
 	}
 }
 
@@ -718,18 +718,18 @@ static void __freed_request(struct request_queue *q, int sync)
  * A request has just been released. Account for it, update the full and
  * congestion status, wake up any waiters. Called under q->queue_lock.
  */
-static void freed_request(struct request_queue *q, int sync, int priv)
+static void freed_request(struct request_queue *q, int rw, int priv)
 {
 	struct request_list *rl = &q->rq;
 
-	rl->count[sync]--;
+	rl->count[rw]--;
 	if (priv)
 		rl->elvpriv--;
 
-	__freed_request(q, sync);
+	__freed_request(q, rw);
 
-	if (unlikely(rl->starved[sync ^ 1]))
-		__freed_request(q, sync ^ 1);
+	if (unlikely(rl->starved[rw ^ 1]))
+		__freed_request(q, rw ^ 1);
 }
 
 /*
@@ -743,15 +743,15 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
-	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	const int rw = rw_flags & 0x01;
 	int may_queue, priv;
 
 	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
-	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
-		if (rl->count[is_sync]+1 >= q->nr_requests) {
+	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
+		if (rl->count[rw]+1 >= q->nr_requests) {
 			ioc = current_io_context(GFP_ATOMIC, q->node);
 			/*
 			 * The queue will fill after this allocation, so set
@@ -759,9 +759,9 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 			 * This process will be allowed to complete a batch of
 			 * requests, others will be blocked.
 			 */
-			if (!blk_queue_full(q, is_sync)) {
+			if (!blk_queue_full(q, rw)) {
 				ioc_set_batching(q, ioc);
-				blk_set_queue_full(q, is_sync);
+				blk_set_queue_full(q, rw);
 			} else {
 				if (may_queue != ELV_MQUEUE_MUST
 						&& !ioc_batching(q, ioc)) {
@@ -774,7 +774,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 			}
 		}
 	}
-	blk_set_queue_congested(q, is_sync);
+	blk_set_queue_congested(q, rw);
 }
 
 	/*
@@ -782,11 +782,11 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	 * limit of requests, otherwise we could have thousands of requests
 	 * allocated with any setting of ->nr_requests
 	 */
-	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
+	if (rl->count[rw] >= (3 * q->nr_requests / 2))
 		goto out;
 
-	rl->count[is_sync]++;
-	rl->starved[is_sync] = 0;
+	rl->count[rw]++;
+	rl->starved[rw] = 0;
 
 	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
 	if (priv)
@@ -804,7 +804,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 		 * wait queue, but this is pretty rare.
 		 */
 		spin_lock_irq(q->queue_lock);
-		freed_request(q, is_sync, priv);
+		freed_request(q, rw, priv);
 
 		/*
 		 * in the very unlikely event that allocation failed and no
@@ -814,8 +814,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 		 * rq mempool into READ and WRITE
 		 */
rq_starved:
-		if (unlikely(rl->count[is_sync] == 0))
-			rl->starved[is_sync] = 1;
+		if (unlikely(rl->count[rw] == 0))
+			rl->starved[rw] = 1;
 
 		goto out;
 	}
@@ -829,7 +829,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	trace_block_getrq(q, bio, rw_flags & 1);
+	trace_block_getrq(q, bio, rw);
out:
 	return rq;
 }
@@ -843,7 +843,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 					struct bio *bio)
 {
-	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	const int rw = rw_flags & 0x01;
 	struct request *rq;
 
 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
@@ -852,10 +852,10 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		struct io_context *ioc;
 		struct request_list *rl = &q->rq;
 
-		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
+		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		trace_block_sleeprq(q, bio, rw_flags & 1);
+		trace_block_sleeprq(q, bio, rw);
 
 		__generic_unplug_device(q);
 		spin_unlock_irq(q->queue_lock);
@@ -871,7 +871,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		ioc_set_batching(q, ioc);
 
 		spin_lock_irq(q->queue_lock);
-		finish_wait(&rl->wait[is_sync], &wait);
+		finish_wait(&rl->wait[rw], &wait);
 
 		rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	};
@@ -1070,14 +1070,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 	 * it didn't come out of our reserved rq pools
 	 */
 	if (req->cmd_flags & REQ_ALLOCED) {
-		int is_sync = rq_is_sync(req) != 0;
+		int rw = rq_data_dir(req);
 		int priv = req->cmd_flags & REQ_ELVPRIV;
 
 		BUG_ON(!list_empty(&req->queuelist));
 		BUG_ON(!hlist_unhashed(&req->hash));
 
 		blk_free_request(q, req);
-		freed_request(q, is_sync, priv);
+		freed_request(q, rw, priv);
 	}
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
@@ -1128,8 +1128,6 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 		req->cmd_flags |= REQ_UNPLUG;
 	if (bio_rw_meta(bio))
 		req->cmd_flags |= REQ_RW_META;
-	if (bio_noidle(bio))
-		req->cmd_flags |= REQ_NOIDLE;
 
 	req->errors = 0;
 	req->hard_sector = req->sector = bio->bi_sector;
@@ -1138,15 +1136,6 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	blk_rq_bio_prep(req->q, req, bio);
 }
 
-/*
- * Only disabling plugging for non-rotational devices if it does tagging
- * as well, otherwise we do need the proper merging
- */
-static inline bool queue_should_plug(struct request_queue *q)
-{
-	return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
-}
-
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
 	struct request *req;
@@ -1253,11 +1242,11 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
 	    bio_flagged(bio, BIO_CPU_AFFINE))
 		req->cpu = blk_cpu_to_group(smp_processor_id());
-	if (queue_should_plug(q) && elv_queue_empty(q))
+	if (!blk_queue_nonrot(q) && elv_queue_empty(q))
 		blk_plug_device(q);
 	add_request(q, req);
out:
-	if (unplug || !queue_should_plug(q))
+	if (unplug || blk_queue_nonrot(q))
 		__generic_unplug_device(q);
 	spin_unlock_irq(q->queue_lock);
 	return 0;
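Taken together, the blk-core.c hunks re-key the request_list by data
direction (READ/WRITE) instead of sync/async. A standalone model of that
accounting follows; it is not kernel code, and the threshold value is
invented, but it shows the cross-direction kick that freed_request() does
via starved[rw ^ 1].

#include <stdio.h>

/* Standalone model of the restored accounting: one slot per data
 * direction, and a freed request in one direction can unstick a
 * starved allocator in the other. */
enum { MODEL_READ = 0, MODEL_WRITE = 1 };

struct request_list_model {
	int count[2];	/* allocated requests per direction */
	int starved[2];	/* set when an allocation found the pool empty */
};

#define MODEL_CONGESTION_OFF	103	/* invented threshold */

static void freed_request_model(struct request_list_model *rl, int rw)
{
	rl->count[rw]--;
	if (rl->count[rw] < MODEL_CONGESTION_OFF)
		printf("direction %d uncongested\n", rw);
	if (rl->starved[rw ^ 1])
		printf("kick starved direction %d\n", rw ^ 1);
}

int main(void)
{
	struct request_list_model rl = { .count = { 110, 20 } };

	rl.starved[MODEL_WRITE] = 1;
	freed_request_model(&rl, MODEL_READ);	/* a freed READ helps WRITE */
	return 0;
}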
diff --git a/trunk/block/blk-sysfs.c b/trunk/block/blk-sysfs.c
index 3ff9bba3379a..e29ddfc73cf4 100644
--- a/trunk/block/blk-sysfs.c
+++ b/trunk/block/blk-sysfs.c
@@ -48,28 +48,28 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	q->nr_requests = nr;
 	blk_queue_congestion_threshold(q);
 
-	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
-		blk_set_queue_congested(q, BLK_RW_SYNC);
-	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, BLK_RW_SYNC);
-
-	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
-		blk_set_queue_congested(q, BLK_RW_ASYNC);
-	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, BLK_RW_ASYNC);
-
-	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
-		blk_set_queue_full(q, BLK_RW_SYNC);
-	} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
-		blk_clear_queue_full(q, BLK_RW_SYNC);
-		wake_up(&rl->wait[BLK_RW_SYNC]);
+	if (rl->count[READ] >= queue_congestion_on_threshold(q))
+		blk_set_queue_congested(q, READ);
+	else if (rl->count[READ] < queue_congestion_off_threshold(q))
+		blk_clear_queue_congested(q, READ);
+
+	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
+		blk_set_queue_congested(q, WRITE);
+	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
+		blk_clear_queue_congested(q, WRITE);
+
+	if (rl->count[READ] >= q->nr_requests) {
+		blk_set_queue_full(q, READ);
+	} else if (rl->count[READ]+1 <= q->nr_requests) {
+		blk_clear_queue_full(q, READ);
+		wake_up(&rl->wait[READ]);
 	}
 
-	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
-		blk_set_queue_full(q, BLK_RW_ASYNC);
-	} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
-		blk_clear_queue_full(q, BLK_RW_ASYNC);
-		wake_up(&rl->wait[BLK_RW_ASYNC]);
+	if (rl->count[WRITE] >= q->nr_requests) {
+		blk_set_queue_full(q, WRITE);
+	} else if (rl->count[WRITE]+1 <= q->nr_requests) {
+		blk_clear_queue_full(q, WRITE);
+		wake_up(&rl->wait[WRITE]);
 	}
 	spin_unlock_irq(q->queue_lock);
 	return ret;

diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c
index 9e809345f71a..664ebfd092ec 100644
--- a/trunk/block/cfq-iosched.c
+++ b/trunk/block/cfq-iosched.c
@@ -1992,10 +1992,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 		}
 		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
 			cfq_slice_expired(cfqd, 1);
-		else if (sync && !rq_noidle(rq) &&
-			 RB_EMPTY_ROOT(&cfqq->sort_list)) {
+		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
 			cfq_arm_slice_timer(cfqd);
-		}
 	}
 
 	if (!cfqd->rq_in_driver)

diff --git a/trunk/block/elevator.c b/trunk/block/elevator.c
index ca6788a0195a..98259eda0ef6 100644
--- a/trunk/block/elevator.c
+++ b/trunk/block/elevator.c
@@ -677,7 +677,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 	}
 
 	if (unplug_it && blk_queue_plugged(q)) {
-		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
+		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
 			  - q->in_flight;
 
 		if (nrq >= q->unplug_thresh)

diff --git a/trunk/drivers/leds/Kconfig b/trunk/drivers/leds/Kconfig
index d9db17624f12..90d39e5803cd 100644
--- a/trunk/drivers/leds/Kconfig
+++ b/trunk/drivers/leds/Kconfig
@@ -117,7 +117,26 @@ config LEDS_GPIO
 	help
 	  This option enables support for the LEDs connected to GPIO
 	  outputs. To be useful the particular board must have LEDs
-	  and they must be connected to the GPIO lines.
+	  and they must be connected to the GPIO lines. The LEDs must be
+	  defined as platform devices and/or OpenFirmware platform devices.
+	  The code to use these bindings can be selected below.
+
+config LEDS_GPIO_PLATFORM
+	bool "Platform device bindings for GPIO LEDs"
+	depends on LEDS_GPIO
+	default y
+	help
+	  Let the leds-gpio driver drive LEDs which have been defined as
+	  platform devices.  If you don't know what this means, say yes.
+
+config LEDS_GPIO_OF
+	bool "OpenFirmware platform device bindings for GPIO LEDs"
+	depends on LEDS_GPIO && OF_DEVICE
+	default y
+	help
+	  Let the leds-gpio driver drive LEDs which have been defined as
+	  of_platform devices.  For instance, LEDs which are listed in a "dts"
+	  file.
 
 config LEDS_CLEVO_MAIL
 	tristate "Mail LED on Clevo notebook (EXPERIMENTAL)"
diff --git a/trunk/drivers/leds/leds-gpio.c b/trunk/drivers/leds/leds-gpio.c
index 2e3df08b649b..f8bcf98fc15c 100644
--- a/trunk/drivers/leds/leds-gpio.c
+++ b/trunk/drivers/leds/leds-gpio.c
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 2007 8D Technologies inc.
  * Raphael Assenat
+ * Copyright (C) 2008 Freescale Semiconductor, Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -71,11 +72,57 @@ static int gpio_blink_set(struct led_classdev *led_cdev,
 	return led_dat->platform_gpio_blink_set(led_dat->gpio, delay_on, delay_off);
 }
 
+static int __devinit create_gpio_led(const struct gpio_led *template,
+	struct gpio_led_data *led_dat, struct device *parent,
+	int (*blink_set)(unsigned, unsigned long *, unsigned long *))
+{
+	int ret;
+
+	ret = gpio_request(template->gpio, template->name);
+	if (ret < 0)
+		return ret;
+
+	led_dat->cdev.name = template->name;
+	led_dat->cdev.default_trigger = template->default_trigger;
+	led_dat->gpio = template->gpio;
+	led_dat->can_sleep = gpio_cansleep(template->gpio);
+	led_dat->active_low = template->active_low;
+	if (blink_set) {
+		led_dat->platform_gpio_blink_set = blink_set;
+		led_dat->cdev.blink_set = gpio_blink_set;
+	}
+	led_dat->cdev.brightness_set = gpio_led_set;
+	led_dat->cdev.brightness = LED_OFF;
+	led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+
+	ret = gpio_direction_output(led_dat->gpio, led_dat->active_low);
+	if (ret < 0)
+		goto err;
+
+	INIT_WORK(&led_dat->work, gpio_led_work);
+
+	ret = led_classdev_register(parent, &led_dat->cdev);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+err:
+	gpio_free(led_dat->gpio);
+	return ret;
+}
+
+static void delete_gpio_led(struct gpio_led_data *led)
+{
+	led_classdev_unregister(&led->cdev);
+	cancel_work_sync(&led->work);
+	gpio_free(led->gpio);
+}
+
+#ifdef CONFIG_LEDS_GPIO_PLATFORM
 static int gpio_led_probe(struct platform_device *pdev)
 {
 	struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
-	struct gpio_led *cur_led;
-	struct gpio_led_data *leds_data, *led_dat;
+	struct gpio_led_data *leds_data;
 	int i, ret = 0;
 
 	if (!pdata)
@@ -87,35 +134,10 @@ static int gpio_led_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	for (i = 0; i < pdata->num_leds; i++) {
-		cur_led = &pdata->leds[i];
-		led_dat = &leds_data[i];
-
-		ret = gpio_request(cur_led->gpio, cur_led->name);
+		ret = create_gpio_led(&pdata->leds[i], &leds_data[i],
+				      &pdev->dev, pdata->gpio_blink_set);
 		if (ret < 0)
 			goto err;
-
-		led_dat->cdev.name = cur_led->name;
-		led_dat->cdev.default_trigger = cur_led->default_trigger;
-		led_dat->gpio = cur_led->gpio;
-		led_dat->can_sleep = gpio_cansleep(cur_led->gpio);
-		led_dat->active_low = cur_led->active_low;
-		if (pdata->gpio_blink_set) {
-			led_dat->platform_gpio_blink_set = pdata->gpio_blink_set;
-			led_dat->cdev.blink_set = gpio_blink_set;
-		}
-
-		led_dat->cdev.brightness_set = gpio_led_set;
-		led_dat->cdev.brightness = LED_OFF;
-		led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
-
-		gpio_direction_output(led_dat->gpio, led_dat->active_low);
-
-		INIT_WORK(&led_dat->work, gpio_led_work);
-
-		ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
-		if (ret < 0) {
-			gpio_free(led_dat->gpio);
-			goto err;
-		}
 	}
 
 	platform_set_drvdata(pdev, leds_data);
@@ -123,13 +145,8 @@ static int gpio_led_probe(struct platform_device *pdev)
 	return 0;
 
err:
-	if (i > 0) {
-		for (i = i - 1; i >= 0; i--) {
-			led_classdev_unregister(&leds_data[i].cdev);
-			cancel_work_sync(&leds_data[i].work);
-			gpio_free(leds_data[i].gpio);
-		}
-	}
+	for (i = i - 1; i >= 0; i--)
+		delete_gpio_led(&leds_data[i]);
 
 	kfree(leds_data);
 
@@ -144,11 +161,8 @@ static int __devexit gpio_led_remove(struct platform_device *pdev)
 
 	leds_data = platform_get_drvdata(pdev);
 
-	for (i = 0; i < pdata->num_leds; i++) {
-		led_classdev_unregister(&leds_data[i].cdev);
-		cancel_work_sync(&leds_data[i].work);
-		gpio_free(leds_data[i].gpio);
-	}
+	for (i = 0; i < pdata->num_leds; i++)
+		delete_gpio_led(&leds_data[i]);
 
 	kfree(leds_data);
 
@@ -177,7 +191,112 @@ static void __exit gpio_led_exit(void)
 module_init(gpio_led_init);
 module_exit(gpio_led_exit);
 
-MODULE_AUTHOR("Raphael Assenat ");
+MODULE_ALIAS("platform:leds-gpio");
+#endif /* CONFIG_LEDS_GPIO_PLATFORM */
+
+/* Code to create from OpenFirmware platform devices */
+#ifdef CONFIG_LEDS_GPIO_OF
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+
+struct gpio_led_of_platform_data {
+	int num_leds;
+	struct gpio_led_data led_data[];
+};
+
+static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
+					const struct of_device_id *match)
+{
+	struct device_node *np = ofdev->node, *child;
+	struct gpio_led led;
+	struct gpio_led_of_platform_data *pdata;
+	int count = 0, ret;
+
+	/* count LEDs defined by this device, so we know how much to allocate */
+	for_each_child_of_node(np, child)
+		count++;
+	if (!count)
+		return 0; /* or ENODEV? */
+
+	pdata = kzalloc(sizeof(*pdata) + sizeof(struct gpio_led_data) * count,
+			GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	memset(&led, 0, sizeof(led));
+	for_each_child_of_node(np, child) {
+		enum of_gpio_flags flags;
+
+		led.gpio = of_get_gpio_flags(child, 0, &flags);
+		led.active_low = flags & OF_GPIO_ACTIVE_LOW;
+		led.name = of_get_property(child, "label", NULL) ? : child->name;
+		led.default_trigger =
+			of_get_property(child, "linux,default-trigger", NULL);
+
+		ret = create_gpio_led(&led, &pdata->led_data[pdata->num_leds++],
+				      &ofdev->dev, NULL);
+		if (ret < 0) {
+			of_node_put(child);
+			goto err;
+		}
+	}
+
+	dev_set_drvdata(&ofdev->dev, pdata);
+
+	return 0;
+
+err:
+	for (count = pdata->num_leds - 2; count >= 0; count--)
+		delete_gpio_led(&pdata->led_data[count]);
+
+	kfree(pdata);
+
+	return ret;
+}
+
+static int __devexit of_gpio_leds_remove(struct of_device *ofdev)
+{
+	struct gpio_led_of_platform_data *pdata = dev_get_drvdata(&ofdev->dev);
+	int i;
+
+	for (i = 0; i < pdata->num_leds; i++)
+		delete_gpio_led(&pdata->led_data[i]);
+
+	kfree(pdata);
+
+	dev_set_drvdata(&ofdev->dev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id of_gpio_leds_match[] = {
+	{ .compatible = "gpio-leds", },
+	{},
+};
+
+static struct of_platform_driver of_gpio_leds_driver = {
+	.driver = {
+		.name = "of_gpio_leds",
+		.owner = THIS_MODULE,
+	},
+	.match_table = of_gpio_leds_match,
+	.probe = of_gpio_leds_probe,
+	.remove = __devexit_p(of_gpio_leds_remove),
+};
+
+static int __init of_gpio_leds_init(void)
+{
+	return of_register_platform_driver(&of_gpio_leds_driver);
+}
+module_init(of_gpio_leds_init);
+
+static void __exit of_gpio_leds_exit(void)
+{
+	of_unregister_platform_driver(&of_gpio_leds_driver);
+}
+module_exit(of_gpio_leds_exit);
+#endif
+
+MODULE_AUTHOR("Raphael Assenat , Trent Piepho ");
 MODULE_DESCRIPTION("GPIO LED driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:leds-gpio");
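of_gpio_leds_probe sizes a single allocation for the header plus a trailing
flexible array of per-LED state, then fills slots as LEDs are created. A
minimal standalone sketch of that allocation pattern (plain C; the names
are invented stand-ins for the driver's structures):

#include <stdlib.h>

/* Illustration of the flexible-array-member pattern used above: one
 * allocation covers the header and all per-LED slots.  "item" stands
 * in for struct gpio_led_data. */
struct item {
	int gpio;
};

struct container {
	int num;
	struct item data[];	/* flexible array member */
};

static struct container *container_alloc(int count)
{
	/* mirrors kzalloc(sizeof(*pdata) + sizeof(...) * count, GFP_KERNEL) */
	return calloc(1, sizeof(struct container) +
			 sizeof(struct item) * count);
}

int main(void)
{
	struct container *c = container_alloc(4);

	if (!c)
		return 1;
	c->data[c->num++].gpio = 6;	/* fill slots as LEDs are created */
	free(c);
	return 0;
}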
diff --git a/trunk/fs/buffer.c b/trunk/fs/buffer.c
index 6e35762b6169..5d55a896ff78 100644
--- a/trunk/fs/buffer.c
+++ b/trunk/fs/buffer.c
@@ -737,7 +737,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
-	struct address_space *mapping, *prev_mapping = NULL;
+	struct address_space *mapping;
 	int err = 0, err2;
 
 	INIT_LIST_HEAD(&tmp);
@@ -762,18 +762,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 			 * contents - it is a noop if I/O is still in
 			 * flight on potentially older contents.
 			 */
-			ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
-
-			/*
-			 * Kick off IO for the previous mapping. Note
-			 * that we will not run the very last mapping,
-			 * wait_on_buffer() will do that for us
-			 * through sync_buffer().
-			 */
-			if (prev_mapping && prev_mapping != mapping)
-				blk_run_address_space(prev_mapping);
-			prev_mapping = mapping;
-
+			ll_rw_block(SWRITE_SYNC, 1, &bh);
 			brelse(bh);
 			spin_lock(lock);
 		}
@@ -2968,13 +2957,12 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
+		if (rw == SWRITE || rw == SWRITE_SYNC)
 			lock_buffer(bh);
 		else if (!trylock_buffer(bh))
 			continue;
 
-		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
-		    rw == SWRITE_SYNC_PLUG) {
+		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
@@ -3010,7 +2998,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	if (test_clear_buffer_dirty(bh)) {
 		get_bh(bh);
 		bh->b_end_io = end_buffer_write_sync;
-		ret = submit_bh(WRITE_SYNC, bh);
+		ret = submit_bh(WRITE, bh);
 		wait_on_buffer(bh);
 		if (buffer_eopnotsupp(bh)) {
 			clear_buffer_eopnotsupp(bh);

diff --git a/trunk/fs/direct-io.c b/trunk/fs/direct-io.c
index da258e7249cc..b6d43908ff7a 100644
--- a/trunk/fs/direct-io.c
+++ b/trunk/fs/direct-io.c
@@ -1126,7 +1126,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	int acquire_i_mutex = 0;
 
 	if (rw & WRITE)
-		rw = WRITE_ODIRECT;
+		rw = WRITE_SYNC;
 
 	if (bdev)
 		bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev));

diff --git a/trunk/fs/jbd/commit.c b/trunk/fs/jbd/commit.c
index a8e8513a78a9..f8077b9c8981 100644
--- a/trunk/fs/jbd/commit.c
+++ b/trunk/fs/jbd/commit.c
@@ -351,13 +351,8 @@ void journal_commit_transaction(journal_t *journal)
 	spin_lock(&journal->j_state_lock);
 	commit_transaction->t_state = T_LOCKED;
 
-	/*
-	 * Use plugged writes here, since we want to submit several before
-	 * we unplug the device. We don't do explicit unplugging in here,
-	 * instead we rely on sync_buffer() doing the unplug for us.
-	 */
 	if (commit_transaction->t_synchronous_commit)
-		write_op = WRITE_SYNC_PLUG;
+		write_op = WRITE_SYNC;
 	spin_lock(&commit_transaction->t_handle_lock);
 	while (commit_transaction->t_updates) {
 		DEFINE_WAIT(wait);

diff --git a/trunk/fs/jbd2/commit.c b/trunk/fs/jbd2/commit.c
index 073c8c3df7cd..4ea72377c7a2 100644
--- a/trunk/fs/jbd2/commit.c
+++ b/trunk/fs/jbd2/commit.c
@@ -138,7 +138,7 @@ static int journal_submit_commit_record(journal_t *journal,
 		set_buffer_ordered(bh);
 		barrier_done = 1;
 	}
-	ret = submit_bh(WRITE_SYNC_PLUG, bh);
+	ret = submit_bh(WRITE_SYNC, bh);
 	if (barrier_done)
 		clear_buffer_ordered(bh);
 
@@ -159,7 +159,7 @@ static int journal_submit_commit_record(journal_t *journal,
 		lock_buffer(bh);
 		set_buffer_uptodate(bh);
 		clear_buffer_dirty(bh);
-		ret = submit_bh(WRITE_SYNC_PLUG, bh);
+		ret = submit_bh(WRITE_SYNC, bh);
 	}
 	*cbh = bh;
 	return ret;
@@ -190,7 +190,7 @@ static int journal_wait_on_commit_record(journal_t *journal,
 		set_buffer_uptodate(bh);
 		bh->b_end_io = journal_end_buffer_io_sync;
 
-		ret = submit_bh(WRITE_SYNC_PLUG, bh);
+		ret = submit_bh(WRITE_SYNC, bh);
 		if (ret) {
 			unlock_buffer(bh);
 			return ret;
@@ -402,13 +402,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 	spin_lock(&journal->j_state_lock);
 	commit_transaction->t_state = T_LOCKED;
 
-	/*
-	 * Use plugged writes here, since we want to submit several before
-	 * we unplug the device. We don't do explicit unplugging in here,
-	 * instead we rely on sync_buffer() doing the unplug for us.
-	 */
 	if (commit_transaction->t_synchronous_commit)
-		write_op = WRITE_SYNC_PLUG;
+		write_op = WRITE_SYNC;
 	stats.u.run.rs_wait = commit_transaction->t_max_wait;
 	stats.u.run.rs_locked = jiffies;
 	stats.u.run.rs_running = jbd2_time_diff(commit_transaction->t_start,

diff --git a/trunk/include/linux/backing-dev.h b/trunk/include/linux/backing-dev.h
index 0ec2c594868e..bee52abb8a4d 100644
--- a/trunk/include/linux/backing-dev.h
+++ b/trunk/include/linux/backing-dev.h
@@ -24,8 +24,8 @@ struct dentry;
  */
 enum bdi_state {
 	BDI_pdflush,		/* A pdflush thread is working this device */
-	BDI_async_congested,	/* The async (write) queue is getting full */
-	BDI_sync_congested,	/* The sync queue is getting full */
+	BDI_write_congested,	/* The write queue is getting full */
+	BDI_read_congested,	/* The read queue is getting full */
 	BDI_unused,		/* Available bits start here */
 };
 
@@ -215,18 +215,18 @@ static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
 
 static inline int bdi_read_congested(struct backing_dev_info *bdi)
 {
-	return bdi_congested(bdi, 1 << BDI_sync_congested);
+	return bdi_congested(bdi, 1 << BDI_read_congested);
 }
 
 static inline int bdi_write_congested(struct backing_dev_info *bdi)
 {
-	return bdi_congested(bdi, 1 << BDI_async_congested);
+	return bdi_congested(bdi, 1 << BDI_write_congested);
 }
 
 static inline int bdi_rw_congested(struct backing_dev_info *bdi)
 {
-	return bdi_congested(bdi, (1 << BDI_sync_congested) |
-				  (1 << BDI_async_congested));
+	return bdi_congested(bdi, (1 << BDI_read_congested)|
+				  (1 << BDI_write_congested));
 }
 
 void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
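The backing-dev.h rename keeps bdi_congested()'s bitmask convention; only
the bit names change from sync/async back to read/write. A small userspace
model of those checks, purely illustrative and not kernel code:

#include <stdio.h>

/* Models the bdi congestion bits after the rename: one bit per
 * direction, tested through a shared bitmask helper. */
enum bdi_state_model {
	BDI_write_congested_model = 1,
	BDI_read_congested_model = 2,
};

static int bdi_congested_model(unsigned long state, int bdi_bits)
{
	return (state & bdi_bits) != 0;
}

int main(void)
{
	unsigned long state = 1 << BDI_write_congested_model;

	printf("write congested: %d\n",
	       bdi_congested_model(state, 1 << BDI_write_congested_model));
	printf("read or write congested: %d\n",
	       bdi_congested_model(state, (1 << BDI_read_congested_model) |
					  (1 << BDI_write_congested_model)));
	return 0;
}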
diff --git a/trunk/include/linux/bio.h b/trunk/include/linux/bio.h
index b900d2c67d29..b05b1d4d17d2 100644
--- a/trunk/include/linux/bio.h
+++ b/trunk/include/linux/bio.h
@@ -145,21 +145,20 @@ struct bio {
  * bit 2 -- barrier
  *	Insert a serialization point in the IO queue, forcing previously
  *	submitted IO to be completed before this one is issued.
- * bit 3 -- synchronous I/O hint.
- * bit 4 -- Unplug the device immediately after submitting this bio.
- * bit 5 -- metadata request
+ * bit 3 -- synchronous I/O hint: the block layer will unplug immediately
+ *	Note that this does NOT indicate that the IO itself is sync, just
+ *	that the block layer will not postpone issue of this IO by plugging.
+ * bit 4 -- metadata request
  *	Used for tracing to differentiate metadata and data IO. May also
  *	get some preferential treatment in the IO scheduler
- * bit 6 -- discard sectors
+ * bit 5 -- discard sectors
  *	Informs the lower level device that this range of sectors is no longer
  *	used by the file system and may thus be freed by the device. Used
  *	for flash based storage.
- * bit 7 -- fail fast device errors
- * bit 8 -- fail fast transport errors
- * bit 9 -- fail fast driver errors
+ * bit 6 -- fail fast device errors
+ * bit 7 -- fail fast transport errors
+ * bit 8 -- fail fast driver errors
  *	Don't want driver retries for any fast fail whatever the reason.
- * bit 10 -- Tell the IO scheduler not to wait for more requests after this
-	one has been submitted, even if it is a SYNC request.
 */
 #define BIO_RW		0	/* Must match RW in req flags (blkdev.h) */
 #define BIO_RW_AHEAD	1	/* Must match FAILFAST in req flags */
@@ -171,7 +170,6 @@ struct bio {
 #define BIO_RW_FAILFAST_DEV		7
 #define BIO_RW_FAILFAST_TRANSPORT	8
 #define BIO_RW_FAILFAST_DRIVER		9
-#define BIO_RW_NOIDLE	10
 
 #define bio_rw_flagged(bio, flag)	((bio)->bi_rw & (1 << (flag)))
 
@@ -190,7 +188,6 @@ struct bio {
 #define bio_rw_ahead(bio)	bio_rw_flagged(bio, BIO_RW_AHEAD)
 #define bio_rw_meta(bio)	bio_rw_flagged(bio, BIO_RW_META)
 #define bio_discard(bio)	bio_rw_flagged(bio, BIO_RW_DISCARD)
-#define bio_noidle(bio)		bio_rw_flagged(bio, BIO_RW_NOIDLE)
 
 /*
  * upper 16 bits of bi_rw define the io priority of this bio

diff --git a/trunk/include/linux/blkdev.h b/trunk/include/linux/blkdev.h
index e03660964e02..465d6babc847 100644
--- a/trunk/include/linux/blkdev.h
+++ b/trunk/include/linux/blkdev.h
@@ -38,10 +38,6 @@ struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
 struct request_list {
-	/*
-	 * count[], starved[], and wait[] are indexed by
-	 * BLK_RW_SYNC/BLK_RW_ASYNC
-	 */
 	int count[2];
 	int starved[2];
 	int elvpriv;
@@ -70,11 +66,6 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };
 
-enum {
-	BLK_RW_ASYNC	= 0,
-	BLK_RW_SYNC	= 1,
-};
-
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -112,13 +103,12 @@ enum rq_flag_bits {
 	__REQ_QUIET,		/* don't worry about errors */
 	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
 	__REQ_ORDERED_COLOR,	/* is before or after barrier */
-	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
+	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
 	__REQ_ALLOCED,		/* request came from our alloc pool */
 	__REQ_RW_META,		/* metadata io request */
 	__REQ_COPY_USER,	/* contains copies of user pages */
 	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
 	__REQ_UNPLUG,		/* unplug queue on submission */
-	__REQ_NOIDLE,		/* Don't anticipate more IO after this one */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -146,7 +136,6 @@ enum rq_flag_bits {
 #define REQ_COPY_USER	(1 << __REQ_COPY_USER)
 #define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)
 #define REQ_UNPLUG	(1 << __REQ_UNPLUG)
-#define REQ_NOIDLE	(1 << __REQ_NOIDLE)
 
 #define BLK_MAX_CDB	16
 
@@ -449,8 +438,8 @@ struct request_queue
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
-#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
-#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
+#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
+#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
@@ -622,42 +611,32 @@ enum {
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
 /*
- * We regard a request as sync, if either a read or a sync write
+ * We regard a request as sync, if it's a READ or a SYNC write.
 */
-static inline bool rw_is_sync(unsigned int rw_flags)
-{
-	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
-}
-
-static inline bool rq_is_sync(struct request *rq)
-{
-	return rw_is_sync(rq->cmd_flags);
-}
-
+#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
 #define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
-#define rq_noidle(rq)		((rq)->cmd_flags & REQ_NOIDLE)
 
-static inline int blk_queue_full(struct request_queue *q, int sync)
+static inline int blk_queue_full(struct request_queue *q, int rw)
 {
-	if (sync)
-		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
-	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
+	if (rw == READ)
+		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int sync)
+static inline void blk_set_queue_full(struct request_queue *q, int rw)
 {
-	if (sync)
-		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
+	if (rw == READ)
+		queue_flag_set(QUEUE_FLAG_READFULL, q);
 	else
-		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
+		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int sync)
+static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 {
-	if (sync)
-		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
+	if (rw == READ)
+		queue_flag_clear(QUEUE_FLAG_READFULL, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
+		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
 }

diff --git a/trunk/include/linux/fs.h b/trunk/include/linux/fs.h
index cae5720f431c..a09e17c8f5fd 100644
--- a/trunk/include/linux/fs.h
+++ b/trunk/include/linux/fs.h
@@ -95,12 +95,8 @@ struct inodes_stat_t {
 #define SWRITE	3	/* for ll_rw_block() - wait for buffer lock */
 #define READ_SYNC	(READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
 #define READ_META	(READ | (1 << BIO_RW_META))
-#define WRITE_SYNC_PLUG	(WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
-#define WRITE_SYNC	(WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
-#define WRITE_ODIRECT	(WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
-#define SWRITE_SYNC_PLUG	\
-			(SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
-#define SWRITE_SYNC	(SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
+#define WRITE_SYNC	(WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
+#define SWRITE_SYNC	(SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
 #define WRITE_BARRIER	(WRITE | (1 << BIO_RW_BARRIER))
 #define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
 #define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
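After this change every sync write implies an immediate unplug, since
WRITE_SYNC folds BIO_RW_UNPLUG back in instead of relying on the removed
BIO_RW_NOIDLE/WRITE_SYNC_PLUG split. A standalone sketch of the flag
composition; the bit positions are assumed for illustration, the real
values come from bio.h's BIO_RW_* constants:

#include <stdio.h>

/* Models how fs.h composes WRITE_SYNC out of bio flag bits. */
enum {
	MODEL_WRITE		= 1,	/* direction bit */
	MODEL_BIO_RW_SYNCIO	= 3,	/* assumed position */
	MODEL_BIO_RW_UNPLUG	= 4,	/* assumed position */
};

#define MODEL_WRITE_SYNC \
	(MODEL_WRITE | (1 << MODEL_BIO_RW_SYNCIO) | (1 << MODEL_BIO_RW_UNPLUG))

int main(void)
{
	/* A request built from these flags reads back as a write that is
	 * both a sync hint and an "unplug on submit". */
	unsigned int flags = MODEL_WRITE_SYNC;

	printf("write: %d\n", flags & MODEL_WRITE);
	printf("sync hint: %d\n", !!(flags & (1 << MODEL_BIO_RW_SYNCIO)));
	printf("unplug: %d\n", !!(flags & (1 << MODEL_BIO_RW_UNPLUG)));
	return 0;
}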
diff --git a/trunk/mm/backing-dev.c b/trunk/mm/backing-dev.c
index 493b468a5035..be68c956a660 100644
--- a/trunk/mm/backing-dev.c
+++ b/trunk/mm/backing-dev.c
@@ -284,12 +284,12 @@ static wait_queue_head_t congestion_wqh[2] = {
 	};
 
-void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
+void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
 {
 	enum bdi_state bit;
-	wait_queue_head_t *wqh = &congestion_wqh[sync];
+	wait_queue_head_t *wqh = &congestion_wqh[rw];
 
-	bit = sync ? BDI_sync_congested : BDI_async_congested;
+	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
 	clear_bit(bit, &bdi->state);
 	smp_mb__after_clear_bit();
 	if (waitqueue_active(wqh))
@@ -297,11 +297,11 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
 }
 EXPORT_SYMBOL(clear_bdi_congested);
 
-void set_bdi_congested(struct backing_dev_info *bdi, int sync)
+void set_bdi_congested(struct backing_dev_info *bdi, int rw)
 {
 	enum bdi_state bit;
 
-	bit = sync ? BDI_sync_congested : BDI_async_congested;
+	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
 	set_bit(bit, &bdi->state);
 }
 EXPORT_SYMBOL(set_bdi_congested);
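With set_bdi_congested() and clear_bdi_congested() taking a plain
read/write direction again, a backing device signals pressure per
direction. A hedged sketch of a caller follows; the "my_dev" structure
and its watermark fields are invented purely to show the calling
convention, which is the interface as patched above:

/* Hypothetical driver fragment (not from this patch): mark the write
 * side of a backing device congested while its internal queue is full,
 * and clear it once the queue drains. */
static void my_dev_update_congestion(struct my_dev *dev)
{
	struct backing_dev_info *bdi = &dev->queue->backing_dev_info;

	if (dev->pending_writes >= dev->write_high_water)
		set_bdi_congested(bdi, WRITE);
	else if (dev->pending_writes <= dev->write_low_water)
		clear_bdi_congested(bdi, WRITE);	/* also wakes waiters */
}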