---
r: 72799
b: refs/heads/master
c: 82798a1
h: refs/heads/master
i:
  72797: 122ec50
  72795: 7e0d049
  72791: e01c5b5
  72783: 4a43a46
  72767: 28e14e1
v: v3
Linus Torvalds committed Oct 29, 2007
1 parent d427db8 commit d6c20e3
Showing 117 changed files with 3,161 additions and 1,081 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 1a3b7920fe55247d39c3e1ac1e9b8aca607d0188
+refs/heads/master: 82798a17ad40df827d465329a20ace80497f9b32
19 changes: 19 additions & 0 deletions trunk/Documentation/scsi/link_power_management_policy.txt
@@ -0,0 +1,19 @@
+This parameter allows the user to set the link (interface) power management.
+There are 3 possible options:
+
+Value                   Effect
+----------------------------------------------------------------------------
+min_power               Tell the controller to try to make the link use the
+                        least possible power when possible.  This may
+                        sacrifice some performance due to increased latency
+                        when coming out of lower power states.
+
+max_performance         Generally, this means no power management.  Tell
+                        the controller to have performance be a priority
+                        over power management.
+
+medium_power            Tell the controller to enter a lower power state
+                        when possible, but do not enter the lowest power
+                        state, thus improving latency over min_power setting.

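Aside (not part of the commit): this policy is exposed per SATA host through sysfs. A minimal usage sketch, assuming the attribute introduced by this series appears as link_power_management_policy under /sys/class/scsi_host/ and that host0 is the controller of interest:

	# echo min_power > /sys/class/scsi_host/host0/link_power_management_policy
	# cat /sys/class/scsi_host/host0/link_power_management_policy
	min_power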
2 changes: 1 addition & 1 deletion trunk/arch/um/Kconfig.i386
@@ -1,6 +1,6 @@
 menu "Host processor type and features"
 
-source "arch/i386/Kconfig.cpu"
+source "arch/x86/Kconfig.cpu"
 
 endmenu
5 changes: 2 additions & 3 deletions trunk/arch/um/Makefile-i386
@@ -9,6 +9,7 @@ ELF_ARCH := $(SUBARCH)
 ELF_FORMAT := elf32-$(SUBARCH)
 OBJCOPYFLAGS := -O binary -R .note -R .comment -S
 HEADER_ARCH := x86
+CHECKFLAGS += -D__i386__
 
 ifeq ("$(origin SUBARCH)", "command line")
 ifneq ("$(shell uname -m | sed -e s/i.86/i386/)", "$(SUBARCH)")
@@ -26,10 +27,8 @@ AFLAGS += -DCONFIG_X86_32
 CONFIG_X86_32 := y
 export CONFIG_X86_32
 
-ARCH_KERNEL_DEFINES += -U__$(SUBARCH)__ -U$(SUBARCH)
-
 # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
-include $(srctree)/arch/i386/Makefile.cpu
+include $(srctree)/arch/x86/Makefile_32.cpu
 
 # prevent gcc from keeping the stack 16 byte aligned. Taken from i386.
 cflags-y += $(call cc-option,-mpreferred-stack-boundary=2)
5 changes: 1 addition & 4 deletions trunk/arch/um/Makefile-x86_64
@@ -6,12 +6,9 @@ START := 0x60000000
 
 _extra_flags_ = -fno-builtin -m64
 
-#We #undef __x86_64__ for kernelspace, not for userspace where
-#it's needed for headers to work!
-ARCH_KERNEL_DEFINES = -U__$(SUBARCH)__
 KBUILD_CFLAGS += $(_extra_flags_)
 
-CHECKFLAGS += -m64
+CHECKFLAGS += -m64 -D__x86_64__
 KBUILD_AFLAGS += -m64
 LDFLAGS += -m elf_x86_64
 KBUILD_CPPFLAGS += -m64
2 changes: 1 addition & 1 deletion trunk/arch/um/kernel/mem.c
@@ -165,7 +165,7 @@ static void __init kmap_init(void)
 	kmap_prot = PAGE_KERNEL;
 }
 
-static void init_highmem(void)
+static void __init init_highmem(void)
 {
 	pgd_t *pgd;
 	pud_t *pud;
8 changes: 4 additions & 4 deletions trunk/arch/um/sys-i386/ptrace.c
@@ -155,7 +155,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 	if (err)
 		return err;
 
-	n = copy_to_user((void *) buf, fpregs, sizeof(fpregs));
+	n = copy_to_user(buf, fpregs, sizeof(fpregs));
 	if(n > 0)
 		return -EFAULT;
 
@@ -168,7 +168,7 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 	long fpregs[HOST_FP_SIZE];
 
 	BUG_ON(sizeof(*buf) != sizeof(fpregs));
-	n = copy_from_user(fpregs, (void *) buf, sizeof(fpregs));
+	n = copy_from_user(fpregs, buf, sizeof(fpregs));
 	if (n > 0)
 		return -EFAULT;
 
@@ -185,7 +185,7 @@ int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
 	if (err)
 		return err;
 
-	n = copy_to_user((void *) buf, fpregs, sizeof(fpregs));
+	n = copy_to_user(buf, fpregs, sizeof(fpregs));
 	if(n > 0)
 		return -EFAULT;
 
@@ -198,7 +198,7 @@ int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
 	long fpregs[HOST_XFP_SIZE];
 
 	BUG_ON(sizeof(*buf) != sizeof(fpregs));
-	n = copy_from_user(fpregs, (void *) buf, sizeof(fpregs));
+	n = copy_from_user(fpregs, buf, sizeof(fpregs));
 	if (n > 0)
 		return -EFAULT;
 
4 changes: 2 additions & 2 deletions trunk/arch/um/sys-x86_64/ptrace.c
@@ -154,7 +154,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 	if (err)
 		return err;
 
-	n = copy_to_user((void *) buf, fpregs, sizeof(fpregs));
+	n = copy_to_user(buf, fpregs, sizeof(fpregs));
 	if(n > 0)
 		return -EFAULT;
 
@@ -167,7 +167,7 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 	long fpregs[HOST_FP_SIZE];
 
 	BUG_ON(sizeof(*buf) != sizeof(fpregs));
-	n = copy_from_user(fpregs, (void *) buf, sizeof(fpregs));
+	n = copy_from_user(fpregs, buf, sizeof(fpregs));
 	if (n > 0)
 		return -EFAULT;
 
2 changes: 1 addition & 1 deletion trunk/arch/x86/kernel/pci-gart_64.c
@@ -435,7 +435,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 error:
 	flush_gart();
-	gart_unmap_sg(dev, sg, nents, dir);
+	gart_unmap_sg(dev, sg, out, dir);
 	/* When it was forced or merged try again in a dumb way */
 	if (force_iommu || iommu_merge) {
 		out = dma_map_sg_nonforce(dev, sg, nents, dir);
6 changes: 0 additions & 6 deletions trunk/arch/x86/mm/init_64.c
@@ -728,12 +728,6 @@ int in_gate_area_no_task(unsigned long addr)
 	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
 }
 
-void * __init alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
-{
-	return __alloc_bootmem_core(pgdat->bdata, size,
-				    SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0);
-}
-
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
 	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
7 changes: 5 additions & 2 deletions trunk/block/cfq-iosched.c
@@ -1443,8 +1443,11 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
 		cfqq = *async_cfqq;
 	}
 
-	if (!cfqq)
+	if (!cfqq) {
 		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
+		if (!cfqq)
+			return NULL;
+	}
 
 	/*
 	 * pin the queue now that it's allocated, scheduler exit will prune it
@@ -2053,7 +2056,7 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
 	del_timer_sync(&cfqd->idle_slice_timer);
 	del_timer_sync(&cfqd->idle_class_timer);
-	blk_sync_queue(cfqd->queue);
+	kblockd_flush_work(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
2 changes: 1 addition & 1 deletion trunk/block/compat_ioctl.c
@@ -581,7 +581,7 @@ static int compat_blkdev_driver_ioctl(struct inode *inode, struct file *file,
 {
 	int ret;
 
-	switch (arg) {
+	switch (cmd) {
 	case HDIO_GET_UNMASKINTR:
 	case HDIO_GET_MULTCOUNT:
 	case HDIO_GET_KEEPSETTINGS:
41 changes: 19 additions & 22 deletions trunk/block/ll_rw_blk.c
@@ -39,7 +39,7 @@
 
 static void blk_unplug_work(struct work_struct *work);
 static void blk_unplug_timeout(unsigned long data);
-static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
+static void drive_stat_acct(struct request *rq, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
 static int __make_request(struct request_queue *q, struct bio *bio);
 static struct io_context *current_io_context(gfp_t gfp_flags, int node);
@@ -791,7 +791,6 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
 	retval = atomic_dec_and_test(&bqt->refcnt);
 	if (retval) {
 		BUG_ON(bqt->busy);
-		BUG_ON(!list_empty(&bqt->busy_list));
 
 		kfree(bqt->tag_index);
 		bqt->tag_index = NULL;
@@ -903,7 +902,6 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
 	if (init_tag_map(q, tags, depth))
 		goto fail;
 
-	INIT_LIST_HEAD(&tags->busy_list);
 	tags->busy = 0;
 	atomic_set(&tags->refcnt, 1);
 	return tags;
@@ -954,6 +952,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	 */
 	q->queue_tags = tags;
 	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
 fail:
 	kfree(tags);
@@ -1057,18 +1056,16 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 
 	bqt->tag_index[tag] = NULL;
 
-	/*
-	 * We use test_and_clear_bit's memory ordering properties here.
-	 * The tag_map bit acts as a lock for tag_index[bit], so we need
-	 * a barrer before clearing the bit (precisely: release semantics).
-	 * Could use clear_bit_unlock when it is merged.
-	 */
-	if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) {
+	if (unlikely(!test_bit(tag, bqt->tag_map))) {
 		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
 		       __FUNCTION__, tag);
 		return;
 	}
 
+	/*
+	 * The tag_map bit acts as a lock for tag_index[bit], so we need
+	 * unlock memory barrier semantics.
+	 */
+	clear_bit_unlock(tag, bqt->tag_map);
 	bqt->busy--;
 }

@@ -1114,17 +1111,17 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 		if (tag >= bqt->max_depth)
 			return 1;
 
-	} while (test_and_set_bit(tag, bqt->tag_map));
+	} while (test_and_set_bit_lock(tag, bqt->tag_map));
 	/*
-	 * We rely on test_and_set_bit providing lock memory ordering semantics
-	 * (could use test_and_set_bit_lock when it is merged).
+	 * We need lock ordering semantics given by test_and_set_bit_lock.
+	 * See blk_queue_end_tag for details.
 	 */
 
 	rq->cmd_flags |= REQ_QUEUED;
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
 	blkdev_dequeue_request(rq);
-	list_add(&rq->queuelist, &bqt->busy_list);
+	list_add(&rq->queuelist, &q->tag_busy_list);
 	bqt->busy++;
 	return 0;
 }
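Aside (not part of the commit): the two hunks above convert the tag map to the then-new locked bit primitives. The bit in bqt->tag_map acts as a per-tag lock around tag_index[tag]: setting it needs acquire semantics and clearing it needs release semantics, otherwise a CPU that immediately re-claims the tag could read a stale tag_index entry. A minimal userspace sketch of the same pattern using C11 atomics (tag_map, tag_index, tag_lock and tag_unlock are illustrative names, not kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_ulong tag_map;	/* one lock bit per tag */
	static void *tag_index[64];	/* each slot guarded by its bit */

	/* Acquire side, analogous to test_and_set_bit_lock():
	 * returns true if the tag was already busy. */
	static bool tag_lock(unsigned int tag)
	{
		unsigned long mask = 1UL << tag;
		unsigned long old = atomic_fetch_or_explicit(&tag_map, mask,
							     memory_order_acquire);
		return old & mask;
	}

	/* Release side, analogous to clear_bit_unlock(): writes to
	 * tag_index[tag] made while holding the bit become visible to
	 * the next locker before the bit reads as clear. */
	static void tag_unlock(unsigned int tag)
	{
		atomic_fetch_and_explicit(&tag_map, ~(1UL << tag),
					  memory_order_release);
	}

This is the race the deleted comment ("a barrer before clearing the bit") was guarding against; the new primitives simply make the acquire/release intent explicit.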
@@ -1145,11 +1142,10 @@ EXPORT_SYMBOL(blk_queue_start_tag);
 **/
 void blk_queue_invalidate_tags(struct request_queue *q)
 {
-	struct blk_queue_tag *bqt = q->queue_tags;
 	struct list_head *tmp, *n;
 	struct request *rq;
 
-	list_for_each_safe(tmp, n, &bqt->busy_list) {
+	list_for_each_safe(tmp, n, &q->tag_busy_list) {
 		rq = list_entry_rq(tmp);
 
 		if (rq->tag == -1) {
@@ -1738,6 +1734,7 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
 	del_timer_sync(&q->unplug_timer);
+	kblockd_flush_work(&q->unplug_work);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -2341,7 +2338,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 	if (blk_rq_tagged(rq))
 		blk_queue_end_tag(q, rq);
 
-	drive_stat_acct(rq, rq->nr_sectors, 1);
+	drive_stat_acct(rq, 1);
 	__elv_add_request(q, rq, where, 0);
 	blk_start_queueing(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
@@ -2736,7 +2733,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
+static void drive_stat_acct(struct request *rq, int new_io)
 {
 	int rw = rq_data_dir(rq);
 
@@ -2758,7 +2755,7 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 */
 static inline void add_request(struct request_queue * q, struct request * req)
 {
-	drive_stat_acct(req, req->nr_sectors, 1);
+	drive_stat_acct(req, 1);
 
 	/*
 	 * elevator indicated where it wants this request to be
@@ -3015,7 +3012,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 			req->biotail = bio;
 			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
 			req->ioprio = ioprio_best(req->ioprio, prio);
-			drive_stat_acct(req, nr_sectors, 0);
+			drive_stat_acct(req, 0);
 			if (!attempt_back_merge(q, req))
 				elv_merged_request(q, req, el_ret);
 			goto out;
@@ -3042,7 +3039,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 			req->sector = req->hard_sector = bio->bi_sector;
 			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
 			req->ioprio = ioprio_best(req->ioprio, prio);
-			drive_stat_acct(req, nr_sectors, 0);
+			drive_stat_acct(req, 0);
 			if (!attempt_front_merge(q, req))
 				elv_merged_request(q, req, el_ret);
 			goto out;
