From 036c407cc57bd4ddd019080506dcee15caaa5cef Mon Sep 17 00:00:00 2001 From: Jason Gaston Date: Thu, 7 Dec 2006 08:57:32 -0800 Subject: [PATCH] --- yaml --- r: 44619 b: refs/heads/master c: f98b6573f190aff2748894da13a48bab0f10c733 h: refs/heads/master i: 44617: 25d9d4ea1e01ab63115e9f48bc521182fab09048 44615: 2a01e95dbded441d0df56d0d6fe46eef1806a141 v: v3 --- [refs] | 2 +- trunk/drivers/ata/ata_piix.c | 20 +++- trunk/include/linux/workqueue.h | 32 ++---- trunk/kernel/workqueue.c | 16 +-- trunk/mm/mincore.c | 190 +++++++++++++++++--------------- 5 files changed, 139 insertions(+), 121 deletions(-) diff --git a/[refs] b/[refs] index 616a1bb1a788..9f8b939cd6ab 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: a08727bae727fc2ca3a6ee9506d77786b71070b3 +refs/heads/master: f98b6573f190aff2748894da13a48bab0f10c733 diff --git a/trunk/drivers/ata/ata_piix.c b/trunk/drivers/ata/ata_piix.c index c7de0bb1591f..58dba1df2b7b 100644 --- a/trunk/drivers/ata/ata_piix.c +++ b/trunk/drivers/ata/ata_piix.c @@ -226,14 +226,26 @@ static const struct pci_device_id piix_pci_tbl[] = { { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, /* 2801GBM/GHM (ICH7M, identical to ICH6M) */ { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci }, - /* Enterprise Southbridge 2 (where's the datasheet?) */ + /* Enterprise Southbridge 2 (631xESB/632xESB) */ { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, - /* SATA Controller 1 IDE (ICH8, no datasheet yet) */ + /* SATA Controller 1 IDE (ICH8) */ { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, - /* SATA Controller 2 IDE (ICH8, ditto) */ + /* SATA Controller 2 IDE (ICH8) */ { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, - /* Mobile SATA Controller IDE (ICH8M, ditto) */ + /* Mobile SATA Controller IDE (ICH8M) */ { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, + /* SATA Controller IDE (ICH9) */ + { 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, + /* SATA Controller IDE (ICH9) */ + { 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, + /* SATA Controller IDE (ICH9) */ + { 0x8086, 0x2926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, + /* SATA Controller IDE (ICH9M) */ + { 0x8086, 0x2928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, + /* SATA Controller IDE (ICH9M) */ + { 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, + /* SATA Controller IDE (ICH9M) */ + { 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci }, { } /* terminate list */ }; diff --git a/trunk/include/linux/workqueue.h b/trunk/include/linux/workqueue.h index 2a7b38d87018..5b13dcf02714 100644 --- a/trunk/include/linux/workqueue.h +++ b/trunk/include/linux/workqueue.h @@ -8,21 +8,16 @@ #include #include #include -#include struct workqueue_struct; struct work_struct; typedef void (*work_func_t)(struct work_struct *work); -/* - * The first word is the work queue pointer and the flags rolled into - * one - */ -#define work_data_bits(work) ((unsigned long *)(&(work)->data)) - struct work_struct { - atomic_long_t data; + /* the first word is the work queue pointer and the flags rolled into + * one */ + unsigned long management; #define WORK_STRUCT_PENDING 0 /* T if work item pending execution */ #define WORK_STRUCT_NOAUTOREL 1 /* F if work item automatically released on exec */ #define WORK_STRUCT_FLAG_MASK (3UL) @@ -31,9 +26,6 @@ struct work_struct { work_func_t func; }; -#define WORK_DATA_INIT(autorelease) \ - ATOMIC_LONG_INIT((autorelease) << 
WORK_STRUCT_NOAUTOREL) - struct delayed_work { struct work_struct work; struct timer_list timer; @@ -44,13 +36,13 @@ struct execute_work { }; #define __WORK_INITIALIZER(n, f) { \ - .data = WORK_DATA_INIT(0), \ + .management = 0, \ .entry = { &(n).entry, &(n).entry }, \ .func = (f), \ } #define __WORK_INITIALIZER_NAR(n, f) { \ - .data = WORK_DATA_INIT(1), \ + .management = (1 << WORK_STRUCT_NOAUTOREL), \ .entry = { &(n).entry, &(n).entry }, \ .func = (f), \ } @@ -90,21 +82,17 @@ struct execute_work { /* * initialize all of a work item in one go - * - * NOTE! No point in using "atomic_long_set()": useing a direct - * assignment of the work data initializer allows the compiler - * to generate better code. */ #define INIT_WORK(_work, _func) \ do { \ - (_work)->data = (atomic_long_t) WORK_DATA_INIT(0); \ + (_work)->management = 0; \ INIT_LIST_HEAD(&(_work)->entry); \ PREPARE_WORK((_work), (_func)); \ } while (0) #define INIT_WORK_NAR(_work, _func) \ do { \ - (_work)->data = (atomic_long_t) WORK_DATA_INIT(1); \ + (_work)->management = (1 << WORK_STRUCT_NOAUTOREL); \ INIT_LIST_HEAD(&(_work)->entry); \ PREPARE_WORK((_work), (_func)); \ } while (0) @@ -126,7 +114,7 @@ struct execute_work { * @work: The work item in question */ #define work_pending(work) \ - test_bit(WORK_STRUCT_PENDING, work_data_bits(work)) + test_bit(WORK_STRUCT_PENDING, &(work)->management) /** * delayed_work_pending - Find out whether a delayable work item is currently @@ -155,7 +143,7 @@ struct execute_work { * This should also be used to release a delayed work item. */ #define work_release(work) \ - clear_bit(WORK_STRUCT_PENDING, work_data_bits(work)) + clear_bit(WORK_STRUCT_PENDING, &(work)->management) extern struct workqueue_struct *__create_workqueue(const char *name, @@ -200,7 +188,7 @@ static inline int cancel_delayed_work(struct delayed_work *work) ret = del_timer_sync(&work->timer); if (ret) - work_release(&work->work); + clear_bit(WORK_STRUCT_PENDING, &work->work.management); return ret; } diff --git a/trunk/kernel/workqueue.c b/trunk/kernel/workqueue.c index 742cbbe49bdc..db49886bfae1 100644 --- a/trunk/kernel/workqueue.c +++ b/trunk/kernel/workqueue.c @@ -96,13 +96,13 @@ static inline void set_wq_data(struct work_struct *work, void *wq) BUG_ON(!work_pending(work)); new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING); - new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work); - atomic_long_set(&work->data, new); + new |= work->management & WORK_STRUCT_FLAG_MASK; + work->management = new; } static inline void *get_wq_data(struct work_struct *work) { - return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); + return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK); } static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work) @@ -133,7 +133,7 @@ static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work list_del_init(&work->entry); spin_unlock_irqrestore(&cwq->lock, flags); - if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work))) + if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management)) work_release(work); f(work); @@ -206,7 +206,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work) { int ret = 0, cpu = get_cpu(); - if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { + if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) { if (unlikely(is_single_threaded(wq))) cpu = singlethread_cpu; BUG_ON(!list_empty(&work->entry)); @@ -248,7 +248,7 @@ int fastcall queue_delayed_work(struct 
workqueue_struct *wq, if (delay == 0) return queue_work(wq, work); - if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { + if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) { BUG_ON(timer_pending(timer)); BUG_ON(!list_empty(&work->entry)); @@ -280,7 +280,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct timer_list *timer = &dwork->timer; struct work_struct *work = &dwork->work; - if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { + if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) { BUG_ON(timer_pending(timer)); BUG_ON(!list_empty(&work->entry)); @@ -321,7 +321,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) spin_unlock_irqrestore(&cwq->lock, flags); BUG_ON(get_wq_data(work) != cwq); - if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work))) + if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management)) work_release(work); f(work); diff --git a/trunk/mm/mincore.c b/trunk/mm/mincore.c index b44d7f875cb6..72890780c1c9 100644 --- a/trunk/mm/mincore.c +++ b/trunk/mm/mincore.c @@ -1,7 +1,7 @@ /* * linux/mm/mincore.c * - * Copyright (C) 1994-2006 Linus Torvalds + * Copyright (C) 1994-1999 Linus Torvalds */ /* @@ -38,60 +38,46 @@ static unsigned char mincore_page(struct vm_area_struct * vma, return present; } -/* - * Do a chunk of "sys_mincore()". We've already checked - * all the arguments, we hold the mmap semaphore: we should - * just return the amount of info we're asked for. - */ -static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pages) +static long mincore_vma(struct vm_area_struct * vma, + unsigned long start, unsigned long end, unsigned char __user * vec) { - unsigned long i, nr, pgoff; - struct vm_area_struct *vma = find_vma(current->mm, addr); + long error, i, remaining; + unsigned char * tmp; - /* - * find_vma() didn't find anything: the address - * is above everything we have mapped. - */ - if (!vma) { - memset(vec, 0, pages); - return pages; - } + error = -ENOMEM; + if (!vma->vm_file) + return error; - /* - * find_vma() found something, but we might be - * below it: check for that. - */ - if (addr < vma->vm_start) { - unsigned long gap = (vma->vm_start - addr) >> PAGE_SHIFT; - if (gap > pages) - gap = pages; - memset(vec, 0, gap); - return gap; - } + start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; + if (end > vma->vm_end) + end = vma->vm_end; + end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - /* - * Ok, got it. But check whether it's a segment we support - * mincore() on. Right now, we don't do any anonymous mappings. - */ - if (!vma->vm_file) - return -ENOMEM; + error = -EAGAIN; + tmp = (unsigned char *) __get_free_page(GFP_KERNEL); + if (!tmp) + return error; - /* - * Calculate how many pages there are left in the vma, and - * what the pgoff is for our address. - */ - nr = (vma->vm_end - addr) >> PAGE_SHIFT; - if (nr > pages) - nr = pages; + /* (end - start) is # of pages, and also # of bytes in "vec */ + remaining = (end - start), + + error = 0; + for (i = 0; remaining > 0; remaining -= PAGE_SIZE, i++) { + int j = 0; + long thispiece = (remaining < PAGE_SIZE) ? + remaining : PAGE_SIZE; - pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; - pgoff += vma->vm_pgoff; + while (j < thispiece) + tmp[j++] = mincore_page(vma, start++); - /* And then we just fill the sucker in.. 
*/ - for (i = 0 ; i < nr; i++, pgoff++) - vec[i] = mincore_page(vma, pgoff); + if (copy_to_user(vec + PAGE_SIZE * i, tmp, thispiece)) { + error = -EFAULT; + break; + } + } - return nr; + free_page((unsigned long) tmp); + return error; } /* @@ -121,50 +107,82 @@ static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pag asmlinkage long sys_mincore(unsigned long start, size_t len, unsigned char __user * vec) { - long retval; - unsigned long pages; - unsigned char *tmp; - - /* Check the start address: needs to be page-aligned.. */ + int index = 0; + unsigned long end, limit; + struct vm_area_struct * vma; + size_t max; + int unmapped_error = 0; + long error; + + /* check the arguments */ if (start & ~PAGE_CACHE_MASK) - return -EINVAL; + goto einval; - /* ..and we need to be passed a valid user-space range */ - if (!access_ok(VERIFY_READ, (void __user *) start, len)) - return -ENOMEM; + limit = TASK_SIZE; + if (start >= limit) + goto enomem; - /* This also avoids any overflows on PAGE_CACHE_ALIGN */ - pages = len >> PAGE_SHIFT; - pages += (len & ~PAGE_MASK) != 0; + if (!len) + return 0; - if (!access_ok(VERIFY_WRITE, vec, pages)) - return -EFAULT; + max = limit - start; + len = PAGE_CACHE_ALIGN(len); + if (len > max || !len) + goto enomem; - tmp = (void *) __get_free_page(GFP_USER); - if (!tmp) - return -ENOMEM; - - retval = 0; - while (pages) { - /* - * Do at most PAGE_SIZE entries per iteration, due to - * the temporary buffer size. - */ - down_read(¤t->mm->mmap_sem); - retval = do_mincore(start, tmp, max(pages, PAGE_SIZE)); - up_read(¤t->mm->mmap_sem); - - if (retval <= 0) - break; - if (copy_to_user(vec, tmp, retval)) { - retval = -EFAULT; - break; + end = start + len; + + /* check the output buffer whilst holding the lock */ + error = -EFAULT; + down_read(¤t->mm->mmap_sem); + + if (!access_ok(VERIFY_WRITE, vec, len >> PAGE_SHIFT)) + goto out; + + /* + * If the interval [start,end) covers some unmapped address + * ranges, just ignore them, but return -ENOMEM at the end. + */ + error = 0; + + vma = find_vma(current->mm, start); + while (vma) { + /* Here start < vma->vm_end. */ + if (start < vma->vm_start) { + unmapped_error = -ENOMEM; + start = vma->vm_start; } - pages -= retval; - vec += retval; - start += retval << PAGE_SHIFT; - retval = 0; + + /* Here vma->vm_start <= start < vma->vm_end. */ + if (end <= vma->vm_end) { + if (start < end) { + error = mincore_vma(vma, start, end, + &vec[index]); + if (error) + goto out; + } + error = unmapped_error; + goto out; + } + + /* Here vma->vm_start <= start < vma->vm_end < end. */ + error = mincore_vma(vma, start, vma->vm_end, &vec[index]); + if (error) + goto out; + index += (vma->vm_end - start) >> PAGE_CACHE_SHIFT; + start = vma->vm_end; + vma = vma->vm_next; } - free_page((unsigned long) tmp); - return retval; + + /* we found a hole in the area queried if we arrive here */ + error = -ENOMEM; + +out: + up_read(¤t->mm->mmap_sem); + return error; + +einval: + return -EINVAL; +enomem: + return -ENOMEM; }
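
The workqueue hunks above switch the first word of struct work_struct between an atomic_long_t (`data`) and a plain `unsigned long` (`management`), but both layouts rely on the same packing trick visible in set_wq_data()/get_wq_data(): the owning workqueue pointer occupies the high bits of the word and the WORK_STRUCT_* flag bits occupy the low bits, which works because the pointer is aligned and its low bits are always zero. Below is a minimal stand-alone sketch of that packing, not kernel code; the names pack_wq(), unpack_wq(), struct fake_workqueue, and the explicit alignment assert are illustrative assumptions only.

#include <assert.h>
#include <stdio.h>

/* Low bits of the word carry the flags, mirroring WORK_STRUCT_PENDING /
 * WORK_STRUCT_NOAUTOREL / WORK_STRUCT_FLAG_MASK in the patch; the
 * remaining bits carry the workqueue pointer (WORK_STRUCT_WQ_DATA_MASK). */
#define PENDING_BIT	0
#define NOAUTOREL_BIT	1
#define FLAG_MASK	3UL
#define WQ_DATA_MASK	(~FLAG_MASK)

struct fake_workqueue { int id; };	/* stand-in for struct workqueue_struct */

/* Pack the workqueue pointer into the word, mark the item pending, and
 * preserve the existing flag bits -- the same shape as set_wq_data() above. */
static unsigned long pack_wq(unsigned long management, struct fake_workqueue *wq)
{
	unsigned long new = (unsigned long) wq | (1UL << PENDING_BIT);

	assert(((unsigned long) wq & FLAG_MASK) == 0);	/* alignment assumption */
	new |= management & FLAG_MASK;			/* keep existing flags */
	return new;
}

/* Recover the pointer by masking off the flag bits, as get_wq_data() does. */
static struct fake_workqueue *unpack_wq(unsigned long management)
{
	return (struct fake_workqueue *) (management & WQ_DATA_MASK);
}

int main(void)
{
	static struct fake_workqueue wq = { .id = 42 };
	unsigned long management = 0;

	management = pack_wq(management, &wq);
	printf("wq id = %d, pending = %lu\n",
	       unpack_wq(management)->id,
	       (management >> PENDING_BIT) & 1UL);
	return 0;
}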