From 706034a74f6e832cfd590a10dc35044af030152e Mon Sep 17 00:00:00 2001
From: Andrew Morton
Date: Wed, 14 May 2008 16:10:41 -0700
Subject: [PATCH]

--- yaml ---
r: 99351
b: refs/heads/master
c: 5136dea5734cfddbc6d7ccb7ead85a3ac7ce3de2
h: refs/heads/master
i:
  99349: cb2ae9ab61fadc89b67df563e39fa6251463db6c
  99347: 2abf249d7a7a425f85ea4cd405aeccc24a931e67
  99343: 9707803874bfe71c5c78c25b5b419502086cf377
v: v3
---

 [refs]                         |  2 +-
 trunk/arch/x86/kernel/apm_32.c | 25 ++++++++++---------------
 trunk/include/asm-x86/bitops.h | 34 +++++++++++++++++-----------------
 3 files changed, 28 insertions(+), 33 deletions(-)

diff --git a/[refs] b/[refs]
index 8e92cfa8f6e0..fbfb0f7bbce4 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: b764a15f679942a7bc9d4f9645299e1defcc5b43
+refs/heads/master: 5136dea5734cfddbc6d7ccb7ead85a3ac7ce3de2
diff --git a/trunk/arch/x86/kernel/apm_32.c b/trunk/arch/x86/kernel/apm_32.c
index 00e6d1370954..bf9290e29013 100644
--- a/trunk/arch/x86/kernel/apm_32.c
+++ b/trunk/arch/x86/kernel/apm_32.c
@@ -228,7 +228,6 @@
 #include
 #include
 #include
-#include
 
 #include
 #include
@@ -1150,7 +1149,7 @@ static void queue_event(apm_event_t event, struct apm_user *sender)
                        as->event_tail = 0;
                }
                as->events[as->event_head] = event;
-               if (!as->suser || !as->writer)
+               if ((!as->suser) || (!as->writer))
                        continue;
                switch (event) {
                case APM_SYS_SUSPEND:
@@ -1397,7 +1396,7 @@ static void apm_mainloop(void)
 
 static int check_apm_user(struct apm_user *as, const char *func)
 {
-       if (as == NULL || as->magic != APM_BIOS_MAGIC) {
+       if ((as == NULL) || (as->magic != APM_BIOS_MAGIC)) {
                printk(KERN_ERR "apm: %s passed bad filp\n", func);
                return 1;
        }
@@ -1460,19 +1459,18 @@ static unsigned int do_poll(struct file *fp, poll_table *wait)
        return 0;
 }
 
-static long do_ioctl(struct file *filp, u_int cmd, u_long arg)
+static int do_ioctl(struct inode *inode, struct file *filp,
+                   u_int cmd, u_long arg)
 {
        struct apm_user *as;
-       int ret;
 
        as = filp->private_data;
        if (check_apm_user(as, "ioctl"))
                return -EIO;
-       if (!as->suser || !as->writer)
+       if ((!as->suser) || (!as->writer))
                return -EPERM;
        switch (cmd) {
        case APM_IOC_STANDBY:
-               lock_kernel();
                if (as->standbys_read > 0) {
                        as->standbys_read--;
                        as->standbys_pending--;
@@ -1481,10 +1479,8 @@ static long do_ioctl(struct file *filp, u_int cmd, u_long arg)
                        queue_event(APM_USER_STANDBY, as);
                if (standbys_pending <= 0)
                        standby();
-               unlock_kernel();
                break;
        case APM_IOC_SUSPEND:
-               lock_kernel();
                if (as->suspends_read > 0) {
                        as->suspends_read--;
                        as->suspends_pending--;
@@ -1492,17 +1488,16 @@ static long do_ioctl(struct file *filp, u_int cmd, u_long arg)
                } else
                        queue_event(APM_USER_SUSPEND, as);
                if (suspends_pending <= 0) {
-                       ret = suspend(1);
+                       return suspend(1);
                } else {
                        as->suspend_wait = 1;
                        wait_event_interruptible(apm_suspend_waitqueue,
                                as->suspend_wait == 0);
-                       ret = as->suspend_result;
+                       return as->suspend_result;
                }
-               unlock_kernel();
-               return ret;
+               break;
        default:
-               return -ENOTTY;
+               return -EINVAL;
        }
        return 0;
 }
@@ -1865,7 +1860,7 @@ static const struct file_operations apm_bios_fops = {
        .owner          = THIS_MODULE,
        .read           = do_read,
        .poll           = do_poll,
-       .unlocked_ioctl = do_ioctl,
+       .ioctl          = do_ioctl,
        .open           = do_open,
        .release        = do_release,
 };
diff --git a/trunk/include/asm-x86/bitops.h b/trunk/include/asm-x86/bitops.h
index ee4b3ead6a43..7d2494bdc660 100644
--- a/trunk/include/asm-x86/bitops.h
+++ b/trunk/include/asm-x86/bitops.h
@@ -43,7 +43,7 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(int nr, volatile void *addr)
+static inline void set_bit(int nr, volatile unsigned long *addr)
 {
        asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
@@ -57,7 +57,7 @@ static inline void set_bit(int nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(int nr, volatile void *addr)
+static inline void __set_bit(int nr, volatile unsigned long *addr)
 {
        asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
@@ -72,7 +72,7 @@ static inline void __set_bit(int nr, volatile void *addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void clear_bit(int nr, volatile void *addr)
+static inline void clear_bit(int nr, volatile unsigned long *addr)
 {
        asm volatile(LOCK_PREFIX "btr %1,%0" : ADDR : "Ir" (nr));
 }
@@ -85,13 +85,13 @@ static inline void clear_bit(int nr, volatile void *addr)
  * clear_bit() is atomic and implies release semantics before the memory
  * operation. It can be used for an unlock.
  */
-static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
+static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
 {
        barrier();
        clear_bit(nr, addr);
 }
 
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
 {
        asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
 }
@@ -108,7 +108,7 @@ static inline void __clear_bit(int nr, volatile void *addr)
  * No memory barrier is required here, because x86 cannot reorder stores past
  * older loads. Same principle as spin_unlock.
  */
-static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
+static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
 {
        barrier();
        __clear_bit(nr, addr);
@@ -126,7 +126,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(int nr, volatile unsigned long *addr)
 {
        asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
 }
@@ -140,7 +140,7 @@ static inline void __change_bit(int nr, volatile void *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void change_bit(int nr, volatile void *addr)
+static inline void change_bit(int nr, volatile unsigned long *addr)
 {
        asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
 }
@@ -153,7 +153,7 @@ static inline void change_bit(int nr, volatile void *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_set_bit(int nr, volatile void *addr)
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
 {
        int oldbit;
 
@@ -170,7 +170,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
  *
  * This is the same as test_and_set_bit on x86.
  */
-static inline int test_and_set_bit_lock(int nr, volatile void *addr)
+static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
 {
        return test_and_set_bit(nr, addr);
 }
@@ -184,7 +184,7 @@ static inline int test_and_set_bit_lock(int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile void *addr)
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
        int oldbit;
 
@@ -203,7 +203,7 @@ static inline int __test_and_set_bit(int nr, volatile void *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_clear_bit(int nr, volatile void *addr)
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
        int oldbit;
 
@@ -223,7 +223,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_clear_bit(int nr, volatile void *addr)
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
        int oldbit;
 
@@ -235,7 +235,7 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr)
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
 {
        int oldbit;
 
@@ -255,7 +255,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_change_bit(int nr, volatile void *addr)
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 {
        int oldbit;
 
@@ -266,13 +266,13 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
        return oldbit;
 }
 
-static inline int constant_test_bit(int nr, const volatile void *addr)
+static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
 {
        return ((1UL << (nr % BITS_PER_LONG)) &
                (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
 }
 
-static inline int variable_test_bit(int nr, volatile const void *addr)
+static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
 {
        int oldbit;
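
A note on the do_ioctl hunk at @@ -1492: besides dropping the explicit locking and the ret temporary, it changes the unknown-command return from -ENOTTY to -EINVAL. ENOTTY ("inappropriate ioctl for device") is what userspace conventionally sees for an unrecognized ioctl request. The short program below is an editor-added userspace sketch, not part of the patch, that shows this convention from the calling side; build it with any C compiler and run it with stdin on a terminal.

/*
 * Illustration only -- not from the patch.  Issue a bogus ioctl request
 * against stdin and print the errno the driver chose for "unknown command".
 * On a tty this is ENOTTY, the value the removed "return -ENOTTY" produced;
 * the restored code reports -EINVAL for unknown commands instead.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        if (ioctl(STDIN_FILENO, 0xdeadbeef, 0) == -1)
                printf("ioctl failed: %s (errno %d)\n", strerror(errno), errno);
        else
                printf("ioctl unexpectedly succeeded\n");
        return 0;
}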
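
The bitops.h hunks all make one change: the address argument narrows from volatile void * to volatile unsigned long *. With void *, any object pointer converts silently, so a caller handing the bit operations a bitmap with the wrong underlying type compiles without complaint; with unsigned long *, the compiler reports the mismatch. The sketch below is a hedged userspace illustration of that difference only: the helper names set_bit_void() and set_bit_ulong() are invented for the example, and their plain-C bodies stand in for the kernel's inline asm.

/*
 * Illustration only -- not from the patch.  Two stand-in helpers mirror the
 * old and new bitops prototypes so the example builds in userspace.
 */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Old shape: "volatile void *" accepts any object pointer without a peep. */
static void set_bit_void(int nr, volatile void *addr)
{
        volatile unsigned long *p = addr;

        p[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* New shape: a mismatched pointer type now draws a compiler diagnostic. */
static void set_bit_ulong(int nr, volatile unsigned long *addr)
{
        addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
        unsigned long bitmap[2] = { 0, 0 };
        unsigned int words32[4] = { 0, 0, 0, 0 };

        set_bit_ulong(3, bitmap);       /* types match: fine */

        /*
         * Compiles silently with the void * prototype even though the
         * storage is an array of 32-bit words, not unsigned longs -- the
         * latent bug class the stricter prototype exposes.  The same call
         * against set_bit_ulong() would be flagged by the compiler.
         */
        set_bit_void(3, words32);

        printf("bitmap[0] = %#lx, words32[0] = %#x\n", bitmap[0], words32[0]);
        return 0;
}

With GCC or Clang, passing words32 to set_bit_ulong() draws an incompatible-pointer-types diagnostic even without extra warning flags, which is the point of tightening the prototypes in this patch.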