Commit c637eb8
---
r: 8973
b: refs/heads/master
c: 9cb90de
h: refs/heads/master
i:
  8971: 9c9018c
v: v3
Frank Pavlic authored and Jeff Garzik committed Sep 14, 2005
1 parent 5520827 commit c637eb8
Showing 89 changed files with 741 additions and 491 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f6098cf449b81c14a51e48dd22ae47d03126a1de
+refs/heads/master: 9cb90de84b1d9c4686f12042a3696df38e0114c3
6 changes: 0 additions & 6 deletions trunk/arch/arm/mach-pxa/lubbock.c
@@ -146,11 +146,6 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = {
         // no D+ pullup; lubbock can't connect/disconnect in software
 };
 
-static struct platform_device lub_audio_device = {
-        .name           = "pxa2xx-ac97",
-        .id             = -1,
-};
-
 static struct resource sa1111_resources[] = {
         [0] = {
                 .start  = 0x10000000,
@@ -200,7 +195,6 @@ static struct platform_device smc91x_device = {
 
 static struct platform_device *devices[] __initdata = {
         &sa1111_device,
-        &lub_audio_device,
         &smc91x_device,
 };
 
5 changes: 5 additions & 0 deletions trunk/arch/i386/Kconfig
@@ -908,6 +908,11 @@ config IRQBALANCE
           The default yes will allow the kernel to do irq load balancing.
           Saying no will keep the kernel from doing irq load balancing.
 
+config HAVE_DEC_LOCK
+        bool
+        depends on (SMP || PREEMPT) && X86_CMPXCHG
+        default y
+
 # turning this on wastes a bunch of space.
 # Summit needs it only when NUMA is on
 config BOOT_IOREMAP
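For context, HAVE_DEC_LOCK gates whether an architecture supplies its own _atomic_dec_and_lock(). When the option is unset, kbuild links the portable fallback from lib/dec_and_lock.c, which takes the lock unconditionally. A minimal sketch of that generic version, recalled from the era's code rather than taken from this diff:

#include <linux/spinlock.h>
#include <asm/atomic.h>

/*
 * Generic fallback (sketch, not part of this commit): take the
 * spinlock unconditionally, then decrement.  Always correct, but
 * it pays for the lock even when the count stays above zero,
 * which is exactly what the per-arch cmpxchg versions added by
 * this commit avoid.
 */
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
        spin_lock(lock);
        if (atomic_dec_and_test(atomic))
                return 1;       /* hit zero: return with the lock held */
        spin_unlock(lock);
        return 0;
}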
10 changes: 10 additions & 0 deletions trunk/arch/i386/kernel/acpi/earlyquirk.c
@@ -7,6 +7,7 @@
 #include <linux/pci.h>
 #include <asm/pci-direct.h>
 #include <asm/acpi.h>
+#include <asm/apic.h>
 
 static int __init check_bridge(int vendor, int device)
 {
@@ -15,6 +16,15 @@ static int __init check_bridge(int vendor, int device)
         if (vendor == PCI_VENDOR_ID_NVIDIA) {
                 acpi_skip_timer_override = 1;
         }
+#ifdef CONFIG_X86_LOCAL_APIC
+        /*
+         * ATI IXP chipsets get double timer interrupts.
+         * For now just do this for all ATI chipsets.
+         * FIXME: this needs to be checked for the non ACPI case too.
+         */
+        if (vendor == PCI_VENDOR_ID_ATI)
+                disable_timer_pin_1 = 1;
+#endif
         return 0;
 }
 
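For orientation (not part of this diff): check_bridge() is called from the early PCI scan in the same file, which walks the buses with direct config-space reads before the PCI subsystem is up. Roughly, and simplified from memory of the era's code:

#include <linux/init.h>
#include <linux/pci.h>          /* PCI_VENDOR_ID */
#include <asm/pci-direct.h>     /* read_pci_config() */

/* Simplified sketch of the caller; details differ in the real file. */
void __init check_acpi_pci(void)
{
        int num, slot, func;

        /* "Poor man's" PCI discovery via type 1 config cycles. */
        for (num = 0; num < 32; num++)
                for (slot = 0; slot < 32; slot++)
                        for (func = 0; func < 8; func++) {
                                u32 vendor = read_pci_config(num, slot, func,
                                                             PCI_VENDOR_ID);
                                if (vendor == 0xffffffff)
                                        continue;       /* nothing here */
                                check_bridge(vendor & 0xffff, vendor >> 16);
                        }
}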
1 change: 1 addition & 0 deletions trunk/arch/i386/lib/Makefile
@@ -7,3 +7,4 @@ lib-y = checksum.o delay.o usercopy.o getuser.o putuser.o memcpy.o strstr.o \
         bitops.o
 
 lib-$(CONFIG_X86_USE_3DNOW) += mmx.o
+lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
42 changes: 42 additions & 0 deletions trunk/arch/i386/lib/dec_and_lock.c
@@ -0,0 +1,42 @@
+/*
+ * x86 version of "atomic_dec_and_lock()" using
+ * the atomic "cmpxchg" instruction.
+ *
+ * (For CPU's lacking cmpxchg, we use the slow
+ * generic version, and this one never even gets
+ * compiled).
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <asm/atomic.h>
+
+int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+{
+        int counter;
+        int newcount;
+
+repeat:
+        counter = atomic_read(atomic);
+        newcount = counter-1;
+
+        if (!newcount)
+                goto slow_path;
+
+        asm volatile("lock; cmpxchgl %1,%2"
+                :"=a" (newcount)
+                :"r" (newcount), "m" (atomic->counter), "0" (counter));
+
+        /* If the above failed, "eax" will have changed */
+        if (newcount != counter)
+                goto repeat;
+        return 0;
+
+slow_path:
+        spin_lock(lock);
+        if (atomic_dec_and_test(atomic))
+                return 1;
+        spin_unlock(lock);
+        return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_lock);
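A usage sketch may help here: the point of atomic_dec_and_lock() is that only the final reference pays for the lock. The object type and list lock below are hypothetical, for illustration only:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

/* Hypothetical refcounted object kept on a global list. */
struct obj {
        atomic_t         refcount;
        struct list_head node;
};

static spinlock_t obj_list_lock = SPIN_LOCK_UNLOCKED;

void obj_put(struct obj *o)
{
        /* Fast path: count stays above zero, no lock is taken. */
        if (!atomic_dec_and_lock(&o->refcount, &obj_list_lock))
                return;
        /* Count hit zero: we now hold obj_list_lock; unlink and free. */
        list_del(&o->node);
        spin_unlock(&obj_list_lock);
        kfree(o);
}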
5 changes: 5 additions & 0 deletions trunk/arch/ia64/Kconfig
@@ -298,6 +298,11 @@ config PREEMPT
 
 source "mm/Kconfig"
 
+config HAVE_DEC_LOCK
+        bool
+        depends on (SMP || PREEMPT)
+        default y
+
 config IA32_SUPPORT
         bool "Support for Linux/x86 binaries"
         help
12 changes: 11 additions & 1 deletion trunk/arch/ia64/Makefile
@@ -82,7 +82,17 @@ unwcheck: vmlinux
 archclean:
         $(Q)$(MAKE) $(clean)=$(boot)
 
-CLEAN_FILES += vmlinux.gz bootloader
+archprepare: include/asm-ia64/.offsets.h.stamp
+
+include/asm-ia64/.offsets.h.stamp:
+        mkdir -p include/asm-ia64
+        [ -s include/asm-ia64/asm-offsets.h ] \
+                || echo "#define IA64_TASK_SIZE 0" > include/asm-ia64/asm-offsets.h
+        touch $@
+
+
+
+CLEAN_FILES += vmlinux.gz bootloader include/asm-ia64/.offsets.h.stamp
 
 boot: lib/lib.a vmlinux
         $(Q)$(MAKE) $(build)=$(boot) $@
6 changes: 6 additions & 0 deletions trunk/arch/ia64/ia32/binfmt_elf32.c
@@ -216,6 +216,12 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
         if (!mpnt)
                 return -ENOMEM;
 
+        if (security_vm_enough_memory((IA32_STACK_TOP - (PAGE_MASK & (unsigned long) bprm->p))
+                                      >> PAGE_SHIFT)) {
+                kmem_cache_free(vm_area_cachep, mpnt);
+                return -ENOMEM;
+        }
+
         memset(mpnt, 0, sizeof(*mpnt));
 
         down_write(&current->mm->mmap_sem);
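The expression passed to security_vm_enough_memory() is just the number of whole pages between the page-aligned stack pointer and the ia32 stack top. A standalone worked example, with assumed values for PAGE_SHIFT, IA32_STACK_TOP and bprm->p:

#include <stdio.h>

/* Assumed example values: 4 KiB pages, illustrative stack top. */
#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define IA32_STACK_TOP  0xc0000000UL    /* illustrative value */

int main(void)
{
        unsigned long p = 0xbfffe123UL; /* hypothetical bprm->p */
        unsigned long pages =
                (IA32_STACK_TOP - (PAGE_MASK & p)) >> PAGE_SHIFT;
        /* (0xc0000000 - 0xbfffe000) >> 12 == 2 pages charged */
        printf("%lu pages charged\n", pages);
        return 0;
}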
1 change: 0 additions & 1 deletion trunk/arch/ia64/kernel/asm-offsets.c
@@ -4,7 +4,6 @@
  * to extract and format the required data.
  */
 
-#define ASM_OFFSETS_C 1
 #include <linux/config.h>
 
 #include <linux/sched.h>
1 change: 1 addition & 0 deletions trunk/arch/ia64/lib/Makefile
@@ -15,6 +15,7 @@ lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
 lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
 lib-$(CONFIG_PERFMON) += carta_random.o
 lib-$(CONFIG_MD_RAID5) += xor.o
+lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
 
 AFLAGS___divdi3.o =
 AFLAGS___udivdi3.o = -DUNSIGNED
42 changes: 42 additions & 0 deletions trunk/arch/ia64/lib/dec_and_lock.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2003 Jerome Marchand, Bull S.A.
+ * Cleaned up by David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * This file is released under the GPLv2, or at your option any later version.
+ *
+ * ia64 version of "atomic_dec_and_lock()" using the atomic "cmpxchg" instruction. This
+ * code is an adaptation of the x86 version of "atomic_dec_and_lock()".
+ */
+
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+/*
+ * Decrement REFCOUNT and if the count reaches zero, acquire the spinlock. Both of these
+ * operations have to be done atomically, so that the count doesn't drop to zero without
+ * acquiring the spinlock first.
+ */
+int
+_atomic_dec_and_lock (atomic_t *refcount, spinlock_t *lock)
+{
+        int old, new;
+
+        do {
+                old = atomic_read(refcount);
+                new = old - 1;
+
+                if (unlikely (old == 1)) {
+                        /* oops, we may be decrementing to zero, do it the slow way... */
+                        spin_lock(lock);
+                        if (atomic_dec_and_test(refcount))
+                                return 1;
+                        spin_unlock(lock);
+                        return 0;
+                }
+        } while (cmpxchg(&refcount->counter, old, new) != old);
+        return 0;
+}
+
+EXPORT_SYMBOL(_atomic_dec_and_lock);
5 changes: 5 additions & 0 deletions trunk/arch/m32r/Kconfig
@@ -220,6 +220,11 @@ config PREEMPT
           Say Y here if you are building a kernel for a desktop, embedded
           or real-time system. Say N if you are unsure.
 
+config HAVE_DEC_LOCK
+        bool
+        depends on (SMP || PREEMPT)
+        default n
+
 config SMP
         bool "Symmetric multi-processing support"
         ---help---
4 changes: 4 additions & 0 deletions trunk/arch/mips/Kconfig
@@ -1009,6 +1009,10 @@ config GENERIC_CALIBRATE_DELAY
         bool
         default y
 
+config HAVE_DEC_LOCK
+        bool
+        default y
+
 #
 # Select some configuration options automatically based on user selections.
 #
9 changes: 7 additions & 2 deletions trunk/arch/mips/kernel/sysirix.c
@@ -581,13 +581,18 @@ asmlinkage int irix_brk(unsigned long brk)
         }
 
         /*
-         * Ok, looks good - let it rip.
+         * Check if we have enough memory..
          */
-        if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) {
+        if (security_vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT)) {
                 ret = -ENOMEM;
                 goto out;
         }
+
+        /*
+         * Ok, looks good - let it rip.
+         */
         mm->brk = brk;
+        do_brk(oldbrk, newbrk-oldbrk);
         ret = 0;
 
 out:
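Because the hunk interleaves old and new lines, the resulting code path reads more easily reassembled. This is just the post-patch state of the lines above: the accounting check now runs before mm->brk is updated or do_brk() touches the address space, so a failed check leaves everything untouched.

        /*
         * Check if we have enough memory..
         */
        if (security_vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT)) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * Ok, looks good - let it rip.
         */
        mm->brk = brk;
        do_brk(oldbrk, newbrk-oldbrk);
        ret = 0;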
2 changes: 1 addition & 1 deletion trunk/arch/mips/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y += csum_partial_copy.o memcpy.o promlib.o \
+lib-y += csum_partial_copy.o dec_and_lock.o memcpy.o promlib.o \
           strlen_user.o strncpy_user.o strnlen_user.o
 
 obj-y += iomap.o
47 changes: 47 additions & 0 deletions trunk/arch/mips/lib/dec_and_lock.c
@@ -0,0 +1,47 @@
+/*
+ * MIPS version of atomic_dec_and_lock() using cmpxchg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+/*
+ * This is an implementation of the notion of "decrement a
+ * reference count, and return locked if it decremented to zero".
+ *
+ * This implementation can be used on any architecture that
+ * has a cmpxchg, and where atomic->value is an int holding
+ * the value of the atomic (i.e. the high bits aren't used
+ * for a lock or anything like that).
+ */
+int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+{
+        int counter;
+        int newcount;
+
+        for (;;) {
+                counter = atomic_read(atomic);
+                newcount = counter - 1;
+                if (!newcount)
+                        break;          /* do it the slow way */
+
+                newcount = cmpxchg(&atomic->counter, counter, newcount);
+                if (newcount == counter)
+                        return 0;
+        }
+
+        spin_lock(lock);
+        if (atomic_dec_and_test(atomic))
+                return 1;
+        spin_unlock(lock);
+        return 0;
+}
+
+EXPORT_SYMBOL(_atomic_dec_and_lock);
4 changes: 4 additions & 0 deletions trunk/arch/ppc/Kconfig
@@ -26,6 +26,10 @@ config GENERIC_CALIBRATE_DELAY
         bool
         default y
 
+config HAVE_DEC_LOCK
+        bool
+        default y
+
 config PPC
         bool
         default y
2 changes: 1 addition & 1 deletion trunk/arch/ppc/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for ppc-specific library files..
 #
 
-obj-y := checksum.o string.o strcase.o div64.o
+obj-y := checksum.o string.o strcase.o dec_and_lock.o div64.o
 
 obj-$(CONFIG_8xx) += rheap.o
 obj-$(CONFIG_CPM2) += rheap.o
38 changes: 38 additions & 0 deletions trunk/arch/ppc/lib/dec_and_lock.c
@@ -0,0 +1,38 @@
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+/*
+ * This is an implementation of the notion of "decrement a
+ * reference count, and return locked if it decremented to zero".
+ *
+ * This implementation can be used on any architecture that
+ * has a cmpxchg, and where atomic->value is an int holding
+ * the value of the atomic (i.e. the high bits aren't used
+ * for a lock or anything like that).
+ */
+int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+{
+        int counter;
+        int newcount;
+
+        for (;;) {
+                counter = atomic_read(atomic);
+                newcount = counter - 1;
+                if (!newcount)
+                        break;          /* do it the slow way */
+
+                newcount = cmpxchg(&atomic->counter, counter, newcount);
+                if (newcount == counter)
+                        return 0;
+        }
+
+        spin_lock(lock);
+        if (atomic_dec_and_test(atomic))
+                return 1;
+        spin_unlock(lock);
+        return 0;
+}
+
+EXPORT_SYMBOL(_atomic_dec_and_lock);
4 changes: 4 additions & 0 deletions trunk/arch/ppc64/Kconfig
@@ -28,6 +28,10 @@ config GENERIC_ISA_DMA
         bool
         default y
 
+config HAVE_DEC_LOCK
+        bool
+        default y
+
 config EARLY_PRINTK
         bool
         default y
[Diff truncated by the page: only the first 21 of the 89 changed files are shown above.]
