---
r: 1914
b: refs/heads/master
c: 6d1cfba
h: refs/heads/master
v: v3
Michael Chan authored and David S. Miller committed Jun 8, 2005
1 parent 675c5d3 commit 9621bd3
Showing 39 changed files with 119 additions and 281 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 4f58802fae8a51d9e79454746584175c14f84519
refs/heads/master: 6d1cfbab4de64f2d0c5b0f81177ade0d75b69288
16 changes: 5 additions & 11 deletions trunk/arch/arm/kernel/entry-armv.S
@@ -269,7 +269,7 @@ __pabt_svc:
add r5, sp, #S_PC
ldmia r7, {r2 - r4} @ Get USR pc, cpsr

#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#if __LINUX_ARM_ARCH__ < 6
@ make sure our user space atomic helper is aborted
cmp r2, #VIRT_OFFSET
bichs r3, r3, #PSR_Z_BIT
@@ -616,17 +616,11 @@ __kuser_helper_start:

__kuser_cmpxchg: @ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#if __LINUX_ARM_ARCH__ < 6

/*
* Poor you. No fast solution possible...
* The kernel itself must perform the operation.
* A special ghost syscall is used for that (see traps.c).
*/
swi #0x9ffff0
mov pc, lr

#elif __LINUX_ARM_ARCH__ < 6
#ifdef CONFIG_SMP /* sanity check */
#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
#endif

/*
* Theory of operation:
49 changes: 0 additions & 49 deletions trunk/arch/arm/kernel/traps.c
@@ -464,55 +464,6 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
#endif
return 0;

#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
/*
* Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
* Return zero in r0 if *MEM was changed or non-zero if no exchange
* happened. Also set the user C flag accordingly.
* If access permissions have to be fixed up then non-zero is
* returned and the operation has to be re-attempted.
*
* *NOTE*: This is a ghost syscall private to the kernel. Only the
* __kuser_cmpxchg code in entry-armv.S should be aware of its
* existence. Don't ever use this from user code.
*/
case 0xfff0:
{
extern void do_DataAbort(unsigned long addr, unsigned int fsr,
struct pt_regs *regs);
unsigned long val;
unsigned long addr = regs->ARM_r2;
struct mm_struct *mm = current->mm;
pgd_t *pgd; pmd_t *pmd; pte_t *pte;

regs->ARM_cpsr &= ~PSR_C_BIT;
spin_lock(&mm->page_table_lock);
pgd = pgd_offset(mm, addr);
if (!pgd_present(*pgd))
goto bad_access;
pmd = pmd_offset(pgd, addr);
if (!pmd_present(*pmd))
goto bad_access;
pte = pte_offset_map(pmd, addr);
if (!pte_present(*pte) || !pte_write(*pte))
goto bad_access;
val = *(unsigned long *)addr;
val -= regs->ARM_r0;
if (val == 0) {
*(unsigned long *)addr = regs->ARM_r1;
regs->ARM_cpsr |= PSR_C_BIT;
}
spin_unlock(&mm->page_table_lock);
return val;

bad_access:
spin_unlock(&mm->page_table_lock);
/* simulate a read access fault */
do_DataAbort(addr, 15 + (1 << 11), regs);
return -1;
}
#endif

default:
/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
if not implemented, rather than raising SIGILL. This
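The removed comment above documents the contract of the cmpxchg helper: r0 carries the expected old value, r1 the new value, r2 the pointer, and a zero return in r0 (plus the user C flag) signals that the store happened. For context, a minimal user-space sketch of calling the helper at its fixed vector-page address 0xffff0fc0; the typedef and the atomic_add_user() wrapper here are illustrative, not kernel code:

```c
/*
 * Sketch only: invoking the ARM kuser cmpxchg helper from user space,
 * assuming the ABI documented in the hunks above. Returns 0 (and sets
 * the C flag) if *ptr was updated from oldval to newval.
 */
typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)

static int atomic_add_user(volatile int *v, int amount)
{
        int old;
        do {
                old = *v;               /* snapshot the current value */
        } while (__kuser_cmpxchg(old, old + amount, v)); /* retry on race */
        return old + amount;
}
```

Whether the helper is a few ARMv6 ldrex/strex instructions, the pre-v6 sequence, or the ghost syscall shown above is invisible to the caller; only the address and register contract are fixed.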
6 changes: 3 additions & 3 deletions trunk/arch/arm/lib/io-writesw-armv4.S
@@ -87,9 +87,9 @@ ENTRY(__raw_writesw)
subs r2, r2, #2
orr ip, ip, r3, push_hbyte1
strh ip, [r0]
bpl 1b
bpl 2b

tst r2, #1
3: movne ip, r3, lsr #8
3: tst r2, #1
2: movne ip, r3, lsr #8
strneh ip, [r0]
mov pc, lr
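For reference, a plain C model of what __raw_writesw does (semantics assumed from the routine above: write `count` halfwords from a possibly unaligned buffer to the single I/O register at `addr`; the label shuffle in the hunk fixes which branch handles the trailing halfword on the unaligned path):

```c
#include <stdint.h>
#include <string.h>

/* Sketch: a C model of __raw_writesw's behaviour, not the kernel code.
 * The hand-written assembly avoids per-byte loads on the aligned path;
 * memcpy() here stands in for the unaligned source access. */
void raw_writesw_model(volatile uint16_t *addr, const void *data, int count)
{
        const uint8_t *p = data;

        while (count-- > 0) {
                uint16_t v;

                memcpy(&v, p, sizeof(v)); /* tolerate unaligned source */
                *addr = v;                /* every store hits one register */
                p += sizeof(v);
        }
}
```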
8 changes: 0 additions & 8 deletions trunk/arch/arm/mm/Kconfig
@@ -422,11 +422,3 @@ config HAS_TLS_REG
assume directly accessing that register and always obtain the
expected value only on ARMv7 and above.

config NEEDS_SYSCALL_FOR_CMPXCHG
bool
default y if SMP && (CPU_32v5 || CPU_32v4 || CPU_32v3)
help
SMP on a pre-ARMv6 processor? Well OK then.
Forget about fast user space cmpxchg support.
It is just not possible.

2 changes: 1 addition & 1 deletion trunk/arch/i386/kernel/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_SCx200) += scx200.o
# Note: kbuild does not track this dependency due to usage of .incbin
$(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so
targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so)
targets += vsyscall-note.o vsyscall.lds
targets += vsyscall.lds

# The DSO images are built using a special linker script.
quiet_cmd_syscall = SYSCALL $@
10 changes: 4 additions & 6 deletions trunk/arch/ia64/kernel/module.c
@@ -825,16 +825,14 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
* XXX Should have an arch-hook for running this after final section
* addresses have been selected...
*/
uint64_t gp;
if (mod->core_size > MAX_LTOFF)
/* See if gp can cover the entire core module: */
uint64_t gp = (uint64_t) mod->module_core + MAX_LTOFF / 2;
if (mod->core_size >= MAX_LTOFF)
/*
* This takes advantage of fact that SHF_ARCH_SMALL gets allocated
* at the end of the module.
*/
gp = mod->core_size - MAX_LTOFF / 2;
else
gp = mod->core_size / 2;
gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
gp = (uint64_t) mod->module_core + mod->core_size - MAX_LTOFF / 2;
mod->arch.gp = gp;
DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp);
}
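Both variants in this hunk implement the same constraint: gp-relative (ltoff) addressing reaches only ±MAX_LTOFF/2, and the SHF_ARCH_SMALL sections sit at the end of the module, so gp must land within reach of that tail. A standalone sketch of the placement rule, with names mirroring the diff (not a drop-in for module.c, where MAX_LTOFF is defined):

```c
#include <stdint.h>

/* Sketch of the gp placement logic above. max_ltoff is the total span
 * reachable from gp via ltoff relocations. */
static uint64_t place_gp(uint64_t module_core, uint64_t core_size,
                         uint64_t max_ltoff)
{
        /* See if gp can cover the entire core module: */
        uint64_t gp = module_core + max_ltoff / 2;

        if (core_size >= max_ltoff)
                /* Small sections are allocated at the end of the module,
                 * so center gp's reach on the module's tail instead. */
                gp = module_core + core_size - max_ltoff / 2;
        return gp;
}
```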
6 changes: 0 additions & 6 deletions trunk/arch/ia64/kernel/ptrace.c
@@ -635,17 +635,11 @@ ia64_flush_fph (struct task_struct *task)
{
struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));

/*
* Prevent migrating this task while
* we're fiddling with the FPU state
*/
preempt_disable();
if (ia64_is_local_fpu_owner(task) && psr->mfh) {
psr->mfh = 0;
task->thread.flags |= IA64_THREAD_FPH_VALID;
ia64_save_fpu(&task->thread.fph[0]);
}
preempt_enable();
}
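The comment and preempt_disable()/preempt_enable() pair in this hunk guard the lazily switched fph state against the task migrating to another CPU mid-save. The general shape of that pattern, as a sketch; owns_fpu() and save_fpu() are hypothetical stand-ins for ia64_is_local_fpu_owner() and ia64_save_fpu(), and in the kernel this relies on <linux/preempt.h>:

```c
struct task_struct;
int owns_fpu(struct task_struct *task);   /* hypothetical predicate */
void save_fpu(struct task_struct *task);  /* hypothetical save routine */

void flush_lazy_state(struct task_struct *task)
{
        preempt_disable();      /* pin this task to the current CPU */
        if (owns_fpu(task))     /* per-CPU state is live right here */
                save_fpu(task); /* safe: no migration can occur now */
        preempt_enable();
}
```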

3 changes: 1 addition & 2 deletions trunk/arch/ia64/kernel/setup.c
@@ -720,8 +720,7 @@ cpu_init (void)
ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

/*
* Initialize default control register to defer speculative faults except
* for those arising from TLB misses, which are not deferred. The
* Initialize default control register to defer all speculative faults. The
* kernel MUST NOT depend on a particular setting of these bits (in other words,
* the kernel must have recovery code for all speculative accesses). Turn on
* dcr.lc as per recommendation by the architecture team. Most IA-32 apps
29 changes: 1 addition & 28 deletions trunk/arch/ia64/kernel/traps.c
@@ -111,24 +111,6 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
siginfo_t siginfo;
int sig, code;

/* break.b always sets cr.iim to 0, which causes problems for
* debuggers. Get the real break number from the original instruction,
* but only for kernel code. User space break.b is left alone, to
* preserve the existing behaviour. All break codings have the same
* format, so there is no need to check the slot type.
*/
if (break_num == 0 && !user_mode(regs)) {
struct ia64_psr *ipsr = ia64_psr(regs);
unsigned long *bundle = (unsigned long *)regs->cr_iip;
unsigned long slot;
switch (ipsr->ri) {
case 0: slot = (bundle[0] >> 5); break;
case 1: slot = (bundle[0] >> 46) | (bundle[1] << 18); break;
default: slot = (bundle[1] >> 23); break;
}
break_num = ((slot >> 36 & 1) << 20) | (slot >> 6 & 0xfffff);
}

/* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these fields initialized: */
siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
siginfo.si_imm = break_num;
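The removed block recovers the real break immediate because break.b always leaves cr.iim as 0. IA-64 bundles are 128 bits, a 5-bit template followed by three 41-bit slots, which is exactly what the switch on ipsr->ri picks apart. A standalone sketch (masking added for clarity; the kernel code can skip it because the later shifts discard the high bits):

```c
#include <stdint.h>

/* Sketch: extract 41-bit slot 'ri' (0..2) from a 128-bit IA-64 bundle
 * stored as two little-endian 64-bit words, mirroring the switch in
 * the hunk above. */
static uint64_t bundle_slot(const uint64_t b[2], unsigned int ri)
{
        const uint64_t mask = (1ULL << 41) - 1;

        switch (ri) {
        case 0:  return (b[0] >> 5) & mask;                    /* bits 5..45 */
        case 1:  return ((b[0] >> 46) | (b[1] << 18)) & mask;  /* bits 46..86 */
        default: return (b[1] >> 23) & mask;                   /* bits 87..127 */
        }
}
```

The break number then comes from the slot's immediate fields, as in the removed line: bit 36 supplies the top bit and bits 6..25 the low 20 bits.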
@@ -220,21 +202,13 @@ disabled_fph_fault (struct pt_regs *regs)

/* first, grant user-level access to fph partition: */
psr->dfh = 0;

/*
* Make sure that no other task gets in on this processor
* while we're claiming the FPU
*/
preempt_disable();
#ifndef CONFIG_SMP
{
struct task_struct *fpu_owner
= (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);

if (ia64_is_local_fpu_owner(current)) {
preempt_enable_no_resched();
if (ia64_is_local_fpu_owner(current))
return;
}

if (fpu_owner)
ia64_flush_fph(fpu_owner);
@@ -252,7 +226,6 @@ disabled_fph_fault (struct pt_regs *regs)
*/
psr->mfh = 1;
}
preempt_enable_no_resched();
}

static inline int
19 changes: 2 additions & 17 deletions trunk/arch/ia64/mm/init.c
@@ -305,9 +305,8 @@ setup_gate (void)
struct page *page;

/*
* Map the gate page twice: once read-only to export the ELF
* headers etc. and once execute-only page to enable
* privilege-promotion via "epc":
* Map the gate page twice: once read-only to export the ELF headers etc. and once
* execute-only page to enable privilege-promotion via "epc":
*/
page = virt_to_page(ia64_imva(__start_gate_section));
put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
@@ -316,20 +315,6 @@ setup_gate (void)
put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
/* Fill in the holes (if any) with read-only zero pages: */
{
unsigned long addr;

for (addr = GATE_ADDR + PAGE_SIZE;
addr < GATE_ADDR + PERCPU_PAGE_SIZE;
addr += PAGE_SIZE)
{
put_kernel_page(ZERO_PAGE(0), addr,
PAGE_READONLY);
put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
PAGE_READONLY);
}
}
#endif
ia64_patch_gate();
}
4 changes: 2 additions & 2 deletions trunk/arch/ia64/sn/kernel/setup.c
@@ -222,7 +222,7 @@ void __init early_sn_setup(void)

extern int platform_intr_list[];
extern nasid_t master_nasid;
static int __initdata shub_1_1_found = 0;
static int shub_1_1_found __initdata;

/*
* sn_check_for_wars
@@ -251,7 +251,7 @@ static void __init sn_check_for_wars(void)
} else {
for_each_online_node(cnode) {
if (is_shub_1_1(cnodeid_to_nasid(cnode)))
shub_1_1_found = 1;
sn_hub_info->shub_1_1_found = 1;
}
}
}
11 changes: 0 additions & 11 deletions trunk/arch/ppc/kernel/cputable.c
@@ -849,17 +849,6 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
},
{ /* 405EP */
.pvr_mask = 0xffff0000,
.pvr_value = 0x51210000,
.cpu_name = "405EP",
.cpu_features = CPU_FTR_SPLIT_ID_CACHE |
CPU_FTR_USE_TB,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
},

#endif /* CONFIG_40x */
#ifdef CONFIG_44x
28 changes: 6 additions & 22 deletions trunk/arch/ppc64/boot/prom.c
@@ -11,23 +11,6 @@
#include <linux/string.h>
#include <linux/ctype.h>

extern __u32 __div64_32(unsigned long long *dividend, __u32 divisor);

/* The unnecessary pointer compare is there
* to check for type safety (n must be 64bit)
*/
# define do_div(n,base) ({ \
__u32 __base = (base); \
__u32 __rem; \
(void)(((typeof((n)) *)0) == ((unsigned long long *)0)); \
if (((n) >> 32) == 0) { \
__rem = (__u32)(n) % __base; \
(n) = (__u32)(n) / __base; \
} else \
__rem = __div64_32(&(n), __base); \
__rem; \
})
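Two things are going on in this macro: the dead pointer comparison is a zero-cost compile-time type check (comparing `typeof(n) *` against `unsigned long long *` draws a distinct-pointer-types warning when n is not 64-bit), and __div64_32 exists because a plain 64-bit `/` or `%` on 32-bit PowerPC would pull in libgcc's __udivdi3/__umoddi3, which this freestanding boot wrapper does not link against. A sketch of the type-check trick in isolation:

```c
/* Sketch: the zero-cost type check from do_div() above. The compare is
 * never evaluated at run time; it exists only so the compiler warns
 * when 'n' is not an unsigned long long. */
#define check_u64(n) \
        ((void)(((typeof((n)) *)0) == ((unsigned long long *)0)))

unsigned long long big = 1;
unsigned int small = 1;

void demo(void)
{
        check_u64(big);   /* fine: types match */
        check_u64(small); /* warning: comparison of distinct pointer types */
}
```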

int (*prom)(void *);

void *chosen_handle;
@@ -369,7 +352,7 @@ static int skip_atoi(const char **s)
#define SPECIAL 32 /* 0x */
#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */

static char * number(char * str, unsigned long long num, int base, int size, int precision, int type)
static char * number(char * str, long num, int base, int size, int precision, int type)
{
char c,sign,tmp[66];
const char *digits="0123456789abcdefghijklmnopqrstuvwxyz";
@@ -384,9 +367,9 @@ static char * number(char * str, unsigned long long num, int base, int size, int
c = (type & ZEROPAD) ? '0' : ' ';
sign = 0;
if (type & SIGN) {
if ((signed long long)num < 0) {
if (num < 0) {
sign = '-';
num = - (signed long long)num;
num = -num;
size--;
} else if (type & PLUS) {
sign = '+';
@@ -406,7 +389,8 @@ static char * number(char * str, unsigned long long num, int base, int size, int
if (num == 0)
tmp[i++]='0';
else while (num != 0) {
tmp[i++] = digits[do_div(num, base)];
tmp[i++] = digits[num % base];
num /= base;
}
if (i > precision)
precision = i;
@@ -442,7 +426,7 @@ int sprintf(char * buf, const char *fmt, ...);
int vsprintf(char *buf, const char *fmt, va_list args)
{
int len;
unsigned long long num;
unsigned long num;
int i, base;
char * str;
const char *s;