Merge branch 'work.sparc32' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
David S. Miller committed Feb 27, 2021
2 parents b9d6243 + 73686e7 commit cf64c2a
Showing 14 changed files with 193 additions and 526 deletions.
1 change: 0 additions & 1 deletion arch/sparc/include/asm/elf_64.h
@@ -8,7 +8,6 @@

 #include <asm/ptrace.h>
 #include <asm/processor.h>
-#include <asm/extable_64.h>
 #include <asm/spitfire.h>
 #include <asm/adi.h>

arch/sparc/include/asm/{extable_64.h → extable.h} (renamed)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_EXTABLE64_H
-#define __ASM_EXTABLE64_H
+#ifndef __ASM_EXTABLE_H
+#define __ASM_EXTABLE_H
 /*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
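Taken together, the sparc changes in this merge retire sparc32's private, range-capable exception table in favor of the generic kernel scheme this header describes: a sorted array of (insn, fixup) pairs searched when a whitelisted instruction faults. As orientation, here is a minimal C sketch of that scheme; the struct mirrors the one removed from uaccess_32.h below, while the search helper is illustrative rather than the kernel's actual lib/extable.c code.

/* Minimal sketch of the generic exception-table scheme: one sorted
 * (insn, fixup) pair per faulting-capable instruction.
 */
struct exception_table_entry {
	unsigned long insn;	/* address of the insn allowed to fault */
	unsigned long fixup;	/* address at which to resume on a fault */
};

/* On a kernel fault at pc, the trap code does (in effect) this lookup. */
static const struct exception_table_entry *
sketch_search(const struct exception_table_entry *first,
	      const struct exception_table_entry *last,
	      unsigned long pc)
{
	while (first <= last) {	/* entries sorted by insn: binary search */
		const struct exception_table_entry *mid =
			first + (last - first) / 2;

		if (mid->insn < pc)
			first = mid + 1;
		else if (mid->insn > pc)
			last = mid - 1;
		else
			return mid;	/* resume execution at mid->fixup */
	}
	return NULL;	/* no entry: a genuine kernel fault, oops */
}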
3 changes: 3 additions & 0 deletions arch/sparc/include/asm/uaccess.h
@@ -1,6 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef ___ASM_SPARC_UACCESS_H
 #define ___ASM_SPARC_UACCESS_H
+
+#include <asm/extable.h>
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/uaccess_64.h>
 #else
38 changes: 0 additions & 38 deletions arch/sparc/include/asm/uaccess_32.h
@@ -13,9 +13,6 @@

 #include <asm/processor.h>
 
-#define ARCH_HAS_SORT_EXTABLE
-#define ARCH_HAS_SEARCH_EXTABLE
-
 /* Sparc is not segmented, however we need to be able to fool access_ok()
  * when doing system calls from kernel mode legitimately.
  *
@@ -40,36 +37,6 @@
 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
 #define access_ok(addr, size) __access_ok((unsigned long)(addr), size)
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- *
- * There is a special way how to put a range of potentially faulting
- * insns (like twenty ldd/std's with no intervening other instructions)
- * You specify address of first in insn and 0 in fixup and in the next
- * exception_table_entry you specify last potentially faulting insn + 1
- * and in fixup the routine which should handle the fault.
- * That fixup code will get
- * (faulting_insn_address - first_insn_in_the_range_address)/4
- * in %g2 (ie. index of the faulting instruction in the range).
- */
-
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
-/* Returns 0 if exception not found and fixup otherwise. */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
-
 /* Uh, these should become the main single-value transfer routines..
  * They automatically use the right size if we just have the right
  * pointer type..
@@ -252,12 +219,7 @@ static inline unsigned long __clear_user(void __user *addr, unsigned long size)
 	unsigned long ret;
 
 	__asm__ __volatile__ (
-		".section __ex_table,#alloc\n\t"
-		".align 4\n\t"
-		".word 1f,3\n\t"
-		".previous\n\t"
 		"mov %2, %%o1\n"
-		"1:\n\t"
 		"call __bzero\n\t"
 		" mov %1, %%o0\n\t"
 		"mov %%o0, %0\n"
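The block deleted above is the core of what this merge retires: sparc32's private encoding in which a run of consecutive faulting instructions (such as the unrolled ldd/std chunks in checksum_32.S below) shared one pair of table entries. The first entry carried the range start with fixup == 0, the next carried last-insn + 1 and the real fixup routine, and the routine received the faulting instruction's index within the range in %g2. A hedged reconstruction of that lookup, derived from the deleted comment rather than from the removed arch/sparc sources:

/* Reconstruction of the retired ranged lookup, based only on the
 * deleted comment above -- illustrative, not the removed sparc32 code.
 * Returns 0 if no entry matches, otherwise the fixup address.
 */
struct exception_table_entry {
	unsigned long insn, fixup;
};

static unsigned long
sketch_search_extables_range(const struct exception_table_entry *tbl,
			     int num, unsigned long pc, unsigned long *g2)
{
	int i;

	for (i = 0; i < num; i++) {
		if (tbl[i].fixup == 0) {
			/* Range entry: covers [tbl[i].insn, tbl[i+1].insn).
			 * A well-formed table always pairs range entries. */
			if (pc >= tbl[i].insn && pc < tbl[i + 1].insn) {
				/* Index of the faulting insn in the range,
				 * handed to the fixup routine in %g2. */
				*g2 = (pc - tbl[i].insn) / 4;
				return tbl[i + 1].fixup;
			}
			i++;	/* skip the entry that closes the range */
		} else if (tbl[i].insn == pc) {
			return tbl[i].fixup;	/* ordinary single entry */
		}
	}
	return 0;
}

Entries like this are presumably why sparc32 had to carry ARCH_HAS_SORT_EXTABLE and ARCH_HAS_SEARCH_EXTABLE: a fixup == 0 marker only means something relative to its neighbor, which the generic sort and binary search know nothing about. The same cleanup reaches __clear_user() above, where the hand-rolled ".word 1f,3" entry at the call site goes away, presumably leaving fault handling to __bzero's own exception entries.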
1 change: 0 additions & 1 deletion arch/sparc/include/asm/uaccess_64.h
@@ -10,7 +10,6 @@
 #include <linux/string.h>
 #include <asm/asi.h>
 #include <asm/spitfire.h>
-#include <asm/extable_64.h>
 
 #include <asm/processor.h>

10 changes: 5 additions & 5 deletions arch/sparc/kernel/unaligned_32.c
@@ -16,6 +16,7 @@
 #include <linux/uaccess.h>
 #include <linux/smp.h>
 #include <linux/perf_event.h>
+#include <linux/extable.h>
 
 #include <asm/setup.h>

@@ -213,10 +214,10 @@ static inline int ok_for_kernel(unsigned int insn)

 static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
 {
-	unsigned long g2 = regs->u_regs [UREG_G2];
-	unsigned long fixup = search_extables_range(regs->pc, &g2);
+	const struct exception_table_entry *entry;
 
-	if (!fixup) {
+	entry = search_exception_tables(regs->pc);
+	if (!entry) {
 		unsigned long address = compute_effective_address(regs, insn);
 		if(address < PAGE_SIZE) {
 			printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
@@ -232,9 +233,8 @@ static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
 		die_if_kernel("Oops", regs);
 		/* Not reached */
 	}
-	regs->pc = fixup;
+	regs->pc = entry->fixup;
 	regs->npc = regs->pc + 4;
-	regs->u_regs [UREG_G2] = g2;
 }
 
 asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
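With ranges gone, kernel_mna_trap_fault() above no longer threads an index through %g2: every faulting instruction resolves directly to its own entry via the generic search_exception_tables(). Note the sparc detail that both PCs must be updated, since npc holds the delay-slot successor. Restated as a compact sketch (try_mna_fixup() is a hypothetical name for illustration; the body mirrors the diff above):

#include <linux/extable.h>	/* search_exception_tables() */
#include <asm/ptrace.h>		/* struct pt_regs with pc/npc */

/* Hypothetical helper; mirrors kernel_mna_trap_fault() above. */
static int try_mna_fixup(struct pt_regs *regs)
{
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->pc);
	if (!entry)
		return 0;		/* unhandled: the oops path above */

	regs->pc = entry->fixup;	/* resume at the fixup stub */
	regs->npc = regs->pc + 4;	/* keep the delay-slot PC in step */
	return 1;
}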
64 changes: 27 additions & 37 deletions arch/sparc/lib/checksum_32.S
@@ -155,34 +155,27 @@ cpout: retl ! get outta here
 	.text;				\
 	.align	4
 
-#define EXT(start,end)			\
-	.section __ex_table,ALLOC;	\
-	.align	4;			\
-	.word	start, 0, end, cc_fault; \
-	.text;				\
-	.align	4
-
 /* This aligned version executes typically in 8.5 superscalar cycles, this
  * is the best I can do.  I say 8.5 because the final add will pair with
  * the next ldd in the main unrolled loop.  Thus the pipe is always full.
  * If you change these macros (including order of instructions),
  * please check the fixup code below as well.
  */
 #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7)	\
-	ldd	[src + off + 0x00], t0;		\
-	ldd	[src + off + 0x08], t2;		\
+	EX(ldd	[src + off + 0x00], t0);	\
+	EX(ldd	[src + off + 0x08], t2);	\
 	addxcc	t0, sum, sum;			\
-	ldd	[src + off + 0x10], t4;		\
+	EX(ldd	[src + off + 0x10], t4);	\
 	addxcc	t1, sum, sum;			\
-	ldd	[src + off + 0x18], t6;		\
+	EX(ldd	[src + off + 0x18], t6);	\
 	addxcc	t2, sum, sum;			\
-	std	t0, [dst + off + 0x00];		\
+	EX(std	t0, [dst + off + 0x00]);	\
 	addxcc	t3, sum, sum;			\
-	std	t2, [dst + off + 0x08];		\
+	EX(std	t2, [dst + off + 0x08]);	\
 	addxcc	t4, sum, sum;			\
-	std	t4, [dst + off + 0x10];		\
+	EX(std	t4, [dst + off + 0x10]);	\
 	addxcc	t5, sum, sum;			\
-	std	t6, [dst + off + 0x18];		\
+	EX(std	t6, [dst + off + 0x18]);	\
 	addxcc	t6, sum, sum;			\
 	addxcc	t7, sum, sum;

@@ -191,39 +184,39 @@ cpout: retl ! get outta here
  * Viking MXCC into streaming mode.  Ho hum...
  */
 #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7)	\
-	ldd	[src + off + 0x00], t0;		\
-	ldd	[src + off + 0x08], t2;		\
-	ldd	[src + off + 0x10], t4;		\
-	ldd	[src + off + 0x18], t6;		\
-	st	t0, [dst + off + 0x00];		\
+	EX(ldd	[src + off + 0x00], t0);	\
+	EX(ldd	[src + off + 0x08], t2);	\
+	EX(ldd	[src + off + 0x10], t4);	\
+	EX(ldd	[src + off + 0x18], t6);	\
+	EX(st	t0, [dst + off + 0x00]);	\
 	addxcc	t0, sum, sum;			\
-	st	t1, [dst + off + 0x04];		\
+	EX(st	t1, [dst + off + 0x04]);	\
 	addxcc	t1, sum, sum;			\
-	st	t2, [dst + off + 0x08];		\
+	EX(st	t2, [dst + off + 0x08]);	\
 	addxcc	t2, sum, sum;			\
-	st	t3, [dst + off + 0x0c];		\
+	EX(st	t3, [dst + off + 0x0c]);	\
 	addxcc	t3, sum, sum;			\
-	st	t4, [dst + off + 0x10];		\
+	EX(st	t4, [dst + off + 0x10]);	\
 	addxcc	t4, sum, sum;			\
-	st	t5, [dst + off + 0x14];		\
+	EX(st	t5, [dst + off + 0x14]);	\
 	addxcc	t5, sum, sum;			\
-	st	t6, [dst + off + 0x18];		\
+	EX(st	t6, [dst + off + 0x18]);	\
 	addxcc	t6, sum, sum;			\
-	st	t7, [dst + off + 0x1c];		\
+	EX(st	t7, [dst + off + 0x1c]);	\
 	addxcc	t7, sum, sum;
 
 /* Yuck, 6 superscalar cycles... */
 #define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3)	\
-	ldd	[src - off - 0x08], t0;		\
-	ldd	[src - off - 0x00], t2;		\
+	EX(ldd	[src - off - 0x08], t0);	\
+	EX(ldd	[src - off - 0x00], t2);	\
 	addxcc	t0, sum, sum;			\
-	st	t0, [dst - off - 0x08];		\
+	EX(st	t0, [dst - off - 0x08]);	\
 	addxcc	t1, sum, sum;			\
-	st	t1, [dst - off - 0x04];		\
+	EX(st	t1, [dst - off - 0x04]);	\
 	addxcc	t2, sum, sum;			\
-	st	t2, [dst - off - 0x00];		\
+	EX(st	t2, [dst - off - 0x00]);	\
 	addxcc	t3, sum, sum;			\
-	st	t3, [dst - off + 0x04];
+	EX(st	t3, [dst - off + 0x04]);
 
 /* Handle the end cruft code out of band for better cache patterns. */
 cc_end_cruft:
@@ -331,7 +324,6 @@ __csum_partial_copy_sparc_generic:
 	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-10:	EXT(5b, 10b)			! note for exception handling
 	sub	%g1, 128, %g1		! detract from length
 	addx	%g0, %g7, %g7		! add in last carry bit
 	andcc	%g1, 0xffffff80, %g0	! more to csum?
@@ -356,8 +348,7 @@ cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
 	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
 	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
 	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
-12:	EXT(cctbl, 12b)			! note for exception table handling
-	addx	%g0, %g7, %g7
+12:	addx	%g0, %g7, %g7
 	andcc	%o3, 0xf, %g0		! check for low bits set
 ccte:	bne	cc_end_cruft		! something left, handle it out of band
 	andcc	%o3, 8, %g0		! begin checks for that code
@@ -367,7 +358,6 @@ ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o
 	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-11:	EXT(ccdbl, 11b)			! note for exception table handling
 	sub	%g1, 128, %g1		! detract from length
 	addx	%g0, %g7, %g7		! add in last carry bit
 	andcc	%g1, 0xffffff80, %g0	! more to csum?
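The removed EXT(start, end) macro emitted a single four-word range entry (start, 0, end, cc_fault) covering an entire unrolled block, which is exactly the two-entry range encoding described by the uaccess_32.h comment deleted earlier in this diff. The new code wraps every ldd/st/std in EX() instead, so each load or store gets an ordinary two-word entry of its own. The EX() definition itself is folded out of the visible hunk; modeled on the removed EXT(), it plausibly looks like the sketch below (the names __ex_table, ALLOC, and cc_fault come from the removed macro; the exact body is an assumption, not the literal macro from this merge).

/* Hedged sketch of a per-insn EX() wrapper, patterned on the removed
 * EXT() above: one (insn, fixup) pair per wrapped instruction, all
 * sharing the cc_fault recovery label.  Illustrative only.
 */
#define EX(x, y)			\
98:	x, y;				\
	.section __ex_table, ALLOC;	\
	.align	4;			\
	.word	98b, cc_fault;		\
	.text;				\
	.align	4

With one entry per instruction, a faulting PC resolves directly through the generic sorted-table search, so the %g2 index arithmetic and the per-range fixup routines disappear; cc_fault presumably collapses to a single exit path that reports the failure to the caller.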
Diffs for the remaining 7 files not shown.
