x86: move exports to actual definitions
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Al Viro committed Aug 8, 2016
1 parent 22823ab commit 784d569
Showing 27 changed files with 68 additions and 150 deletions.
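
What the series does, in short: until now, assembly-defined symbols were exported from C collector files (arch/x86/kernel/i386_ksyms_32.c and arch/x86/kernel/x8664_ksyms_64.c, both deleted below), far from the definitions themselves. This commit adds <asm/export.h>, a thin wrapper around <asm-generic/export.h>, so EXPORT_SYMBOL() can be used directly in .S files. A minimal sketch of the pattern, using csum_partial as the example (the "before" excerpt is illustrative, not the literal deleted file):

    /* Before: export collected in a C file, away from the definition. */
    #include <linux/export.h>
    #include <asm/checksum.h>
    EXPORT_SYMBOL(csum_partial);

    /* After: export sits right at the definition in the .S file. */
    #include <asm/export.h>
    ENTRY(csum_partial)
            /* ... checksum body ... */
            ret
    ENDPROC(csum_partial)
    EXPORT_SYMBOL(csum_partial)     /* note: no trailing semicolon in asm */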
2 changes: 2 additions & 0 deletions arch/x86/entry/entry_32.S
@@ -44,6 +44,7 @@
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
+#include <asm/export.h>

.section .entry.text, "ax"

@@ -955,6 +956,7 @@ trace:
jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
+EXPORT_SYMBOL(mcount)
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
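
A note on the placement of EXPORT_SYMBOL(mcount) above: entry_32.S defines mcount twice, a dynamic-ftrace stub and a static-tracing variant, so the export goes after the #endif for CONFIG_DYNAMIC_FTRACE but still inside CONFIG_FUNCTION_TRACER, covering whichever definition was built. The export itself is needed because modules compiled with -pg call mcount. Schematically:

    #ifdef CONFIG_FUNCTION_TRACER
    #ifdef CONFIG_DYNAMIC_FTRACE
    ENTRY(mcount)
            ret                     /* call sites patched at runtime */
    END(mcount)
    #else
    ENTRY(mcount)
            /* static tracing: check ftrace_trace_function, etc. */
    END(mcount)
    #endif /* CONFIG_DYNAMIC_FTRACE */
    EXPORT_SYMBOL(mcount)           /* one export covers either variant */
    #endif /* CONFIG_FUNCTION_TRACER */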
2 changes: 2 additions & 0 deletions arch/x86/entry/entry_64.S
@@ -35,6 +35,7 @@
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
+#include <asm/export.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
@@ -785,6 +786,7 @@ ENTRY(native_load_gs_index)
popfq
ret
END(native_load_gs_index)
+EXPORT_SYMBOL(native_load_gs_index)

_ASM_EXTABLE(.Lgs_change, bad_gs)
.section .fixup, "ax"
3 changes: 3 additions & 0 deletions arch/x86/entry/thunk_32.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <asm/asm.h>
+#include <asm/export.h>

/* put return address in eax (arg1) */
.macro THUNK name, func, put_ret_addr_in_eax=0
@@ -36,5 +37,7 @@
#ifdef CONFIG_PREEMPT
THUNK ___preempt_schedule, preempt_schedule
THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
+EXPORT_SYMBOL(___preempt_schedule)
+EXPORT_SYMBOL(___preempt_schedule_notrace)
#endif

3 changes: 3 additions & 0 deletions arch/x86/entry/thunk_64.S
@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include "calling.h"
#include <asm/asm.h>
+#include <asm/export.h>

/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func, put_ret_addr_in_rdi=0
@@ -49,6 +50,8 @@
#ifdef CONFIG_PREEMPT
THUNK ___preempt_schedule, preempt_schedule
THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
+EXPORT_SYMBOL(___preempt_schedule)
+EXPORT_SYMBOL(___preempt_schedule_notrace)
#endif

#if defined(CONFIG_TRACE_IRQFLAGS) \
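
Both thunk files export the preemption thunks for the same reason: preempt_enable() is inlined into modules, and when the preempt count drops to zero with a reschedule pending it calls the thunk through __preempt_schedule(), which on x86 expands to a bare `call ___preempt_schedule`. A sketch, paraphrased from include/linux/preempt.h:

    #define preempt_enable() \
    do { \
            barrier(); \
            if (unlikely(preempt_count_dec_and_test())) \
                    __preempt_schedule(); \
    } while (0)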
4 changes: 4 additions & 0 deletions arch/x86/include/asm/export.h
@@ -0,0 +1,4 @@
+#ifdef CONFIG_64BIT
+#define KSYM_ALIGN 16
+#endif
+#include <asm-generic/export.h>
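
This new header is the only x86-specific piece: it raises the 64-bit alignment of exported-symbol table entries above the generic default before pulling in the generic implementation, which supplies an EXPORT_SYMBOL() usable from assembly. Roughly, each use emits a struct kernel_symbol record plus a name string; a simplified paraphrase of what the generic macro generates (section-name suffixes and CRC handling elided):

    /* EXPORT_SYMBOL(foo) in a .S file expands to approximately: */
            .section ___ksymtab+foo, "a"
            .balign KSYM_ALIGN              /* 16 on x86-64 via this header */
    __ksymtab_foo:
            .quad foo                       /* struct kernel_symbol .value */
            .quad __kstrtab_foo             /* struct kernel_symbol .name  */
            .previous
            .section __ksymtab_strings, "a"
    __kstrtab_foo:
            .asciz "foo"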
4 changes: 1 addition & 3 deletions arch/x86/kernel/Makefile
@@ -46,9 +46,7 @@ obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o
obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-y += probe_roms.o
-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
-obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
-obj-$(CONFIG_X86_64) += mcount_64.o
+obj-$(CONFIG_X86_64) += sys_x86_64.o mcount_64.o
obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
obj-$(CONFIG_SYSFS) += ksysfs.o
obj-y += bootflag.o e820.o
2 changes: 2 additions & 0 deletions arch/x86/kernel/head_32.S
@@ -23,6 +23,7 @@
#include <asm/percpu.h>
#include <asm/nops.h>
#include <asm/bootparam.h>
+#include <asm/export.h>

/* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET)
@@ -673,6 +674,7 @@ ENTRY(empty_zero_page)
.fill 4096,1,0
ENTRY(swapper_pg_dir)
.fill 1024,4,0
+EXPORT_SYMBOL(empty_zero_page)

/*
* This starts the data section.
3 changes: 3 additions & 0 deletions arch/x86/kernel/head_64.S
@@ -21,6 +21,7 @@
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
+#include <asm/export.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
@@ -488,10 +489,12 @@ early_gdt_descr_base:
ENTRY(phys_base)
/* This must match the first entry in level2_kernel_pgt */
.quad 0x0000000000000000
+EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
.skip PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
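
Exporting data symbols from assembly works the same way as exporting functions. Both of these are reached from code inlined into modules: empty_zero_page backs the ZERO_PAGE() macro, and phys_base feeds the physical-address translation helpers; a paraphrase of __phys_addr_nodebug() from arch/x86/include/asm/page_64.h:

    static inline unsigned long __phys_addr_nodebug(unsigned long x)
    {
            unsigned long y = x - __START_KERNEL_map;

            /* use the carry flag to determine if x was < __START_KERNEL_map */
            x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));

            return x;
    }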

47 changes: 0 additions & 47 deletions arch/x86/kernel/i386_ksyms_32.c

This file was deleted.
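
For reference, this deleted collector (like its 64-bit counterpart x8664_ksyms_64.c, removed further down) consisted almost entirely of export statements for symbols defined in assembly, all of which now live next to their definitions. An illustrative excerpt, not the complete 47-line file:

    #include <linux/export.h>
    #include <asm/checksum.h>

    #ifdef CONFIG_FUNCTION_TRACER
    /* mcount is defined in assembly */
    EXPORT_SYMBOL(mcount);
    #endif

    EXPORT_SYMBOL(csum_partial);
    EXPORT_SYMBOL(csum_partial_copy_generic);
    EXPORT_SYMBOL(empty_zero_page);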

2 changes: 2 additions & 0 deletions arch/x86/kernel/mcount_64.S
@@ -7,6 +7,7 @@
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/ftrace.h>
+#include <asm/export.h>


.code64
@@ -294,6 +295,7 @@ trace:
jmp fgraph_trace
END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
+EXPORT_SYMBOL(function_hook)
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
85 changes: 0 additions & 85 deletions arch/x86/kernel/x8664_ksyms_64.c

This file was deleted.

3 changes: 3 additions & 0 deletions arch/x86/lib/checksum_32.S
@@ -28,6 +28,7 @@
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/asm.h>
+#include <asm/export.h>

/*
* computes a partial checksum, e.g. for TCP/UDP fragments
@@ -251,6 +252,7 @@ ENTRY(csum_partial)
ENDPROC(csum_partial)

#endif
+EXPORT_SYMBOL(csum_partial)

/*
unsigned int csum_partial_copy_generic (const char *src, char *dst,
@@ -490,3 +492,4 @@ ENDPROC(csum_partial_copy_generic)
#undef ROUND1

#endif
+EXPORT_SYMBOL(csum_partial_copy_generic)
2 changes: 2 additions & 0 deletions arch/x86/lib/clear_page_64.S
@@ -1,6 +1,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
+#include <asm/export.h>

/*
* Most CPUs support enhanced REP MOVSB/STOSB instructions. It is
@@ -23,6 +24,7 @@ ENTRY(clear_page)
rep stosq
ret
ENDPROC(clear_page)
+EXPORT_SYMBOL(clear_page)

ENTRY(clear_page_orig)

2 changes: 2 additions & 0 deletions arch/x86/lib/cmpxchg8b_emu.S
@@ -7,6 +7,7 @@
*/

#include <linux/linkage.h>
+#include <asm/export.h>

.text

@@ -48,3 +49,4 @@ ENTRY(cmpxchg8b_emu)
ret

ENDPROC(cmpxchg8b_emu)
+EXPORT_SYMBOL(cmpxchg8b_emu)
2 changes: 2 additions & 0 deletions arch/x86/lib/copy_page_64.S
@@ -3,6 +3,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
+#include <asm/export.h>

/*
* Some CPUs run faster using the string copy instructions (sane microcode).
@@ -17,6 +18,7 @@ ENTRY(copy_page)
rep movsq
ret
ENDPROC(copy_page)
+EXPORT_SYMBOL(copy_page)

ENTRY(copy_page_regs)
subq $2*8, %rsp
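
In the next file, all three copy_user variants get exports, not just the _copy_to_user/_copy_from_user entry points, because callers select a variant at runtime through the alternatives mechanism from code that is inlined into modules. A sketch, paraphrased from arch/x86/include/asm/uaccess_64.h (constraint details approximate):

    static __always_inline unsigned long
    copy_user_generic(void *to, const void *from, unsigned len)
    {
            unsigned ret;

            /* patched at boot to call exactly one of the three variants */
            alternative_call_2(copy_user_generic_unrolled,
                               copy_user_generic_string, X86_FEATURE_REP_GOOD,
                               copy_user_enhanced_fast_string, X86_FEATURE_ERMS,
                               ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
                                           "=d" (len)),
                               "1" (to), "2" (from), "3" (len)
                               : "memory", "rcx", "r8", "r9", "r10", "r11");
            return ret;
    }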
8 changes: 8 additions & 0 deletions arch/x86/lib/copy_user_64.S
@@ -14,6 +14,7 @@
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
+#include <asm/export.h>

/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
@@ -29,6 +30,7 @@ ENTRY(_copy_to_user)
"jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS
ENDPROC(_copy_to_user)
+EXPORT_SYMBOL(_copy_to_user)

/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
@@ -44,6 +46,8 @@ ENTRY(_copy_from_user)
"jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS
ENDPROC(_copy_from_user)
+EXPORT_SYMBOL(_copy_from_user)
+

.section .fixup,"ax"
/* must zero dest */
@@ -155,6 +159,7 @@ ENTRY(copy_user_generic_unrolled)
_ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b)
ENDPROC(copy_user_generic_unrolled)
+EXPORT_SYMBOL(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
* This is also a lot simpler. Use them when possible.
@@ -200,6 +205,7 @@ ENTRY(copy_user_generic_string)
_ASM_EXTABLE(1b,11b)
_ASM_EXTABLE(3b,12b)
ENDPROC(copy_user_generic_string)
+EXPORT_SYMBOL(copy_user_generic_string)

/*
* Some CPUs are adding enhanced REP MOVSB/STOSB instructions.
@@ -229,6 +235,7 @@ ENTRY(copy_user_enhanced_fast_string)

_ASM_EXTABLE(1b,12b)
ENDPROC(copy_user_enhanced_fast_string)
+EXPORT_SYMBOL(copy_user_enhanced_fast_string)

/*
* copy_user_nocache - Uncached memory copy with exception handling
@@ -379,3 +386,4 @@ ENTRY(__copy_user_nocache)
_ASM_EXTABLE(40b,.L_fixup_1b_copy)
_ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
+EXPORT_SYMBOL(__copy_user_nocache)
1 change: 1 addition & 0 deletions arch/x86/lib/csum-partial_64.c
@@ -135,6 +135,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
return (__force __wsum)add32_with_carry(do_csum(buff, len),
(__force u32)sum);
}
+EXPORT_SYMBOL(csum_partial);

/*
* this routine is used for miscellaneous IP-like checksums, mainly
[diff truncated — the remaining changed files are not shown]
