From 04f905a9569ca2b6964a35563f135fabbb2470bc Mon Sep 17 00:00:00 2001
From: Christoffer Dall <christoffer.dall@linaro.org>
Date: Fri, 10 Oct 2014 11:14:30 +0100
Subject: [PATCH 01/13] arm64: Allow 48-bit VA space without ARM_SMMU

Now that KVM has been reworked to support a 48-bit host VA space, we can
allow systems to be configured with this option.  However, the ARM SMMU
driver also needs to be tweaked for 48-bit support, so only allow the
config option to be set when support for the SMMU is not included.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index ac9afde76dead..b8053be1d8036 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -232,7 +232,7 @@ config ARM64_VA_BITS_42
 
 config ARM64_VA_BITS_48
 	bool "48-bit"
-	depends on BROKEN
+	depends on !ARM_SMMU
 
 endchoice
 

From 2a0b5c0d19298cad54573682321b5fe015fa5a0d Mon Sep 17 00:00:00 2001
From: Catalin Marinas <catalin.marinas@arm.com>
Date: Fri, 10 Oct 2014 15:37:28 +0100
Subject: [PATCH 02/13] arm64: Align less than PAGE_SIZE pgds naturally

When the pgd size is smaller than PAGE_SIZE, pgd_alloc() uses kzalloc()
to save space. However, the returned allocation is not always naturally
aligned as the architecture requires. This patch creates a kmem_cache for
pgd allocations
with the correct alignment.

The current kernel configurations with 4K pages + 39-bit VA and 64K
pages + 42-bit VA use a full page for the pgd and are not affected. The
patch is required for 48-bit VA with 64K pages where the pgd is 512
bytes.
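
As a quick sketch of the arithmetic behind the 512-byte figure (level
counts as in the configurations above; the generic shift formula is only
assumed here for illustration):

	/*
	 * PGDIR_SHIFT  = PAGE_SHIFT + (levels - 1) * (PAGE_SHIFT - 3)
	 *
	 * 64K pages, 48-bit VA, 3 levels:
	 *   PGDIR_SHIFT  = 16 + 2 * 13 = 42
	 *   PTRS_PER_PGD = 1 << (48 - 42) = 64
	 *   PGD_SIZE     = 64 * sizeof(pgd_t) = 512 bytes  (< PAGE_SIZE)
	 *
	 * 4K/39-bit and 64K/42-bit both give PTRS_PER_PGD * 8 == PAGE_SIZE,
	 * so they keep using a full page.
	 */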

Reported-by: Christoffer Dall <christoffer.dall@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/mm/pgd.c | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 62c6101df260e..6682b361d3ac4 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -30,12 +30,14 @@
 
 #define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
 
+static struct kmem_cache *pgd_cache;
+
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	if (PGD_SIZE == PAGE_SIZE)
 		return (pgd_t *)get_zeroed_page(GFP_KERNEL);
 	else
-		return kzalloc(PGD_SIZE, GFP_KERNEL);
+		return kmem_cache_zalloc(pgd_cache, GFP_KERNEL);
 }
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -43,5 +45,17 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	if (PGD_SIZE == PAGE_SIZE)
 		free_page((unsigned long)pgd);
 	else
-		kfree(pgd);
+		kmem_cache_free(pgd_cache, pgd);
+}
+
+static int __init pgd_cache_init(void)
+{
+	/*
+	 * Naturally aligned pgds required by the architecture.
+	 */
+	if (PGD_SIZE != PAGE_SIZE)
+		pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+					      SLAB_PANIC, NULL);
+	return 0;
 }
+core_initcall(pgd_cache_init);

From 971a5b6fe634bb7b617d8c5f25b6a3ddbc600194 Mon Sep 17 00:00:00 2001
From: Victor Kamensky <victor.kamensky@linaro.org>
Date: Tue, 14 Oct 2014 06:55:05 +0100
Subject: [PATCH 03/13] arm64: compat: fix compat types affecting struct
 compat_elf_prpsinfo

The compat_elf_prpsinfo structure does not match the arch/arm struct
elf_prpsinfo definition. As a result, the NT_PRPSINFO note in a core file
created by the arm64 kernel for an aarch32 (compat) process has the wrong
size, so gdb cannot display the command that caused the process to crash.

The fix is to change the size of __compat_uid_t and __compat_gid_t so that
they match the size of the corresponding fields in the arch/arm case.
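
For context, a minimal sketch of why 16 bits is the matching width (the
definitions below are an assumption about the 32-bit ARM and generic
headers of the time, shown only for illustration, not part of this patch):

	/* arch/arm/include/uapi/asm/posix_types.h: legacy 16-bit IDs */
	typedef unsigned short		__kernel_uid_t;
	typedef unsigned short		__kernel_gid_t;

	/*
	 * struct elf_prpsinfo (linux/elfcore.h) declares pr_uid/pr_gid with
	 * these types, so the compat layout needs 16-bit __compat_uid_t and
	 * __compat_gid_t for NT_PRPSINFO to end up the same size.
	 */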

Signed-off-by: Victor Kamensky <victor.kamensky@linaro.org>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/include/asm/compat.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 253e33bc94fb5..56de5aadede24 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -37,8 +37,8 @@ typedef s32		compat_ssize_t;
 typedef s32		compat_time_t;
 typedef s32		compat_clock_t;
 typedef s32		compat_pid_t;
-typedef u32		__compat_uid_t;
-typedef u32		__compat_gid_t;
+typedef u16		__compat_uid_t;
+typedef u16		__compat_gid_t;
 typedef u16		__compat_uid16_t;
 typedef u16		__compat_gid16_t;
 typedef u32		__compat_uid32_t;

From c0260ba906c4dfbcccd6414c3e2c0e73a7d7e35a Mon Sep 17 00:00:00 2001
From: Steve Capper <steve.capper@linaro.org>
Date: Fri, 17 Oct 2014 15:27:38 +0100
Subject: [PATCH 04/13] arm64: mm: Correct fixmap pagetable types
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Compiling with STRICT_MM_TYPECHECKS gives the following warning:
arch/arm64/mm/ioremap.c: In function ‘early_ioremap_init’:
arch/arm64/mm/ioremap.c:152:2: warning: passing argument 3 of
‘pud_populate’ from incompatible pointer type
  pud_populate(&init_mm, pud, bm_pmd);

The data types for bm_pmd and bm_pud are incorrectly set to pte_t.
This patch corrects these types.

Signed-off-by: Steve Capper <steve.capper@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/mm/ioremap.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index fa324bd5a5c42..4a07630a66165 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -105,10 +105,10 @@ EXPORT_SYMBOL(ioremap_cache);
 
 static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
 #if CONFIG_ARM64_PGTABLE_LEVELS > 2
-static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
 #endif
 #if CONFIG_ARM64_PGTABLE_LEVELS > 3
-static pte_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
 #endif
 
 static inline pud_t * __init early_ioremap_pud(unsigned long addr)

From b569c1c622c5e60c960a6ae5bd0880e0cdbd56b1 Mon Sep 17 00:00:00 2001
From: Daniel Borkmann <dborkman@redhat.com>
Date: Tue, 16 Sep 2014 08:48:50 +0100
Subject: [PATCH 05/13] net: bpf: arm64: address randomize and write protect
 JIT code

This is the ARM64 variant for 314beb9bcab ("x86: bpf_jit_comp: secure bpf
jit against spraying attacks").

Thanks to commit 11d91a770f1f ("arm64: Add CONFIG_DEBUG_SET_MODULE_RONX
support") which added necessary infrastructure, we can now implement
RO marking of eBPF generated JIT image pages and randomize start offset
for the JIT code, so that it does not reside directly on a page boundary
anymore. Likewise, the holes are filled with illegal instructions: here
we use BRK #0x100 (opcode 0xd4202000) to trigger a fault in the kernel
(unallocated BRKs would trigger a fault through do_debug_exception). This
seems more reliable as we don't have a guaranteed undefined instruction
space on ARM64.
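
For reference, a short sketch of how that opcode falls out of the A64 BRK
encoding (the immediate comes from the paragraph above; the encoding is
only restated here for illustration):

	/* BRK #imm16: base opcode 0xd4200000, imm16 in bits [20:5] */
	0xd4200000 | (0x100 << 5) == 0xd4202000		/* BRK #0x100 */

which is the value jit_fill_hole() below writes via AARCH64_BREAK_FAULT.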

This is basically the ARM64 variant of what we already have in ARM via
commit 55309dd3d4cd ("net: bpf: arm: address randomize and write protect
JIT code"). Moreover, this commit also presents a merge resolution due to
conflicts with commit 60a3b2253c41 ("net: bpf: make eBPF interpreter images
read-only") as we don't use kfree() in bpf_jit_free() anymore to release
the locked bpf_prog structure, but instead bpf_prog_unlock_free() through
a different allocator.

JIT tested on aarch64 with BPF test suite.

Reference: http://mainisusuallyafunction.blogspot.com/2012/11/attacking-hardened-linux-systems-with.html
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Reviewed-by: Zi Shen Lim <zlim.lnx@gmail.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/net/bpf_jit_comp.c | 39 +++++++++++++++++++++++++++--------
 1 file changed, 30 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 7ae33545535b9..71088952ed270 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -19,12 +19,13 @@
 #define pr_fmt(fmt) "bpf_jit: " fmt
 
 #include <linux/filter.h>
-#include <linux/moduleloader.h>
 #include <linux/printk.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
+
 #include <asm/byteorder.h>
 #include <asm/cacheflush.h>
+#include <asm/debug-monitors.h>
 
 #include "bpf_jit.h"
 
@@ -119,6 +120,14 @@ static inline int bpf2a64_offset(int bpf_to, int bpf_from,
 	return to - from;
 }
 
+static void jit_fill_hole(void *area, unsigned int size)
+{
+	u32 *ptr;
+	/* We are guaranteed to have aligned memory. */
+	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
+		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
+}
+
 static inline int epilogue_offset(const struct jit_ctx *ctx)
 {
 	int to = ctx->offset[ctx->prog->len - 1];
@@ -613,8 +622,10 @@ void bpf_jit_compile(struct bpf_prog *prog)
 
 void bpf_int_jit_compile(struct bpf_prog *prog)
 {
+	struct bpf_binary_header *header;
 	struct jit_ctx ctx;
 	int image_size;
+	u8 *image_ptr;
 
 	if (!bpf_jit_enable)
 		return;
@@ -636,23 +647,25 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 		goto out;
 
 	build_prologue(&ctx);
-
 	build_epilogue(&ctx);
 
 	/* Now we know the actual image size. */
 	image_size = sizeof(u32) * ctx.idx;
-	ctx.image = module_alloc(image_size);
-	if (unlikely(ctx.image == NULL))
+	header = bpf_jit_binary_alloc(image_size, &image_ptr,
+				      sizeof(u32), jit_fill_hole);
+	if (header == NULL)
 		goto out;
 
 	/* 2. Now, the actual pass. */
 
+	ctx.image = (u32 *)image_ptr;
 	ctx.idx = 0;
+
 	build_prologue(&ctx);
 
 	ctx.body_offset = ctx.idx;
 	if (build_body(&ctx)) {
-		module_free(NULL, ctx.image);
+		bpf_jit_binary_free(header);
 		goto out;
 	}
 
@@ -663,17 +676,25 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 		bpf_jit_dump(prog->len, image_size, 2, ctx.image);
 
 	bpf_flush_icache(ctx.image, ctx.image + ctx.idx);
+
+	set_memory_ro((unsigned long)header, header->pages);
 	prog->bpf_func = (void *)ctx.image;
 	prog->jited = 1;
-
 out:
 	kfree(ctx.offset);
 }
 
 void bpf_jit_free(struct bpf_prog *prog)
 {
-	if (prog->jited)
-		module_free(NULL, prog->bpf_func);
+	unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
+	struct bpf_binary_header *header = (void *)addr;
+
+	if (!prog->jited)
+		goto free_filter;
+
+	set_memory_rw(addr, header->pages);
+	bpf_jit_binary_free(header);
 
-	kfree(prog);
+free_filter:
+	bpf_prog_unlock_free(prog);
 }

From d65a634a0acefd6b6e8718e2399b6771ccb17b24 Mon Sep 17 00:00:00 2001
From: Zi Shen Lim <zlim.lnx@gmail.com>
Date: Tue, 16 Sep 2014 19:37:35 +0100
Subject: [PATCH 06/13] arm64: bpf: add 'shift by register' instructions

Commit 72b603ee8cfc ("bpf: x86: add missing 'shift by register'
instructions to x64 eBPF JIT") noted support for 'shift by register'
in eBPF and added support for it for x64. Let's enable this for arm64
as well.

The arm64 eBPF JIT compiler now passes the new 'shift by register'
test case introduced in the same commit 72b603ee8cfc.

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/net/bpf_jit.h      |  8 ++++++--
 arch/arm64/net/bpf_jit_comp.c | 12 ++++++++++++
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index 2134f7e6c2880..de0a81a539a01 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -144,8 +144,12 @@
 
 /* Data-processing (2 source) */
 /* Rd = Rn OP Rm */
-#define A64_UDIV(sf, Rd, Rn, Rm) aarch64_insn_gen_data2(Rd, Rn, Rm, \
-	A64_VARIANT(sf), AARCH64_INSN_DATA2_UDIV)
+#define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \
+	A64_VARIANT(sf), AARCH64_INSN_DATA2_##type)
+#define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV)
+#define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV)
+#define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV)
+#define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV)
 
 /* Data-processing (3 source) */
 /* Rd = Ra + Rn * Rm */
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 71088952ed270..80cc769727980 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -261,6 +261,18 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		emit(A64_MUL(is64, tmp, tmp, src), ctx);
 		emit(A64_SUB(is64, dst, dst, tmp), ctx);
 		break;
+	case BPF_ALU | BPF_LSH | BPF_X:
+	case BPF_ALU64 | BPF_LSH | BPF_X:
+		emit(A64_LSLV(is64, dst, dst, src), ctx);
+		break;
+	case BPF_ALU | BPF_RSH | BPF_X:
+	case BPF_ALU64 | BPF_RSH | BPF_X:
+		emit(A64_LSRV(is64, dst, dst, src), ctx);
+		break;
+	case BPF_ALU | BPF_ARSH | BPF_X:
+	case BPF_ALU64 | BPF_ARSH | BPF_X:
+		emit(A64_ASRV(is64, dst, dst, src), ctx);
+		break;
 	/* dst = -dst */
 	case BPF_ALU | BPF_NEG:
 	case BPF_ALU64 | BPF_NEG:

From 30d3d94cc3d507a23dcb09e5c365464e4aa9f580 Mon Sep 17 00:00:00 2001
From: Zi Shen Lim <zlim.lnx@gmail.com>
Date: Tue, 16 Sep 2014 21:29:23 +0100
Subject: [PATCH 07/13] arm64: bpf: add 'load 64-bit immediate' instruction

Commit 02ab695bb37e (net: filter: add "load 64-bit immediate" eBPF
instruction) introduced a new eBPF instruction. Let's add support
for this for arm64 as well.

Our arm64 eBPF JIT compiler now passes the new "load 64-bit
immediate" test case introduced in the same commit 02ab695bb37e.

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/net/bpf_jit_comp.c | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 80cc769727980..618d2cdc2f1be 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -205,6 +205,12 @@ static void build_epilogue(struct jit_ctx *ctx)
 	emit(A64_RET(A64_LR), ctx);
 }
 
+/* JITs an eBPF instruction.
+ * Returns:
+ * 0  - successfully JITed an 8-byte eBPF instruction.
+ * >0 - successfully JITed a 16-byte eBPF instruction.
+ * <0 - failed to JIT.
+ */
 static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 {
 	const u8 code = insn->code;
@@ -464,6 +470,27 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		emit(A64_B(jmp_offset), ctx);
 		break;
 
+	/* dst = imm64 */
+	case BPF_LD | BPF_IMM | BPF_DW:
+	{
+		const struct bpf_insn insn1 = insn[1];
+		u64 imm64;
+
+		if (insn1.code != 0 || insn1.src_reg != 0 ||
+		    insn1.dst_reg != 0 || insn1.off != 0) {
+			/* Note: verifier in BPF core must catch invalid
+			 * instructions.
+			 */
+			pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
+			return -EINVAL;
+		}
+
+		imm64 = (u64)insn1.imm << 32 | imm;
+		emit_a64_mov_i64(dst, imm64, ctx);
+
+		return 1;
+	}
+
 	/* LDX: dst = *(size *)(src + off) */
 	case BPF_LDX | BPF_MEM | BPF_W:
 	case BPF_LDX | BPF_MEM | BPF_H:
@@ -615,6 +642,10 @@ static int build_body(struct jit_ctx *ctx)
 			ctx->offset[i] = ctx->idx;
 
 		ret = build_insn(insn, ctx);
+		if (ret > 0) {
+			i++;
+			continue;
+		}
 		if (ret)
 			return ret;
 	}

From 74c3deacb2a903bcb9d5a20dc885159142245010 Mon Sep 17 00:00:00 2001
From: Daniel Borkmann <dborkman@redhat.com>
Date: Sat, 11 Oct 2014 10:00:43 +0100
Subject: [PATCH 08/13] net: bpf: arm64: minor fix of type in jited

Commit 286aad3c4014 ("net: bpf: be friendly to kmemcheck") changed the
type of jited from a bitfield into a bool. As this commit wasn't available
at the time the arm64 eBPF JIT was merged, fix it up now that net has been
merged into mainline.

Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Cc: Zi Shen Lim <zlim.lnx@gmail.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/net/bpf_jit_comp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 618d2cdc2f1be..41f1e3e2ea248 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -722,7 +722,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 
 	set_memory_ro((unsigned long)header, header->pages);
 	prog->bpf_func = (void *)ctx.image;
-	prog->jited = 1;
+	prog->jited = true;
 out:
 	kfree(ctx.offset);
 }

From a24637d5ddc215838776e755970bb199df00a1a5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Alex=20Benn=C3=A9e?= <alex.bennee@linaro.org>
Date: Tue, 22 Jul 2014 16:14:42 +0100
Subject: [PATCH 09/13] Documentation/arm64/memory.txt: fix typo
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

There is no swapper_pgd_dir; the text meant swapper_pg_dir.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 Documentation/arm64/memory.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
index 344e85cc73239..d7273a5f64566 100644
--- a/Documentation/arm64/memory.txt
+++ b/Documentation/arm64/memory.txt
@@ -17,7 +17,7 @@ User addresses have bits 63:48 set to 0 while the kernel addresses have
 the same bits set to 1. TTBRx selection is given by bit 63 of the
 virtual address. The swapper_pg_dir contains only kernel (global)
 mappings while the user pgd contains only user (non-global) mappings.
-The swapper_pgd_dir address is written to TTBR1 and never written to
+The swapper_pg_dir address is written to TTBR1 and never written to
 TTBR0.
 
 

From ceab3fe69408cb98f437dad3b4b4bb79434370ef Mon Sep 17 00:00:00 2001
From: Catalin Marinas <catalin.marinas@arm.com>
Date: Tue, 21 Oct 2014 17:01:07 +0100
Subject: [PATCH 10/13] arm64: Fix compilation error on UP builds

In file included from ./arch/arm64/include/asm/irq_work.h:4:0,
        from include/linux/irq_work.h:46,
        from include/linux/perf_event.h:49,
        from include/linux/ftrace_event.h:9,
        from include/trace/syscall.h:6,
        from include/linux/syscalls.h:81,
        from init/main.c:18:
./arch/arm64/include/asm/smp.h:24:3:
        error: #error "<asm/smp.h> included in non-SMP build"
 # error "<asm/smp.h> included in non-SMP build"

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Fixes: 3631073659d0 ("arm64: Tell irq work about self IPI support")
Reported-by: Guenter Roeck <linux@roeck-us.net>
Tested-by: Guenter Roeck <linux@roeck-us.net>
---
 arch/arm64/include/asm/irq_work.h | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
index 8e24ef3f7c82c..b4f6b19a8a685 100644
--- a/arch/arm64/include/asm/irq_work.h
+++ b/arch/arm64/include/asm/irq_work.h
@@ -1,6 +1,8 @@
 #ifndef __ASM_IRQ_WORK_H
 #define __ASM_IRQ_WORK_H
 
+#ifdef CONFIG_SMP
+
 #include <asm/smp.h>
 
 static inline bool arch_irq_work_has_interrupt(void)
@@ -8,4 +10,13 @@ static inline bool arch_irq_work_has_interrupt(void)
 	return !!__smp_cross_call;
 }
 
+#else
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+	return false;
+}
+
+#endif
+
 #endif /* __ASM_IRQ_WORK_H */

From e2b6b35ee77522c2e15e770aded0b05c25ca0616 Mon Sep 17 00:00:00 2001
From: Catalin Marinas <catalin.marinas@arm.com>
Date: Thu, 4 Oct 2012 14:22:23 +0100
Subject: [PATCH 11/13] arm64: vexpress: Add CLCD support to the ARMv8 model
 platform

This patch enables CLCD support for the VE platform emulated by the
ARMv8 software model (DT bindings are based on Pawel's vexpress
patches) together with defconfig entries for SERIO_AMBAKMI and
FB_ARMCLCD.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Pawel Moll <pawel.moll@arm.com>
---
 arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi | 35 +++++++++++++++++++-
 arch/arm64/configs/defconfig                 |  2 ++
 2 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
index ac2cb2418025a..c46cbb29f3c6b 100644
--- a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
+++ b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
@@ -22,7 +22,7 @@
 			bank-width = <4>;
 		};
 
-		vram@2,00000000 {
+		v2m_video_ram: vram@2,00000000 {
 			compatible = "arm,vexpress-vram";
 			reg = <2 0x00000000 0x00800000>;
 		};
@@ -179,9 +179,42 @@
 			clcd@1f0000 {
 				compatible = "arm,pl111", "arm,primecell";
 				reg = <0x1f0000 0x1000>;
+				interrupt-names = "combined";
 				interrupts = <14>;
 				clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>;
 				clock-names = "clcdclk", "apb_pclk";
+				arm,pl11x,framebuffer = <0x18000000 0x00180000>;
+				memory-region = <&v2m_video_ram>;
+				max-memory-bandwidth = <130000000>; /* 16bpp @ 63.5MHz */
+
+				port {
+					v2m_clcd_pads: endpoint {
+						remote-endpoint = <&v2m_clcd_panel>;
+						arm,pl11x,tft-r0g0b0-pads = <0 8 16>;
+					};
+				};
+
+				panel {
+					compatible = "panel-dpi";
+
+					port {
+						v2m_clcd_panel: endpoint {
+							remote-endpoint = <&v2m_clcd_pads>;
+						};
+					};
+
+					panel-timing {
+						clock-frequency = <63500127>;
+						hactive = <1024>;
+						hback-porch = <152>;
+						hfront-porch = <48>;
+						hsync-len = <104>;
+						vactive = <768>;
+						vback-porch = <23>;
+						vfront-porch = <3>;
+						vsync-len = <4>;
+					};
+				};
 			};
 
 			virtio_block@0130000 {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 9cd37de9aa8d9..4ce602c2c6de8 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -78,6 +78,7 @@ CONFIG_NET_XGENE=y
 # CONFIG_WLAN is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
 CONFIG_LEGACY_PTY_COUNT=16
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
@@ -90,6 +91,7 @@ CONFIG_VIRTIO_CONSOLE=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set

From 92980405f3537136b8e81007a9df576762f49bbb Mon Sep 17 00:00:00 2001
From: Arun Chandran <achandran@mvista.com>
Date: Fri, 10 Oct 2014 12:31:24 +0100
Subject: [PATCH 12/13] arm64: ASLR: Don't randomise text when
 randomize_va_space == 0

When the user asks to turn off ASLR by writing "0" to
/proc/sys/kernel/randomize_va_space, there should not be
any randomisation of the mmap base, stack, VDSO, libraries, text or heap.

Currently, arm64 violates this by randomising the text.
Fix this by defining a constant ELF_ET_DYN_BASE. The randomisation of
mm->mmap_base is done by setup_new_exec -> arch_pick_mmap_layout ->
mmap_base -> mmap_rnd.

Signed-off-by: Arun Chandran <achandran@mvista.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/Kconfig           | 1 +
 arch/arm64/include/asm/elf.h | 4 ++--
 arch/arm64/kernel/process.c  | 5 -----
 3 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b8053be1d8036..9532f8d5857ed 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,5 +1,6 @@
 config ARM64
 	def_bool y
+	select ARCH_BINFMT_ELF_RANDOMIZE_PIE
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 01d3aab64b79f..1f65be3931392 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -126,7 +126,7 @@ typedef struct user_fpsimd_state elf_fpregset_t;
  * that it will "exec", and that there is sufficient room for the brk.
  */
 extern unsigned long randomize_et_dyn(unsigned long base);
-#define ELF_ET_DYN_BASE	(randomize_et_dyn(2 * TASK_SIZE_64 / 3))
+#define ELF_ET_DYN_BASE	(2 * TASK_SIZE_64 / 3)
 
 /*
  * When the program starts, a1 contains a pointer to a function to be
@@ -169,7 +169,7 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 #define COMPAT_ELF_PLATFORM		("v8l")
 #endif
 
-#define COMPAT_ELF_ET_DYN_BASE		(randomize_et_dyn(2 * TASK_SIZE_32 / 3))
+#define COMPAT_ELF_ET_DYN_BASE		(2 * TASK_SIZE_32 / 3)
 
 /* AArch32 registers. */
 #define COMPAT_ELF_NGREG		18
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index c3065dbc4fa26..fde9923af859c 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -378,8 +378,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
 	return randomize_base(mm->brk);
 }
-
-unsigned long randomize_et_dyn(unsigned long base)
-{
-	return randomize_base(base);
-}

From 3dec0fe48a8936528aae2fc3f904c2c9a34ba368 Mon Sep 17 00:00:00 2001
From: Catalin Marinas <catalin.marinas@arm.com>
Date: Fri, 24 Oct 2014 18:16:47 +0100
Subject: [PATCH 13/13] arm64: Fix memblock current_limit with 64K pages and
 48-bit VA

With 48-bit VA space, the 64K page configuration uses 3 levels instead
of 2 and PUD_SIZE != PMD_SIZE. Since with 64K pages the initial
swapper_pg_dir populated in head.S only covers PMD_SIZE, the memblock
current_limit needs to be set accordingly in map_mem() to avoid
allocating unmapped memory. The memblock current_limit is progressively
increased as more blocks are mapped.
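
A rough sketch of the sizes involved in the 64K-page, 48-bit VA, 3-level
configuration (shift values assumed from the generic layout, shown only to
illustrate why the old limit was too generous):

	/* 64K granule: PAGE_SHIFT = 16, each table level resolves 13 bits */
	PMD_SHIFT   = 16 + 13     = 29  ->  PMD_SIZE          = 512MB
	PGDIR_SHIFT = 16 + 2 * 13 = 42  ->  PUD_SIZE (folded) = 4TB

so PHYS_OFFSET + PUD_SIZE would let memblock hand out memory well outside
the PMD_SIZE window that head.S actually mapped.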

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/mm/mmu.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6894ef3e62342..0bf90d26e7455 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -297,11 +297,15 @@ static void __init map_mem(void)
 	 * create_mapping requires puds, pmds and ptes to be allocated from
 	 * memory addressable from the initial direct kernel mapping.
 	 *
-	 * The initial direct kernel mapping, located at swapper_pg_dir,
-	 * gives us PUD_SIZE memory starting from PHYS_OFFSET (which must be
-	 * aligned to 2MB as per Documentation/arm64/booting.txt).
+	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
+	 * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
+	 * PHYS_OFFSET (which must be aligned to 2MB as per
+	 * Documentation/arm64/booting.txt).
 	 */
-	limit = PHYS_OFFSET + PUD_SIZE;
+	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
+		limit = PHYS_OFFSET + PMD_SIZE;
+	else
+		limit = PHYS_OFFSET + PUD_SIZE;
 	memblock_set_current_limit(limit);
 
 	/* map all the memory banks */