From 9917df83a9d11c63a1c4b89ddd821357b7c20171 Mon Sep 17 00:00:00 2001
From: Alexandre Belloni
Date: Sat, 17 Feb 2018 00:34:46 +0100
Subject: [PATCH 1/4] alpha: rtc: remove unused set_mmss ops

The .set_mmss and .set_mmss64 ops are only called when the RTC does not
provide an implementation for the .set_time callback. On alpha,
.set_time is provided, so .set_mmss64 is never called. Remove the
unused code.

Signed-off-by: Alexandre Belloni
Signed-off-by: Matt Turner
---
 arch/alpha/kernel/rtc.c | 99 -----------------------------------------
 1 file changed, 99 deletions(-)

diff --git a/arch/alpha/kernel/rtc.c b/arch/alpha/kernel/rtc.c
index b3da0dcda47d7..0816e6c747e8d 100644
--- a/arch/alpha/kernel/rtc.c
+++ b/arch/alpha/kernel/rtc.c
@@ -114,83 +114,6 @@ alpha_rtc_set_time(struct device *dev, struct rtc_time *tm)
 	return mc146818_set_time(tm);
 }
 
-static int
-alpha_rtc_set_mmss(struct device *dev, time64_t nowtime)
-{
-	int retval = 0;
-	int real_seconds, real_minutes, cmos_minutes;
-	unsigned char save_control, save_freq_select;
-
-	/* Note: This code only updates minutes and seconds.  Comments
-	   indicate this was to avoid messing with unknown time zones,
-	   and with the epoch nonsense described above.  In order for
-	   this to work, the existing clock cannot be off by more than
-	   15 minutes.
-
-	   ??? This choice is may be out of date.  The x86 port does
-	   not have problems with timezones, and the epoch processing has
-	   now been fixed in alpha_set_rtc_time.
-
-	   In either case, one can always force a full rtc update with
-	   the userland hwclock program, so surely 15 minute accuracy
-	   is no real burden.  */
-
-	/* In order to set the CMOS clock precisely, we have to be called
-	   500 ms after the second nowtime has started, because when
-	   nowtime is written into the registers of the CMOS clock, it will
-	   jump to the next second precisely 500 ms later. Check the Motorola
-	   MC146818A or Dallas DS12887 data sheet for details. */
-
-	/* irq are locally disabled here */
-	spin_lock(&rtc_lock);
-	/* Tell the clock it's being set */
-	save_control = CMOS_READ(RTC_CONTROL);
-	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
-
-	/* Stop and reset prescaler */
-	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
-
-	cmos_minutes = CMOS_READ(RTC_MINUTES);
-	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-		cmos_minutes = bcd2bin(cmos_minutes);
-
-	real_seconds = nowtime % 60;
-	real_minutes = nowtime / 60;
-	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1) {
-		/* correct for half hour time zone */
-		real_minutes += 30;
-	}
-	real_minutes %= 60;
-
-	if (abs(real_minutes - cmos_minutes) < 30) {
-		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-			real_seconds = bin2bcd(real_seconds);
-			real_minutes = bin2bcd(real_minutes);
-		}
-		CMOS_WRITE(real_seconds,RTC_SECONDS);
-		CMOS_WRITE(real_minutes,RTC_MINUTES);
-	} else {
-		printk_once(KERN_NOTICE
-			    "set_rtc_mmss: can't update from %d to %d\n",
-			    cmos_minutes, real_minutes);
-		retval = -1;
-	}
-
-	/* The following flags have to be released exactly in this order,
-	 * otherwise the DS12887 (popular MC146818A clone with integrated
-	 * battery and quartz) will not reset the oscillator and will not
-	 * update precisely 500 ms later.  You won't find this mentioned in
-	 * the Dallas Semiconductor data sheets, but who believes data
-	 * sheets anyway ...                           -- Markus Kuhn
-	 */
-	CMOS_WRITE(save_control, RTC_CONTROL);
-	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-	spin_unlock(&rtc_lock);
-
-	return retval;
-}
-
 static int
 alpha_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 {
@@ -210,7 +133,6 @@ alpha_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 static const struct rtc_class_ops alpha_rtc_ops = {
 	.read_time = alpha_rtc_read_time,
 	.set_time = alpha_rtc_set_time,
-	.set_mmss64 = alpha_rtc_set_mmss,
 	.ioctl = alpha_rtc_ioctl,
 };
 
@@ -225,7 +147,6 @@ static const struct rtc_class_ops alpha_rtc_ops = {
 
 union remote_data {
 	struct rtc_time *tm;
-	unsigned long now;
 	long retval;
 };
 
@@ -267,29 +188,9 @@ remote_set_time(struct device *dev, struct rtc_time *tm)
 	return alpha_rtc_set_time(NULL, tm);
 }
 
-static void
-do_remote_mmss(void *data)
-{
-	union remote_data *x = data;
-	x->retval = alpha_rtc_set_mmss(NULL, x->now);
-}
-
-static int
-remote_set_mmss(struct device *dev, time64_t now)
-{
-	union remote_data x;
-	if (smp_processor_id() != boot_cpuid) {
-		x.now = now;
-		smp_call_function_single(boot_cpuid, do_remote_mmss, &x, 1);
-		return x.retval;
-	}
-	return alpha_rtc_set_mmss(NULL, now);
-}
-
 static const struct rtc_class_ops remote_rtc_ops = {
 	.read_time = remote_read_time,
 	.set_time = remote_set_time,
-	.set_mmss64 = remote_set_mmss,
 	.ioctl = alpha_rtc_ioctl,
 };
 #endif

From 54f16b1967bd7c6a252b3c128032deb37cc61cf3 Mon Sep 17 00:00:00 2001
From: Alexandre Belloni
Date: Wed, 21 Feb 2018 22:36:16 +0100
Subject: [PATCH 2/4] alpha: rtc: stop validating rtc_time in .read_time

The RTC core always calls rtc_valid_tm() after the .read_time callback,
so it is not necessary to call it just before returning from the
callback.

Signed-off-by: Alexandre Belloni
Signed-off-by: Matt Turner
---
 arch/alpha/kernel/rtc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/alpha/kernel/rtc.c b/arch/alpha/kernel/rtc.c
index 0816e6c747e8d..1376a28670482 100644
--- a/arch/alpha/kernel/rtc.c
+++ b/arch/alpha/kernel/rtc.c
@@ -97,7 +97,7 @@ alpha_rtc_read_time(struct device *dev, struct rtc_time *tm)
 		tm->tm_year = year;
 	}
 
-	return rtc_valid_tm(tm);
+	return 0;
 }
 
 static int
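For context on the two RTC patches above: the RTC core prefers a driver's
.set_time callback and only falls back to .set_mmss64/.set_mmss when
.set_time is absent, and it validates the result of .read_time itself with
rtc_valid_tm(). The following is a simplified sketch of that dispatch, not
verbatim kernel source; the function name rtc_core_set_time_sketch is
invented for illustration.

/*
 * Simplified sketch of the RTC core's set-time dispatch (illustrative
 * only, not the actual drivers/rtc/interface.c code).  Because the
 * alpha driver registers .set_time, the .set_mmss64 fallback below can
 * never be reached, which is why patch 1 can drop it.  The core also
 * runs rtc_valid_tm() on the result of .read_time, which is why patch 2
 * drops the driver-side call.
 */
#include <linux/rtc.h>

static int rtc_core_set_time_sketch(struct rtc_device *rtc, struct rtc_time *tm)
{
	if (!rtc->ops)
		return -ENODEV;

	/* A driver-provided .set_time always wins... */
	if (rtc->ops->set_time)
		return rtc->ops->set_time(rtc->dev.parent, tm);

	/* ...and only drivers without it fall back to .set_mmss64. */
	if (rtc->ops->set_mmss64)
		return rtc->ops->set_mmss64(rtc->dev.parent,
					    rtc_tm_to_time64(tm));

	return -EINVAL;
}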
From 6fd16ce5590e30d0ed8b21e977102361ff9f92ef Mon Sep 17 00:00:00 2001
From: Michael Cree
Date: Mon, 26 Feb 2018 22:02:12 +1300
Subject: [PATCH 3/4] alpha: Implement CPU vulnerabilities sysfs functions.

Implement the CPU vulnerability show functions for meltdown, spectre_v1
and spectre_v2 on Alpha.

Tests on XP1000 (EV67/667MHz) and ES45 (EV68CB/1.25GHz) show them to be
vulnerable to Meltdown and Spectre V1. In the case of Meltdown I saw a
1 to 2% success rate in reading bytes on the XP1000 and a 50 to 60%
success rate on the ES45. (This compares to the 99.97% success reported
for Intel CPUs.) Report EV6 and later CPUs as vulnerable.

Tests on PWS600au (EV56/600MHz) for the Spectre V1 attack were
unsuccessful (though I did not try particularly hard), so mark EV4
through to EV56 as not vulnerable.

Signed-off-by: Michael Cree
Signed-off-by: Matt Turner
---
 arch/alpha/Kconfig         |  1 +
 arch/alpha/kernel/Makefile |  2 +-
 arch/alpha/kernel/bugs.c   | 45 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 47 insertions(+), 1 deletion(-)
 create mode 100644 arch/alpha/kernel/bugs.c

diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index e96adcbcab418..b2022885ced8a 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -18,6 +18,7 @@ config ALPHA
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select AUDIT_ARCH
 	select GENERIC_CLOCKEVENTS
+	select GENERIC_CPU_VULNERABILITIES
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index bf7b41fa7b01a..5a74581bf0ee7 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -9,7 +9,7 @@ ccflags-y := -Wno-sign-compare
 
 obj-y    := entry.o traps.o process.o osf_sys.o irq.o \
 	    irq_alpha.o signal.o setup.o ptrace.o time.o \
-	    systbls.o err_common.o io.o
+	    systbls.o err_common.o io.o bugs.o
 
 obj-$(CONFIG_VGA_HOSE)	+= console.o
 obj-$(CONFIG_SMP)	+= smp.o
diff --git a/arch/alpha/kernel/bugs.c b/arch/alpha/kernel/bugs.c
new file mode 100644
index 0000000000000..08cc10d7fa17d
--- /dev/null
+++ b/arch/alpha/kernel/bugs.c
@@ -0,0 +1,45 @@
+
+#include <asm/hwrpb.h>
+#include <linux/device.h>
+
+
+#ifdef CONFIG_SYSFS
+
+static int cpu_is_ev6_or_later(void)
+{
+	struct percpu_struct *cpu;
+	unsigned long cputype;
+
+	cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
+	cputype = cpu->type & 0xffffffff;
+	/* Include all of EV6, EV67, EV68, EV7, EV79 and EV69. */
+	return (cputype == EV6_CPU) || ((cputype >= EV67_CPU) && (cputype <= EV69_CPU));
+}
+
+ssize_t cpu_show_meltdown(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	if (cpu_is_ev6_or_later())
+		return sprintf(buf, "Vulnerable\n");
+	else
+		return sprintf(buf, "Not affected\n");
+}
+
+ssize_t cpu_show_spectre_v1(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	if (cpu_is_ev6_or_later())
+		return sprintf(buf, "Vulnerable\n");
+	else
+		return sprintf(buf, "Not affected\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	if (cpu_is_ev6_or_later())
+		return sprintf(buf, "Vulnerable\n");
+	else
+		return sprintf(buf, "Not affected\n");
+}
+#endif
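For context on why simply defining cpu_show_meltdown(), cpu_show_spectre_v1()
and cpu_show_spectre_v2() in arch code is enough: selecting
GENERIC_CPU_VULNERABILITIES causes the driver core to create
/sys/devices/system/cpu/vulnerabilities/, whose show methods have weak
"Not affected" defaults that an architecture overrides with strong
definitions. The snippet below is a hedged sketch of that generic fallback,
paraphrased rather than copied from drivers/base/cpu.c.

/*
 * Sketch of the generic fallback behind CONFIG_GENERIC_CPU_VULNERABILITIES
 * (illustrative, not verbatim).  The strong definitions added in
 * arch/alpha/kernel/bugs.c override these weak defaults at link time,
 * so the sysfs files report "Vulnerable" on EV6 and later.
 */
#include <linux/device.h>

ssize_t __weak cpu_show_meltdown(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spectre_v1(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}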
From cd0e00c106722eca40b38ebf11cf134c01901086 Mon Sep 17 00:00:00 2001
From: Sinan Kaya
Date: Mon, 2 Apr 2018 13:48:00 -0400
Subject: [PATCH 4/4] alpha: io: reorder barriers to guarantee writeX() and
 iowriteX() ordering

memory-barriers.txt has been updated with the following requirement.

"When using writel(), a prior wmb() is not needed to guarantee that the
cache coherent memory writes have completed before writing to the MMIO
region."

The current writeX() and iowriteX() implementations on alpha do not
satisfy this requirement, as the barrier is placed after the register
write.

Move the mb() in the writeX() and iowriteX() functions before the
register access to guarantee that HW observes the memory changes before
the register operation is performed.

Signed-off-by: Sinan Kaya
Reported-by: Arnd Bergmann
Signed-off-by: Matt Turner
---
 arch/alpha/include/asm/io.h | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index d123ff90f7a83..4c533fc94d62f 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -341,14 +341,14 @@ extern inline unsigned int ioread16(void __iomem *addr)
 
 extern inline void iowrite8(u8 b, void __iomem *addr)
 {
-	IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
 	mb();
+	IO_CONCAT(__IO_PREFIX, iowrite8)(b, addr);
 }
 
 extern inline void iowrite16(u16 b, void __iomem *addr)
 {
-	IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
 	mb();
+	IO_CONCAT(__IO_PREFIX, iowrite16)(b, addr);
 }
 
 extern inline u8 inb(unsigned long port)
@@ -382,8 +382,8 @@ extern inline unsigned int ioread32(void __iomem *addr)
 
 extern inline void iowrite32(u32 b, void __iomem *addr)
 {
-	IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
 	mb();
+	IO_CONCAT(__IO_PREFIX, iowrite32)(b, addr);
 }
 
 extern inline u32 inl(unsigned long port)
@@ -434,14 +434,14 @@ extern inline u16 readw(const volatile void __iomem *addr)
 
 extern inline void writeb(u8 b, volatile void __iomem *addr)
 {
-	__raw_writeb(b, addr);
 	mb();
+	__raw_writeb(b, addr);
 }
 
 extern inline void writew(u16 b, volatile void __iomem *addr)
 {
-	__raw_writew(b, addr);
 	mb();
+	__raw_writew(b, addr);
 }
 #endif
 
@@ -482,14 +482,14 @@ extern inline u64 readq(const volatile void __iomem *addr)
 
 extern inline void writel(u32 b, volatile void __iomem *addr)
 {
-	__raw_writel(b, addr);
 	mb();
+	__raw_writel(b, addr);
 }
 
 extern inline void writeq(u64 b, volatile void __iomem *addr)
 {
-	__raw_writeq(b, addr);
 	mb();
+	__raw_writeq(b, addr);
 }
 #endif
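To illustrate what the reordered barrier buys a driver: with the mb() issued
before the MMIO store, a writel() used as a doorbell is observed by the
device only after earlier stores to coherent DMA memory. The fragment below
is a hypothetical driver pattern; the device, descriptor layout and doorbell
register are invented for the example.

/*
 * Hypothetical driver pattern showing why the barrier must precede the
 * register write: the descriptor stores to coherent memory must be
 * visible to the device before the doorbell MMIO write that tells it
 * to fetch the descriptor.  With this patch, writel() itself provides
 * that ordering on alpha, so the driver needs no extra wmb().
 */
#include <linux/io.h>
#include <linux/types.h>

struct ring_desc {			/* invented descriptor layout */
	u64 dma_addr;
	u32 len;
	u32 flags;
};

static void queue_packet(struct ring_desc *desc, void __iomem *doorbell,
			 u64 addr, u32 len)
{
	/* 1. Fill the descriptor in cache-coherent memory. */
	desc->dma_addr = addr;
	desc->len = len;
	desc->flags = 1;		/* mark it valid */

	/* 2. Ring the doorbell: writel() now orders the stores above
	 *    before this MMIO access, per memory-barriers.txt.
	 */
	writel(1, doorbell);
}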