diff --git a/[refs] b/[refs]
index f5bbc569e677..2bfde5cf3fa9 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 15fc5deb1f3e9f6e42213ab1bbb0a463e5366c9c
+refs/heads/master: 39f78e70567a07a6fc0d7a4ca9e3331e44dd400d
diff --git a/trunk/Documentation/DocBook/filesystems.tmpl b/trunk/Documentation/DocBook/filesystems.tmpl
index 25b58efd955d..3fca32c41927 100644
--- a/trunk/Documentation/DocBook/filesystems.tmpl
+++ b/trunk/Documentation/DocBook/filesystems.tmpl
@@ -224,8 +224,8 @@ all your transactions.
-Then at umount time , in your put_super() you can then call journal_destroy()
-to clean up your in-core journal object.
+Then at umount time , in your put_super() (2.4) or write_super() (2.5)
+you can then call journal_destroy() to clean up your in-core journal object.
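
A minimal sketch of the unmount-time teardown described above, assuming a
hypothetical filesystem whose sb->s_fs_info points at a private
"struct myfs_sb_info" that carries the journal handle:

	static void myfs_put_super(struct super_block *sb)
	{
		struct myfs_sb_info *sbi = sb->s_fs_info;

		/* tear down the in-core journal object at unmount time */
		journal_destroy(sbi->s_journal);

		kfree(sbi);
		sb->s_fs_info = NULL;
	}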
diff --git a/trunk/Documentation/filesystems/Locking b/trunk/Documentation/filesystems/Locking
index e540a24e5d06..0f103e39b4f6 100644
--- a/trunk/Documentation/filesystems/Locking
+++ b/trunk/Documentation/filesystems/Locking
@@ -114,6 +114,7 @@ prototypes:
int (*drop_inode) (struct inode *);
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
+ void (*write_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
int (*freeze_fs) (struct super_block *);
int (*unfreeze_fs) (struct super_block *);
@@ -135,6 +136,7 @@ write_inode:
drop_inode: !!!inode->i_lock!!!
evict_inode:
put_super: write
+write_super: read
sync_fs: read
freeze_fs: write
unfreeze_fs: write
diff --git a/trunk/Documentation/filesystems/porting b/trunk/Documentation/filesystems/porting
index 0742feebc6e2..2bef2b3843d1 100644
--- a/trunk/Documentation/filesystems/porting
+++ b/trunk/Documentation/filesystems/porting
@@ -94,8 +94,9 @@ protected.
---
[mandatory]
-BKL is also moved from around sb operations. BKL should have been shifted into
-individual fs sb_op functions. If you don't need it, remove it.
+BKL is also moved from around sb operations. ->write_super() Is now called
+without BKL held. BKL should have been shifted into individual fs sb_op
+functions. If you don't need it, remove it.
---
[informational]
diff --git a/trunk/Documentation/filesystems/vfs.txt b/trunk/Documentation/filesystems/vfs.txt
index 2ee133e030c3..065aa2dc0835 100644
--- a/trunk/Documentation/filesystems/vfs.txt
+++ b/trunk/Documentation/filesystems/vfs.txt
@@ -216,6 +216,7 @@ struct super_operations {
void (*drop_inode) (struct inode *);
void (*delete_inode) (struct inode *);
void (*put_super) (struct super_block *);
+ void (*write_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
int (*freeze_fs) (struct super_block *);
int (*unfreeze_fs) (struct super_block *);
@@ -272,6 +273,9 @@ or bottom half).
put_super: called when the VFS wishes to free the superblock
(i.e. unmount). This is called with the superblock lock held
+ write_super: called when the VFS superblock needs to be written to
+ disc. This method is optional
+
sync_fs: called when VFS is writing out all dirty data associated with
a superblock. The second parameter indicates whether the method
should wait until the write out has been completed. Optional.
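
A rough sketch of the ->write_super() method reinstated above; it is only an
illustration, and myfs_sync_super() is a hypothetical helper that writes the
on-disk superblock:

	static void myfs_write_super(struct super_block *sb)
	{
		/* called by the VFS once the superblock has been marked dirty */
		if (sb->s_dirt)
			myfs_sync_super(sb);
		sb->s_dirt = 0;
	}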
diff --git a/trunk/Documentation/laptops/laptop-mode.txt b/trunk/Documentation/laptops/laptop-mode.txt
index 4ebbfc3f1c6e..0bf25eebce94 100644
--- a/trunk/Documentation/laptops/laptop-mode.txt
+++ b/trunk/Documentation/laptops/laptop-mode.txt
@@ -262,9 +262,9 @@ MINIMUM_BATTERY_MINUTES=10
#
# Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been
-# exceeded, the kernel will wake flusher threads which will then reduce the
-# amount of dirty memory to dirty_background_ratio. Set this nice and low,
-# so once some writeout has commenced, we do a lot of it.
+# exceeded, the kernel will wake pdflush which will then reduce the amount
+# of dirty memory to dirty_background_ratio. Set this nice and low, so once
+# some writeout has commenced, we do a lot of it.
#
#DIRTY_BACKGROUND_RATIO=5
@@ -384,9 +384,9 @@ CPU_MAXFREQ=${CPU_MAXFREQ:-'slowest'}
#
# Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been
-# exceeded, the kernel will wake flusher threads which will then reduce the
-# amount of dirty memory to dirty_background_ratio. Set this nice and low,
-# so once some writeout has commenced, we do a lot of it.
+# exceeded, the kernel will wake pdflush which will then reduce the amount
+# of dirty memory to dirty_background_ratio. Set this nice and low, so once
+# some writeout has commenced, we do a lot of it.
#
DIRTY_BACKGROUND_RATIO=${DIRTY_BACKGROUND_RATIO:-'5'}
diff --git a/trunk/Documentation/sysctl/vm.txt b/trunk/Documentation/sysctl/vm.txt
index 078701fdbd4d..dcc2a94ae34e 100644
--- a/trunk/Documentation/sysctl/vm.txt
+++ b/trunk/Documentation/sysctl/vm.txt
@@ -76,8 +76,8 @@ huge pages although processes will also directly compact memory as required.
dirty_background_bytes
-Contains the amount of dirty memory at which the background kernel
-flusher threads will start writeback.
+Contains the amount of dirty memory at which the pdflush background writeback
+daemon will start writeback.
Note: dirty_background_bytes is the counterpart of dirty_background_ratio. Only
one of them may be specified at a time. When one sysctl is written it is
@@ -89,7 +89,7 @@ other appears as 0 when read.
dirty_background_ratio
Contains, as a percentage of total system memory, the number of pages at which
-the background kernel flusher threads will start writing out dirty data.
+the pdflush background writeback daemon will start writing out dirty data.
==============================================================
@@ -112,9 +112,9 @@ retained.
dirty_expire_centisecs
This tunable is used to define when dirty data is old enough to be eligible
-for writeout by the kernel flusher threads. It is expressed in 100'ths
-of a second. Data which has been dirty in-memory for longer than this
-interval will be written out next time a flusher thread wakes up.
+for writeout by the pdflush daemons. It is expressed in 100'ths of a second.
+Data which has been dirty in-memory for longer than this interval will be
+written out next time a pdflush daemon wakes up.
==============================================================
@@ -128,7 +128,7 @@ data.
dirty_writeback_centisecs
-The kernel flusher threads will periodically wake up and write `old' data
+The pdflush writeback daemons will periodically wake up and write `old' data
out to disk. This tunable expresses the interval between those wakeups, in
100'ths of a second.
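
As a usage illustration of the tunables documented above (a sketch only, using
the standard /proc/sys/vm interface; the value 5 is just an example):

	#include <stdio.h>

	int main(void)
	{
		/* start background writeback once 5% of memory is dirty */
		FILE *f = fopen("/proc/sys/vm/dirty_background_ratio", "w");

		if (!f)
			return 1;
		fprintf(f, "%d\n", 5);
		return fclose(f) ? 1 : 0;
	}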
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index 63ce3a38b332..94b823f71e94 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -5329,15 +5329,14 @@ PIN CONTROL SUBSYSTEM
M: Linus Walleij
S: Maintained
F: drivers/pinctrl/
-F: include/linux/pinctrl/
PIN CONTROLLER - ST SPEAR
-M: Viresh Kumar
+M: Viresh Kumar
L: spear-devel@list.st.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.st.com/spear
S: Maintained
-F: drivers/pinctrl/spear/
+F: driver/pinctrl/spear/
PKTCDVD DRIVER
M: Peter Osterlund
diff --git a/trunk/arch/arm/mm/dma-mapping.c b/trunk/arch/arm/mm/dma-mapping.c
index c2cdf6500f75..334dd79ad5e6 100644
--- a/trunk/arch/arm/mm/dma-mapping.c
+++ b/trunk/arch/arm/mm/dma-mapping.c
@@ -358,7 +358,7 @@ void __init dma_contiguous_remap(void)
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
if (start >= end)
- return;
+ continue;
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
diff --git a/trunk/arch/blackfin/kernel/setup.c b/trunk/arch/blackfin/kernel/setup.c
index fb96e607adcf..ada8f0fc71e4 100644
--- a/trunk/arch/blackfin/kernel/setup.c
+++ b/trunk/arch/blackfin/kernel/setup.c
@@ -52,6 +52,7 @@ EXPORT_SYMBOL(reserved_mem_dcache_on);
#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
+unsigned long _ebss;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
diff --git a/trunk/arch/ia64/kernel/acpi.c b/trunk/arch/ia64/kernel/acpi.c
index 440578850ae5..6f38b6120d96 100644
--- a/trunk/arch/ia64/kernel/acpi.c
+++ b/trunk/arch/ia64/kernel/acpi.c
@@ -497,7 +497,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
srat_num_cpus++;
}
-int __init
+void __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
unsigned long paddr, size;
@@ -512,7 +512,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
/* Ignore disabled entries */
if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
- return -1;
+ return;
/* record this node in proximity bitmap */
pxm_bit_set(pxm);
@@ -531,7 +531,6 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
p->size = size;
p->nid = pxm;
num_node_memblks++;
- return 0;
}
void __init acpi_numa_arch_fixup(void)
diff --git a/trunk/arch/m68k/Kconfig b/trunk/arch/m68k/Kconfig
index 4a469907f04a..0b0f8b8c4a26 100644
--- a/trunk/arch/m68k/Kconfig
+++ b/trunk/arch/m68k/Kconfig
@@ -54,6 +54,18 @@ config ZONE_DMA
bool
default y
+config CPU_HAS_NO_BITFIELDS
+ bool
+
+config CPU_HAS_NO_MULDIV64
+ bool
+
+config CPU_HAS_ADDRESS_SPACES
+ bool
+
+config FPU
+ bool
+
config HZ
int
default 1000 if CLEOPATRA
diff --git a/trunk/arch/m68k/Kconfig.cpu b/trunk/arch/m68k/Kconfig.cpu
index 82068349a2bb..43a9f8f1b8eb 100644
--- a/trunk/arch/m68k/Kconfig.cpu
+++ b/trunk/arch/m68k/Kconfig.cpu
@@ -37,7 +37,6 @@ config M68000
bool
select CPU_HAS_NO_BITFIELDS
select CPU_HAS_NO_MULDIV64
- select CPU_HAS_NO_UNALIGNED
select GENERIC_CSUM
help
The Freescale (was Motorola) 68000 CPU is the first generation of
@@ -49,7 +48,6 @@ config M68000
config MCPU32
bool
select CPU_HAS_NO_BITFIELDS
- select CPU_HAS_NO_UNALIGNED
help
The Freescale (was then Motorola) CPU32 is a CPU core that is
based on the 68020 processor. For the most part it is used in
@@ -378,18 +376,6 @@ config NODES_SHIFT
default "3"
depends on !SINGLE_MEMORY_CHUNK
-config CPU_HAS_NO_BITFIELDS
- bool
-
-config CPU_HAS_NO_MULDIV64
- bool
-
-config CPU_HAS_NO_UNALIGNED
- bool
-
-config CPU_HAS_ADDRESS_SPACES
- bool
-
config FPU
bool
diff --git a/trunk/arch/m68k/apollo/config.c b/trunk/arch/m68k/apollo/config.c
index f5565d6eeb8e..0a30406b9442 100644
--- a/trunk/arch/m68k/apollo/config.c
+++ b/trunk/arch/m68k/apollo/config.c
@@ -177,8 +177,8 @@ irqreturn_t dn_timer_int(int irq, void *dev_id)
timer_handler(irq, dev_id);
- x = *(volatile unsigned char *)(apollo_timer + 3);
- x = *(volatile unsigned char *)(apollo_timer + 5);
+ x=*(volatile unsigned char *)(timer+3);
+ x=*(volatile unsigned char *)(timer+5);
return IRQ_HANDLED;
}
@@ -186,17 +186,17 @@ irqreturn_t dn_timer_int(int irq, void *dev_id)
void dn_sched_init(irq_handler_t timer_routine)
{
/* program timer 1 */
- *(volatile unsigned char *)(apollo_timer + 3) = 0x01;
- *(volatile unsigned char *)(apollo_timer + 1) = 0x40;
- *(volatile unsigned char *)(apollo_timer + 5) = 0x09;
- *(volatile unsigned char *)(apollo_timer + 7) = 0xc4;
+ *(volatile unsigned char *)(timer+3)=0x01;
+ *(volatile unsigned char *)(timer+1)=0x40;
+ *(volatile unsigned char *)(timer+5)=0x09;
+ *(volatile unsigned char *)(timer+7)=0xc4;
/* enable IRQ of PIC B */
*(volatile unsigned char *)(pica+1)&=(~8);
#if 0
- printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3));
- printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3));
+ printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
+ printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
#endif
if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine))
diff --git a/trunk/arch/m68k/include/asm/Kbuild b/trunk/arch/m68k/include/asm/Kbuild
index a74e5d95c384..eafa2539a8ee 100644
--- a/trunk/arch/m68k/include/asm/Kbuild
+++ b/trunk/arch/m68k/include/asm/Kbuild
@@ -1,29 +1,4 @@
include include/asm-generic/Kbuild.asm
header-y += cachectl.h
-generic-y += bitsperlong.h
-generic-y += cputime.h
-generic-y += device.h
-generic-y += emergency-restart.h
-generic-y += errno.h
-generic-y += futex.h
-generic-y += ioctl.h
-generic-y += ipcbuf.h
-generic-y += irq_regs.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
-generic-y += kvm_para.h
-generic-y += local64.h
-generic-y += local.h
-generic-y += mman.h
-generic-y += mutex.h
-generic-y += percpu.h
-generic-y += resource.h
-generic-y += scatterlist.h
-generic-y += sections.h
-generic-y += siginfo.h
-generic-y += statfs.h
-generic-y += topology.h
-generic-y += types.h
generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/trunk/arch/m68k/include/asm/MC68332.h b/trunk/arch/m68k/include/asm/MC68332.h
new file mode 100644
index 000000000000..6bb8f02685a2
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/MC68332.h
@@ -0,0 +1,152 @@
+
+/* include/asm-m68knommu/MC68332.h: '332 control registers
+ *
+ * Copyright (C) 1998 Kenneth Albanowski ,
+ *
+ */
+
+#ifndef _MC68332_H_
+#define _MC68332_H_
+
+#define BYTE_REF(addr) (*((volatile unsigned char*)addr))
+#define WORD_REF(addr) (*((volatile unsigned short*)addr))
+
+#define PORTE_ADDR 0xfffa11
+#define PORTE BYTE_REF(PORTE_ADDR)
+#define DDRE_ADDR 0xfffa15
+#define DDRE BYTE_REF(DDRE_ADDR)
+#define PEPAR_ADDR 0xfffa17
+#define PEPAR BYTE_REF(PEPAR_ADDR)
+
+#define PORTF_ADDR 0xfffa19
+#define PORTF BYTE_REF(PORTF_ADDR)
+#define DDRF_ADDR 0xfffa1d
+#define DDRF BYTE_REF(DDRF_ADDR)
+#define PFPAR_ADDR 0xfffa1f
+#define PFPAR BYTE_REF(PFPAR_ADDR)
+
+#define PORTQS_ADDR 0xfffc15
+#define PORTQS BYTE_REF(PORTQS_ADDR)
+#define DDRQS_ADDR 0xfffc17
+#define DDRQS BYTE_REF(DDRQS_ADDR)
+#define PQSPAR_ADDR 0xfffc16
+#define PQSPAR BYTE_REF(PQSPAR_ADDR)
+
+#define CSPAR0_ADDR 0xFFFA44
+#define CSPAR0 WORD_REF(CSPAR0_ADDR)
+#define CSPAR1_ADDR 0xFFFA46
+#define CSPAR1 WORD_REF(CSPAR1_ADDR)
+#define CSARBT_ADDR 0xFFFA48
+#define CSARBT WORD_REF(CSARBT_ADDR)
+#define CSOPBT_ADDR 0xFFFA4A
+#define CSOPBT WORD_REF(CSOPBT_ADDR)
+#define CSBAR0_ADDR 0xFFFA4C
+#define CSBAR0 WORD_REF(CSBAR0_ADDR)
+#define CSOR0_ADDR 0xFFFA4E
+#define CSOR0 WORD_REF(CSOR0_ADDR)
+#define CSBAR1_ADDR 0xFFFA50
+#define CSBAR1 WORD_REF(CSBAR1_ADDR)
+#define CSOR1_ADDR 0xFFFA52
+#define CSOR1 WORD_REF(CSOR1_ADDR)
+#define CSBAR2_ADDR 0xFFFA54
+#define CSBAR2 WORD_REF(CSBAR2_ADDR)
+#define CSOR2_ADDR 0xFFFA56
+#define CSOR2 WORD_REF(CSOR2_ADDR)
+#define CSBAR3_ADDR 0xFFFA58
+#define CSBAR3 WORD_REF(CSBAR3_ADDR)
+#define CSOR3_ADDR 0xFFFA5A
+#define CSOR3 WORD_REF(CSOR3_ADDR)
+#define CSBAR4_ADDR 0xFFFA5C
+#define CSBAR4 WORD_REF(CSBAR4_ADDR)
+#define CSOR4_ADDR 0xFFFA5E
+#define CSOR4 WORD_REF(CSOR4_ADDR)
+#define CSBAR5_ADDR 0xFFFA60
+#define CSBAR5 WORD_REF(CSBAR5_ADDR)
+#define CSOR5_ADDR 0xFFFA62
+#define CSOR5 WORD_REF(CSOR5_ADDR)
+#define CSBAR6_ADDR 0xFFFA64
+#define CSBAR6 WORD_REF(CSBAR6_ADDR)
+#define CSOR6_ADDR 0xFFFA66
+#define CSOR6 WORD_REF(CSOR6_ADDR)
+#define CSBAR7_ADDR 0xFFFA68
+#define CSBAR7 WORD_REF(CSBAR7_ADDR)
+#define CSOR7_ADDR 0xFFFA6A
+#define CSOR7 WORD_REF(CSOR7_ADDR)
+#define CSBAR8_ADDR 0xFFFA6C
+#define CSBAR8 WORD_REF(CSBAR8_ADDR)
+#define CSOR8_ADDR 0xFFFA6E
+#define CSOR8 WORD_REF(CSOR8_ADDR)
+#define CSBAR9_ADDR 0xFFFA70
+#define CSBAR9 WORD_REF(CSBAR9_ADDR)
+#define CSOR9_ADDR 0xFFFA72
+#define CSOR9 WORD_REF(CSOR9_ADDR)
+#define CSBAR10_ADDR 0xFFFA74
+#define CSBAR10 WORD_REF(CSBAR10_ADDR)
+#define CSOR10_ADDR 0xFFFA76
+#define CSOR10 WORD_REF(CSOR10_ADDR)
+
+#define CSOR_MODE_ASYNC 0x0000
+#define CSOR_MODE_SYNC 0x8000
+#define CSOR_MODE_MASK 0x8000
+#define CSOR_BYTE_DISABLE 0x0000
+#define CSOR_BYTE_UPPER 0x4000
+#define CSOR_BYTE_LOWER 0x2000
+#define CSOR_BYTE_BOTH 0x6000
+#define CSOR_BYTE_MASK 0x6000
+#define CSOR_RW_RSVD 0x0000
+#define CSOR_RW_READ 0x0800
+#define CSOR_RW_WRITE 0x1000
+#define CSOR_RW_BOTH 0x1800
+#define CSOR_RW_MASK 0x1800
+#define CSOR_STROBE_DS 0x0400
+#define CSOR_STROBE_AS 0x0000
+#define CSOR_STROBE_MASK 0x0400
+#define CSOR_DSACK_WAIT(x) (wait << 6)
+#define CSOR_DSACK_FTERM (14 << 6)
+#define CSOR_DSACK_EXTERNAL (15 << 6)
+#define CSOR_DSACK_MASK 0x03c0
+#define CSOR_SPACE_CPU 0x0000
+#define CSOR_SPACE_USER 0x0010
+#define CSOR_SPACE_SU 0x0020
+#define CSOR_SPACE_BOTH 0x0030
+#define CSOR_SPACE_MASK 0x0030
+#define CSOR_IPL_ALL 0x0000
+#define CSOR_IPL_PRIORITY(x) (x << 1)
+#define CSOR_IPL_MASK 0x000e
+#define CSOR_AVEC_ON 0x0001
+#define CSOR_AVEC_OFF 0x0000
+#define CSOR_AVEC_MASK 0x0001
+
+#define CSBAR_ADDR(x) ((addr >> 11) << 3)
+#define CSBAR_ADDR_MASK 0xfff8
+#define CSBAR_BLKSIZE_2K 0x0000
+#define CSBAR_BLKSIZE_8K 0x0001
+#define CSBAR_BLKSIZE_16K 0x0002
+#define CSBAR_BLKSIZE_64K 0x0003
+#define CSBAR_BLKSIZE_128K 0x0004
+#define CSBAR_BLKSIZE_256K 0x0005
+#define CSBAR_BLKSIZE_512K 0x0006
+#define CSBAR_BLKSIZE_1M 0x0007
+#define CSBAR_BLKSIZE_MASK 0x0007
+
+#define CSPAR_DISC 0
+#define CSPAR_ALT 1
+#define CSPAR_CS8 2
+#define CSPAR_CS16 3
+#define CSPAR_MASK 3
+
+#define CSPAR0_CSBOOT(x) (x << 0)
+#define CSPAR0_CS0(x) (x << 2)
+#define CSPAR0_CS1(x) (x << 4)
+#define CSPAR0_CS2(x) (x << 6)
+#define CSPAR0_CS3(x) (x << 8)
+#define CSPAR0_CS4(x) (x << 10)
+#define CSPAR0_CS5(x) (x << 12)
+
+#define CSPAR1_CS6(x) (x << 0)
+#define CSPAR1_CS7(x) (x << 2)
+#define CSPAR1_CS8(x) (x << 4)
+#define CSPAR1_CS9(x) (x << 6)
+#define CSPAR1_CS10(x) (x << 8)
+
+#endif
diff --git a/trunk/arch/m68k/include/asm/apollodma.h b/trunk/arch/m68k/include/asm/apollodma.h
new file mode 100644
index 000000000000..954adc851adb
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/apollodma.h
@@ -0,0 +1,248 @@
+/*
+ * linux/include/asm/dma.h: Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen
+ * and John Boyd, Nov. 1992.
+ */
+
+#ifndef _ASM_APOLLO_DMA_H
+#define _ASM_APOLLO_DMA_H
+
+#include <asm/apollohw.h> /* need byte IO */
+#include <linux/spinlock.h> /* And spinlocks */
+#include <linux/delay.h>
+
+
+#define dma_outb(val,addr) (*((volatile unsigned char *)(addr+IO_BASE)) = (val))
+#define dma_inb(addr) (*((volatile unsigned char *)(addr+IO_BASE)))
+
+/*
+ * NOTES about DMA transfers:
+ *
+ * controller 1: channels 0-3, byte operations, ports 00-1F
+ * controller 2: channels 4-7, word operations, ports C0-DF
+ *
+ * - ALL registers are 8 bits only, regardless of transfer size
+ * - channel 4 is not used - cascades 1 into 2.
+ * - channels 0-3 are byte - addresses/counts are for physical bytes
+ * - channels 5-7 are word - addresses/counts are for physical words
+ * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
+ * - transfer count loaded to registers is 1 less than actual count
+ * - controller 2 offsets are all even (2x offsets for controller 1)
+ * - page registers for 5-7 don't use data bit 0, represent 128K pages
+ * - page registers for 0-3 use bit 0, represent 64K pages
+ *
+ * DMA transfers are limited to the lower 16MB of _physical_ memory.
+ * Note that addresses loaded into registers must be _physical_ addresses,
+ * not logical addresses (which may differ if paging is active).
+ *
+ * Address mapping for channels 0-3:
+ *
+ * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * P7 ... P0 A7 ... A0 A7 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Address mapping for channels 5-7:
+ *
+ * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
+ * | ... | \ \ ... \ \ \ ... \ \
+ * | ... | \ \ ... \ \ \ ... \ (not used)
+ * | ... | \ \ ... \ \ \ ... \
+ * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
+ * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
+ * the hardware level, so odd-byte transfers aren't possible).
+ *
+ * Transfer count (_not # bytes_) is limited to 64K, represented as actual
+ * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
+ * and up to 128K bytes may be transferred on channels 5-7 in one operation.
+ *
+ */
+
+#define MAX_DMA_CHANNELS 8
+
+/* The maximum address that we can perform a DMA transfer to on this platform */
+#define MAX_DMA_ADDRESS      (PAGE_OFFSET+0x1000000)
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE 0x10C00 /* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE 0x10D00 /* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG (IO_DMA1_BASE+0x08) /* command register (w) */
+#define DMA1_STAT_REG (IO_DMA1_BASE+0x08) /* status register (r) */
+#define DMA1_REQ_REG (IO_DMA1_BASE+0x09) /* request register (w) */
+#define DMA1_MASK_REG (IO_DMA1_BASE+0x0A) /* single-channel mask (w) */
+#define DMA1_MODE_REG (IO_DMA1_BASE+0x0B) /* mode register (w) */
+#define DMA1_CLEAR_FF_REG (IO_DMA1_BASE+0x0C) /* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG (IO_DMA1_BASE+0x0D) /* Temporary Register (r) */
+#define DMA1_RESET_REG (IO_DMA1_BASE+0x0D) /* Master Clear (w) */
+#define DMA1_CLR_MASK_REG (IO_DMA1_BASE+0x0E) /* Clear Mask */
+#define DMA1_MASK_ALL_REG (IO_DMA1_BASE+0x0F) /* all-channels mask (w) */
+
+#define DMA2_CMD_REG (IO_DMA2_BASE+0x10) /* command register (w) */
+#define DMA2_STAT_REG (IO_DMA2_BASE+0x10) /* status register (r) */
+#define DMA2_REQ_REG (IO_DMA2_BASE+0x12) /* request register (w) */
+#define DMA2_MASK_REG (IO_DMA2_BASE+0x14) /* single-channel mask (w) */
+#define DMA2_MODE_REG (IO_DMA2_BASE+0x16) /* mode register (w) */
+#define DMA2_CLEAR_FF_REG (IO_DMA2_BASE+0x18) /* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG (IO_DMA2_BASE+0x1A) /* Temporary Register (r) */
+#define DMA2_RESET_REG (IO_DMA2_BASE+0x1A) /* Master Clear (w) */
+#define DMA2_CLR_MASK_REG (IO_DMA2_BASE+0x1C) /* Clear Mask */
+#define DMA2_MASK_ALL_REG (IO_DMA2_BASE+0x1E) /* all-channels mask (w) */
+
+#define DMA_ADDR_0 (IO_DMA1_BASE+0x00) /* DMA address registers */
+#define DMA_ADDR_1 (IO_DMA1_BASE+0x02)
+#define DMA_ADDR_2 (IO_DMA1_BASE+0x04)
+#define DMA_ADDR_3 (IO_DMA1_BASE+0x06)
+#define DMA_ADDR_4 (IO_DMA2_BASE+0x00)
+#define DMA_ADDR_5 (IO_DMA2_BASE+0x04)
+#define DMA_ADDR_6 (IO_DMA2_BASE+0x08)
+#define DMA_ADDR_7 (IO_DMA2_BASE+0x0C)
+
+#define DMA_CNT_0 (IO_DMA1_BASE+0x01) /* DMA count registers */
+#define DMA_CNT_1 (IO_DMA1_BASE+0x03)
+#define DMA_CNT_2 (IO_DMA1_BASE+0x05)
+#define DMA_CNT_3 (IO_DMA1_BASE+0x07)
+#define DMA_CNT_4 (IO_DMA2_BASE+0x02)
+#define DMA_CNT_5 (IO_DMA2_BASE+0x06)
+#define DMA_CNT_6 (IO_DMA2_BASE+0x0A)
+#define DMA_CNT_7 (IO_DMA2_BASE+0x0E)
+
+#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
+#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
+#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
+
+#define DMA_AUTOINIT 0x10
+
+#define DMA_8BIT 0
+#define DMA_16BIT 1
+#define DMA_BUSMASTER 2
+
+extern spinlock_t dma_spin_lock;
+
+static __inline__ unsigned long claim_dma_lock(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dma_spin_lock, flags);
+ return flags;
+}
+
+static __inline__ void release_dma_lock(unsigned long flags)
+{
+ spin_unlock_irqrestore(&dma_spin_lock, flags);
+}
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(dmanr, DMA1_MASK_REG);
+ else
+ dma_outb(dmanr & 3, DMA2_MASK_REG);
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(dmanr | 4, DMA1_MASK_REG);
+ else
+ dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while holding the DMA lock ! ---
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(0, DMA1_CLEAR_FF_REG);
+ else
+ dma_outb(0, DMA2_CLEAR_FF_REG);
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+ if (dmanr<=3)
+ dma_outb(mode | dmanr, DMA1_MODE_REG);
+ else
+ dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
+}
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+ if (dmanr <= 3) {
+ dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+ dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+ } else {
+ dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+ dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+ }
+}
+
+
+/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
+ * a specific DMA channel.
+ * You must ensure the parameters are valid.
+ * NOTE: from a manual: "the number of transfers is one more
+ * than the initial word count"! This is taken into account.
+ * Assumes dma flip-flop is clear.
+ * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+ count--;
+ if (dmanr <= 3) {
+ dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+ dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+ } else {
+ dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+ dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+ }
+}
+
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ *
+ * Assumes DMA flip-flop is clear.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+ unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
+ : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
+
+ /* using short to get 16-bit wrap around */
+ unsigned short count;
+
+ count = 1 + dma_inb(io_port);
+ count += dma_inb(io_port) << 8;
+
+ return (dmanr<=3)? count : (count<<1);
+}
+
+
+/* These are in kernel/dma.c: */
+extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
+extern void free_dma(unsigned int dmanr); /* release it again */
+
+/* These are in arch/m68k/apollo/dma.c: */
+extern unsigned short dma_map_page(unsigned long phys_addr,int count,int type);
+extern void dma_unmap_page(unsigned short dma_addr);
+
+#endif /* _ASM_APOLLO_DMA_H */
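
A rough usage sketch of the helpers defined above, assuming a driver that has
already reserved channel 1 with request_dma(); the function name and the
phys/len parameters are hypothetical:

	static void myfs_start_dma_read(unsigned long phys, unsigned int len)
	{
		unsigned long flags = claim_dma_lock();

		disable_dma(1);
		clear_dma_ff(1);
		set_dma_mode(1, DMA_MODE_READ);	/* device -> memory */
		set_dma_addr(1, phys);		/* physical address of the buffer */
		set_dma_count(1, len);		/* transfer length in bytes */
		enable_dma(1);

		release_dma_lock(flags);
	}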
diff --git a/trunk/arch/m68k/include/asm/apollohw.h b/trunk/arch/m68k/include/asm/apollohw.h
index 635ef4f89010..a1373b9aa281 100644
--- a/trunk/arch/m68k/include/asm/apollohw.h
+++ b/trunk/arch/m68k/include/asm/apollohw.h
@@ -98,7 +98,7 @@ extern u_long timer_physaddr;
#define cpuctrl (*(volatile unsigned int *)(IO_BASE + cpuctrl_physaddr))
#define pica (IO_BASE + pica_physaddr)
#define picb (IO_BASE + picb_physaddr)
-#define apollo_timer (IO_BASE + timer_physaddr)
+#define timer (IO_BASE + timer_physaddr)
#define addr_xlat_map ((unsigned short *)(IO_BASE + 0x17000))
#define isaIO2mem(x) (((((x) & 0x3f8) << 7) | (((x) & 0xfc00) >> 6) | ((x) & 0x7)) + 0x40000 + IO_BASE)
diff --git a/trunk/arch/m68k/include/asm/bitsperlong.h b/trunk/arch/m68k/include/asm/bitsperlong.h
new file mode 100644
index 000000000000..6dc0bb0c13b2
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/bitsperlong.h
@@ -0,0 +1 @@
+#include <asm-generic/bitsperlong.h>
diff --git a/trunk/arch/m68k/include/asm/cputime.h b/trunk/arch/m68k/include/asm/cputime.h
new file mode 100644
index 000000000000..c79c5e892305
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/cputime.h
@@ -0,0 +1,6 @@
+#ifndef __M68K_CPUTIME_H
+#define __M68K_CPUTIME_H
+
+#include <asm-generic/cputime.h>
+
+#endif /* __M68K_CPUTIME_H */
diff --git a/trunk/arch/m68k/include/asm/delay.h b/trunk/arch/m68k/include/asm/delay.h
index 12d8fe4f1d30..9c09becfd4c9 100644
--- a/trunk/arch/m68k/include/asm/delay.h
+++ b/trunk/arch/m68k/include/asm/delay.h
@@ -43,7 +43,7 @@ static inline void __delay(unsigned long loops)
extern void __bad_udelay(void);
-#ifdef CONFIG_CPU_HAS_NO_MULDIV64
+#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
/*
* The simpler m68k and ColdFire processors do not have a 32*32->64
* multiply instruction. So we need to handle them a little differently.
diff --git a/trunk/arch/m68k/include/asm/device.h b/trunk/arch/m68k/include/asm/device.h
new file mode 100644
index 000000000000..d8f9872b0e2d
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/trunk/arch/m68k/include/asm/emergency-restart.h b/trunk/arch/m68k/include/asm/emergency-restart.h
new file mode 100644
index 000000000000..108d8c48e42e
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/emergency-restart.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_EMERGENCY_RESTART_H
+#define _ASM_EMERGENCY_RESTART_H
+
+#include <asm-generic/emergency-restart.h>
+
+#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/trunk/arch/m68k/include/asm/errno.h b/trunk/arch/m68k/include/asm/errno.h
new file mode 100644
index 000000000000..0d4e188d6ef6
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/errno.h
@@ -0,0 +1,6 @@
+#ifndef _M68K_ERRNO_H
+#define _M68K_ERRNO_H
+
+#include <asm-generic/errno.h>
+
+#endif /* _M68K_ERRNO_H */
diff --git a/trunk/arch/m68k/include/asm/futex.h b/trunk/arch/m68k/include/asm/futex.h
new file mode 100644
index 000000000000..6a332a9f099c
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/futex.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <asm-generic/futex.h>
+
+#endif
diff --git a/trunk/arch/m68k/include/asm/ioctl.h b/trunk/arch/m68k/include/asm/ioctl.h
new file mode 100644
index 000000000000..b279fe06dfe5
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/ioctl.h
@@ -0,0 +1 @@
+#include <asm-generic/ioctl.h>
diff --git a/trunk/arch/m68k/include/asm/ipcbuf.h b/trunk/arch/m68k/include/asm/ipcbuf.h
new file mode 100644
index 000000000000..84c7e51cb6d0
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/ipcbuf.h
@@ -0,0 +1 @@
+#include <asm-generic/ipcbuf.h>
diff --git a/trunk/arch/m68k/include/asm/irq_regs.h b/trunk/arch/m68k/include/asm/irq_regs.h
new file mode 100644
index 000000000000..3dd9c0b70270
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/irq_regs.h
@@ -0,0 +1 @@
+#include <asm-generic/irq_regs.h>
diff --git a/trunk/arch/m68k/include/asm/kdebug.h b/trunk/arch/m68k/include/asm/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/trunk/arch/m68k/include/asm/kmap_types.h b/trunk/arch/m68k/include/asm/kmap_types.h
new file mode 100644
index 000000000000..3413cc1390ec
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/kmap_types.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_M68K_KMAP_TYPES_H
+#define __ASM_M68K_KMAP_TYPES_H
+
+#include <asm-generic/kmap_types.h>
+
+#endif /* __ASM_M68K_KMAP_TYPES_H */
diff --git a/trunk/arch/m68k/include/asm/kvm_para.h b/trunk/arch/m68k/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/trunk/arch/m68k/include/asm/local.h b/trunk/arch/m68k/include/asm/local.h
new file mode 100644
index 000000000000..6c259263e1f0
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/local.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_M68K_LOCAL_H
+#define _ASM_M68K_LOCAL_H
+
+#include <asm-generic/local.h>
+
+#endif /* _ASM_M68K_LOCAL_H */
diff --git a/trunk/arch/m68k/include/asm/local64.h b/trunk/arch/m68k/include/asm/local64.h
new file mode 100644
index 000000000000..36c93b5cc239
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/local64.h
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/trunk/arch/m68k/include/asm/mac_mouse.h b/trunk/arch/m68k/include/asm/mac_mouse.h
new file mode 100644
index 000000000000..39a5c292eaee
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/mac_mouse.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_MAC_MOUSE_H
+#define _ASM_MAC_MOUSE_H
+
+/*
+ * linux/include/asm-m68k/mac_mouse.h
+ * header file for Macintosh ADB mouse driver
+ * 27-10-97 Michael Schmitz
+ * copied from:
+ * header file for Atari Mouse driver
+ * by Robert de Vries (robert@and.nl) on 19Jul93
+ */
+
+struct mouse_status {
+ char buttons;
+ short dx;
+ short dy;
+ int ready;
+ int active;
+ wait_queue_head_t wait;
+ struct fasync_struct *fasyncptr;
+};
+
+#endif
diff --git a/trunk/arch/m68k/include/asm/mcfmbus.h b/trunk/arch/m68k/include/asm/mcfmbus.h
new file mode 100644
index 000000000000..319899c47a2c
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/mcfmbus.h
@@ -0,0 +1,77 @@
+/****************************************************************************/
+
+/*
+ * mcfmbus.h -- Coldfire MBUS support defines.
+ *
+ * (C) Copyright 1999, Martin Floeer (mfloeer@axcent.de)
+ */
+
+/****************************************************************************/
+
+
+#ifndef mcfmbus_h
+#define mcfmbus_h
+
+
+#define MCFMBUS_BASE 0x280
+#define MCFMBUS_IRQ_VECTOR 0x19
+#define MCFMBUS_IRQ 0x1
+#define MCFMBUS_CLK 0x3f
+#define MCFMBUS_IRQ_LEVEL 0x07 /*IRQ Level 1*/
+#define MCFMBUS_ADDRESS 0x01
+
+
+/*
+* Define the 5307 MBUS register set addresses
+*/
+
+#define MCFMBUS_MADR 0x00
+#define MCFMBUS_MFDR 0x04
+#define MCFMBUS_MBCR 0x08
+#define MCFMBUS_MBSR 0x0C
+#define MCFMBUS_MBDR 0x10
+
+
+#define MCFMBUS_MADR_ADDR(a) (((a)&0x7F)<<0x01) /*Slave Address*/
+
+#define MCFMBUS_MFDR_MBC(a) ((a)&0x3F) /*M-Bus Clock*/
+
+/*
+* Define bit flags in Control Register
+*/
+
+#define MCFMBUS_MBCR_MEN (0x80) /* M-Bus Enable */
+#define MCFMBUS_MBCR_MIEN (0x40) /* M-Bus Interrupt Enable */
+#define MCFMBUS_MBCR_MSTA (0x20) /* Master/Slave Mode Select Bit */
+#define MCFMBUS_MBCR_MTX (0x10) /* Transmit/Rcv Mode Select Bit */
+#define MCFMBUS_MBCR_TXAK (0x08) /* Transmit Acknowledge Enable */
+#define MCFMBUS_MBCR_RSTA (0x04) /* Repeat Start */
+
+/*
+* Define bit flags in Status Register
+*/
+
+#define MCFMBUS_MBSR_MCF (0x80) /* Data Transfer Complete */
+#define MCFMBUS_MBSR_MAAS (0x40) /* Addressed as a Slave */
+#define MCFMBUS_MBSR_MBB (0x20) /* Bus Busy */
+#define MCFMBUS_MBSR_MAL (0x10) /* Arbitration Lost */
+#define MCFMBUS_MBSR_SRW (0x04) /* Slave Transmit */
+#define MCFMBUS_MBSR_MIF (0x02) /* M-Bus Interrupt */
+#define MCFMBUS_MBSR_RXAK (0x01) /* No Acknowledge Received */
+
+/*
+* Define bit flags in DATA I/O Register
+*/
+
+#define MCFMBUS_MBDR_READ (0x01) /* 1=read 0=write MBUS */
+
+#define MBUSIOCSCLOCK 1
+#define MBUSIOCGCLOCK 2
+#define MBUSIOCSADDR 3
+#define MBUSIOCGADDR 4
+#define MBUSIOCSSLADDR 5
+#define MBUSIOCGSLADDR 6
+#define MBUSIOCSSUBADDR 7
+#define MBUSIOCGSUBADDR 8
+
+#endif
diff --git a/trunk/arch/m68k/include/asm/mman.h b/trunk/arch/m68k/include/asm/mman.h
new file mode 100644
index 000000000000..8eebf89f5ab1
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/mman.h
@@ -0,0 +1 @@
+#include <asm-generic/mman.h>
diff --git a/trunk/arch/m68k/include/asm/mutex.h b/trunk/arch/m68k/include/asm/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/trunk/arch/m68k/include/asm/percpu.h b/trunk/arch/m68k/include/asm/percpu.h
new file mode 100644
index 000000000000..0859d048faf5
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/percpu.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_M68K_PERCPU_H
+#define __ASM_M68K_PERCPU_H
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ASM_M68K_PERCPU_H */
diff --git a/trunk/arch/m68k/include/asm/resource.h b/trunk/arch/m68k/include/asm/resource.h
new file mode 100644
index 000000000000..e7d35019f337
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/resource.h
@@ -0,0 +1,6 @@
+#ifndef _M68K_RESOURCE_H
+#define _M68K_RESOURCE_H
+
+#include <asm-generic/resource.h>
+
+#endif /* _M68K_RESOURCE_H */
diff --git a/trunk/arch/m68k/include/asm/sbus.h b/trunk/arch/m68k/include/asm/sbus.h
new file mode 100644
index 000000000000..bfe3ba147f2e
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/sbus.h
@@ -0,0 +1,45 @@
+/*
+ * some sbus structures and macros to make usage of sbus drivers possible
+ */
+
+#ifndef __M68K_SBUS_H
+#define __M68K_SBUS_H
+
+struct sbus_dev {
+ struct {
+ unsigned int which_io;
+ unsigned int phys_addr;
+ } reg_addrs[1];
+};
+
+/* sbus IO functions stolen from include/asm-sparc/io.h for the serial driver */
+/* No SBUS on the Sun3, kludge -- sam */
+
+static inline void _sbus_writeb(unsigned char val, unsigned long addr)
+{
+ *(volatile unsigned char *)addr = val;
+}
+
+static inline unsigned char _sbus_readb(unsigned long addr)
+{
+ return *(volatile unsigned char *)addr;
+}
+
+static inline void _sbus_writel(unsigned long val, unsigned long addr)
+{
+ *(volatile unsigned long *)addr = val;
+
+}
+
+extern inline unsigned long _sbus_readl(unsigned long addr)
+{
+ return *(volatile unsigned long *)addr;
+}
+
+
+#define sbus_readb(a) _sbus_readb((unsigned long)a)
+#define sbus_writeb(v, a) _sbus_writeb(v, (unsigned long)a)
+#define sbus_readl(a) _sbus_readl((unsigned long)a)
+#define sbus_writel(v, a) _sbus_writel(v, (unsigned long)a)
+
+#endif
diff --git a/trunk/arch/m68k/include/asm/scatterlist.h b/trunk/arch/m68k/include/asm/scatterlist.h
new file mode 100644
index 000000000000..312505452a1e
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/scatterlist.h
@@ -0,0 +1,6 @@
+#ifndef _M68K_SCATTERLIST_H
+#define _M68K_SCATTERLIST_H
+
+#include <asm-generic/scatterlist.h>
+
+#endif /* !(_M68K_SCATTERLIST_H) */
diff --git a/trunk/arch/m68k/include/asm/sections.h b/trunk/arch/m68k/include/asm/sections.h
new file mode 100644
index 000000000000..5277e52715ec
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/sections.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_M68K_SECTIONS_H
+#define _ASM_M68K_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _sbss[], _ebss[];
+
+#endif /* _ASM_M68K_SECTIONS_H */
diff --git a/trunk/arch/m68k/include/asm/shm.h b/trunk/arch/m68k/include/asm/shm.h
new file mode 100644
index 000000000000..fa56ec84a126
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/shm.h
@@ -0,0 +1,31 @@
+#ifndef _M68K_SHM_H
+#define _M68K_SHM_H
+
+
+/* format of page table entries that correspond to shared memory pages
+ currently out in swap space (see also mm/swap.c):
+ bits 0-1 (PAGE_PRESENT) is = 0
+ bits 8..2 (SWP_TYPE) are = SHM_SWP_TYPE
+ bits 31..9 are used like this:
+ bits 15..9 (SHM_ID) the id of the shared memory segment
+ bits 30..16 (SHM_IDX) the index of the page within the shared memory segment
+ (actually only bits 25..16 get used since SHMMAX is so low)
+ bit 31 (SHM_READ_ONLY) flag whether the page belongs to a read-only attach
+*/
+/* on the m68k both bits 0 and 1 must be zero */
+/* format on the sun3 is similar, but bits 30, 31 are set to zero and all
+ others are reduced by 2. --m */
+
+#ifndef CONFIG_SUN3
+#define SHM_ID_SHIFT 9
+#else
+#define SHM_ID_SHIFT 7
+#endif
+#define _SHM_ID_BITS 7
+#define SHM_ID_MASK ((1<<_SHM_ID_BITS)-1)
+
+#define SHM_IDX_SHIFT (SHM_ID_SHIFT+_SHM_ID_BITS)
+#define _SHM_IDX_BITS 15
+#define SHM_IDX_MASK ((1<<_SHM_IDX_BITS)-1)
+
+#endif /* _M68K_SHM_H */
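
A sketch of how the shift/mask constants above would typically be applied to
decode such an entry; the helper names are hypothetical:

	static inline unsigned int shm_swp_id(unsigned long entry)
	{
		return (entry >> SHM_ID_SHIFT) & SHM_ID_MASK;
	}

	static inline unsigned int shm_swp_idx(unsigned long entry)
	{
		return (entry >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
	}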
diff --git a/trunk/arch/m68k/include/asm/siginfo.h b/trunk/arch/m68k/include/asm/siginfo.h
new file mode 100644
index 000000000000..851d3d784b53
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/siginfo.h
@@ -0,0 +1,6 @@
+#ifndef _M68K_SIGINFO_H
+#define _M68K_SIGINFO_H
+
+#include <asm-generic/siginfo.h>
+
+#endif
diff --git a/trunk/arch/m68k/include/asm/statfs.h b/trunk/arch/m68k/include/asm/statfs.h
new file mode 100644
index 000000000000..08d93f14e061
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/statfs.h
@@ -0,0 +1,6 @@
+#ifndef _M68K_STATFS_H
+#define _M68K_STATFS_H
+
+#include <asm-generic/statfs.h>
+
+#endif /* _M68K_STATFS_H */
diff --git a/trunk/arch/m68k/include/asm/topology.h b/trunk/arch/m68k/include/asm/topology.h
new file mode 100644
index 000000000000..ca173e9f26ff
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/topology.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_M68K_TOPOLOGY_H
+#define _ASM_M68K_TOPOLOGY_H
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_M68K_TOPOLOGY_H */
diff --git a/trunk/arch/m68k/include/asm/types.h b/trunk/arch/m68k/include/asm/types.h
new file mode 100644
index 000000000000..89705adcbd52
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/types.h
@@ -0,0 +1,22 @@
+#ifndef _M68K_TYPES_H
+#define _M68K_TYPES_H
+
+/*
+ * This file is never included by application software unless
+ * explicitly requested (e.g., via linux/types.h) in which case the
+ * application is Linux specific so (user-) name space pollution is
+ * not a major issue. However, for interoperability, libraries still
+ * need to be careful to avoid a name clashes.
+ */
+#include <asm-generic/int-ll64.h>
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+#define BITS_PER_LONG 32
+
+#endif /* __KERNEL__ */
+
+#endif /* _M68K_TYPES_H */
diff --git a/trunk/arch/m68k/include/asm/unaligned.h b/trunk/arch/m68k/include/asm/unaligned.h
index 2b3ca0bf7a0d..f4043ae63db1 100644
--- a/trunk/arch/m68k/include/asm/unaligned.h
+++ b/trunk/arch/m68k/include/asm/unaligned.h
@@ -2,7 +2,7 @@
#define _ASM_M68K_UNALIGNED_H
-#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
+#if defined(CONFIG_COLDFIRE) || defined(CONFIG_M68000)
#include <linux/unaligned/be_struct.h>
#include <linux/unaligned/le_byteshift.h>
#include <linux/unaligned/generic.h>
@@ -12,7 +12,7 @@
#else
/*
- * The m68k can do unaligned accesses itself.
+ * The m68k can do unaligned accesses itself.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
diff --git a/trunk/arch/m68k/include/asm/xor.h b/trunk/arch/m68k/include/asm/xor.h
new file mode 100644
index 000000000000..c82eb12a5b18
--- /dev/null
+++ b/trunk/arch/m68k/include/asm/xor.h
@@ -0,0 +1 @@
+#include <asm-generic/xor.h>
diff --git a/trunk/arch/m68k/kernel/setup_no.c b/trunk/arch/m68k/kernel/setup_no.c
index 71fb29938dba..7dc186b7a85f 100644
--- a/trunk/arch/m68k/kernel/setup_no.c
+++ b/trunk/arch/m68k/kernel/setup_no.c
@@ -218,10 +218,13 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
#endif
- pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n",
- _stext, _etext, _sdata, _edata, __bss_start, __bss_stop);
- pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
- __bss_stop, memory_start, memory_start, memory_end);
+ pr_debug("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
+ "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
+ (int) &_sdata, (int) &_edata,
+ (int) &_sbss, (int) &_ebss);
+ pr_debug("MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ",
+ (int) &_ebss, (int) memory_start,
+ (int) memory_start, (int) memory_end);
/* Keep a copy of command line */
*cmdline_p = &command_line[0];
diff --git a/trunk/arch/m68k/kernel/sys_m68k.c b/trunk/arch/m68k/kernel/sys_m68k.c
index 9a5932ec3689..8623f8dc16f8 100644
--- a/trunk/arch/m68k/kernel/sys_m68k.c
+++ b/trunk/arch/m68k/kernel/sys_m68k.c
@@ -479,13 +479,9 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
goto bad_access;
}
- /*
- * No need to check for EFAULT; we know that the page is
- * present and writable.
- */
- __get_user(mem_value, mem);
+ mem_value = *mem;
if (mem_value == oldval)
- __put_user(newval, mem);
+ *mem = newval;
pte_unmap_unlock(pte, ptl);
up_read(&mm->mmap_sem);
diff --git a/trunk/arch/m68k/kernel/vmlinux-nommu.lds b/trunk/arch/m68k/kernel/vmlinux-nommu.lds
index 06a763f49fd3..40e02d9c38b4 100644
--- a/trunk/arch/m68k/kernel/vmlinux-nommu.lds
+++ b/trunk/arch/m68k/kernel/vmlinux-nommu.lds
@@ -78,7 +78,9 @@ SECTIONS {
__init_end = .;
}
+ _sbss = .;
BSS_SECTION(0, 0, 0)
+ _ebss = .;
_end = .;
diff --git a/trunk/arch/m68k/kernel/vmlinux-std.lds b/trunk/arch/m68k/kernel/vmlinux-std.lds
index d0993594f558..63407c836826 100644
--- a/trunk/arch/m68k/kernel/vmlinux-std.lds
+++ b/trunk/arch/m68k/kernel/vmlinux-std.lds
@@ -31,7 +31,9 @@ SECTIONS
RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
+ _sbss = .;
BSS_SECTION(0, 0, 0)
+ _ebss = .;
_edata = .; /* End of data section */
diff --git a/trunk/arch/m68k/kernel/vmlinux-sun3.lds b/trunk/arch/m68k/kernel/vmlinux-sun3.lds
index 8080469ee6c1..ad0f46d64c0b 100644
--- a/trunk/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/trunk/arch/m68k/kernel/vmlinux-sun3.lds
@@ -44,7 +44,9 @@ __init_begin = .;
. = ALIGN(PAGE_SIZE);
__init_end = .;
+ _sbss = .;
BSS_SECTION(0, 0, 0)
+ _ebss = .;
_end = . ;
diff --git a/trunk/arch/m68k/lib/muldi3.c b/trunk/arch/m68k/lib/muldi3.c
index ee5f0b1b5c5d..79e928a525d0 100644
--- a/trunk/arch/m68k/lib/muldi3.c
+++ b/trunk/arch/m68k/lib/muldi3.c
@@ -19,7 +19,7 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-#ifdef CONFIG_CPU_HAS_NO_MULDIV64
+#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
#define SI_TYPE_SIZE 32
#define __BITS4 (SI_TYPE_SIZE / 4)
diff --git a/trunk/arch/m68k/mm/init_mm.c b/trunk/arch/m68k/mm/init_mm.c
index 282f9de68966..f77f258dce3a 100644
--- a/trunk/arch/m68k/mm/init_mm.c
+++ b/trunk/arch/m68k/mm/init_mm.c
@@ -104,7 +104,7 @@ void __init print_memmap(void)
MLK_ROUNDUP(__init_begin, __init_end),
MLK_ROUNDUP(_stext, _etext),
MLK_ROUNDUP(_sdata, _edata),
- MLK_ROUNDUP(__bss_start, __bss_stop));
+ MLK_ROUNDUP(_sbss, _ebss));
}
void __init mem_init(void)
diff --git a/trunk/arch/m68k/mm/init_no.c b/trunk/arch/m68k/mm/init_no.c
index 688e3664aea0..345ec0d83e3d 100644
--- a/trunk/arch/m68k/mm/init_no.c
+++ b/trunk/arch/m68k/mm/init_no.c
@@ -91,7 +91,7 @@ void __init mem_init(void)
totalram_pages = free_all_bootmem();
codek = (_etext - _stext) >> 10;
- datak = (__bss_stop - _sdata) >> 10;
+ datak = (_ebss - _sdata) >> 10;
initk = (__init_begin - __init_end) >> 10;
tmp = nr_free_pages() << PAGE_SHIFT;
diff --git a/trunk/arch/m68k/platform/68328/head-de2.S b/trunk/arch/m68k/platform/68328/head-de2.S
index 537d3245b539..f632fdcb93e9 100644
--- a/trunk/arch/m68k/platform/68328/head-de2.S
+++ b/trunk/arch/m68k/platform/68328/head-de2.S
@@ -60,8 +60,8 @@ _start:
* Move ROM filesystem above bss :-)
*/
- moveal #__bss_start, %a0 /* romfs at the start of bss */
- moveal #__bss_stop, %a1 /* Set up destination */
+ moveal #_sbss, %a0 /* romfs at the start of bss */
+ moveal #_ebss, %a1 /* Set up destination */
movel %a0, %a2 /* Copy of bss start */
movel 8(%a0), %d1 /* Get size of ROMFS */
@@ -84,8 +84,8 @@ _start:
* Initialize BSS segment to 0
*/
- lea __bss_start, %a0
- lea __bss_stop, %a1
+ lea _sbss, %a0
+ lea _ebss, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
2: cmpal %a0, %a1
diff --git a/trunk/arch/m68k/platform/68328/head-pilot.S b/trunk/arch/m68k/platform/68328/head-pilot.S
index 45a9dad29e3d..2ebfd6420818 100644
--- a/trunk/arch/m68k/platform/68328/head-pilot.S
+++ b/trunk/arch/m68k/platform/68328/head-pilot.S
@@ -110,7 +110,7 @@ L0:
movel #CONFIG_VECTORBASE, %d7
addl #16, %d7
moveal %d7, %a0
- moveal #__bss_stop, %a1
+ moveal #_ebss, %a1
lea %a1@(512), %a2
DBG_PUTC('C')
@@ -138,8 +138,8 @@ LD1:
DBG_PUTC('E')
- moveal #__bss_start, %a0
- moveal #__bss_stop, %a1
+ moveal #_sbss, %a0
+ moveal #_ebss, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
L1:
@@ -150,7 +150,7 @@ L1:
DBG_PUTC('F')
/* Copy command line from end of bss to command line */
- moveal #__bss_stop, %a0
+ moveal #_ebss, %a0
moveal #command_line, %a1
lea %a1@(512), %a2
@@ -165,7 +165,7 @@ L3:
movel #_sdata, %d0
movel %d0, _rambase
- movel #__bss_stop, %d0
+ movel #_ebss, %d0
movel %d0, _ramstart
movel %a4, %d0
diff --git a/trunk/arch/m68k/platform/68328/head-ram.S b/trunk/arch/m68k/platform/68328/head-ram.S
index 5189ef926098..7f1aeeacb219 100644
--- a/trunk/arch/m68k/platform/68328/head-ram.S
+++ b/trunk/arch/m68k/platform/68328/head-ram.S
@@ -76,8 +76,8 @@ pclp3:
beq pclp3
#endif /* DEBUG */
moveal #0x007ffff0, %ssp
- moveal #__bss_start, %a0
- moveal #__bss_stop, %a1
+ moveal #_sbss, %a0
+ moveal #_ebss, %a1
/* Copy 0 to %a0 until %a0 >= %a1 */
L1:
diff --git a/trunk/arch/m68k/platform/68328/head-rom.S b/trunk/arch/m68k/platform/68328/head-rom.S
index 3dff98ba2e97..a5ff96d0295f 100644
--- a/trunk/arch/m68k/platform/68328/head-rom.S
+++ b/trunk/arch/m68k/platform/68328/head-rom.S
@@ -59,8 +59,8 @@ _stext: movew #0x2700,%sr
cmpal %a1, %a2
bhi 1b
- moveal #__bss_start, %a0
- moveal #__bss_stop, %a1
+ moveal #_sbss, %a0
+ moveal #_ebss, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
1:
@@ -70,7 +70,7 @@ _stext: movew #0x2700,%sr
movel #_sdata, %d0
movel %d0, _rambase
- movel #__bss_stop, %d0
+ movel #_ebss, %d0
movel %d0, _ramstart
movel #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0
movel %d0, _ramend
diff --git a/trunk/arch/m68k/platform/68360/head-ram.S b/trunk/arch/m68k/platform/68360/head-ram.S
index acd213170d80..8eb94fb6b971 100644
--- a/trunk/arch/m68k/platform/68360/head-ram.S
+++ b/trunk/arch/m68k/platform/68360/head-ram.S
@@ -219,8 +219,8 @@ LD1:
cmp.l #_edata, %a1
blt LD1
- moveal #__bss_start, %a0
- moveal #__bss_stop, %a1
+ moveal #_sbss, %a0
+ moveal #_ebss, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
L1:
@@ -234,7 +234,7 @@ load_quicc:
store_ram_size:
/* Set ram size information */
move.l #_sdata, _rambase
- move.l #__bss_stop, _ramstart
+ move.l #_ebss, _ramstart
move.l #RAMEND, %d0
sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
move.l %d0, _ramend /* Different from RAMEND.*/
diff --git a/trunk/arch/m68k/platform/68360/head-rom.S b/trunk/arch/m68k/platform/68360/head-rom.S
index dfc756d99886..97510e55b802 100644
--- a/trunk/arch/m68k/platform/68360/head-rom.S
+++ b/trunk/arch/m68k/platform/68360/head-rom.S
@@ -13,7 +13,7 @@
*/
.global _stext
-.global __bss_start
+.global _sbss
.global _start
.global _rambase
@@ -229,8 +229,8 @@ LD1:
cmp.l #_edata, %a1
blt LD1
- moveal #__bss_start, %a0
- moveal #__bss_stop, %a1
+ moveal #_sbss, %a0
+ moveal #_ebss, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
L1:
@@ -244,7 +244,7 @@ load_quicc:
store_ram_size:
/* Set ram size information */
move.l #_sdata, _rambase
- move.l #__bss_stop, _ramstart
+ move.l #_ebss, _ramstart
move.l #RAMEND, %d0
sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
move.l %d0, _ramend /* Different from RAMEND.*/
diff --git a/trunk/arch/m68k/platform/coldfire/head.S b/trunk/arch/m68k/platform/coldfire/head.S
index b88f5716f357..4e0c9eb3bd1f 100644
--- a/trunk/arch/m68k/platform/coldfire/head.S
+++ b/trunk/arch/m68k/platform/coldfire/head.S
@@ -230,8 +230,8 @@ _vstart:
/*
* Move ROM filesystem above bss :-)
*/
- lea __bss_start,%a0 /* get start of bss */
- lea __bss_stop,%a1 /* set up destination */
+ lea _sbss,%a0 /* get start of bss */
+ lea _ebss,%a1 /* set up destination */
movel %a0,%a2 /* copy of bss start */
movel 8(%a0),%d0 /* get size of ROMFS */
@@ -249,7 +249,7 @@ _copy_romfs:
bne _copy_romfs
#else /* CONFIG_ROMFS_FS */
- lea __bss_stop,%a1
+ lea _ebss,%a1
movel %a1,_ramstart
#endif /* CONFIG_ROMFS_FS */
@@ -257,8 +257,8 @@ _copy_romfs:
/*
* Zero out the bss region.
*/
- lea __bss_start,%a0 /* get start of bss */
- lea __bss_stop,%a1 /* get end of bss */
+ lea _sbss,%a0 /* get start of bss */
+ lea _ebss,%a1 /* get end of bss */
clrl %d0 /* set value */
_clear_bss:
movel %d0,(%a0)+ /* clear each word */
diff --git a/trunk/arch/m68k/sun3/prom/init.c b/trunk/arch/m68k/sun3/prom/init.c
index eeba067d565f..d8e6349336b4 100644
--- a/trunk/arch/m68k/sun3/prom/init.c
+++ b/trunk/arch/m68k/sun3/prom/init.c
@@ -22,13 +22,57 @@ int prom_root_node;
struct linux_nodeops *prom_nodeops;
/* You must call prom_init() before you attempt to use any of the
- * routines in the prom library.
- * It gets passed the pointer to the PROM vector.
+ * routines in the prom library. It returns 0 on success, 1 on
+ * failure. It gets passed the pointer to the PROM vector.
*/
+extern void prom_meminit(void);
+extern void prom_ranges_init(void);
+
void __init prom_init(struct linux_romvec *rp)
{
romvec = rp;
+#ifndef CONFIG_SUN3
+ switch(romvec->pv_romvers) {
+ case 0:
+ prom_vers = PROM_V0;
+ break;
+ case 2:
+ prom_vers = PROM_V2;
+ break;
+ case 3:
+ prom_vers = PROM_V3;
+ break;
+ case 4:
+ prom_vers = PROM_P1275;
+ prom_printf("PROMLIB: Sun IEEE Prom not supported yet\n");
+ prom_halt();
+ break;
+ default:
+ prom_printf("PROMLIB: Bad PROM version %d\n",
+ romvec->pv_romvers);
+ prom_halt();
+ break;
+ };
+
+ prom_rev = romvec->pv_plugin_revision;
+ prom_prev = romvec->pv_printrev;
+ prom_nodeops = romvec->pv_nodeops;
+
+ prom_root_node = prom_getsibling(0);
+ if((prom_root_node == 0) || (prom_root_node == -1))
+ prom_halt();
+
+ if((((unsigned long) prom_nodeops) == 0) ||
+ (((unsigned long) prom_nodeops) == -1))
+ prom_halt();
+
+ prom_meminit();
+
+ prom_ranges_init();
+#endif
+// printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n",
+// romvec->pv_romvers, prom_rev);
/* Initialization successful. */
return;
diff --git a/trunk/arch/microblaze/include/asm/sections.h b/trunk/arch/microblaze/include/asm/sections.h
index c07ed5d2a820..4487e150b455 100644
--- a/trunk/arch/microblaze/include/asm/sections.h
+++ b/trunk/arch/microblaze/include/asm/sections.h
@@ -18,6 +18,10 @@ extern char _ssbss[], _esbss[];
extern unsigned long __ivt_start[], __ivt_end[];
extern char _etext[], _stext[];
+# ifdef CONFIG_MTD_UCLINUX
+extern char *_ebss;
+# endif
+
extern u32 _fdt_start[], _fdt_end[];
# endif /* !__ASSEMBLY__ */
diff --git a/trunk/arch/microblaze/kernel/microblaze_ksyms.c b/trunk/arch/microblaze/kernel/microblaze_ksyms.c
index 2b25bcf05c00..bb4907c828dc 100644
--- a/trunk/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/trunk/arch/microblaze/kernel/microblaze_ksyms.c
@@ -21,6 +21,9 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
+extern char *_ebss;
+EXPORT_SYMBOL_GPL(_ebss);
+
#ifdef CONFIG_FUNCTION_TRACER
extern void _mcount(void);
EXPORT_SYMBOL(_mcount);
diff --git a/trunk/arch/microblaze/kernel/setup.c b/trunk/arch/microblaze/kernel/setup.c
index 4da971d4392f..16d8dfd9094b 100644
--- a/trunk/arch/microblaze/kernel/setup.c
+++ b/trunk/arch/microblaze/kernel/setup.c
@@ -121,7 +121,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
/* Move ROMFS out of BSS before clearing it */
if (romfs_size > 0) {
- memmove(&__bss_stop, (int *)romfs_base, romfs_size);
+ memmove(&_ebss, (int *)romfs_base, romfs_size);
klimit += romfs_size;
}
#endif
@@ -165,7 +165,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
BUG_ON(romfs_size < 0); /* What else can we do? */
printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
- romfs_size, romfs_base, (unsigned)&__bss_stop);
+ romfs_size, romfs_base, (unsigned)&_ebss);
printk("New klimit: 0x%08x\n", (unsigned)klimit);
#endif
diff --git a/trunk/arch/microblaze/kernel/vmlinux.lds.S b/trunk/arch/microblaze/kernel/vmlinux.lds.S
index 936d01a689d7..109e9d86ade4 100644
--- a/trunk/arch/microblaze/kernel/vmlinux.lds.S
+++ b/trunk/arch/microblaze/kernel/vmlinux.lds.S
@@ -131,6 +131,7 @@ SECTIONS {
*(COMMON)
. = ALIGN (4) ;
__bss_stop = . ;
+ _ebss = . ;
}
. = ALIGN(PAGE_SIZE);
_end = .;
diff --git a/trunk/arch/sh/include/asm/sections.h b/trunk/arch/sh/include/asm/sections.h
index 1b6199740e98..4a5350037c8f 100644
--- a/trunk/arch/sh/include/asm/sections.h
+++ b/trunk/arch/sh/include/asm/sections.h
@@ -6,6 +6,7 @@
extern long __nosave_begin, __nosave_end;
extern long __machvec_start, __machvec_end;
extern char __uncached_start, __uncached_end;
+extern char _ebss[];
extern char __start_eh_frame[], __stop_eh_frame[];
#endif /* __ASM_SH_SECTIONS_H */
diff --git a/trunk/arch/sh/kernel/setup.c b/trunk/arch/sh/kernel/setup.c
index ebe7a7d97215..7b57bf1dc855 100644
--- a/trunk/arch/sh/kernel/setup.c
+++ b/trunk/arch/sh/kernel/setup.c
@@ -273,7 +273,7 @@ void __init setup_arch(char **cmdline_p)
data_resource.start = virt_to_phys(_etext);
data_resource.end = virt_to_phys(_edata)-1;
bss_resource.start = virt_to_phys(__bss_start);
- bss_resource.end = virt_to_phys(__bss_stop)-1;
+ bss_resource.end = virt_to_phys(_ebss)-1;
#ifdef CONFIG_CMDLINE_OVERWRITE
strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
diff --git a/trunk/arch/sh/kernel/sh_ksyms_32.c b/trunk/arch/sh/kernel/sh_ksyms_32.c
index 2a0a596ebf67..3896f26efa4a 100644
--- a/trunk/arch/sh/kernel/sh_ksyms_32.c
+++ b/trunk/arch/sh/kernel/sh_ksyms_32.c
@@ -19,6 +19,7 @@ EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(_ebss);
EXPORT_SYMBOL(empty_zero_page);
#define DECLARE_EXPORT(name) \
diff --git a/trunk/arch/sh/kernel/vmlinux.lds.S b/trunk/arch/sh/kernel/vmlinux.lds.S
index db88cbf9eafd..c98905f71e28 100644
--- a/trunk/arch/sh/kernel/vmlinux.lds.S
+++ b/trunk/arch/sh/kernel/vmlinux.lds.S
@@ -78,6 +78,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_end = .;
BSS_SECTION(0, PAGE_SIZE, 4)
+ _ebss = .; /* uClinux MTD sucks */
_end = . ;
STABS_DEBUG
diff --git a/trunk/arch/sh/lib/mcount.S b/trunk/arch/sh/lib/mcount.S
index 60164e65d665..84a57761f17e 100644
--- a/trunk/arch/sh/lib/mcount.S
+++ b/trunk/arch/sh/lib/mcount.S
@@ -39,7 +39,7 @@
*
* Make sure the stack pointer contains a valid address. Valid
* addresses for kernel stacks are anywhere after the bss
- * (after __bss_stop) and anywhere in init_thread_union (init_stack).
+ * (after _ebss) and anywhere in init_thread_union (init_stack).
*/
#define STACK_CHECK() \
mov #(THREAD_SIZE >> 10), r0; \
@@ -60,7 +60,7 @@
cmp/hi r2, r1; \
bf stack_panic; \
\
- /* If sp > __bss_stop then we're OK. */ \
+ /* If sp > _ebss then we're OK. */ \
mov.l .L_ebss, r1; \
cmp/hi r1, r15; \
bt 1f; \
@@ -70,7 +70,7 @@
cmp/hs r1, r15; \
bf stack_panic; \
\
- /* If sp > init_stack && sp < __bss_stop, not OK. */ \
+ /* If sp > init_stack && sp < _ebss, not OK. */ \
add r0, r1; \
cmp/hs r1, r15; \
bt stack_panic; \
@@ -292,6 +292,8 @@ stack_panic:
nop
.align 2
+.L_ebss:
+ .long _ebss
.L_init_thread_union:
.long init_thread_union
.Lpanic:
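
The STACK_CHECK() comment above boils down to one rule: a kernel stack pointer is acceptable if it lies above the end of .bss (_ebss) or inside the initial thread stack. A minimal C sketch of that rule, assuming the usual _ebss, init_thread_union and THREAD_SIZE symbols, might look like this:

extern char _ebss[];				/* end of .bss, from the linker script */
extern union thread_union init_thread_union;

/* Sketch only: mirrors the check done in assembly by STACK_CHECK() above. */
static inline int sp_looks_valid(unsigned long sp)
{
	unsigned long stack = (unsigned long)&init_thread_union;

	if (sp > (unsigned long)_ebss)
		return 1;			/* anywhere after the bss is fine */

	/* otherwise it must fall inside init_thread_union (init_stack) */
	return sp >= stack && sp < stack + THREAD_SIZE;
}
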
diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig
index 8ec3a1aa4abd..ba2657c49217 100644
--- a/trunk/arch/x86/Kconfig
+++ b/trunk/arch/x86/Kconfig
@@ -1527,7 +1527,7 @@ config SECCOMP
If unsure, say Y. Only embedded should say N here.
config CC_STACKPROTECTOR
- bool "Enable -fstack-protector buffer overflow detection"
+ bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
---help---
This option turns on the -fstack-protector GCC feature. This
feature puts, at the beginning of functions, a canary value on
diff --git a/trunk/arch/x86/include/asm/mce.h b/trunk/arch/x86/include/asm/mce.h
index a3ac52b29cbf..441520e4174f 100644
--- a/trunk/arch/x86/include/asm/mce.h
+++ b/trunk/arch/x86/include/asm/mce.h
@@ -33,14 +33,6 @@
#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55) /* Action required */
-#define MCACOD 0xffff /* MCA Error Code */
-
-/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
-#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
-#define MCACOD_SCRUBMSK 0xfff0
-#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
-#define MCACOD_DATA 0x0134 /* Data Load */
-#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
/* MCi_MISC register defines */
#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
diff --git a/trunk/arch/x86/include/asm/perf_event.h b/trunk/arch/x86/include/asm/perf_event.h
index cb4e43bce98a..dab39350e51e 100644
--- a/trunk/arch/x86/include/asm/perf_event.h
+++ b/trunk/arch/x86/include/asm/perf_event.h
@@ -196,16 +196,11 @@ static inline u32 get_ibs_caps(void) { return 0; }
extern void perf_events_lapic_init(void);
/*
- * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
- * unused and ABI specified to be 0, so nobody should care what we do with
- * them.
- *
- * EXACT - the IP points to the exact instruction that triggered the
- * event (HW bugs exempt).
- * VM - original X86_VM_MASK; see set_linear_ip().
+ * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
+ * This flag is otherwise unused and ABI specified to be 0, so nobody should
+ * care what we do with it.
*/
#define PERF_EFLAGS_EXACT (1UL << 3)
-#define PERF_EFLAGS_VM (1UL << 5)
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
diff --git a/trunk/arch/x86/kernel/acpi/sleep.c b/trunk/arch/x86/kernel/acpi/sleep.c
index 1b8e5a03d942..95bf99de9058 100644
--- a/trunk/arch/x86/kernel/acpi/sleep.c
+++ b/trunk/arch/x86/kernel/acpi/sleep.c
@@ -25,6 +25,10 @@ unsigned long acpi_realmode_flags;
static char temp_stack[4096];
#endif
+asmlinkage void acpi_enter_s3(void)
+{
+ acpi_enter_sleep_state(3, wake_sleep_flags);
+}
/**
* acpi_suspend_lowlevel - save kernel state
*
diff --git a/trunk/arch/x86/kernel/acpi/sleep.h b/trunk/arch/x86/kernel/acpi/sleep.h
index 67f59f8c6956..5653a5791ec9 100644
--- a/trunk/arch/x86/kernel/acpi/sleep.h
+++ b/trunk/arch/x86/kernel/acpi/sleep.h
@@ -2,6 +2,7 @@
* Variables and functions used by the code in sleep.c
*/
+#include
#include
extern unsigned long saved_video_mode;
@@ -10,6 +11,7 @@ extern long saved_magic;
extern int wakeup_pmode_return;
extern u8 wake_sleep_flags;
+extern asmlinkage void acpi_enter_s3(void);
extern unsigned long acpi_copy_wakeup_routine(unsigned long);
extern void wakeup_long64(void);
diff --git a/trunk/arch/x86/kernel/acpi/wakeup_32.S b/trunk/arch/x86/kernel/acpi/wakeup_32.S
index 13ab720573e3..72610839f03b 100644
--- a/trunk/arch/x86/kernel/acpi/wakeup_32.S
+++ b/trunk/arch/x86/kernel/acpi/wakeup_32.S
@@ -74,9 +74,7 @@ restore_registers:
ENTRY(do_suspend_lowlevel)
call save_processor_state
call save_registers
- pushl $3
- call acpi_enter_sleep_state
- addl $4, %esp
+ call acpi_enter_s3
# In case of S3 failure, we'll emerge here. Jump
# to ret_point to recover
diff --git a/trunk/arch/x86/kernel/acpi/wakeup_64.S b/trunk/arch/x86/kernel/acpi/wakeup_64.S
index 8ea5164cbd04..014d1d28c397 100644
--- a/trunk/arch/x86/kernel/acpi/wakeup_64.S
+++ b/trunk/arch/x86/kernel/acpi/wakeup_64.S
@@ -71,9 +71,7 @@ ENTRY(do_suspend_lowlevel)
movq %rsi, saved_rsi
addq $8, %rsp
- movl $3, %edi
- xorl %eax, %eax
- call acpi_enter_sleep_state
+ call acpi_enter_s3
/* in case something went wrong, restore the machine status and go on */
jmp resume_point
diff --git a/trunk/arch/x86/kernel/alternative.c b/trunk/arch/x86/kernel/alternative.c
index afb7ff79a29f..931280ff8299 100644
--- a/trunk/arch/x86/kernel/alternative.c
+++ b/trunk/arch/x86/kernel/alternative.c
@@ -224,7 +224,7 @@ void __init arch_init_ideal_nops(void)
ideal_nops = intel_nops;
#endif
}
- break;
+
default:
#ifdef CONFIG_X86_64
ideal_nops = k8_nops;
diff --git a/trunk/arch/x86/kernel/apic/io_apic.c b/trunk/arch/x86/kernel/apic/io_apic.c
index a6c64aaddf9a..406eee784684 100644
--- a/trunk/arch/x86/kernel/apic/io_apic.c
+++ b/trunk/arch/x86/kernel/apic/io_apic.c
@@ -1204,7 +1204,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
BUG_ON(!cfg->vector);
vector = cfg->vector;
- for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
+ for_each_cpu(cpu, cfg->domain)
per_cpu(vector_irq, cpu)[vector] = -1;
cfg->vector = 0;
@@ -1212,7 +1212,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
if (likely(!cfg->move_in_progress))
return;
- for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
+ for_each_cpu(cpu, cfg->old_domain) {
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
vector++) {
if (per_cpu(vector_irq, cpu)[vector] != irq)
diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce-severity.c b/trunk/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 13017626f9a8..413c2ced887c 100644
--- a/trunk/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/trunk/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -55,6 +55,13 @@ static struct severity {
#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
#define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV)
+#define MCACOD 0xffff
+/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
+#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
+#define MCACOD_SCRUBMSK 0xfff0
+#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
+#define MCACOD_DATA 0x0134 /* Data Load */
+#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
MCESEV(
NO, "Invalid",
diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce.c b/trunk/arch/x86/kernel/cpu/mcheck/mce.c
index 292d0258311c..5e095f873e3e 100644
--- a/trunk/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/trunk/arch/x86/kernel/cpu/mcheck/mce.c
@@ -103,8 +103,6 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
static DEFINE_PER_CPU(struct work_struct, mce_work);
-static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
-
/*
* CPU/chipset specific EDAC code can register a notifier call here to print
* MCE errors in a human-readable form.
@@ -652,18 +650,14 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
* Do a quick check if any of the events requires a panic.
* This decides if we keep the events around or clear them.
*/
-static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
- struct pt_regs *regs)
+static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp)
{
int i, ret = 0;
for (i = 0; i < banks; i++) {
m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
- if (m->status & MCI_STATUS_VAL) {
+ if (m->status & MCI_STATUS_VAL)
__set_bit(i, validp);
- if (quirk_no_way_out)
- quirk_no_way_out(i, m, regs);
- }
if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
ret = 1;
}
@@ -1046,7 +1040,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
*final = m;
memset(valid_banks, 0, sizeof(valid_banks));
- no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
+ no_way_out = mce_no_way_out(&m, &msg, valid_banks);
barrier();
@@ -1424,34 +1418,6 @@ static void __mcheck_cpu_init_generic(void)
}
}
-/*
- * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
- * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
- * Vol 3B Table 15-20). But this confuses both the code that determines
- * whether the machine check occurred in kernel or user mode, and also
- * the severity assessment code. Pretend that EIPV was set, and take the
- * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
- */
-static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
-{
- if (bank != 0)
- return;
- if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
- return;
- if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
- MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
- MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
- MCACOD)) !=
- (MCI_STATUS_UC|MCI_STATUS_EN|
- MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
- MCI_STATUS_AR|MCACOD_INSTR))
- return;
-
- m->mcgstatus |= MCG_STATUS_EIPV;
- m->ip = regs->ip;
- m->cs = regs->cs;
-}
-
/* Add per CPU specific workarounds here */
static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
@@ -1549,9 +1515,6 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
*/
if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
mce_bootlog = 0;
-
- if (c->x86 == 6 && c->x86_model == 45)
- quirk_no_way_out = quirk_sandybridge_ifu;
}
if (monarch_timeout < 0)
monarch_timeout = 0;
diff --git a/trunk/arch/x86/kernel/cpu/perf_event.c b/trunk/arch/x86/kernel/cpu/perf_event.c
index 915b876edd1e..29557aa06dda 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event.c
@@ -32,8 +32,6 @@
#include
#include
#include
-#include
-#include
#include "perf_event.h"
@@ -1740,29 +1738,6 @@ valid_user_frame(const void __user *fp, unsigned long size)
return (__range_not_ok(fp, size, TASK_SIZE) == 0);
}
-static unsigned long get_segment_base(unsigned int segment)
-{
- struct desc_struct *desc;
- int idx = segment >> 3;
-
- if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
- if (idx > LDT_ENTRIES)
- return 0;
-
- if (idx > current->active_mm->context.size)
- return 0;
-
- desc = current->active_mm->context.ldt;
- } else {
- if (idx > GDT_ENTRIES)
- return 0;
-
- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
- }
-
- return get_desc_base(desc + idx);
-}
-
#ifdef CONFIG_COMPAT
#include
@@ -1771,17 +1746,13 @@ static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
/* 32-bit process in 64-bit kernel. */
- unsigned long ss_base, cs_base;
struct stack_frame_ia32 frame;
const void __user *fp;
if (!test_thread_flag(TIF_IA32))
return 0;
- cs_base = get_segment_base(regs->cs);
- ss_base = get_segment_base(regs->ss);
-
- fp = compat_ptr(ss_base + regs->bp);
+ fp = compat_ptr(regs->bp);
while (entry->nr < PERF_MAX_STACK_DEPTH) {
unsigned long bytes;
frame.next_frame = 0;
@@ -1794,8 +1765,8 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
if (!valid_user_frame(fp, sizeof(frame)))
break;
- perf_callchain_store(entry, cs_base + frame.return_address);
- fp = compat_ptr(ss_base + frame.next_frame);
+ perf_callchain_store(entry, frame.return_address);
+ fp = compat_ptr(frame.next_frame);
}
return 1;
}
@@ -1818,12 +1789,6 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
return;
}
- /*
- * We don't know what to do with VM86 stacks.. ignore them for now.
- */
- if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
- return;
-
fp = (void __user *)regs->bp;
perf_callchain_store(entry, regs->ip);
@@ -1851,50 +1816,16 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
}
}
-/*
- * Deal with code segment offsets for the various execution modes:
- *
- * VM86 - the good olde 16 bit days, where the linear address is
- * 20 bits and we use regs->ip + 0x10 * regs->cs.
- *
- * IA32 - Where we need to look at GDT/LDT segment descriptor tables
- * to figure out what the 32bit base address is.
- *
- * X32 - has TIF_X32 set, but is running in x86_64
- *
- * X86_64 - CS,DS,SS,ES are all zero based.
- */
-static unsigned long code_segment_base(struct pt_regs *regs)
-{
- /*
- * If we are in VM86 mode, add the segment offset to convert to a
- * linear address.
- */
- if (regs->flags & X86_VM_MASK)
- return 0x10 * regs->cs;
-
- /*
- * For IA32 we look at the GDT/LDT segment base to convert the
- * effective IP to a linear address.
- */
-#ifdef CONFIG_X86_32
- if (user_mode(regs) && regs->cs != __USER_CS)
- return get_segment_base(regs->cs);
-#else
- if (test_thread_flag(TIF_IA32)) {
- if (user_mode(regs) && regs->cs != __USER32_CS)
- return get_segment_base(regs->cs);
- }
-#endif
- return 0;
-}
-
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
+ unsigned long ip;
+
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
- return perf_guest_cbs->get_guest_ip();
+ ip = perf_guest_cbs->get_guest_ip();
+ else
+ ip = instruction_pointer(regs);
- return regs->ip + code_segment_base(regs);
+ return ip;
}
unsigned long perf_misc_flags(struct pt_regs *regs)
@@ -1907,7 +1838,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
else
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
} else {
- if (user_mode(regs))
+ if (!kernel_ip(regs->ip))
misc |= PERF_RECORD_MISC_USER;
else
misc |= PERF_RECORD_MISC_KERNEL;
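
The code_segment_base() comment removed above spells out how a sampled IP becomes a linear address: in VM86 mode the 16-bit segment contributes cs * 16, protected-mode IA32 needs the GDT/LDT descriptor base, and 64-bit CS is zero-based. A tiny sketch of just the VM86 arithmetic, for illustration:

/*
 * Sketch of the VM86 case from the removed comment: a real-mode cs:ip
 * pair maps to the 20-bit linear address cs * 16 + ip.
 */
static unsigned long vm86_linear_ip(unsigned long cs, unsigned long ip)
{
	return (cs << 4) + ip;		/* same as 0x10 * cs + ip */
}
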
diff --git a/trunk/arch/x86/kernel/cpu/perf_event.h b/trunk/arch/x86/kernel/cpu/perf_event.h
index 6605a81ba339..821d53b696d1 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event.h
+++ b/trunk/arch/x86/kernel/cpu/perf_event.h
@@ -516,26 +516,6 @@ static inline bool kernel_ip(unsigned long ip)
#endif
}
-/*
- * Not all PMUs provide the right context information to place the reported IP
- * into full context. Specifically segment registers are typically not
- * supplied.
- *
- * Assuming the address is a linear address (it is for IBS), we fake the CS and
- * vm86 mode using the known zero-based code segment and 'fix up' the registers
- * to reflect this.
- *
- * Intel PEBS/LBR appear to typically provide the effective address, nothing
- * much we can do about that but pray and treat it like a linear address.
- */
-static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
-{
- regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
- if (regs->flags & X86_VM_MASK)
- regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
- regs->ip = ip;
-}
-
#ifdef CONFIG_CPU_SUP_AMD
int amd_pmu_init(void);
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/trunk/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 7bfb5bec8630..da9bcdcd9856 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -13,8 +13,6 @@
#include
-#include "perf_event.h"
-
static u32 ibs_caps;
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
@@ -538,7 +536,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
regs.flags &= ~PERF_EFLAGS_EXACT;
} else {
- set_linear_ip(&regs, ibs_data.regs[1]);
+ instruction_pointer_set(&regs, ibs_data.regs[1]);
regs.flags |= PERF_EFLAGS_EXACT;
}
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
index e38d97bf4259..629ae0b7ad90 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -499,7 +499,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
* We sampled a branch insn, rewind using the LBR stack
*/
if (ip == to) {
- set_linear_ip(regs, from);
+ regs->ip = from;
return 1;
}
@@ -529,7 +529,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
} while (to < ip);
if (to == ip) {
- set_linear_ip(regs, old_to);
+ regs->ip = old_to;
return 1;
}
@@ -569,8 +569,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
* A possible PERF_SAMPLE_REGS will have to transfer all regs.
*/
regs = *iregs;
- regs.flags = pebs->flags;
- set_linear_ip(&regs, pebs->ip);
+ regs.ip = pebs->ip;
regs.bp = pebs->bp;
regs.sp = pebs->sp;
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index c9e5dc56630a..f3851892e077 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -5,7 +5,7 @@
#include "perf_event.h"
#define UNCORE_PMU_NAME_LEN 32
-#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC)
+#define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC)
#define UNCORE_FIXED_EVENT 0xff
#define UNCORE_PMC_IDX_MAX_GENERIC 8
diff --git a/trunk/arch/x86/kernel/irq.c b/trunk/arch/x86/kernel/irq.c
index 7ad683d78645..1f5f1d5d2a02 100644
--- a/trunk/arch/x86/kernel/irq.c
+++ b/trunk/arch/x86/kernel/irq.c
@@ -328,7 +328,6 @@ void fixup_irqs(void)
chip->irq_retrigger(data);
raw_spin_unlock(&desc->lock);
}
- __this_cpu_write(vector_irq[vector], -1);
}
}
#endif
diff --git a/trunk/arch/x86/kernel/kdebugfs.c b/trunk/arch/x86/kernel/kdebugfs.c
index dc1404bf8e4b..1d5d31ea686b 100644
--- a/trunk/arch/x86/kernel/kdebugfs.c
+++ b/trunk/arch/x86/kernel/kdebugfs.c
@@ -107,7 +107,7 @@ static int __init create_setup_data_nodes(struct dentry *parent)
{
struct setup_data_node *node;
struct setup_data *data;
- int error;
+ int error = -ENOMEM;
struct dentry *d;
struct page *pg;
u64 pa_data;
@@ -121,10 +121,8 @@ static int __init create_setup_data_nodes(struct dentry *parent)
while (pa_data) {
node = kmalloc(sizeof(*node), GFP_KERNEL);
- if (!node) {
- error = -ENOMEM;
+ if (!node)
goto err_dir;
- }
pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT);
if (PageHighMem(pg)) {
diff --git a/trunk/arch/x86/kvm/i8259.c b/trunk/arch/x86/kvm/i8259.c
index e498b18f010c..1df8fb9e1d5d 100644
--- a/trunk/arch/x86/kvm/i8259.c
+++ b/trunk/arch/x86/kvm/i8259.c
@@ -316,11 +316,6 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
addr &= 1;
if (addr == 0) {
if (val & 0x10) {
- u8 edge_irr = s->irr & ~s->elcr;
- int i;
- bool found;
- struct kvm_vcpu *vcpu;
-
s->init4 = val & 1;
s->last_irr = 0;
s->irr &= s->elcr;
@@ -338,18 +333,6 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
if (val & 0x08)
pr_pic_unimpl(
"level sensitive irq not supported");
-
- kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
- if (kvm_apic_accept_pic_intr(vcpu)) {
- found = true;
- break;
- }
-
-
- if (found)
- for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
- if (edge_irr & (1 << irq))
- pic_clear_isr(s, irq);
} else if (val & 0x08) {
if (val & 0x04)
s->poll = 1;
diff --git a/trunk/arch/x86/kvm/vmx.c b/trunk/arch/x86/kvm/vmx.c
index c00f03de1b79..c39b60707e02 100644
--- a/trunk/arch/x86/kvm/vmx.c
+++ b/trunk/arch/x86/kvm/vmx.c
@@ -1488,6 +1488,13 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
loadsegment(ds, vmx->host_state.ds_sel);
loadsegment(es, vmx->host_state.es_sel);
}
+#else
+ /*
+ * The sysexit path does not restore ds/es, so we must set them to
+ * a reasonable value ourselves.
+ */
+ loadsegment(ds, __USER_DS);
+ loadsegment(es, __USER_DS);
#endif
reload_tss();
#ifdef CONFIG_X86_64
@@ -6363,19 +6370,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
);
-#ifndef CONFIG_X86_64
- /*
- * The sysexit path does not restore ds/es, so we must set them to
- * a reasonable value ourselves.
- *
- * We can't defer this to vmx_load_host_state() since that function
- * may be executed in interrupt context, which saves and restore segments
- * around it, nullifying its effect.
- */
- loadsegment(ds, __USER_DS);
- loadsegment(es, __USER_DS);
-#endif
-
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
| (1 << VCPU_EXREG_RFLAGS)
| (1 << VCPU_EXREG_CPL)
diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c
index 42bce48f6928..59b59508ff07 100644
--- a/trunk/arch/x86/kvm/x86.c
+++ b/trunk/arch/x86/kvm/x86.c
@@ -925,10 +925,6 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
*/
getboottime(&boot);
- if (kvm->arch.kvmclock_offset) {
- struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
- boot = timespec_sub(boot, ts);
- }
wc.sec = boot.tv_sec;
wc.nsec = boot.tv_nsec;
wc.version = version;
diff --git a/trunk/arch/x86/mm/srat.c b/trunk/arch/x86/mm/srat.c
index 4ddf497ca65b..4599c3e8bcb6 100644
--- a/trunk/arch/x86/mm/srat.c
+++ b/trunk/arch/x86/mm/srat.c
@@ -142,23 +142,23 @@ static inline int save_add_info(void) {return 0;}
#endif
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
-int __init
+void __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
u64 start, end;
int node, pxm;
if (srat_disabled())
- return -1;
+ return;
if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
bad_srat();
- return -1;
+ return;
}
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
- return -1;
+ return;
if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
- return -1;
+ return;
start = ma->base_address;
end = start + ma->length;
pxm = ma->proximity_domain;
@@ -168,12 +168,12 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains.\n");
bad_srat();
- return -1;
+ return;
}
if (numa_add_memblk(node, start, end) < 0) {
bad_srat();
- return -1;
+ return;
}
node_set(node, numa_nodes_parsed);
@@ -181,7 +181,6 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
node, pxm,
(unsigned long long) start, (unsigned long long) end - 1);
- return 0;
}
void __init acpi_numa_arch_fixup(void) {}
diff --git a/trunk/arch/x86/syscalls/syscall_64.tbl b/trunk/arch/x86/syscalls/syscall_64.tbl
index 29aed7ac2c02..51171aeff0dc 100644
--- a/trunk/arch/x86/syscalls/syscall_64.tbl
+++ b/trunk/arch/x86/syscalls/syscall_64.tbl
@@ -318,7 +318,7 @@
309 common getcpu sys_getcpu
310 64 process_vm_readv sys_process_vm_readv
311 64 process_vm_writev sys_process_vm_writev
-312 common kcmp sys_kcmp
+312 64 kcmp sys_kcmp
#
# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/trunk/drivers/acpi/acpica/achware.h b/trunk/drivers/acpi/acpica/achware.h
index 5de4ec72766d..5ccb99ae3a6f 100644
--- a/trunk/drivers/acpi/acpica/achware.h
+++ b/trunk/drivers/acpi/acpica/achware.h
@@ -83,22 +83,22 @@ acpi_status acpi_hw_clear_acpi_status(void);
/*
* hwsleep - sleep/wake support (Legacy sleep registers)
*/
-acpi_status acpi_hw_legacy_sleep(u8 sleep_state);
+acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags);
-acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state);
+acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags);
-acpi_status acpi_hw_legacy_wake(u8 sleep_state);
+acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags);
/*
* hwesleep - sleep/wake support (Extended FADT-V5 sleep registers)
*/
void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument);
-acpi_status acpi_hw_extended_sleep(u8 sleep_state);
+acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags);
-acpi_status acpi_hw_extended_wake_prep(u8 sleep_state);
+acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags);
-acpi_status acpi_hw_extended_wake(u8 sleep_state);
+acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags);
/*
* hwvalid - Port I/O with validation
diff --git a/trunk/drivers/acpi/acpica/hwesleep.c b/trunk/drivers/acpi/acpica/hwesleep.c
index 94996f9ae3ad..48518dac5342 100644
--- a/trunk/drivers/acpi/acpica/hwesleep.c
+++ b/trunk/drivers/acpi/acpica/hwesleep.c
@@ -90,6 +90,7 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
* FUNCTION: acpi_hw_extended_sleep
*
* PARAMETERS: sleep_state - Which sleep state to enter
+ * flags - ACPI_EXECUTE_GTS to run optional method
*
* RETURN: Status
*
@@ -99,7 +100,7 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
*
******************************************************************************/
-acpi_status acpi_hw_extended_sleep(u8 sleep_state)
+acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
{
acpi_status status;
u8 sleep_type_value;
@@ -124,6 +125,12 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state)
acpi_gbl_system_awake_and_running = FALSE;
+ /* Optionally execute _GTS (Going To Sleep) */
+
+ if (flags & ACPI_EXECUTE_GTS) {
+ acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
+ }
+
/* Flush caches, as per ACPI specification */
ACPI_FLUSH_CPU_CACHE();
@@ -165,6 +172,7 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state)
* FUNCTION: acpi_hw_extended_wake_prep
*
* PARAMETERS: sleep_state - Which sleep state we just exited
+ * flags - ACPI_EXECUTE_BFS to run optional method
*
* RETURN: Status
*
@@ -173,7 +181,7 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state)
*
******************************************************************************/
-acpi_status acpi_hw_extended_wake_prep(u8 sleep_state)
+acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
{
acpi_status status;
u8 sleep_type_value;
@@ -192,6 +200,11 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state)
&acpi_gbl_FADT.sleep_control);
}
+ /* Optionally execute _BFS (Back From Sleep) */
+
+ if (flags & ACPI_EXECUTE_BFS) {
+ acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
+ }
return_ACPI_STATUS(AE_OK);
}
@@ -209,7 +222,7 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state)
*
******************************************************************************/
-acpi_status acpi_hw_extended_wake(u8 sleep_state)
+acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags)
{
ACPI_FUNCTION_TRACE(hw_extended_wake);
diff --git a/trunk/drivers/acpi/acpica/hwsleep.c b/trunk/drivers/acpi/acpica/hwsleep.c
index 3fddde056a5e..9960fe9ef533 100644
--- a/trunk/drivers/acpi/acpica/hwsleep.c
+++ b/trunk/drivers/acpi/acpica/hwsleep.c
@@ -56,6 +56,7 @@ ACPI_MODULE_NAME("hwsleep")
* FUNCTION: acpi_hw_legacy_sleep
*
* PARAMETERS: sleep_state - Which sleep state to enter
+ * flags - ACPI_EXECUTE_GTS to run optional method
*
* RETURN: Status
*
@@ -63,7 +64,7 @@ ACPI_MODULE_NAME("hwsleep")
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
+acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
{
struct acpi_bit_register_info *sleep_type_reg_info;
struct acpi_bit_register_info *sleep_enable_reg_info;
@@ -109,6 +110,12 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
return_ACPI_STATUS(status);
}
+ /* Optionally execute _GTS (Going To Sleep) */
+
+ if (flags & ACPI_EXECUTE_GTS) {
+ acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
+ }
+
/* Get current value of PM1A control */
status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
@@ -207,6 +214,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
* FUNCTION: acpi_hw_legacy_wake_prep
*
* PARAMETERS: sleep_state - Which sleep state we just exited
+ * flags - ACPI_EXECUTE_BFS to run optional method
*
* RETURN: Status
*
@@ -216,7 +224,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
*
******************************************************************************/
-acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
+acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
{
acpi_status status;
struct acpi_bit_register_info *sleep_type_reg_info;
@@ -267,6 +275,11 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
}
}
+ /* Optionally execute _BFS (Back From Sleep) */
+
+ if (flags & ACPI_EXECUTE_BFS) {
+ acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
+ }
return_ACPI_STATUS(status);
}
@@ -275,6 +288,7 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
* FUNCTION: acpi_hw_legacy_wake
*
* PARAMETERS: sleep_state - Which sleep state we just exited
+ * flags - Reserved, set to zero
*
* RETURN: Status
*
@@ -283,7 +297,7 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
*
******************************************************************************/
-acpi_status acpi_hw_legacy_wake(u8 sleep_state)
+acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
{
acpi_status status;
diff --git a/trunk/drivers/acpi/acpica/hwxfsleep.c b/trunk/drivers/acpi/acpica/hwxfsleep.c
index 1f165a750ae2..f8684bfe7907 100644
--- a/trunk/drivers/acpi/acpica/hwxfsleep.c
+++ b/trunk/drivers/acpi/acpica/hwxfsleep.c
@@ -50,7 +50,7 @@ ACPI_MODULE_NAME("hwxfsleep")
/* Local prototypes */
static acpi_status
-acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
+acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id);
/*
* Dispatch table used to efficiently branch to the various sleep
@@ -235,7 +235,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
*
******************************************************************************/
static acpi_status
-acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id)
+acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
{
acpi_status status;
struct acpi_sleep_functions *sleep_functions =
@@ -248,11 +248,11 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id)
* use the extended sleep registers
*/
if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) {
- status = sleep_functions->extended_function(sleep_state);
+ status = sleep_functions->extended_function(sleep_state, flags);
} else {
/* Legacy sleep */
- status = sleep_functions->legacy_function(sleep_state);
+ status = sleep_functions->legacy_function(sleep_state, flags);
}
return (status);
@@ -262,7 +262,7 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id)
* For the case where reduced-hardware-only code is being generated,
* we know that only the extended sleep registers are available
*/
- status = sleep_functions->extended_function(sleep_state);
+ status = sleep_functions->extended_function(sleep_state, flags);
return (status);
#endif /* !ACPI_REDUCED_HARDWARE */
@@ -349,6 +349,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
* FUNCTION: acpi_enter_sleep_state
*
* PARAMETERS: sleep_state - Which sleep state to enter
+ * flags - ACPI_EXECUTE_GTS to run optional method
*
* RETURN: Status
*
@@ -356,7 +357,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags)
{
acpi_status status;
@@ -370,7 +371,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
}
status =
- acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID);
+ acpi_hw_sleep_dispatch(sleep_state, flags, ACPI_SLEEP_FUNCTION_ID);
return_ACPI_STATUS(status);
}
@@ -390,14 +391,14 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
* Called with interrupts DISABLED.
*
******************************************************************************/
-acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
+acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags)
{
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
status =
- acpi_hw_sleep_dispatch(sleep_state,
+ acpi_hw_sleep_dispatch(sleep_state, flags,
ACPI_WAKE_PREP_FUNCTION_ID);
return_ACPI_STATUS(status);
}
@@ -422,7 +423,8 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
- status = acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_FUNCTION_ID);
+
+ status = acpi_hw_sleep_dispatch(sleep_state, 0, ACPI_WAKE_FUNCTION_ID);
return_ACPI_STATUS(status);
}
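
acpi_hw_sleep_dispatch() above selects the extended (FADT V5) handlers when reduced-hardware mode is set or the FADT advertises a SleepControl register, and the legacy PM1 handlers otherwise, forwarding the new flags argument either way. A simplified sketch of that dispatch shape, with a hypothetical function-pointer pair standing in for acpi_sleep_functions:

/* Hypothetical stand-in for the ACPICA sleep dispatch table. */
struct sleep_functions {
	int (*legacy_function)(unsigned char sleep_state, unsigned char flags);
	int (*extended_function)(unsigned char sleep_state, unsigned char flags);
};

static int sleep_dispatch(const struct sleep_functions *fns,
			  unsigned char sleep_state, unsigned char flags,
			  int reduced_hw, unsigned long sleep_control_addr)
{
	/* Extended (FADT V5) registers win when present or mandated. */
	if (reduced_hw || sleep_control_addr)
		return fns->extended_function(sleep_state, flags);

	return fns->legacy_function(sleep_state, flags);
}
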
diff --git a/trunk/drivers/acpi/numa.c b/trunk/drivers/acpi/numa.c
index cb31298ca684..e56f3be7b07d 100644
--- a/trunk/drivers/acpi/numa.c
+++ b/trunk/drivers/acpi/numa.c
@@ -237,8 +237,6 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header,
return 0;
}
-static int __initdata parsed_numa_memblks;
-
static int __init
acpi_parse_memory_affinity(struct acpi_subtable_header * header,
const unsigned long end)
@@ -252,8 +250,8 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
acpi_table_print_srat_entry(header);
/* let architecture-dependent part to do it */
- if (!acpi_numa_memory_affinity_init(memory_affinity))
- parsed_numa_memblks++;
+ acpi_numa_memory_affinity_init(memory_affinity);
+
return 0;
}
@@ -306,10 +304,8 @@ int __init acpi_numa_init(void)
acpi_numa_arch_fixup();
- if (cnt < 0)
- return cnt;
- else if (!parsed_numa_memblks)
- return -ENOENT;
+ if (cnt <= 0)
+ return cnt ?: -ENOENT;
return 0;
}
diff --git a/trunk/drivers/acpi/pci_root.c b/trunk/drivers/acpi/pci_root.c
index 72a2c98bc429..ec54014c321c 100644
--- a/trunk/drivers/acpi/pci_root.c
+++ b/trunk/drivers/acpi/pci_root.c
@@ -573,15 +573,8 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
if (pci_msi_enabled())
flags |= OSC_MSI_SUPPORT;
- if (flags != base_flags) {
- status = acpi_pci_osc_support(root, flags);
- if (ACPI_FAILURE(status)) {
- dev_info(root->bus->bridge, "ACPI _OSC support "
- "notification failed, disabling PCIe ASPM\n");
- pcie_no_aspm();
- flags = base_flags;
- }
- }
+ if (flags != base_flags)
+ acpi_pci_osc_support(root, flags);
if (!pcie_ports_disabled
&& (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
diff --git a/trunk/drivers/acpi/processor_driver.c b/trunk/drivers/acpi/processor_driver.c
index bfc31cb0dd3e..ff8e04f2fab4 100644
--- a/trunk/drivers/acpi/processor_driver.c
+++ b/trunk/drivers/acpi/processor_driver.c
@@ -437,7 +437,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
/* Normal CPU soft online event */
} else {
acpi_processor_ppc_has_changed(pr, 0);
- acpi_processor_hotplug(pr);
+ acpi_processor_cst_has_changed(pr);
acpi_processor_reevaluate_tstate(pr, action);
acpi_processor_tstate_has_changed(pr);
}
diff --git a/trunk/drivers/acpi/sleep.c b/trunk/drivers/acpi/sleep.c
index fdcdbb652915..7a7a9c929247 100644
--- a/trunk/drivers/acpi/sleep.c
+++ b/trunk/drivers/acpi/sleep.c
@@ -28,7 +28,36 @@
#include "internal.h"
#include "sleep.h"
+u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS;
+static unsigned int gts, bfs;
+static int set_param_wake_flag(const char *val, struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+
+ if (ret)
+ return ret;
+
+ if (kp->arg == (const char *)&gts) {
+ if (gts)
+ wake_sleep_flags |= ACPI_EXECUTE_GTS;
+ else
+ wake_sleep_flags &= ~ACPI_EXECUTE_GTS;
+ }
+ if (kp->arg == (const char *)&bfs) {
+ if (bfs)
+ wake_sleep_flags |= ACPI_EXECUTE_BFS;
+ else
+ wake_sleep_flags &= ~ACPI_EXECUTE_BFS;
+ }
+ return ret;
+}
+module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644);
+module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644);
+MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
+MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
+
static u8 sleep_states[ACPI_S_STATE_COUNT];
+static bool pwr_btn_event_pending;
static void acpi_sleep_tts_switch(u32 acpi_state)
{
@@ -81,7 +110,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
-static bool pwr_btn_event_pending;
/*
* The ACPI specification wants us to save NVS memory regions during hibernation
@@ -277,7 +305,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
switch (acpi_state) {
case ACPI_STATE_S1:
barrier();
- status = acpi_enter_sleep_state(acpi_state);
+ status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags);
break;
case ACPI_STATE_S3:
@@ -291,8 +319,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
/* This violates the spec but is required for bug compatibility. */
acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
- /* Reprogram control registers */
- acpi_leave_sleep_state_prep(acpi_state);
+ /* Reprogram control registers and execute _BFS */
+ acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags);
/* ACPI 3.0 specs (P62) says that it's the responsibility
* of the OSPM to clear the status bit [ implying that the
@@ -575,9 +603,9 @@ static int acpi_hibernation_enter(void)
ACPI_FLUSH_CPU_CACHE();
/* This shouldn't return. If it returns, we have a problem */
- status = acpi_enter_sleep_state(ACPI_STATE_S4);
- /* Reprogram control registers */
- acpi_leave_sleep_state_prep(ACPI_STATE_S4);
+ status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags);
+ /* Reprogram control registers and execute _BFS */
+ acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
@@ -589,8 +617,8 @@ static void acpi_hibernation_leave(void)
* enable it here.
*/
acpi_enable();
- /* Reprogram control registers */
- acpi_leave_sleep_state_prep(ACPI_STATE_S4);
+ /* Reprogram control registers and execute _BFS */
+ acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
/* Check the hardware signature */
if (facs && s4_hardware_signature != facs->hardware_signature) {
printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
@@ -864,7 +892,33 @@ static void acpi_power_off(void)
/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
printk(KERN_DEBUG "%s called\n", __func__);
local_irq_disable();
- acpi_enter_sleep_state(ACPI_STATE_S5);
+ acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags);
+}
+
+/*
+ * ACPI 2.0 created the optional _GTS and _BFS,
+ * but industry adoption has been neither rapid nor broad.
+ *
+ * Linux gets into trouble when it executes poorly validated
+ * paths through the BIOS, so disable _GTS and _BFS by default,
+ * but do speak up and offer the option to enable them.
+ */
+static void __init acpi_gts_bfs_check(void)
+{
+ acpi_handle dummy;
+
+ if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__GTS, &dummy)))
+ {
+ printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n");
+ printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, "
+ "please notify linux-acpi@vger.kernel.org\n");
+ }
+ if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__BFS, &dummy)))
+ {
+ printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n");
+ printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, "
+ "please notify linux-acpi@vger.kernel.org\n");
+ }
}
int __init acpi_sleep_init(void)
@@ -925,5 +979,6 @@ int __init acpi_sleep_init(void)
* object can also be evaluated when the system enters S5.
*/
register_reboot_notifier(&tts_notifier);
+ acpi_gts_bfs_check();
return 0;
}
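
set_param_wake_flag() above follows the usual module_param_call() idiom: param_set_int() parses the value, then kp->arg is compared against the addresses of the backing variables to decide which bit of wake_sleep_flags to flip. A stripped-down, hypothetical module using the same pattern (all names here are illustrative, not from this patch):

#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int verbose;
static u8 feature_flags;
#define FEATURE_VERBOSE 0x01

/* Custom setter: parse the int, then fold the result into a flag word. */
static int set_verbose(const char *val, struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);

	if (ret)
		return ret;

	if (verbose)
		feature_flags |= FEATURE_VERBOSE;
	else
		feature_flags &= ~FEATURE_VERBOSE;
	return 0;
}

module_param_call(verbose, set_verbose, param_get_int, &verbose, 0644);
MODULE_PARM_DESC(verbose, "Toggle the verbose feature flag (example).");
MODULE_LICENSE("GPL");
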
diff --git a/trunk/drivers/acpi/sysfs.c b/trunk/drivers/acpi/sysfs.c
index 7c3f98ba4afe..240a24400976 100644
--- a/trunk/drivers/acpi/sysfs.c
+++ b/trunk/drivers/acpi/sysfs.c
@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
{
int result = 0;
- if (!strncmp(val, "enable", sizeof("enable") - 1)) {
+ if (!strncmp(val, "enable", strlen("enable"))) {
result = acpi_debug_trace(trace_method_name, trace_debug_level,
trace_debug_layer, 0);
if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
goto exit;
}
- if (!strncmp(val, "disable", sizeof("disable") - 1)) {
+ if (!strncmp(val, "disable", strlen("disable"))) {
int name = 0;
result = acpi_debug_trace((char *)&name, trace_debug_level,
trace_debug_layer, 0);
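
Both sides of the hunk above compare only a prefix of the input; sizeof("enable") - 1 and strlen("enable") produce the same length (6), the former at compile time. A quick user-space check of that equivalence:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Both expressions give the length of "enable" without its NUL. */
	printf("%zu %zu\n", sizeof("enable") - 1, strlen("enable"));

	/* Prefix match: "enable", "enabled" and "enable=1" all pass. */
	printf("%d\n", strncmp("enable=1", "enable", strlen("enable")) == 0);
	return 0;
}
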
diff --git a/trunk/drivers/atm/iphase.c b/trunk/drivers/atm/iphase.c
index 96cce6d53195..d4386019af5d 100644
--- a/trunk/drivers/atm/iphase.c
+++ b/trunk/drivers/atm/iphase.c
@@ -2362,7 +2362,7 @@ static int __devinit ia_init(struct atm_dev *dev)
{
printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
dev->number);
- return -ENOMEM;
+ return error;
}
IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
dev->number, iadev->pci->revision, base, iadev->irq);)
diff --git a/trunk/drivers/bcma/host_pci.c b/trunk/drivers/bcma/host_pci.c
index a6e5672c67e7..11b32d2642df 100644
--- a/trunk/drivers/bcma/host_pci.c
+++ b/trunk/drivers/bcma/host_pci.c
@@ -272,7 +272,6 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
{ 0, },
};
diff --git a/trunk/drivers/bcma/sprom.c b/trunk/drivers/bcma/sprom.c
index 9ea4627dc0c2..26823d97fd9f 100644
--- a/trunk/drivers/bcma/sprom.c
+++ b/trunk/drivers/bcma/sprom.c
@@ -507,9 +507,7 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
/* for these chips OTP is always available */
present = true;
break;
- case BCMA_CHIP_ID_BCM43228:
- present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT;
- break;
+
default:
present = false;
break;
diff --git a/trunk/drivers/block/drbd/drbd_main.c b/trunk/drivers/block/drbd/drbd_main.c
index dbe6135a2abe..2e0e7fc1dbba 100644
--- a/trunk/drivers/block/drbd/drbd_main.c
+++ b/trunk/drivers/block/drbd/drbd_main.c
@@ -3537,9 +3537,9 @@ static void drbd_cleanup(void)
}
/**
- * drbd_congested() - Callback for the flusher thread
+ * drbd_congested() - Callback for pdflush
* @congested_data: User data
- * @bdi_bits: Bits the BDI flusher thread is currently interested in
+ * @bdi_bits: Bits pdflush is currently interested in
*
* Returns 1<<bdi_bits if we are congested, else 0.
*/
diff --git a/trunk/drivers/iommu/amd_iommu.c b/trunk/drivers/iommu/amd_iommu.c
if (dma_pdev->multifunction &&
!pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
swap_pci_ref(&dma_pdev,
@@ -310,28 +305,14 @@ static int iommu_init_device(struct device *dev)
PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
0)));
- /*
- * Devices on the root bus go through the iommu. If that's not us,
- * find the next upstream device and test ACS up to the root bus.
- * Finding the next device may require skipping virtual buses.
- */
while (!pci_is_root_bus(dma_pdev->bus)) {
- struct pci_bus *bus = dma_pdev->bus;
-
- while (!bus->self) {
- if (!pci_is_root_bus(bus))
- bus = bus->parent;
- else
- goto root_bus;
- }
-
- if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
+ if (pci_acs_path_enabled(dma_pdev->bus->self,
+ NULL, REQ_ACS_FLAGS))
break;
- swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
+ swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
}
-root_bus:
group = iommu_group_get(&dma_pdev->dev);
pci_dev_put(dma_pdev);
if (!group) {
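
The comment removed above documents the grouping walk: starting from the DMA source device, keep substituting the upstream bridge (skipping virtual buses, which have no bus->self) until either a bridge whose path has full ACS isolation is found or the root bus is reached. A hedged sketch of that walk, with reference counting and error handling elided:

/* Sketch only: mirrors the upstream ACS walk removed in the hunk above. */
static struct pci_dev *acs_walk_upstream(struct pci_dev *pdev, u16 acs_flags)
{
	while (!pci_is_root_bus(pdev->bus)) {
		struct pci_bus *bus = pdev->bus;

		/* Skip virtual buses, which have no bridge (bus->self). */
		while (!bus->self) {
			if (pci_is_root_bus(bus))
				return pdev;	/* hit the root: stop here */
			bus = bus->parent;
		}

		/* Stop once the path from this bridge upward is ACS-isolated. */
		if (pci_acs_path_enabled(bus->self, NULL, acs_flags))
			break;

		pdev = bus->self;	/* otherwise group with the bridge */
	}
	return pdev;
}
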
diff --git a/trunk/drivers/iommu/amd_iommu_init.c b/trunk/drivers/iommu/amd_iommu_init.c
index 0a2ea317120a..500e7f15f5c2 100644
--- a/trunk/drivers/iommu/amd_iommu_init.c
+++ b/trunk/drivers/iommu/amd_iommu_init.c
@@ -1131,6 +1131,9 @@ static int __init amd_iommu_init_pci(void)
break;
}
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
+
ret = amd_iommu_init_devices();
print_iommu_info();
@@ -1649,9 +1652,6 @@ static bool detect_ivrs(void)
early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
- /* Make sure ACS will be enabled during PCI probe */
- pci_request_acs();
-
return true;
}
diff --git a/trunk/drivers/iommu/exynos-iommu.c b/trunk/drivers/iommu/exynos-iommu.c
index 80bad32aa463..45350ff5e93c 100644
--- a/trunk/drivers/iommu/exynos-iommu.c
+++ b/trunk/drivers/iommu/exynos-iommu.c
@@ -732,9 +732,9 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain)
spin_lock_init(&priv->pgtablelock);
INIT_LIST_HEAD(&priv->clients);
- domain->geometry.aperture_start = 0;
- domain->geometry.aperture_end = ~0UL;
- domain->geometry.force_aperture = true;
+ dom->geometry.aperture_start = 0;
+ dom->geometry.aperture_end = ~0UL;
+ dom->geometry.force_aperture = true;
domain->priv = priv;
return 0;
diff --git a/trunk/drivers/iommu/intel-iommu.c b/trunk/drivers/iommu/intel-iommu.c
index 2297ec193eb4..7469b5346643 100644
--- a/trunk/drivers/iommu/intel-iommu.c
+++ b/trunk/drivers/iommu/intel-iommu.c
@@ -2008,7 +2008,6 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
if (!drhd) {
printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
pci_name(pdev));
- free_domain_mem(domain);
return NULL;
}
iommu = drhd->iommu;
@@ -4125,13 +4124,8 @@ static int intel_iommu_add_device(struct device *dev)
} else
dma_pdev = pci_dev_get(pdev);
- /* Account for quirked devices */
swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
- /*
- * If it's a multifunction device that does not support our
- * required ACS flags, add to the same group as function 0.
- */
if (dma_pdev->multifunction &&
!pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
swap_pci_ref(&dma_pdev,
@@ -4139,28 +4133,14 @@ static int intel_iommu_add_device(struct device *dev)
PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
0)));
- /*
- * Devices on the root bus go through the iommu. If that's not us,
- * find the next upstream device and test ACS up to the root bus.
- * Finding the next device may require skipping virtual buses.
- */
while (!pci_is_root_bus(dma_pdev->bus)) {
- struct pci_bus *bus = dma_pdev->bus;
-
- while (!bus->self) {
- if (!pci_is_root_bus(bus))
- bus = bus->parent;
- else
- goto root_bus;
- }
-
- if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
+ if (pci_acs_path_enabled(dma_pdev->bus->self,
+ NULL, REQ_ACS_FLAGS))
break;
- swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
+ swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
}
-root_bus:
group = iommu_group_get(&dma_pdev->dev);
pci_dev_put(dma_pdev);
if (!group) {
diff --git a/trunk/drivers/iommu/tegra-smmu.c b/trunk/drivers/iommu/tegra-smmu.c
index 2a4bb36bc688..4ba325ab6262 100644
--- a/trunk/drivers/iommu/tegra-smmu.c
+++ b/trunk/drivers/iommu/tegra-smmu.c
@@ -799,14 +799,14 @@ static void smmu_iommu_detach_dev(struct iommu_domain *domain,
goto out;
}
}
- dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
+ dev_err(smmu->dev, "Couldn't find %s\n", dev_name(c->dev));
out:
spin_unlock(&as->client_lock);
}
static int smmu_iommu_domain_init(struct iommu_domain *domain)
{
- int i, err = -EAGAIN;
+ int i, err = -ENODEV;
unsigned long flags;
struct smmu_as *as;
struct smmu_device *smmu = smmu_handle;
@@ -814,14 +814,11 @@ static int smmu_iommu_domain_init(struct iommu_domain *domain)
/* Look for a free AS with lock held */
for (i = 0; i < smmu->num_as; i++) {
as = &smmu->as[i];
-
- if (as->pdir_page)
- continue;
-
- err = alloc_pdir(as);
- if (!err)
- goto found;
-
+ if (!as->pdir_page) {
+ err = alloc_pdir(as);
+ if (!err)
+ goto found;
+ }
if (err != -EAGAIN)
break;
}
diff --git a/trunk/drivers/isdn/isdnloop/isdnloop.c b/trunk/drivers/isdn/isdnloop/isdnloop.c
index baf2686aa8eb..5405ec644db3 100644
--- a/trunk/drivers/isdn/isdnloop/isdnloop.c
+++ b/trunk/drivers/isdn/isdnloop/isdnloop.c
@@ -16,6 +16,7 @@
#include
#include "isdnloop.h"
+static char *revision = "$Revision: 1.11.6.7 $";
static char *isdnloop_id = "loop0";
MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card");
@@ -1493,6 +1494,17 @@ isdnloop_addcard(char *id1)
static int __init
isdnloop_init(void)
{
+ char *p;
+ char rev[10];
+
+ if ((p = strchr(revision, ':'))) {
+ strcpy(rev, p + 1);
+ p = strchr(rev, '$');
+ *p = 0;
+ } else
+ strcpy(rev, " ??? ");
+ printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev);
+
if (isdnloop_id)
return (isdnloop_addcard(isdnloop_id));
diff --git a/trunk/drivers/isdn/mISDN/layer2.c b/trunk/drivers/isdn/mISDN/layer2.c
index 949cabb88f1c..0dc8abca1407 100644
--- a/trunk/drivers/isdn/mISDN/layer2.c
+++ b/trunk/drivers/isdn/mISDN/layer2.c
@@ -2222,7 +2222,7 @@ create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
InitWin(l2);
l2->l2m.fsm = &l2fsm;
if (test_bit(FLG_LAPB, &l2->flag) ||
- test_bit(FLG_FIXED_TEI, &l2->flag) ||
+ test_bit(FLG_PTP, &l2->flag) ||
test_bit(FLG_LAPD_NET, &l2->flag))
l2->l2m.state = ST_L2_4;
else
diff --git a/trunk/drivers/mtd/maps/uclinux.c b/trunk/drivers/mtd/maps/uclinux.c
index c3bb304eca07..cfff454f628b 100644
--- a/trunk/drivers/mtd/maps/uclinux.c
+++ b/trunk/drivers/mtd/maps/uclinux.c
@@ -19,13 +19,14 @@
#include
#include
#include
-#include
/****************************************************************************/
+extern char _ebss;
+
struct map_info uclinux_ram_map = {
.name = "RAM",
- .phys = (unsigned long)__bss_stop,
+ .phys = (unsigned long)&_ebss,
.size = 0,
};
diff --git a/trunk/drivers/net/appletalk/cops.c b/trunk/drivers/net/appletalk/cops.c
index cff6f023c03a..545c09ed9079 100644
--- a/trunk/drivers/net/appletalk/cops.c
+++ b/trunk/drivers/net/appletalk/cops.c
@@ -996,7 +996,9 @@ static int __init cops_module_init(void)
printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
cardname);
cops_dev = cops_probe(-1);
- return PTR_RET(cops_dev);
+ if (IS_ERR(cops_dev))
+ return PTR_ERR(cops_dev);
+ return 0;
}
static void __exit cops_module_exit(void)
diff --git a/trunk/drivers/net/appletalk/ltpc.c b/trunk/drivers/net/appletalk/ltpc.c
index b5782cdf0bca..0910dce3996d 100644
--- a/trunk/drivers/net/appletalk/ltpc.c
+++ b/trunk/drivers/net/appletalk/ltpc.c
@@ -1243,7 +1243,9 @@ static int __init ltpc_module_init(void)
"ltpc: Autoprobing is not recommended for modules\n");
dev_ltpc = ltpc_probe();
- return PTR_RET(dev_ltpc);
+ if (IS_ERR(dev_ltpc))
+ return PTR_ERR(dev_ltpc);
+ return 0;
}
module_init(ltpc_module_init);
#endif
diff --git a/trunk/drivers/net/cris/eth_v10.c b/trunk/drivers/net/cris/eth_v10.c
index 021d69c5d9bc..f0c8bd54ce29 100644
--- a/trunk/drivers/net/cris/eth_v10.c
+++ b/trunk/drivers/net/cris/eth_v10.c
@@ -1712,7 +1712,7 @@ e100_set_network_leds(int active)
static void
e100_netpoll(struct net_device* netdev)
{
- e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev);
+ e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL);
}
#endif
diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 62f754bd0dfe..734fd87cd990 100644
--- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2485,7 +2485,6 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
break;
default:
- kfree(new_cmd);
BNX2X_ERR("Unknown command: %d\n", cmd);
return -EINVAL;
}
diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_main.c b/trunk/drivers/net/ethernet/emulex/benet/be_main.c
index 90a903d83d87..c60de89b6669 100644
--- a/trunk/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/trunk/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1948,7 +1948,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
if (adapter->num_rx_qs != MAX_RX_QS)
dev_info(&adapter->pdev->dev,
- "Created only %d receive queues\n", adapter->num_rx_qs);
+ "Created only %d receive queues", adapter->num_rx_qs);
return 0;
}
diff --git a/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c b/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c
index ba994fb4cec6..5e84eaac48c1 100644
--- a/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -254,14 +254,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
- /*
- * Check for invalid size
- */
- if ((hw->mac.type == e1000_82576) && (size > 15)) {
- pr_notice("The NVM size is not valid, defaulting to 32K\n");
- size = 15;
- }
-
nvm->word_size = 1 << size;
if (hw->mac.type < e1000_i210) {
nvm->opcode_bits = 8;
@@ -289,6 +281,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
} else
nvm->type = e1000_nvm_flash_hw;
+ /*
+ * Check for invalid size
+ */
+ if ((hw->mac.type == e1000_82576) && (size > 15)) {
+ pr_notice("The NVM size is not valid, defaulting to 32K\n");
+ size = 15;
+ }
+
/* NVM Function Pointers */
switch (hw->mac.type) {
case e1000_82580:
diff --git a/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c b/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 90550f5e3dd9..a19c84cad0e9 100644
--- a/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -209,8 +209,8 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed */
if (igb_check_reset_block(hw)) {
- dev_err(&adapter->pdev->dev,
- "Cannot change link characteristics when SoL/IDER is active.\n");
+ dev_err(&adapter->pdev->dev, "Cannot change link "
+ "characteristics when SoL/IDER is active.\n");
return -EINVAL;
}
@@ -1089,8 +1089,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
wr32(reg, (_test[pat] & write));
val = rd32(reg) & mask;
if (val != (_test[pat] & write & mask)) {
- dev_err(&adapter->pdev->dev,
- "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
+ dev_err(&adapter->pdev->dev, "pattern test reg %04X "
+ "failed: got 0x%08X expected 0x%08X\n",
reg, val, (_test[pat] & write & mask));
*data = reg;
return 1;
@@ -1108,8 +1108,8 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
wr32(reg, write & mask);
val = rd32(reg);
if ((write & mask) != (val & mask)) {
- dev_err(&adapter->pdev->dev,
- "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
+ dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
+ " got 0x%08X expected 0x%08X\n", reg,
(val & mask), (write & mask));
*data = reg;
return 1;
@@ -1171,9 +1171,8 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_STATUS, toggle);
after = rd32(E1000_STATUS) & toggle;
if (value != after) {
- dev_err(&adapter->pdev->dev,
- "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
- after, value);
+ dev_err(&adapter->pdev->dev, "failed STATUS register test "
+ "got: 0x%08X expected: 0x%08X\n", after, value);
*data = 1;
return 1;
}
@@ -1778,14 +1777,16 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
* sessions are active */
if (igb_check_reset_block(&adapter->hw)) {
dev_err(&adapter->pdev->dev,
- "Cannot do PHY loopback test when SoL/IDER is active.\n");
+ "Cannot do PHY loopback test "
+ "when SoL/IDER is active.\n");
*data = 0;
goto out;
}
if ((adapter->hw.mac.type == e1000_i210)
- || (adapter->hw.mac.type == e1000_i211)) {
+ || (adapter->hw.mac.type == e1000_i210)) {
dev_err(&adapter->pdev->dev,
- "Loopback test not supported on this part at this time.\n");
+ "Loopback test not supported "
+ "on this part at this time.\n");
*data = 0;
goto out;
}
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 5aba5ecdf1e2..f32e70300770 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -614,8 +614,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* If source MAC is equal to our own MAC and not performing
* the selftest or flb disabled - drop the packet */
if (s_mac == priv->mac &&
- !((dev->features & NETIF_F_LOOPBACK) ||
- priv->validate_loopback))
+ (!(dev->features & NETIF_F_LOOPBACK) ||
+ !priv->validate_loopback))
goto next;
/*
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 10bba09c44ea..019d856b1334 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -164,6 +164,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
ring->cons = 0xffffffff;
ring->last_nr_txbb = 1;
ring->poll_cnt = 0;
+ ring->blocked = 0;
memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
memset(ring->buf, 0, ring->buf_size);
@@ -364,13 +365,14 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
ring->cons += txbbs_skipped;
netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
- /*
- * Wakeup Tx queue if this stopped, and at least 1 packet
- * was completed
- */
- if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
- netif_tx_wake_queue(ring->tx_queue);
- priv->port_stats.wake_queue++;
+ /* Wakeup Tx queue if this ring stopped it */
+ if (unlikely(ring->blocked)) {
+ if ((u32) (ring->prod - ring->cons) <=
+ ring->size - HEADROOM - MAX_DESC_TXBBS) {
+ ring->blocked = 0;
+ netif_tx_wake_queue(ring->tx_queue);
+ priv->port_stats.wake_queue++;
+ }
}
}
@@ -590,6 +592,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->size - HEADROOM - MAX_DESC_TXBBS)) {
/* every full Tx ring stops queue */
netif_tx_stop_queue(ring->tx_queue);
+ ring->blocked = 1;
priv->port_stats.queue_stopped++;
return NETDEV_TX_BUSY;
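
The ring->blocked logic restored above wakes the queue only once in-flight descriptors fall back below the stop threshold: the ring counts as full when prod - cons exceeds size - HEADROOM - MAX_DESC_TXBBS. A small worked example of that occupancy test (the sizes below are made up; the real values come from the mlx4 driver):

#include <stdio.h>

#define RING_SIZE	1024		/* illustrative values only */
#define HEADROOM	4
#define MAX_DESC_TXBBS	16

static int ring_has_room(unsigned int prod, unsigned int cons)
{
	/* Unsigned subtraction still works after prod wraps past UINT_MAX. */
	return prod - cons <= RING_SIZE - HEADROOM - MAX_DESC_TXBBS;
}

int main(void)
{
	printf("%d\n", ring_has_room(2000, 1000));	/* 1000 in flight -> room */
	printf("%d\n", ring_has_room(2010, 1000));	/* 1010 in flight -> still full */
	return 0;
}
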
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c
index 827b72dfce99..48d0e90194cb 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -157,6 +157,9 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
"on this HCA, aborting.\n");
return -EINVAL;
}
+ if (port_type[i] == MLX4_PORT_TYPE_ETH &&
+ port_type[i + 1] == MLX4_PORT_TYPE_IB)
+ return -EINVAL;
}
}
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 9d27e42264e2..5f1ab105debc 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -248,6 +248,7 @@ struct mlx4_en_tx_ring {
u32 doorbell_qpn;
void *buf;
u16 poll_cnt;
+ int blocked;
struct mlx4_en_tx_info *tx_info;
u8 *bounce_buf;
u32 last_nr_txbb;
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/sense.c b/trunk/drivers/net/ethernet/mellanox/mlx4/sense.c
index 34ee09bae36e..802498293528 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/sense.c
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/sense.c
@@ -80,6 +80,20 @@ void mlx4_do_sense_ports(struct mlx4_dev *dev,
stype[i - 1] = defaults[i - 1];
}
+ /*
+ * Adjust port configuration:
+ * If port 1 sensed nothing and port 2 is IB, set both as IB
+ * If port 2 sensed nothing and port 1 is Eth, set both as Eth
+ */
+ if (stype[0] == MLX4_PORT_TYPE_ETH) {
+ for (i = 1; i < dev->caps.num_ports; i++)
+ stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
+ }
+ if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) {
+ for (i = 0; i < dev->caps.num_ports - 1; i++)
+ stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
+ }
+
/*
* If sensed nothing, remain in current configuration.
*/
diff --git a/trunk/drivers/net/ethernet/sfc/efx.c b/trunk/drivers/net/ethernet/sfc/efx.c
index 65a8d49106a4..70554a1b2b02 100644
--- a/trunk/drivers/net/ethernet/sfc/efx.c
+++ b/trunk/drivers/net/ethernet/sfc/efx.c
@@ -1503,11 +1503,6 @@ static int efx_probe_all(struct efx_nic *efx)
goto fail2;
}
- BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
- if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
- rc = -EINVAL;
- goto fail3;
- }
efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
rc = efx_probe_filters(efx);
@@ -2075,7 +2070,6 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &efx_netdev_ops;
SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
- net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
rtnl_lock();
diff --git a/trunk/drivers/net/ethernet/sfc/efx.h b/trunk/drivers/net/ethernet/sfc/efx.h
index 70755c97251a..be8f9158a714 100644
--- a/trunk/drivers/net/ethernet/sfc/efx.h
+++ b/trunk/drivers/net/ethernet/sfc/efx.h
@@ -30,7 +30,6 @@ extern netdev_tx_t
efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
-extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
/* RX */
extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -53,15 +52,10 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_MAX_EVQ_SIZE 16384UL
#define EFX_MIN_EVQ_SIZE 512UL
-/* Maximum number of TCP segments we support for soft-TSO */
-#define EFX_TSO_MAX_SEGS 100
-
-/* The smallest [rt]xq_entries that the driver supports. RX minimum
- * is a bit arbitrary. For TX, we must have space for at least 2
- * TSO skbs.
- */
-#define EFX_RXQ_MIN_ENT 128U
-#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
+/* The smallest [rt]xq_entries that the driver supports. Callers of
+ * efx_wake_queue() assume that they can subsequently send at least one
+ * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
+#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
/* Filters */
extern int efx_probe_filters(struct efx_nic *efx);
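For a sense of scale: with 4 KiB pages MAX_SKB_FRAGS is typically 17, so the macro evaluates to roundup_pow_of_two(2 * 3 * 17) = roundup_pow_of_two(102) = 128, i.e. neither RX nor TX rings may be configured with fewer than 128 entries on such a build. The exact minimum depends on the kernel configuration, since MAX_SKB_FRAGS varies with page size.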
diff --git a/trunk/drivers/net/ethernet/sfc/ethtool.c b/trunk/drivers/net/ethernet/sfc/ethtool.c
index 8cba2df82b18..10536f93b561 100644
--- a/trunk/drivers/net/ethernet/sfc/ethtool.c
+++ b/trunk/drivers/net/ethernet/sfc/ethtool.c
@@ -680,27 +680,21 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
struct ethtool_ringparam *ring)
{
struct efx_nic *efx = netdev_priv(net_dev);
- u32 txq_entries;
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
ring->tx_pending > EFX_MAX_DMAQ_SIZE)
return -EINVAL;
- if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
+ if (ring->rx_pending < EFX_MIN_RING_SIZE ||
+ ring->tx_pending < EFX_MIN_RING_SIZE) {
netif_err(efx, drv, efx->net_dev,
- "RX queues cannot be smaller than %u\n",
- EFX_RXQ_MIN_ENT);
+ "TX and RX queues cannot be smaller than %ld\n",
+ EFX_MIN_RING_SIZE);
return -EINVAL;
}
- txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
- if (txq_entries != ring->tx_pending)
- netif_warn(efx, drv, efx->net_dev,
- "increasing TX queue size to minimum of %u\n",
- txq_entries);
-
- return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
+ return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
}
static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
diff --git a/trunk/drivers/net/ethernet/sfc/tx.c b/trunk/drivers/net/ethernet/sfc/tx.c
index 18713436b443..9b225a7769f7 100644
--- a/trunk/drivers/net/ethernet/sfc/tx.c
+++ b/trunk/drivers/net/ethernet/sfc/tx.c
@@ -119,25 +119,6 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
return len;
}
-unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
-{
- /* Header and payload descriptor for each output segment, plus
- * one for every input fragment boundary within a segment
- */
- unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
-
- /* Possibly one more per segment for the alignment workaround */
- if (EFX_WORKAROUND_5391(efx))
- max_descs += EFX_TSO_MAX_SEGS;
-
- /* Possibly more for PCIe page boundaries within input fragments */
- if (PAGE_SIZE > EFX_PAGE_SIZE)
- max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
- DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
-
- return max_descs;
-}
-
/*
* Add a socket buffer to a TX queue
*
diff --git a/trunk/drivers/net/ethernet/xscale/ixp4xx_eth.c b/trunk/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 98934bdf6acf..482648fcf0b6 100644
--- a/trunk/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/trunk/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1003,7 +1003,6 @@ static int ixp4xx_nway_reset(struct net_device *dev)
}
int ixp46x_phc_index = -1;
-EXPORT_SYMBOL_GPL(ixp46x_phc_index);
static int ixp4xx_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
diff --git a/trunk/drivers/net/hyperv/netvsc.c b/trunk/drivers/net/hyperv/netvsc.c
index 4a1a5f58fa73..6cee2917eb02 100644
--- a/trunk/drivers/net/hyperv/netvsc.c
+++ b/trunk/drivers/net/hyperv/netvsc.c
@@ -383,6 +383,13 @@ int netvsc_device_remove(struct hv_device *device)
unsigned long flags;
net_device = hv_get_drvdata(device);
+ spin_lock_irqsave(&device->channel->inbound_lock, flags);
+ net_device->destroy = true;
+ spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
+
+ /* Wait for all send completions */
+ wait_event(net_device->wait_drain,
+ atomic_read(&net_device->num_outstanding_sends) == 0);
netvsc_disconnect_vsp(net_device);
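The wait_event() above can only complete if the send-completion path drops num_outstanding_sends and wakes wait_drain once the count hits zero while destroy is set. A rough sketch of that counterpart, assumed rather than quoted from the driver:

        /* Assumed shape of the completion-side counterpart. */
        static void netvsc_send_completion_sketch(struct netvsc_device *net_device)
        {
                atomic_dec(&net_device->num_outstanding_sends);

                /* Let netvsc_device_remove() finish once we are drained. */
                if (net_device->destroy &&
                    atomic_read(&net_device->num_outstanding_sends) == 0)
                        wake_up(&net_device->wait_drain);
        }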
diff --git a/trunk/drivers/net/hyperv/rndis_filter.c b/trunk/drivers/net/hyperv/rndis_filter.c
index 1e88a1095934..e5d6146937fa 100644
--- a/trunk/drivers/net/hyperv/rndis_filter.c
+++ b/trunk/drivers/net/hyperv/rndis_filter.c
@@ -718,9 +718,6 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
{
struct rndis_request *request;
struct rndis_halt_request *halt;
- struct netvsc_device *nvdev = dev->net_dev;
- struct hv_device *hdev = nvdev->dev;
- ulong flags;
/* Attempt to do a rndis device halt */
request = get_rndis_request(dev, RNDIS_MSG_HALT,
@@ -738,14 +735,6 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
dev->state = RNDIS_DEV_UNINITIALIZED;
cleanup:
- spin_lock_irqsave(&hdev->channel->inbound_lock, flags);
- nvdev->destroy = true;
- spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
-
- /* Wait for all send completions */
- wait_event(nvdev->wait_drain,
- atomic_read(&nvdev->num_outstanding_sends) == 0);
-
if (request)
put_rndis_request(dev, request);
return;
diff --git a/trunk/drivers/net/phy/mdio-mux-gpio.c b/trunk/drivers/net/phy/mdio-mux-gpio.c
index eefe49e8713c..e0cc4ef33dee 100644
--- a/trunk/drivers/net/phy/mdio-mux-gpio.c
+++ b/trunk/drivers/net/phy/mdio-mux-gpio.c
@@ -101,6 +101,7 @@ static int __devinit mdio_mux_gpio_probe(struct platform_device *pdev)
n--;
gpio_free(s->gpio[n]);
}
+ devm_kfree(&pdev->dev, s);
return r;
}
diff --git a/trunk/drivers/net/usb/cdc_ncm.c b/trunk/drivers/net/usb/cdc_ncm.c
index 4cd582a4f625..f4ce5957df32 100644
--- a/trunk/drivers/net/usb/cdc_ncm.c
+++ b/trunk/drivers/net/usb/cdc_ncm.c
@@ -1225,26 +1225,6 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long) &wwan_info,
},
- /* Dell branded MBM devices like DW5550 */
- { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
- | USB_DEVICE_ID_MATCH_VENDOR,
- .idVendor = 0x413c,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
- .driver_info = (unsigned long) &wwan_info,
- },
-
- /* Toshiba branded MBM devices */
- { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
- | USB_DEVICE_ID_MATCH_VENDOR,
- .idVendor = 0x0930,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
- .driver_info = (unsigned long) &wwan_info,
- },
-
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw.c b/trunk/drivers/net/wireless/ath/ath9k/hw.c
index 60b6a9daff7e..cfa91ab7acf8 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/hw.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/hw.c
@@ -730,7 +730,6 @@ int ath9k_hw_init(struct ath_hw *ah)
case AR9300_DEVID_QCA955X:
case AR9300_DEVID_AR9580:
case AR9300_DEVID_AR9462:
- case AR9485_DEVID_AR1111:
break;
default:
if (common->bus_ops->ath_bus_type == ATH_USB)
diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw.h b/trunk/drivers/net/wireless/ath/ath9k/hw.h
index ce7332c64efb..dd0c146d81dc 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/hw.h
+++ b/trunk/drivers/net/wireless/ath/ath9k/hw.h
@@ -49,7 +49,6 @@
#define AR9300_DEVID_AR9462 0x0034
#define AR9300_DEVID_AR9330 0x0035
#define AR9300_DEVID_QCA955X 0x0038
-#define AR9485_DEVID_AR1111 0x0037
#define AR5416_AR9100_DEVID 0x000b
diff --git a/trunk/drivers/net/wireless/ath/ath9k/pci.c b/trunk/drivers/net/wireless/ath/ath9k/pci.c
index d455de9162ec..87b89d55e637 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/pci.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/pci.c
@@ -37,7 +37,6 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
{ PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
- { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
{ 0 }
};
diff --git a/trunk/drivers/net/wireless/b43/main.c b/trunk/drivers/net/wireless/b43/main.c
index a140165dfee0..b80352b308d5 100644
--- a/trunk/drivers/net/wireless/b43/main.c
+++ b/trunk/drivers/net/wireless/b43/main.c
@@ -2719,37 +2719,32 @@ static int b43_gpio_init(struct b43_wldev *dev)
if (dev->dev->chip_id == 0x4301) {
mask |= 0x0060;
set |= 0x0060;
- } else if (dev->dev->chip_id == 0x5354) {
- /* Don't allow overtaking buttons GPIOs */
- set &= 0x2; /* 0x2 is LED GPIO on BCM5354 */
}
-
+ if (dev->dev->chip_id == 0x5354)
+ set &= 0xff02;
if (0 /* FIXME: conditional unknown */ ) {
b43_write16(dev, B43_MMIO_GPIO_MASK,
b43_read16(dev, B43_MMIO_GPIO_MASK)
| 0x0100);
- /* BT Coexistance Input */
- mask |= 0x0080;
- set |= 0x0080;
- /* BT Coexistance Out */
- mask |= 0x0100;
- set |= 0x0100;
+ mask |= 0x0180;
+ set |= 0x0180;
}
if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) {
- /* PA is controlled by gpio 9, let ucode handle it */
b43_write16(dev, B43_MMIO_GPIO_MASK,
b43_read16(dev, B43_MMIO_GPIO_MASK)
| 0x0200);
mask |= 0x0200;
set |= 0x0200;
}
+ if (dev->dev->core_rev >= 2)
+ mask |= 0x0010; /* FIXME: This is redundant. */
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA:
bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL,
(bcma_cc_read32(&dev->dev->bdev->bus->drv_cc,
- BCMA_CC_GPIOCTL) & ~mask) | set);
+ BCMA_CC_GPIOCTL) & mask) | set);
break;
#endif
#ifdef CONFIG_B43_SSB
@@ -2758,7 +2753,7 @@ static int b43_gpio_init(struct b43_wldev *dev)
if (gpiodev)
ssb_write32(gpiodev, B43_GPIO_CONTROL,
(ssb_read32(gpiodev, B43_GPIO_CONTROL)
- & ~mask) | set);
+ & mask) | set);
break;
#endif
}
diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 7ed7d7577024..9a4c63f927cb 100644
--- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -382,7 +382,9 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
{
struct brcms_c_info *wlc = wlc_cm->wlc;
struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
+ const struct ieee80211_reg_rule *reg_rule;
struct txpwr_limits txpwr;
+ int ret;
brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
@@ -391,7 +393,8 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
);
/* set or restore gmode as required by regulatory */
- if (ch->flags & IEEE80211_CHAN_NO_OFDM)
+ ret = freq_reg_info(wlc->wiphy, ch->center_freq, 0, &reg_rule);
+ if (!ret && (reg_rule->flags & NL80211_RRF_NO_OFDM))
brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
else
brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 192ad5c1fcc8..9e79d47e077f 100644
--- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -121,8 +121,7 @@ static struct ieee80211_channel brcms_2ghz_chantable[] = {
IEEE80211_CHAN_NO_HT40PLUS),
CHAN2GHZ(14, 2484,
IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS |
- IEEE80211_CHAN_NO_OFDM)
+ IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS)
};
static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = {
diff --git a/trunk/drivers/net/wireless/libertas/cfg.c b/trunk/drivers/net/wireless/libertas/cfg.c
index 1c10b542ab23..eb5de800ed90 100644
--- a/trunk/drivers/net/wireless/libertas/cfg.c
+++ b/trunk/drivers/net/wireless/libertas/cfg.c
@@ -1254,7 +1254,6 @@ static int lbs_associate(struct lbs_private *priv,
netif_tx_wake_all_queues(priv->dev);
}
- kfree(cmd);
done:
lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
diff --git a/trunk/drivers/net/wireless/libertas/if_sdio.c b/trunk/drivers/net/wireless/libertas/if_sdio.c
index e970897f6ab5..76caebaa4397 100644
--- a/trunk/drivers/net/wireless/libertas/if_sdio.c
+++ b/trunk/drivers/net/wireless/libertas/if_sdio.c
@@ -1314,7 +1314,6 @@ static void if_sdio_remove(struct sdio_func *func)
kfree(packet);
}
- kfree(card);
lbs_deb_leave(LBS_DEB_SDIO);
}
diff --git a/trunk/drivers/net/wireless/libertas/main.c b/trunk/drivers/net/wireless/libertas/main.c
index fe1ea43c5149..58048189bd24 100644
--- a/trunk/drivers/net/wireless/libertas/main.c
+++ b/trunk/drivers/net/wireless/libertas/main.c
@@ -571,10 +571,7 @@ static int lbs_thread(void *data)
netdev_info(dev, "Timeout submitting command 0x%04x\n",
le16_to_cpu(cmdnode->cmdbuf->command));
lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
-
- /* Reset card, but only when it isn't in the process
- * of being shutdown anyway. */
- if (!dev->dismantle && priv->reset_card)
+ if (priv->reset_card)
priv->reset_card(priv);
}
priv->cmd_timed_out = 0;
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800lib.c b/trunk/drivers/net/wireless/rt2x00/rt2800lib.c
index cb8c2aca54e4..88455b1b9fe0 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -221,67 +221,6 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
mutex_unlock(&rt2x00dev->csr_mutex);
}
-static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
-{
- u32 reg;
- int i, count;
-
- rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
- if (rt2x00_get_field32(reg, WLAN_EN))
- return 0;
-
- rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
- rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
- rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
- rt2x00_set_field32(&reg, WLAN_EN, 1);
- rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
-
- udelay(REGISTER_BUSY_DELAY);
-
- count = 0;
- do {
- /*
- * Check PLL_LD & XTAL_RDY.
- */
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
- if (rt2x00_get_field32(reg, PLL_LD) &&
- rt2x00_get_field32(reg, XTAL_RDY))
- break;
- udelay(REGISTER_BUSY_DELAY);
- }
-
- if (i >= REGISTER_BUSY_COUNT) {
-
- if (count >= 10)
- return -EIO;
-
- rt2800_register_write(rt2x00dev, 0x58, 0x018);
- udelay(REGISTER_BUSY_DELAY);
- rt2800_register_write(rt2x00dev, 0x58, 0x418);
- udelay(REGISTER_BUSY_DELAY);
- rt2800_register_write(rt2x00dev, 0x58, 0x618);
- udelay(REGISTER_BUSY_DELAY);
- count++;
- } else {
- count = 0;
- }
-
- rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
- rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
- rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
- rt2x00_set_field32(&reg, WLAN_RESET, 1);
- rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
- udelay(10);
- rt2x00_set_field32(&reg, WLAN_RESET, 0);
- rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
- udelay(10);
- rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
- } while (count != 0);
-
- return 0;
-}
-
void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
const u8 command, const u8 token,
const u8 arg0, const u8 arg1)
@@ -461,13 +400,6 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
{
unsigned int i;
u32 reg;
- int retval;
-
- if (rt2x00_rt(rt2x00dev, RT3290)) {
- retval = rt2800_enable_wlan_rt3290(rt2x00dev);
- if (retval)
- return -EBUSY;
- }
/*
* If driver doesn't wake up firmware here,
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800pci.c b/trunk/drivers/net/wireless/rt2x00/rt2800pci.c
index 98aa426a3564..235376e9cb04 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -980,6 +980,66 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
return rt2800_validate_eeprom(rt2x00dev);
}
+static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
+{
+ u32 reg;
+ int i, count;
+
+ rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ if (rt2x00_get_field32(reg, WLAN_EN))
+ return 0;
+
+ rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
+ rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
+ rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
+ rt2x00_set_field32(&reg, WLAN_EN, 1);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+
+ udelay(REGISTER_BUSY_DELAY);
+
+ count = 0;
+ do {
+ /*
+ * Check PLL_LD & XTAL_RDY.
+ */
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
+ if (rt2x00_get_field32(reg, PLL_LD) &&
+ rt2x00_get_field32(reg, XTAL_RDY))
+ break;
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ if (i >= REGISTER_BUSY_COUNT) {
+
+ if (count >= 10)
+ return -EIO;
+
+ rt2800_register_write(rt2x00dev, 0x58, 0x018);
+ udelay(REGISTER_BUSY_DELAY);
+ rt2800_register_write(rt2x00dev, 0x58, 0x418);
+ udelay(REGISTER_BUSY_DELAY);
+ rt2800_register_write(rt2x00dev, 0x58, 0x618);
+ udelay(REGISTER_BUSY_DELAY);
+ count++;
+ } else {
+ count = 0;
+ }
+
+ rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
+ rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
+ rt2x00_set_field32(&reg, WLAN_RESET, 1);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+ udelay(10);
+ rt2x00_set_field32(&reg, WLAN_RESET, 0);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+ udelay(10);
+ rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
+ } while (count != 0);
+
+ return 0;
+}
static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
{
int retval;
@@ -1002,6 +1062,17 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
if (retval)
return retval;
+ /*
+ * In probe phase call rt2800_enable_wlan_rt3290 to enable wlan
+ * clk for rt3290. That avoid the MCU fail in start phase.
+ */
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ retval = rt2800_enable_wlan_rt3290(rt2x00dev);
+
+ if (retval)
+ return retval;
+ }
+
/*
* This device has multiple filters for control frames
* and has a separate filter for PS Poll frames.
diff --git a/trunk/drivers/pinctrl/pinctrl-imx23.c b/trunk/drivers/pinctrl/pinctrl-imx23.c
index 3674d877ed7c..75d3eff94296 100644
--- a/trunk/drivers/pinctrl/pinctrl-imx23.c
+++ b/trunk/drivers/pinctrl/pinctrl-imx23.c
@@ -292,7 +292,7 @@ static int __init imx23_pinctrl_init(void)
{
return platform_driver_register(&imx23_pinctrl_driver);
}
-postcore_initcall(imx23_pinctrl_init);
+arch_initcall(imx23_pinctrl_init);
static void __exit imx23_pinctrl_exit(void)
{
diff --git a/trunk/drivers/pinctrl/pinctrl-imx28.c b/trunk/drivers/pinctrl/pinctrl-imx28.c
index 0f5b2122b1ba..b973026811a2 100644
--- a/trunk/drivers/pinctrl/pinctrl-imx28.c
+++ b/trunk/drivers/pinctrl/pinctrl-imx28.c
@@ -408,7 +408,7 @@ static int __init imx28_pinctrl_init(void)
{
return platform_driver_register(&imx28_pinctrl_driver);
}
-postcore_initcall(imx28_pinctrl_init);
+arch_initcall(imx28_pinctrl_init);
static void __exit imx28_pinctrl_exit(void)
{
diff --git a/trunk/drivers/pinctrl/pinctrl-nomadik-db8500.c b/trunk/drivers/pinctrl/pinctrl-nomadik-db8500.c
index 5f3e9d0221e1..6f99769c6733 100644
--- a/trunk/drivers/pinctrl/pinctrl-nomadik-db8500.c
+++ b/trunk/drivers/pinctrl/pinctrl-nomadik-db8500.c
@@ -766,7 +766,7 @@ DB8500_FUNC_GROUPS(ipgpio, "ipgpio0_a_1", "ipgpio1_a_1", "ipgpio7_b_1",
DB8500_FUNC_GROUPS(msp2, "msp2sck_a_1", "msp2_a_1");
DB8500_FUNC_GROUPS(mc4, "mc4_a_1", "mc4rstn_c_1");
DB8500_FUNC_GROUPS(mc1, "mc1_a_1", "mc1dir_a_1");
-DB8500_FUNC_GROUPS(hsi, "hsir_a_1", "hsit_a_1", "hsit_a_2");
+DB8500_FUNC_GROUPS(hsi, "hsir1_a_1", "hsit1_a_1", "hsit_a_2");
DB8500_FUNC_GROUPS(clkout, "clkout_a_1", "clkout_a_2", "clkout_c_1");
DB8500_FUNC_GROUPS(usb, "usb_a_1");
DB8500_FUNC_GROUPS(trig, "trig_b_1");
diff --git a/trunk/drivers/pinctrl/pinctrl-nomadik.c b/trunk/drivers/pinctrl/pinctrl-nomadik.c
index ec6ac501b23a..53b0d49a7a1c 100644
--- a/trunk/drivers/pinctrl/pinctrl-nomadik.c
+++ b/trunk/drivers/pinctrl/pinctrl-nomadik.c
@@ -1731,6 +1731,7 @@ static int __devinit nmk_pinctrl_probe(struct platform_device *pdev)
for (i = 0; i < npct->soc->gpio_num_ranges; i++) {
if (!nmk_gpio_chips[i]) {
dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
+ devm_kfree(&pdev->dev, npct);
return -EPROBE_DEFER;
}
npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[i]->chip;
diff --git a/trunk/drivers/pinctrl/pinctrl-sirf.c b/trunk/drivers/pinctrl/pinctrl-sirf.c
index 7fca6ce5952b..2aae8a8978e9 100644
--- a/trunk/drivers/pinctrl/pinctrl-sirf.c
+++ b/trunk/drivers/pinctrl/pinctrl-sirf.c
@@ -1217,6 +1217,7 @@ static int __devinit sirfsoc_pinmux_probe(struct platform_device *pdev)
iounmap(spmx->gpio_virtbase);
out_no_gpio_remap:
platform_set_drvdata(pdev, NULL);
+ devm_kfree(&pdev->dev, spmx);
return ret;
}
diff --git a/trunk/drivers/pinctrl/pinctrl-u300.c b/trunk/drivers/pinctrl/pinctrl-u300.c
index 309f5b9a70ec..a7ad8c112d91 100644
--- a/trunk/drivers/pinctrl/pinctrl-u300.c
+++ b/trunk/drivers/pinctrl/pinctrl-u300.c
@@ -1121,8 +1121,10 @@ static int __devinit u300_pmx_probe(struct platform_device *pdev)
upmx->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
+ if (!res) {
+ ret = -ENOENT;
+ goto out_no_resource;
+ }
upmx->phybase = res->start;
upmx->physize = resource_size(res);
@@ -1163,6 +1165,8 @@ static int __devinit u300_pmx_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
out_no_memregion:
release_mem_region(upmx->phybase, upmx->physize);
+out_no_resource:
+ devm_kfree(&pdev->dev, upmx);
return ret;
}
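As in the nomadik and sirf hunks above, the probe error path here frees the devm-allocated private data explicitly instead of relying on devres teardown when probe fails. A small sketch of the pattern with hypothetical names (example_probe and struct example_priv are not real symbols):

        /* Hypothetical sketch: explicit devm_kfree() on an early error path. */
        struct example_priv { int placeholder; };

        static int example_probe(struct platform_device *pdev)
        {
                struct example_priv *priv;
                struct resource *res;

                priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
                if (!priv)
                        return -ENOMEM;

                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                if (!res) {
                        /* devres would also release this when probe fails;
                         * the explicit free matches the style of these hunks. */
                        devm_kfree(&pdev->dev, priv);
                        return -ENOENT;
                }

                return 0;
        }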
diff --git a/trunk/drivers/usb/early/ehci-dbgp.c b/trunk/drivers/usb/early/ehci-dbgp.c
index 89dcf155d57e..ee0ebacf8227 100644
--- a/trunk/drivers/usb/early/ehci-dbgp.c
+++ b/trunk/drivers/usb/early/ehci-dbgp.c
@@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void)
writel(FLAG_CF, &ehci_regs->configured_flag);
/* Wait until the controller is no longer halted */
- loop = 1000;
+ loop = 10;
do {
status = readl(&ehci_regs->status);
if (!(status & STS_HALT))
diff --git a/trunk/drivers/zorro/zorro.c b/trunk/drivers/zorro/zorro.c
index 858c9714b2f3..181fa8158a8b 100644
--- a/trunk/drivers/zorro/zorro.c
+++ b/trunk/drivers/zorro/zorro.c
@@ -37,6 +37,7 @@ struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
*/
struct zorro_bus {
+ struct list_head devices; /* list of devices on this bus */
struct device dev;
};
@@ -135,6 +136,7 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
if (!bus)
return -ENOMEM;
+ INIT_LIST_HEAD(&bus->devices);
bus->dev.parent = &pdev->dev;
dev_set_name(&bus->dev, "zorro");
error = device_register(&bus->dev);
diff --git a/trunk/fs/bio.c b/trunk/fs/bio.c
index 5eaa70c9d96e..73922abba832 100644
--- a/trunk/fs/bio.c
+++ b/trunk/fs/bio.c
@@ -1312,7 +1312,7 @@ EXPORT_SYMBOL(bio_copy_kern);
* Note that this code is very hard to test under normal circumstances because
* direct-io pins the pages with get_user_pages(). This makes
* is_page_cache_freeable return false, and the VM will not clean the pages.
- * But other code (eg, flusher threads) could clean the pages if they are mapped
+ * But other code (eg, pdflush) could clean the pages if they are mapped
* pagecache.
*
* Simply disabling the call to bio_set_pages_dirty() is a good way to test the
diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c
index 6e8f416773d4..83baec24946d 100644
--- a/trunk/fs/btrfs/inode.c
+++ b/trunk/fs/btrfs/inode.c
@@ -324,8 +324,7 @@ static noinline int add_async_extent(struct async_cow *cow,
* If this code finds it can't get good compression, it puts an
* entry onto the work queue to write the uncompressed bytes. This
* makes sure that both compressed inodes and uncompressed inodes
- * are written in the same order that the flusher thread sent them
- * down.
+ * are written in the same order that pdflush sent them down.
*/
static noinline int compress_file_range(struct inode *inode,
struct page *locked_page,
diff --git a/trunk/fs/btrfs/ioctl.c b/trunk/fs/btrfs/ioctl.c
index 7bb755677a22..bc2f6ffff3cf 100644
--- a/trunk/fs/btrfs/ioctl.c
+++ b/trunk/fs/btrfs/ioctl.c
@@ -664,6 +664,10 @@ static noinline int btrfs_mksubvol(struct path *parent,
struct dentry *dentry;
int error;
+ error = mnt_want_write(parent->mnt);
+ if (error)
+ return error;
+
mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
dentry = lookup_one_len(name, parent->dentry, namelen);
@@ -699,6 +703,7 @@ static noinline int btrfs_mksubvol(struct path *parent,
dput(dentry);
out_unlock:
mutex_unlock(&dir->i_mutex);
+ mnt_drop_write(parent->mnt);
return error;
}
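Note the ordering: write access to the mount is taken before dir->i_mutex and released only after the mutex is dropped, so every exit from the locked region funnels through out_unlock. A hedged skeleton of that pairing:

        /* Skeleton of the ordering used above; the body is elided. */
        static int mksubvol_ordering_sketch(struct path *parent, struct inode *dir)
        {
                int error;

                error = mnt_want_write(parent->mnt);
                if (error)
                        return error;

                mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
                /* ... look up the name and create the subvolume ... */
                mutex_unlock(&dir->i_mutex);

                mnt_drop_write(parent->mnt);
                return error;
        }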
diff --git a/trunk/fs/btrfs/ordered-data.c b/trunk/fs/btrfs/ordered-data.c
index 051c7fe551dd..643335a4fe3c 100644
--- a/trunk/fs/btrfs/ordered-data.c
+++ b/trunk/fs/btrfs/ordered-data.c
@@ -596,7 +596,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
/*
* pages in the range can be dirty, clean or writeback. We
* start IO on any dirty ones so the wait doesn't stall waiting
- * for the flusher thread to find them
+ * for pdflush to find them
*/
if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
filemap_fdatawrite_range(inode->i_mapping, start, end);
diff --git a/trunk/fs/btrfs/super.c b/trunk/fs/btrfs/super.c
index f2eb24c477a3..8c6e61d6eed5 100644
--- a/trunk/fs/btrfs/super.c
+++ b/trunk/fs/btrfs/super.c
@@ -100,6 +100,10 @@ static void __save_error_info(struct btrfs_fs_info *fs_info)
fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR;
}
+/* NOTE:
+ * We move write_super stuff at umount in order to avoid deadlock
+ * for umount hold all lock.
+ */
static void save_error_info(struct btrfs_fs_info *fs_info)
{
__save_error_info(fs_info);
diff --git a/trunk/fs/btrfs/volumes.c b/trunk/fs/btrfs/volumes.c
index e86ae04abe6a..b8708f994e67 100644
--- a/trunk/fs/btrfs/volumes.c
+++ b/trunk/fs/btrfs/volumes.c
@@ -1744,6 +1744,10 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
device->fs_devices = root->fs_info->fs_devices;
+ /*
+ * we don't want write_supers to jump in here with our device
+ * half setup
+ */
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
list_add(&device->dev_alloc_list,
diff --git a/trunk/fs/exofs/inode.c b/trunk/fs/exofs/inode.c
index 1562c27a2fab..5badb0c039de 100644
--- a/trunk/fs/exofs/inode.c
+++ b/trunk/fs/exofs/inode.c
@@ -37,12 +37,15 @@
#define EXOFS_DBGMSG2(M...) do {} while (0)
+enum {MAX_PAGES_KMALLOC = PAGE_SIZE / sizeof(struct page *), };
+
unsigned exofs_max_io_pages(struct ore_layout *layout,
unsigned expected_pages)
{
- unsigned pages = min_t(unsigned, expected_pages,
- layout->max_io_length / PAGE_SIZE);
+ unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);
+ /* TODO: easily support bio chaining */
+ pages = min_t(unsigned, pages, layout->max_io_length / PAGE_SIZE);
return pages;
}
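MAX_PAGES_KMALLOC caps a request at the number of page pointers that fit in a single page: with 4 KiB pages and 8-byte pointers that is 4096 / 8 = 512 pages, or at most 2 MiB of payload per I/O, and the result is further clamped to the layout's max_io_length. (The figures assume a 64-bit build with 4 KiB pages.)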
@@ -98,8 +101,7 @@ static void _pcol_reset(struct page_collect *pcol)
* it might not end here. don't be left with nothing
*/
if (!pcol->expected_pages)
- pcol->expected_pages =
- exofs_max_io_pages(&pcol->sbi->layout, ~0);
+ pcol->expected_pages = MAX_PAGES_KMALLOC;
}
static int pcol_try_alloc(struct page_collect *pcol)
@@ -387,8 +389,6 @@ static int readpage_strip(void *data, struct page *page)
size_t len;
int ret;
- BUG_ON(!PageLocked(page));
-
/* FIXME: Just for debugging, will be removed */
if (PageUptodate(page))
EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
@@ -572,16 +572,8 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
if (!pcol->that_locked_page ||
(pcol->that_locked_page->index != index)) {
- struct page *page;
- loff_t i_size = i_size_read(pcol->inode);
-
- if (offset >= i_size) {
- *uptodate = true;
- EXOFS_DBGMSG("offset >= i_size index=0x%lx\n", index);
- return ZERO_PAGE(0);
- }
+ struct page *page = find_get_page(pcol->inode->i_mapping, index);
- page = find_get_page(pcol->inode->i_mapping, index);
if (!page) {
page = find_or_create_page(pcol->inode->i_mapping,
index, GFP_NOFS);
@@ -610,13 +602,12 @@ static void __r4w_put_page(void *priv, struct page *page)
{
struct page_collect *pcol = priv;
- if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) {
+ if (pcol->that_locked_page != page) {
EXOFS_DBGMSG("index=0x%lx\n", page->index);
page_cache_release(page);
return;
}
- EXOFS_DBGMSG("that_locked_page index=0x%lx\n",
- ZERO_PAGE(0) == page ? -1 : page->index);
+ EXOFS_DBGMSG("that_locked_page index=0x%lx\n", page->index);
}
static const struct _ore_r4w_op _r4w_op = {
diff --git a/trunk/fs/exofs/ore.c b/trunk/fs/exofs/ore.c
index 1585db1aa365..24a49d47e935 100644
--- a/trunk/fs/exofs/ore.c
+++ b/trunk/fs/exofs/ore.c
@@ -837,11 +837,11 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
bio->bi_rw |= REQ_WRITE;
}
- osd_req_write(or, _ios_obj(ios, cur_comp),
- per_dev->offset, bio, per_dev->length);
+ osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
+ bio, per_dev->length);
ORE_DBGMSG("write(0x%llx) offset=0x%llx "
"length=0x%llx dev=%d\n",
- _LLU(_ios_obj(ios, cur_comp)->id),
+ _LLU(_ios_obj(ios, dev)->id),
_LLU(per_dev->offset),
_LLU(per_dev->length), dev);
} else if (ios->kern_buff) {
@@ -853,20 +853,20 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
(ios->si.unit_off + ios->length >
ios->layout->stripe_unit));
- ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp),
+ ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev),
per_dev->offset,
ios->kern_buff, ios->length);
if (unlikely(ret))
goto out;
ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
"length=0x%llx dev=%d\n",
- _LLU(_ios_obj(ios, cur_comp)->id),
+ _LLU(_ios_obj(ios, dev)->id),
_LLU(per_dev->offset),
_LLU(ios->length), per_dev->dev);
} else {
- osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
+ osd_req_set_attributes(or, _ios_obj(ios, dev));
ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
- _LLU(_ios_obj(ios, cur_comp)->id),
+ _LLU(_ios_obj(ios, dev)->id),
ios->out_attr_len, dev);
}
diff --git a/trunk/fs/exofs/super.c b/trunk/fs/exofs/super.c
index dde41a75c7c8..433783624d10 100644
--- a/trunk/fs/exofs/super.c
+++ b/trunk/fs/exofs/super.c
@@ -400,6 +400,8 @@ static int exofs_sync_fs(struct super_block *sb, int wait)
ret = ore_write(ios);
if (unlikely(ret))
EXOFS_ERR("%s: ore_write failed.\n", __func__);
+ else
+ sb->s_dirt = 0;
unlock_super(sb);
@@ -410,6 +412,14 @@ static int exofs_sync_fs(struct super_block *sb, int wait)
return ret;
}
+static void exofs_write_super(struct super_block *sb)
+{
+ if (!(sb->s_flags & MS_RDONLY))
+ exofs_sync_fs(sb, 1);
+ else
+ sb->s_dirt = 0;
+}
+
static void _exofs_print_device(const char *msg, const char *dev_path,
struct osd_dev *od, u64 pid)
{
@@ -942,6 +952,7 @@ static const struct super_operations exofs_sops = {
.write_inode = exofs_write_inode,
.evict_inode = exofs_evict_inode,
.put_super = exofs_put_super,
+ .write_super = exofs_write_super,
.sync_fs = exofs_sync_fs,
.statfs = exofs_statfs,
};
diff --git a/trunk/fs/ext3/inode.c b/trunk/fs/ext3/inode.c
index a07597307fd1..9a4a5c48b1c9 100644
--- a/trunk/fs/ext3/inode.c
+++ b/trunk/fs/ext3/inode.c
@@ -3459,6 +3459,14 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
* inode out, but prune_icache isn't a user-visible syncing function.
* Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
* we start and wait on commits.
+ *
+ * Is this efficient/effective? Well, we're being nice to the system
+ * by cleaning up our inodes proactively so they can be reaped
+ * without I/O. But we are potentially leaving up to five seconds'
+ * worth of inodes floating about which prune_icache wants us to
+ * write out. One way to fix that would be to get prune_icache()
+ * to do a write_super() to free up some memory. It has the desired
+ * effect.
*/
int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
diff --git a/trunk/fs/ext3/super.c b/trunk/fs/ext3/super.c
index 8c892e93d8e7..ff9bcdc5b0d5 100644
--- a/trunk/fs/ext3/super.c
+++ b/trunk/fs/ext3/super.c
@@ -64,6 +64,11 @@ static int ext3_freeze(struct super_block *sb);
/*
* Wrappers for journal_start/end.
+ *
+ * The only special thing we need to do here is to make sure that all
+ * journal_end calls result in the superblock being marked dirty, so
+ * that sync() will call the filesystem's write_super callback if
+ * appropriate.
*/
handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
{
@@ -85,6 +90,12 @@ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
return journal_start(journal, nblocks);
}
+/*
+ * The only special thing we need to do here is to make sure that all
+ * journal_stop calls result in the superblock being marked dirty, so
+ * that sync() will call the filesystem's write_super callback if
+ * appropriate.
+ */
int __ext3_journal_stop(const char *where, handle_t *handle)
{
struct super_block *sb;
diff --git a/trunk/fs/ext4/inode.c b/trunk/fs/ext4/inode.c
index dff171c3a123..6324f74e0342 100644
--- a/trunk/fs/ext4/inode.c
+++ b/trunk/fs/ext4/inode.c
@@ -1970,7 +1970,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
* This function can get called via...
* - ext4_da_writepages after taking page lock (have journal handle)
* - journal_submit_inode_data_buffers (no journal handle)
- * - shrink_page_list via the kswapd/direct reclaim (no journal handle)
+ * - shrink_page_list via pdflush (no journal handle)
* - grab_page_cache when doing write_begin (have journal handle)
*
* We don't do any block allocation in this function. If we have page with
@@ -4589,6 +4589,14 @@ static int ext4_expand_extra_isize(struct inode *inode,
* inode out, but prune_icache isn't a user-visible syncing function.
* Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
* we start and wait on commits.
+ *
+ * Is this efficient/effective? Well, we're being nice to the system
+ * by cleaning up our inodes proactively so they can be reaped
+ * without I/O. But we are potentially leaving up to five seconds'
+ * worth of inodes floating about which prune_icache wants us to
+ * write out. One way to fix that would be to get prune_icache()
+ * to do a write_super() to free up some memory. It has the desired
+ * effect.
*/
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
diff --git a/trunk/fs/ext4/super.c b/trunk/fs/ext4/super.c
index 3e0851e4f468..d76ec8277d3f 100644
--- a/trunk/fs/ext4/super.c
+++ b/trunk/fs/ext4/super.c
@@ -326,6 +326,11 @@ static void ext4_put_nojournal(handle_t *handle)
/*
* Wrappers for jbd2_journal_start/end.
+ *
+ * The only special thing we need to do here is to make sure that all
+ * journal_end calls result in the superblock being marked dirty, so
+ * that sync() will call the filesystem's write_super callback if
+ * appropriate.
*/
handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
{
@@ -351,6 +356,12 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
return jbd2_journal_start(journal, nblocks);
}
+/*
+ * The only special thing we need to do here is to make sure that all
+ * jbd2_journal_stop calls result in the superblock being marked dirty, so
+ * that sync() will call the filesystem's write_super callback if
+ * appropriate.
+ */
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
{
struct super_block *sb;
diff --git a/trunk/fs/gfs2/meta_io.c b/trunk/fs/gfs2/meta_io.c
index 22255d96b27e..3a56c8d94de0 100644
--- a/trunk/fs/gfs2/meta_io.c
+++ b/trunk/fs/gfs2/meta_io.c
@@ -52,7 +52,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
/*
* If it's a fully non-blocking write attempt and we cannot
* lock the buffer then redirty the page. Note that this can
- * potentially cause a busy-wait loop from flusher thread and kswapd
+ * potentially cause a busy-wait loop from pdflush and kswapd
* activity, but those code paths have their own higher-level
* throttling.
*/
diff --git a/trunk/fs/hfs/mdb.c b/trunk/fs/hfs/mdb.c
index b7ec224910c5..5fd51a5833ff 100644
--- a/trunk/fs/hfs/mdb.c
+++ b/trunk/fs/hfs/mdb.c
@@ -236,10 +236,10 @@ int hfs_mdb_get(struct super_block *sb)
* hfs_mdb_commit()
*
* Description:
- * This updates the MDB on disk.
+ * This updates the MDB on disk (look also at hfs_write_super()).
* It does not check, if the superblock has been modified, or
* if the filesystem has been mounted read-only. It is mainly
- * called by hfs_sync_fs() and flush_mdb().
+ * called by hfs_write_super() and hfs_btree_extend().
* Input Variable(s):
* struct hfs_mdb *mdb: Pointer to the hfs MDB
* int backup;
diff --git a/trunk/fs/jbd/journal.c b/trunk/fs/jbd/journal.c
index 09357508ec9a..425c2f2cf170 100644
--- a/trunk/fs/jbd/journal.c
+++ b/trunk/fs/jbd/journal.c
@@ -534,8 +534,8 @@ int journal_start_commit(journal_t *journal, tid_t *ptid)
ret = 1;
} else if (journal->j_committing_transaction) {
/*
- * If commit has been started, then we have to wait for
- * completion of that transaction.
+ * If ext3_write_super() recently started a commit, then we
+ * have to wait for completion of that transaction
*/
if (ptid)
*ptid = journal->j_committing_transaction->t_tid;
diff --git a/trunk/fs/jbd2/journal.c b/trunk/fs/jbd2/journal.c
index 8625da27eccf..e9a3c4c85594 100644
--- a/trunk/fs/jbd2/journal.c
+++ b/trunk/fs/jbd2/journal.c
@@ -612,8 +612,8 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
ret = 1;
} else if (journal->j_committing_transaction) {
/*
- * If commit has been started, then we have to wait for
- * completion of that transaction.
+ * If ext3_write_super() recently started a commit, then we
+ * have to wait for completion of that transaction
*/
if (ptid)
*ptid = journal->j_committing_transaction->t_tid;
diff --git a/trunk/fs/nilfs2/super.c b/trunk/fs/nilfs2/super.c
index 6a10812711c1..6522cac6057c 100644
--- a/trunk/fs/nilfs2/super.c
+++ b/trunk/fs/nilfs2/super.c
@@ -676,13 +676,17 @@ static const struct super_operations nilfs_sops = {
.alloc_inode = nilfs_alloc_inode,
.destroy_inode = nilfs_destroy_inode,
.dirty_inode = nilfs_dirty_inode,
+ /* .write_inode = nilfs_write_inode, */
+ /* .drop_inode = nilfs_drop_inode, */
.evict_inode = nilfs_evict_inode,
.put_super = nilfs_put_super,
+ /* .write_super = nilfs_write_super, */
.sync_fs = nilfs_sync_fs,
.freeze_fs = nilfs_freeze,
.unfreeze_fs = nilfs_unfreeze,
.statfs = nilfs_statfs,
.remount_fs = nilfs_remount,
+ /* .umount_begin */
.show_options = nilfs_show_options
};
diff --git a/trunk/fs/nilfs2/the_nilfs.h b/trunk/fs/nilfs2/the_nilfs.h
index be1267a34cea..6eee4177807b 100644
--- a/trunk/fs/nilfs2/the_nilfs.h
+++ b/trunk/fs/nilfs2/the_nilfs.h
@@ -107,6 +107,8 @@ struct the_nilfs {
* used for
* - loading the latest checkpoint exclusively.
* - allocating a new full segment.
+ * - protecting s_dirt in the super_block struct
+ * (see nilfs_write_super) and the following fields.
*/
struct buffer_head *ns_sbh[2];
struct nilfs_super_block *ns_sbp[2];
diff --git a/trunk/fs/open.c b/trunk/fs/open.c
index bc132e167d2d..f3d96e7e7b19 100644
--- a/trunk/fs/open.c
+++ b/trunk/fs/open.c
@@ -717,7 +717,7 @@ static int do_dentry_open(struct file *f,
* here, so just reset the state.
*/
file_reset_write(f);
- __mnt_drop_write(f->f_path.mnt);
+ mnt_drop_write(f->f_path.mnt);
}
}
cleanup_file:
diff --git a/trunk/fs/super.c b/trunk/fs/super.c
index 0902cfa6a12e..b05cf47463d0 100644
--- a/trunk/fs/super.c
+++ b/trunk/fs/super.c
@@ -536,6 +536,46 @@ void drop_super(struct super_block *sb)
EXPORT_SYMBOL(drop_super);
+/**
+ * sync_supers - helper for periodic superblock writeback
+ *
+ * Call the write_super method if present on all dirty superblocks in
+ * the system. This is for the periodic writeback used by most older
+ * filesystems. For data integrity superblock writeback use
+ * sync_filesystems() instead.
+ *
+ * Note: check the dirty flag before waiting, so we don't
+ * hold up the sync while mounting a device. (The newly
+ * mounted device won't need syncing.)
+ */
+void sync_supers(void)
+{
+ struct super_block *sb, *p = NULL;
+
+ spin_lock(&sb_lock);
+ list_for_each_entry(sb, &super_blocks, s_list) {
+ if (hlist_unhashed(&sb->s_instances))
+ continue;
+ if (sb->s_op->write_super && sb->s_dirt) {
+ sb->s_count++;
+ spin_unlock(&sb_lock);
+
+ down_read(&sb->s_umount);
+ if (sb->s_root && sb->s_dirt && (sb->s_flags & MS_BORN))
+ sb->s_op->write_super(sb);
+ up_read(&sb->s_umount);
+
+ spin_lock(&sb_lock);
+ if (p)
+ __put_super(p);
+ p = sb;
+ }
+ }
+ if (p)
+ __put_super(p);
+ spin_unlock(&sb_lock);
+}
+
/**
* iterate_supers - call function for all active superblocks
* @f: function to call
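A filesystem opts into this periodic path by setting s_dirt whenever its in-core superblock diverges from disk and by supplying ->write_super() to flush it; sync_supers() then picks the superblock up on its next sweep. A minimal filesystem-side sketch (the examplefs_* names are illustrative, not a real filesystem):

        /* Illustrative write_super implementation for the periodic path. */
        static void examplefs_write_super(struct super_block *sb)
        {
                if (!(sb->s_flags & MS_RDONLY))
                        examplefs_sync_super(sb);       /* hypothetical flush helper */
                sb->s_dirt = 0;
        }

        /* Called wherever the in-core superblock is modified. */
        static void examplefs_mark_super_dirty(struct super_block *sb)
        {
                sb->s_dirt = 1;         /* picked up by the next sync_supers() sweep */
        }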
diff --git a/trunk/fs/ubifs/file.c b/trunk/fs/ubifs/file.c
index 7bd6e72afd11..35389ca2d267 100644
--- a/trunk/fs/ubifs/file.c
+++ b/trunk/fs/ubifs/file.c
@@ -37,11 +37,11 @@
*
* A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
* implement. However, this is not true for 'ubifs_writepage()', which may be
- * called with @i_mutex unlocked. For example, when flusher thread is doing
- * background write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex.
- * At "normal" work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g.
- * in the "sys_write -> alloc_pages -> direct reclaim path". So, in
- * 'ubifs_writepage()' we are only guaranteed that the page is locked.
+ * called with @i_mutex unlocked. For example, when pdflush is doing background
+ * write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex. At "normal"
+ * work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g. in the
+ * "sys_write -> alloc_pages -> direct reclaim path". So, in 'ubifs_writepage()'
+ * we are only guaranteed that the page is locked.
*
* Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
* read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
diff --git a/trunk/fs/ubifs/super.c b/trunk/fs/ubifs/super.c
index c3fa6c5327a3..1c766c39c038 100644
--- a/trunk/fs/ubifs/super.c
+++ b/trunk/fs/ubifs/super.c
@@ -303,7 +303,7 @@ static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc)
mutex_lock(&ui->ui_mutex);
/*
* Due to races between write-back forced by budgeting
- * (see 'sync_some_inodes()') and background write-back, the inode may
+ * (see 'sync_some_inodes()') and pdflush write-back, the inode may
* have already been synchronized, do not do this again. This might
* also happen if it was synchronized in an VFS operation, e.g.
* 'ubifs_link()'.
diff --git a/trunk/include/acpi/acpixf.h b/trunk/include/acpi/acpixf.h
index 26a92fc28a59..2c744c7a5b3d 100644
--- a/trunk/include/acpi/acpixf.h
+++ b/trunk/include/acpi/acpixf.h
@@ -491,11 +491,11 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b);
acpi_status acpi_enter_sleep_state_prep(u8 sleep_state);
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state);
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void))
-acpi_status acpi_leave_sleep_state_prep(u8 sleep_state);
+acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags);
acpi_status acpi_leave_sleep_state(u8 sleep_state);
diff --git a/trunk/include/acpi/actypes.h b/trunk/include/acpi/actypes.h
index 3d00bd5bd7e3..3af87de6a68c 100644
--- a/trunk/include/acpi/actypes.h
+++ b/trunk/include/acpi/actypes.h
@@ -803,7 +803,7 @@ typedef u8 acpi_adr_space_type;
/* Sleep function dispatch */
-typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state);
+typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state, u8 flags);
struct acpi_sleep_functions {
ACPI_SLEEP_FUNCTION legacy_function;
diff --git a/trunk/include/linux/acpi.h b/trunk/include/linux/acpi.h
index 4f2a76224509..3ad510b25283 100644
--- a/trunk/include/linux/acpi.h
+++ b/trunk/include/linux/acpi.h
@@ -96,7 +96,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
void acpi_numa_slit_init (struct acpi_table_slit *slit);
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
-int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
+void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
void acpi_numa_arch_fixup(void);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
diff --git a/trunk/include/linux/backing-dev.h b/trunk/include/linux/backing-dev.h
index 2a9a9abc9126..c97c6b9cd38e 100644
--- a/trunk/include/linux/backing-dev.h
+++ b/trunk/include/linux/backing-dev.h
@@ -124,6 +124,7 @@ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
void bdi_start_background_writeback(struct backing_dev_info *bdi);
int bdi_writeback_thread(void *data);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
+void bdi_arm_supers_timer(void);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
diff --git a/trunk/include/linux/bcma/bcma_driver_chipcommon.h b/trunk/include/linux/bcma/bcma_driver_chipcommon.h
index d323a4b4143c..3c80885fa829 100644
--- a/trunk/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/trunk/include/linux/bcma/bcma_driver_chipcommon.h
@@ -89,12 +89,6 @@
#define BCMA_CC_CHIPST_4313_OTP_PRESENT 2
#define BCMA_CC_CHIPST_4331_SPROM_PRESENT 2
#define BCMA_CC_CHIPST_4331_OTP_PRESENT 4
-#define BCMA_CC_CHIPST_43228_ILP_DIV_EN 0x00000001
-#define BCMA_CC_CHIPST_43228_OTP_PRESENT 0x00000002
-#define BCMA_CC_CHIPST_43228_SERDES_REFCLK_PADSEL 0x00000004
-#define BCMA_CC_CHIPST_43228_SDIO_MODE 0x00000008
-#define BCMA_CC_CHIPST_43228_SDIO_OTP_PRESENT 0x00000010
-#define BCMA_CC_CHIPST_43228_SDIO_RESET 0x00000020
#define BCMA_CC_CHIPST_4706_PKG_OPTION BIT(0) /* 0: full-featured package 1: low-cost package */
#define BCMA_CC_CHIPST_4706_SFLASH_PRESENT BIT(1) /* 0: parallel, 1: serial flash is present */
#define BCMA_CC_CHIPST_4706_SFLASH_TYPE BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */
diff --git a/trunk/include/linux/fs.h b/trunk/include/linux/fs.h
index aa110476a95b..38dba16c4176 100644
--- a/trunk/include/linux/fs.h
+++ b/trunk/include/linux/fs.h
@@ -1491,6 +1491,7 @@ struct sb_writers {
struct super_block {
struct list_head s_list; /* Keep this first */
dev_t s_dev; /* search index; _not_ kdev_t */
+ unsigned char s_dirt;
unsigned char s_blocksize_bits;
unsigned long s_blocksize;
loff_t s_maxbytes; /* Max file size */
@@ -1860,6 +1861,7 @@ struct super_operations {
int (*drop_inode) (struct inode *);
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
+ void (*write_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
int (*freeze_fs) (struct super_block *);
int (*unfreeze_fs) (struct super_block *);
@@ -2395,6 +2397,7 @@ extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
extern int generic_write_sync(struct file *file, loff_t pos, loff_t count);
+extern void sync_supers(void);
extern void emergency_sync(void);
extern void emergency_remount(void);
#ifdef CONFIG_BLOCK
diff --git a/trunk/include/linux/ftrace_event.h b/trunk/include/linux/ftrace_event.h
index 642928cf57b4..af961d6f7ab1 100644
--- a/trunk/include/linux/ftrace_event.h
+++ b/trunk/include/linux/ftrace_event.h
@@ -306,10 +306,9 @@ extern void *perf_trace_buf_prepare(int size, unsigned short type,
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
- u64 count, struct pt_regs *regs, void *head,
- struct task_struct *task)
+ u64 count, struct pt_regs *regs, void *head)
{
- perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
+ perf_tp_event(addr, count, raw_data, size, regs, head, rctx);
}
#endif
diff --git a/trunk/include/linux/hardirq.h b/trunk/include/linux/hardirq.h
index 305f23cd7cff..bb7f30971858 100644
--- a/trunk/include/linux/hardirq.h
+++ b/trunk/include/linux/hardirq.h
@@ -22,7 +22,7 @@
*
* - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
* - bit 26 is the NMI_MASK
- * - bit 27 is the PREEMPT_ACTIVE flag
+ * - bit 28 is the PREEMPT_ACTIVE flag
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
diff --git a/trunk/include/linux/iommu.h b/trunk/include/linux/iommu.h
index 7e83370e6fd2..54d6d690073c 100644
--- a/trunk/include/linux/iommu.h
+++ b/trunk/include/linux/iommu.h
@@ -20,7 +20,6 @@
#define __LINUX_IOMMU_H
#include
-#include
#define IOMMU_READ (1)
#define IOMMU_WRITE (2)
@@ -31,7 +30,6 @@ struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
-struct notifier_block;
/* iommu fault flags */
#define IOMMU_FAULT_READ 0x0
diff --git a/trunk/include/linux/ipv6.h b/trunk/include/linux/ipv6.h
index 879db26ec401..379e433e15e0 100644
--- a/trunk/include/linux/ipv6.h
+++ b/trunk/include/linux/ipv6.h
@@ -369,7 +369,6 @@ struct ipv6_pinfo {
__u8 rcv_tclass;
__u32 dst_cookie;
- __u32 rx_dst_cookie;
struct ipv6_mc_socklist __rcu *ipv6_mc_list;
struct ipv6_ac_socklist *ipv6_ac_list;
diff --git a/trunk/include/linux/irq.h b/trunk/include/linux/irq.h
index 216b0ba109d7..553fb66da130 100644
--- a/trunk/include/linux/irq.h
+++ b/trunk/include/linux/irq.h
@@ -349,7 +349,6 @@ enum {
IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
IRQCHIP_SKIP_SET_WAKE = (1 << 4),
- IRQCHIP_ONESHOT_SAFE = (1 << 5),
};
/* This include will go away once we isolated irq_desc usage to core code */
diff --git a/trunk/include/linux/jiffies.h b/trunk/include/linux/jiffies.h
index 82680541576d..265e2c3cbd1c 100644
--- a/trunk/include/linux/jiffies.h
+++ b/trunk/include/linux/jiffies.h
@@ -39,6 +39,9 @@
# error Invalid value of HZ.
#endif
+/* LATCH is used in the interval timer and ftape setup. */
+#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
+
/* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can
* improve accuracy by shifting LSH bits, hence calculating:
* (NOM << LSH) / DEN
@@ -51,30 +54,18 @@
#define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \
+ ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))
-#ifdef CLOCK_TICK_RATE
-/* LATCH is used in the interval timer and ftape setup. */
-# define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
-
-/*
- * HZ is the requested value. However the CLOCK_TICK_RATE may not allow
- * for exactly HZ. So SHIFTED_HZ is high res HZ ("<< 8" is for accuracy)
- */
-# define SHIFTED_HZ (SH_DIV(CLOCK_TICK_RATE, LATCH, 8))
-#else
-# define SHIFTED_HZ (HZ << 8)
-#endif
+/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
+#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))
-/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
-#define TICK_NSEC (SH_DIV(1000000UL * 1000, SHIFTED_HZ, 8))
+/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */
+#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8))
/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
-/*
- * TICK_USEC_TO_NSEC is the time between ticks in nsec assuming SHIFTED_HZ and
- * a value TUSEC for TICK_USEC (can be set bij adjtimex)
- */
-#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV(TUSEC * USER_HZ * 1000, SHIFTED_HZ, 8))
+/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and */
+/* a value TUSEC for TICK_USEC (can be set bij adjtimex) */
+#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8))
/* some arch's have a small-data section that can be accessed register-relative
* but that can only take up to, say, 4-byte variables. jiffies being part of
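As a worked example, with the PC's i8253 tick rate CLOCK_TICK_RATE = 1193182 Hz and HZ = 1000: LATCH = (1193182 + 500) / 1000 = 1193, ACTHZ = SH_DIV(1193182, 1193, 8) = 256039, i.e. roughly 1000.15 Hz in <<8 fixed point, and TICK_NSEC = SH_DIV(1000000000, 256039, 8) comes out to about 999848 ns rather than an even 1000000, reflecting that the timer cannot be programmed to hit HZ exactly. Other platforms define CLOCK_TICK_RATE differently, so the numbers vary.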
diff --git a/trunk/include/linux/kdb.h b/trunk/include/linux/kdb.h
index 42d9e863a313..064725854db8 100644
--- a/trunk/include/linux/kdb.h
+++ b/trunk/include/linux/kdb.h
@@ -75,6 +75,8 @@ extern const char *kdb_diemsg;
#define KDB_FLAG_CATASTROPHIC (1 << 1) /* A catastrophic event has occurred */
#define KDB_FLAG_CMD_INTERRUPT (1 << 2) /* Previous command was interrupted */
#define KDB_FLAG_NOIPI (1 << 3) /* Do not send IPIs */
+#define KDB_FLAG_ONLY_DO_DUMP (1 << 4) /* Only do a dump, used when
+ * kdb is off */
#define KDB_FLAG_NO_CONSOLE (1 << 5) /* No console is available,
* kdb is disabled */
#define KDB_FLAG_NO_VT_CONSOLE (1 << 6) /* No VT console is available, do
diff --git a/trunk/include/linux/netdevice.h b/trunk/include/linux/netdevice.h
index a9db4f33407f..eb06e58bed0b 100644
--- a/trunk/include/linux/netdevice.h
+++ b/trunk/include/linux/netdevice.h
@@ -1300,8 +1300,6 @@ struct net_device {
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE 65536
unsigned int gso_max_size;
-#define GSO_MAX_SEGS 65535
- u16 gso_max_segs;
#ifdef CONFIG_DCB
/* Data Center Bridging netlink ops */
diff --git a/trunk/include/linux/perf_event.h b/trunk/include/linux/perf_event.h
index 7602ccb3f40e..76c5c8b724a7 100644
--- a/trunk/include/linux/perf_event.h
+++ b/trunk/include/linux/perf_event.h
@@ -1272,8 +1272,7 @@ static inline bool perf_paranoid_kernel(void)
extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
int entry_size, struct pt_regs *regs,
- struct hlist_head *head, int rctx,
- struct task_struct *task);
+ struct hlist_head *head, int rctx);
extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags
diff --git a/trunk/include/linux/timex.h b/trunk/include/linux/timex.h
index 7c5ceb20e03a..99bc88b1fc02 100644
--- a/trunk/include/linux/timex.h
+++ b/trunk/include/linux/timex.h
@@ -232,7 +232,7 @@ struct timex {
* estimated error = NTP dispersion.
*/
extern unsigned long tick_usec; /* USER_HZ period (usec) */
-extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */
+extern unsigned long tick_nsec; /* ACTHZ period (nsec) */
extern void ntp_init(void);
extern void ntp_clear(void);
diff --git a/trunk/include/linux/topology.h b/trunk/include/linux/topology.h
index fec12d667211..e91cd43394df 100644
--- a/trunk/include/linux/topology.h
+++ b/trunk/include/linux/topology.h
@@ -164,7 +164,6 @@ int arch_update_cpu_topology(void);
| 0*SD_SHARE_CPUPOWER \
| 0*SD_SHARE_PKG_RESOURCES \
| 0*SD_SERIALIZE \
- | 1*SD_PREFER_SIBLING \
, \
.last_balance = jiffies, \
.balance_interval = 1, \
diff --git a/trunk/include/linux/writeback.h b/trunk/include/linux/writeback.h
index 50c3e8fa06a8..c66fe3332d83 100644
--- a/trunk/include/linux/writeback.h
+++ b/trunk/include/linux/writeback.h
@@ -104,6 +104,7 @@ static inline void wait_on_inode(struct inode *inode)
wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
}
+
/*
* mm/page-writeback.c
*/
diff --git a/trunk/include/net/cfg80211.h b/trunk/include/net/cfg80211.h
index 3d254e10ff30..493fa0c79005 100644
--- a/trunk/include/net/cfg80211.h
+++ b/trunk/include/net/cfg80211.h
@@ -96,7 +96,6 @@ enum ieee80211_band {
* is not permitted.
* @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel
* is not permitted.
- * @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel.
*/
enum ieee80211_channel_flags {
IEEE80211_CHAN_DISABLED = 1<<0,
@@ -105,7 +104,6 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_RADAR = 1<<3,
IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
- IEEE80211_CHAN_NO_OFDM = 1<<6,
};
#define IEEE80211_CHAN_NO_HT40 \
diff --git a/trunk/include/net/inet_connection_sock.h b/trunk/include/net/inet_connection_sock.h
index ba1d3615acbb..5ee66f517b4f 100644
--- a/trunk/include/net/inet_connection_sock.h
+++ b/trunk/include/net/inet_connection_sock.h
@@ -39,7 +39,6 @@ struct inet_connection_sock_af_ops {
int (*queue_xmit)(struct sk_buff *skb, struct flowi *fl);
void (*send_check)(struct sock *sk, struct sk_buff *skb);
int (*rebuild_header)(struct sock *sk);
- void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
int (*conn_request)(struct sock *sk, struct sk_buff *skb);
struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
diff --git a/trunk/include/net/inet_sock.h b/trunk/include/net/inet_sock.h
index 613cfa401672..83b567fe1941 100644
--- a/trunk/include/net/inet_sock.h
+++ b/trunk/include/net/inet_sock.h
@@ -249,4 +249,13 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
return flags;
}
+static inline void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ dst_hold(dst);
+ sk->sk_rx_dst = dst;
+ inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+}
+
#endif /* _INET_SOCK_H */
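inet_sk_rx_dst_set() caches the input route of an established socket so later packets can skip a full route lookup; it must take its own reference because skb_dst() does not hand one over. A hedged sketch of the intended call-site shape (the function name here is illustrative; the real user is tcp_v4_do_rcv(), changed later in this patch):

/* Illustrative only: the first packet on an established socket primes the
 * cached input route; subsequent packets reuse it.
 */
static void example_rcv_established(struct sock *sk, struct sk_buff *skb)
{
	if (unlikely(sk->sk_rx_dst == NULL))
		inet_sk_rx_dst_set(sk, skb);	/* takes a dst reference */

	/* ... continue with established-state processing ... */
}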
diff --git a/trunk/include/net/sock.h b/trunk/include/net/sock.h
index 72132aef53fc..b3730239bf18 100644
--- a/trunk/include/net/sock.h
+++ b/trunk/include/net/sock.h
@@ -218,7 +218,6 @@ struct cg_proto;
* @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
* @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
* @sk_gso_max_size: Maximum GSO segment size to build
- * @sk_gso_max_segs: Maximum number of GSO segments
* @sk_lingertime: %SO_LINGER l_linger setting
* @sk_backlog: always used with the per-socket spinlock held
* @sk_callback_lock: used with the callbacks in the end of this struct
@@ -339,7 +338,6 @@ struct sock {
netdev_features_t sk_route_nocaps;
int sk_gso_type;
unsigned int sk_gso_max_size;
- u16 sk_gso_max_segs;
int sk_rcvlowat;
unsigned long sk_lingertime;
struct sk_buff_head sk_error_queue;
diff --git a/trunk/include/net/xfrm.h b/trunk/include/net/xfrm.h
index 62b619e82a90..d9509eb29b80 100644
--- a/trunk/include/net/xfrm.h
+++ b/trunk/include/net/xfrm.h
@@ -213,9 +213,6 @@ struct xfrm_state {
struct xfrm_lifetime_cur curlft;
struct tasklet_hrtimer mtimer;
- /* used to fix curlft->add_time when changing date */
- long saved_tmo;
-
/* Last used time */
unsigned long lastused;
@@ -241,7 +238,6 @@ static inline struct net *xs_net(struct xfrm_state *x)
/* xflags - make enum if more show up */
#define XFRM_TIME_DEFER 1
-#define XFRM_SOFT_EXPIRE 2
enum {
XFRM_STATE_VOID,
diff --git a/trunk/include/trace/events/sched.h b/trunk/include/trace/events/sched.h
index 5a8671e8a67f..ea7a2035456d 100644
--- a/trunk/include/trace/events/sched.h
+++ b/trunk/include/trace/events/sched.h
@@ -73,9 +73,6 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
__entry->prio = p->prio;
__entry->success = success;
__entry->target_cpu = task_cpu(p);
- )
- TP_perf_assign(
- __perf_task(p);
),
TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
@@ -328,7 +325,6 @@ DECLARE_EVENT_CLASS(sched_stat_template,
)
TP_perf_assign(
__perf_count(delay);
- __perf_task(tsk);
),
TP_printk("comm=%s pid=%d delay=%Lu [ns]",
diff --git a/trunk/include/trace/ftrace.h b/trunk/include/trace/ftrace.h
index a763888a36f9..c6bc2faaf261 100644
--- a/trunk/include/trace/ftrace.h
+++ b/trunk/include/trace/ftrace.h
@@ -712,9 +712,6 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#undef __perf_count
#define __perf_count(c) __count = (c)
-#undef __perf_task
-#define __perf_task(t) __task = (t)
-
#undef TP_perf_assign
#define TP_perf_assign(args...) args
@@ -728,7 +725,6 @@ perf_trace_##call(void *__data, proto) \
struct ftrace_raw_##call *entry; \
struct pt_regs __regs; \
u64 __addr = 0, __count = 1; \
- struct task_struct *__task = NULL; \
struct hlist_head *head; \
int __entry_size; \
int __data_size; \
@@ -756,7 +752,7 @@ perf_trace_##call(void *__data, proto) \
\
head = this_cpu_ptr(event_call->perf_events); \
perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
- __count, &__regs, head, __task); \
+ __count, &__regs, head); \
}
/*
diff --git a/trunk/kernel/debug/kdb/kdb_debugger.c b/trunk/kernel/debug/kdb/kdb_debugger.c
index be7b33b73d30..8b68ce78ff17 100644
--- a/trunk/kernel/debug/kdb/kdb_debugger.c
+++ b/trunk/kernel/debug/kdb/kdb_debugger.c
@@ -12,7 +12,6 @@
#include
#include
#include
-#include
#include "kdb_private.h"
#include "../debug_core.h"
@@ -53,9 +52,6 @@ int kdb_stub(struct kgdb_state *ks)
if (atomic_read(&kgdb_setting_breakpoint))
reason = KDB_REASON_KEYBOARD;
- if (in_nmi())
- reason = KDB_REASON_NMI;
-
for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
if ((bp->bp_enabled) && (bp->bp_addr == addr)) {
reason = KDB_REASON_BREAK;
diff --git a/trunk/kernel/debug/kdb/kdb_io.c b/trunk/kernel/debug/kdb/kdb_io.c
index 0a69d2adc4f3..bb9520f0f6ff 100644
--- a/trunk/kernel/debug/kdb/kdb_io.c
+++ b/trunk/kernel/debug/kdb/kdb_io.c
@@ -715,6 +715,9 @@ int vkdb_printf(const char *fmt, va_list ap)
/* check for having reached the LINES number of printed lines */
if (kdb_nextline == linecount) {
char buf1[16] = "";
+#if defined(CONFIG_SMP)
+ char buf2[32];
+#endif
/* Watch out for recursion here. Any routine that calls
* kdb_printf will come back through here. And kdb_read
@@ -729,6 +732,14 @@ int vkdb_printf(const char *fmt, va_list ap)
if (moreprompt == NULL)
moreprompt = "more> ";
+#if defined(CONFIG_SMP)
+ if (strchr(moreprompt, '%')) {
+ sprintf(buf2, moreprompt, get_cpu());
+ put_cpu();
+ moreprompt = buf2;
+ }
+#endif
+
kdb_input_flush();
c = console_drivers;
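When the more-prompt contains a %d conversion (the SMP default added below in kdb_main.c), vkdb_printf() expands it with the current CPU number while pinned via get_cpu()/put_cpu(); a prompt without '%' is used verbatim. A small userspace sketch of that expansion, with made-up buffer sizes:

#include <stdio.h>
#include <string.h>

/* Sketch of the prompt handling: format through a scratch buffer only
 * when the prompt really is a format string.
 */
static const char *expand_prompt(const char *moreprompt, int cpu,
				 char *buf, size_t len)
{
	if (strchr(moreprompt, '%')) {
		snprintf(buf, len, moreprompt, cpu);
		return buf;
	}
	return moreprompt;
}

int main(void)
{
	char buf[32];

	puts(expand_prompt("[%d]more> ", 3, buf, sizeof(buf)));	/* [3]more> */
	puts(expand_prompt("more> ", 3, buf, sizeof(buf)));	/* more>    */
	return 0;
}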
diff --git a/trunk/kernel/debug/kdb/kdb_main.c b/trunk/kernel/debug/kdb/kdb_main.c
index 31df1706b9a9..1f91413edb87 100644
--- a/trunk/kernel/debug/kdb/kdb_main.c
+++ b/trunk/kernel/debug/kdb/kdb_main.c
@@ -139,10 +139,11 @@ static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t);
static char *__env[] = {
#if defined(CONFIG_SMP)
"PROMPT=[%d]kdb> ",
+ "MOREPROMPT=[%d]more> ",
#else
"PROMPT=kdb> ",
-#endif
"MOREPROMPT=more> ",
+#endif
"RADIX=16",
"MDCOUNT=8", /* lines of md output */
KDB_PLATFORM_ENV,
@@ -1235,6 +1236,18 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
*cmdbuf = '\0';
*(cmd_hist[cmd_head]) = '\0';
+ if (KDB_FLAG(ONLY_DO_DUMP)) {
+ /* kdb is off but a catastrophic error requires a dump.
+ * Take the dump and reboot.
+ * Turn on logging so the kdb output appears in the log
+ * buffer in the dump.
+ */
+ const char *setargs[] = { "set", "LOGGING", "1" };
+ kdb_set(2, setargs);
+ kdb_reboot(0, NULL);
+ /*NOTREACHED*/
+ }
+
do_full_getstr:
#if defined(CONFIG_SMP)
snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
diff --git a/trunk/kernel/events/callchain.c b/trunk/kernel/events/callchain.c
index 98d4597f43d6..6581a040f399 100644
--- a/trunk/kernel/events/callchain.c
+++ b/trunk/kernel/events/callchain.c
@@ -153,8 +153,7 @@ put_callchain_entry(int rctx)
put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
}
-struct perf_callchain_entry *
-perf_callchain(struct perf_event *event, struct pt_regs *regs)
+struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
int rctx;
struct perf_callchain_entry *entry;
@@ -179,12 +178,6 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
}
if (regs) {
- /*
- * Disallow cross-task user callchains.
- */
- if (event->ctx->task && event->ctx->task != current)
- goto exit_put;
-
perf_callchain_store(entry, PERF_CONTEXT_USER);
perf_callchain_user(entry, regs);
}
diff --git a/trunk/kernel/events/core.c b/trunk/kernel/events/core.c
index b7935fcec7d9..f1cf0edeb39a 100644
--- a/trunk/kernel/events/core.c
+++ b/trunk/kernel/events/core.c
@@ -4039,7 +4039,7 @@ void perf_prepare_sample(struct perf_event_header *header,
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
int size = 1;
- data->callchain = perf_callchain(event, regs);
+ data->callchain = perf_callchain(regs);
if (data->callchain)
size += data->callchain->nr;
@@ -5209,8 +5209,7 @@ static int perf_tp_event_match(struct perf_event *event,
}
void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
- struct pt_regs *regs, struct hlist_head *head, int rctx,
- struct task_struct *task)
+ struct pt_regs *regs, struct hlist_head *head, int rctx)
{
struct perf_sample_data data;
struct perf_event *event;
@@ -5229,31 +5228,6 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
perf_swevent_event(event, count, &data, regs);
}
- /*
- * If we got specified a target task, also iterate its context and
- * deliver this event there too.
- */
- if (task && task != current) {
- struct perf_event_context *ctx;
- struct trace_entry *entry = record;
-
- rcu_read_lock();
- ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
- if (!ctx)
- goto unlock;
-
- list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
- if (event->attr.type != PERF_TYPE_TRACEPOINT)
- continue;
- if (event->attr.config != entry->type)
- continue;
- if (perf_tp_event_match(event, &data, regs))
- perf_swevent_event(event, count, &data, regs);
- }
-unlock:
- rcu_read_unlock();
- }
-
perf_swevent_put_recursion_context(rctx);
}
EXPORT_SYMBOL_GPL(perf_tp_event);
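With the task argument gone, perf_tp_event() only delivers to the per-CPU hlist for the current context, and every caller of perf_trace_buf_submit() drops its trailing NULL, as the trace_kprobe.c, trace_syscalls.c and trace_uprobe.c hunks later in this patch show. The resulting call shape, sketched with the variable names those handlers already use (addr and count are placeholders for the per-handler values such as entry->ip and 1):

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx, addr, count, regs, head);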
diff --git a/trunk/kernel/events/internal.h b/trunk/kernel/events/internal.h
index a096c19f2c2a..b0b107f90afc 100644
--- a/trunk/kernel/events/internal.h
+++ b/trunk/kernel/events/internal.h
@@ -101,8 +101,7 @@ __output_copy(struct perf_output_handle *handle,
}
/* Callchain handling */
-extern struct perf_callchain_entry *
-perf_callchain(struct perf_event *event, struct pt_regs *regs);
+extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);
diff --git a/trunk/kernel/futex.c b/trunk/kernel/futex.c
index 3717e7b306e0..e2b0fb9a0b3b 100644
--- a/trunk/kernel/futex.c
+++ b/trunk/kernel/futex.c
@@ -2231,11 +2231,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
* @uaddr2: the pi futex we will take prior to returning to user-space
*
* The caller will wait on uaddr and will be requeued by futex_requeue() to
- * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
- * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
- * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
- * without one, the pi logic would not know which task to boost/deboost, if
- * there was a need to.
+ * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
+ * complete the acquisition of the rt_mutex prior to returning to userspace.
+ * This ensures the rt_mutex maintains an owner when it has waiters; without
+ * one, the pi logic wouldn't know which task to boost/deboost, if there was a
+ * need to.
*
* We call schedule in futex_wait_queue_me() when we enqueue and return there
* via the following:
@@ -2272,9 +2272,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
struct futex_q q = futex_q_init;
int res, ret;
- if (uaddr == uaddr2)
- return -EINVAL;
-
if (!bitset)
return -EINVAL;
@@ -2346,7 +2343,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
* the pi_state.
*/
- WARN_ON(!q.pi_state);
+ WARN_ON(!&q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
debug_rt_mutex_free_waiter(&rt_waiter);
@@ -2373,7 +2370,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* fault, unlock the rt_mutex and return the fault to userspace.
*/
if (ret == -EFAULT) {
- if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
+ if (rt_mutex_owner(pi_mutex) == current)
rt_mutex_unlock(pi_mutex);
} else if (ret == -EINTR) {
/*
diff --git a/trunk/kernel/irq/manage.c b/trunk/kernel/irq/manage.c
index 4c69326aa773..0a8e8f059627 100644
--- a/trunk/kernel/irq/manage.c
+++ b/trunk/kernel/irq/manage.c
@@ -943,18 +943,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
goto out_thread;
}
- /*
- * Drivers are often written to work w/o knowledge about the
- * underlying irq chip implementation, so a request for a
- * threaded irq without a primary hard irq context handler
- * requires the ONESHOT flag to be set. Some irq chips like
- * MSI based interrupts are per se one shot safe. Check the
- * chip flags, so we can avoid the unmask dance at the end of
- * the threaded handler for those.
- */
- if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
- new->flags &= ~IRQF_ONESHOT;
-
/*
* The following block of code has to be executed atomically
*/
@@ -1029,8 +1017,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
*/
new->thread_mask = 1 << ffz(thread_mask);
- } else if (new->handler == irq_default_primary_handler &&
- !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
+ } else if (new->handler == irq_default_primary_handler) {
/*
* The interrupt was requested with handler = NULL, so
* we use the default primary handler for it. But it
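Dropping the IRQCHIP_ONESHOT_SAFE handling restores the old rule: a threaded request with no primary handler must pass IRQF_ONESHOT itself, or __setup_irq() rejects it. A minimal driver-side sketch (device name, trigger flag and handler body are made up for illustration):

#include <linux/interrupt.h>

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* sleepable work (I2C transfers, etc.) runs here */
	return IRQ_HANDLED;
}

static int example_request_irq(unsigned int irq, void *dev)
{
	/*
	 * No primary handler: the core installs irq_default_primary_handler,
	 * so IRQF_ONESHOT is mandatory to keep the line masked until the
	 * thread has finished.
	 */
	return request_threaded_irq(irq, NULL, example_thread_fn,
				    IRQF_ONESHOT | IRQF_TRIGGER_LOW,
				    "example-dev", dev);
}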
diff --git a/trunk/kernel/printk.c b/trunk/kernel/printk.c
index 66a2ea37b576..6a76ab9d4476 100644
--- a/trunk/kernel/printk.c
+++ b/trunk/kernel/printk.c
@@ -1034,7 +1034,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
struct log *msg = log_from_idx(idx);
len += msg_print_text(msg, prev, true, NULL, 0);
- prev = msg->flags;
idx = log_next(idx);
seq++;
}
@@ -1047,7 +1046,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
struct log *msg = log_from_idx(idx);
len -= msg_print_text(msg, prev, true, NULL, 0);
- prev = msg->flags;
idx = log_next(idx);
seq++;
}
diff --git a/trunk/kernel/sched/core.c b/trunk/kernel/sched/core.c
index 82ad284f823b..d325c4b2dcbb 100644
--- a/trunk/kernel/sched/core.c
+++ b/trunk/kernel/sched/core.c
@@ -4340,7 +4340,9 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
*/
if (unlikely(policy == p->policy && (!rt_policy(policy) ||
param->sched_priority == p->rt_priority))) {
- task_rq_unlock(rq, p, &flags);
+
+ __task_rq_unlock(rq);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
return 0;
}
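task_rq_unlock() in this tree drops the runqueue lock and the task's pi_lock as a pair, so replacing it with the open-coded sequence should be behaviourally identical; only the helper is avoided. A sketch of the assumed equivalence (based on the helper's usual definition in kernel/sched/core.c):

	/* form removed by this hunk */
	task_rq_unlock(rq, p, &flags);

	/* equivalent open-coded form kept by this hunk */
	__task_rq_unlock(rq);				/* drops rq->lock   */
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);	/* drops p->pi_lock */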
diff --git a/trunk/kernel/sched/cpupri.c b/trunk/kernel/sched/cpupri.c
index 23aa789c53ee..d72586fdf660 100644
--- a/trunk/kernel/sched/cpupri.c
+++ b/trunk/kernel/sched/cpupri.c
@@ -65,8 +65,8 @@ static int convert_prio(int prio)
int cpupri_find(struct cpupri *cp, struct task_struct *p,
struct cpumask *lowest_mask)
{
- int idx = 0;
- int task_pri = convert_prio(p->prio);
+ int idx = 0;
+ int task_pri = convert_prio(p->prio);
if (task_pri >= MAX_RT_PRIO)
return 0;
@@ -137,9 +137,9 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
*/
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
- int *currpri = &cp->cpu_to_pri[cpu];
- int oldpri = *currpri;
- int do_mb = 0;
+ int *currpri = &cp->cpu_to_pri[cpu];
+ int oldpri = *currpri;
+ int do_mb = 0;
newpri = convert_prio(newpri);
diff --git a/trunk/kernel/sched/fair.c b/trunk/kernel/sched/fair.c
index d0cc03b3e70b..22321db64952 100644
--- a/trunk/kernel/sched/fair.c
+++ b/trunk/kernel/sched/fair.c
@@ -3069,9 +3069,6 @@ struct lb_env {
int new_dst_cpu;
enum cpu_idle_type idle;
long imbalance;
- /* The set of CPUs under consideration for load-balancing */
- struct cpumask *cpus;
-
unsigned int flags;
unsigned int loop;
@@ -3656,7 +3653,8 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
*/
static inline void update_sg_lb_stats(struct lb_env *env,
struct sched_group *group, int load_idx,
- int local_group, int *balance, struct sg_lb_stats *sgs)
+ int local_group, const struct cpumask *cpus,
+ int *balance, struct sg_lb_stats *sgs)
{
unsigned long nr_running, max_nr_running, min_nr_running;
unsigned long load, max_cpu_load, min_cpu_load;
@@ -3673,7 +3671,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
max_nr_running = 0;
min_nr_running = ~0UL;
- for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
+ for_each_cpu_and(i, sched_group_cpus(group), cpus) {
struct rq *rq = cpu_rq(i);
nr_running = rq->nr_running;
@@ -3802,7 +3800,8 @@ static bool update_sd_pick_busiest(struct lb_env *env,
* @sds: variable to hold the statistics for this sched_domain.
*/
static inline void update_sd_lb_stats(struct lb_env *env,
- int *balance, struct sd_lb_stats *sds)
+ const struct cpumask *cpus,
+ int *balance, struct sd_lb_stats *sds)
{
struct sched_domain *child = env->sd->child;
struct sched_group *sg = env->sd->groups;
@@ -3819,7 +3818,8 @@ static inline void update_sd_lb_stats(struct lb_env *env,
local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
memset(&sgs, 0, sizeof(sgs));
- update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
+ update_sg_lb_stats(env, sg, load_idx, local_group,
+ cpus, balance, &sgs);
if (local_group && !(*balance))
return;
@@ -4055,6 +4055,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* to restore balance.
*
* @env: The load balancing environment.
+ * @cpus: The set of CPUs under consideration for load-balancing.
* @balance: Pointer to a variable indicating if this_cpu
* is the appropriate cpu to perform load balancing at this_level.
*
@@ -4064,7 +4065,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* put to idle by rebalancing its tasks onto our group.
*/
static struct sched_group *
-find_busiest_group(struct lb_env *env, int *balance)
+find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
{
struct sd_lb_stats sds;
@@ -4074,7 +4075,7 @@ find_busiest_group(struct lb_env *env, int *balance)
* Compute the various statistics relavent for load balancing at
* this level.
*/
- update_sd_lb_stats(env, balance, &sds);
+ update_sd_lb_stats(env, cpus, balance, &sds);
/*
* this_cpu is not the appropriate cpu to perform load balancing at
@@ -4154,7 +4155,8 @@ find_busiest_group(struct lb_env *env, int *balance)
* find_busiest_queue - find the busiest runqueue among the cpus in group.
*/
static struct rq *find_busiest_queue(struct lb_env *env,
- struct sched_group *group)
+ struct sched_group *group,
+ const struct cpumask *cpus)
{
struct rq *busiest = NULL, *rq;
unsigned long max_load = 0;
@@ -4169,7 +4171,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
if (!capacity)
capacity = fix_small_capacity(env->sd, group);
- if (!cpumask_test_cpu(i, env->cpus))
+ if (!cpumask_test_cpu(i, cpus))
continue;
rq = cpu_rq(i);
@@ -4250,7 +4252,6 @@ static int load_balance(int this_cpu, struct rq *this_rq,
.dst_grpmask = sched_group_cpus(sd->groups),
.idle = idle,
.loop_break = sched_nr_migrate_break,
- .cpus = cpus,
};
cpumask_copy(cpus, cpu_active_mask);
@@ -4259,7 +4260,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
schedstat_inc(sd, lb_count[idle]);
redo:
- group = find_busiest_group(&env, balance);
+ group = find_busiest_group(&env, cpus, balance);
if (*balance == 0)
goto out_balanced;
@@ -4269,7 +4270,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
goto out_balanced;
}
- busiest = find_busiest_queue(&env, group);
+ busiest = find_busiest_queue(&env, group, cpus);
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[idle]);
goto out_balanced;
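The cpus mask that load_balance() builds from cpu_active_mask is now passed down explicitly instead of living in struct lb_env, and the helpers only ever read it. A sketch of that read-only use, with the same iterator shape the hunks above keep (variable names as in the kernel source):

	/* statistics pass: visit only CPUs in both the group and the mask */
	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
		struct rq *rq = cpu_rq(i);
		/* ... accumulate rq load into sgs ... */
	}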
diff --git a/trunk/kernel/time/jiffies.c b/trunk/kernel/time/jiffies.c
index 46da0537c10b..a470154e0408 100644
--- a/trunk/kernel/time/jiffies.c
+++ b/trunk/kernel/time/jiffies.c
@@ -37,7 +37,7 @@
* requested HZ value. It is also not recommended
* for "tick-less" systems.
*/
-#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/SHIFTED_HZ))
+#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ))
/* Since jiffies uses a simple NSEC_PER_JIFFY multiplier
* conversion, the .shift value could be zero. However
diff --git a/trunk/kernel/time/ntp.c b/trunk/kernel/time/ntp.c
index 24174b4d669b..b7fbadc5c973 100644
--- a/trunk/kernel/time/ntp.c
+++ b/trunk/kernel/time/ntp.c
@@ -28,7 +28,7 @@ DEFINE_SPINLOCK(ntp_lock);
/* USER_HZ period (usecs): */
unsigned long tick_usec = TICK_USEC;
-/* SHIFTED_HZ period (nsecs): */
+/* ACTHZ period (nsecs): */
unsigned long tick_nsec;
static u64 tick_length;
diff --git a/trunk/kernel/time/timekeeping.c b/trunk/kernel/time/timekeeping.c
index e16af197a2bc..f045cc50832d 100644
--- a/trunk/kernel/time/timekeeping.c
+++ b/trunk/kernel/time/timekeeping.c
@@ -65,14 +65,14 @@ struct timekeeper {
* used instead.
*/
struct timespec wall_to_monotonic;
- /* Offset clock monotonic -> clock realtime */
- ktime_t offs_real;
/* time spent in suspend */
struct timespec total_sleep_time;
- /* Offset clock monotonic -> clock boottime */
- ktime_t offs_boot;
/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
struct timespec raw_time;
+ /* Offset clock monotonic -> clock realtime */
+ ktime_t offs_real;
+ /* Offset clock monotonic -> clock boottime */
+ ktime_t offs_boot;
/* Seqlock for all timekeeper values */
seqlock_t lock;
};
@@ -108,38 +108,13 @@ static struct timespec tk_xtime(struct timekeeper *tk)
static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
{
tk->xtime_sec = ts->tv_sec;
- tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
+ tk->xtime_nsec = ts->tv_nsec << tk->shift;
}
static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
{
tk->xtime_sec += ts->tv_sec;
- tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
-}
-
-static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
-{
- struct timespec tmp;
-
- /*
- * Verify consistency of: offset_real = -wall_to_monotonic
- * before modifying anything
- */
- set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
- -tk->wall_to_monotonic.tv_nsec);
- WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
- tk->wall_to_monotonic = wtm;
- set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
- tk->offs_real = timespec_to_ktime(tmp);
-}
-
-static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
-{
- /* Verify consistency before modifying */
- WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);
-
- tk->total_sleep_time = t;
- tk->offs_boot = timespec_to_ktime(t);
+ tk->xtime_nsec += ts->tv_nsec << tk->shift;
}
/**
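The helpers deleted here existed to preserve the invariants offs_real == -wall_to_monotonic and offs_boot == total_sleep_time; the revert goes back to recomputing offs_real in update_rt_offset(), added further down. The negation itself relies on set_normalized_timespec() handling the nanosecond borrow, e.g.:

	struct timespec tmp;

	/*
	 * Negating {5, 300000000} (5.3 s) yields {-6, 700000000}, i.e.
	 * -5.3 s in normalized form, so offs_real stays consistent.
	 */
	set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
				-tk->wall_to_monotonic.tv_nsec);
	tk->offs_real = timespec_to_ktime(tmp);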
@@ -242,6 +217,14 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
return nsec + arch_gettimeoffset();
}
+static void update_rt_offset(struct timekeeper *tk)
+{
+ struct timespec tmp, *wtm = &tk->wall_to_monotonic;
+
+ set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
+ tk->offs_real = timespec_to_ktime(tmp);
+}
+
/* must hold write on timekeeper.lock */
static void timekeeping_update(struct timekeeper *tk, bool clearntp)
{
@@ -251,10 +234,12 @@ static void timekeeping_update(struct timekeeper *tk, bool clearntp)
tk->ntp_error = 0;
ntp_clear();
}
+ update_rt_offset(tk);
xt = tk_xtime(tk);
update_vsyscall(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
}
+
/**
* timekeeping_forward_now - update clock to the current time
*
@@ -292,19 +277,18 @@ static void timekeeping_forward_now(struct timekeeper *tk)
*/
void getnstimeofday(struct timespec *ts)
{
- struct timekeeper *tk = &timekeeper;
unsigned long seq;
s64 nsecs = 0;
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqbegin(&timekeeper.lock);
- ts->tv_sec = tk->xtime_sec;
- ts->tv_nsec = timekeeping_get_ns(tk);
+ ts->tv_sec = timekeeper.xtime_sec;
+ ts->tv_nsec = timekeeping_get_ns(&timekeeper);
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqretry(&timekeeper.lock, seq));
timespec_add_ns(ts, nsecs);
}
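All of the readers converted in this file follow the same lockless seqlock pattern: snapshot the sequence, copy the values, retry if a writer ran in between. A self-contained sketch of the pattern with made-up names:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(example_lock);
static u64 example_value;

static u64 example_read(void)
{
	unsigned long seq;
	u64 val;

	do {
		seq = read_seqbegin(&example_lock);	/* snapshot sequence  */
		val = example_value;			/* copy shared data   */
	} while (read_seqretry(&example_lock, seq));	/* retry on collision */

	return val;
}

static void example_write(u64 val)
{
	unsigned long flags;

	write_seqlock_irqsave(&example_lock, flags);
	example_value = val;
	write_sequnlock_irqrestore(&example_lock, flags);
}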
@@ -312,18 +296,19 @@ EXPORT_SYMBOL(getnstimeofday);
ktime_t ktime_get(void)
{
- struct timekeeper *tk = &timekeeper;
unsigned int seq;
s64 secs, nsecs;
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&tk->lock);
- secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
- nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;
+ seq = read_seqbegin(&timekeeper.lock);
+ secs = timekeeper.xtime_sec +
+ timekeeper.wall_to_monotonic.tv_sec;
+ nsecs = timekeeping_get_ns(&timekeeper) +
+ timekeeper.wall_to_monotonic.tv_nsec;
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqretry(&timekeeper.lock, seq));
/*
* Use ktime_set/ktime_add_ns to create a proper ktime on
* 32-bit architectures without CONFIG_KTIME_SCALAR.
@@ -342,19 +327,18 @@ EXPORT_SYMBOL_GPL(ktime_get);
*/
void ktime_get_ts(struct timespec *ts)
{
- struct timekeeper *tk = &timekeeper;
struct timespec tomono;
unsigned int seq;
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&tk->lock);
- ts->tv_sec = tk->xtime_sec;
- ts->tv_nsec = timekeeping_get_ns(tk);
- tomono = tk->wall_to_monotonic;
+ seq = read_seqbegin(&timekeeper.lock);
+ ts->tv_sec = timekeeper.xtime_sec;
+ ts->tv_nsec = timekeeping_get_ns(&timekeeper);
+ tomono = timekeeper.wall_to_monotonic;
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqretry(&timekeeper.lock, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
ts->tv_nsec + tomono.tv_nsec);
@@ -374,23 +358,22 @@ EXPORT_SYMBOL_GPL(ktime_get_ts);
*/
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
- struct timekeeper *tk = &timekeeper;
unsigned long seq;
s64 nsecs_raw, nsecs_real;
WARN_ON_ONCE(timekeeping_suspended);
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqbegin(&timekeeper.lock);
- *ts_raw = tk->raw_time;
- ts_real->tv_sec = tk->xtime_sec;
+ *ts_raw = timekeeper.raw_time;
+ ts_real->tv_sec = timekeeper.xtime_sec;
ts_real->tv_nsec = 0;
- nsecs_raw = timekeeping_get_ns_raw(tk);
- nsecs_real = timekeeping_get_ns(tk);
+ nsecs_raw = timekeeping_get_ns_raw(&timekeeper);
+ nsecs_real = timekeeping_get_ns(&timekeeper);
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqretry(&timekeeper.lock, seq));
timespec_add_ns(ts_raw, nsecs_raw);
timespec_add_ns(ts_real, nsecs_real);
@@ -423,28 +406,28 @@ EXPORT_SYMBOL(do_gettimeofday);
*/
int do_settimeofday(const struct timespec *tv)
{
- struct timekeeper *tk = &timekeeper;
struct timespec ts_delta, xt;
unsigned long flags;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&tk->lock, flags);
+ write_seqlock_irqsave(&timekeeper.lock, flags);
- timekeeping_forward_now(tk);
+ timekeeping_forward_now(&timekeeper);
- xt = tk_xtime(tk);
+ xt = tk_xtime(&timekeeper);
ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;
- tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));
+ timekeeper.wall_to_monotonic =
+ timespec_sub(timekeeper.wall_to_monotonic, ts_delta);
- tk_set_xtime(tk, tv);
+ tk_set_xtime(&timekeeper, tv);
- timekeeping_update(tk, true);
+ timekeeping_update(&timekeeper, true);
- write_sequnlock_irqrestore(&tk->lock, flags);
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -453,6 +436,7 @@ int do_settimeofday(const struct timespec *tv)
}
EXPORT_SYMBOL(do_settimeofday);
+
/**
* timekeeping_inject_offset - Adds or subtracts from the current time.
* @tv: pointer to the timespec variable containing the offset
@@ -461,23 +445,23 @@ EXPORT_SYMBOL(do_settimeofday);
*/
int timekeeping_inject_offset(struct timespec *ts)
{
- struct timekeeper *tk = &timekeeper;
unsigned long flags;
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&tk->lock, flags);
+ write_seqlock_irqsave(&timekeeper.lock, flags);
- timekeeping_forward_now(tk);
+ timekeeping_forward_now(&timekeeper);
- tk_xtime_add(tk, ts);
- tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));
+ tk_xtime_add(&timekeeper, ts);
+ timekeeper.wall_to_monotonic =
+ timespec_sub(timekeeper.wall_to_monotonic, *ts);
- timekeeping_update(tk, true);
+ timekeeping_update(&timekeeper, true);
- write_sequnlock_irqrestore(&tk->lock, flags);
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -493,24 +477,23 @@ EXPORT_SYMBOL(timekeeping_inject_offset);
*/
static int change_clocksource(void *data)
{
- struct timekeeper *tk = &timekeeper;
struct clocksource *new, *old;
unsigned long flags;
new = (struct clocksource *) data;
- write_seqlock_irqsave(&tk->lock, flags);
+ write_seqlock_irqsave(&timekeeper.lock, flags);
- timekeeping_forward_now(tk);
+ timekeeping_forward_now(&timekeeper);
if (!new->enable || new->enable(new) == 0) {
- old = tk->clock;
- tk_setup_internals(tk, new);
+ old = timekeeper.clock;
+ tk_setup_internals(&timekeeper, new);
if (old->disable)
old->disable(old);
}
- timekeeping_update(tk, true);
+ timekeeping_update(&timekeeper, true);
- write_sequnlock_irqrestore(&tk->lock, flags);
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
return 0;
}
@@ -524,9 +507,7 @@ static int change_clocksource(void *data)
*/
void timekeeping_notify(struct clocksource *clock)
{
- struct timekeeper *tk = &timekeeper;
-
- if (tk->clock == clock)
+ if (timekeeper.clock == clock)
return;
stop_machine(change_clocksource, clock, NULL);
tick_clock_notify();
@@ -555,36 +536,35 @@ EXPORT_SYMBOL_GPL(ktime_get_real);
*/
void getrawmonotonic(struct timespec *ts)
{
- struct timekeeper *tk = &timekeeper;
unsigned long seq;
s64 nsecs;
do {
- seq = read_seqbegin(&tk->lock);
- nsecs = timekeeping_get_ns_raw(tk);
- *ts = tk->raw_time;
+ seq = read_seqbegin(&timekeeper.lock);
+ nsecs = timekeeping_get_ns_raw(&timekeeper);
+ *ts = timekeeper.raw_time;
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqretry(&timekeeper.lock, seq));
timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);
+
/**
* timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
*/
int timekeeping_valid_for_hres(void)
{
- struct timekeeper *tk = &timekeeper;
unsigned long seq;
int ret;
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqbegin(&timekeeper.lock);
- ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
+ ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqretry(&timekeeper.lock, seq));
return ret;
}
@@ -594,16 +574,15 @@ int timekeeping_valid_for_hres(void)
*/
u64 timekeeping_max_deferment(void)
{
- struct timekeeper *tk = &timekeeper;
unsigned long seq;
u64 ret;
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqbegin(&timekeeper.lock);
- ret = tk->clock->max_idle_ns;
+ ret = timekeeper.clock->max_idle_ns;
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqretry(&timekeeper.lock, seq));
return ret;
}
@@ -643,43 +622,46 @@ void __attribute__((weak)) read_boot_clock(struct timespec *ts)
*/
void __init timekeeping_init(void)
{
- struct timekeeper *tk = &timekeeper;
struct clocksource *clock;
unsigned long flags;
- struct timespec now, boot, tmp;
+ struct timespec now, boot;
read_persistent_clock(&now);
read_boot_clock(&boot);
- seqlock_init(&tk->lock);
+ seqlock_init(&timekeeper.lock);
ntp_init();
- write_seqlock_irqsave(&tk->lock, flags);
+ write_seqlock_irqsave(&timekeeper.lock, flags);
clock = clocksource_default_clock();
if (clock->enable)
clock->enable(clock);
- tk_setup_internals(tk, clock);
+ tk_setup_internals(&timekeeper, clock);
- tk_set_xtime(tk, &now);
- tk->raw_time.tv_sec = 0;
- tk->raw_time.tv_nsec = 0;
+ tk_set_xtime(&timekeeper, &now);
+ timekeeper.raw_time.tv_sec = 0;
+ timekeeper.raw_time.tv_nsec = 0;
if (boot.tv_sec == 0 && boot.tv_nsec == 0)
- boot = tk_xtime(tk);
-
- set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
- tk_set_wall_to_mono(tk, tmp);
-
- tmp.tv_sec = 0;
- tmp.tv_nsec = 0;
- tk_set_sleep_time(tk, tmp);
-
- write_sequnlock_irqrestore(&tk->lock, flags);
+ boot = tk_xtime(&timekeeper);
+
+ set_normalized_timespec(&timekeeper.wall_to_monotonic,
+ -boot.tv_sec, -boot.tv_nsec);
+ update_rt_offset(&timekeeper);
+ timekeeper.total_sleep_time.tv_sec = 0;
+ timekeeper.total_sleep_time.tv_nsec = 0;
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
}
/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;
+static void update_sleep_time(struct timespec t)
+{
+ timekeeper.total_sleep_time = t;
+ timekeeper.offs_boot = timespec_to_ktime(t);
+}
+
/**
* __timekeeping_inject_sleeptime - Internal function to add sleep interval
* @delta: pointer to a timespec delta value
@@ -695,11 +677,13 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
"sleep delta value!\n");
return;
}
+
tk_xtime_add(tk, delta);
- tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
- tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
+ tk->wall_to_monotonic = timespec_sub(tk->wall_to_monotonic, *delta);
+ update_sleep_time(timespec_add(tk->total_sleep_time, *delta));
}
+
/**
* timekeeping_inject_sleeptime - Adds suspend interval to timeekeeping values
* @delta: pointer to a timespec delta value
@@ -712,7 +696,6 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
*/
void timekeeping_inject_sleeptime(struct timespec *delta)
{
- struct timekeeper *tk = &timekeeper;
unsigned long flags;
struct timespec ts;
@@ -721,20 +704,21 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
return;
- write_seqlock_irqsave(&tk->lock, flags);
+ write_seqlock_irqsave(&timekeeper.lock, flags);
- timekeeping_forward_now(tk);
+ timekeeping_forward_now(&timekeeper);
- __timekeeping_inject_sleeptime(tk, delta);
+ __timekeeping_inject_sleeptime(&timekeeper, delta);
- timekeeping_update(tk, true);
+ timekeeping_update(&timekeeper, true);
- write_sequnlock_irqrestore(&tk->lock, flags);
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
/* signal hrtimers about time change */
clock_was_set();
}
+
/**
* timekeeping_resume - Resumes the generic timekeeping subsystem.
*
@@ -744,7 +728,6 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
*/
static void timekeeping_resume(void)
{
- struct timekeeper *tk = &timekeeper;
unsigned long flags;
struct timespec ts;
@@ -752,18 +735,18 @@ static void timekeeping_resume(void)
clocksource_resume();
- write_seqlock_irqsave(&tk->lock, flags);
+ write_seqlock_irqsave(&timekeeper.lock, flags);
if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
ts = timespec_sub(ts, timekeeping_suspend_time);
- __timekeeping_inject_sleeptime(tk, &ts);
+ __timekeeping_inject_sleeptime(&timekeeper, &ts);
}
/* re-base the last cycle value */
- tk->clock->cycle_last = tk->clock->read(tk->clock);
- tk->ntp_error = 0;
+ timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
+ timekeeper.ntp_error = 0;
timekeeping_suspended = 0;
- timekeeping_update(tk, false);
- write_sequnlock_irqrestore(&tk->lock, flags);
+ timekeeping_update(&timekeeper, false);
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
touch_softlockup_watchdog();
@@ -775,15 +758,14 @@ static void timekeeping_resume(void)
static int timekeeping_suspend(void)
{
- struct timekeeper *tk = &timekeeper;
unsigned long flags;
struct timespec delta, delta_delta;
static struct timespec old_delta;
read_persistent_clock(&timekeeping_suspend_time);
- write_seqlock_irqsave(&tk->lock, flags);
- timekeeping_forward_now(tk);
+ write_seqlock_irqsave(&timekeeper.lock, flags);
+ timekeeping_forward_now(&timekeeper);
timekeeping_suspended = 1;
/*
@@ -792,7 +774,7 @@ static int timekeeping_suspend(void)
* try to compensate so the difference in system time
* and persistent_clock time stays close to constant.
*/
- delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
+ delta = timespec_sub(tk_xtime(&timekeeper), timekeeping_suspend_time);
delta_delta = timespec_sub(delta, old_delta);
if (abs(delta_delta.tv_sec) >= 2) {
/*
@@ -805,7 +787,7 @@ static int timekeeping_suspend(void)
timekeeping_suspend_time =
timespec_add(timekeeping_suspend_time, delta_delta);
}
- write_sequnlock_irqrestore(&tk->lock, flags);
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
clocksource_suspend();
@@ -916,29 +898,27 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
* the error. This causes the likely below to be unlikely.
*
* The proper fix is to avoid rounding up by using
- * the high precision tk->xtime_nsec instead of
+ * the high precision timekeeper.xtime_nsec instead of
* xtime.tv_nsec everywhere. Fixing this will take some
* time.
*/
if (likely(error <= interval))
adj = 1;
else
- adj = timekeeping_bigadjust(tk, error, &interval, &offset);
- } else {
- if (error < -interval) {
- /* See comment above, this is just switched for the negative */
- error >>= 2;
- if (likely(error >= -interval)) {
- adj = -1;
- interval = -interval;
- offset = -offset;
- } else {
- adj = timekeeping_bigadjust(tk, error, &interval, &offset);
- }
- } else {
- goto out_adjust;
- }
- }
+ adj = timekeeping_bigadjust(tk, error, &interval,
+ &offset);
+ } else if (error < -interval) {
+ /* See comment above, this is just switched for the negative */
+ error >>= 2;
+ if (likely(error >= -interval)) {
+ adj = -1;
+ interval = -interval;
+ offset = -offset;
+ } else
+ adj = timekeeping_bigadjust(tk, error, &interval,
+ &offset);
+ } else
+ return;
if (unlikely(tk->clock->maxadj &&
(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
@@ -1001,7 +981,6 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
tk->xtime_nsec -= offset;
tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
-out_adjust:
/*
* It may be possible that when we entered this function, xtime_nsec
* was very small. Further, if we're slightly speeding the clocksource
@@ -1024,6 +1003,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
}
+
/**
* accumulate_nsecs_to_secs - Accumulates nsecs into secs
*
@@ -1044,21 +1024,15 @@ static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
/* Figure out if its a leap sec and apply if needed */
leap = second_overflow(tk->xtime_sec);
- if (unlikely(leap)) {
- struct timespec ts;
-
- tk->xtime_sec += leap;
-
- ts.tv_sec = leap;
- ts.tv_nsec = 0;
- tk_set_wall_to_mono(tk,
- timespec_sub(tk->wall_to_monotonic, ts));
-
+ tk->xtime_sec += leap;
+ tk->wall_to_monotonic.tv_sec -= leap;
+ if (leap)
clock_was_set_delayed();
- }
+
}
}
+
/**
* logarithmic_accumulation - shifted accumulation of cycles
*
@@ -1102,6 +1076,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
return offset;
}
+
/**
* update_wall_time - Uses the current clocksource to increment the wall time
*
@@ -1109,22 +1084,21 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
static void update_wall_time(void)
{
struct clocksource *clock;
- struct timekeeper *tk = &timekeeper;
cycle_t offset;
int shift = 0, maxshift;
unsigned long flags;
s64 remainder;
- write_seqlock_irqsave(&tk->lock, flags);
+ write_seqlock_irqsave(&timekeeper.lock, flags);
/* Make sure we're fully resumed: */
if (unlikely(timekeeping_suspended))
goto out;
- clock = tk->clock;
+ clock = timekeeper.clock;
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
- offset = tk->cycle_interval;
+ offset = timekeeper.cycle_interval;
#else
offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif
@@ -1137,19 +1111,19 @@ static void update_wall_time(void)
* chunk in one go, and then try to consume the next smaller
* doubled multiple.
*/
- shift = ilog2(offset) - ilog2(tk->cycle_interval);
+ shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
shift = max(0, shift);
/* Bound shift to one less than what overflows tick_length */
maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
shift = min(shift, maxshift);
- while (offset >= tk->cycle_interval) {
- offset = logarithmic_accumulation(tk, offset, shift);
- if (offset < tk->cycle_interval<<shift)
+ while (offset >= timekeeper.cycle_interval) {
+ offset = logarithmic_accumulation(&timekeeper, offset, shift);
+ if(offset < timekeeper.cycle_interval<<shift)
shift--;
}
- remainder = tk->xtime_nsec & ((1 << tk->shift) - 1);
- tk->xtime_nsec -= remainder;
- tk->xtime_nsec += 1 << tk->shift;
- tk->ntp_error += remainder << tk->ntp_error_shift;
+ remainder = timekeeper.xtime_nsec & ((1 << timekeeper.shift) - 1);
+ timekeeper.xtime_nsec -= remainder;
+ timekeeper.xtime_nsec += 1 << timekeeper.shift;
+ timekeeper.ntp_error += remainder << timekeeper.ntp_error_shift;
/*
* Finally, make sure that after the rounding
* xtime_nsec isn't larger than NSEC_PER_SEC
*/
- accumulate_nsecs_to_secs(tk);
+ accumulate_nsecs_to_secs(&timekeeper);
- timekeeping_update(tk, false);
+ timekeeping_update(&timekeeper, false);
out:
- write_sequnlock_irqrestore(&tk->lock, flags);
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
}
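The shift computed above lets update_wall_time() consume a large backlog in power-of-two chunks of cycle_interval, shrinking the chunk as the backlog shrinks. A runnable userspace sketch of just that arithmetic, with made-up interval units:

#include <stdio.h>

int main(void)
{
	unsigned long long cycle_interval = 1000000;	  /* arbitrary units   */
	unsigned long long offset = 10 * cycle_interval;  /* ~10 ticks pending */
	int shift = 3;	/* ilog2(offset) - ilog2(cycle_interval) */

	while (offset >= cycle_interval) {
		if (offset >= (cycle_interval << shift)) {
			offset -= cycle_interval << shift;
			printf("accumulated %d tick(s), %llu left\n",
			       1 << shift, offset);
		}
		if (offset < (cycle_interval << shift))
			shift--;
	}
	return 0;
}

With ten intervals pending this accumulates 8 ticks in the first pass, then 2, mirroring how logarithmic_accumulation() is driven by the loop above.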
@@ -1192,18 +1166,18 @@ static void update_wall_time(void)
*/
void getboottime(struct timespec *ts)
{
- struct timekeeper *tk = &timekeeper;
struct timespec boottime = {
- .tv_sec = tk->wall_to_monotonic.tv_sec +
- tk->total_sleep_time.tv_sec,
- .tv_nsec = tk->wall_to_monotonic.tv_nsec +
- tk->total_sleep_time.tv_nsec
+ .tv_sec = timekeeper.wall_to_monotonic.tv_sec +
+ timekeeper.total_sleep_time.tv_sec,
+ .tv_nsec = timekeeper.wall_to_monotonic.tv_nsec +
+ timekeeper.total_sleep_time.tv_nsec
};
set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);
+
/**
* get_monotonic_boottime - Returns monotonic time since boot
* @ts: pointer to the timespec to be set
@@ -1215,20 +1189,19 @@ EXPORT_SYMBOL_GPL(getboottime);
*/
void get_monotonic_boottime(struct timespec *ts)
{
- struct timekeeper *tk = &timekeeper;
struct timespec tomono, sleep;
unsigned int seq;
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&tk->lock);
- ts->tv_sec = tk->xtime_sec;
- ts->tv_nsec = timekeeping_get_ns(tk);
- tomono = tk->wall_to_monotonic;
- sleep = tk->total_sleep_time;
+ seq = read_seqbegin(&timekeeper.lock);
+ ts->tv_sec = timekeeper.xtime_sec;
+ ts->tv_nsec = timekeeping_get_ns(&timekeeper);
+ tomono = timekeeper.wall_to_monotonic;
+ sleep = timekeeper.total_sleep_time;
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqretry(&timekeeper.lock, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec);
@@ -1258,38 +1231,31 @@ EXPORT_SYMBOL_GPL(ktime_get_boottime);
*/
void monotonic_to_bootbased(struct timespec *ts)
{
- struct timekeeper *tk = &timekeeper;
-
- *ts = timespec_add(*ts, tk->total_sleep_time);
+ *ts = timespec_add(*ts, timekeeper.total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
unsigned long get_seconds(void)
{
- struct timekeeper *tk = &timekeeper;
-
- return tk->xtime_sec;
+ return timekeeper.xtime_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec __current_kernel_time(void)
{
- struct timekeeper *tk = &timekeeper;
-
- return tk_xtime(tk);
+ return tk_xtime(&timekeeper);
}
struct timespec current_kernel_time(void)
{
- struct timekeeper *tk = &timekeeper;
struct timespec now;
unsigned long seq;
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqbegin(&timekeeper.lock);
- now = tk_xtime(tk);
- } while (read_seqretry(&tk->lock, seq));
+ now = tk_xtime(&timekeeper);
+ } while (read_seqretry(&timekeeper.lock, seq));
return now;
}
@@ -1297,16 +1263,15 @@ EXPORT_SYMBOL(current_kernel_time);
struct timespec get_monotonic_coarse(void)
{
- struct timekeeper *tk = &timekeeper;
struct timespec now, mono;
unsigned long seq;
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqbegin(&timekeeper.lock);
- now = tk_xtime(tk);
- mono = tk->wall_to_monotonic;
- } while (read_seqretry(&tk->lock, seq));
+ now = tk_xtime(&timekeeper);
+ mono = timekeeper.wall_to_monotonic;
+ } while (read_seqretry(&timekeeper.lock, seq));
set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
now.tv_nsec + mono.tv_nsec);
@@ -1335,15 +1300,14 @@ void do_timer(unsigned long ticks)
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
struct timespec *wtom, struct timespec *sleep)
{
- struct timekeeper *tk = &timekeeper;
unsigned long seq;
do {
- seq = read_seqbegin(&tk->lock);
- *xtim = tk_xtime(tk);
- *wtom = tk->wall_to_monotonic;
- *sleep = tk->total_sleep_time;
- } while (read_seqretry(&tk->lock, seq));
+ seq = read_seqbegin(&timekeeper.lock);
+ *xtim = tk_xtime(&timekeeper);
+ *wtom = timekeeper.wall_to_monotonic;
+ *sleep = timekeeper.total_sleep_time;
+ } while (read_seqretry(&timekeeper.lock, seq));
}
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -1357,20 +1321,19 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
*/
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
{
- struct timekeeper *tk = &timekeeper;
ktime_t now;
unsigned int seq;
u64 secs, nsecs;
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqbegin(&timekeeper.lock);
- secs = tk->xtime_sec;
- nsecs = timekeeping_get_ns(tk);
+ secs = timekeeper.xtime_sec;
+ nsecs = timekeeping_get_ns(&timekeeper);
- *offs_real = tk->offs_real;
- *offs_boot = tk->offs_boot;
- } while (read_seqretry(&tk->lock, seq));
+ *offs_real = timekeeper.offs_real;
+ *offs_boot = timekeeper.offs_boot;
+ } while (read_seqretry(&timekeeper.lock, seq));
now = ktime_add_ns(ktime_set(secs, 0), nsecs);
now = ktime_sub(now, *offs_real);
@@ -1383,19 +1346,19 @@ ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
*/
ktime_t ktime_get_monotonic_offset(void)
{
- struct timekeeper *tk = &timekeeper;
unsigned long seq;
struct timespec wtom;
do {
- seq = read_seqbegin(&tk->lock);
- wtom = tk->wall_to_monotonic;
- } while (read_seqretry(&tk->lock, seq));
+ seq = read_seqbegin(&timekeeper.lock);
+ wtom = timekeeper.wall_to_monotonic;
+ } while (read_seqretry(&timekeeper.lock, seq));
return timespec_to_ktime(wtom);
}
EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
+
/**
* xtime_update() - advances the timekeeping infrastructure
* @ticks: number of ticks, that have elapsed since the last call.
diff --git a/trunk/kernel/trace/trace_event_perf.c b/trunk/kernel/trace/trace_event_perf.c
index 8a6d2ee2086c..fee3752ae8f6 100644
--- a/trunk/kernel/trace/trace_event_perf.c
+++ b/trunk/kernel/trace/trace_event_perf.c
@@ -281,7 +281,7 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip)
head = this_cpu_ptr(event_function.perf_events);
perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
- 1, &regs, head, NULL);
+ 1, &regs, head);
#undef ENTRY_SIZE
}
diff --git a/trunk/kernel/trace/trace_kprobe.c b/trunk/kernel/trace/trace_kprobe.c
index 1a2117043bb1..b31d3d5699fe 100644
--- a/trunk/kernel/trace/trace_kprobe.c
+++ b/trunk/kernel/trace/trace_kprobe.c
@@ -1002,8 +1002,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
head = this_cpu_ptr(call->perf_events);
- perf_trace_buf_submit(entry, size, rctx,
- entry->ip, 1, regs, head, NULL);
+ perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
}
/* Kretprobe profile handler */
@@ -1034,8 +1033,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
head = this_cpu_ptr(call->perf_events);
- perf_trace_buf_submit(entry, size, rctx,
- entry->ret_ip, 1, regs, head, NULL);
+ perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
}
#endif /* CONFIG_PERF_EVENTS */
diff --git a/trunk/kernel/trace/trace_syscalls.c b/trunk/kernel/trace/trace_syscalls.c
index 60e4d7875672..96fc73369099 100644
--- a/trunk/kernel/trace/trace_syscalls.c
+++ b/trunk/kernel/trace/trace_syscalls.c
@@ -532,7 +532,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
(unsigned long *)&rec->args);
head = this_cpu_ptr(sys_data->enter_event->perf_events);
- perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
+ perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}
int perf_sysenter_enable(struct ftrace_event_call *call)
@@ -608,7 +608,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
rec->ret = syscall_get_return_value(current, regs);
head = this_cpu_ptr(sys_data->exit_event->perf_events);
- perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
+ perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
}
int perf_sysexit_enable(struct ftrace_event_call *call)
diff --git a/trunk/kernel/trace/trace_uprobe.c b/trunk/kernel/trace/trace_uprobe.c
index 03003cd7dd96..2b36ac68549e 100644
--- a/trunk/kernel/trace/trace_uprobe.c
+++ b/trunk/kernel/trace/trace_uprobe.c
@@ -670,7 +670,7 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
head = this_cpu_ptr(call->perf_events);
- perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head, NULL);
+ perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
out:
preempt_enable();
diff --git a/trunk/mm/backing-dev.c b/trunk/mm/backing-dev.c
index b41823cc05e6..6b4718e2ee34 100644
--- a/trunk/mm/backing-dev.c
+++ b/trunk/mm/backing-dev.c
@@ -39,6 +39,12 @@ DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);
+static struct task_struct *sync_supers_tsk;
+static struct timer_list sync_supers_timer;
+
+static int bdi_sync_supers(void *);
+static void sync_supers_timer_fn(unsigned long);
+
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
if (wb1 < wb2) {
@@ -244,6 +250,12 @@ static int __init default_bdi_init(void)
{
int err;
+ sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
+ BUG_ON(IS_ERR(sync_supers_tsk));
+
+ setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
+ bdi_arm_supers_timer();
+
err = bdi_init(&default_backing_dev_info);
if (!err)
bdi_register(&default_backing_dev_info, NULL, "default");
@@ -258,6 +270,46 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
return wb_has_dirty_io(&bdi->wb);
}
+/*
+ * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
+ * or we risk deadlocking on ->s_umount. The longer term solution would be
+ * to implement sync_supers_bdi() or similar and simply do it from the
+ * bdi writeback thread individually.
+ */
+static int bdi_sync_supers(void *unused)
+{
+ set_user_nice(current, 0);
+
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+
+ /*
+ * Do this periodically, like kupdated() did before.
+ */
+ sync_supers();
+ }
+
+ return 0;
+}
+
+void bdi_arm_supers_timer(void)
+{
+ unsigned long next;
+
+ if (!dirty_writeback_interval)
+ return;
+
+ next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
+ mod_timer(&sync_supers_timer, round_jiffies_up(next));
+}
+
+static void sync_supers_timer_fn(unsigned long unused)
+{
+ wake_up_process(sync_supers_tsk);
+ bdi_arm_supers_timer();
+}
+
static void wakeup_timer_fn(unsigned long data)
{
struct backing_dev_info *bdi = (struct backing_dev_info *)data;
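The sync_supers machinery restored above is the classic self-re-arming timer plus kthread pattern: the timer callback wakes the thread and immediately re-arms for the next interval, and the thread sleeps until woken. A self-contained module-style sketch of the same pattern (all names are made up; this uses the setup_timer() API of this kernel generation):

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *example_tsk;
static struct timer_list example_timer;
static unsigned int example_interval_ms = 5000;

static int example_thread(void *unused)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();			/* sleep until the timer fires */
		/* periodic work would go here */
	}
	return 0;
}

static void example_arm_timer(void)
{
	unsigned long next = jiffies + msecs_to_jiffies(example_interval_ms);

	mod_timer(&example_timer, round_jiffies_up(next));
}

static void example_timer_fn(unsigned long unused)
{
	wake_up_process(example_tsk);	/* kick the worker ...            */
	example_arm_timer();		/* ... and re-arm for next period */
}

static int __init example_init(void)
{
	example_tsk = kthread_run(example_thread, NULL, "example_sync");
	if (IS_ERR(example_tsk))
		return PTR_ERR(example_tsk);

	setup_timer(&example_timer, example_timer_fn, 0);
	example_arm_timer();
	return 0;
}

static void __exit example_exit(void)
{
	del_timer_sync(&example_timer);
	kthread_stop(example_tsk);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");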
diff --git a/trunk/mm/page-writeback.c b/trunk/mm/page-writeback.c
index 5ad5ce23c1e0..e5363f34e025 100644
--- a/trunk/mm/page-writeback.c
+++ b/trunk/mm/page-writeback.c
@@ -1532,6 +1532,7 @@ int dirty_writeback_centisecs_handler(ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
proc_dointvec(table, write, buffer, length, ppos);
+ bdi_arm_supers_timer();
return 0;
}
diff --git a/trunk/net/batman-adv/gateway_client.c b/trunk/net/batman-adv/gateway_client.c
index fc866f2e4528..b421cc49d2cd 100644
--- a/trunk/net/batman-adv/gateway_client.c
+++ b/trunk/net/batman-adv/gateway_client.c
@@ -200,11 +200,11 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
goto out;
- curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
-
- if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw)
+ if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect))
goto out;
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+
next_gw = batadv_gw_get_best_gw_node(bat_priv);
if (curr_gw == next_gw)
diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c
index f91abf800161..0cb3fe8d8e72 100644
--- a/trunk/net/core/dev.c
+++ b/trunk/net/core/dev.c
@@ -2134,9 +2134,6 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
__be16 protocol = skb->protocol;
netdev_features_t features = skb->dev->features;
- if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
- features &= ~NETIF_F_GSO_MASK;
-
if (protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
@@ -5989,7 +5986,6 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev_net_set(dev, &init_net);
dev->gso_max_size = GSO_MAX_SIZE;
- dev->gso_max_segs = GSO_MAX_SEGS;
INIT_LIST_HEAD(&dev->napi_list);
INIT_LIST_HEAD(&dev->unreg_list);
diff --git a/trunk/net/core/sock.c b/trunk/net/core/sock.c
index 8f67ced8d6a8..6b654b3ddfda 100644
--- a/trunk/net/core/sock.c
+++ b/trunk/net/core/sock.c
@@ -1458,7 +1458,6 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
} else {
sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
sk->sk_gso_max_size = dst->dev->gso_max_size;
- sk->sk_gso_max_segs = dst->dev->gso_max_segs;
}
}
}
diff --git a/trunk/net/ipv4/ip_output.c b/trunk/net/ipv4/ip_output.c
index 76dde25fb9a0..ba39a52d18c1 100644
--- a/trunk/net/ipv4/ip_output.c
+++ b/trunk/net/ipv4/ip_output.c
@@ -197,7 +197,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
- if (!IS_ERR(neigh)) {
+ if (neigh) {
int res = dst_neigh_output(dst, neigh, skb);
rcu_read_unlock_bh();
diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c
index e4ba974f143c..c035251beb07 100644
--- a/trunk/net/ipv4/route.c
+++ b/trunk/net/ipv4/route.c
@@ -70,6 +70,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -79,6 +80,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -86,9 +88,11 @@
#include
#include
#include
+#include
#include
#include
#include
+#include
#include
#include
#include
diff --git a/trunk/net/ipv4/tcp.c b/trunk/net/ipv4/tcp.c
index 2109ff4a1daf..e7e6eeae49c0 100644
--- a/trunk/net/ipv4/tcp.c
+++ b/trunk/net/ipv4/tcp.c
@@ -811,9 +811,7 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
old_size_goal + mss_now > xmit_size_goal)) {
xmit_size_goal = old_size_goal;
} else {
- tp->xmit_size_goal_segs =
- min_t(u16, xmit_size_goal / mss_now,
- sk->sk_gso_max_segs);
+ tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
}
}
diff --git a/trunk/net/ipv4/tcp_cong.c b/trunk/net/ipv4/tcp_cong.c
index 1432cdb0644c..4d4db16e336e 100644
--- a/trunk/net/ipv4/tcp_cong.c
+++ b/trunk/net/ipv4/tcp_cong.c
@@ -291,8 +291,7 @@ bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
left = tp->snd_cwnd - in_flight;
if (sk_can_gso(sk) &&
left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
- left * tp->mss_cache < sk->sk_gso_max_size &&
- left < sk->sk_gso_max_segs)
+ left * tp->mss_cache < sk->sk_gso_max_size)
return true;
return left <= tcp_max_tso_deferred_mss(tp);
}
diff --git a/trunk/net/ipv4/tcp_input.c b/trunk/net/ipv4/tcp_input.c
index 85308b90df80..2fd2bc9e3c64 100644
--- a/trunk/net/ipv4/tcp_input.c
+++ b/trunk/net/ipv4/tcp_input.c
@@ -5392,8 +5392,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
{
struct tcp_sock *tp = tcp_sk(sk);
- if (unlikely(sk->sk_rx_dst == NULL))
- inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
/*
* Header prediction.
* The code loosely follows the one in the famous
@@ -5607,7 +5605,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
tcp_set_state(sk, TCP_ESTABLISHED);
if (skb != NULL) {
- icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
+ inet_sk_rx_dst_set(sk, skb);
security_inet_conn_established(sk, skb);
}
diff --git a/trunk/net/ipv4/tcp_ipv4.c b/trunk/net/ipv4/tcp_ipv4.c
index 272241f16fcb..42b2a6a73092 100644
--- a/trunk/net/ipv4/tcp_ipv4.c
+++ b/trunk/net/ipv4/tcp_ipv4.c
@@ -1627,6 +1627,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
sk->sk_rx_dst = NULL;
}
}
+ if (unlikely(sk->sk_rx_dst == NULL))
+ inet_sk_rx_dst_set(sk, skb);
+
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
goto reset;
@@ -1869,20 +1872,10 @@ static struct timewait_sock_ops tcp_timewait_sock_ops = {
.twsk_destructor= tcp_twsk_destructor,
};
-static void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
-{
- struct dst_entry *dst = skb_dst(skb);
-
- dst_hold(dst);
- sk->sk_rx_dst = dst;
- inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
-}
-
const struct inet_connection_sock_af_ops ipv4_specific = {
.queue_xmit = ip_queue_xmit,
.send_check = tcp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
- .sk_rx_dst_set = inet_sk_rx_dst_set,
.conn_request = tcp_v4_conn_request,
.syn_recv_sock = tcp_v4_syn_recv_sock,
.net_header_len = sizeof(struct iphdr),
diff --git a/trunk/net/ipv4/tcp_minisocks.c b/trunk/net/ipv4/tcp_minisocks.c
index d9c9dcef2de3..232a90c3ec86 100644
--- a/trunk/net/ipv4/tcp_minisocks.c
+++ b/trunk/net/ipv4/tcp_minisocks.c
@@ -387,7 +387,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
struct tcp_sock *oldtp = tcp_sk(sk);
struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
- newicsk->icsk_af_ops->sk_rx_dst_set(newsk, skb);
+ inet_sk_rx_dst_set(newsk, skb);
/* TCP Cookie Transactions require space for the cookie pair,
* as it differs for each connection. There is no need to
diff --git a/trunk/net/ipv4/tcp_output.c b/trunk/net/ipv4/tcp_output.c
index 20dfd892c86f..3f1bcff0b10b 100644
--- a/trunk/net/ipv4/tcp_output.c
+++ b/trunk/net/ipv4/tcp_output.c
@@ -940,7 +940,7 @@ void __init tcp_tasklet_init(void)
* We cant xmit new skbs from this context, as we might already
* hold qdisc lock.
*/
-static void tcp_wfree(struct sk_buff *skb)
+void tcp_wfree(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct tcp_sock *tp = tcp_sk(sk);
@@ -1522,21 +1522,21 @@ static void tcp_cwnd_validate(struct sock *sk)
* when we would be allowed to send the split-due-to-Nagle skb fully.
*/
static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
- unsigned int mss_now, unsigned int max_segs)
+ unsigned int mss_now, unsigned int cwnd)
{
const struct tcp_sock *tp = tcp_sk(sk);
- u32 needed, window, max_len;
+ u32 needed, window, cwnd_len;
window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
- max_len = mss_now * max_segs;
+ cwnd_len = mss_now * cwnd;
- if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
- return max_len;
+ if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
+ return cwnd_len;
needed = min(skb->len, window);
- if (max_len <= needed)
- return max_len;
+ if (cwnd_len <= needed)
+ return cwnd_len;
return needed - needed % mss_now;
}
@@ -1765,8 +1765,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
limit = min(send_win, cong_win);
/* If a full-sized TSO skb can be sent, do it. */
- if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
- sk->sk_gso_max_segs * tp->mss_cache))
+ if (limit >= sk->sk_gso_max_size)
goto send_now;
/* Middle in queue won't get any more data, full sendable already? */
@@ -2000,9 +1999,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
limit = mss_now;
if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
- min_t(unsigned int,
- cwnd_quota,
- sk->sk_gso_max_segs));
+ cwnd_quota);
if (skb->len > limit &&
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
diff --git a/trunk/net/ipv6/tcp_ipv6.c b/trunk/net/ipv6/tcp_ipv6.c
index 5a439e9a4c01..c66b90f71c9b 100644
--- a/trunk/net/ipv6/tcp_ipv6.c
+++ b/trunk/net/ipv6/tcp_ipv6.c
@@ -1447,17 +1447,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
- struct dst_entry *dst = sk->sk_rx_dst;
-
sock_rps_save_rxhash(sk, skb);
- if (dst) {
- if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
- dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
- dst_release(dst);
- sk->sk_rx_dst = NULL;
- }
- }
-
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
if (opt_skb)
@@ -1715,9 +1705,9 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
struct dst_entry *dst = sk->sk_rx_dst;
struct inet_sock *icsk = inet_sk(sk);
if (dst)
- dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
+ dst = dst_check(dst, 0);
if (dst &&
- icsk->rx_dst_ifindex == skb->skb_iif)
+ icsk->rx_dst_ifindex == inet6_iif(skb))
skb_dst_set_noref(skb, dst);
}
}
@@ -1729,23 +1719,10 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
.twsk_destructor= tcp_twsk_destructor,
};
-static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
-{
- struct dst_entry *dst = skb_dst(skb);
- const struct rt6_info *rt = (const struct rt6_info *)dst;
-
- dst_hold(dst);
- sk->sk_rx_dst = dst;
- inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
- if (rt->rt6i_node)
- inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
-}
-
static const struct inet_connection_sock_af_ops ipv6_specific = {
.queue_xmit = inet6_csk_xmit,
.send_check = tcp_v6_send_check,
.rebuild_header = inet6_sk_rebuild_header,
- .sk_rx_dst_set = inet6_sk_rx_dst_set,
.conn_request = tcp_v6_conn_request,
.syn_recv_sock = tcp_v6_syn_recv_sock,
.net_header_len = sizeof(struct ipv6hdr),
diff --git a/trunk/net/llc/llc_station.c b/trunk/net/llc/llc_station.c
index 6828e39ec2ec..39a8d8924b9c 100644
--- a/trunk/net/llc/llc_station.c
+++ b/trunk/net/llc/llc_station.c
@@ -268,7 +268,7 @@ static int llc_station_ac_send_null_dsap_xid_c(struct sk_buff *skb)
out:
return rc;
free:
- kfree_skb(nskb);
+ kfree_skb(skb);
goto out;
}
@@ -293,7 +293,7 @@ static int llc_station_ac_send_xid_r(struct sk_buff *skb)
out:
return rc;
free:
- kfree_skb(nskb);
+ kfree_skb(skb);
goto out;
}
@@ -322,7 +322,7 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
out:
return rc;
free:
- kfree_skb(nskb);
+ kfree_skb(skb);
goto out;
}
diff --git a/trunk/net/mac80211/mesh.c b/trunk/net/mac80211/mesh.c
index 85572353a7e3..6fac18c0423f 100644
--- a/trunk/net/mac80211/mesh.c
+++ b/trunk/net/mac80211/mesh.c
@@ -622,7 +622,6 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
del_timer_sync(&sdata->u.mesh.housekeeping_timer);
del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
- del_timer_sync(&sdata->u.mesh.mesh_path_timer);
/*
* If the timer fired while we waited for it, it will have
* requeued the work. Now the work will be running again
@@ -635,8 +634,6 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
local->fif_other_bss--;
atomic_dec(&local->iff_allmultis);
ieee80211_configure_filter(local);
-
- sdata->u.mesh.timers_running = 0;
}
static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
diff --git a/trunk/net/mac80211/mlme.c b/trunk/net/mac80211/mlme.c
index a4a5acdbaa4d..cef0c9e79aba 100644
--- a/trunk/net/mac80211/mlme.c
+++ b/trunk/net/mac80211/mlme.c
@@ -1430,8 +1430,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
del_timer_sync(&sdata->u.mgd.timer);
del_timer_sync(&sdata->u.mgd.chswitch_timer);
-
- sdata->u.mgd.timers_running = 0;
}
void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
diff --git a/trunk/net/mac80211/scan.c b/trunk/net/mac80211/scan.c
index 839dd9737989..bcaee5d12839 100644
--- a/trunk/net/mac80211/scan.c
+++ b/trunk/net/mac80211/scan.c
@@ -299,7 +299,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
if (local->scan_req != local->int_scan_req)
cfg80211_scan_done(local->scan_req, aborted);
local->scan_req = NULL;
- rcu_assign_pointer(local->scan_sdata, NULL);
+ local->scan_sdata = NULL;
local->scanning = 0;
local->scan_channel = NULL;
@@ -984,6 +984,7 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
kfree(local->sched_scan_ies.ie[i]);
drv_sched_scan_stop(local, sdata);
+ rcu_assign_pointer(local->sched_scan_sdata, NULL);
}
out:
mutex_unlock(&local->mtx);
diff --git a/trunk/net/sched/act_gact.c b/trunk/net/sched/act_gact.c
index 05d60859d8e3..f10fb8256442 100644
--- a/trunk/net/sched/act_gact.c
+++ b/trunk/net/sched/act_gact.c
@@ -67,9 +67,6 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
struct tcf_common *pc;
int ret = 0;
int err;
-#ifdef CONFIG_GACT_PROB
- struct tc_gact_p *p_parm = NULL;
-#endif
if (nla == NULL)
return -EINVAL;
@@ -85,12 +82,6 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
#ifndef CONFIG_GACT_PROB
if (tb[TCA_GACT_PROB] != NULL)
return -EOPNOTSUPP;
-#else
- if (tb[TCA_GACT_PROB]) {
- p_parm = nla_data(tb[TCA_GACT_PROB]);
- if (p_parm->ptype >= MAX_RAND)
- return -EINVAL;
- }
#endif
pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info);
@@ -112,7 +103,8 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
spin_lock_bh(&gact->tcf_lock);
gact->tcf_action = parm->action;
#ifdef CONFIG_GACT_PROB
- if (p_parm) {
+ if (tb[TCA_GACT_PROB] != NULL) {
+ struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]);
gact->tcfg_paction = p_parm->paction;
gact->tcfg_pval = p_parm->pval;
gact->tcfg_ptype = p_parm->ptype;
@@ -141,7 +133,7 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
spin_lock(&gact->tcf_lock);
#ifdef CONFIG_GACT_PROB
- if (gact->tcfg_ptype)
+ if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL)
action = gact_rand[gact->tcfg_ptype](gact);
else
action = gact->tcf_action;
diff --git a/trunk/net/sched/act_ipt.c b/trunk/net/sched/act_ipt.c
index 58fb3c7aab9e..60e281ad0f07 100644
--- a/trunk/net/sched/act_ipt.c
+++ b/trunk/net/sched/act_ipt.c
@@ -185,12 +185,7 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
err2:
kfree(tname);
err1:
- if (ret == ACT_P_CREATED) {
- if (est)
- gen_kill_estimator(&pc->tcfc_bstats,
- &pc->tcfc_rate_est);
- kfree_rcu(pc, tcfc_rcu);
- }
+ kfree(pc);
return err;
}
diff --git a/trunk/net/sched/act_pedit.c b/trunk/net/sched/act_pedit.c
index 45c53ab067a6..26aa2f6ce257 100644
--- a/trunk/net/sched/act_pedit.c
+++ b/trunk/net/sched/act_pedit.c
@@ -74,10 +74,7 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
p = to_pedit(pc);
keys = kmalloc(ksize, GFP_KERNEL);
if (keys == NULL) {
- if (est)
- gen_kill_estimator(&pc->tcfc_bstats,
- &pc->tcfc_rate_est);
- kfree_rcu(pc, tcfc_rcu);
+ kfree(pc);
return -ENOMEM;
}
ret = ACT_P_CREATED;
diff --git a/trunk/net/sched/act_simple.c b/trunk/net/sched/act_simple.c
index 3714f60f0b3c..3922f2a2821b 100644
--- a/trunk/net/sched/act_simple.c
+++ b/trunk/net/sched/act_simple.c
@@ -131,10 +131,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
d = to_defact(pc);
ret = alloc_defdata(d, defdata);
if (ret < 0) {
- if (est)
- gen_kill_estimator(&pc->tcfc_bstats,
- &pc->tcfc_rate_est);
- kfree_rcu(pc, tcfc_rcu);
+ kfree(pc);
return ret;
}
d->tcf_action = parm->action;
diff --git a/trunk/net/wireless/reg.c b/trunk/net/wireless/reg.c
index 2ded3c7fad06..2303ee73b50a 100644
--- a/trunk/net/wireless/reg.c
+++ b/trunk/net/wireless/reg.c
@@ -680,8 +680,6 @@ static u32 map_regdom_flags(u32 rd_flags)
channel_flags |= IEEE80211_CHAN_NO_IBSS;
if (rd_flags & NL80211_RRF_DFS)
channel_flags |= IEEE80211_CHAN_RADAR;
- if (rd_flags & NL80211_RRF_NO_OFDM)
- channel_flags |= IEEE80211_CHAN_NO_OFDM;
return channel_flags;
}
@@ -903,21 +901,7 @@ static void handle_channel(struct wiphy *wiphy,
chan->max_antenna_gain = min(chan->orig_mag,
(int) MBI_TO_DBI(power_rule->max_antenna_gain));
chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
- if (chan->orig_mpwr) {
- /*
- * Devices that have their own custom regulatory domain
- * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
- * passed country IE power settings.
- */
- if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
- wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
- wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
- chan->max_power = chan->max_reg_power;
- else
- chan->max_power = min(chan->orig_mpwr,
- chan->max_reg_power);
- } else
- chan->max_power = chan->max_reg_power;
+ chan->max_power = min(chan->max_power, chan->max_reg_power);
}
static void handle_band(struct wiphy *wiphy,
@@ -1901,7 +1885,6 @@ static void restore_custom_reg_settings(struct wiphy *wiphy)
chan->flags = chan->orig_flags;
chan->max_antenna_gain = chan->orig_mag;
chan->max_power = chan->orig_mpwr;
- chan->beacon_found = false;
}
}
}
diff --git a/trunk/net/xfrm/xfrm_state.c b/trunk/net/xfrm/xfrm_state.c
index 87cd0e4d4282..5b228f97d4b3 100644
--- a/trunk/net/xfrm/xfrm_state.c
+++ b/trunk/net/xfrm/xfrm_state.c
@@ -415,17 +415,8 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me)
if (x->lft.hard_add_expires_seconds) {
long tmo = x->lft.hard_add_expires_seconds +
x->curlft.add_time - now;
- if (tmo <= 0) {
- if (x->xflags & XFRM_SOFT_EXPIRE) {
- /* enter hard expire without soft expire first?!
- * setting a new date could trigger this.
- * workarbound: fix x->curflt.add_time by below:
- */
- x->curlft.add_time = now - x->saved_tmo - 1;
- tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
- } else
- goto expired;
- }
+ if (tmo <= 0)
+ goto expired;
if (tmo < next)
next = tmo;
}
@@ -442,14 +433,10 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me)
if (x->lft.soft_add_expires_seconds) {
long tmo = x->lft.soft_add_expires_seconds +
x->curlft.add_time - now;
- if (tmo <= 0) {
+ if (tmo <= 0)
warn = 1;
- x->xflags &= ~XFRM_SOFT_EXPIRE;
- } else if (tmo < next) {
+ else if (tmo < next)
next = tmo;
- x->xflags |= XFRM_SOFT_EXPIRE;
- x->saved_tmo = tmo;
- }
}
if (x->lft.soft_use_expires_seconds) {
long tmo = x->lft.soft_use_expires_seconds +
diff --git a/trunk/sound/core/sgbuf.c b/trunk/sound/core/sgbuf.c
index d0f00356fc11..4e7ec2b49873 100644
--- a/trunk/sound/core/sgbuf.c
+++ b/trunk/sound/core/sgbuf.c
@@ -101,7 +101,7 @@ void *snd_malloc_sgbuf_pages(struct device *device,
if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device,
chunk, &tmpb) < 0) {
if (!sgbuf->pages)
- goto _failed;
+ return NULL;
if (!res_size)
goto _failed;
size = sgbuf->pages * PAGE_SIZE;
diff --git a/trunk/sound/pci/emu10k1/memory.c b/trunk/sound/pci/emu10k1/memory.c
index 0a436626182b..4f502a2bdc3c 100644
--- a/trunk/sound/pci/emu10k1/memory.c
+++ b/trunk/sound/pci/emu10k1/memory.c
@@ -326,10 +326,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
unsigned long ofs = idx << PAGE_SHIFT;
dma_addr_t addr;
- if (ofs >= runtime->dma_bytes)
- addr = emu->silent_page.addr;
- else
- addr = snd_pcm_sgbuf_get_addr(substream, ofs);
+ addr = snd_pcm_sgbuf_get_addr(substream, ofs);
if (! is_valid_page(emu, addr)) {
printk(KERN_ERR "emu: failure page = %d\n", idx);
mutex_unlock(&hdr->block_mutex);
diff --git a/trunk/sound/pci/hda/hda_auto_parser.c b/trunk/sound/pci/hda/hda_auto_parser.c
index 4f7d2dfcef7b..647218d69f68 100644
--- a/trunk/sound/pci/hda/hda_auto_parser.c
+++ b/trunk/sound/pci/hda/hda_auto_parser.c
@@ -332,12 +332,13 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
if (cfg->dig_outs)
snd_printd(" dig-out=0x%x/0x%x\n",
cfg->dig_out_pins[0], cfg->dig_out_pins[1]);
- snd_printd(" inputs:\n");
+ snd_printd(" inputs:");
for (i = 0; i < cfg->num_inputs; i++) {
- snd_printd(" %s=0x%x\n",
+ snd_printd(" %s=0x%x",
hda_get_autocfg_input_label(codec, cfg, i),
cfg->inputs[i].pin);
}
+ snd_printd("\n");
if (cfg->dig_in_pin)
snd_printd(" dig-in=0x%x\n", cfg->dig_in_pin);
diff --git a/trunk/sound/pci/hda/patch_conexant.c b/trunk/sound/pci/hda/patch_conexant.c
index 5e22a8f43d2e..14361184ae1e 100644
--- a/trunk/sound/pci/hda/patch_conexant.c
+++ b/trunk/sound/pci/hda/patch_conexant.c
@@ -2967,10 +2967,12 @@ static const char * const cxt5066_models[CXT5066_MODELS] = {
};
static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT5066_AUTO),
SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
+ SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
@@ -2986,10 +2988,14 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
+ SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T510", CXT5066_AUTO),
+ SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO),
SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
+ SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO),
+ SND_PCI_QUIRK(0x1b0a, 0x2092, "CyberpowerPC Gamer Xplorer N57001", CXT5066_AUTO),
{}
};
diff --git a/trunk/sound/pci/hda/patch_hdmi.c b/trunk/sound/pci/hda/patch_hdmi.c
index 8f23374fa642..69b928449789 100644
--- a/trunk/sound/pci/hda/patch_hdmi.c
+++ b/trunk/sound/pci/hda/patch_hdmi.c
@@ -877,6 +877,8 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
struct hdmi_eld *eld;
struct hdmi_spec_per_cvt *per_cvt = NULL;
+ hinfo->nid = 0; /* clear the leftover value */
+
/* Validate hinfo */
pin_idx = hinfo_to_pin_index(spec, hinfo);
if (snd_BUG_ON(pin_idx < 0))
@@ -1161,14 +1163,6 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
}
-static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- struct snd_pcm_substream *substream)
-{
- snd_hda_codec_cleanup_stream(codec, hinfo->nid);
- return 0;
-}
-
static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
struct hda_codec *codec,
struct snd_pcm_substream *substream)
@@ -1208,7 +1202,6 @@ static const struct hda_pcm_ops generic_ops = {
.open = hdmi_pcm_open,
.close = hdmi_pcm_close,
.prepare = generic_hdmi_playback_pcm_prepare,
- .cleanup = generic_hdmi_playback_pcm_cleanup,
};
static int generic_hdmi_build_pcms(struct hda_codec *codec)
@@ -1227,6 +1220,7 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK];
pstr->substreams = 1;
pstr->ops = generic_ops;
+ pstr->nid = 1; /* FIXME: just for avoiding a debug WARNING */
/* other pstr fields are set in open */
}
diff --git a/trunk/sound/pci/hda/patch_realtek.c b/trunk/sound/pci/hda/patch_realtek.c
index 4f81dd44c837..344b221d2102 100644
--- a/trunk/sound/pci/hda/patch_realtek.c
+++ b/trunk/sound/pci/hda/patch_realtek.c
@@ -6099,8 +6099,6 @@ static const struct alc_fixup alc269_fixups[] = {
[ALC269_FIXUP_PCM_44K] = {
.type = ALC_FIXUP_FUNC,
.v.func = alc269_fixup_pcm_44k,
- .chained = true,
- .chain_id = ALC269_FIXUP_QUANTA_MUTE
},
[ALC269_FIXUP_STEREO_DMIC] = {
.type = ALC_FIXUP_FUNC,
@@ -6208,11 +6206,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
- SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
#if 0
diff --git a/trunk/sound/soc/codecs/ab8500-codec.c b/trunk/sound/soc/codecs/ab8500-codec.c
index 23b40186f9b8..3c795921c5f6 100644
--- a/trunk/sound/soc/codecs/ab8500-codec.c
+++ b/trunk/sound/soc/codecs/ab8500-codec.c
@@ -2406,10 +2406,6 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
/* Setup AB8500 according to board-settings */
pdata = (struct ab8500_platform_data *)dev_get_platdata(dev->parent);
-
- /* Inform SoC Core that we have our own I/O arrangements. */
- codec->control_data = (void *)true;
-
status = ab8500_audio_setup_mics(codec, &pdata->codec->amics);
if (status < 0) {
pr_err("%s: Failed to setup mics (%d)!\n", __func__, status);
diff --git a/trunk/sound/soc/codecs/ad1980.c b/trunk/sound/soc/codecs/ad1980.c
index 11b1b714b8b5..8c39dddd7d00 100644
--- a/trunk/sound/soc/codecs/ad1980.c
+++ b/trunk/sound/soc/codecs/ad1980.c
@@ -186,7 +186,6 @@ static int ad1980_soc_probe(struct snd_soc_codec *codec)
printk(KERN_INFO "AD1980 SoC Audio Codec\n");
- codec->control_data = codec; /* we don't use regmap! */
ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
if (ret < 0) {
printk(KERN_ERR "ad1980: failed to register AC97 codec\n");
diff --git a/trunk/sound/soc/codecs/mc13783.c b/trunk/sound/soc/codecs/mc13783.c
index 8f726c063f42..6276e352125f 100644
--- a/trunk/sound/soc/codecs/mc13783.c
+++ b/trunk/sound/soc/codecs/mc13783.c
@@ -581,8 +581,6 @@ static int mc13783_probe(struct snd_soc_codec *codec)
{
struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
- codec->control_data = priv->mc13xxx;
-
mc13xxx_lock(priv->mc13xxx);
/* these are the reset values */
diff --git a/trunk/sound/soc/codecs/sgtl5000.c b/trunk/sound/soc/codecs/sgtl5000.c
index df2f99d1d428..8af6a5245b18 100644
--- a/trunk/sound/soc/codecs/sgtl5000.c
+++ b/trunk/sound/soc/codecs/sgtl5000.c
@@ -239,7 +239,6 @@ static const struct snd_soc_dapm_route sgtl5000_dapm_routes[] = {
{"Headphone Mux", "DAC", "DAC"}, /* dac --> hp_mux */
{"LO", NULL, "DAC"}, /* dac --> line_out */
- {"LINE_IN", NULL, "VAG_POWER"},
{"Headphone Mux", "LINE_IN", "LINE_IN"},/* line_in --> hp_mux */
{"HP", NULL, "Headphone Mux"}, /* hp_mux --> hp */
@@ -1358,6 +1357,8 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
if (ret)
goto err;
+ snd_soc_dapm_new_widgets(&codec->dapm);
+
return 0;
err:
diff --git a/trunk/sound/soc/codecs/stac9766.c b/trunk/sound/soc/codecs/stac9766.c
index 33c0f3d39c87..982e437799a8 100644
--- a/trunk/sound/soc/codecs/stac9766.c
+++ b/trunk/sound/soc/codecs/stac9766.c
@@ -340,7 +340,6 @@ static int stac9766_codec_probe(struct snd_soc_codec *codec)
printk(KERN_INFO "STAC9766 SoC Audio Codec %s\n", STAC9766_VERSION);
- codec->control_data = codec; /* we don't use regmap! */
ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
if (ret < 0)
goto codec_err;
diff --git a/trunk/sound/soc/codecs/wm8962.c b/trunk/sound/soc/codecs/wm8962.c
index aa9ce9dd7d8a..eaf65863ec21 100644
--- a/trunk/sound/soc/codecs/wm8962.c
+++ b/trunk/sound/soc/codecs/wm8962.c
@@ -2501,9 +2501,6 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec,
/* VMID 2*250k */
snd_soc_update_bits(codec, WM8962_PWR_MGMT_1,
WM8962_VMID_SEL_MASK, 0x100);
-
- if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
- msleep(100);
break;
case SND_SOC_BIAS_OFF:
diff --git a/trunk/sound/soc/codecs/wm8994.c b/trunk/sound/soc/codecs/wm8994.c
index 04ef03175c51..bb62f4b3d563 100644
--- a/trunk/sound/soc/codecs/wm8994.c
+++ b/trunk/sound/soc/codecs/wm8994.c
@@ -2649,7 +2649,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- bclk_rate = params_rate(params) * 4;
+ bclk_rate = params_rate(params) * 2;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
bclk_rate *= 16;
@@ -3253,13 +3253,10 @@ static void wm8994_mic_work(struct work_struct *work)
int ret;
int report;
- pm_runtime_get_sync(dev);
-
	ret = regmap_read(regmap, WM8994_INTERRUPT_RAW_STATUS_2, &reg);
if (ret < 0) {
dev_err(dev, "Failed to read microphone status: %d\n",
ret);
- pm_runtime_put(dev);
return;
}
@@ -3302,8 +3299,6 @@ static void wm8994_mic_work(struct work_struct *work)
snd_soc_jack_report(priv->micdet[1].jack, report,
SND_JACK_HEADSET | SND_JACK_BTN_0);
-
- pm_runtime_put(dev);
}
static irqreturn_t wm8994_mic_irq(int irq, void *data)
@@ -3426,15 +3421,12 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
int reg;
bool present;
- pm_runtime_get_sync(codec->dev);
-
mutex_lock(&wm8994->accdet_lock);
reg = snd_soc_read(codec, WM1811_JACKDET_CTRL);
if (reg < 0) {
dev_err(codec->dev, "Failed to read jack status: %d\n", reg);
mutex_unlock(&wm8994->accdet_lock);
- pm_runtime_put(codec->dev);
return IRQ_NONE;
}
@@ -3499,7 +3491,6 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
SND_JACK_MECHANICAL | SND_JACK_HEADSET |
wm8994->btn_mask);
- pm_runtime_put(codec->dev);
return IRQ_HANDLED;
}
@@ -3611,8 +3602,6 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
if (!(snd_soc_read(codec, WM8958_MIC_DETECT_1) & WM8958_MICD_ENA))
return IRQ_HANDLED;
- pm_runtime_get_sync(codec->dev);
-
/* We may occasionally read a detection without an impedence
* range being provided - if that happens loop again.
*/
@@ -3623,7 +3612,6 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
dev_err(codec->dev,
"Failed to read mic detect status: %d\n",
reg);
- pm_runtime_put(codec->dev);
return IRQ_NONE;
}
@@ -3651,7 +3639,6 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
dev_warn(codec->dev, "Accessory detection with no callback\n");
out:
- pm_runtime_put(codec->dev);
return IRQ_HANDLED;
}
diff --git a/trunk/sound/soc/codecs/wm9712.c b/trunk/sound/soc/codecs/wm9712.c
index f16fb361a4eb..099e6ec32125 100644
--- a/trunk/sound/soc/codecs/wm9712.c
+++ b/trunk/sound/soc/codecs/wm9712.c
@@ -619,7 +619,6 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
{
int ret = 0;
- codec->control_data = codec; /* we don't use regmap! */
ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
if (ret < 0) {
printk(KERN_ERR "wm9712: failed to register AC97 codec\n");
diff --git a/trunk/sound/soc/codecs/wm9713.c b/trunk/sound/soc/codecs/wm9713.c
index d0b8a3287a85..3eb19fb71d17 100644
--- a/trunk/sound/soc/codecs/wm9713.c
+++ b/trunk/sound/soc/codecs/wm9713.c
@@ -1196,7 +1196,6 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
if (wm9713 == NULL)
return -ENOMEM;
snd_soc_codec_set_drvdata(codec, wm9713);
- codec->control_data = wm9713; /* we don't use regmap! */
ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
if (ret < 0)
diff --git a/trunk/sound/soc/mxs/mxs-saif.c b/trunk/sound/soc/mxs/mxs-saif.c
index b3030718c228..aba71bfa33b1 100644
--- a/trunk/sound/soc/mxs/mxs-saif.c
+++ b/trunk/sound/soc/mxs/mxs-saif.c
@@ -394,14 +394,9 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
- struct mxs_saif *master_saif;
u32 scr, stat;
int ret;
- master_saif = mxs_saif_get_master(saif);
- if (!master_saif)
- return -EINVAL;
-
/* mclk should already be set */
if (!saif->mclk && saif->mclk_in_use) {
dev_err(cpu_dai->dev, "set mclk first\n");
@@ -425,25 +420,6 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream,
return ret;
}
- /* prepare clk in hw_param, enable in trigger */
- clk_prepare(saif->clk);
- if (saif != master_saif) {
- /*
- * Set an initial clock rate for the saif internal logic to work
- * properly. This is important when working in EXTMASTER mode
- * that uses the other saif's BITCLK&LRCLK but it still needs a
- * basic clock which should be fast enough for the internal
- * logic.
- */
- clk_enable(saif->clk);
- ret = clk_set_rate(saif->clk, 24000000);
- clk_disable(saif->clk);
- if (ret)
- return ret;
-
- clk_prepare(master_saif->clk);
- }
-
scr = __raw_readl(saif->base + SAIF_CTRL);
scr &= ~BM_SAIF_CTRL_WORD_LENGTH;
diff --git a/trunk/sound/soc/omap/omap-mcbsp.c b/trunk/sound/soc/omap/omap-mcbsp.c
index acdd3ef14e08..1046083e90a0 100644
--- a/trunk/sound/soc/omap/omap-mcbsp.c
+++ b/trunk/sound/soc/omap/omap-mcbsp.c
@@ -820,4 +820,3 @@ module_platform_driver(asoc_mcbsp_driver);
MODULE_AUTHOR("Jarkko Nikula ");
MODULE_DESCRIPTION("OMAP I2S SoC Interface");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:omap-mcbsp");
diff --git a/trunk/sound/soc/omap/omap-pcm.c b/trunk/sound/soc/omap/omap-pcm.c
index f0feb06615f8..5a649da9122a 100644
--- a/trunk/sound/soc/omap/omap-pcm.c
+++ b/trunk/sound/soc/omap/omap-pcm.c
@@ -441,4 +441,3 @@ module_platform_driver(omap_pcm_driver);
MODULE_AUTHOR("Jarkko Nikula ");
MODULE_DESCRIPTION("OMAP PCM DMA module");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:omap-pcm-audio");
diff --git a/trunk/sound/soc/soc-core.c b/trunk/sound/soc/soc-core.c
index f81c5976b961..f219b2f7ee68 100644
--- a/trunk/sound/soc/soc-core.c
+++ b/trunk/sound/soc/soc-core.c
@@ -1096,7 +1096,7 @@ static int soc_probe_codec(struct snd_soc_card *card,
}
/* If the driver didn't set I/O up try regmap */
- if (!codec->write && dev_get_regmap(codec->dev, NULL))
+ if (!codec->control_data)
snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
if (driver->controls)
diff --git a/trunk/sound/soc/tegra/tegra_alc5632.c b/trunk/sound/soc/tegra/tegra_alc5632.c
index e463529b38bb..d684df294c0c 100644
--- a/trunk/sound/soc/tegra/tegra_alc5632.c
+++ b/trunk/sound/soc/tegra/tegra_alc5632.c
@@ -177,7 +177,7 @@ static __devinit int tegra_alc5632_probe(struct platform_device *pdev)
}
alc5632->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0);
- if (alc5632->gpio_hp_det == -EPROBE_DEFER)
+ if (alc5632->gpio_hp_det == -ENODEV)
return -EPROBE_DEFER;
ret = snd_soc_of_parse_card_name(card, "nvidia,model");
diff --git a/trunk/sound/soc/tegra/tegra_wm8903.c b/trunk/sound/soc/tegra/tegra_wm8903.c
index d4f14e492341..0c5bb33d258e 100644
--- a/trunk/sound/soc/tegra/tegra_wm8903.c
+++ b/trunk/sound/soc/tegra/tegra_wm8903.c
@@ -284,27 +284,27 @@ static __devinit int tegra_wm8903_driver_probe(struct platform_device *pdev)
} else if (np) {
pdata->gpio_spkr_en = of_get_named_gpio(np,
"nvidia,spkr-en-gpios", 0);
- if (pdata->gpio_spkr_en == -EPROBE_DEFER)
+ if (pdata->gpio_spkr_en == -ENODEV)
return -EPROBE_DEFER;
pdata->gpio_hp_mute = of_get_named_gpio(np,
"nvidia,hp-mute-gpios", 0);
- if (pdata->gpio_hp_mute == -EPROBE_DEFER)
+ if (pdata->gpio_hp_mute == -ENODEV)
return -EPROBE_DEFER;
pdata->gpio_hp_det = of_get_named_gpio(np,
"nvidia,hp-det-gpios", 0);
- if (pdata->gpio_hp_det == -EPROBE_DEFER)
+ if (pdata->gpio_hp_det == -ENODEV)
return -EPROBE_DEFER;
pdata->gpio_int_mic_en = of_get_named_gpio(np,
"nvidia,int-mic-en-gpios", 0);
- if (pdata->gpio_int_mic_en == -EPROBE_DEFER)
+ if (pdata->gpio_int_mic_en == -ENODEV)
return -EPROBE_DEFER;
pdata->gpio_ext_mic_en = of_get_named_gpio(np,
"nvidia,ext-mic-en-gpios", 0);
- if (pdata->gpio_ext_mic_en == -EPROBE_DEFER)
+ if (pdata->gpio_ext_mic_en == -ENODEV)
return -EPROBE_DEFER;
}
diff --git a/trunk/sound/soc/ux500/ux500_msp_dai.c b/trunk/sound/soc/ux500/ux500_msp_dai.c
index 057e28ef770e..62ac0285bfaf 100644
--- a/trunk/sound/soc/ux500/ux500_msp_dai.c
+++ b/trunk/sound/soc/ux500/ux500_msp_dai.c
@@ -21,7 +21,7 @@
#include
#include
-#include
+#include
#include
#include
diff --git a/trunk/sound/soc/ux500/ux500_msp_i2s.c b/trunk/sound/soc/ux500/ux500_msp_i2s.c
index 5c472f335a64..ee14d2dac2f5 100644
--- a/trunk/sound/soc/ux500/ux500_msp_i2s.c
+++ b/trunk/sound/soc/ux500/ux500_msp_i2s.c
@@ -19,7 +19,7 @@
#include
#include
-#include
+#include
#include
diff --git a/trunk/sound/soc/ux500/ux500_msp_i2s.h b/trunk/sound/soc/ux500/ux500_msp_i2s.h
index 2d9136da9865..7f71b4a0d4bc 100644
--- a/trunk/sound/soc/ux500/ux500_msp_i2s.h
+++ b/trunk/sound/soc/ux500/ux500_msp_i2s.h
@@ -17,7 +17,7 @@
#include
-#include
+#include
#define MSP_INPUT_FREQ_APB 48000000
diff --git a/trunk/tools/perf/Makefile b/trunk/tools/perf/Makefile
index 35655c3a7b7a..77f124fe57ad 100644
--- a/trunk/tools/perf/Makefile
+++ b/trunk/tools/perf/Makefile
@@ -319,8 +319,6 @@ LIB_H += $(ARCH_INCLUDE)
LIB_H += util/cgroup.h
LIB_H += $(TRACE_EVENT_DIR)event-parse.h
LIB_H += util/target.h
-LIB_H += util/rblist.h
-LIB_H += util/intlist.h
LIB_OBJS += $(OUTPUT)util/abspath.o
LIB_OBJS += $(OUTPUT)util/alias.o
@@ -385,8 +383,6 @@ LIB_OBJS += $(OUTPUT)util/xyarray.o
LIB_OBJS += $(OUTPUT)util/cpumap.o
LIB_OBJS += $(OUTPUT)util/cgroup.o
LIB_OBJS += $(OUTPUT)util/target.o
-LIB_OBJS += $(OUTPUT)util/rblist.o
-LIB_OBJS += $(OUTPUT)util/intlist.o
BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
@@ -987,8 +983,7 @@ clean:
$(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
$(MAKE) -C Documentation/ clean
$(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS
- $(RM) $(OUTPUT)util/*-bison*
- $(RM) $(OUTPUT)util/*-flex*
+ $(RM) $(OUTPUT)util/*-{bison,flex}*
$(python-clean)
.PHONY: all install clean strip $(LIBTRACEEVENT)
diff --git a/trunk/tools/perf/builtin-record.c b/trunk/tools/perf/builtin-record.c
index 4db6e1ba54e3..f5a6452931e6 100644
--- a/trunk/tools/perf/builtin-record.c
+++ b/trunk/tools/perf/builtin-record.c
@@ -313,7 +313,7 @@ static void perf_record__open(struct perf_record *rec)
}
}
- perf_session__set_id_hdr_size(session);
+ perf_session__update_sample_type(session);
}
static int process_buildids(struct perf_record *rec)
@@ -844,6 +844,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
struct perf_record *rec = &record;
char errbuf[BUFSIZ];
+ perf_header__set_cmdline(argc, argv);
+
evsel_list = perf_evlist__new(NULL, NULL);
if (evsel_list == NULL)
return -ENOMEM;
diff --git a/trunk/tools/perf/builtin-report.c b/trunk/tools/perf/builtin-report.c
index 7c88a243b5db..69b1c1185159 100644
--- a/trunk/tools/perf/builtin-report.c
+++ b/trunk/tools/perf/builtin-report.c
@@ -249,9 +249,8 @@ static int process_read_event(struct perf_tool *tool,
static int perf_report__setup_sample_type(struct perf_report *rep)
{
struct perf_session *self = rep->session;
- u64 sample_type = perf_evlist__sample_type(self->evlist);
- if (!self->fd_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ if (!self->fd_pipe && !(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
if (sort__has_parent) {
ui__error("Selected --sort parent, but no "
"callchain data. Did you call "
@@ -275,7 +274,7 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
if (sort__branch_mode == 1) {
if (!self->fd_pipe &&
- !(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
+ !(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) {
ui__error("Selected -b but no branch data. "
"Did you call perf record without -b?\n");
return -1;
diff --git a/trunk/tools/perf/builtin-test.c b/trunk/tools/perf/builtin-test.c
index 1d592f5cbea9..d909eb74a0eb 100644
--- a/trunk/tools/perf/builtin-test.c
+++ b/trunk/tools/perf/builtin-test.c
@@ -478,6 +478,7 @@ static int test__basic_mmap(void)
unsigned int nr_events[nsyscalls],
expected_nr_events[nsyscalls], i, j;
struct perf_evsel *evsels[nsyscalls], *evsel;
+ int sample_size = __perf_evsel__sample_size(attr.sample_type);
for (i = 0; i < nsyscalls; ++i) {
char name[64];
@@ -562,7 +563,8 @@ static int test__basic_mmap(void)
goto out_munmap;
}
- err = perf_evlist__parse_sample(evlist, event, &sample, false);
+ err = perf_event__parse_sample(event, attr.sample_type, sample_size,
+ false, &sample, false);
if (err) {
pr_err("Can't parse sample, err = %d\n", err);
goto out_munmap;
@@ -659,12 +661,12 @@ static int test__PERF_RECORD(void)
const char *cmd = "sleep";
const char *argv[] = { cmd, "1", NULL, };
char *bname;
- u64 prev_time = 0;
+ u64 sample_type, prev_time = 0;
bool found_cmd_mmap = false,
found_libc_mmap = false,
found_vdso_mmap = false,
found_ld_mmap = false;
- int err = -1, errs = 0, i, wakeups = 0;
+ int err = -1, errs = 0, i, wakeups = 0, sample_size;
u32 cpu;
int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
@@ -754,6 +756,13 @@ static int test__PERF_RECORD(void)
goto out_delete_evlist;
}
+ /*
+ * We'll need these two to parse the PERF_SAMPLE_* fields in each
+ * event.
+ */
+ sample_type = perf_evlist__sample_type(evlist);
+ sample_size = __perf_evsel__sample_size(sample_type);
+
/*
* Now that all is properly set up, enable the events, they will
* count just on workload.pid, which will start...
@@ -779,7 +788,9 @@ static int test__PERF_RECORD(void)
if (type < PERF_RECORD_MAX)
nr_events[type]++;
- err = perf_evlist__parse_sample(evlist, event, &sample, false);
+ err = perf_event__parse_sample(event, sample_type,
+ sample_size, true,
+ &sample, false);
if (err < 0) {
if (verbose)
perf_event__fprintf(event, stderr);
diff --git a/trunk/tools/perf/builtin-top.c b/trunk/tools/perf/builtin-top.c
index 68cd61ef6ac5..35e86c6df713 100644
--- a/trunk/tools/perf/builtin-top.c
+++ b/trunk/tools/perf/builtin-top.c
@@ -38,7 +38,6 @@
#include "util/cpumap.h"
#include "util/xyarray.h"
#include "util/sort.h"
-#include "util/intlist.h"
#include "util/debug.h"
@@ -707,16 +706,8 @@ static void perf_event__process_sample(struct perf_tool *tool,
int err;
if (!machine && perf_guest) {
- static struct intlist *seen;
-
- if (!seen)
- seen = intlist__new();
-
- if (!intlist__has_entry(seen, event->ip.pid)) {
- pr_err("Can't find guest [%d]'s kernel information\n",
- event->ip.pid);
- intlist__add(seen, event->ip.pid);
- }
+ pr_err("Can't find guest [%d]'s kernel information\n",
+ event->ip.pid);
return;
}
@@ -820,7 +811,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
int ret;
while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
- ret = perf_evlist__parse_sample(top->evlist, event, &sample, false);
+ ret = perf_session__parse_sample(session, event, &sample);
if (ret) {
pr_err("Can't parse sample, err = %d\n", ret);
continue;
@@ -952,10 +943,8 @@ static void perf_top__start_counters(struct perf_top *top)
* based cpu-clock-tick sw counter, which
* is always available even if no PMU support:
*/
- if ((err == ENOENT || err == ENXIO) &&
- (attr->type == PERF_TYPE_HARDWARE) &&
- (attr->config == PERF_COUNT_HW_CPU_CYCLES)) {
-
+ if (attr->type == PERF_TYPE_HARDWARE &&
+ attr->config == PERF_COUNT_HW_CPU_CYCLES) {
if (verbose)
ui__warning("Cycles event not supported,\n"
"trying to fall back to cpu-clock-ticks\n");
@@ -1043,7 +1032,7 @@ static int __cmd_top(struct perf_top *top)
&top->session->host_machine);
perf_top__start_counters(top);
top->session->evlist = top->evlist;
- perf_session__set_id_hdr_size(top->session);
+ perf_session__update_sample_type(top->session);
/* Wait for a minimal set of events before starting the snapshot */
poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
diff --git a/trunk/tools/perf/util/event.h b/trunk/tools/perf/util/event.h
index d84870b06426..1b197280c621 100644
--- a/trunk/tools/perf/util/event.h
+++ b/trunk/tools/perf/util/event.h
@@ -197,6 +197,9 @@ int perf_event__preprocess_sample(const union perf_event *self,
const char *perf_event__name(unsigned int id);
+int perf_event__parse_sample(const union perf_event *event, u64 type,
+ int sample_size, bool sample_id_all,
+ struct perf_sample *sample, bool swapped);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
const struct perf_sample *sample,
bool swapped);
diff --git a/trunk/tools/perf/util/evlist.c b/trunk/tools/perf/util/evlist.c
index 9b38681add9e..3edfd3483816 100644
--- a/trunk/tools/perf/util/evlist.c
+++ b/trunk/tools/perf/util/evlist.c
@@ -881,10 +881,3 @@ int perf_evlist__start_workload(struct perf_evlist *evlist)
return 0;
}
-
-int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
- struct perf_sample *sample, bool swapped)
-{
- struct perf_evsel *e = list_entry(evlist->entries.next, struct perf_evsel, node);
- return perf_evsel__parse_sample(e, event, sample, swapped);
-}
diff --git a/trunk/tools/perf/util/evlist.h b/trunk/tools/perf/util/evlist.h
index 528c1acd9298..40d4d3cdced0 100644
--- a/trunk/tools/perf/util/evlist.h
+++ b/trunk/tools/perf/util/evlist.h
@@ -122,9 +122,6 @@ u64 perf_evlist__sample_type(const struct perf_evlist *evlist);
bool perf_evlist__sample_id_all(const const struct perf_evlist *evlist);
u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist);
-int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
- struct perf_sample *sample, bool swapped);
-
bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist);
diff --git a/trunk/tools/perf/util/evsel.c b/trunk/tools/perf/util/evsel.c
index 2eaae140def2..e81771364867 100644
--- a/trunk/tools/perf/util/evsel.c
+++ b/trunk/tools/perf/util/evsel.c
@@ -20,7 +20,7 @@
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
-static int __perf_evsel__sample_size(u64 sample_type)
+int __perf_evsel__sample_size(u64 sample_type)
{
u64 mask = sample_type & PERF_SAMPLE_MASK;
int size = 0;
@@ -53,7 +53,6 @@ void perf_evsel__init(struct perf_evsel *evsel,
evsel->attr = *attr;
INIT_LIST_HEAD(&evsel->node);
hists__init(&evsel->hists);
- evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
@@ -729,10 +728,10 @@ static bool sample_overlap(const union perf_event *event,
return false;
}
-int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
+int perf_event__parse_sample(const union perf_event *event, u64 type,
+ int sample_size, bool sample_id_all,
struct perf_sample *data, bool swapped)
{
- u64 type = evsel->attr.sample_type;
const u64 *array;
/*
@@ -747,14 +746,14 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
data->period = 1;
if (event->header.type != PERF_RECORD_SAMPLE) {
- if (!evsel->attr.sample_id_all)
+ if (!sample_id_all)
return 0;
return perf_event__parse_id_sample(event, type, data, swapped);
}
array = event->sample.array;
- if (evsel->sample_size + sizeof(event->header) > event->header.size)
+ if (sample_size + sizeof(event->header) > event->header.size)
return -EFAULT;
if (type & PERF_SAMPLE_IP) {
@@ -896,7 +895,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
u.val32[1] = sample->tid;
if (swapped) {
/*
- * Inverse of what is done in perf_evsel__parse_sample
+ * Inverse of what is done in perf_event__parse_sample
*/
u.val32[0] = bswap_32(u.val32[0]);
u.val32[1] = bswap_32(u.val32[1]);
@@ -931,7 +930,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
u.val32[0] = sample->cpu;
if (swapped) {
/*
- * Inverse of what is done in perf_evsel__parse_sample
+ * Inverse of what is done in perf_event__parse_sample
*/
u.val32[0] = bswap_32(u.val32[0]);
u.val64 = bswap_64(u.val64);
diff --git a/trunk/tools/perf/util/evsel.h b/trunk/tools/perf/util/evsel.h
index b559929983bb..67cc5033d192 100644
--- a/trunk/tools/perf/util/evsel.h
+++ b/trunk/tools/perf/util/evsel.h
@@ -65,7 +65,6 @@ struct perf_evsel {
void *func;
void *data;
} handler;
- unsigned int sample_size;
bool supported;
};
@@ -178,8 +177,13 @@ static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
return __perf_evsel__read(evsel, ncpus, nthreads, true);
}
+int __perf_evsel__sample_size(u64 sample_type);
+
+static inline int perf_evsel__sample_size(struct perf_evsel *evsel)
+{
+ return __perf_evsel__sample_size(evsel->attr.sample_type);
+}
+
void hists__init(struct hists *hists);
-int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
- struct perf_sample *sample, bool swapped);
#endif /* __PERF_EVSEL_H */
diff --git a/trunk/tools/perf/util/header.c b/trunk/tools/perf/util/header.c
index 74ea3c2f8138..3a6d20443330 100644
--- a/trunk/tools/perf/util/header.c
+++ b/trunk/tools/perf/util/header.c
@@ -174,15 +174,6 @@ perf_header__set_cmdline(int argc, const char **argv)
{
int i;
- /*
- * If header_argv has already been set, do not override it.
- * This allows a command to set the cmdline, parse args and
- * then call another builtin function that implements a
- * command -- e.g, cmd_kvm calling cmd_record.
- */
- if (header_argv)
- return 0;
-
header_argc = (u32)argc;
/* do not include NULL termination */
diff --git a/trunk/tools/perf/util/intlist.c b/trunk/tools/perf/util/intlist.c
deleted file mode 100644
index fd530dced9cb..000000000000
--- a/trunk/tools/perf/util/intlist.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Based on intlist.c by:
- * (c) 2009 Arnaldo Carvalho de Melo
- *
- * Licensed under the GPLv2.
- */
-
-#include
-#include
-#include
-
-#include "intlist.h"
-
-static struct rb_node *intlist__node_new(struct rblist *rblist __used,
- const void *entry)
-{
- int i = (int)((long)entry);
- struct rb_node *rc = NULL;
- struct int_node *node = malloc(sizeof(*node));
-
- if (node != NULL) {
- node->i = i;
- rc = &node->rb_node;
- }
-
- return rc;
-}
-
-static void int_node__delete(struct int_node *ilist)
-{
- free(ilist);
-}
-
-static void intlist__node_delete(struct rblist *rblist __used,
- struct rb_node *rb_node)
-{
- struct int_node *node = container_of(rb_node, struct int_node, rb_node);
-
- int_node__delete(node);
-}
-
-static int intlist__node_cmp(struct rb_node *rb_node, const void *entry)
-{
- int i = (int)((long)entry);
- struct int_node *node = container_of(rb_node, struct int_node, rb_node);
-
- return node->i - i;
-}
-
-int intlist__add(struct intlist *ilist, int i)
-{
- return rblist__add_node(&ilist->rblist, (void *)((long)i));
-}
-
-void intlist__remove(struct intlist *ilist __used, struct int_node *node)
-{
- int_node__delete(node);
-}
-
-struct int_node *intlist__find(struct intlist *ilist, int i)
-{
- struct int_node *node = NULL;
- struct rb_node *rb_node = rblist__find(&ilist->rblist, (void *)((long)i));
-
- if (rb_node)
- node = container_of(rb_node, struct int_node, rb_node);
-
- return node;
-}
-
-struct intlist *intlist__new(void)
-{
- struct intlist *ilist = malloc(sizeof(*ilist));
-
- if (ilist != NULL) {
- rblist__init(&ilist->rblist);
- ilist->rblist.node_cmp = intlist__node_cmp;
- ilist->rblist.node_new = intlist__node_new;
- ilist->rblist.node_delete = intlist__node_delete;
- }
-
- return ilist;
-}
-
-void intlist__delete(struct intlist *ilist)
-{
- if (ilist != NULL)
- rblist__delete(&ilist->rblist);
-}
-
-struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx)
-{
- struct int_node *node = NULL;
- struct rb_node *rb_node;
-
- rb_node = rblist__entry(&ilist->rblist, idx);
- if (rb_node)
- node = container_of(rb_node, struct int_node, rb_node);
-
- return node;
-}
diff --git a/trunk/tools/perf/util/intlist.h b/trunk/tools/perf/util/intlist.h
deleted file mode 100644
index 6d63ab90db50..000000000000
--- a/trunk/tools/perf/util/intlist.h
+++ /dev/null
@@ -1,75 +0,0 @@
-#ifndef __PERF_INTLIST_H
-#define __PERF_INTLIST_H
-
-#include
-#include
-
-#include "rblist.h"
-
-struct int_node {
- struct rb_node rb_node;
- int i;
-};
-
-struct intlist {
- struct rblist rblist;
-};
-
-struct intlist *intlist__new(void);
-void intlist__delete(struct intlist *ilist);
-
-void intlist__remove(struct intlist *ilist, struct int_node *in);
-int intlist__add(struct intlist *ilist, int i);
-
-struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx);
-struct int_node *intlist__find(struct intlist *ilist, int i);
-
-static inline bool intlist__has_entry(struct intlist *ilist, int i)
-{
- return intlist__find(ilist, i) != NULL;
-}
-
-static inline bool intlist__empty(const struct intlist *ilist)
-{
- return rblist__empty(&ilist->rblist);
-}
-
-static inline unsigned int intlist__nr_entries(const struct intlist *ilist)
-{
- return rblist__nr_entries(&ilist->rblist);
-}
-
-/* For intlist iteration */
-static inline struct int_node *intlist__first(struct intlist *ilist)
-{
- struct rb_node *rn = rb_first(&ilist->rblist.entries);
- return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
-}
-static inline struct int_node *intlist__next(struct int_node *in)
-{
- struct rb_node *rn;
- if (!in)
- return NULL;
- rn = rb_next(&in->rb_node);
- return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
-}
-
-/**
- * intlist_for_each - iterate over a intlist
- * @pos: the &struct int_node to use as a loop cursor.
- * @ilist: the &struct intlist for loop.
- */
-#define intlist__for_each(pos, ilist) \
- for (pos = intlist__first(ilist); pos; pos = intlist__next(pos))
-
-/**
- * intlist_for_each_safe - iterate over a intlist safe against removal of
- * int_node
- * @pos: the &struct int_node to use as a loop cursor.
- * @n: another &struct int_node to use as temporary storage.
- * @ilist: the &struct intlist for loop.
- */
-#define intlist__for_each_safe(pos, n, ilist) \
- for (pos = intlist__first(ilist), n = intlist__next(pos); pos;\
- pos = n, n = intlist__next(n))
-#endif /* __PERF_INTLIST_H */
diff --git a/trunk/tools/perf/util/parse-events-test.c b/trunk/tools/perf/util/parse-events-test.c
index 127d648cc548..1b997d2b89ce 100644
--- a/trunk/tools/perf/util/parse-events-test.c
+++ b/trunk/tools/perf/util/parse-events-test.c
@@ -13,9 +13,6 @@ do { \
} \
} while (0)
-#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
- PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
-
static int test__checkevent_tracepoint(struct perf_evlist *evlist)
{
struct perf_evsel *evsel = list_entry(evlist->entries.next,
@@ -24,7 +21,8 @@ static int test__checkevent_tracepoint(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
TEST_ASSERT_VAL("wrong sample_type",
- PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
+ (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
+ evsel->attr.sample_type);
TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
return 0;
}
@@ -39,7 +37,8 @@ static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong type",
PERF_TYPE_TRACEPOINT == evsel->attr.type);
TEST_ASSERT_VAL("wrong sample_type",
- PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
+ (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU)
+ == evsel->attr.sample_type);
TEST_ASSERT_VAL("wrong sample_period",
1 == evsel->attr.sample_period);
}
@@ -429,7 +428,8 @@ static int test__checkevent_list(struct perf_evlist *evlist)
evsel = list_entry(evsel->node.next, struct perf_evsel, node);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
TEST_ASSERT_VAL("wrong sample_type",
- PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
+ (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
+ evsel->attr.sample_type);
TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
diff --git a/trunk/tools/perf/util/parse-options.c b/trunk/tools/perf/util/parse-options.c
index 594f8fad5ecd..99d02aa57dbf 100644
--- a/trunk/tools/perf/util/parse-options.c
+++ b/trunk/tools/perf/util/parse-options.c
@@ -1,7 +1,6 @@
#include "util.h"
#include "parse-options.h"
#include "cache.h"
-#include "header.h"
#define OPT_SHORT 1
#define OPT_UNSET 2
@@ -414,8 +413,6 @@ int parse_options(int argc, const char **argv, const struct option *options,
{
struct parse_opt_ctx_t ctx;
- perf_header__set_cmdline(argc, argv);
-
parse_options_start(&ctx, argc, argv, flags);
switch (parse_options_step(&ctx, options, usagestr)) {
case PARSE_OPT_HELP:
diff --git a/trunk/tools/perf/util/python.c b/trunk/tools/perf/util/python.c
index 0688bfb6d280..e03b58a48424 100644
--- a/trunk/tools/perf/util/python.c
+++ b/trunk/tools/perf/util/python.c
@@ -797,13 +797,17 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
event = perf_evlist__mmap_read(evlist, cpu);
if (event != NULL) {
+ struct perf_evsel *first;
PyObject *pyevent = pyrf_event__new(event);
struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
if (pyevent == NULL)
return PyErr_NoMemory();
- err = perf_evlist__parse_sample(evlist, event, &pevent->sample, false);
+ first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ err = perf_event__parse_sample(event, first->attr.sample_type,
+ perf_evsel__sample_size(first),
+ sample_id_all, &pevent->sample, false);
if (err)
return PyErr_Format(PyExc_OSError,
"perf: can't parse sample, err=%d", err);
diff --git a/trunk/tools/perf/util/rblist.c b/trunk/tools/perf/util/rblist.c
deleted file mode 100644
index 0171fb611004..000000000000
--- a/trunk/tools/perf/util/rblist.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Based on strlist.c by:
- * (c) 2009 Arnaldo Carvalho de Melo
- *
- * Licensed under the GPLv2.
- */
-
-#include
-#include
-#include
-
-#include "rblist.h"
-
-int rblist__add_node(struct rblist *rblist, const void *new_entry)
-{
- struct rb_node **p = &rblist->entries.rb_node;
- struct rb_node *parent = NULL, *new_node;
-
- while (*p != NULL) {
- int rc;
-
- parent = *p;
-
- rc = rblist->node_cmp(parent, new_entry);
- if (rc > 0)
- p = &(*p)->rb_left;
- else if (rc < 0)
- p = &(*p)->rb_right;
- else
- return -EEXIST;
- }
-
- new_node = rblist->node_new(rblist, new_entry);
- if (new_node == NULL)
- return -ENOMEM;
-
- rb_link_node(new_node, parent, p);
- rb_insert_color(new_node, &rblist->entries);
- ++rblist->nr_entries;
-
- return 0;
-}
-
-void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node)
-{
- rb_erase(rb_node, &rblist->entries);
- rblist->node_delete(rblist, rb_node);
-}
-
-struct rb_node *rblist__find(struct rblist *rblist, const void *entry)
-{
- struct rb_node **p = &rblist->entries.rb_node;
- struct rb_node *parent = NULL;
-
- while (*p != NULL) {
- int rc;
-
- parent = *p;
-
- rc = rblist->node_cmp(parent, entry);
- if (rc > 0)
- p = &(*p)->rb_left;
- else if (rc < 0)
- p = &(*p)->rb_right;
- else
- return parent;
- }
-
- return NULL;
-}
-
-void rblist__init(struct rblist *rblist)
-{
- if (rblist != NULL) {
- rblist->entries = RB_ROOT;
- rblist->nr_entries = 0;
- }
-
- return;
-}
-
-void rblist__delete(struct rblist *rblist)
-{
- if (rblist != NULL) {
- struct rb_node *pos, *next = rb_first(&rblist->entries);
-
- while (next) {
- pos = next;
- next = rb_next(pos);
- rb_erase(pos, &rblist->entries);
- rblist->node_delete(rblist, pos);
- }
- free(rblist);
- }
-}
-
-struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx)
-{
- struct rb_node *node;
-
- for (node = rb_first(&rblist->entries); node; node = rb_next(node)) {
- if (!idx--)
- return node;
- }
-
- return NULL;
-}
diff --git a/trunk/tools/perf/util/rblist.h b/trunk/tools/perf/util/rblist.h
deleted file mode 100644
index 6d0cae5ae83d..000000000000
--- a/trunk/tools/perf/util/rblist.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef __PERF_RBLIST_H
-#define __PERF_RBLIST_H
-
-#include
-#include
-
-/*
- * create node structs of the form:
- * struct my_node {
- * struct rb_node rb_node;
- * ... my data ...
- * };
- *
- * create list structs of the form:
- * struct mylist {
- * struct rblist rblist;
- * ... my data ...
- * };
- */
-
-struct rblist {
- struct rb_root entries;
- unsigned int nr_entries;
-
- int (*node_cmp)(struct rb_node *rbn, const void *entry);
- struct rb_node *(*node_new)(struct rblist *rlist, const void *new_entry);
- void (*node_delete)(struct rblist *rblist, struct rb_node *rb_node);
-};
-
-void rblist__init(struct rblist *rblist);
-void rblist__delete(struct rblist *rblist);
-int rblist__add_node(struct rblist *rblist, const void *new_entry);
-void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node);
-struct rb_node *rblist__find(struct rblist *rblist, const void *entry);
-struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx);
-
-static inline bool rblist__empty(const struct rblist *rblist)
-{
- return rblist->nr_entries == 0;
-}
-
-static inline unsigned int rblist__nr_entries(const struct rblist *rblist)
-{
- return rblist->nr_entries;
-}
-
-#endif /* __PERF_RBLIST_H */
diff --git a/trunk/tools/perf/util/session.c b/trunk/tools/perf/util/session.c
index 2437fb0b463a..8e4f0755d2aa 100644
--- a/trunk/tools/perf/util/session.c
+++ b/trunk/tools/perf/util/session.c
@@ -80,12 +80,14 @@ static int perf_session__open(struct perf_session *self, bool force)
return -1;
}
-void perf_session__set_id_hdr_size(struct perf_session *session)
+void perf_session__update_sample_type(struct perf_session *self)
{
- u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
-
- session->host_machine.id_hdr_size = id_hdr_size;
- machines__set_id_hdr_size(&session->machines, id_hdr_size);
+ self->sample_type = perf_evlist__sample_type(self->evlist);
+ self->sample_size = __perf_evsel__sample_size(self->sample_type);
+ self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
+ self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
+ self->host_machine.id_hdr_size = self->id_hdr_size;
+ machines__set_id_hdr_size(&self->machines, self->id_hdr_size);
}
int perf_session__create_kernel_maps(struct perf_session *self)
@@ -145,7 +147,7 @@ struct perf_session *perf_session__new(const char *filename, int mode,
if (mode == O_RDONLY) {
if (perf_session__open(self, force) < 0)
goto out_delete;
- perf_session__set_id_hdr_size(self);
+ perf_session__update_sample_type(self);
} else if (mode == O_WRONLY) {
/*
* In O_RDONLY mode this will be performed when reading the
@@ -156,7 +158,7 @@ struct perf_session *perf_session__new(const char *filename, int mode,
}
if (tool && tool->ordering_requires_timestamps &&
- tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
+ tool->ordered_samples && !self->sample_id_all) {
dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
tool->ordered_samples = false;
}
@@ -671,8 +673,7 @@ static void flush_sample_queue(struct perf_session *s,
if (iter->timestamp > limit)
break;
- ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample,
- s->header.needs_swap);
+ ret = perf_session__parse_sample(s, iter->event, &sample);
if (ret)
pr_err("Can't parse sample, err = %d\n", ret);
else
@@ -864,18 +865,16 @@ static void perf_session__print_tstamp(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample)
{
- u64 sample_type = perf_evlist__sample_type(session->evlist);
-
if (event->header.type != PERF_RECORD_SAMPLE &&
- !perf_evlist__sample_id_all(session->evlist)) {
+ !session->sample_id_all) {
fputs("-1 -1 ", stdout);
return;
}
- if ((sample_type & PERF_SAMPLE_CPU))
+ if ((session->sample_type & PERF_SAMPLE_CPU))
printf("%u ", sample->cpu);
- if (sample_type & PERF_SAMPLE_TIME)
+ if (session->sample_type & PERF_SAMPLE_TIME)
printf("%" PRIu64 " ", sample->time);
}
@@ -900,8 +899,6 @@ static void dump_event(struct perf_session *session, union perf_event *event,
static void dump_sample(struct perf_session *session, union perf_event *event,
struct perf_sample *sample)
{
- u64 sample_type;
-
if (!dump_trace)
return;
@@ -909,12 +906,10 @@ static void dump_sample(struct perf_session *session, union perf_event *event,
event->header.misc, sample->pid, sample->tid, sample->ip,
sample->period, sample->addr);
- sample_type = perf_evlist__sample_type(session->evlist);
-
- if (sample_type & PERF_SAMPLE_CALLCHAIN)
+ if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
callchain__printf(sample);
- if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+ if (session->sample_type & PERF_SAMPLE_BRANCH_STACK)
branch_stack__printf(sample);
}
@@ -1011,7 +1006,7 @@ static int perf_session__preprocess_sample(struct perf_session *session,
union perf_event *event, struct perf_sample *sample)
{
if (event->header.type != PERF_RECORD_SAMPLE ||
- !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
+ !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
return 0;
if (!ip_callchain__valid(sample->callchain, event)) {
@@ -1035,7 +1030,7 @@ static int perf_session__process_user_event(struct perf_session *session, union
case PERF_RECORD_HEADER_ATTR:
err = tool->attr(event, &session->evlist);
if (err == 0)
- perf_session__set_id_hdr_size(session);
+ perf_session__update_sample_type(session);
return err;
case PERF_RECORD_HEADER_EVENT_TYPE:
return tool->event_type(tool, event);
@@ -1070,7 +1065,7 @@ static int perf_session__process_event(struct perf_session *session,
int ret;
if (session->header.needs_swap)
- event_swap(event, perf_evlist__sample_id_all(session->evlist));
+ event_swap(event, session->sample_id_all);
if (event->header.type >= PERF_RECORD_HEADER_MAX)
return -EINVAL;
@@ -1083,8 +1078,7 @@ static int perf_session__process_event(struct perf_session *session,
/*
* For all kernel events we get the sample data
*/
- ret = perf_evlist__parse_sample(session->evlist, event, &sample,
- session->header.needs_swap);
+ ret = perf_session__parse_sample(session, event, &sample);
if (ret)
return ret;
@@ -1395,9 +1389,9 @@ int perf_session__process_events(struct perf_session *self,
return err;
}
-bool perf_session__has_traces(struct perf_session *session, const char *msg)
+bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
- if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
+ if (!(self->sample_type & PERF_SAMPLE_RAW)) {
pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
return false;
}
diff --git a/trunk/tools/perf/util/session.h b/trunk/tools/perf/util/session.h
index 1f7ec87db7d7..7c435bde6eb0 100644
--- a/trunk/tools/perf/util/session.h
+++ b/trunk/tools/perf/util/session.h
@@ -41,9 +41,13 @@ struct perf_session {
* perf.data file.
*/
struct hists hists;
+ u64 sample_type;
+ int sample_size;
int fd;
bool fd_pipe;
bool repipe;
+ bool sample_id_all;
+ u16 id_hdr_size;
int cwdlen;
char *cwd;
struct ordered_samples ordered_samples;
@@ -82,7 +86,7 @@ void perf_event__attr_swap(struct perf_event_attr *attr);
int perf_session__create_kernel_maps(struct perf_session *self);
-void perf_session__set_id_hdr_size(struct perf_session *session);
+void perf_session__update_sample_type(struct perf_session *self);
void perf_session__remove_thread(struct perf_session *self, struct thread *th);
static inline
@@ -126,6 +130,24 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *self,
size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
+static inline int perf_session__parse_sample(struct perf_session *session,
+ const union perf_event *event,
+ struct perf_sample *sample)
+{
+ return perf_event__parse_sample(event, session->sample_type,
+ session->sample_size,
+ session->sample_id_all, sample,
+ session->header.needs_swap);
+}
+
+static inline int perf_session__synthesize_sample(struct perf_session *session,
+ union perf_event *event,
+ const struct perf_sample *sample)
+{
+ return perf_event__synthesize_sample(event, session->sample_type,
+ sample, session->header.needs_swap);
+}
+
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type);
diff --git a/trunk/tools/perf/util/strlist.c b/trunk/tools/perf/util/strlist.c
index 95856ff3dda4..6783a2043555 100644
--- a/trunk/tools/perf/util/strlist.c
+++ b/trunk/tools/perf/util/strlist.c
@@ -10,28 +10,23 @@
#include
#include
-static
-struct rb_node *strlist__node_new(struct rblist *rblist, const void *entry)
+static struct str_node *str_node__new(const char *s, bool dupstr)
{
- const char *s = entry;
- struct rb_node *rc = NULL;
- struct strlist *strlist = container_of(rblist, struct strlist, rblist);
- struct str_node *snode = malloc(sizeof(*snode));
+ struct str_node *self = malloc(sizeof(*self));
- if (snode != NULL) {
- if (strlist->dupstr) {
+ if (self != NULL) {
+ if (dupstr) {
s = strdup(s);
if (s == NULL)
goto out_delete;
}
- snode->s = s;
- rc = &snode->rb_node;
+ self->s = s;
}
- return rc;
+ return self;
out_delete:
- free(snode);
+ free(self);
return NULL;
}
@@ -42,26 +37,36 @@ static void str_node__delete(struct str_node *self, bool dupstr)
free(self);
}
-static
-void strlist__node_delete(struct rblist *rblist, struct rb_node *rb_node)
+int strlist__add(struct strlist *self, const char *new_entry)
{
- struct strlist *slist = container_of(rblist, struct strlist, rblist);
- struct str_node *snode = container_of(rb_node, struct str_node, rb_node);
-
- str_node__delete(snode, slist->dupstr);
-}
+ struct rb_node **p = &self->entries.rb_node;
+ struct rb_node *parent = NULL;
+ struct str_node *sn;
+
+ while (*p != NULL) {
+ int rc;
+
+ parent = *p;
+ sn = rb_entry(parent, struct str_node, rb_node);
+ rc = strcmp(sn->s, new_entry);
+
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return -EEXIST;
+ }
-static int strlist__node_cmp(struct rb_node *rb_node, const void *entry)
-{
- const char *str = entry;
- struct str_node *snode = container_of(rb_node, struct str_node, rb_node);
+ sn = str_node__new(new_entry, self->dupstr);
+ if (sn == NULL)
+ return -ENOMEM;
- return strcmp(snode->s, str);
-}
+ rb_link_node(&sn->rb_node, parent, p);
+ rb_insert_color(&sn->rb_node, &self->entries);
+ ++self->nr_entries;
-int strlist__add(struct strlist *self, const char *new_entry)
-{
- return rblist__add_node(&self->rblist, new_entry);
+ return 0;
}
int strlist__load(struct strlist *self, const char *filename)
@@ -91,20 +96,34 @@ int strlist__load(struct strlist *self, const char *filename)
return err;
}
-void strlist__remove(struct strlist *slist, struct str_node *snode)
+void strlist__remove(struct strlist *self, struct str_node *sn)
{
- str_node__delete(snode, slist->dupstr);
+ rb_erase(&sn->rb_node, &self->entries);
+ str_node__delete(sn, self->dupstr);
}
-struct str_node *strlist__find(struct strlist *slist, const char *entry)
+struct str_node *strlist__find(struct strlist *self, const char *entry)
{
- struct str_node *snode = NULL;
- struct rb_node *rb_node = rblist__find(&slist->rblist, entry);
-
- if (rb_node)
- snode = container_of(rb_node, struct str_node, rb_node);
+ struct rb_node **p = &self->entries.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p != NULL) {
+ struct str_node *sn;
+ int rc;
+
+ parent = *p;
+ sn = rb_entry(parent, struct str_node, rb_node);
+ rc = strcmp(sn->s, entry);
+
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return sn;
+ }
- return snode;
+ return NULL;
}
static int strlist__parse_list_entry(struct strlist *self, const char *s)
@@ -137,12 +156,9 @@ struct strlist *strlist__new(bool dupstr, const char *slist)
struct strlist *self = malloc(sizeof(*self));
if (self != NULL) {
- rblist__init(&self->rblist);
- self->rblist.node_cmp = strlist__node_cmp;
- self->rblist.node_new = strlist__node_new;
- self->rblist.node_delete = strlist__node_delete;
-
+ self->entries = RB_ROOT;
self->dupstr = dupstr;
+ self->nr_entries = 0;
if (slist && strlist__parse_list(self, slist) != 0)
goto out_error;
}
@@ -155,18 +171,30 @@ struct strlist *strlist__new(bool dupstr, const char *slist)
void strlist__delete(struct strlist *self)
{
- if (self != NULL)
- rblist__delete(&self->rblist);
+ if (self != NULL) {
+ struct str_node *pos;
+ struct rb_node *next = rb_first(&self->entries);
+
+ while (next) {
+ pos = rb_entry(next, struct str_node, rb_node);
+ next = rb_next(&pos->rb_node);
+ strlist__remove(self, pos);
+ }
+ self->entries = RB_ROOT;
+ free(self);
+ }
}
-struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx)
+struct str_node *strlist__entry(const struct strlist *self, unsigned int idx)
{
- struct str_node *snode = NULL;
- struct rb_node *rb_node;
+ struct rb_node *nd;
- rb_node = rblist__entry(&slist->rblist, idx);
- if (rb_node)
- snode = container_of(rb_node, struct str_node, rb_node);
+ for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+ struct str_node *pos = rb_entry(nd, struct str_node, rb_node);
- return snode;
+ if (!idx--)
+ return pos;
+ }
+
+ return NULL;
}
diff --git a/trunk/tools/perf/util/strlist.h b/trunk/tools/perf/util/strlist.h
index dd9f922ec67c..3ba839007d2c 100644
--- a/trunk/tools/perf/util/strlist.h
+++ b/trunk/tools/perf/util/strlist.h
@@ -4,15 +4,14 @@
#include <linux/rbtree.h>
#include <stdbool.h>
-#include "rblist.h"
-
struct str_node {
struct rb_node rb_node;
const char *s;
};
struct strlist {
- struct rblist rblist;
+ struct rb_root entries;
+ unsigned int nr_entries;
bool dupstr;
};
@@ -33,18 +32,18 @@ static inline bool strlist__has_entry(struct strlist *self, const char *entry)
static inline bool strlist__empty(const struct strlist *self)
{
- return rblist__empty(&self->rblist);
+ return self->nr_entries == 0;
}
static inline unsigned int strlist__nr_entries(const struct strlist *self)
{
- return rblist__nr_entries(&self->rblist);
+ return self->nr_entries;
}
/* For strlist iteration */
static inline struct str_node *strlist__first(struct strlist *self)
{
- struct rb_node *rn = rb_first(&self->rblist.entries);
+ struct rb_node *rn = rb_first(&self->entries);
return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
}
static inline struct str_node *strlist__next(struct str_node *sn)
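
The strlist__first()/strlist__next() helpers kept in this header give sorted, in-order iteration over the entries. A small usage sketch follows; the print_all() helper is hypothetical and assumes an already-populated list, it is not part of this patch.

/* Illustrative only: in-order walk of a strlist using the helpers above. */
#include <stdio.h>

#include "strlist.h"

static void print_all(struct strlist *slist)
{
	struct str_node *pos;

	for (pos = strlist__first(slist); pos; pos = strlist__next(pos))
		printf("%s\n", pos->s);
}
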
diff --git a/trunk/tools/perf/util/symbol.c b/trunk/tools/perf/util/symbol.c
index 8b63b678e127..fdad4eeeb429 100644
--- a/trunk/tools/perf/util/symbol.c
+++ b/trunk/tools/perf/util/symbol.c
@@ -64,7 +64,7 @@ static enum dso_binary_type binary_type_symtab[] = {
DSO_BINARY_TYPE__NOT_FOUND,
};
-#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
+#define DSO_BINARY_TYPE__SYMTAB_CNT sizeof(binary_type_symtab)
static enum dso_binary_type binary_type_data[] = {
DSO_BINARY_TYPE__BUILD_ID_CACHE,
@@ -72,7 +72,7 @@ static enum dso_binary_type binary_type_data[] = {
DSO_BINARY_TYPE__NOT_FOUND,
};
-#define DSO_BINARY_TYPE__DATA_CNT ARRAY_SIZE(binary_type_data)
+#define DSO_BINARY_TYPE__DATA_CNT sizeof(binary_type_data)
int dso__name_len(const struct dso *dso)
{
@@ -2875,7 +2875,6 @@ int machines__create_guest_kernel_maps(struct rb_root *machines)
int i, items = 0;
char path[PATH_MAX];
pid_t pid;
- char *endp;
if (symbol_conf.default_guest_vmlinux_name ||
symbol_conf.default_guest_modules ||
@@ -2892,14 +2891,7 @@ int machines__create_guest_kernel_maps(struct rb_root *machines)
/* Filter out . and .. */
continue;
}
- pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
- if ((*endp != '\0') ||
- (endp == namelist[i]->d_name) ||
- (errno == ERANGE)) {
- pr_debug("invalid directory (%s). Skipping.\n",
- namelist[i]->d_name);
- continue;
- }
+ pid = atoi(namelist[i]->d_name);
sprintf(path, "%s/%s/proc/kallsyms",
symbol_conf.guestmount,
namelist[i]->d_name);
diff --git a/trunk/tools/perf/util/target.c b/trunk/tools/perf/util/target.c
index 051eaa68095e..3f59c496e64c 100644
--- a/trunk/tools/perf/util/target.c
+++ b/trunk/tools/perf/util/target.c
@@ -110,7 +110,7 @@ int perf_target__strerror(struct perf_target *target, int errnum,
int idx;
const char *msg;
- BUG_ON(buflen == 0);
+ BUG_ON(buflen > 0);
if (errnum >= 0) {
const char *err = strerror_r(errnum, buf, buflen);