diff --git a/[refs] b/[refs] index 6dbcb42b8d2d..3d5a1b43ca0e 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 6b5e7229bbd59f0cfce7015fd46736fc93d8c8c3 +refs/heads/master: 9a5d5bd8480068c5829e3d997ee21dab9b3ed05f diff --git a/trunk/Documentation/ABI/testing/sysfs-bus-pci b/trunk/Documentation/ABI/testing/sysfs-bus-pci index 34f51100f029..dff1f48d252d 100644 --- a/trunk/Documentation/ABI/testing/sysfs-bus-pci +++ b/trunk/Documentation/ABI/testing/sysfs-bus-pci @@ -210,3 +210,15 @@ Users: firmware assigned instance number of the PCI device that can help in understanding the firmware intended order of the PCI device. + +What: /sys/bus/pci/devices/.../d3cold_allowed +Date: July 2012 +Contact: Huang Ying +Description: + d3cold_allowed is bit to control whether the corresponding PCI + device can be put into D3Cold state. If it is cleared, the + device will never be put into D3Cold state. If it is set, the + device may be put into D3Cold state if other requirements are + satisfied too. Reading this attribute will show the current + value of d3cold_allowed bit. Writing this attribute will set + the value of d3cold_allowed bit. diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt index afaff312bf41..f4d8c7105fcd 100644 --- a/trunk/Documentation/feature-removal-schedule.txt +++ b/trunk/Documentation/feature-removal-schedule.txt @@ -579,7 +579,7 @@ Why: KVM tracepoints provide mostly equivalent information in a much more ---------------------------- What: at91-mci driver ("CONFIG_MMC_AT91") -When: 3.7 +When: 3.8 Why: There are two mci drivers: at91-mci and atmel-mci. The PDC support was added to atmel-mci as a first step to support more chips. Then at91-mci was kept only for old IP versions (on at91rm9200 and diff --git a/trunk/Makefile b/trunk/Makefile index 371ce8899f5c..0f66f146d57e 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 6 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc5 NAME = Saber-toothed Squirrel # *DOCUMENTATION* diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig index c5f9ae5dbd1a..2f88d8d97701 100644 --- a/trunk/arch/arm/Kconfig +++ b/trunk/arch/arm/Kconfig @@ -6,7 +6,7 @@ config ARM select HAVE_DMA_API_DEBUG select HAVE_IDE if PCI || ISA || PCMCIA select HAVE_DMA_ATTRS - select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7) + select HAVE_DMA_CONTIGUOUS if MMU select HAVE_MEMBLOCK select RTC_LIB select SYS_SUPPORTS_APM_EMULATION diff --git a/trunk/arch/arm/boot/dts/at91sam9g25ek.dts b/trunk/arch/arm/boot/dts/at91sam9g25ek.dts index 7829a4d0cb22..96514c134e54 100644 --- a/trunk/arch/arm/boot/dts/at91sam9g25ek.dts +++ b/trunk/arch/arm/boot/dts/at91sam9g25ek.dts @@ -15,7 +15,7 @@ compatible = "atmel,at91sam9g25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9"; chosen { - bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs"; + bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs"; }; ahb { diff --git a/trunk/arch/arm/configs/armadillo800eva_defconfig b/trunk/arch/arm/configs/armadillo800eva_defconfig index 7d8718468e0d..90610c7030f7 100644 --- a/trunk/arch/arm/configs/armadillo800eva_defconfig +++ b/trunk/arch/arm/configs/armadillo800eva_defconfig @@ -33,7 +33,7 @@ CONFIG_AEABI=y CONFIG_FORCE_MAX_ZONEORDER=13 CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 -CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 
earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096" +CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096 rw" CONFIG_CMDLINE_FORCE=y CONFIG_KEXEC=y CONFIG_VFP=y diff --git a/trunk/arch/arm/include/asm/dma-mapping.h b/trunk/arch/arm/include/asm/dma-mapping.h index 2ae842df4551..5c44dcb0987b 100644 --- a/trunk/arch/arm/include/asm/dma-mapping.h +++ b/trunk/arch/arm/include/asm/dma-mapping.h @@ -202,6 +202,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size, return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); } +/* + * This can be called during early boot to increase the size of the atomic + * coherent DMA pool above the default value of 256KiB. It must be called + * before postcore_initcall. + */ +extern void __init init_dma_coherent_pool_size(unsigned long size); + /* * This can be called during boot to increase the size of the consistent * DMA region above it's default value of 2MB. It must be called before the diff --git a/trunk/arch/arm/mach-at91/at91rm9200_time.c b/trunk/arch/arm/mach-at91/at91rm9200_time.c index 104ca40d8d18..aaa443b48c91 100644 --- a/trunk/arch/arm/mach-at91/at91rm9200_time.c +++ b/trunk/arch/arm/mach-at91/at91rm9200_time.c @@ -197,7 +197,7 @@ void __init at91rm9200_timer_init(void) at91_st_read(AT91_ST_SR); /* Make IRQs happen for the system timer */ - setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq); + setup_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq); /* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used * directly for the clocksource and all clockevents, after adjusting diff --git a/trunk/arch/arm/mach-at91/at91sam9260_devices.c b/trunk/arch/arm/mach-at91/at91sam9260_devices.c index 7b9c2ba396ed..bce572a530ef 100644 --- a/trunk/arch/arm/mach-at91/at91sam9260_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9260_devices.c @@ -726,6 +726,8 @@ static struct resource rtt_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, }, }; @@ -744,10 +746,12 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ - at91sam9260_rtt_device.num_resources = 2; + at91sam9260_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9260_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/trunk/arch/arm/mach-at91/at91sam9261_devices.c b/trunk/arch/arm/mach-at91/at91sam9261_devices.c index 8df5c1bdff92..bc2590d712d0 100644 --- a/trunk/arch/arm/mach-at91/at91sam9261_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9261_devices.c @@ -609,6 +609,8 @@ static struct resource rtt_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -626,10 +628,12 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ - at91sam9261_rtt_device.num_resources = 2; + at91sam9261_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9261_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + 
rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/trunk/arch/arm/mach-at91/at91sam9263_devices.c b/trunk/arch/arm/mach-at91/at91sam9263_devices.c index eb6bbf86fb9f..9b6ca734f1a9 100644 --- a/trunk/arch/arm/mach-at91/at91sam9263_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9263_devices.c @@ -990,6 +990,8 @@ static struct resource rtt0_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -1006,6 +1008,8 @@ static struct resource rtt1_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -1027,14 +1031,14 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed only for the chosen RTT: * GPBR will serve as the storage for RTC time offset */ - at91sam9263_rtt0_device.num_resources = 2; + at91sam9263_rtt0_device.num_resources = 3; at91sam9263_rtt1_device.num_resources = 1; pdev = &at91sam9263_rtt0_device; r = rtt0_resources; break; case 1: at91sam9263_rtt0_device.num_resources = 1; - at91sam9263_rtt1_device.num_resources = 2; + at91sam9263_rtt1_device.num_resources = 3; pdev = &at91sam9263_rtt1_device; r = rtt1_resources; break; @@ -1047,6 +1051,8 @@ static void __init at91_add_device_rtt_rtc(void) pdev->name = "rtc-at91sam9"; r[1].start = AT91SAM9263_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; r[1].end = r[1].start + 3; + r[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + r[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/trunk/arch/arm/mach-at91/at91sam9g45_devices.c b/trunk/arch/arm/mach-at91/at91sam9g45_devices.c index 06073996a382..1b47319ca00b 100644 --- a/trunk/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9g45_devices.c @@ -1293,6 +1293,8 @@ static struct resource rtt_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -1310,10 +1312,12 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ - at91sam9g45_rtt_device.num_resources = 2; + at91sam9g45_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9G45_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/trunk/arch/arm/mach-at91/at91sam9rl_devices.c b/trunk/arch/arm/mach-at91/at91sam9rl_devices.c index f09fff932172..b3d365dadef5 100644 --- a/trunk/arch/arm/mach-at91/at91sam9rl_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9rl_devices.c @@ -688,6 +688,8 @@ static struct resource rtt_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -705,10 +707,12 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ - at91sam9rl_rtt_device.num_resources = 2; + at91sam9rl_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9RL_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git 
a/trunk/arch/arm/mach-at91/clock.c b/trunk/arch/arm/mach-at91/clock.c index de2ec6b8fea7..188c82971ebd 100644 --- a/trunk/arch/arm/mach-at91/clock.c +++ b/trunk/arch/arm/mach-at91/clock.c @@ -63,6 +63,12 @@ EXPORT_SYMBOL_GPL(at91_pmc_base); #define cpu_has_300M_plla() (cpu_is_at91sam9g10()) +#define cpu_has_240M_plla() (cpu_is_at91sam9261() \ + || cpu_is_at91sam9263() \ + || cpu_is_at91sam9rl()) + +#define cpu_has_210M_plla() (cpu_is_at91sam9260()) + #define cpu_has_pllb() (!(cpu_is_at91sam9rl() \ || cpu_is_at91sam9g45() \ || cpu_is_at91sam9x5() \ @@ -706,6 +712,12 @@ static int __init at91_pmc_init(unsigned long main_clock) } else if (cpu_has_800M_plla()) { if (plla.rate_hz > 800000000) pll_overclock = true; + } else if (cpu_has_240M_plla()) { + if (plla.rate_hz > 240000000) + pll_overclock = true; + } else if (cpu_has_210M_plla()) { + if (plla.rate_hz > 210000000) + pll_overclock = true; } else { if (plla.rate_hz > 209000000) pll_overclock = true; diff --git a/trunk/arch/arm/mach-gemini/irq.c b/trunk/arch/arm/mach-gemini/irq.c index ca70e5fcc7ac..020852d3bdd8 100644 --- a/trunk/arch/arm/mach-gemini/irq.c +++ b/trunk/arch/arm/mach-gemini/irq.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #define IRQ_SOURCE(base_addr) (base_addr + 0x00) diff --git a/trunk/arch/arm/mach-kirkwood/common.c b/trunk/arch/arm/mach-kirkwood/common.c index 3226077735b1..1201191d7f1b 100644 --- a/trunk/arch/arm/mach-kirkwood/common.c +++ b/trunk/arch/arm/mach-kirkwood/common.c @@ -517,6 +517,13 @@ void __init kirkwood_wdt_init(void) void __init kirkwood_init_early(void) { orion_time_set_base(TIMER_VIRT_BASE); + + /* + * Some Kirkwood devices allocate their coherent buffers from atomic + * context. Increase size of atomic coherent pool to make sure such + * the allocations won't fail. + */ + init_dma_coherent_pool_size(SZ_1M); } int kirkwood_tclk; diff --git a/trunk/arch/arm/mach-kirkwood/db88f6281-bp-setup.c b/trunk/arch/arm/mach-kirkwood/db88f6281-bp-setup.c index d93359379598..be90b7d0e10b 100644 --- a/trunk/arch/arm/mach-kirkwood/db88f6281-bp-setup.c +++ b/trunk/arch/arm/mach-kirkwood/db88f6281-bp-setup.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include diff --git a/trunk/arch/arm/mach-shmobile/board-armadillo800eva.c b/trunk/arch/arm/mach-shmobile/board-armadillo800eva.c index cf10f92856dc..453a6e50db8b 100644 --- a/trunk/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/trunk/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -520,13 +520,14 @@ static struct platform_device hdmi_lcdc_device = { }; /* GPIO KEY */ -#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 } +#define GPIO_KEY(c, g, d, ...) 
\ + { .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ } static struct gpio_keys_button gpio_buttons[] = { - GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW1"), - GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW2"), - GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW3"), - GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW4"), + GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW3", .wakeup = 1), + GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW4"), + GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW5"), + GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW6"), }; static struct gpio_keys_platform_data gpio_key_info = { @@ -901,8 +902,8 @@ static struct platform_device *eva_devices[] __initdata = { &camera_device, &ceu0_device, &fsi_device, - &fsi_hdmi_device, &fsi_wm8978_device, + &fsi_hdmi_device, }; static void __init eva_clock_init(void) diff --git a/trunk/arch/arm/mach-shmobile/board-mackerel.c b/trunk/arch/arm/mach-shmobile/board-mackerel.c index 7ea2b31e3199..c129542f6aed 100644 --- a/trunk/arch/arm/mach-shmobile/board-mackerel.c +++ b/trunk/arch/arm/mach-shmobile/board-mackerel.c @@ -695,6 +695,7 @@ static struct platform_device usbhs0_device = { * - J30 "open" * - modify usbhs1_get_id() USBHS_HOST -> USBHS_GADGET * - add .get_vbus = usbhs_get_vbus in usbhs1_private + * - check usbhs0_device(pio)/usbhs1_device(irq) order in mackerel_devices. */ #define IRQ8 evt2irq(0x0300) #define USB_PHY_MODE (1 << 4) @@ -1325,8 +1326,8 @@ static struct platform_device *mackerel_devices[] __initdata = { &nor_flash_device, &smc911x_device, &lcdc_device, - &usbhs1_device, &usbhs0_device, + &usbhs1_device, &leds_device, &fsi_device, &fsi_ak4643_device, diff --git a/trunk/arch/arm/mach-shmobile/board-marzen.c b/trunk/arch/arm/mach-shmobile/board-marzen.c index 3a528cf4366c..fcf5a47f4772 100644 --- a/trunk/arch/arm/mach-shmobile/board-marzen.c +++ b/trunk/arch/arm/mach-shmobile/board-marzen.c @@ -67,7 +67,7 @@ static struct smsc911x_platform_config smsc911x_platdata = { static struct platform_device eth_device = { .name = "smsc911x", - .id = 0, + .id = -1, .dev = { .platform_data = &smsc911x_platdata, }, diff --git a/trunk/arch/arm/mach-shmobile/intc-sh73a0.c b/trunk/arch/arm/mach-shmobile/intc-sh73a0.c index ee447404c857..588555a67d9c 100644 --- a/trunk/arch/arm/mach-shmobile/intc-sh73a0.c +++ b/trunk/arch/arm/mach-shmobile/intc-sh73a0.c @@ -259,9 +259,9 @@ static int sh73a0_set_wake(struct irq_data *data, unsigned int on) return 0; /* always allow wakeup */ } -#define RELOC_BASE 0x1000 +#define RELOC_BASE 0x1200 -/* INTCA IRQ pins at INTCS + 0x1000 to make space for GIC+INTC handling */ +/* INTCA IRQ pins at INTCS + RELOC_BASE to make space for GIC+INTC handling */ #define INTCS_VECT_RELOC(n, vect) INTCS_VECT((n), (vect) + RELOC_BASE) INTC_IRQ_PINS_32(intca_irq_pins, 0xe6900000, diff --git a/trunk/arch/arm/mach-tegra/pcie.c b/trunk/arch/arm/mach-tegra/pcie.c index d3ad5150d660..c25a2a4f2e3d 100644 --- a/trunk/arch/arm/mach-tegra/pcie.c +++ b/trunk/arch/arm/mach-tegra/pcie.c @@ -367,17 +367,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class); /* Tegra PCIE requires relaxed ordering */ static void __devinit tegra_pcie_relax_enable(struct pci_dev *dev) { - u16 val16; - int pos = pci_find_capability(dev, PCI_CAP_ID_EXP); - - if (pos <= 0) { - dev_err(&dev->dev, "skipping relaxed ordering fixup\n"); - return; - } - - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &val16); - val16 |= PCI_EXP_DEVCTL_RELAX_EN; - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, val16); + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); } 
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable); diff --git a/trunk/arch/arm/mm/dma-mapping.c b/trunk/arch/arm/mm/dma-mapping.c index 4e7d1182e8a3..051204fc4617 100644 --- a/trunk/arch/arm/mm/dma-mapping.c +++ b/trunk/arch/arm/mm/dma-mapping.c @@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size) vunmap(cpu_addr); } +#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K + struct dma_pool { size_t size; spinlock_t lock; unsigned long *bitmap; unsigned long nr_pages; void *vaddr; - struct page *page; + struct page **pages; }; static struct dma_pool atomic_pool = { - .size = SZ_256K, + .size = DEFAULT_DMA_COHERENT_POOL_SIZE, }; static int __init early_coherent_pool(char *p) @@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p) } early_param("coherent_pool", early_coherent_pool); +void __init init_dma_coherent_pool_size(unsigned long size) +{ + /* + * Catch any attempt to set the pool size too late. + */ + BUG_ON(atomic_pool.vaddr); + + /* + * Set architecture specific coherent pool size only if + * it has not been changed by kernel command line parameter. + */ + if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE) + atomic_pool.size = size; +} + /* * Initialise the coherent pool for atomic allocations. */ @@ -297,6 +314,7 @@ static int __init atomic_pool_init(void) unsigned long nr_pages = pool->size >> PAGE_SHIFT; unsigned long *bitmap; struct page *page; + struct page **pages; void *ptr; int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long); @@ -304,21 +322,31 @@ static int __init atomic_pool_init(void) if (!bitmap) goto no_bitmap; + pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); + if (!pages) + goto no_pages; + if (IS_ENABLED(CONFIG_CMA)) ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page); else ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot, &page, NULL); if (ptr) { + int i; + + for (i = 0; i < nr_pages; i++) + pages[i] = page + i; + spin_lock_init(&pool->lock); pool->vaddr = ptr; - pool->page = page; + pool->pages = pages; pool->bitmap = bitmap; pool->nr_pages = nr_pages; pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n", (unsigned)pool->size / 1024); return 0; } +no_pages: kfree(bitmap); no_bitmap: pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n", @@ -443,27 +471,45 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page) if (pageno < pool->nr_pages) { bitmap_set(pool->bitmap, pageno, count); ptr = pool->vaddr + PAGE_SIZE * pageno; - *ret_page = pool->page + pageno; + *ret_page = pool->pages[pageno]; + } else { + pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n" + "Please increase it with coherent_pool= kernel parameter!\n", + (unsigned)pool->size / 1024); } spin_unlock_irqrestore(&pool->lock, flags); return ptr; } +static bool __in_atomic_pool(void *start, size_t size) +{ + struct dma_pool *pool = &atomic_pool; + void *end = start + size; + void *pool_start = pool->vaddr; + void *pool_end = pool->vaddr + pool->size; + + if (start < pool_start || start > pool_end) + return false; + + if (end <= pool_end) + return true; + + WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n", + start, end - 1, pool_start, pool_end - 1); + + return false; +} + static int __free_from_pool(void *start, size_t size) { struct dma_pool *pool = &atomic_pool; unsigned long pageno, count; unsigned long flags; - if (start < pool->vaddr || start > pool->vaddr + pool->size) + if (!__in_atomic_pool(start, size)) return 
0; - if (start + size > pool->vaddr + pool->size) { - WARN(1, "freeing wrong coherent size from pool\n"); - return 0; - } - pageno = (start - pool->vaddr) >> PAGE_SHIFT; count = size >> PAGE_SHIFT; @@ -1090,10 +1136,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si return 0; } +static struct page **__atomic_get_pages(void *addr) +{ + struct dma_pool *pool = &atomic_pool; + struct page **pages = pool->pages; + int offs = (addr - pool->vaddr) >> PAGE_SHIFT; + + return pages + offs; +} + static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) { struct vm_struct *area; + if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) + return __atomic_get_pages(cpu_addr); + if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) return cpu_addr; @@ -1103,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) return NULL; } +static void *__iommu_alloc_atomic(struct device *dev, size_t size, + dma_addr_t *handle) +{ + struct page *page; + void *addr; + + addr = __alloc_from_pool(size, &page); + if (!addr) + return NULL; + + *handle = __iommu_create_mapping(dev, &page, size); + if (*handle == DMA_ERROR_CODE) + goto err_mapping; + + return addr; + +err_mapping: + __free_from_pool(addr, size); + return NULL; +} + +static void __iommu_free_atomic(struct device *dev, struct page **pages, + dma_addr_t handle, size_t size) +{ + __iommu_remove_mapping(dev, handle, size); + __free_from_pool(page_address(pages[0]), size); +} + static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { @@ -1113,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, *handle = DMA_ERROR_CODE; size = PAGE_ALIGN(size); + if (gfp & GFP_ATOMIC) + return __iommu_alloc_atomic(dev, size, handle); + pages = __iommu_alloc_buffer(dev, size, gfp); if (!pages) return NULL; @@ -1179,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, return; } + if (__in_atomic_pool(cpu_addr, size)) { + __iommu_free_atomic(dev, pages, handle, size); + return; + } + if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { unmap_kernel_range((unsigned long)cpu_addr, size); vunmap(cpu_addr); diff --git a/trunk/arch/mips/pci/pci-octeon.c b/trunk/arch/mips/pci/pci-octeon.c index 52a1ba70b3b6..c5dfb2c87d44 100644 --- a/trunk/arch/mips/pci/pci-octeon.c +++ b/trunk/arch/mips/pci/pci-octeon.c @@ -117,16 +117,11 @@ int pcibios_plat_dev_init(struct pci_dev *dev) } /* Enable the PCIe normal error reporting */ - pos = pci_find_capability(dev, PCI_CAP_ID_EXP); - if (pos) { - /* Update Device Control */ - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &config); - config |= PCI_EXP_DEVCTL_CERE; /* Correctable Error Reporting */ - config |= PCI_EXP_DEVCTL_NFERE; /* Non-Fatal Error Reporting */ - config |= PCI_EXP_DEVCTL_FERE; /* Fatal Error Reporting */ - config |= PCI_EXP_DEVCTL_URRE; /* Unsupported Request */ - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, config); - } + config = PCI_EXP_DEVCTL_CERE; /* Correctable Error Reporting */ + config |= PCI_EXP_DEVCTL_NFERE; /* Non-Fatal Error Reporting */ + config |= PCI_EXP_DEVCTL_FERE; /* Fatal Error Reporting */ + config |= PCI_EXP_DEVCTL_URRE; /* Unsupported Request */ + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, config); /* Find the Advanced Error Reporting capability */ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); diff --git a/trunk/arch/powerpc/Kconfig b/trunk/arch/powerpc/Kconfig index 
98e513b62709..352f416269ce 100644 --- a/trunk/arch/powerpc/Kconfig +++ b/trunk/arch/powerpc/Kconfig @@ -239,9 +239,6 @@ config PPC_OF_PLATFORM_PCI config ARCH_SUPPORTS_DEBUG_PAGEALLOC def_bool y -config ARCH_SUPPORTS_UPROBES - def_bool y - config PPC_ADV_DEBUG_REGS bool depends on 40x || BOOKE diff --git a/trunk/arch/powerpc/boot/Makefile b/trunk/arch/powerpc/boot/Makefile index 6a15c968d214..b7d833382be4 100644 --- a/trunk/arch/powerpc/boot/Makefile +++ b/trunk/arch/powerpc/boot/Makefile @@ -107,7 +107,6 @@ src-boot := $(addprefix $(obj)/, $(src-boot)) obj-boot := $(addsuffix .o, $(basename $(src-boot))) obj-wlib := $(addsuffix .o, $(basename $(addprefix $(obj)/, $(src-wlib)))) obj-plat := $(addsuffix .o, $(basename $(addprefix $(obj)/, $(src-plat)))) -obj-plat: $(libfdt) quiet_cmd_copy_zlib = COPY $@ cmd_copy_zlib = sed "s@__used@@;s@]*\).*@\"\1\"@" $< > $@ diff --git a/trunk/arch/powerpc/include/asm/abs_addr.h b/trunk/arch/powerpc/include/asm/abs_addr.h new file mode 100644 index 000000000000..9d92ba04b033 --- /dev/null +++ b/trunk/arch/powerpc/include/asm/abs_addr.h @@ -0,0 +1,56 @@ +#ifndef _ASM_POWERPC_ABS_ADDR_H +#define _ASM_POWERPC_ABS_ADDR_H +#ifdef __KERNEL__ + + +/* + * c 2001 PPC 64 Team, IBM Corp + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include + +#include +#include +#include + +struct mschunks_map { + unsigned long num_chunks; + unsigned long chunk_size; + unsigned long chunk_shift; + unsigned long chunk_mask; + u32 *mapping; +}; + +extern struct mschunks_map mschunks_map; + +/* Chunks are 256 KB */ +#define MSCHUNKS_CHUNK_SHIFT (18) +#define MSCHUNKS_CHUNK_SIZE (1UL << MSCHUNKS_CHUNK_SHIFT) +#define MSCHUNKS_OFFSET_MASK (MSCHUNKS_CHUNK_SIZE - 1) + +static inline unsigned long chunk_to_addr(unsigned long chunk) +{ + return chunk << MSCHUNKS_CHUNK_SHIFT; +} + +static inline unsigned long addr_to_chunk(unsigned long addr) +{ + return addr >> MSCHUNKS_CHUNK_SHIFT; +} + +static inline unsigned long phys_to_abs(unsigned long pa) +{ + return pa; +} + +/* Convenience macros */ +#define virt_to_abs(va) phys_to_abs(__pa(va)) +#define abs_to_virt(aa) __va(aa) + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_ABS_ADDR_H */ diff --git a/trunk/arch/powerpc/include/asm/debug.h b/trunk/arch/powerpc/include/asm/debug.h index 32de2577bb6d..716d2f089eb6 100644 --- a/trunk/arch/powerpc/include/asm/debug.h +++ b/trunk/arch/powerpc/include/asm/debug.h @@ -44,7 +44,7 @@ static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; } static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } #endif -extern int set_dabr(unsigned long dabr, unsigned long dabrx); +extern int set_dabr(unsigned long dabr); #ifdef CONFIG_PPC_ADV_DEBUG_REGS extern void do_send_trap(struct pt_regs *regs, unsigned long address, unsigned long error_code, int signal_code, int brkpt); diff --git a/trunk/arch/powerpc/include/asm/eeh.h b/trunk/arch/powerpc/include/asm/eeh.h index 58c5ee61e700..d60f99814ffb 100644 --- a/trunk/arch/powerpc/include/asm/eeh.h +++ b/trunk/arch/powerpc/include/asm/eeh.h @@ -31,45 +31,6 @@ struct device_node; #ifdef CONFIG_EEH -/* - * The struct is used to trace PE related EEH functionality. - * In theory, there will have one instance of the struct to - * be created against particular PE. In nature, PEs corelate - * to each other. 
the struct has to reflect that hierarchy in - * order to easily pick up those affected PEs when one particular - * PE has EEH errors. - * - * Also, one particular PE might be composed of PCI device, PCI - * bus and its subordinate components. The struct also need ship - * the information. Further more, one particular PE is only meaingful - * in the corresponding PHB. Therefore, the root PEs should be created - * against existing PHBs in on-to-one fashion. - */ -#define EEH_PE_PHB 1 /* PHB PE */ -#define EEH_PE_DEVICE 2 /* Device PE */ -#define EEH_PE_BUS 3 /* Bus PE */ - -#define EEH_PE_ISOLATED (1 << 0) /* Isolated PE */ -#define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */ - -struct eeh_pe { - int type; /* PE type: PHB/Bus/Device */ - int state; /* PE EEH dependent mode */ - int config_addr; /* Traditional PCI address */ - int addr; /* PE configuration address */ - struct pci_controller *phb; /* Associated PHB */ - int check_count; /* Times of ignored error */ - int freeze_count; /* Times of froze up */ - int false_positives; /* Times of reported #ff's */ - struct eeh_pe *parent; /* Parent PE */ - struct list_head child_list; /* Link PE to the child list */ - struct list_head edevs; /* Link list of EEH devices */ - struct list_head child; /* Child PEs */ -}; - -#define eeh_pe_for_each_dev(pe, edev) \ - list_for_each_entry(edev, &pe->edevs, list) - /* * The struct is used to trace EEH state for the associated * PCI device node or PCI device. In future, it might @@ -77,16 +38,21 @@ struct eeh_pe { * another tree except the currently existing tree of PCI * buses and PCI devices */ -#define EEH_DEV_IRQ_DISABLED (1<<0) /* Interrupt disabled */ +#define EEH_MODE_SUPPORTED (1<<0) /* EEH supported on the device */ +#define EEH_MODE_NOCHECK (1<<1) /* EEH check should be skipped */ +#define EEH_MODE_ISOLATED (1<<2) /* The device has been isolated */ +#define EEH_MODE_RECOVERING (1<<3) /* Recovering the device */ +#define EEH_MODE_IRQ_DISABLED (1<<4) /* Interrupt disabled */ struct eeh_dev { int mode; /* EEH mode */ int class_code; /* Class code of the device */ int config_addr; /* Config address */ int pe_config_addr; /* PE config address */ + int check_count; /* Times of ignored error */ + int freeze_count; /* Times of froze up */ + int false_positives; /* Times of reported #ff's */ u32 config_space[16]; /* Saved PCI config space */ - struct eeh_pe *pe; /* Associated PE */ - struct list_head list; /* Form link list in the PE */ struct pci_controller *phb; /* Associated PHB */ struct device_node *dn; /* Associated device node */ struct pci_dev *pdev; /* Associated PCI device */ @@ -129,51 +95,19 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev) struct eeh_ops { char *name; int (*init)(void); - void* (*of_probe)(struct device_node *dn, void *flag); - void* (*dev_probe)(struct pci_dev *dev, void *flag); - int (*set_option)(struct eeh_pe *pe, int option); - int (*get_pe_addr)(struct eeh_pe *pe); - int (*get_state)(struct eeh_pe *pe, int *state); - int (*reset)(struct eeh_pe *pe, int option); - int (*wait_state)(struct eeh_pe *pe, int max_wait); - int (*get_log)(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len); - int (*configure_bridge)(struct eeh_pe *pe); + int (*set_option)(struct device_node *dn, int option); + int (*get_pe_addr)(struct device_node *dn); + int (*get_state)(struct device_node *dn, int *state); + int (*reset)(struct device_node *dn, int option); + int (*wait_state)(struct device_node *dn, int max_wait); + int (*get_log)(struct device_node 
*dn, int severity, char *drv_log, unsigned long len); + int (*configure_bridge)(struct device_node *dn); int (*read_config)(struct device_node *dn, int where, int size, u32 *val); int (*write_config)(struct device_node *dn, int where, int size, u32 val); }; extern struct eeh_ops *eeh_ops; extern int eeh_subsystem_enabled; -extern struct mutex eeh_mutex; -extern int eeh_probe_mode; - -#define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */ -#define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */ - -static inline void eeh_probe_mode_set(int flag) -{ - eeh_probe_mode = flag; -} - -static inline int eeh_probe_mode_devtree(void) -{ - return (eeh_probe_mode == EEH_PROBE_MODE_DEVTREE); -} - -static inline int eeh_probe_mode_dev(void) -{ - return (eeh_probe_mode == EEH_PROBE_MODE_DEV); -} - -static inline void eeh_lock(void) -{ - mutex_lock(&eeh_mutex); -} - -static inline void eeh_unlock(void) -{ - mutex_unlock(&eeh_mutex); -} /* * Max number of EEH freezes allowed before we consider the device @@ -181,23 +115,19 @@ static inline void eeh_unlock(void) */ #define EEH_MAX_ALLOWED_FREEZES 5 -typedef void *(*eeh_traverse_func)(void *data, void *flag); -int __devinit eeh_phb_pe_create(struct pci_controller *phb); -int eeh_add_to_parent_pe(struct eeh_dev *edev); -int eeh_rmv_from_parent_pe(struct eeh_dev *edev); -void *eeh_pe_dev_traverse(struct eeh_pe *root, - eeh_traverse_func fn, void *flag); -void eeh_pe_restore_bars(struct eeh_pe *pe); -struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe); - void * __devinit eeh_dev_init(struct device_node *dn, void *data); void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb); +void __init eeh_dev_phb_init(void); +void __init eeh_init(void); +#ifdef CONFIG_PPC_PSERIES +int __init eeh_pseries_init(void); +#endif int __init eeh_ops_register(struct eeh_ops *ops); int __exit eeh_ops_unregister(const char *name); unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val); -int eeh_dev_check_failure(struct eeh_dev *edev); -void __init eeh_addr_cache_build(void); +int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev); +void __init pci_addr_cache_build(void); void eeh_add_device_tree_early(struct device_node *); void eeh_add_device_tree_late(struct pci_bus *); void eeh_remove_bus_device(struct pci_dev *); @@ -226,24 +156,34 @@ static inline void *eeh_dev_init(struct device_node *dn, void *data) static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { } +static inline void eeh_dev_phb_init(void) { } + +static inline void eeh_init(void) { } + +#ifdef CONFIG_PPC_PSERIES +static inline int eeh_pseries_init(void) +{ + return 0; +} +#endif /* CONFIG_PPC_PSERIES */ + static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) { return val; } -#define eeh_dev_check_failure(x) (0) +static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) +{ + return 0; +} -static inline void eeh_addr_cache_build(void) { } +static inline void pci_addr_cache_build(void) { } static inline void eeh_add_device_tree_early(struct device_node *dn) { } static inline void eeh_add_device_tree_late(struct pci_bus *bus) { } static inline void eeh_remove_bus_device(struct pci_dev *dev) { } - -static inline void eeh_lock(void) { } -static inline void eeh_unlock(void) { } - #define EEH_POSSIBLE_ERROR(val, type) (0) #define EEH_IO_ERROR_VALUE(size) (-1UL) #endif /* CONFIG_EEH */ diff --git a/trunk/arch/powerpc/include/asm/eeh_event.h 
b/trunk/arch/powerpc/include/asm/eeh_event.h index de67d830151b..c68b012b7797 100644 --- a/trunk/arch/powerpc/include/asm/eeh_event.h +++ b/trunk/arch/powerpc/include/asm/eeh_event.h @@ -28,11 +28,11 @@ */ struct eeh_event { struct list_head list; /* to form event queue */ - struct eeh_pe *pe; /* EEH PE */ + struct eeh_dev *edev; /* EEH device */ }; -int eeh_send_failure_event(struct eeh_pe *pe); -void eeh_handle_event(struct eeh_pe *pe); +int eeh_send_failure_event(struct eeh_dev *edev); +struct eeh_dev *handle_eeh_events(struct eeh_event *); #endif /* __KERNEL__ */ #endif /* ASM_POWERPC_EEH_EVENT_H */ diff --git a/trunk/arch/powerpc/include/asm/exception-64e.h b/trunk/arch/powerpc/include/asm/exception-64e.h index 51fa43e536b9..ac13addb8495 100644 --- a/trunk/arch/powerpc/include/asm/exception-64e.h +++ b/trunk/arch/powerpc/include/asm/exception-64e.h @@ -37,7 +37,6 @@ * critical data */ -#define PACA_EXGDBELL PACA_EXGEN /* We are out of SPRGs so we save some things in the PACA. The normal * exception frame is smaller than the CRIT or MC one though @@ -46,9 +45,8 @@ #define EX_CR (1 * 8) #define EX_R10 (2 * 8) #define EX_R11 (3 * 8) -#define EX_R13 (4 * 8) -#define EX_R14 (5 * 8) -#define EX_R15 (6 * 8) +#define EX_R14 (4 * 8) +#define EX_R15 (5 * 8) /* * The TLB miss exception uses different slots. diff --git a/trunk/arch/powerpc/include/asm/hvcall.h b/trunk/arch/powerpc/include/asm/hvcall.h index 7a867065db79..423cf9eaf4a4 100644 --- a/trunk/arch/powerpc/include/asm/hvcall.h +++ b/trunk/arch/powerpc/include/asm/hvcall.h @@ -152,6 +152,11 @@ #define H_VASI_RESUMED 5 #define H_VASI_COMPLETED 6 +/* DABRX flags */ +#define H_DABRX_HYPERVISOR (1UL<<(63-61)) +#define H_DABRX_KERNEL (1UL<<(63-62)) +#define H_DABRX_USER (1UL<<(63-63)) + /* Each control block has to be on a 4K boundary */ #define H_CB_ALIGNMENT 4096 diff --git a/trunk/arch/powerpc/include/asm/hw_breakpoint.h b/trunk/arch/powerpc/include/asm/hw_breakpoint.h index 423424599dad..be04330af751 100644 --- a/trunk/arch/powerpc/include/asm/hw_breakpoint.h +++ b/trunk/arch/powerpc/include/asm/hw_breakpoint.h @@ -27,11 +27,10 @@ #ifdef CONFIG_HAVE_HW_BREAKPOINT struct arch_hw_breakpoint { - unsigned long address; - unsigned long dabrx; - int type; - u8 len; /* length of the target data symbol */ bool extraneous_interrupt; + u8 len; /* length of the target data symbol */ + int type; + unsigned long address; }; #include @@ -62,7 +61,7 @@ extern void ptrace_triggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs); static inline void hw_breakpoint_disable(void) { - set_dabr(0, 0); + set_dabr(0); } extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs); diff --git a/trunk/arch/powerpc/include/asm/kprobes.h b/trunk/arch/powerpc/include/asm/kprobes.h index 7b6feab6fd26..be0171afdc0f 100644 --- a/trunk/arch/powerpc/include/asm/kprobes.h +++ b/trunk/arch/powerpc/include/asm/kprobes.h @@ -29,16 +29,21 @@ #include #include #include -#include #define __ARCH_WANT_KPROBES_INSN_SLOT struct pt_regs; struct kprobe; -typedef ppc_opcode_t kprobe_opcode_t; +typedef unsigned int kprobe_opcode_t; +#define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */ #define MAX_INSN_SIZE 1 +#define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008) +#define IS_TD(instr) (((instr) & 0xfc0007fe) == 0x7c000088) +#define IS_TDI(instr) (((instr) & 0xfc000000) == 0x08000000) +#define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000) + #ifdef CONFIG_PPC64 /* * 64bit powerpc uses function descriptors. 
@@ -67,6 +72,12 @@ typedef ppc_opcode_t kprobe_opcode_t; addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \ } \ } + +#define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \ + IS_TWI(instr) || IS_TDI(instr)) +#else +/* Use stock kprobe_lookup_name since ppc32 doesn't use function descriptors */ +#define is_trap(instr) (IS_TW(instr) || IS_TWI(instr)) #endif #define flush_insn_slot(p) do { } while (0) diff --git a/trunk/arch/powerpc/include/asm/kvm_book3s_asm.h b/trunk/arch/powerpc/include/asm/kvm_book3s_asm.h index 88609b23b775..bfcd00c1485d 100644 --- a/trunk/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/trunk/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -74,6 +74,7 @@ struct kvmppc_host_state { ulong vmhandler; ulong scratch0; ulong scratch1; + ulong sprg3; u8 in_guest; u8 restore_hid5; u8 napping; diff --git a/trunk/arch/powerpc/include/asm/machdep.h b/trunk/arch/powerpc/include/asm/machdep.h index 236b4779ec4f..f7706d722b39 100644 --- a/trunk/arch/powerpc/include/asm/machdep.h +++ b/trunk/arch/powerpc/include/asm/machdep.h @@ -180,8 +180,7 @@ struct machdep_calls { void (*enable_pmcs)(void); /* Set DABR for this platform, leave empty for default implemenation */ - int (*set_dabr)(unsigned long dabr, - unsigned long dabrx); + int (*set_dabr)(unsigned long dabr); #ifdef CONFIG_PPC32 /* XXX for now */ /* A general init function, called by ppc_init in init/main.c. @@ -215,6 +214,9 @@ struct machdep_calls { /* Called after scan and before resource survey */ void (*pcibios_fixup_phb)(struct pci_controller *hose); + /* Called during PCI resource reassignment */ + resource_size_t (*pcibios_window_alignment)(struct pci_bus *, unsigned long type); + /* Called to shutdown machine specific hardware not already controlled * by other drivers. */ diff --git a/trunk/arch/powerpc/include/asm/paca.h b/trunk/arch/powerpc/include/asm/paca.h index 7796519fd238..daf813fea91f 100644 --- a/trunk/arch/powerpc/include/asm/paca.h +++ b/trunk/arch/powerpc/include/asm/paca.h @@ -136,7 +136,6 @@ struct paca_struct { u8 io_sync; /* writel() needs spin_unlock sync */ u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */ u8 nap_state_lost; /* NV GPR values lost in power7_idle */ - u64 sprg3; /* Saved user-visible sprg */ #ifdef CONFIG_PPC_POWERNV /* Pointer to OPAL machine check event structure set by the diff --git a/trunk/arch/powerpc/include/asm/pci-bridge.h b/trunk/arch/powerpc/include/asm/pci-bridge.h index 973df4d9d366..8cccbee61519 100644 --- a/trunk/arch/powerpc/include/asm/pci-bridge.h +++ b/trunk/arch/powerpc/include/asm/pci-bridge.h @@ -184,8 +184,6 @@ static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn) { return PCI_DN(dn)->edev; } -#else -#define of_node_to_eeh_dev(x) (NULL) #endif /** Find the bus corresponding to the indicated device node */ diff --git a/trunk/arch/powerpc/include/asm/ppc-pci.h b/trunk/arch/powerpc/include/asm/ppc-pci.h index ed57fa7920c8..80fa704d410f 100644 --- a/trunk/arch/powerpc/include/asm/ppc-pci.h +++ b/trunk/arch/powerpc/include/asm/ppc-pci.h @@ -47,17 +47,19 @@ extern int rtas_setup_phb(struct pci_controller *phb); #ifdef CONFIG_EEH -void eeh_addr_cache_insert_dev(struct pci_dev *dev); -void eeh_addr_cache_rmv_dev(struct pci_dev *dev); -struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr); -void eeh_slot_error_detail(struct eeh_pe *pe, int severity); -int eeh_pci_enable(struct eeh_pe *pe, int function); -int eeh_reset_pe(struct eeh_pe *); -void eeh_save_bars(struct eeh_dev *edev); +void pci_addr_cache_build(void); +void 
pci_addr_cache_insert_device(struct pci_dev *dev); +void pci_addr_cache_remove_device(struct pci_dev *dev); +struct pci_dev *pci_addr_cache_get_device(unsigned long addr); +void eeh_slot_error_detail(struct eeh_dev *edev, int severity); +int eeh_pci_enable(struct eeh_dev *edev, int function); +int eeh_reset_pe(struct eeh_dev *); +void eeh_restore_bars(struct eeh_dev *); int rtas_write_config(struct pci_dn *, int where, int size, u32 val); int rtas_read_config(struct pci_dn *, int where, int size, u32 *val); -void eeh_pe_state_mark(struct eeh_pe *pe, int state); -void eeh_pe_state_clear(struct eeh_pe *pe, int state); +void eeh_mark_slot(struct device_node *dn, int mode_flag); +void eeh_clear_slot(struct device_node *dn, int mode_flag); +struct device_node *eeh_find_device_pe(struct device_node *dn); void eeh_sysfs_add_device(struct pci_dev *pdev); void eeh_sysfs_remove_device(struct pci_dev *pdev); diff --git a/trunk/arch/powerpc/include/asm/probes.h b/trunk/arch/powerpc/include/asm/probes.h deleted file mode 100644 index 5f1e15b68704..000000000000 --- a/trunk/arch/powerpc/include/asm/probes.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef _ASM_POWERPC_PROBES_H -#define _ASM_POWERPC_PROBES_H -#ifdef __KERNEL__ -/* - * Definitions common to probes files - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright IBM Corporation, 2012 - */ -#include - -typedef u32 ppc_opcode_t; -#define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */ - -/* Trap definitions per ISA */ -#define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008) -#define IS_TD(instr) (((instr) & 0xfc0007fe) == 0x7c000088) -#define IS_TDI(instr) (((instr) & 0xfc000000) == 0x08000000) -#define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000) - -#ifdef CONFIG_PPC64 -#define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \ - IS_TWI(instr) || IS_TDI(instr)) -#else -#define is_trap(instr) (IS_TW(instr) || IS_TWI(instr)) -#endif /* CONFIG_PPC64 */ - -#endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_PROBES_H */ diff --git a/trunk/arch/powerpc/include/asm/processor.h b/trunk/arch/powerpc/include/asm/processor.h index 83efc6e81543..54b73a28c205 100644 --- a/trunk/arch/powerpc/include/asm/processor.h +++ b/trunk/arch/powerpc/include/asm/processor.h @@ -219,8 +219,6 @@ struct thread_struct { #endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif unsigned long dabr; /* Data address breakpoint register */ - unsigned long dabrx; /* ... 
extension */ - unsigned long trap_nr; /* last trap # on this thread */ #ifdef CONFIG_ALTIVEC /* Complete AltiVec register set */ vector128 vr[32] __attribute__((aligned(16))); diff --git a/trunk/arch/powerpc/include/asm/reg.h b/trunk/arch/powerpc/include/asm/reg.h index 121a90bbf778..638608677e2a 100644 --- a/trunk/arch/powerpc/include/asm/reg.h +++ b/trunk/arch/powerpc/include/asm/reg.h @@ -208,9 +208,6 @@ #define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */ #define DABRX_USER (1UL << 0) #define DABRX_KERNEL (1UL << 1) -#define DABRX_HYP (1UL << 2) -#define DABRX_BTI (1UL << 3) -#define DABRX_ALL (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER) #define SPRN_DAR 0x013 /* Data Address Register */ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ @@ -764,8 +761,7 @@ * 64-bit embedded * - SPRG0 generic exception scratch * - SPRG2 TLB exception stack - * - SPRG3 critical exception scratch and - * CPU and NUMA node for VDSO getcpu (user visible) + * - SPRG3 CPU and NUMA node for VDSO getcpu (user visible) * - SPRG4 unused (user visible) * - SPRG6 TLB miss scratch (user visible, sorry !) * - SPRG7 critical exception scratch @@ -862,12 +858,11 @@ #ifdef CONFIG_PPC_BOOK3E_64 #define SPRN_SPRG_MC_SCRATCH SPRN_SPRG8 -#define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG3 +#define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG7 #define SPRN_SPRG_DBG_SCRATCH SPRN_SPRG9 #define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2 #define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6 #define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0 -#define SPRN_SPRG_GDBELL_SCRATCH SPRN_SPRG_GEN_SCRATCH #define SET_PACA(rX) mtspr SPRN_SPRG_PACA,rX #define GET_PACA(rX) mfspr rX,SPRN_SPRG_PACA @@ -942,7 +937,7 @@ #define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */ #define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revison field */ -#define pvr_version_is(pvr) (PVR_VER(mfspr(SPRN_PVR)) == (pvr)) +#define __is_processor(pv) (PVR_VER(mfspr(SPRN_PVR)) == (pv)) /* * IBM has further subdivided the standard PowerPC 16-bit version and @@ -1007,25 +1002,25 @@ #define PVR_476_ISS 0x00052000 /* 64-bit processors */ -#define PVR_NORTHSTAR 0x0033 -#define PVR_PULSAR 0x0034 -#define PVR_POWER4 0x0035 -#define PVR_ICESTAR 0x0036 -#define PVR_SSTAR 0x0037 -#define PVR_POWER4p 0x0038 -#define PVR_970 0x0039 -#define PVR_POWER5 0x003A -#define PVR_POWER5p 0x003B -#define PVR_970FX 0x003C -#define PVR_POWER6 0x003E -#define PVR_POWER7 0x003F -#define PVR_630 0x0040 -#define PVR_630p 0x0041 -#define PVR_970MP 0x0044 -#define PVR_970GX 0x0045 -#define PVR_POWER7p 0x004A -#define PVR_BE 0x0070 -#define PVR_PA6T 0x0090 +/* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */ +#define PV_NORTHSTAR 0x0033 +#define PV_PULSAR 0x0034 +#define PV_POWER4 0x0035 +#define PV_ICESTAR 0x0036 +#define PV_SSTAR 0x0037 +#define PV_POWER4p 0x0038 +#define PV_970 0x0039 +#define PV_POWER5 0x003A +#define PV_POWER5p 0x003B +#define PV_970FX 0x003C +#define PV_POWER6 0x003E +#define PV_POWER7 0x003F +#define PV_630 0x0040 +#define PV_630p 0x0041 +#define PV_970MP 0x0044 +#define PV_970GX 0x0045 +#define PV_BE 0x0070 +#define PV_PA6T 0x0090 /* Macros for setting and retrieving special purpose registers */ #ifndef __ASSEMBLY__ diff --git a/trunk/arch/powerpc/include/asm/setup.h b/trunk/arch/powerpc/include/asm/setup.h index 8b9a306260b2..d084ce195fc3 100644 --- a/trunk/arch/powerpc/include/asm/setup.h +++ b/trunk/arch/powerpc/include/asm/setup.h @@ -9,7 +9,7 @@ extern void 
ppc_printk_progress(char *s, unsigned short hex); extern unsigned int rtas_data; extern int mem_init_done; /* set on boot once kmalloc can be called */ extern int init_bootmem_done; /* set once bootmem is available */ -extern unsigned long long memory_limit; +extern phys_addr_t memory_limit; extern unsigned long klimit; extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); diff --git a/trunk/arch/powerpc/include/asm/thread_info.h b/trunk/arch/powerpc/include/asm/thread_info.h index e942203cd4a8..faf93529cbf0 100644 --- a/trunk/arch/powerpc/include/asm/thread_info.h +++ b/trunk/arch/powerpc/include/asm/thread_info.h @@ -102,7 +102,6 @@ static inline struct thread_info *current_thread_info(void) #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ #define TIF_NOERROR 12 /* Force successful syscall return */ #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ -#define TIF_UPROBE 14 /* breakpointed or single-stepping */ #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ /* as above, but as bit values */ @@ -119,13 +118,12 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_RESTOREALL (1< - */ - -#include -#include - -typedef ppc_opcode_t uprobe_opcode_t; - -#define MAX_UINSN_BYTES 4 -#define UPROBE_XOL_SLOT_BYTES (MAX_UINSN_BYTES) - -/* The following alias is needed for reference from arch-agnostic code */ -#define UPROBE_SWBP_INSN BREAKPOINT_INSTRUCTION -#define UPROBE_SWBP_INSN_SIZE 4 /* swbp insn size in bytes */ - -struct arch_uprobe { - union { - u8 insn[MAX_UINSN_BYTES]; - u32 ainsn; - }; -}; - -struct arch_uprobe_task { - unsigned long saved_trap_nr; -}; - -extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr); -extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs); -extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs); -extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); -extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); -extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); -#endif /* _ASM_UPROBES_H */ diff --git a/trunk/arch/powerpc/kernel/Makefile b/trunk/arch/powerpc/kernel/Makefile index cde12f8a4ebc..bb282dd81612 100644 --- a/trunk/arch/powerpc/kernel/Makefile +++ b/trunk/arch/powerpc/kernel/Makefile @@ -96,7 +96,6 @@ obj-$(CONFIG_MODULES) += ppc_ksyms.o obj-$(CONFIG_BOOTX_TEXT) += btext.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_KPROBES) += kprobes.o -obj-$(CONFIG_UPROBES) += uprobes.o obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o diff --git a/trunk/arch/powerpc/kernel/asm-offsets.c b/trunk/arch/powerpc/kernel/asm-offsets.c index 7523539cfe9f..e8995727b1c1 100644 --- a/trunk/arch/powerpc/kernel/asm-offsets.c +++ b/trunk/arch/powerpc/kernel/asm-offsets.c @@ -206,7 +206,6 @@ int main(void) DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost)); - DEFINE(PACA_SPRG3, offsetof(struct paca_struct, sprg3)); #endif /* CONFIG_PPC64 */ /* RTAS */ @@ -535,6 +534,7 @@ int main(void) HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); + HSTATE_FIELD(HSTATE_SPRG3, sprg3); HSTATE_FIELD(HSTATE_IN_GUEST, 
in_guest); HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); HSTATE_FIELD(HSTATE_NAPPING, napping); diff --git a/trunk/arch/powerpc/kernel/dma-swiotlb.c b/trunk/arch/powerpc/kernel/dma-swiotlb.c index a720b54b971c..46943651da23 100644 --- a/trunk/arch/powerpc/kernel/dma-swiotlb.c +++ b/trunk/arch/powerpc/kernel/dma-swiotlb.c @@ -12,7 +12,6 @@ */ #include -#include #include #include #include @@ -21,6 +20,7 @@ #include #include #include +#include unsigned int ppc_swiotlb_enable; diff --git a/trunk/arch/powerpc/kernel/dma.c b/trunk/arch/powerpc/kernel/dma.c index 8032b97ccdcb..355b9d84b0f8 100644 --- a/trunk/arch/powerpc/kernel/dma.c +++ b/trunk/arch/powerpc/kernel/dma.c @@ -14,6 +14,7 @@ #include #include #include +#include #include /* @@ -49,7 +50,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size, return NULL; ret = page_address(page); memset(ret, 0, size); - *dma_handle = __pa(ret) + get_dma_offset(dev); + *dma_handle = virt_to_abs(ret) + get_dma_offset(dev); return ret; #endif diff --git a/trunk/arch/powerpc/kernel/exceptions-64e.S b/trunk/arch/powerpc/kernel/exceptions-64e.S index 87a82fbdf05a..98be7f0cd227 100644 --- a/trunk/arch/powerpc/kernel/exceptions-64e.S +++ b/trunk/arch/powerpc/kernel/exceptions-64e.S @@ -25,8 +25,6 @@ #include #include #include -#include -#include /* XXX This will ultimately add space for a special exception save * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc... @@ -37,18 +35,16 @@ #define SPECIAL_EXC_FRAME_SIZE INT_FRAME_SIZE /* Exception prolog code for all exceptions */ -#define EXCEPTION_PROLOG(n, intnum, type, addition) \ +#define EXCEPTION_PROLOG(n, type, addition) \ mtspr SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */ \ mfspr r13,SPRN_SPRG_PACA; /* get PACA */ \ std r10,PACA_EX##type+EX_R10(r13); \ std r11,PACA_EX##type+EX_R11(r13); \ - PROLOG_STORE_RESTORE_SCRATCH_##type; \ mfcr r10; /* save CR */ \ - mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \ - DO_KVM intnum,SPRN_##type##_SRR1; /* KVM hook */ \ - stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \ addition; /* additional code for that exc. */ \ std r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */ \ + stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \ + mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \ type##_SET_KSTACK; /* get special stack if necessary */\ andi. 
r10,r11,MSR_PR; /* save stack pointer */ \ beq 1f; /* branch around if supervisor */ \ @@ -63,10 +59,6 @@ #define SPRN_GEN_SRR0 SPRN_SRR0 #define SPRN_GEN_SRR1 SPRN_SRR1 -#define GDBELL_SET_KSTACK GEN_SET_KSTACK -#define SPRN_GDBELL_SRR0 SPRN_GSRR0 -#define SPRN_GDBELL_SRR1 SPRN_GSRR1 - #define CRIT_SET_KSTACK \ ld r1,PACA_CRIT_STACK(r13); \ subi r1,r1,SPECIAL_EXC_FRAME_SIZE; @@ -85,46 +77,29 @@ #define SPRN_MC_SRR0 SPRN_MCSRR0 #define SPRN_MC_SRR1 SPRN_MCSRR1 -#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \ - EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n)) +#define NORMAL_EXCEPTION_PROLOG(n, addition) \ + EXCEPTION_PROLOG(n, GEN, addition##_GEN(n)) -#define CRIT_EXCEPTION_PROLOG(n, intnum, addition) \ - EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n)) +#define CRIT_EXCEPTION_PROLOG(n, addition) \ + EXCEPTION_PROLOG(n, CRIT, addition##_CRIT(n)) -#define DBG_EXCEPTION_PROLOG(n, intnum, addition) \ - EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n)) +#define DBG_EXCEPTION_PROLOG(n, addition) \ + EXCEPTION_PROLOG(n, DBG, addition##_DBG(n)) -#define MC_EXCEPTION_PROLOG(n, intnum, addition) \ - EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n)) +#define MC_EXCEPTION_PROLOG(n, addition) \ + EXCEPTION_PROLOG(n, MC, addition##_MC(n)) -#define GDBELL_EXCEPTION_PROLOG(n, intnum, addition) \ - EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n)) - -/* - * Store user-visible scratch in PACA exception slots and restore proper value - */ -#define PROLOG_STORE_RESTORE_SCRATCH_GEN -#define PROLOG_STORE_RESTORE_SCRATCH_GDBELL -#define PROLOG_STORE_RESTORE_SCRATCH_DBG -#define PROLOG_STORE_RESTORE_SCRATCH_MC - -#define PROLOG_STORE_RESTORE_SCRATCH_CRIT \ - mfspr r10,SPRN_SPRG_CRIT_SCRATCH; /* get r13 */ \ - std r10,PACA_EXCRIT+EX_R13(r13); \ - ld r11,PACA_SPRG3(r13); \ - mtspr SPRN_SPRG_CRIT_SCRATCH,r11; /* Variants of the "addition" argument for the prolog */ #define PROLOG_ADDITION_NONE_GEN(n) -#define PROLOG_ADDITION_NONE_GDBELL(n) #define PROLOG_ADDITION_NONE_CRIT(n) #define PROLOG_ADDITION_NONE_DBG(n) #define PROLOG_ADDITION_NONE_MC(n) #define PROLOG_ADDITION_MASKABLE_GEN(n) \ - lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \ - cmpwi cr0,r10,0; /* yes -> go out of line */ \ + lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? 
*/ \ + cmpwi cr0,r11,0; /* yes -> go out of line */ \ beq masked_interrupt_book3e_##n #define PROLOG_ADDITION_2REGS_GEN(n) \ @@ -258,9 +233,9 @@ exc_##n##_bad_stack: \ 1: -#define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack) \ +#define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack) \ START_EXCEPTION(label); \ - NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\ + NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE) \ EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE) \ ack(r8); \ CHECK_NAPPING(); \ @@ -311,8 +286,7 @@ interrupt_end_book3e: /* Critical Input Interrupt */ START_EXCEPTION(critical_input); - CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL, - PROLOG_ADDITION_NONE) + CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE) // EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE) // bl special_reg_save_crit // CHECK_NAPPING(); @@ -323,8 +297,7 @@ interrupt_end_book3e: /* Machine Check Interrupt */ START_EXCEPTION(machine_check); - MC_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_MACHINE_CHECK, - PROLOG_ADDITION_NONE) + CRIT_EXCEPTION_PROLOG(0x200, PROLOG_ADDITION_NONE) // EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE) // bl special_reg_save_mc // addi r3,r1,STACK_FRAME_OVERHEAD @@ -335,8 +308,7 @@ interrupt_end_book3e: /* Data Storage Interrupt */ START_EXCEPTION(data_storage) - NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE, - PROLOG_ADDITION_2REGS) + NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS) mfspr r14,SPRN_DEAR mfspr r15,SPRN_ESR EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE) @@ -344,21 +316,18 @@ interrupt_end_book3e: /* Instruction Storage Interrupt */ START_EXCEPTION(instruction_storage); - NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE, - PROLOG_ADDITION_2REGS) + NORMAL_EXCEPTION_PROLOG(0x400, PROLOG_ADDITION_2REGS) li r15,0 mr r14,r10 EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE) b storage_fault_common /* External Input Interrupt */ - MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL, - external_input, .do_IRQ, ACK_NONE) + MASKABLE_EXCEPTION(0x500, external_input, .do_IRQ, ACK_NONE) /* Alignment */ START_EXCEPTION(alignment); - NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT, - PROLOG_ADDITION_2REGS) + NORMAL_EXCEPTION_PROLOG(0x600, PROLOG_ADDITION_2REGS) mfspr r14,SPRN_DEAR mfspr r15,SPRN_ESR EXCEPTION_COMMON(0x600, PACA_EXGEN, INTS_KEEP) @@ -366,8 +335,7 @@ interrupt_end_book3e: /* Program Interrupt */ START_EXCEPTION(program); - NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM, - PROLOG_ADDITION_1REG) + NORMAL_EXCEPTION_PROLOG(0x700, PROLOG_ADDITION_1REG) mfspr r14,SPRN_ESR EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE) std r14,_DSISR(r1) @@ -379,8 +347,7 @@ interrupt_end_book3e: /* Floating Point Unavailable Interrupt */ START_EXCEPTION(fp_unavailable); - NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL, - PROLOG_ADDITION_NONE) + NORMAL_EXCEPTION_PROLOG(0x800, PROLOG_ADDITION_NONE) /* we can probably do a shorter exception entry for that one... 
*/ EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP) ld r12,_MSR(r1) @@ -395,17 +362,14 @@ interrupt_end_book3e: b .ret_from_except /* Decrementer Interrupt */ - MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER, - decrementer, .timer_interrupt, ACK_DEC) + MASKABLE_EXCEPTION(0x900, decrementer, .timer_interrupt, ACK_DEC) /* Fixed Interval Timer Interrupt */ - MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT, - fixed_interval, .unknown_exception, ACK_FIT) + MASKABLE_EXCEPTION(0x980, fixed_interval, .unknown_exception, ACK_FIT) /* Watchdog Timer Interrupt */ START_EXCEPTION(watchdog); - CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG, - PROLOG_ADDITION_NONE) + CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE) // EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE) // bl special_reg_save_crit // CHECK_NAPPING(); @@ -424,8 +388,7 @@ interrupt_end_book3e: /* Auxiliary Processor Unavailable Interrupt */ START_EXCEPTION(ap_unavailable); - NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL, - PROLOG_ADDITION_NONE) + NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE) bl .save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD @@ -434,8 +397,7 @@ interrupt_end_book3e: /* Debug exception as a critical interrupt*/ START_EXCEPTION(debug_crit); - CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG, - PROLOG_ADDITION_2REGS) + CRIT_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS) /* * If there is a single step or branch-taken exception in an @@ -469,7 +431,7 @@ interrupt_end_book3e: mtcr r10 ld r10,PACA_EXCRIT+EX_R10(r13) /* restore registers */ ld r11,PACA_EXCRIT+EX_R11(r13) - ld r13,PACA_EXCRIT+EX_R13(r13) + mfspr r13,SPRN_SPRG_CRIT_SCRATCH rfci /* Normal debug exception */ @@ -482,7 +444,7 @@ interrupt_end_book3e: /* Now we mash up things to make it look like we are coming on a * normal exception */ - ld r15,PACA_EXCRIT+EX_R13(r13) + mfspr r15,SPRN_SPRG_CRIT_SCRATCH mtspr SPRN_SPRG_GEN_SCRATCH,r15 mfspr r14,SPRN_DBSR EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE) @@ -500,8 +462,7 @@ kernel_dbg_exc: /* Debug exception as a debug interrupt*/ START_EXCEPTION(debug_debug); - DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG, - PROLOG_ADDITION_2REGS) + DBG_EXCEPTION_PROLOG(0xd08, PROLOG_ADDITION_2REGS) /* * If there is a single step or branch-taken exception in an @@ -562,21 +523,18 @@ kernel_dbg_exc: b .ret_from_except START_EXCEPTION(perfmon); - NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR, - PROLOG_ADDITION_NONE) + NORMAL_EXCEPTION_PROLOG(0x260, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE) addi r3,r1,STACK_FRAME_OVERHEAD bl .performance_monitor_exception b .ret_from_except_lite /* Doorbell interrupt */ - MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL, - doorbell, .doorbell_exception, ACK_NONE) + MASKABLE_EXCEPTION(0x280, doorbell, .doorbell_exception, ACK_NONE) /* Doorbell critical Interrupt */ START_EXCEPTION(doorbell_crit); - CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL, - PROLOG_ADDITION_NONE) + CRIT_EXCEPTION_PROLOG(0x2a0, PROLOG_ADDITION_NONE) // EXCEPTION_COMMON(0x2a0, PACA_EXCRIT, INTS_DISABLE) // bl special_reg_save_crit // CHECK_NAPPING(); @@ -585,24 +543,12 @@ kernel_dbg_exc: // b ret_from_crit_except b . 
-/* - * Guest doorbell interrupt - * This general exception use GSRRx save/restore registers - */ - START_EXCEPTION(guest_doorbell); - GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL, - PROLOG_ADDITION_NONE) - EXCEPTION_COMMON(0x2c0, PACA_EXGEN, INTS_KEEP) - addi r3,r1,STACK_FRAME_OVERHEAD - bl .save_nvgprs - INTS_RESTORE_HARD - bl .unknown_exception - b .ret_from_except +/* Guest Doorbell */ + MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE) /* Guest Doorbell critical Interrupt */ START_EXCEPTION(guest_doorbell_crit); - CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT, - PROLOG_ADDITION_NONE) + CRIT_EXCEPTION_PROLOG(0x2e0, PROLOG_ADDITION_NONE) // EXCEPTION_COMMON(0x2e0, PACA_EXCRIT, INTS_DISABLE) // bl special_reg_save_crit // CHECK_NAPPING(); @@ -613,8 +559,7 @@ kernel_dbg_exc: /* Hypervisor call */ START_EXCEPTION(hypercall); - NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL, - PROLOG_ADDITION_NONE) + NORMAL_EXCEPTION_PROLOG(0x310, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x310, PACA_EXGEN, INTS_KEEP) addi r3,r1,STACK_FRAME_OVERHEAD bl .save_nvgprs @@ -624,8 +569,7 @@ kernel_dbg_exc: /* Embedded Hypervisor priviledged */ START_EXCEPTION(ehpriv); - NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV, - PROLOG_ADDITION_NONE) + NORMAL_EXCEPTION_PROLOG(0x320, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x320, PACA_EXGEN, INTS_KEEP) addi r3,r1,STACK_FRAME_OVERHEAD bl .save_nvgprs @@ -638,42 +582,44 @@ kernel_dbg_exc: * accordingly and if the interrupt is level sensitive, we hard disable */ -.macro masked_interrupt_book3e paca_irq full_mask - lbz r10,PACAIRQHAPPENED(r13) - ori r10,r10,\paca_irq - stb r10,PACAIRQHAPPENED(r13) - - .if \full_mask == 1 - rldicl r10,r11,48,1 /* clear MSR_EE */ - rotldi r11,r10,16 - mtspr SPRN_SRR1,r11 - .endif - - lwz r11,PACA_EXGEN+EX_CR(r13) - mtcr r11 - ld r10,PACA_EXGEN+EX_R10(r13) - ld r11,PACA_EXGEN+EX_R11(r13) - mfspr r13,SPRN_SPRG_GEN_SCRATCH - rfi - b . -.endm - masked_interrupt_book3e_0x500: - // XXX When adding support for EPR, use PACA_IRQ_EE_EDGE - masked_interrupt_book3e PACA_IRQ_EE 1 + /* XXX When adding support for EPR, use PACA_IRQ_EE_EDGE */ + li r11,PACA_IRQ_EE + b masked_interrupt_book3e_full_mask masked_interrupt_book3e_0x900: - ACK_DEC(r10); - masked_interrupt_book3e PACA_IRQ_DEC 0 - + ACK_DEC(r11); + li r11,PACA_IRQ_DEC + b masked_interrupt_book3e_no_mask masked_interrupt_book3e_0x980: - ACK_FIT(r10); - masked_interrupt_book3e PACA_IRQ_DEC 0 - + ACK_FIT(r11); + li r11,PACA_IRQ_DEC + b masked_interrupt_book3e_no_mask masked_interrupt_book3e_0x280: masked_interrupt_book3e_0x2c0: - masked_interrupt_book3e PACA_IRQ_DBELL 0 + li r11,PACA_IRQ_DBELL + b masked_interrupt_book3e_no_mask +masked_interrupt_book3e_no_mask: + mtcr r10 + lbz r10,PACAIRQHAPPENED(r13) + or r10,r10,r11 + stb r10,PACAIRQHAPPENED(r13) + b 1f +masked_interrupt_book3e_full_mask: + mtcr r10 + lbz r10,PACAIRQHAPPENED(r13) + or r10,r10,r11 + stb r10,PACAIRQHAPPENED(r13) + mfspr r10,SPRN_SRR1 + rldicl r11,r10,48,1 /* clear MSR_EE */ + rotldi r10,r11,16 + mtspr SPRN_SRR1,r10 +1: ld r10,PACA_EXGEN+EX_R10(r13); + ld r11,PACA_EXGEN+EX_R11(r13); + mfspr r13,SPRN_SPRG_GEN_SCRATCH; + rfi + b . /* * Called from arch_local_irq_enable when an interrupt needs * to be resent. 
r3 contains either 0x500,0x900,0x260 or 0x280 diff --git a/trunk/arch/powerpc/kernel/fadump.c b/trunk/arch/powerpc/kernel/fadump.c index 06c8202a69cf..18bdf74fa164 100644 --- a/trunk/arch/powerpc/kernel/fadump.c +++ b/trunk/arch/powerpc/kernel/fadump.c @@ -289,7 +289,8 @@ int __init fadump_reserve_mem(void) else memory_limit = memblock_end_of_DRAM(); printk(KERN_INFO "Adjusted memory_limit for firmware-assisted" - " dump, now %#016llx\n", memory_limit); + " dump, now %#016llx\n", + (unsigned long long)memory_limit); } if (memory_limit) memory_boundary = memory_limit; diff --git a/trunk/arch/powerpc/kernel/hw_breakpoint.c b/trunk/arch/powerpc/kernel/hw_breakpoint.c index a89cae481b04..956a4c496de9 100644 --- a/trunk/arch/powerpc/kernel/hw_breakpoint.c +++ b/trunk/arch/powerpc/kernel/hw_breakpoint.c @@ -73,7 +73,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) * If so, DABR will be populated in single_step_dabr_instruction(). */ if (current->thread.last_hit_ubp != bp) - set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx); + set_dabr(info->address | info->type | DABR_TRANSLATION); return 0; } @@ -97,7 +97,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) } *slot = NULL; - set_dabr(0, 0); + set_dabr(0); } /* @@ -170,13 +170,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) info->address = bp->attr.bp_addr; info->len = bp->attr.bp_len; - info->dabrx = DABRX_ALL; - if (bp->attr.exclude_user) - info->dabrx &= ~DABRX_USER; - if (bp->attr.exclude_kernel) - info->dabrx &= ~DABRX_KERNEL; - if (bp->attr.exclude_hv) - info->dabrx &= ~DABRX_HYP; /* * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8) @@ -204,7 +197,7 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs) info = counter_arch_bp(tsk->thread.last_hit_ubp); regs->msr &= ~MSR_SE; - set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx); + set_dabr(info->address | info->type | DABR_TRANSLATION); tsk->thread.last_hit_ubp = NULL; } @@ -222,7 +215,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) unsigned long dar = regs->dar; /* Disable breakpoints during exception handling */ - set_dabr(0, 0); + set_dabr(0); /* * The counter may be concurrently released but that can only @@ -288,7 +281,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) if (!info->extraneous_interrupt) perf_bp_event(bp, regs); - set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx); + set_dabr(info->address | info->type | DABR_TRANSLATION); out: rcu_read_unlock(); return rc; @@ -301,7 +294,7 @@ int __kprobes single_step_dabr_instruction(struct die_args *args) { struct pt_regs *regs = args->regs; struct perf_event *bp = NULL; - struct arch_hw_breakpoint *info; + struct arch_hw_breakpoint *bp_info; bp = current->thread.last_hit_ubp; /* @@ -311,16 +304,16 @@ int __kprobes single_step_dabr_instruction(struct die_args *args) if (!bp) return NOTIFY_DONE; - info = counter_arch_bp(bp); + bp_info = counter_arch_bp(bp); /* * We shall invoke the user-defined callback function in the single * stepping handler to confirm to 'trigger-after-execute' semantics */ - if (!info->extraneous_interrupt) + if (!bp_info->extraneous_interrupt) perf_bp_event(bp, regs); - set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx); + set_dabr(bp_info->address | bp_info->type | DABR_TRANSLATION); current->thread.last_hit_ubp = NULL; /* diff --git a/trunk/arch/powerpc/kernel/ibmebus.c b/trunk/arch/powerpc/kernel/ibmebus.c index 8220baa46faf..b01d14eeca8d 100644 
--- a/trunk/arch/powerpc/kernel/ibmebus.c +++ b/trunk/arch/powerpc/kernel/ibmebus.c @@ -47,6 +47,7 @@ #include #include #include +#include static struct device ibmebus_bus_device = { /* fake "parent" device */ .init_name = "ibmebus", diff --git a/trunk/arch/powerpc/kernel/machine_kexec.c b/trunk/arch/powerpc/kernel/machine_kexec.c index fa9f6c72f557..5df777794403 100644 --- a/trunk/arch/powerpc/kernel/machine_kexec.c +++ b/trunk/arch/powerpc/kernel/machine_kexec.c @@ -165,7 +165,7 @@ void __init reserve_crashkernel(void) if (memory_limit && memory_limit <= crashk_res.end) { memory_limit = crashk_res.end + 1; printk("Adjusted memory limit for crashkernel, now 0x%llx\n", - memory_limit); + (unsigned long long)memory_limit); } printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " @@ -204,12 +204,6 @@ static struct property crashk_size_prop = { .value = &crashk_size, }; -static struct property memory_limit_prop = { - .name = "linux,memory-limit", - .length = sizeof(unsigned long long), - .value = &memory_limit, -}; - static void __init export_crashk_values(struct device_node *node) { struct property *prop; @@ -229,12 +223,6 @@ static void __init export_crashk_values(struct device_node *node) crashk_size = resource_size(&crashk_res); prom_add_property(node, &crashk_size_prop); } - - /* - * memory_limit is required by the kexec-tools to limit the - * crash regions to the actual memory used. - */ - prom_update_property(node, &memory_limit_prop); } static int __init kexec_setup(void) diff --git a/trunk/arch/powerpc/kernel/paca.c b/trunk/arch/powerpc/kernel/paca.c index cd6da855090c..fbe1a12dc7f1 100644 --- a/trunk/arch/powerpc/kernel/paca.c +++ b/trunk/arch/powerpc/kernel/paca.c @@ -142,7 +142,6 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) new_paca->hw_cpu_id = 0xffff; new_paca->kexec_state = KEXEC_STATE_NONE; new_paca->__current = &init_task; - new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL; #ifdef CONFIG_PPC_STD_MMU_64 new_paca->slb_shadow_ptr = &slb_shadow[cpu]; #endif /* CONFIG_PPC_STD_MMU_64 */ diff --git a/trunk/arch/powerpc/kernel/pci-common.c b/trunk/arch/powerpc/kernel/pci-common.c index 4cb714792bea..43fea543d686 100644 --- a/trunk/arch/powerpc/kernel/pci-common.c +++ b/trunk/arch/powerpc/kernel/pci-common.c @@ -99,6 +99,26 @@ void pcibios_free_controller(struct pci_controller *phb) kfree(phb); } +/* + * The function is used to return the minimal alignment + * for memory or I/O windows of the associated P2P bridge. + * By default, 4KiB alignment for I/O windows and 1MiB for + * memory windows. + */ +resource_size_t pcibios_window_alignment(struct pci_bus *bus, + unsigned long type) +{ + if (ppc_md.pcibios_window_alignment) + return ppc_md.pcibios_window_alignment(bus, type); + + /* + * PCI core will figure out the default + * alignment: 4KiB for I/O and 1MiB for + * memory window. + */ + return 1; +} + static resource_size_t pcibios_io_size(const struct pci_controller *hose) { #ifdef CONFIG_PPC64 @@ -960,14 +980,13 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus) if (i >= 3 && bus->self->transparent) continue; - /* If we're going to reassign everything, we can - * shrink the P2P resource to have size as being - * of 0 in order to save space. 
+ /* If we are going to re-assign everything, mark the resource + * as unset and move it down to 0 */ if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) { res->flags |= IORESOURCE_UNSET; + res->end -= res->start; res->start = 0; - res->end = -1; continue; } @@ -1229,14 +1248,7 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus) pr_warning("PCI: Cannot allocate resource region " "%d of PCI bridge %d, will remap\n", i, bus->number); clear_resource: - /* The resource might be figured out when doing - * reassignment based on the resources required - * by the downstream PCI devices. Here we set - * the size of the resource to be 0 in order to - * save more space. - */ - res->start = 0; - res->end = -1; + res->start = res->end = 0; res->flags = 0; } diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c index 50e504c29bb9..1a1f2ddfb581 100644 --- a/trunk/arch/powerpc/kernel/process.c +++ b/trunk/arch/powerpc/kernel/process.c @@ -258,7 +258,6 @@ void do_send_trap(struct pt_regs *regs, unsigned long address, { siginfo_t info; - current->thread.trap_nr = signal_code; if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, 11, SIGSEGV) == NOTIFY_STOP) return; @@ -276,7 +275,6 @@ void do_dabr(struct pt_regs *regs, unsigned long address, { siginfo_t info; - current->thread.trap_nr = TRAP_HWBKPT; if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, 11, SIGSEGV) == NOTIFY_STOP) return; @@ -285,7 +283,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address, return; /* Clear the DABR */ - set_dabr(0, 0); + set_dabr(0); /* Deliver the signal to userspace */ info.si_signo = SIGTRAP; @@ -366,19 +364,18 @@ static void set_debug_reg_defaults(struct thread_struct *thread) { if (thread->dabr) { thread->dabr = 0; - thread->dabrx = 0; - set_dabr(0, 0); + set_dabr(0); } } #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ -int set_dabr(unsigned long dabr, unsigned long dabrx) +int set_dabr(unsigned long dabr) { __get_cpu_var(current_dabr) = dabr; if (ppc_md.set_dabr) - return ppc_md.set_dabr(dabr, dabrx); + return ppc_md.set_dabr(dabr); /* XXX should we have a CPU_FTR_HAS_DABR ? */ #ifdef CONFIG_PPC_ADV_DEBUG_REGS @@ -388,8 +385,9 @@ int set_dabr(unsigned long dabr, unsigned long dabrx) #endif #elif defined(CONFIG_PPC_BOOK3S) mtspr(SPRN_DABR, dabr); - mtspr(SPRN_DABRX, dabrx); #endif + + return 0; } @@ -482,7 +480,7 @@ struct task_struct *__switch_to(struct task_struct *prev, */ #ifndef CONFIG_HAVE_HW_BREAKPOINT if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) - set_dabr(new->thread.dabr, new->thread.dabrx); + set_dabr(new->thread.dabr); #endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif diff --git a/trunk/arch/powerpc/kernel/prom.c b/trunk/arch/powerpc/kernel/prom.c index 37725e86651e..f191bf02943a 100644 --- a/trunk/arch/powerpc/kernel/prom.c +++ b/trunk/arch/powerpc/kernel/prom.c @@ -78,7 +78,7 @@ static int __init early_parse_mem(char *p) return 1; memory_limit = PAGE_ALIGN(memparse(p, &p)); - DBG("memory limit = 0x%llx\n", memory_limit); + DBG("memory limit = 0x%llx\n", (unsigned long long)memory_limit); return 0; } @@ -661,7 +661,7 @@ void __init early_init_devtree(void *params) /* make sure we've parsed cmdline for mem= before this */ if (memory_limit) - first_memblock_size = min_t(u64, first_memblock_size, memory_limit); + first_memblock_size = min(first_memblock_size, memory_limit); setup_initial_memory_limit(memstart_addr, first_memblock_size); /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... 
*/ memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START); diff --git a/trunk/arch/powerpc/kernel/prom_init.c b/trunk/arch/powerpc/kernel/prom_init.c index ce68278a5d73..0794a3017b1b 100644 --- a/trunk/arch/powerpc/kernel/prom_init.c +++ b/trunk/arch/powerpc/kernel/prom_init.c @@ -1691,7 +1691,7 @@ static void __init prom_initialize_tce_table(void) * else will impact performance, so we always allocate 8MB. * Anton */ - if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p)) + if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p)) minsize = 8UL << 20; else minsize = 4UL << 20; diff --git a/trunk/arch/powerpc/kernel/ptrace.c b/trunk/arch/powerpc/kernel/ptrace.c index 79d8e56470df..c10fc28b9092 100644 --- a/trunk/arch/powerpc/kernel/ptrace.c +++ b/trunk/arch/powerpc/kernel/ptrace.c @@ -960,7 +960,6 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, thread->ptrace_bps[0] = bp; ptrace_put_breakpoints(task); thread->dabr = data; - thread->dabrx = DABRX_ALL; return 0; } @@ -984,7 +983,6 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, /* Move contents to the DABR register */ task->thread.dabr = data; - task->thread.dabrx = DABRX_ALL; #else /* CONFIG_PPC_ADV_DEBUG_REGS */ /* As described above, it was assumed 3 bits were passed with the data * address, but we will assume only the mode bits will be passed @@ -1399,7 +1397,6 @@ static long ppc_set_hwdebug(struct task_struct *child, dabr |= DABR_DATA_WRITE; child->thread.dabr = dabr; - child->thread.dabrx = DABRX_ALL; return 1; #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */ diff --git a/trunk/arch/powerpc/kernel/rtas_flash.c b/trunk/arch/powerpc/kernel/rtas_flash.c index 20b0120db0c3..2c0ee6405633 100644 --- a/trunk/arch/powerpc/kernel/rtas_flash.c +++ b/trunk/arch/powerpc/kernel/rtas_flash.c @@ -21,6 +21,7 @@ #include #include #include +#include #define MODULE_VERS "1.0" #define MODULE_NAME "rtas_flash" @@ -581,7 +582,7 @@ static void rtas_flash_firmware(int reboot_type) flist = (struct flash_block_list *)&rtas_data_buf[0]; flist->num_blocks = 0; flist->next = rtas_firmware_flash_list; - rtas_block_list = __pa(flist); + rtas_block_list = virt_to_abs(flist); if (rtas_block_list >= 4UL*1024*1024*1024) { printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n"); spin_unlock(&rtas_data_buf_lock); @@ -595,13 +596,13 @@ static void rtas_flash_firmware(int reboot_type) for (f = flist; f; f = next) { /* Translate data addrs to absolute */ for (i = 0; i < f->num_blocks; i++) { - f->blocks[i].data = (char *)__pa(f->blocks[i].data); + f->blocks[i].data = (char *)virt_to_abs(f->blocks[i].data); image_size += f->blocks[i].length; } next = f->next; /* Don't translate NULL pointer for last entry */ if (f->next) - f->next = (struct flash_block_list *)__pa(f->next); + f->next = (struct flash_block_list *)virt_to_abs(f->next); else f->next = NULL; /* make num_blocks into the version/length field */ diff --git a/trunk/arch/powerpc/kernel/rtas_pci.c b/trunk/arch/powerpc/kernel/rtas_pci.c index 6de63e3250bb..179af906dcda 100644 --- a/trunk/arch/powerpc/kernel/rtas_pci.c +++ b/trunk/arch/powerpc/kernel/rtas_pci.c @@ -81,7 +81,7 @@ int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val) return PCIBIOS_DEVICE_NOT_FOUND; if (returnval == EEH_IO_ERROR_VALUE(size) && - eeh_dev_check_failure(of_node_to_eeh_dev(pdn->node))) + eeh_dn_check_failure (pdn->node, NULL)) return PCIBIOS_DEVICE_NOT_FOUND; return PCIBIOS_SUCCESSFUL; @@ -275,6 +275,9 @@ void __init find_and_init_phbs(void) 
of_node_put(root); pci_devs_phb_init(); + /* Create EEH devices for all PHBs */ + eeh_dev_phb_init(); + /* * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties * in chosen. diff --git a/trunk/arch/powerpc/kernel/signal.c b/trunk/arch/powerpc/kernel/signal.c index a2dc75793bd5..5c023c9cf16e 100644 --- a/trunk/arch/powerpc/kernel/signal.c +++ b/trunk/arch/powerpc/kernel/signal.c @@ -11,7 +11,6 @@ #include #include -#include #include #include #include @@ -131,7 +130,7 @@ static int do_signal(struct pt_regs *regs) * triggered inside the kernel. */ if (current->thread.dabr) - set_dabr(current->thread.dabr, current->thread.dabrx); + set_dabr(current->thread.dabr); #endif /* Re-enable the breakpoints for the signal stack */ thread_change_pc(current, regs); @@ -158,11 +157,6 @@ static int do_signal(struct pt_regs *regs) void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) { - if (thread_info_flags & _TIF_UPROBE) { - clear_thread_flag(TIF_UPROBE); - uprobe_notify_resume(regs); - } - if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs); diff --git a/trunk/arch/powerpc/kernel/traps.c b/trunk/arch/powerpc/kernel/traps.c index 32518401af68..ae0843fa7a61 100644 --- a/trunk/arch/powerpc/kernel/traps.c +++ b/trunk/arch/powerpc/kernel/traps.c @@ -251,7 +251,6 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) local_irq_enable(); - current->thread.trap_nr = code; memset(&info, 0, sizeof(info)); info.si_signo = signr; info.si_code = code; diff --git a/trunk/arch/powerpc/kernel/uprobes.c b/trunk/arch/powerpc/kernel/uprobes.c deleted file mode 100644 index d2d46d1014f8..000000000000 --- a/trunk/arch/powerpc/kernel/uprobes.c +++ /dev/null @@ -1,184 +0,0 @@ -/* - * User-space Probes (UProbes) for powerpc - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright IBM Corporation, 2007-2012 - * - * Adapted from the x86 port by Ananth N Mavinakayanahalli - */ -#include -#include -#include -#include -#include -#include - -#include - -#define UPROBE_TRAP_NR UINT_MAX - -/** - * arch_uprobe_analyze_insn - * @mm: the probed address space. - * @arch_uprobe: the probepoint information. - * @addr: vaddr to probe. - * Return 0 on success or a -ve number on error. - */ -int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, - struct mm_struct *mm, unsigned long addr) -{ - if (addr & 0x03) - return -EINVAL; - - /* - * We currently don't support a uprobe on an already - * existing breakpoint instruction underneath - */ - if (is_trap(auprobe->ainsn)) - return -ENOTSUPP; - return 0; -} - -/* - * arch_uprobe_pre_xol - prepare to execute out of line. - * @auprobe: the probepoint information. - * @regs: reflects the saved user state of current task. 
- */ -int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) -{ - struct arch_uprobe_task *autask = ¤t->utask->autask; - - autask->saved_trap_nr = current->thread.trap_nr; - current->thread.trap_nr = UPROBE_TRAP_NR; - regs->nip = current->utask->xol_vaddr; - return 0; -} - -/** - * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs - * @regs: Reflects the saved state of the task after it has hit a breakpoint - * instruction. - * Return the address of the breakpoint instruction. - */ -unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) -{ - return instruction_pointer(regs); -} - -/* - * If xol insn itself traps and generates a signal (SIGILL/SIGSEGV/etc), - * then detect the case where a singlestepped instruction jumps back to its - * own address. It is assumed that anything like do_page_fault/do_trap/etc - * sets thread.trap_nr != UINT_MAX. - * - * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr, - * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to - * UPROBE_TRAP_NR == UINT_MAX set by arch_uprobe_pre_xol(). - */ -bool arch_uprobe_xol_was_trapped(struct task_struct *t) -{ - if (t->thread.trap_nr != UPROBE_TRAP_NR) - return true; - - return false; -} - -/* - * Called after single-stepping. To avoid the SMP problems that can - * occur when we temporarily put back the original opcode to - * single-step, we single-stepped a copy of the instruction. - * - * This function prepares to resume execution after the single-step. - */ -int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) -{ - struct uprobe_task *utask = current->utask; - - WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); - - current->thread.trap_nr = utask->autask.saved_trap_nr; - - /* - * On powerpc, except for loads and stores, most instructions - * including ones that alter code flow (branches, calls, returns) - * are emulated in the kernel. We get here only if the emulation - * support doesn't exist and have to fix-up the next instruction - * to be executed. - */ - regs->nip = utask->vaddr + MAX_UINSN_BYTES; - return 0; -} - -/* callback routine for handling exceptions. */ -int arch_uprobe_exception_notify(struct notifier_block *self, - unsigned long val, void *data) -{ - struct die_args *args = data; - struct pt_regs *regs = args->regs; - - /* regs == NULL is a kernel bug */ - if (WARN_ON(!regs)) - return NOTIFY_DONE; - - /* We are only interested in userspace traps */ - if (!user_mode(regs)) - return NOTIFY_DONE; - - switch (val) { - case DIE_BPT: - if (uprobe_pre_sstep_notifier(regs)) - return NOTIFY_STOP; - break; - case DIE_SSTEP: - if (uprobe_post_sstep_notifier(regs)) - return NOTIFY_STOP; - default: - break; - } - return NOTIFY_DONE; -} - -/* - * This function gets called when XOL instruction either gets trapped or - * the thread has a fatal signal, so reset the instruction pointer to its - * probed address. - */ -void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) -{ - struct uprobe_task *utask = current->utask; - - current->thread.trap_nr = utask->autask.saved_trap_nr; - instruction_pointer_set(regs, utask->vaddr); -} - -/* - * See if the instruction can be emulated. - * Returns true if instruction was emulated, false otherwise. - */ -bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) -{ - int ret; - - /* - * emulate_step() returns 1 if the insn was successfully emulated. - * For all other cases, we need to single-step in hardware. 
- */ - ret = emulate_step(regs, auprobe->ainsn); - if (ret > 0) - return true; - - return false; -} diff --git a/trunk/arch/powerpc/kernel/vdso.c b/trunk/arch/powerpc/kernel/vdso.c index 1b2076f049ce..b67db22e102d 100644 --- a/trunk/arch/powerpc/kernel/vdso.c +++ b/trunk/arch/powerpc/kernel/vdso.c @@ -723,7 +723,9 @@ int __cpuinit vdso_getcpu_init(void) val = (cpu & 0xfff) | ((node & 0xffff) << 16); mtspr(SPRN_SPRG3, val); - get_paca()->sprg3 = val; +#ifdef CONFIG_KVM_BOOK3S_HANDLER + get_paca()->kvm_hstate.sprg3 = val; +#endif put_cpu(); diff --git a/trunk/arch/powerpc/kernel/vio.c b/trunk/arch/powerpc/kernel/vio.c index 201ba59738be..02b32216bbc3 100644 --- a/trunk/arch/powerpc/kernel/vio.c +++ b/trunk/arch/powerpc/kernel/vio.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include diff --git a/trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 74a24bbb9637..44b72feaff7d 100644 --- a/trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1065,7 +1065,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) mtspr SPRN_DABRX,r6 /* Restore SPRG3 */ - ld r3,PACA_SPRG3(r13) + ld r3,HSTATE_SPRG3(r13) mtspr SPRN_SPRG3,r3 /* diff --git a/trunk/arch/powerpc/mm/fault.c b/trunk/arch/powerpc/mm/fault.c index 995f924e007f..08ffcf52a856 100644 --- a/trunk/arch/powerpc/mm/fault.c +++ b/trunk/arch/powerpc/mm/fault.c @@ -133,7 +133,6 @@ static int do_sigbus(struct pt_regs *regs, unsigned long address) up_read(¤t->mm->mmap_sem); if (user_mode(regs)) { - current->thread.trap_nr = BUS_ADRERR; info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRERR; diff --git a/trunk/arch/powerpc/mm/hash_native_64.c b/trunk/arch/powerpc/mm/hash_native_64.c index f21e8ce8db33..90039bc64119 100644 --- a/trunk/arch/powerpc/mm/hash_native_64.c +++ b/trunk/arch/powerpc/mm/hash_native_64.c @@ -14,10 +14,10 @@ #include #include -#include #include #include +#include #include #include #include diff --git a/trunk/arch/powerpc/mm/hash_utils_64.c b/trunk/arch/powerpc/mm/hash_utils_64.c index ba45739bdfe8..377e5cbedbbb 100644 --- a/trunk/arch/powerpc/mm/hash_utils_64.c +++ b/trunk/arch/powerpc/mm/hash_utils_64.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -650,7 +651,7 @@ static void __init htab_initialize(void) DBG("Hash table allocated at %lx, size: %lx\n", table, htab_size_bytes); - htab_address = __va(table); + htab_address = abs_to_virt(table); /* htab absolute addr + encoded htabsize */ _SDR1 = table + __ilog2(pteg_count) - 11; diff --git a/trunk/arch/powerpc/mm/init_64.c b/trunk/arch/powerpc/mm/init_64.c index 95a45293e5ac..620b7acd2fdf 100644 --- a/trunk/arch/powerpc/mm/init_64.c +++ b/trunk/arch/powerpc/mm/init_64.c @@ -62,6 +62,7 @@ #include #include #include +#include #include #include "mmu_decl.h" diff --git a/trunk/arch/powerpc/mm/mem.c b/trunk/arch/powerpc/mm/mem.c index 44cf2b20503d..fbdad0e3929a 100644 --- a/trunk/arch/powerpc/mm/mem.c +++ b/trunk/arch/powerpc/mm/mem.c @@ -62,7 +62,7 @@ int init_bootmem_done; int mem_init_done; -unsigned long long memory_limit; +phys_addr_t memory_limit; #ifdef CONFIG_HIGHMEM pte_t *kmap_pte; diff --git a/trunk/arch/powerpc/mm/pgtable_64.c b/trunk/arch/powerpc/mm/pgtable_64.c index 297d49547ea8..249a0631c4db 100644 --- a/trunk/arch/powerpc/mm/pgtable_64.c +++ b/trunk/arch/powerpc/mm/pgtable_64.c @@ -51,6 +51,7 @@ #include #include #include +#include #include #include "mmu_decl.h" diff --git a/trunk/arch/powerpc/mm/stab.c 
b/trunk/arch/powerpc/mm/stab.c index 3f8efa6f2997..9106ebb118f5 100644 --- a/trunk/arch/powerpc/mm/stab.c +++ b/trunk/arch/powerpc/mm/stab.c @@ -20,6 +20,7 @@ #include #include #include +#include struct stab_entry { unsigned long esid_data; @@ -256,7 +257,7 @@ void __init stabs_alloc(void) memset((void *)newstab, 0, HW_PAGE_SIZE); paca[cpu].stab_addr = newstab; - paca[cpu].stab_real = __pa(newstab); + paca[cpu].stab_real = virt_to_abs(newstab); printk(KERN_INFO "Segment table for CPU %d at 0x%llx " "virtual, 0x%llx absolute\n", cpu, paca[cpu].stab_addr, paca[cpu].stab_real); diff --git a/trunk/arch/powerpc/mm/subpage-prot.c b/trunk/arch/powerpc/mm/subpage-prot.c index 7c415ddde948..e4f8f1fc81a5 100644 --- a/trunk/arch/powerpc/mm/subpage-prot.c +++ b/trunk/arch/powerpc/mm/subpage-prot.c @@ -95,8 +95,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len) struct mm_struct *mm = current->mm; struct subpage_prot_table *spt = &mm->context.spt; u32 **spm, *spp; - unsigned long i; - size_t nw; + int i, nw; unsigned long next, limit; down_write(&mm->mmap_sem); @@ -145,8 +144,7 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map) struct mm_struct *mm = current->mm; struct subpage_prot_table *spt = &mm->context.spt; u32 **spm, *spp; - unsigned long i; - size_t nw; + int i, nw; unsigned long next, limit; int err; diff --git a/trunk/arch/powerpc/mm/tlb_low_64e.S b/trunk/arch/powerpc/mm/tlb_low_64e.S index b4113bf86353..f09d48e3268d 100644 --- a/trunk/arch/powerpc/mm/tlb_low_64e.S +++ b/trunk/arch/powerpc/mm/tlb_low_64e.S @@ -20,8 +20,6 @@ #include #include #include -#include -#include #ifdef CONFIG_PPC_64K_PAGES #define VPTE_PMD_SHIFT (PTE_INDEX_SIZE+1) @@ -39,18 +37,12 @@ * * **********************************************************************/ -.macro tlb_prolog_bolted intnum addr - mtspr SPRN_SPRG_GEN_SCRATCH,r13 +.macro tlb_prolog_bolted addr + mtspr SPRN_SPRG_TLB_SCRATCH,r13 mfspr r13,SPRN_SPRG_PACA std r10,PACA_EXTLB+EX_TLB_R10(r13) mfcr r10 std r11,PACA_EXTLB+EX_TLB_R11(r13) -#ifdef CONFIG_KVM_BOOKE_HV -BEGIN_FTR_SECTION - mfspr r11, SPRN_SRR1 -END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) -#endif - DO_KVM \intnum, SPRN_SRR1 std r16,PACA_EXTLB+EX_TLB_R16(r13) mfspr r16,\addr /* get faulting address */ std r14,PACA_EXTLB+EX_TLB_R14(r13) @@ -69,12 +61,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) ld r15,PACA_EXTLB+EX_TLB_R15(r13) TLB_MISS_RESTORE_STATS_BOLTED ld r16,PACA_EXTLB+EX_TLB_R16(r13) - mfspr r13,SPRN_SPRG_GEN_SCRATCH + mfspr r13,SPRN_SPRG_TLB_SCRATCH .endm /* Data TLB miss */ START_EXCEPTION(data_tlb_miss_bolted) - tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR + tlb_prolog_bolted SPRN_DEAR /* We need _PAGE_PRESENT and _PAGE_ACCESSED set */ @@ -222,7 +214,7 @@ itlb_miss_fault_bolted: /* Instruction TLB miss */ START_EXCEPTION(instruction_tlb_miss_bolted) - tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0 + tlb_prolog_bolted SPRN_SRR0 rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 srdi r15,r16,60 /* get region */ diff --git a/trunk/arch/powerpc/oprofile/op_model_power4.c b/trunk/arch/powerpc/oprofile/op_model_power4.c index 315f9495e9b2..95ae77dec3f6 100644 --- a/trunk/arch/powerpc/oprofile/op_model_power4.c +++ b/trunk/arch/powerpc/oprofile/op_model_power4.c @@ -21,13 +21,6 @@ #include #define dbg(args...) 
-#define OPROFILE_PM_PMCSEL_MSK 0xffULL -#define OPROFILE_PM_UNIT_SHIFT 60 -#define OPROFILE_PM_UNIT_MSK 0xfULL -#define OPROFILE_MAX_PMC_NUM 3 -#define OPROFILE_PMSEL_FIELD_WIDTH 8 -#define OPROFILE_UNIT_FIELD_WIDTH 4 -#define MMCRA_SIAR_VALID_MASK 0x10000000ULL static unsigned long reset_value[OP_MAX_COUNTER]; @@ -38,61 +31,6 @@ static int use_slot_nums; static u32 mmcr0_val; static u64 mmcr1_val; static u64 mmcra_val; -static u32 cntr_marked_events; - -static int power7_marked_instr_event(u64 mmcr1) -{ - u64 psel, unit; - int pmc, cntr_marked_events = 0; - - /* Given the MMCR1 value, look at the field for each counter to - * determine if it is a marked event. Code based on the function - * power7_marked_instr_event() in file arch/powerpc/perf/power7-pmu.c. - */ - for (pmc = 0; pmc < 4; pmc++) { - psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK - << (OPROFILE_MAX_PMC_NUM - pmc) - * OPROFILE_MAX_PMC_NUM); - psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc) - * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL; - unit = mmcr1 & (OPROFILE_PM_UNIT_MSK - << (OPROFILE_PM_UNIT_SHIFT - - (pmc * OPROFILE_PMSEL_FIELD_WIDTH ))); - unit = unit >> (OPROFILE_PM_UNIT_SHIFT - - (pmc * OPROFILE_PMSEL_FIELD_WIDTH)); - - switch (psel >> 4) { - case 2: - cntr_marked_events |= (pmc == 1 || pmc == 3) << pmc; - break; - case 3: - if (psel == 0x3c) { - cntr_marked_events |= (pmc == 0) << pmc; - break; - } - - if (psel == 0x3e) { - cntr_marked_events |= (pmc != 1) << pmc; - break; - } - - cntr_marked_events |= 1 << pmc; - break; - case 4: - case 5: - cntr_marked_events |= (unit == 0xd) << pmc; - break; - case 6: - if (psel == 0x64) - cntr_marked_events |= (pmc >= 2) << pmc; - break; - case 8: - cntr_marked_events |= (unit == 0xd) << pmc; - break; - } - } - return cntr_marked_events; -} static int power4_reg_setup(struct op_counter_config *ctr, struct op_system_config *sys, @@ -109,23 +47,6 @@ static int power4_reg_setup(struct op_counter_config *ctr, mmcr1_val = sys->mmcr1; mmcra_val = sys->mmcra; - /* Power 7+ and newer architectures: - * Determine which counter events in the group (the group of events is - * specified by the bit settings in the MMCR1 register) are marked - * events for use in the interrupt handler. Do the calculation once - * before OProfile starts. Information is used in the interrupt - * handler. Starting with Power 7+ we only record the sample for - * marked events if the SIAR valid bit is set. For non marked events - * the sample is always recorded. - */ - if (pvr_version_is(PVR_POWER7p)) - cntr_marked_events = power7_marked_instr_event(mmcr1_val); - else - cntr_marked_events = 0; /* For older processors, set the bit map - * to zero so the sample will always be - * be recorded. 
- */ - for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) reset_value[i] = 0x80000000UL - ctr[i].count; @@ -140,10 +61,10 @@ static int power4_reg_setup(struct op_counter_config *ctr, else mmcr0_val |= MMCR0_PROBLEM_DISABLE; - if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) || - pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) || - pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX) || - pvr_version_is(PVR_POWER5) || pvr_version_is(PVR_POWER5p)) + if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p) || + __is_processor(PV_970) || __is_processor(PV_970FX) || + __is_processor(PV_970MP) || __is_processor(PV_970GX) || + __is_processor(PV_POWER5) || __is_processor(PV_POWER5p)) use_slot_nums = 1; return 0; @@ -163,9 +84,9 @@ extern void ppc_enable_pmcs(void); */ static inline int mmcra_must_set_sample(void) { - if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) || - pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) || - pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX)) + if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p) || + __is_processor(PV_970) || __is_processor(PV_970FX) || + __is_processor(PV_970MP) || __is_processor(PV_970GX)) return 1; return 0; @@ -355,7 +276,7 @@ static bool pmc_overflow(unsigned long val) * PMCs because a user might set a period of less than 256 and we * don't want to mistakenly reset them. */ - if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256)) + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) return true; return false; @@ -370,7 +291,6 @@ static void power4_handle_interrupt(struct pt_regs *regs, int i; unsigned int mmcr0; unsigned long mmcra; - bool siar_valid = false; mmcra = mfspr(SPRN_MMCRA); @@ -380,29 +300,11 @@ static void power4_handle_interrupt(struct pt_regs *regs, /* set the PMM bit (see comment below) */ mtmsrd(mfmsr() | MSR_PMM); - /* Check that the SIAR valid bit in MMCRA is set to 1. */ - if ((mmcra & MMCRA_SIAR_VALID_MASK) == MMCRA_SIAR_VALID_MASK) - siar_valid = true; - for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) { val = classic_ctr_read(i); if (pmc_overflow(val)) { if (oprofile_running && ctr[i].enabled) { - /* Power 7+ and newer architectures: - * If the event is a marked event, then only - * save the sample if the SIAR valid bit is - * set. If the event is not marked, then - * always save the sample. - * Note, the Sample enable bit in the MMCRA - * register must be set to 1 if the group - * contains a marked event. - */ - if ((siar_valid && - (cntr_marked_events & (1 << i))) - || !(cntr_marked_events & (1 << i))) - oprofile_add_ext_sample(pc, regs, i, - is_kernel); - + oprofile_add_ext_sample(pc, regs, i, is_kernel); classic_ctr_write(i, reset_value[i]); } else { classic_ctr_write(i, 0); diff --git a/trunk/arch/powerpc/perf/core-book3s.c b/trunk/arch/powerpc/perf/core-book3s.c index fb55da91aa45..7cd2dbd6e4c4 100644 --- a/trunk/arch/powerpc/perf/core-book3s.c +++ b/trunk/arch/powerpc/perf/core-book3s.c @@ -1396,7 +1396,7 @@ static bool pmc_overflow(unsigned long val) * PMCs because a user might set a period of less than 256 and we * don't want to mistakenly reset them. 
*/ - if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256)) + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) return true; return false; diff --git a/trunk/arch/powerpc/platforms/cell/beat.c b/trunk/arch/powerpc/platforms/cell/beat.c index affcf566d460..852592b2b712 100644 --- a/trunk/arch/powerpc/platforms/cell/beat.c +++ b/trunk/arch/powerpc/platforms/cell/beat.c @@ -136,9 +136,9 @@ ssize_t beat_nvram_get_size(void) return BEAT_NVRAM_SIZE; } -int beat_set_xdabr(unsigned long dabr, unsigned long dabrx) +int beat_set_xdabr(unsigned long dabr) { - if (beat_set_dabr(dabr, dabrx)) + if (beat_set_dabr(dabr, DABRX_KERNEL | DABRX_USER)) return -1; return 0; } diff --git a/trunk/arch/powerpc/platforms/cell/beat.h b/trunk/arch/powerpc/platforms/cell/beat.h index bfcb8e351ae5..32c8efcedc80 100644 --- a/trunk/arch/powerpc/platforms/cell/beat.h +++ b/trunk/arch/powerpc/platforms/cell/beat.h @@ -32,7 +32,7 @@ void beat_get_rtc_time(struct rtc_time *); ssize_t beat_nvram_get_size(void); ssize_t beat_nvram_read(char *, size_t, loff_t *); ssize_t beat_nvram_write(char *, size_t, loff_t *); -int beat_set_xdabr(unsigned long, unsigned long); +int beat_set_xdabr(unsigned long); void beat_power_save(void); void beat_kexec_cpu_down(int, int); diff --git a/trunk/arch/powerpc/platforms/pasemi/iommu.c b/trunk/arch/powerpc/platforms/pasemi/iommu.c index 7d2d036754b5..14943ef01918 100644 --- a/trunk/arch/powerpc/platforms/pasemi/iommu.c +++ b/trunk/arch/powerpc/platforms/pasemi/iommu.c @@ -19,12 +19,12 @@ #undef DEBUG -#include #include #include #include #include #include +#include #include #define IOBMAP_PAGE_SHIFT 12 @@ -99,7 +99,7 @@ static int iobmap_build(struct iommu_table *tbl, long index, ip = ((u32 *)tbl->it_base) + index; while (npages--) { - rpn = __pa(uaddr) >> IOBMAP_PAGE_SHIFT; + rpn = virt_to_abs(uaddr) >> IOBMAP_PAGE_SHIFT; *(ip++) = IOBMAP_L2E_V | rpn; /* invalidate tlb, can be optimized more */ @@ -258,7 +258,7 @@ void __init alloc_iobmap_l2(void) return; #endif /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */ - iob_l2_base = (u32 *)__va(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000)); + iob_l2_base = (u32 *)abs_to_virt(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000)); printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base); } diff --git a/trunk/arch/powerpc/platforms/powernv/pci-ioda.c b/trunk/arch/powerpc/platforms/powernv/pci-ioda.c index 5e75dcfe51b9..0e7eccc0f88d 100644 --- a/trunk/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/trunk/arch/powerpc/platforms/powernv/pci-ioda.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "powernv.h" #include "pci.h" @@ -854,7 +855,7 @@ static void __devinit pnv_ioda_setup_PEs(struct pci_bus *bus) if (pe == NULL) continue; /* Leaving the PCIe domain ... single PE# */ - if (dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) + if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) pnv_ioda_setup_bus_PE(dev, pe); else if (dev->subordinate) pnv_ioda_setup_PEs(dev->subordinate); @@ -1138,6 +1139,44 @@ static void __devinit pnv_pci_ioda_fixup_phb(struct pci_controller *hose) } } +/* + * Returns the alignment for I/O or memory windows for P2P + * bridges. That actually depends on how PEs are segmented. + * For now, we return I/O or M32 segment size for PE sensitive + * P2P bridges. Otherwise, the default values (4KiB for I/O, + * 1MiB for memory) will be returned. + * + * The current PCI bus might be put into one PE, which was + * create against the parent PCI bridge. 
For that case, we + * needn't enlarge the alignment so that we can save some + * resources. + */ +static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus, + unsigned long type) +{ + struct pci_dev *bridge; + struct pci_controller *hose = pci_bus_to_host(bus); + struct pnv_phb *phb = hose->private_data; + int num_pci_bridges = 0; + + bridge = bus->self; + while (bridge) { + if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) { + num_pci_bridges++; + if (num_pci_bridges >= 2) + return 1; + } + + bridge = bridge->bus->self; + } + + /* We need support prefetchable memory window later */ + if (type & IORESOURCE_MEM) + return phb->ioda.m32_segsize; + + return phb->ioda.io_segsize; +} + /* Prevent enabling devices for which we couldn't properly * assign a PE */ @@ -1305,6 +1344,7 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np) */ ppc_md.pcibios_fixup_phb = pnv_pci_ioda_fixup_phb; ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook; + ppc_md.pcibios_window_alignment = pnv_pci_window_alignment; pci_add_flags(PCI_PROBE_ONLY | PCI_REASSIGN_ALL_RSRC); /* Reset IODA tables to a clean state */ diff --git a/trunk/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/trunk/arch/powerpc/platforms/powernv/pci-p5ioc2.c index 6b4bef4e9d82..264967770c3a 100644 --- a/trunk/arch/powerpc/platforms/powernv/pci-p5ioc2.c +++ b/trunk/arch/powerpc/platforms/powernv/pci-p5ioc2.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "powernv.h" #include "pci.h" diff --git a/trunk/arch/powerpc/platforms/powernv/pci.c b/trunk/arch/powerpc/platforms/powernv/pci.c index c01688a1a741..be3cfc5ceabb 100644 --- a/trunk/arch/powerpc/platforms/powernv/pci.c +++ b/trunk/arch/powerpc/platforms/powernv/pci.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include "powernv.h" @@ -446,11 +447,6 @@ static void pnv_tce_free(struct iommu_table *tbl, long index, long npages) pnv_tce_invalidate(tbl, tces, tcep - 1); } -static unsigned long pnv_tce_get(struct iommu_table *tbl, long index) -{ - return ((u64 *)tbl->it_base)[index - tbl->it_offset]; -} - void pnv_pci_setup_iommu_table(struct iommu_table *tbl, void *tce_mem, u64 tce_size, u64 dma_offset) @@ -601,7 +597,6 @@ void __init pnv_pci_init(void) ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup; ppc_md.tce_build = pnv_tce_build; ppc_md.tce_free = pnv_tce_free; - ppc_md.tce_get = pnv_tce_get; ppc_md.pci_probe_mode = pnv_pci_probe_mode; set_pci_dma_ops(&dma_iommu_ops); diff --git a/trunk/arch/powerpc/platforms/ps3/setup.c b/trunk/arch/powerpc/platforms/ps3/setup.c index 3f509f86432c..2d664c5a83b0 100644 --- a/trunk/arch/powerpc/platforms/ps3/setup.c +++ b/trunk/arch/powerpc/platforms/ps3/setup.c @@ -184,15 +184,11 @@ early_param("ps3flash", early_parse_ps3flash); #define prealloc_ps3flash_bounce_buffer() do { } while (0) #endif -static int ps3_set_dabr(unsigned long dabr, unsigned long dabrx) +static int ps3_set_dabr(unsigned long dabr) { - /* Have to set at least one bit in the DABRX */ - if (dabrx == 0 && dabr == 0) - dabrx = DABRX_USER; - /* hypervisor only allows us to set BTI, Kernel and user */ - dabrx &= DABRX_BTI | DABRX_KERNEL | DABRX_USER; + enum {DABR_USER = 1, DABR_KERNEL = 2,}; - return lv1_set_dabr(dabr, dabrx) ? -1 : 0; + return lv1_set_dabr(dabr, DABR_KERNEL | DABR_USER) ? 
-1 : 0; } static void __init ps3_setup_arch(void) diff --git a/trunk/arch/powerpc/platforms/pseries/Makefile b/trunk/arch/powerpc/platforms/pseries/Makefile index 890622b87c8f..c222189f5bb2 100644 --- a/trunk/arch/powerpc/platforms/pseries/Makefile +++ b/trunk/arch/powerpc/platforms/pseries/Makefile @@ -6,9 +6,8 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \ firmware.o power.o dlpar.o mobility.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SCANLOG) += scanlog.o -obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \ - eeh_driver.o eeh_event.o eeh_sysfs.o \ - eeh_pseries.o +obj-$(CONFIG_EEH) += eeh.o eeh_dev.o eeh_cache.o eeh_driver.o \ + eeh_event.o eeh_sysfs.o eeh_pseries.o obj-$(CONFIG_KEXEC) += kexec.o obj-$(CONFIG_PCI) += pci.o pci_dlpar.o obj-$(CONFIG_PSERIES_MSI) += msi.o diff --git a/trunk/arch/powerpc/platforms/pseries/eeh.c b/trunk/arch/powerpc/platforms/pseries/eeh.c index 18c168b752da..ecd394cf34e6 100644 --- a/trunk/arch/powerpc/platforms/pseries/eeh.c +++ b/trunk/arch/powerpc/platforms/pseries/eeh.c @@ -92,20 +92,6 @@ struct eeh_ops *eeh_ops = NULL; int eeh_subsystem_enabled; EXPORT_SYMBOL(eeh_subsystem_enabled); -/* - * EEH probe mode support. The intention is to support multiple - * platforms for EEH. Some platforms like pSeries do PCI emunation - * based on device tree. However, other platforms like powernv probe - * PCI devices from hardware. The flag is used to distinguish that. - * In addition, struct eeh_ops::probe would be invoked for particular - * OF node or PCI device so that the corresponding PE would be created - * there. - */ -int eeh_probe_mode; - -/* Global EEH mutex */ -DEFINE_MUTEX(eeh_mutex); - /* Lock to avoid races due to multiple reports of an error */ static DEFINE_RAW_SPINLOCK(confirm_error_lock); @@ -218,12 +204,22 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len) } } + /* Gather status on devices under the bridge */ + if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) { + struct device_node *child; + + for_each_child_of_node(dn, child) { + if (of_node_to_eeh_dev(child)) + n += eeh_gather_pci_data(of_node_to_eeh_dev(child), buf+n, len-n); + } + } + return n; } /** * eeh_slot_error_detail - Generate combined log including driver log and error log - * @pe: EEH PE + * @edev: device to report error log for * @severity: temporary or permanent error log * * This routine should be called to generate the combined log, which @@ -231,22 +227,17 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len) * out from the config space of the corresponding PCI device, while * the error log is fetched through platform dependent function call. 
*/ -void eeh_slot_error_detail(struct eeh_pe *pe, int severity) +void eeh_slot_error_detail(struct eeh_dev *edev, int severity) { size_t loglen = 0; - struct eeh_dev *edev; - - eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); - eeh_ops->configure_bridge(pe); - eeh_pe_restore_bars(pe); - pci_regs_buf[0] = 0; - eeh_pe_for_each_dev(pe, edev) { - loglen += eeh_gather_pci_data(edev, pci_regs_buf, - EEH_PCI_REGS_LOG_LEN); - } - eeh_ops->get_log(pe, severity, pci_regs_buf, loglen); + eeh_pci_enable(edev, EEH_OPT_THAW_MMIO); + eeh_ops->configure_bridge(eeh_dev_to_of_node(edev)); + eeh_restore_bars(edev); + loglen = eeh_gather_pci_data(edev, pci_regs_buf, EEH_PCI_REGS_LOG_LEN); + + eeh_ops->get_log(eeh_dev_to_of_node(edev), severity, pci_regs_buf, loglen); } /** @@ -270,8 +261,126 @@ static inline unsigned long eeh_token_to_phys(unsigned long token) } /** - * eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze - * @edev: eeh device + * eeh_find_device_pe - Retrieve the PE for the given device + * @dn: device node + * + * Return the PE under which this device lies + */ +struct device_node *eeh_find_device_pe(struct device_node *dn) +{ + while (dn->parent && of_node_to_eeh_dev(dn->parent) && + (of_node_to_eeh_dev(dn->parent)->mode & EEH_MODE_SUPPORTED)) { + dn = dn->parent; + } + return dn; +} + +/** + * __eeh_mark_slot - Mark all child devices as failed + * @parent: parent device + * @mode_flag: failure flag + * + * Mark all devices that are children of this device as failed. + * Mark the device driver too, so that it can see the failure + * immediately; this is critical, since some drivers poll + * status registers in interrupts ... If a driver is polling, + * and the slot is frozen, then the driver can deadlock in + * an interrupt context, which is bad. + */ +static void __eeh_mark_slot(struct device_node *parent, int mode_flag) +{ + struct device_node *dn; + + for_each_child_of_node(parent, dn) { + if (of_node_to_eeh_dev(dn)) { + /* Mark the pci device driver too */ + struct pci_dev *dev = of_node_to_eeh_dev(dn)->pdev; + + of_node_to_eeh_dev(dn)->mode |= mode_flag; + + if (dev && dev->driver) + dev->error_state = pci_channel_io_frozen; + + __eeh_mark_slot(dn, mode_flag); + } + } +} + +/** + * eeh_mark_slot - Mark the indicated device and its children as failed + * @dn: parent device + * @mode_flag: failure flag + * + * Mark the indicated device and its child devices as failed. + * The device drivers are marked as failed as well. + */ +void eeh_mark_slot(struct device_node *dn, int mode_flag) +{ + struct pci_dev *dev; + dn = eeh_find_device_pe(dn); + + /* Back up one, since config addrs might be shared */ + if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent)) + dn = dn->parent; + + of_node_to_eeh_dev(dn)->mode |= mode_flag; + + /* Mark the pci device too */ + dev = of_node_to_eeh_dev(dn)->pdev; + if (dev) + dev->error_state = pci_channel_io_frozen; + + __eeh_mark_slot(dn, mode_flag); +} + +/** + * __eeh_clear_slot - Clear failure flag for the child devices + * @parent: parent device + * @mode_flag: flag to be cleared + * + * Clear failure flag for the child devices. 
+ */ +static void __eeh_clear_slot(struct device_node *parent, int mode_flag) +{ + struct device_node *dn; + + for_each_child_of_node(parent, dn) { + if (of_node_to_eeh_dev(dn)) { + of_node_to_eeh_dev(dn)->mode &= ~mode_flag; + of_node_to_eeh_dev(dn)->check_count = 0; + __eeh_clear_slot(dn, mode_flag); + } + } +} + +/** + * eeh_clear_slot - Clear failure flag for the indicated device and its children + * @dn: parent device + * @mode_flag: flag to be cleared + * + * Clear failure flag for the indicated device and its children. + */ +void eeh_clear_slot(struct device_node *dn, int mode_flag) +{ + unsigned long flags; + raw_spin_lock_irqsave(&confirm_error_lock, flags); + + dn = eeh_find_device_pe(dn); + + /* Back up one, since config addrs might be shared */ + if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent)) + dn = dn->parent; + + of_node_to_eeh_dev(dn)->mode &= ~mode_flag; + of_node_to_eeh_dev(dn)->check_count = 0; + __eeh_clear_slot(dn, mode_flag); + raw_spin_unlock_irqrestore(&confirm_error_lock, flags); +} + +/** + * eeh_dn_check_failure - Check if all 1's data is due to EEH slot freeze + * @dn: device node + * @dev: pci device, if known * * Check for an EEH failure for the given device node. Call this * routine if the result of a read was all 0xff's and you want to @@ -283,13 +392,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token) * * It is safe to call this routine in an interrupt context. */ -int eeh_dev_check_failure(struct eeh_dev *edev) +int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) { int ret; unsigned long flags; - struct device_node *dn; - struct pci_dev *dev; - struct eeh_pe *pe; + struct eeh_dev *edev; int rc = 0; const char *location; @@ -298,23 +405,23 @@ int eeh_dev_check_failure(struct eeh_dev *edev) if (!eeh_subsystem_enabled) return 0; - if (!edev) { + if (!dn) { eeh_stats.no_dn++; return 0; } - dn = eeh_dev_to_of_node(edev); - dev = eeh_dev_to_pci_dev(edev); - pe = edev->pe; + dn = eeh_find_device_pe(dn); + edev = of_node_to_eeh_dev(dn); /* Access to IO BARs might get this far and still not want checking. */ - if (!pe) { + if (!(edev->mode & EEH_MODE_SUPPORTED) || + edev->mode & EEH_MODE_NOCHECK) { eeh_stats.ignored_check++; - pr_debug("EEH: Ignored check for %s %s\n", - eeh_pci_name(dev), dn->full_name); + pr_debug("EEH: Ignored check (%x) for %s %s\n", + edev->mode, eeh_pci_name(dev), dn->full_name); return 0; } - if (!pe->addr && !pe->config_addr) { + if (!edev->config_addr && !edev->pe_config_addr) { eeh_stats.no_cfg_addr++; return 0; } @@ -327,13 +434,13 @@ int eeh_dev_check_failure(struct eeh_dev *edev) */ raw_spin_lock_irqsave(&confirm_error_lock, flags); rc = 1; - if (pe->state & EEH_PE_ISOLATED) { - pe->check_count++; - if (pe->check_count % EEH_MAX_FAILS == 0) { + if (edev->mode & EEH_MODE_ISOLATED) { + edev->check_count++; + if (edev->check_count % EEH_MAX_FAILS == 0) { location = of_get_property(dn, "ibm,loc-code", NULL); printk(KERN_ERR "EEH: %d reads ignored for recovering device at " "location=%s driver=%s pci addr=%s\n", - pe->check_count, location, + edev->check_count, location, eeh_driver_name(dev), eeh_pci_name(dev)); printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n", eeh_driver_name(dev)); @@ -349,7 +456,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) * function zero of a multi-function device. * In any case they must share a common PHB. 
*/ - ret = eeh_ops->get_state(pe, NULL); + ret = eeh_ops->get_state(dn, NULL); /* Note that config-io to empty slots may fail; * they are empty when they don't have children. @@ -362,7 +469,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) { eeh_stats.false_positives++; - pe->false_positives++; + edev->false_positives ++; rc = 0; goto dn_unlock; } @@ -373,10 +480,10 @@ int eeh_dev_check_failure(struct eeh_dev *edev) * with other functions on this device, and functions under * bridges. */ - eeh_pe_state_mark(pe, EEH_PE_ISOLATED); + eeh_mark_slot(dn, EEH_MODE_ISOLATED); raw_spin_unlock_irqrestore(&confirm_error_lock, flags); - eeh_send_failure_event(pe); + eeh_send_failure_event(edev); /* Most EEH events are due to device driver bugs. Having * a stack trace will help the device-driver authors figure @@ -390,7 +497,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) return rc; } -EXPORT_SYMBOL_GPL(eeh_dev_check_failure); +EXPORT_SYMBOL_GPL(eeh_dn_check_failure); /** * eeh_check_failure - Check if all 1's data is due to EEH slot freeze @@ -407,19 +514,21 @@ EXPORT_SYMBOL_GPL(eeh_dev_check_failure); unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) { unsigned long addr; - struct eeh_dev *edev; + struct pci_dev *dev; + struct device_node *dn; /* Finding the phys addr + pci device; this is pretty quick. */ addr = eeh_token_to_phys((unsigned long __force) token); - edev = eeh_addr_cache_get_dev(addr); - if (!edev) { + dev = pci_addr_cache_get_device(addr); + if (!dev) { eeh_stats.no_device++; return val; } - eeh_dev_check_failure(edev); + dn = pci_device_to_OF_node(dev); + eeh_dn_check_failure(dn, dev); - pci_dev_put(eeh_dev_to_pci_dev(edev)); + pci_dev_put(dev); return val; } @@ -428,22 +537,23 @@ EXPORT_SYMBOL(eeh_check_failure); /** * eeh_pci_enable - Enable MMIO or DMA transfers for this slot - * @pe: EEH PE + * @edev: pci device node * * This routine should be called to reenable frozen MMIO or DMA * so that it would work correctly again. It's useful while doing * recovery or log collection on the indicated device. 
*/ -int eeh_pci_enable(struct eeh_pe *pe, int function) +int eeh_pci_enable(struct eeh_dev *edev, int function) { int rc; + struct device_node *dn = eeh_dev_to_of_node(edev); - rc = eeh_ops->set_option(pe, function); + rc = eeh_ops->set_option(dn, function); if (rc) - pr_warning("%s: Unexpected state change %d on PHB#%d-PE#%x, err=%d\n", - __func__, function, pe->phb->global_number, pe->addr, rc); + printk(KERN_WARNING "EEH: Unexpected state change %d, err=%d dn=%s\n", + function, rc, dn->full_name); - rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); + rc = eeh_ops->wait_state(dn, PCI_BUS_RESET_WAIT_MSEC); if (rc > 0 && (rc & EEH_STATE_MMIO_ENABLED) && (function == EEH_OPT_THAW_MMIO)) return 0; @@ -461,24 +571,17 @@ int eeh_pci_enable(struct eeh_pe *pe, int function) */ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) { - struct eeh_dev *edev = pci_dev_to_eeh_dev(dev); - struct eeh_pe *pe = edev->pe; - - if (!pe) { - pr_err("%s: No PE found on PCI device %s\n", - __func__, pci_name(dev)); - return -EINVAL; - } + struct device_node *dn = pci_device_to_OF_node(dev); switch (state) { case pcie_deassert_reset: - eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); + eeh_ops->reset(dn, EEH_RESET_DEACTIVATE); break; case pcie_hot_reset: - eeh_ops->reset(pe, EEH_RESET_HOT); + eeh_ops->reset(dn, EEH_RESET_HOT); break; case pcie_warm_reset: - eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); + eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL); break; default: return -EINVAL; @@ -488,37 +591,66 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat } /** - * eeh_set_pe_freset - Check the required reset for the indicated device - * @data: EEH device - * @flag: return value + * __eeh_set_pe_freset - Check the required reset for child devices + * @parent: parent device + * @freset: return value + * + * Each device might have its preferred reset type: fundamental or + * hot reset. The routine is used to collect the information from + * the child devices so that they could be reset accordingly. + */ +void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset) +{ + struct device_node *dn; + + for_each_child_of_node(parent, dn) { + if (of_node_to_eeh_dev(dn)) { + struct pci_dev *dev = of_node_to_eeh_dev(dn)->pdev; + + if (dev && dev->driver) + *freset |= dev->needs_freset; + + __eeh_set_pe_freset(dn, freset); + } + } +} + +/** + * eeh_set_pe_freset - Check the required reset for the indicated device and its children + * @dn: parent device + * @freset: return value * * Each device might have its preferred reset type: fundamental or * hot reset. The routine is used to collected the information for * the indicated device and its children so that the bunch of the * devices could be reset properly. */ -static void *eeh_set_dev_freset(void *data, void *flag) +void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset) { struct pci_dev *dev; - unsigned int *freset = (unsigned int *)flag; - struct eeh_dev *edev = (struct eeh_dev *)data; + dn = eeh_find_device_pe(dn); + + /* Back up one, since config addrs might be shared */ + if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent)) + dn = dn->parent; - dev = eeh_dev_to_pci_dev(edev); + dev = of_node_to_eeh_dev(dn)->pdev; if (dev) *freset |= dev->needs_freset; - return NULL; + __eeh_set_pe_freset(dn, freset); } /** * eeh_reset_pe_once - Assert the pci #RST line for 1/4 second - * @pe: EEH PE + * @edev: pci device node to be reset. * * Assert the PCI #RST line for 1/4 second. 
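 *
 * In outline (the hold and settle delays are handled further down in
 * the body):
 *
 *	eeh_ops->reset(dn, freset ? EEH_RESET_FUNDAMENTAL : EEH_RESET_HOT);
 *	... hold #RST for at least 100ms ...
 *	eeh_ops->reset(dn, EEH_RESET_DEACTIVATE);
 *	... allow ~1.5s for the bus to stabilize ...
 *
 * The choice between hot and fundamental reset comes from
 * eeh_set_pe_freset() collecting needs_freset from every child device.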
*/ -static void eeh_reset_pe_once(struct eeh_pe *pe) +static void eeh_reset_pe_once(struct eeh_dev *edev) { unsigned int freset = 0; + struct device_node *dn = eeh_dev_to_of_node(edev); /* Determine type of EEH reset required for * Partitionable Endpoint, a hot-reset (1) @@ -526,12 +658,12 @@ static void eeh_reset_pe_once(struct eeh_pe *pe) * A fundamental reset required by any device under * Partitionable Endpoint trumps hot-reset. */ - eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset); + eeh_set_pe_freset(dn, &freset); if (freset) - eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); + eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL); else - eeh_ops->reset(pe, EEH_RESET_HOT); + eeh_ops->reset(dn, EEH_RESET_HOT); /* The PCI bus requires that the reset be held high for at least * a 100 milliseconds. We wait a bit longer 'just in case'. @@ -543,9 +675,9 @@ static void eeh_reset_pe_once(struct eeh_pe *pe) * pci slot reset line is dropped. Make sure we don't miss * these, and clear the flag now. */ - eeh_pe_state_clear(pe, EEH_PE_ISOLATED); + eeh_clear_slot(dn, EEH_MODE_ISOLATED); - eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); + eeh_ops->reset(dn, EEH_RESET_DEACTIVATE); /* After a PCI slot has been reset, the PCI Express spec requires * a 1.5 second idle time for the bus to stabilize, before starting @@ -557,36 +689,116 @@ static void eeh_reset_pe_once(struct eeh_pe *pe) /** * eeh_reset_pe - Reset the indicated PE - * @pe: EEH PE + * @edev: PCI device associated EEH device * * This routine should be called to reset indicated device, including * PE. A PE might include multiple PCI devices and sometimes PCI bridges * might be involved as well. */ -int eeh_reset_pe(struct eeh_pe *pe) +int eeh_reset_pe(struct eeh_dev *edev) { int i, rc; + struct device_node *dn = eeh_dev_to_of_node(edev); /* Take three shots at resetting the bus */ for (i=0; i<3; i++) { - eeh_reset_pe_once(pe); + eeh_reset_pe_once(edev); - rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); + rc = eeh_ops->wait_state(dn, PCI_BUS_RESET_WAIT_MSEC); if (rc == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) return 0; if (rc < 0) { - pr_err("%s: Unrecoverable slot failure on PHB#%d-PE#%x", - __func__, pe->phb->global_number, pe->addr); + printk(KERN_ERR "EEH: unrecoverable slot failure %s\n", + dn->full_name); return -1; } - pr_err("EEH: bus reset %d failed on PHB#%d-PE#%x, rc=%d\n", - i+1, pe->phb->global_number, pe->addr, rc); + printk(KERN_ERR "EEH: bus reset %d failed on slot %s, rc=%d\n", + i+1, dn->full_name, rc); } return -1; } +/** Save and restore of PCI BARs + * + * Although firmware will set up BARs during boot, it doesn't + * set up device BAR's after a device reset, although it will, + * if requested, set up bridge configuration. Thus, we need to + * configure the PCI devices ourselves. + */ + +/** + * eeh_restore_one_device_bars - Restore the Base Address Registers for one device + * @edev: PCI device associated EEH device + * + * Loads the PCI configuration space base address registers, + * the expansion ROM base address, the latency timer, and etc. + * from the saved values in the device node. 
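+ *
+ * Note on the BYTE_SWAP()/SAVED_BYTE() helpers below: the config space
+ * was saved as native-endian u32 words, so fetching a single byte such
+ * as PCI_CACHE_LINE_SIZE (offset 0x0c) means picking byte
+ * 8*(0x0c/4) + 3 - 0x0c = 15 of the saved array on this big-endian
+ * platform, e.g. (illustrative only):
+ *
+ *	u8 clsz = SAVED_BYTE(PCI_CACHE_LINE_SIZE);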
+ */ +static inline void eeh_restore_one_device_bars(struct eeh_dev *edev) +{ + int i; + u32 cmd; + struct device_node *dn = eeh_dev_to_of_node(edev); + + if (!edev->phb) + return; + + for (i=4; i<10; i++) { + eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]); + } + + /* 12 == Expansion ROM Address */ + eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]); + +#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) +#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)]) + + eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1, + SAVED_BYTE(PCI_CACHE_LINE_SIZE)); + + eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1, + SAVED_BYTE(PCI_LATENCY_TIMER)); + + /* max latency, min grant, interrupt pin and line */ + eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]); + + /* Restore PERR & SERR bits, some devices require it, + * don't touch the other command bits + */ + eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd); + if (edev->config_space[1] & PCI_COMMAND_PARITY) + cmd |= PCI_COMMAND_PARITY; + else + cmd &= ~PCI_COMMAND_PARITY; + if (edev->config_space[1] & PCI_COMMAND_SERR) + cmd |= PCI_COMMAND_SERR; + else + cmd &= ~PCI_COMMAND_SERR; + eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd); +} + +/** + * eeh_restore_bars - Restore the PCI config space info + * @edev: EEH device + * + * This routine performs a recursive walk to the children + * of this device as well. + */ +void eeh_restore_bars(struct eeh_dev *edev) +{ + struct device_node *dn; + if (!edev) + return; + + if ((edev->mode & EEH_MODE_SUPPORTED) && !IS_BRIDGE(edev->class_code)) + eeh_restore_one_device_bars(edev); + + for_each_child_of_node(eeh_dev_to_of_node(edev), dn) + eeh_restore_bars(of_node_to_eeh_dev(dn)); +} + /** * eeh_save_bars - Save device bars * @edev: PCI device associated EEH device @@ -596,7 +808,7 @@ int eeh_reset_pe(struct eeh_pe *pe) * PCI devices are added individually; but, for the restore, * an entire slot is reset at a time. */ -void eeh_save_bars(struct eeh_dev *edev) +static void eeh_save_bars(struct eeh_dev *edev) { int i; struct device_node *dn; @@ -609,6 +821,102 @@ void eeh_save_bars(struct eeh_dev *edev) eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]); } +/** + * eeh_early_enable - Early enable EEH on the indicated device + * @dn: device node + * @data: BUID + * + * Enable EEH functionality on the specified PCI device. The function + * is expected to be called before real PCI probing is done. However, + * the PHBs have been initialized at this point. + */ +static void *eeh_early_enable(struct device_node *dn, void *data) +{ + int ret; + const u32 *class_code = of_get_property(dn, "class-code", NULL); + const u32 *vendor_id = of_get_property(dn, "vendor-id", NULL); + const u32 *device_id = of_get_property(dn, "device-id", NULL); + const u32 *regs; + int enable; + struct eeh_dev *edev = of_node_to_eeh_dev(dn); + + edev->class_code = 0; + edev->mode = 0; + edev->check_count = 0; + edev->freeze_count = 0; + edev->false_positives = 0; + + if (!of_device_is_available(dn)) + return NULL; + + /* Ignore bad nodes. */ + if (!class_code || !vendor_id || !device_id) + return NULL; + + /* There is nothing to check on PCI to ISA bridges */ + if (dn->type && !strcmp(dn->type, "isa")) { + edev->mode |= EEH_MODE_NOCHECK; + return NULL; + } + edev->class_code = *class_code; + + /* Ok... see if this device supports EEH. Some do, some don't, + * and the only way to find out is to check each and every one. 
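+ *
+ * Condensed: ibm,set-eeh-option is tried first, but because some
+ * firmware (Power4) lets it succeed even where EEH is unsupported,
+ * the result is only trusted if a follow-up get_state() call returns
+ * something other than EEH_STATE_NOT_SUPPORT.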
+ */ + regs = of_get_property(dn, "reg", NULL); + if (regs) { + /* First register entry is addr (00BBSS00) */ + /* Try to enable eeh */ + ret = eeh_ops->set_option(dn, EEH_OPT_ENABLE); + + enable = 0; + if (ret == 0) { + edev->config_addr = regs[0]; + + /* If the newer, better, ibm,get-config-addr-info is supported, + * then use that instead. + */ + edev->pe_config_addr = eeh_ops->get_pe_addr(dn); + + /* Some older systems (Power4) allow the + * ibm,set-eeh-option call to succeed even on nodes + * where EEH is not supported. Verify support + * explicitly. + */ + ret = eeh_ops->get_state(dn, NULL); + if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT) + enable = 1; + } + + if (enable) { + eeh_subsystem_enabled = 1; + edev->mode |= EEH_MODE_SUPPORTED; + + pr_debug("EEH: %s: eeh enabled, config=%x pe_config=%x\n", + dn->full_name, edev->config_addr, + edev->pe_config_addr); + } else { + + /* This device doesn't support EEH, but it may have an + * EEH parent, in which case we mark it as supported. + */ + if (dn->parent && of_node_to_eeh_dev(dn->parent) && + (of_node_to_eeh_dev(dn->parent)->mode & EEH_MODE_SUPPORTED)) { + /* Parent supports EEH. */ + edev->mode |= EEH_MODE_SUPPORTED; + edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr; + return NULL; + } + } + } else { + printk(KERN_WARNING "EEH: %s: unable to get reg property.\n", + dn->full_name); + } + + eeh_save_bars(edev); + return NULL; +} + /** * eeh_ops_register - Register platform dependent EEH operations * @ops: platform dependent EEH operations @@ -674,7 +982,7 @@ int __exit eeh_ops_unregister(const char *name) * Even if force-off is set, the EEH hardware is still enabled, so that * newer systems can boot. */ -static int __init eeh_init(void) +void __init eeh_init(void) { struct pci_controller *hose, *tmp; struct device_node *phb; @@ -684,34 +992,27 @@ static int __init eeh_init(void) if (!eeh_ops) { pr_warning("%s: Platform EEH operation not found\n", __func__); - return -EEXIST; + return; } else if ((ret = eeh_ops->init())) { pr_warning("%s: Failed to call platform init function (%d)\n", __func__, ret); - return ret; + return; } raw_spin_lock_init(&confirm_error_lock); /* Enable EEH for all adapters */ - if (eeh_probe_mode_devtree()) { - list_for_each_entry_safe(hose, tmp, - &hose_list, list_node) { - phb = hose->dn; - traverse_pci_devices(phb, eeh_ops->of_probe, NULL); - } + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { + phb = hose->dn; + traverse_pci_devices(phb, eeh_early_enable, NULL); } if (eeh_subsystem_enabled) - pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n"); + printk(KERN_INFO "EEH: PCI Enhanced I/O Error Handling Enabled\n"); else - pr_warning("EEH: No capable adapters found\n"); - - return ret; + printk(KERN_WARNING "EEH: No capable adapters found\n"); } -core_initcall_sync(eeh_init); - /** * eeh_add_device_early - Enable EEH for the indicated device_node * @dn: device node for which to set up EEH @@ -736,8 +1037,7 @@ static void eeh_add_device_early(struct device_node *dn) if (NULL == phb || 0 == phb->buid) return; - /* FIXME: hotplug support on POWERNV */ - eeh_ops->of_probe(dn, NULL); + eeh_early_enable(dn, NULL); } /** @@ -787,7 +1087,7 @@ static void eeh_add_device_late(struct pci_dev *dev) edev->pdev = dev; dev->dev.archdata.edev = edev; - eeh_addr_cache_insert_dev(dev); + pci_addr_cache_insert_device(dev); eeh_sysfs_add_device(dev); } @@ -843,8 +1143,7 @@ static void eeh_remove_device(struct pci_dev *dev) dev->dev.archdata.edev = NULL; pci_dev_put(dev); - 
eeh_rmv_from_parent_pe(edev); - eeh_addr_cache_rmv_dev(dev); + pci_addr_cache_remove_device(dev); eeh_sysfs_remove_device(dev); } diff --git a/trunk/arch/powerpc/platforms/pseries/eeh_cache.c b/trunk/arch/powerpc/platforms/pseries/eeh_cache.c index 5a4c87903057..e5ae1c687c66 100644 --- a/trunk/arch/powerpc/platforms/pseries/eeh_cache.c +++ b/trunk/arch/powerpc/platforms/pseries/eeh_cache.c @@ -50,7 +50,6 @@ struct pci_io_addr_range { struct rb_node rb_node; unsigned long addr_lo; unsigned long addr_hi; - struct eeh_dev *edev; struct pci_dev *pcidev; unsigned int flags; }; @@ -60,7 +59,7 @@ static struct pci_io_addr_cache { spinlock_t piar_lock; } pci_io_addr_cache_root; -static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr) +static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr) { struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node; @@ -75,7 +74,7 @@ static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr) n = n->rb_right; } else { pci_dev_get(piar->pcidev); - return piar->edev; + return piar->pcidev; } } } @@ -84,7 +83,7 @@ static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr) } /** - * eeh_addr_cache_get_dev - Get device, given only address + * pci_addr_cache_get_device - Get device, given only address * @addr: mmio (PIO) phys address or i/o port number * * Given an mmio phys address, or a port number, find a pci device @@ -93,15 +92,15 @@ static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr) * from zero (that is, they do *not* have pci_io_addr added in). * It is safe to call this function within an interrupt. */ -struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr) +struct pci_dev *pci_addr_cache_get_device(unsigned long addr) { - struct eeh_dev *edev; + struct pci_dev *dev; unsigned long flags; spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); - edev = __eeh_addr_cache_get_device(addr); + dev = __pci_addr_cache_get_device(addr); spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); - return edev; + return dev; } #ifdef DEBUG @@ -109,7 +108,7 @@ struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr) * Handy-dandy debug print routine, does nothing more * than print out the contents of our addr cache. */ -static void eeh_addr_cache_print(struct pci_io_addr_cache *cache) +static void pci_addr_cache_print(struct pci_io_addr_cache *cache) { struct rb_node *n; int cnt = 0; @@ -118,7 +117,7 @@ static void eeh_addr_cache_print(struct pci_io_addr_cache *cache) while (n) { struct pci_io_addr_range *piar; piar = rb_entry(n, struct pci_io_addr_range, rb_node); - pr_debug("PCI: %s addr range %d [%lx-%lx]: %s\n", + printk(KERN_DEBUG "PCI: %s addr range %d [%lx-%lx]: %s\n", (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt, piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev)); cnt++; @@ -129,7 +128,7 @@ static void eeh_addr_cache_print(struct pci_io_addr_cache *cache) /* Insert address range into the rb tree. 
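 * Ranges are keyed on [addr_lo, addr_hi]: the walk goes left when the
 * new range ends below an existing node and right when it starts above
 * it; an identical range for the same pci_dev is simply reused, while
 * anything else that overlaps only triggers the "overlapping address
 * range" warning.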
*/ static struct pci_io_addr_range * -eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo, +pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo, unsigned long ahi, unsigned int flags) { struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node; @@ -147,24 +146,23 @@ eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo, } else { if (dev != piar->pcidev || alo != piar->addr_lo || ahi != piar->addr_hi) { - pr_warning("PIAR: overlapping address range\n"); + printk(KERN_WARNING "PIAR: overlapping address range\n"); } return piar; } } - piar = kzalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC); + piar = kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC); if (!piar) return NULL; pci_dev_get(dev); piar->addr_lo = alo; piar->addr_hi = ahi; - piar->edev = pci_dev_to_eeh_dev(dev); piar->pcidev = dev; piar->flags = flags; #ifdef DEBUG - pr_debug("PIAR: insert range=[%lx:%lx] dev=%s\n", + printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n", alo, ahi, pci_name(dev)); #endif @@ -174,7 +172,7 @@ eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo, return piar; } -static void __eeh_addr_cache_insert_dev(struct pci_dev *dev) +static void __pci_addr_cache_insert_device(struct pci_dev *dev) { struct device_node *dn; struct eeh_dev *edev; @@ -182,7 +180,7 @@ static void __eeh_addr_cache_insert_dev(struct pci_dev *dev) dn = pci_device_to_OF_node(dev); if (!dn) { - pr_warning("PCI: no pci dn found for dev=%s\n", pci_name(dev)); + printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n", pci_name(dev)); return; } @@ -194,7 +192,8 @@ static void __eeh_addr_cache_insert_dev(struct pci_dev *dev) } /* Skip any devices for which EEH is not enabled. */ - if (!edev->pe) { + if (!(edev->mode & EEH_MODE_SUPPORTED) || + edev->mode & EEH_MODE_NOCHECK) { #ifdef DEBUG pr_info("PCI: skip building address cache for=%s - %s\n", pci_name(dev), dn->full_name); @@ -213,19 +212,19 @@ static void __eeh_addr_cache_insert_dev(struct pci_dev *dev) continue; if (start == 0 || ~start == 0 || end == 0 || ~end == 0) continue; - eeh_addr_cache_insert(dev, start, end, flags); + pci_addr_cache_insert(dev, start, end, flags); } } /** - * eeh_addr_cache_insert_dev - Add a device to the address cache + * pci_addr_cache_insert_device - Add a device to the address cache * @dev: PCI device whose I/O addresses we are interested in. * * In order to support the fast lookup of devices based on addresses, * we maintain a cache of devices that can be quickly searched. * This routine adds a device to that cache. */ -void eeh_addr_cache_insert_dev(struct pci_dev *dev) +void pci_addr_cache_insert_device(struct pci_dev *dev) { unsigned long flags; @@ -234,11 +233,11 @@ void eeh_addr_cache_insert_dev(struct pci_dev *dev) return; spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); - __eeh_addr_cache_insert_dev(dev); + __pci_addr_cache_insert_device(dev); spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); } -static inline void __eeh_addr_cache_rmv_dev(struct pci_dev *dev) +static inline void __pci_addr_cache_remove_device(struct pci_dev *dev) { struct rb_node *n; @@ -259,7 +258,7 @@ static inline void __eeh_addr_cache_rmv_dev(struct pci_dev *dev) } /** - * eeh_addr_cache_rmv_dev - remove pci device from addr cache + * pci_addr_cache_remove_device - remove pci device from addr cache * @dev: device to remove * * Remove a device from the addr-cache tree. @@ -267,17 +266,17 @@ static inline void __eeh_addr_cache_rmv_dev(struct pci_dev *dev) * the tree multiple times (once per resource). 
* But so what; device removal doesn't need to be that fast. */ -void eeh_addr_cache_rmv_dev(struct pci_dev *dev) +void pci_addr_cache_remove_device(struct pci_dev *dev) { unsigned long flags; spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); - __eeh_addr_cache_rmv_dev(dev); + __pci_addr_cache_remove_device(dev); spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); } /** - * eeh_addr_cache_build - Build a cache of I/O addresses + * pci_addr_cache_build - Build a cache of I/O addresses * * Build a cache of pci i/o addresses. This cache will be used to * find the pci device that corresponds to a given address. @@ -285,7 +284,7 @@ void eeh_addr_cache_rmv_dev(struct pci_dev *dev) * Must be run late in boot process, after the pci controllers * have been scanned for devices (after all device resources are known). */ -void __init eeh_addr_cache_build(void) +void __init pci_addr_cache_build(void) { struct device_node *dn; struct eeh_dev *edev; @@ -294,7 +293,7 @@ void __init eeh_addr_cache_build(void) spin_lock_init(&pci_io_addr_cache_root.piar_lock); for_each_pci_dev(dev) { - eeh_addr_cache_insert_dev(dev); + pci_addr_cache_insert_device(dev); dn = pci_device_to_OF_node(dev); if (!dn) @@ -313,7 +312,7 @@ void __init eeh_addr_cache_build(void) #ifdef DEBUG /* Verify tree built up above, echo back the list of addrs. */ - eeh_addr_cache_print(&pci_io_addr_cache_root); + pci_addr_cache_print(&pci_io_addr_cache_root); #endif } diff --git a/trunk/arch/powerpc/platforms/pseries/eeh_dev.c b/trunk/arch/powerpc/platforms/pseries/eeh_dev.c index 66442341d3a6..c4507d095900 100644 --- a/trunk/arch/powerpc/platforms/pseries/eeh_dev.c +++ b/trunk/arch/powerpc/platforms/pseries/eeh_dev.c @@ -55,7 +55,7 @@ void * __devinit eeh_dev_init(struct device_node *dn, void *data) struct eeh_dev *edev; /* Allocate EEH device */ - edev = kzalloc(sizeof(*edev), GFP_KERNEL); + edev = zalloc_maybe_bootmem(sizeof(*edev), GFP_KERNEL); if (!edev) { pr_warning("%s: out of memory\n", __func__); return NULL; @@ -65,7 +65,6 @@ void * __devinit eeh_dev_init(struct device_node *dn, void *data) PCI_DN(dn)->edev = edev; edev->dn = dn; edev->phb = phb; - INIT_LIST_HEAD(&edev->list); return NULL; } @@ -81,9 +80,6 @@ void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb) { struct device_node *dn = phb->dn; - /* EEH PE for PHB */ - eeh_phb_pe_create(phb); - /* EEH device for PHB */ eeh_dev_init(dn, phb); @@ -97,16 +93,10 @@ void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb) * Scan all the existing PHBs and create EEH devices for their OF * nodes and their children OF nodes */ -static int __init eeh_dev_phb_init(void) +void __init eeh_dev_phb_init(void) { struct pci_controller *phb, *tmp; list_for_each_entry_safe(phb, tmp, &hose_list, list_node) eeh_dev_phb_init_dynamic(phb); - - pr_info("EEH: devices created\n"); - - return 0; } - -core_initcall(eeh_dev_phb_init); diff --git a/trunk/arch/powerpc/platforms/pseries/eeh_driver.c b/trunk/arch/powerpc/platforms/pseries/eeh_driver.c index 8370ce7d5931..baf92cd9dfab 100644 --- a/trunk/arch/powerpc/platforms/pseries/eeh_driver.c +++ b/trunk/arch/powerpc/platforms/pseries/eeh_driver.c @@ -93,7 +93,7 @@ static void eeh_disable_irq(struct pci_dev *dev) if (!irq_has_action(dev->irq)) return; - edev->mode |= EEH_DEV_IRQ_DISABLED; + edev->mode |= EEH_MODE_IRQ_DISABLED; disable_irq_nosync(dev->irq); } @@ -108,43 +108,36 @@ static void eeh_enable_irq(struct pci_dev *dev) { struct eeh_dev *edev = pci_dev_to_eeh_dev(dev); - if ((edev->mode) & 
EEH_DEV_IRQ_DISABLED) { - edev->mode &= ~EEH_DEV_IRQ_DISABLED; + if ((edev->mode) & EEH_MODE_IRQ_DISABLED) { + edev->mode &= ~EEH_MODE_IRQ_DISABLED; enable_irq(dev->irq); } } /** * eeh_report_error - Report pci error to each device driver - * @data: eeh device + * @dev: PCI device * @userdata: return value * * Report an EEH error to each device driver, collect up and * merge the device driver responses. Cumulative response * passed back in "userdata". */ -static void *eeh_report_error(void *data, void *userdata) +static int eeh_report_error(struct pci_dev *dev, void *userdata) { - struct eeh_dev *edev = (struct eeh_dev *)data; - struct pci_dev *dev = eeh_dev_to_pci_dev(edev); enum pci_ers_result rc, *res = userdata; struct pci_driver *driver = dev->driver; - /* We might not have the associated PCI device, - * then we should continue for next one. - */ - if (!dev) return NULL; - dev->error_state = pci_channel_io_frozen; if (!driver) - return NULL; + return 0; eeh_disable_irq(dev); if (!driver->err_handler || !driver->err_handler->error_detected) - return NULL; + return 0; rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen); @@ -152,31 +145,27 @@ static void *eeh_report_error(void *data, void *userdata) if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; if (*res == PCI_ERS_RESULT_NONE) *res = rc; - return NULL; + return 0; } /** * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled - * @data: eeh device + * @dev: PCI device * @userdata: return value * * Tells each device driver that IO ports, MMIO and config space I/O * are now enabled. Collects up and merges the device driver responses. * Cumulative response passed back in "userdata". */ -static void *eeh_report_mmio_enabled(void *data, void *userdata) +static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata) { - struct eeh_dev *edev = (struct eeh_dev *)data; - struct pci_dev *dev = eeh_dev_to_pci_dev(edev); enum pci_ers_result rc, *res = userdata; - struct pci_driver *driver; - - if (!dev) return NULL; + struct pci_driver *driver = dev->driver; - if (!(driver = dev->driver) || + if (!driver || !driver->err_handler || !driver->err_handler->mmio_enabled) - return NULL; + return 0; rc = driver->err_handler->mmio_enabled(dev); @@ -184,12 +173,12 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata) if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; if (*res == PCI_ERS_RESULT_NONE) *res = rc; - return NULL; + return 0; } /** * eeh_report_reset - Tell device that slot has been reset - * @data: eeh device + * @dev: PCI device * @userdata: return value * * This routine must be called while EEH tries to reset particular @@ -197,15 +186,13 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata) * some actions, usually to save data the driver needs so that the * driver can work again while the device is recovered. 
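 *
 * As with the other eeh_report_*() callbacks, the per-driver answer is
 * merged into *userdata, with PCI_ERS_RESULT_NEED_RESET allowed to
 * override an earlier PCI_ERS_RESULT_NONE or PCI_ERS_RESULT_DISCONNECT.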
*/ -static void *eeh_report_reset(void *data, void *userdata) +static int eeh_report_reset(struct pci_dev *dev, void *userdata) { - struct eeh_dev *edev = (struct eeh_dev *)data; - struct pci_dev *dev = eeh_dev_to_pci_dev(edev); enum pci_ers_result rc, *res = userdata; - struct pci_driver *driver; + struct pci_driver *driver = dev->driver; - if (!dev || !(driver = dev->driver)) - return NULL; + if (!driver) + return 0; dev->error_state = pci_channel_io_normal; @@ -213,7 +200,7 @@ static void *eeh_report_reset(void *data, void *userdata) if (!driver->err_handler || !driver->err_handler->slot_reset) - return NULL; + return 0; rc = driver->err_handler->slot_reset(dev); if ((*res == PCI_ERS_RESULT_NONE) || @@ -221,89 +208,82 @@ static void *eeh_report_reset(void *data, void *userdata) if (*res == PCI_ERS_RESULT_DISCONNECT && rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; - return NULL; + return 0; } /** * eeh_report_resume - Tell device to resume normal operations - * @data: eeh device + * @dev: PCI device * @userdata: return value * * This routine must be called to notify the device driver that it * could resume so that the device driver can do some initialization * to make the recovered device work again. */ -static void *eeh_report_resume(void *data, void *userdata) +static int eeh_report_resume(struct pci_dev *dev, void *userdata) { - struct eeh_dev *edev = (struct eeh_dev *)data; - struct pci_dev *dev = eeh_dev_to_pci_dev(edev); - struct pci_driver *driver; - - if (!dev) return NULL; + struct pci_driver *driver = dev->driver; dev->error_state = pci_channel_io_normal; - if (!(driver = dev->driver)) - return NULL; + if (!driver) + return 0; eeh_enable_irq(dev); if (!driver->err_handler || !driver->err_handler->resume) - return NULL; + return 0; driver->err_handler->resume(dev); - return NULL; + return 0; } /** * eeh_report_failure - Tell device driver that device is dead. - * @data: eeh device + * @dev: PCI device * @userdata: return value * * This informs the device driver that the device is permanently * dead, and that no further recovery attempts will be made on it. */ -static void *eeh_report_failure(void *data, void *userdata) +static int eeh_report_failure(struct pci_dev *dev, void *userdata) { - struct eeh_dev *edev = (struct eeh_dev *)data; - struct pci_dev *dev = eeh_dev_to_pci_dev(edev); - struct pci_driver *driver; - - if (!dev) return NULL; + struct pci_driver *driver = dev->driver; dev->error_state = pci_channel_io_perm_failure; - if (!(driver = dev->driver)) - return NULL; + if (!driver) + return 0; eeh_disable_irq(dev); if (!driver->err_handler || !driver->err_handler->error_detected) - return NULL; + return 0; driver->err_handler->error_detected(dev, pci_channel_io_perm_failure); - return NULL; + return 0; } /** * eeh_reset_device - Perform actual reset of a pci slot - * @pe: EEH PE + * @edev: PE associated EEH device * @bus: PCI bus corresponding to the isolcated slot * * This routine must be called to do reset on the indicated PE. * During the reset, udev might be invoked because those affected * PCI devices will be removed and then added. 
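 *
 * When @bus is given, its devices are removed before the reset and
 * re-added afterwards (with a few seconds of grace so user-space
 * hotplug scripts such as ifdown can finish); a NULL @bus keeps the
 * existing pci_dev structures in place.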
*/ -static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) +static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus) { + struct device_node *dn; int cnt, rc; /* pcibios will clear the counter; save the value */ - cnt = pe->freeze_count; + cnt = edev->freeze_count; if (bus) pcibios_remove_pci_devices(bus); @@ -312,13 +292,25 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) * Reconfigure bridges and devices. Don't try to bring the system * up if the reset failed for some reason. */ - rc = eeh_reset_pe(pe); + rc = eeh_reset_pe(edev); if (rc) return rc; - /* Restore PE */ - eeh_ops->configure_bridge(pe); - eeh_pe_restore_bars(pe); + /* Walk over all functions on this device. */ + dn = eeh_dev_to_of_node(edev); + if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent)) + dn = dn->parent->child; + + while (dn) { + struct eeh_dev *pedev = of_node_to_eeh_dev(dn); + + /* On Power4, always true because eeh_pe_config_addr=0 */ + if (edev->pe_config_addr == pedev->pe_config_addr) { + eeh_ops->configure_bridge(dn); + eeh_restore_bars(pedev); + } + dn = dn->sibling; + } /* Give the system 5 seconds to finish running the user-space * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes, @@ -330,7 +322,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) ssleep(5); pcibios_add_pci_devices(bus); } - pe->freeze_count = cnt; + edev->freeze_count = cnt; return 0; } @@ -342,7 +334,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) /** * eeh_handle_event - Reset a PCI device after hard lockup. - * @pe: EEH PE + * @event: EEH event * * While PHB detects address or data parity errors on particular PCI * slot, the associated PE will be frozen. Besides, DMA's occurring @@ -357,24 +349,69 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) * drivers (which cause a second set of hotplug events to go out to * userspace). */ -void eeh_handle_event(struct eeh_pe *pe) +struct eeh_dev *handle_eeh_events(struct eeh_event *event) { + struct device_node *frozen_dn; + struct eeh_dev *frozen_edev; struct pci_bus *frozen_bus; int rc = 0; enum pci_ers_result result = PCI_ERS_RESULT_NONE; + const char *location, *pci_str, *drv_str, *bus_pci_str, *bus_drv_str; + + frozen_dn = eeh_find_device_pe(eeh_dev_to_of_node(event->edev)); + if (!frozen_dn) { + location = of_get_property(eeh_dev_to_of_node(event->edev), "ibm,loc-code", NULL); + location = location ? location : "unknown"; + printk(KERN_ERR "EEH: Error: Cannot find partition endpoint " + "for location=%s pci addr=%s\n", + location, eeh_pci_name(eeh_dev_to_pci_dev(event->edev))); + return NULL; + } + + frozen_bus = pcibios_find_pci_bus(frozen_dn); + location = of_get_property(frozen_dn, "ibm,loc-code", NULL); + location = location ? location : "unknown"; + + /* There are two different styles for coming up with the PE. + * In the old style, it was the highest EEH-capable device + * which was always an EADS pci bridge. In the new style, + * there might not be any EADS bridges, and even when there are, + * the firmware marks them as "EEH incapable". So another + * two-step is needed to find the pci bus.. 
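+ *
+ * In code, the two-step is simply a fall-back to the parent node:
+ *
+ *	frozen_bus = pcibios_find_pci_bus(frozen_dn);
+ *	if (!frozen_bus)
+ *		frozen_bus = pcibios_find_pci_bus(frozen_dn->parent);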
+ */ + if (!frozen_bus) + frozen_bus = pcibios_find_pci_bus(frozen_dn->parent); - frozen_bus = eeh_pe_bus_get(pe); if (!frozen_bus) { - pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n", - __func__, pe->phb->global_number, pe->addr); - return; + printk(KERN_ERR "EEH: Cannot find PCI bus " + "for location=%s dn=%s\n", + location, frozen_dn->full_name); + return NULL; } - pe->freeze_count++; - if (pe->freeze_count > EEH_MAX_ALLOWED_FREEZES) + frozen_edev = of_node_to_eeh_dev(frozen_dn); + frozen_edev->freeze_count++; + pci_str = eeh_pci_name(eeh_dev_to_pci_dev(event->edev)); + drv_str = eeh_pcid_name(eeh_dev_to_pci_dev(event->edev)); + + if (frozen_edev->freeze_count > EEH_MAX_ALLOWED_FREEZES) goto excess_failures; - pr_warning("EEH: This PCI device has failed %d times in the last hour\n", - pe->freeze_count); + + printk(KERN_WARNING + "EEH: This PCI device has failed %d times in the last hour:\n", + frozen_edev->freeze_count); + + if (frozen_edev->pdev) { + bus_pci_str = pci_name(frozen_edev->pdev); + bus_drv_str = eeh_pcid_name(frozen_edev->pdev); + printk(KERN_WARNING + "EEH: Bus location=%s driver=%s pci addr=%s\n", + location, bus_drv_str, bus_pci_str); + } + + printk(KERN_WARNING + "EEH: Device location=%s driver=%s pci addr=%s\n", + location, drv_str, pci_str); /* Walk the various device drivers attached to this slot through * a reset sequence, giving each an opportunity to do what it needs @@ -382,12 +419,12 @@ void eeh_handle_event(struct eeh_pe *pe) * status ... if any child can't handle the reset, then the entire * slot is dlpar removed and added. */ - eeh_pe_dev_traverse(pe, eeh_report_error, &result); + pci_walk_bus(frozen_bus, eeh_report_error, &result); /* Get the current PCI slot state. This can take a long time, * sometimes over 3 seconds for certain systems. */ - rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000); + rc = eeh_ops->wait_state(eeh_dev_to_of_node(frozen_edev), MAX_WAIT_FOR_RECOVERY*1000); if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) { printk(KERN_WARNING "EEH: Permanent failure\n"); goto hard_fail; @@ -397,14 +434,14 @@ void eeh_handle_event(struct eeh_pe *pe) * don't post the error log until after all dev drivers * have been informed. */ - eeh_slot_error_detail(pe, EEH_LOG_TEMP); + eeh_slot_error_detail(frozen_edev, EEH_LOG_TEMP); /* If all device drivers were EEH-unaware, then shut * down all of the device drivers, and hope they * go down willingly, without panicing the system. 
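 *
 * More generally, the sequence driven by 'result' from here on is
 * roughly:
 *
 *	PCI_ERS_RESULT_NONE        -> full reset of the slot
 *	PCI_ERS_RESULT_CAN_RECOVER -> thaw MMIO/DMA, ask mmio_enabled()
 *	PCI_ERS_RESULT_NEED_RESET  -> reset, then slot_reset() callbacks
 *	still unrecovered          -> hard_fail / permanent disable
 *	otherwise                  -> resume() on every driver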
*/ if (result == PCI_ERS_RESULT_NONE) { - rc = eeh_reset_device(pe, frozen_bus); + rc = eeh_reset_device(frozen_edev, frozen_bus); if (rc) { printk(KERN_WARNING "EEH: Unable to reset, rc=%d\n", rc); goto hard_fail; @@ -413,7 +450,7 @@ void eeh_handle_event(struct eeh_pe *pe) /* If all devices reported they can proceed, then re-enable MMIO */ if (result == PCI_ERS_RESULT_CAN_RECOVER) { - rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); + rc = eeh_pci_enable(frozen_edev, EEH_OPT_THAW_MMIO); if (rc < 0) goto hard_fail; @@ -421,13 +458,13 @@ void eeh_handle_event(struct eeh_pe *pe) result = PCI_ERS_RESULT_NEED_RESET; } else { result = PCI_ERS_RESULT_NONE; - eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result); + pci_walk_bus(frozen_bus, eeh_report_mmio_enabled, &result); } } /* If all devices reported they can proceed, then re-enable DMA */ if (result == PCI_ERS_RESULT_CAN_RECOVER) { - rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA); + rc = eeh_pci_enable(frozen_edev, EEH_OPT_THAW_DMA); if (rc < 0) goto hard_fail; @@ -445,13 +482,13 @@ void eeh_handle_event(struct eeh_pe *pe) /* If any device called out for a reset, then reset the slot */ if (result == PCI_ERS_RESULT_NEED_RESET) { - rc = eeh_reset_device(pe, NULL); + rc = eeh_reset_device(frozen_edev, NULL); if (rc) { printk(KERN_WARNING "EEH: Cannot reset, rc=%d\n", rc); goto hard_fail; } result = PCI_ERS_RESULT_NONE; - eeh_pe_dev_traverse(pe, eeh_report_reset, &result); + pci_walk_bus(frozen_bus, eeh_report_reset, &result); } /* All devices should claim they have recovered by now. */ @@ -462,9 +499,9 @@ void eeh_handle_event(struct eeh_pe *pe) } /* Tell all device drivers that they can resume operations */ - eeh_pe_dev_traverse(pe, eeh_report_resume, NULL); + pci_walk_bus(frozen_bus, eeh_report_resume, NULL); - return; + return frozen_edev; excess_failures: /* @@ -472,26 +509,30 @@ void eeh_handle_event(struct eeh_pe *pe) * are due to poorly seated PCI cards. Only 10% or so are * due to actual, failed cards. */ - pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n" - "last hour and has been permanently disabled.\n" - "Please try reseating or replacing it.\n", - pe->phb->global_number, pe->addr, - pe->freeze_count); + printk(KERN_ERR + "EEH: PCI device at location=%s driver=%s pci addr=%s\n" + "has failed %d times in the last hour " + "and has been permanently disabled.\n" + "Please try reseating this device or replacing it.\n", + location, drv_str, pci_str, frozen_edev->freeze_count); goto perm_error; hard_fail: - pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n" - "Please try reseating or replacing it\n", - pe->phb->global_number, pe->addr); + printk(KERN_ERR + "EEH: Unable to recover from failure of PCI device " + "at location=%s driver=%s pci addr=%s\n" + "Please try reseating this device or replacing it.\n", + location, drv_str, pci_str); perm_error: - eeh_slot_error_detail(pe, EEH_LOG_PERM); + eeh_slot_error_detail(frozen_edev, EEH_LOG_PERM); /* Notify all devices that they're about to go down. */ - eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); + pci_walk_bus(frozen_bus, eeh_report_failure, NULL); /* Shut down the device drivers for good. 
*/ - if (frozen_bus) - pcibios_remove_pci_devices(frozen_bus); + pcibios_remove_pci_devices(frozen_bus); + + return NULL; } diff --git a/trunk/arch/powerpc/platforms/pseries/eeh_event.c b/trunk/arch/powerpc/platforms/pseries/eeh_event.c index 51faaac8abe6..fb506317ebb0 100644 --- a/trunk/arch/powerpc/platforms/pseries/eeh_event.c +++ b/trunk/arch/powerpc/platforms/pseries/eeh_event.c @@ -57,7 +57,7 @@ static int eeh_event_handler(void * dummy) { unsigned long flags; struct eeh_event *event; - struct eeh_pe *pe; + struct eeh_dev *edev; set_task_comm(current, "eehd"); @@ -76,23 +76,28 @@ static int eeh_event_handler(void * dummy) /* Serialize processing of EEH events */ mutex_lock(&eeh_event_mutex); - pe = event->pe; - eeh_pe_state_mark(pe, EEH_PE_RECOVERING); - pr_info("EEH: Detected PCI bus error on PHB#%d-PE#%x\n", - pe->phb->global_number, pe->addr); + edev = event->edev; + eeh_mark_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING); + + printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n", + eeh_pci_name(edev->pdev)); set_current_state(TASK_INTERRUPTIBLE); /* Don't add to load average */ - eeh_handle_event(pe); - eeh_pe_state_clear(pe, EEH_PE_RECOVERING); + edev = handle_eeh_events(event); + + if (edev) { + eeh_clear_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING); + pci_dev_put(edev->pdev); + } kfree(event); mutex_unlock(&eeh_event_mutex); /* If there are no new errors after an hour, clear the counter. */ - if (pe && pe->freeze_count > 0) { + if (edev && edev->freeze_count>0) { msleep_interruptible(3600*1000); - if (pe->freeze_count > 0) - pe->freeze_count--; + if (edev->freeze_count>0) + edev->freeze_count--; } @@ -114,23 +119,36 @@ static void eeh_thread_launcher(struct work_struct *dummy) /** * eeh_send_failure_event - Generate a PCI error event - * @pe: EEH PE + * @edev: EEH device * * This routine can be called within an interrupt context; * the actual event will be delivered in a normal context * (from a workqueue). */ -int eeh_send_failure_event(struct eeh_pe *pe) +int eeh_send_failure_event(struct eeh_dev *edev) { unsigned long flags; struct eeh_event *event; - - event = kzalloc(sizeof(*event), GFP_ATOMIC); - if (!event) { - pr_err("EEH: out of memory, event not handled\n"); - return -ENOMEM; + struct device_node *dn = eeh_dev_to_of_node(edev); + const char *location; + + if (!mem_init_done) { + printk(KERN_ERR "EEH: event during early boot not handled\n"); + location = of_get_property(dn, "ibm,loc-code", NULL); + printk(KERN_ERR "EEH: device node = %s\n", dn->full_name); + printk(KERN_ERR "EEH: PCI location = %s\n", location); + return 1; } - event->pe = pe; + event = kmalloc(sizeof(*event), GFP_ATOMIC); + if (event == NULL) { + printk(KERN_ERR "EEH: out of memory, event not handled\n"); + return 1; + } + + if (edev->pdev) + pci_dev_get(edev->pdev); + + event->edev = edev; /* We may or may not be called in an interrupt context */ spin_lock_irqsave(&eeh_eventlist_lock, flags); diff --git a/trunk/arch/powerpc/platforms/pseries/eeh_pe.c b/trunk/arch/powerpc/platforms/pseries/eeh_pe.c deleted file mode 100644 index 904123c7657b..000000000000 --- a/trunk/arch/powerpc/platforms/pseries/eeh_pe.c +++ /dev/null @@ -1,591 +0,0 @@ -/* - * The file intends to implement PE based on the information from - * platforms. Basically, there have 3 types of PEs: PHB/Bus/Device. - * All the PEs should be organized as hierarchy tree. The first level - * of the tree will be associated to existing PHBs since the particular - * PE is only meaningful in one PHB domain. 
- * - * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include -#include -#include -#include -#include - -#include -#include - -static LIST_HEAD(eeh_phb_pe); - -/** - * eeh_pe_alloc - Allocate PE - * @phb: PCI controller - * @type: PE type - * - * Allocate PE instance dynamically. - */ -static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type) -{ - struct eeh_pe *pe; - - /* Allocate PHB PE */ - pe = kzalloc(sizeof(struct eeh_pe), GFP_KERNEL); - if (!pe) return NULL; - - /* Initialize PHB PE */ - pe->type = type; - pe->phb = phb; - INIT_LIST_HEAD(&pe->child_list); - INIT_LIST_HEAD(&pe->child); - INIT_LIST_HEAD(&pe->edevs); - - return pe; -} - -/** - * eeh_phb_pe_create - Create PHB PE - * @phb: PCI controller - * - * The function should be called while the PHB is detected during - * system boot or PCI hotplug in order to create PHB PE. - */ -int __devinit eeh_phb_pe_create(struct pci_controller *phb) -{ - struct eeh_pe *pe; - - /* Allocate PHB PE */ - pe = eeh_pe_alloc(phb, EEH_PE_PHB); - if (!pe) { - pr_err("%s: out of memory!\n", __func__); - return -ENOMEM; - } - - /* Put it into the list */ - eeh_lock(); - list_add_tail(&pe->child, &eeh_phb_pe); - eeh_unlock(); - - pr_debug("EEH: Add PE for PHB#%d\n", phb->global_number); - - return 0; -} - -/** - * eeh_phb_pe_get - Retrieve PHB PE based on the given PHB - * @phb: PCI controller - * - * The overall PEs form hierarchy tree. The first layer of the - * hierarchy tree is composed of PHB PEs. The function is used - * to retrieve the corresponding PHB PE according to the given PHB. - */ -static struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb) -{ - struct eeh_pe *pe; - - eeh_lock(); - - list_for_each_entry(pe, &eeh_phb_pe, child) { - /* - * Actually, we needn't check the type since - * the PE for PHB has been determined when that - * was created. - */ - if (pe->type == EEH_PE_PHB && - pe->phb == phb) { - eeh_unlock(); - return pe; - } - } - - eeh_unlock(); - - return NULL; -} - -/** - * eeh_pe_next - Retrieve the next PE in the tree - * @pe: current PE - * @root: root PE - * - * The function is used to retrieve the next PE in the - * hierarchy PE tree. - */ -static struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, - struct eeh_pe *root) -{ - struct list_head *next = pe->child_list.next; - - if (next == &pe->child_list) { - while (1) { - if (pe == root) - return NULL; - next = pe->child.next; - if (next != &pe->parent->child_list) - break; - pe = pe->parent; - } - } - - return list_entry(next, struct eeh_pe, child); -} - -/** - * eeh_pe_traverse - Traverse PEs in the specified PHB - * @root: root PE - * @fn: callback - * @flag: extra parameter to callback - * - * The function is used to traverse the specified PE and its - * child PEs. 
The traversing is to be terminated once the - * callback returns something other than NULL, or no more PEs - * to be traversed. - */ -static void *eeh_pe_traverse(struct eeh_pe *root, - eeh_traverse_func fn, void *flag) -{ - struct eeh_pe *pe; - void *ret; - - for (pe = root; pe; pe = eeh_pe_next(pe, root)) { - ret = fn(pe, flag); - if (ret) return ret; - } - - return NULL; -} - -/** - * eeh_pe_dev_traverse - Traverse the devices from the PE - * @root: EEH PE - * @fn: function callback - * @flag: extra parameter to callback - * - * The function is used to traverse the devices of the specified - * PE and its child PEs. - */ -void *eeh_pe_dev_traverse(struct eeh_pe *root, - eeh_traverse_func fn, void *flag) -{ - struct eeh_pe *pe; - struct eeh_dev *edev; - void *ret; - - if (!root) { - pr_warning("%s: Invalid PE %p\n", __func__, root); - return NULL; - } - - /* Traverse root PE */ - for (pe = root; pe; pe = eeh_pe_next(pe, root)) { - eeh_pe_for_each_dev(pe, edev) { - ret = fn(edev, flag); - if (ret) return ret; - } - } - - return NULL; -} - -/** - * __eeh_pe_get - Check the PE address - * @data: EEH PE - * @flag: EEH device - * - * For one particular PE, it can be identified by PE address - * or tranditional BDF address. BDF address is composed of - * Bus/Device/Function number. The extra data referred by flag - * indicates which type of address should be used. - */ -static void *__eeh_pe_get(void *data, void *flag) -{ - struct eeh_pe *pe = (struct eeh_pe *)data; - struct eeh_dev *edev = (struct eeh_dev *)flag; - - /* Unexpected PHB PE */ - if (pe->type == EEH_PE_PHB) - return NULL; - - /* We prefer PE address */ - if (edev->pe_config_addr && - (edev->pe_config_addr == pe->addr)) - return pe; - - /* Try BDF address */ - if (edev->pe_config_addr && - (edev->config_addr == pe->config_addr)) - return pe; - - return NULL; -} - -/** - * eeh_pe_get - Search PE based on the given address - * @edev: EEH device - * - * Search the corresponding PE based on the specified address which - * is included in the eeh device. The function is used to check if - * the associated PE has been created against the PE address. It's - * notable that the PE address has 2 format: traditional PE address - * which is composed of PCI bus/device/function number, or unified - * PE address. - */ -static struct eeh_pe *eeh_pe_get(struct eeh_dev *edev) -{ - struct eeh_pe *root = eeh_phb_pe_get(edev->phb); - struct eeh_pe *pe; - - eeh_lock(); - pe = eeh_pe_traverse(root, __eeh_pe_get, edev); - eeh_unlock(); - - return pe; -} - -/** - * eeh_pe_get_parent - Retrieve the parent PE - * @edev: EEH device - * - * The whole PEs existing in the system are organized as hierarchy - * tree. The function is used to retrieve the parent PE according - * to the parent EEH device. - */ -static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev) -{ - struct device_node *dn; - struct eeh_dev *parent; - - /* - * It might have the case for the indirect parent - * EEH device already having associated PE, but - * the direct parent EEH device doesn't have yet. - */ - dn = edev->dn->parent; - while (dn) { - /* We're poking out of PCI territory */ - if (!PCI_DN(dn)) return NULL; - - parent = of_node_to_eeh_dev(dn); - /* We're poking out of PCI territory */ - if (!parent) return NULL; - - if (parent->pe) - return parent->pe; - - dn = dn->parent; - } - - return NULL; -} - -/** - * eeh_add_to_parent_pe - Add EEH device to parent PE - * @edev: EEH device - * - * Add EEH device to the parent PE. 
If the parent PE already - * exists, the PE type will be changed to EEH_PE_BUS. Otherwise, - * we have to create new PE to hold the EEH device and the new - * PE will be linked to its parent PE as well. - */ -int eeh_add_to_parent_pe(struct eeh_dev *edev) -{ - struct eeh_pe *pe, *parent; - - /* - * Search the PE has been existing or not according - * to the PE address. If that has been existing, the - * PE should be composed of PCI bus and its subordinate - * components. - */ - pe = eeh_pe_get(edev); - if (pe) { - if (!edev->pe_config_addr) { - pr_err("%s: PE with addr 0x%x already exists\n", - __func__, edev->config_addr); - return -EEXIST; - } - - /* Mark the PE as type of PCI bus */ - pe->type = EEH_PE_BUS; - edev->pe = pe; - - /* Put the edev to PE */ - list_add_tail(&edev->list, &pe->edevs); - pr_debug("EEH: Add %s to Bus PE#%x\n", - edev->dn->full_name, pe->addr); - - return 0; - } - - /* Create a new EEH PE */ - pe = eeh_pe_alloc(edev->phb, EEH_PE_DEVICE); - if (!pe) { - pr_err("%s: out of memory!\n", __func__); - return -ENOMEM; - } - pe->addr = edev->pe_config_addr; - pe->config_addr = edev->config_addr; - - /* - * Put the new EEH PE into hierarchy tree. If the parent - * can't be found, the newly created PE will be attached - * to PHB directly. Otherwise, we have to associate the - * PE with its parent. - */ - parent = eeh_pe_get_parent(edev); - if (!parent) { - parent = eeh_phb_pe_get(edev->phb); - if (!parent) { - pr_err("%s: No PHB PE is found (PHB Domain=%d)\n", - __func__, edev->phb->global_number); - edev->pe = NULL; - kfree(pe); - return -EEXIST; - } - } - pe->parent = parent; - - /* - * Put the newly created PE into the child list and - * link the EEH device accordingly. - */ - list_add_tail(&pe->child, &parent->child_list); - list_add_tail(&edev->list, &pe->edevs); - edev->pe = pe; - pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n", - edev->dn->full_name, pe->addr, pe->parent->addr); - - return 0; -} - -/** - * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE - * @edev: EEH device - * - * The PE hierarchy tree might be changed when doing PCI hotplug. - * Also, the PCI devices or buses could be removed from the system - * during EEH recovery. So we have to call the function remove the - * corresponding PE accordingly if necessary. - */ -int eeh_rmv_from_parent_pe(struct eeh_dev *edev) -{ - struct eeh_pe *pe, *parent; - - if (!edev->pe) { - pr_warning("%s: No PE found for EEH device %s\n", - __func__, edev->dn->full_name); - return -EEXIST; - } - - /* Remove the EEH device */ - pe = edev->pe; - edev->pe = NULL; - list_del(&edev->list); - - /* - * Check if the parent PE includes any EEH devices. - * If not, we should delete that. Also, we should - * delete the parent PE if it doesn't have associated - * child PEs and EEH devices. - */ - while (1) { - parent = pe->parent; - if (pe->type == EEH_PE_PHB) - break; - - if (list_empty(&pe->edevs) && - list_empty(&pe->child_list)) { - list_del(&pe->child); - kfree(pe); - } - - pe = parent; - } - - return 0; -} - -/** - * __eeh_pe_state_mark - Mark the state for the PE - * @data: EEH PE - * @flag: state - * - * The function is used to mark the indicated state for the given - * PE. Also, the associated PCI devices will be put into IO frozen - * state as well. - */ -static void *__eeh_pe_state_mark(void *data, void *flag) -{ - struct eeh_pe *pe = (struct eeh_pe *)data; - int state = *((int *)flag); - struct eeh_dev *tmp; - struct pci_dev *pdev; - - /* - * Mark the PE with the indicated state. 
Also, - * the associated PCI device will be put into - * I/O frozen state to avoid I/O accesses from - * the PCI device driver. - */ - pe->state |= state; - eeh_pe_for_each_dev(pe, tmp) { - pdev = eeh_dev_to_pci_dev(tmp); - if (pdev) - pdev->error_state = pci_channel_io_frozen; - } - - return NULL; -} - -/** - * eeh_pe_state_mark - Mark specified state for PE and its associated device - * @pe: EEH PE - * - * EEH error affects the current PE and its child PEs. The function - * is used to mark appropriate state for the affected PEs and the - * associated devices. - */ -void eeh_pe_state_mark(struct eeh_pe *pe, int state) -{ - eeh_pe_traverse(pe, __eeh_pe_state_mark, &state); -} - -/** - * __eeh_pe_state_clear - Clear state for the PE - * @data: EEH PE - * @flag: state - * - * The function is used to clear the indicated state from the - * given PE. Besides, we also clear the check count of the PE - * as well. - */ -static void *__eeh_pe_state_clear(void *data, void *flag) -{ - struct eeh_pe *pe = (struct eeh_pe *)data; - int state = *((int *)flag); - - pe->state &= ~state; - pe->check_count = 0; - - return NULL; -} - -/** - * eeh_pe_state_clear - Clear state for the PE and its children - * @pe: PE - * @state: state to be cleared - * - * When the PE and its children has been recovered from error, - * we need clear the error state for that. The function is used - * for the purpose. - */ -void eeh_pe_state_clear(struct eeh_pe *pe, int state) -{ - eeh_pe_traverse(pe, __eeh_pe_state_clear, &state); -} - -/** - * eeh_restore_one_device_bars - Restore the Base Address Registers for one device - * @data: EEH device - * @flag: Unused - * - * Loads the PCI configuration space base address registers, - * the expansion ROM base address, the latency timer, and etc. - * from the saved values in the device node. - */ -static void *eeh_restore_one_device_bars(void *data, void *flag) -{ - int i; - u32 cmd; - struct eeh_dev *edev = (struct eeh_dev *)data; - struct device_node *dn = eeh_dev_to_of_node(edev); - - for (i = 4; i < 10; i++) - eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]); - /* 12 == Expansion ROM Address */ - eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]); - -#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) -#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)]) - - eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1, - SAVED_BYTE(PCI_CACHE_LINE_SIZE)); - eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1, - SAVED_BYTE(PCI_LATENCY_TIMER)); - - /* max latency, min grant, interrupt pin and line */ - eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]); - - /* - * Restore PERR & SERR bits, some devices require it, - * don't touch the other command bits - */ - eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd); - if (edev->config_space[1] & PCI_COMMAND_PARITY) - cmd |= PCI_COMMAND_PARITY; - else - cmd &= ~PCI_COMMAND_PARITY; - if (edev->config_space[1] & PCI_COMMAND_SERR) - cmd |= PCI_COMMAND_SERR; - else - cmd &= ~PCI_COMMAND_SERR; - eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd); - - return NULL; -} - -/** - * eeh_pe_restore_bars - Restore the PCI config space info - * @pe: EEH PE - * - * This routine performs a recursive walk to the children - * of this device as well. - */ -void eeh_pe_restore_bars(struct eeh_pe *pe) -{ - eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL); -} - -/** - * eeh_pe_bus_get - Retrieve PCI bus according to the given PE - * @pe: EEH PE - * - * Retrieve the PCI bus according to the given PE. 
Basically, - * there're 3 types of PEs: PHB/Bus/Device. For PHB PE, the - * primary PCI bus will be retrieved. The parent bus will be - * returned for BUS PE. However, we don't have associated PCI - * bus for DEVICE PE. - */ -struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe) -{ - struct pci_bus *bus = NULL; - struct eeh_dev *edev; - struct pci_dev *pdev; - - if (pe->type == EEH_PE_PHB) { - bus = pe->phb->bus; - } else if (pe->type == EEH_PE_BUS) { - edev = list_first_entry(&pe->edevs, struct eeh_dev, list); - pdev = eeh_dev_to_pci_dev(edev); - if (pdev) - bus = pdev->bus; - } - - return bus; -} diff --git a/trunk/arch/powerpc/platforms/pseries/eeh_pseries.c b/trunk/arch/powerpc/platforms/pseries/eeh_pseries.c index 19506f935737..c33360ec4f4f 100644 --- a/trunk/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/trunk/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -129,117 +129,27 @@ static int pseries_eeh_init(void) eeh_error_buf_size = RTAS_ERROR_LOG_MAX; } - /* Set EEH probe mode */ - eeh_probe_mode_set(EEH_PROBE_MODE_DEVTREE); - return 0; } -/** - * pseries_eeh_of_probe - EEH probe on the given device - * @dn: OF node - * @flag: Unused - * - * When EEH module is installed during system boot, all PCI devices - * are checked one by one to see if it supports EEH. The function - * is introduced for the purpose. - */ -static void *pseries_eeh_of_probe(struct device_node *dn, void *flag) -{ - struct eeh_dev *edev; - struct eeh_pe pe; - const u32 *class_code, *vendor_id, *device_id; - const u32 *regs; - int enable = 0; - int ret; - - /* Retrieve OF node and eeh device */ - edev = of_node_to_eeh_dev(dn); - if (!of_device_is_available(dn)) - return NULL; - - /* Retrieve class/vendor/device IDs */ - class_code = of_get_property(dn, "class-code", NULL); - vendor_id = of_get_property(dn, "vendor-id", NULL); - device_id = of_get_property(dn, "device-id", NULL); - - /* Skip for bad OF node or PCI-ISA bridge */ - if (!class_code || !vendor_id || !device_id) - return NULL; - if (dn->type && !strcmp(dn->type, "isa")) - return NULL; - - /* Update class code and mode of eeh device */ - edev->class_code = *class_code; - edev->mode = 0; - - /* Retrieve the device address */ - regs = of_get_property(dn, "reg", NULL); - if (!regs) { - pr_warning("%s: OF node property %s::reg not found\n", - __func__, dn->full_name); - return NULL; - } - - /* Initialize the fake PE */ - memset(&pe, 0, sizeof(struct eeh_pe)); - pe.phb = edev->phb; - pe.config_addr = regs[0]; - - /* Enable EEH on the device */ - ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE); - if (!ret) { - edev->config_addr = regs[0]; - /* Retrieve PE address */ - edev->pe_config_addr = eeh_ops->get_pe_addr(&pe); - pe.addr = edev->pe_config_addr; - - /* Some older systems (Power4) allow the ibm,set-eeh-option - * call to succeed even on nodes where EEH is not supported. - * Verify support explicitly. - */ - ret = eeh_ops->get_state(&pe, NULL); - if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT) - enable = 1; - - if (enable) { - eeh_subsystem_enabled = 1; - eeh_add_to_parent_pe(edev); - - pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n", - __func__, dn->full_name, pe.phb->global_number, - pe.addr, pe.config_addr); - } else if (dn->parent && of_node_to_eeh_dev(dn->parent) && - (of_node_to_eeh_dev(dn->parent))->pe) { - /* This device doesn't support EEH, but it may have an - * EEH parent, in which case we mark it as supported. 
- */ - edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr; - edev->pe_config_addr = of_node_to_eeh_dev(dn->parent)->pe_config_addr; - eeh_add_to_parent_pe(edev); - } - } - - /* Save memory bars */ - eeh_save_bars(edev); - - return NULL; -} - /** * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable - * @pe: EEH PE + * @dn: device node * @option: operation to be issued * * The function is used to control the EEH functionality globally. * Currently, following options are support according to PAPR: * Enable EEH, Disable EEH, Enable MMIO and Enable DMA */ -static int pseries_eeh_set_option(struct eeh_pe *pe, int option) +static int pseries_eeh_set_option(struct device_node *dn, int option) { int ret = 0; + struct eeh_dev *edev; + const u32 *reg; int config_addr; + edev = of_node_to_eeh_dev(dn); + /* * When we're enabling or disabling EEH functioality on * the particular PE, the PE config address is possibly @@ -249,11 +159,15 @@ static int pseries_eeh_set_option(struct eeh_pe *pe, int option) switch (option) { case EEH_OPT_DISABLE: case EEH_OPT_ENABLE: + reg = of_get_property(dn, "reg", NULL); + config_addr = reg[0]; + break; + case EEH_OPT_THAW_MMIO: case EEH_OPT_THAW_DMA: - config_addr = pe->config_addr; - if (pe->addr) - config_addr = pe->addr; + config_addr = edev->config_addr; + if (edev->pe_config_addr) + config_addr = edev->pe_config_addr; break; default: @@ -263,15 +177,15 @@ static int pseries_eeh_set_option(struct eeh_pe *pe, int option) } ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL, - config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid), option); + config_addr, BUID_HI(edev->phb->buid), + BUID_LO(edev->phb->buid), option); return ret; } /** * pseries_eeh_get_pe_addr - Retrieve PE address - * @pe: EEH PE + * @dn: device node * * Retrieve the assocated PE address. Actually, there're 2 RTAS * function calls dedicated for the purpose. We need implement @@ -282,11 +196,14 @@ static int pseries_eeh_set_option(struct eeh_pe *pe, int option) * It's notable that zero'ed return value means invalid PE config * address. */ -static int pseries_eeh_get_pe_addr(struct eeh_pe *pe) +static int pseries_eeh_get_pe_addr(struct device_node *dn) { + struct eeh_dev *edev; int ret = 0; int rets[3]; + edev = of_node_to_eeh_dev(dn); + if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) { /* * First of all, we need to make sure there has one PE @@ -294,18 +211,18 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe) * meaningless. 
*/ ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, - pe->config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid), 1); + edev->config_addr, BUID_HI(edev->phb->buid), + BUID_LO(edev->phb->buid), 1); if (ret || (rets[0] == 0)) return 0; /* Retrieve the associated PE config address */ ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, - pe->config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid), 0); + edev->config_addr, BUID_HI(edev->phb->buid), + BUID_LO(edev->phb->buid), 0); if (ret) { - pr_warning("%s: Failed to get address for PHB#%d-PE#%x\n", - __func__, pe->phb->global_number, pe->config_addr); + pr_warning("%s: Failed to get PE address for %s\n", + __func__, dn->full_name); return 0; } @@ -314,11 +231,11 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe) if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) { ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets, - pe->config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid), 0); + edev->config_addr, BUID_HI(edev->phb->buid), + BUID_LO(edev->phb->buid), 0); if (ret) { - pr_warning("%s: Failed to get address for PHB#%d-PE#%x\n", - __func__, pe->phb->global_number, pe->config_addr); + pr_warning("%s: Failed to get PE address for %s\n", + __func__, dn->full_name); return 0; } @@ -330,7 +247,7 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe) /** * pseries_eeh_get_state - Retrieve PE state - * @pe: EEH PE + * @dn: PE associated device node * @state: return value * * Retrieve the state of the specified PE. On RTAS compliant @@ -341,28 +258,30 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe) * RTAS calls for the purpose, we need to try the new one and back * to the old one if the new one couldn't work properly. */ -static int pseries_eeh_get_state(struct eeh_pe *pe, int *state) +static int pseries_eeh_get_state(struct device_node *dn, int *state) { + struct eeh_dev *edev; int config_addr; int ret; int rets[4]; int result; /* Figure out PE config address if possible */ - config_addr = pe->config_addr; - if (pe->addr) - config_addr = pe->addr; + edev = of_node_to_eeh_dev(dn); + config_addr = edev->config_addr; + if (edev->pe_config_addr) + config_addr = edev->pe_config_addr; if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) { ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets, - config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid)); + config_addr, BUID_HI(edev->phb->buid), + BUID_LO(edev->phb->buid)); } else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) { /* Fake PE unavailable info */ rets[2] = 0; ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets, - config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid)); + config_addr, BUID_HI(edev->phb->buid), + BUID_LO(edev->phb->buid)); } else { return EEH_STATE_NOT_SUPPORT; } @@ -414,32 +333,34 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *state) /** * pseries_eeh_reset - Reset the specified PE - * @pe: EEH PE + * @dn: PE associated device node * @option: reset option * * Reset the specified PE */ -static int pseries_eeh_reset(struct eeh_pe *pe, int option) +static int pseries_eeh_reset(struct device_node *dn, int option) { + struct eeh_dev *edev; int config_addr; int ret; /* Figure out PE address */ - config_addr = pe->config_addr; - if (pe->addr) - config_addr = pe->addr; + edev = of_node_to_eeh_dev(dn); + config_addr = edev->config_addr; + if (edev->pe_config_addr) + config_addr = edev->pe_config_addr; /* Reset PE through RTAS call */ ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, - config_addr, 
BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid), option); + config_addr, BUID_HI(edev->phb->buid), + BUID_LO(edev->phb->buid), option); /* If fundamental-reset not supported, try hot-reset */ if (option == EEH_RESET_FUNDAMENTAL && ret == -8) { ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, - config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid), EEH_RESET_HOT); + config_addr, BUID_HI(edev->phb->buid), + BUID_LO(edev->phb->buid), EEH_RESET_HOT); } return ret; @@ -447,13 +368,13 @@ static int pseries_eeh_reset(struct eeh_pe *pe, int option) /** * pseries_eeh_wait_state - Wait for PE state - * @pe: EEH PE + * @dn: PE associated device node * @max_wait: maximal period in microsecond * * Wait for the state of associated PE. It might take some time * to retrieve the PE's state. */ -static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait) +static int pseries_eeh_wait_state(struct device_node *dn, int max_wait) { int ret; int mwait; @@ -470,7 +391,7 @@ static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait) #define EEH_STATE_MAX_WAIT_TIME (300 * 1000) while (1) { - ret = pseries_eeh_get_state(pe, &mwait); + ret = pseries_eeh_get_state(dn, &mwait); /* * If the PE's state is temporarily unavailable, @@ -505,7 +426,7 @@ static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait) /** * pseries_eeh_get_log - Retrieve error log - * @pe: EEH PE + * @dn: device node * @severity: temporary or permanent error log * @drv_log: driver log to be combined with retrieved error log * @len: length of driver log @@ -514,22 +435,24 @@ static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait) * Actually, the error will be retrieved through the dedicated * RTAS call. */ -static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len) +static int pseries_eeh_get_log(struct device_node *dn, int severity, char *drv_log, unsigned long len) { + struct eeh_dev *edev; int config_addr; unsigned long flags; int ret; + edev = of_node_to_eeh_dev(dn); spin_lock_irqsave(&slot_errbuf_lock, flags); memset(slot_errbuf, 0, eeh_error_buf_size); /* Figure out the PE address */ - config_addr = pe->config_addr; - if (pe->addr) - config_addr = pe->addr; + config_addr = edev->config_addr; + if (edev->pe_config_addr) + config_addr = edev->pe_config_addr; ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr, - BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid), + BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid), virt_to_phys(drv_log), len, virt_to_phys(slot_errbuf), eeh_error_buf_size, severity); @@ -542,38 +465,40 @@ static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, u /** * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE - * @pe: EEH PE + * @dn: PE associated device node * * The function will be called to reconfigure the bridges included * in the specified PE so that the mulfunctional PE would be recovered * again. 
*/ -static int pseries_eeh_configure_bridge(struct eeh_pe *pe) +static int pseries_eeh_configure_bridge(struct device_node *dn) { + struct eeh_dev *edev; int config_addr; int ret; /* Figure out the PE address */ - config_addr = pe->config_addr; - if (pe->addr) - config_addr = pe->addr; + edev = of_node_to_eeh_dev(dn); + config_addr = edev->config_addr; + if (edev->pe_config_addr) + config_addr = edev->pe_config_addr; /* Use new configure-pe function, if supported */ if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) { ret = rtas_call(ibm_configure_pe, 3, 1, NULL, - config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid)); + config_addr, BUID_HI(edev->phb->buid), + BUID_LO(edev->phb->buid)); } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) { ret = rtas_call(ibm_configure_bridge, 3, 1, NULL, - config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid)); + config_addr, BUID_HI(edev->phb->buid), + BUID_LO(edev->phb->buid)); } else { return -EFAULT; } if (ret) - pr_warning("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n", - __func__, pe->phb->global_number, pe->addr, ret); + pr_warning("%s: Unable to configure bridge %d for %s\n", + __func__, ret, dn->full_name); return ret; } @@ -617,8 +542,6 @@ static int pseries_eeh_write_config(struct device_node *dn, int where, int size, static struct eeh_ops pseries_eeh_ops = { .name = "pseries", .init = pseries_eeh_init, - .of_probe = pseries_eeh_of_probe, - .dev_probe = NULL, .set_option = pseries_eeh_set_option, .get_pe_addr = pseries_eeh_get_pe_addr, .get_state = pseries_eeh_get_state, @@ -636,21 +559,7 @@ static struct eeh_ops pseries_eeh_ops = { * EEH initialization on pseries platform. This function should be * called before any EEH related functions. */ -static int __init eeh_pseries_init(void) +int __init eeh_pseries_init(void) { - int ret = -EINVAL; - - if (!machine_is(pseries)) - return ret; - - ret = eeh_ops_register(&pseries_eeh_ops); - if (!ret) - pr_info("EEH: pSeries platform initialized\n"); - else - pr_info("EEH: pSeries platform initialization failure (%d)\n", - ret); - - return ret; + return eeh_ops_register(&pseries_eeh_ops); } - -early_initcall(eeh_pseries_init); diff --git a/trunk/arch/powerpc/platforms/pseries/eeh_sysfs.c b/trunk/arch/powerpc/platforms/pseries/eeh_sysfs.c index d37708360f2e..243b3510d70f 100644 --- a/trunk/arch/powerpc/platforms/pseries/eeh_sysfs.c +++ b/trunk/arch/powerpc/platforms/pseries/eeh_sysfs.c @@ -53,6 +53,9 @@ static DEVICE_ATTR(_name, S_IRUGO, eeh_show_##_name, NULL); EEH_SHOW_ATTR(eeh_mode, mode, "0x%x"); EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x"); EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x"); +EEH_SHOW_ATTR(eeh_check_count, check_count, "%d" ); +EEH_SHOW_ATTR(eeh_freeze_count, freeze_count, "%d" ); +EEH_SHOW_ATTR(eeh_false_positives, false_positives, "%d" ); void eeh_sysfs_add_device(struct pci_dev *pdev) { @@ -61,6 +64,9 @@ void eeh_sysfs_add_device(struct pci_dev *pdev) rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode); rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr); rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); + rc += device_create_file(&pdev->dev, &dev_attr_eeh_check_count); + rc += device_create_file(&pdev->dev, &dev_attr_eeh_false_positives); + rc += device_create_file(&pdev->dev, &dev_attr_eeh_freeze_count); if (rc) printk(KERN_WARNING "EEH: Unable to create sysfs entries\n"); @@ -71,5 +77,8 @@ void eeh_sysfs_remove_device(struct pci_dev *pdev) device_remove_file(&pdev->dev, &dev_attr_eeh_mode); 
device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr); device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); + device_remove_file(&pdev->dev, &dev_attr_eeh_check_count); + device_remove_file(&pdev->dev, &dev_attr_eeh_false_positives); + device_remove_file(&pdev->dev, &dev_attr_eeh_freeze_count); } diff --git a/trunk/arch/powerpc/platforms/pseries/iommu.c b/trunk/arch/powerpc/platforms/pseries/iommu.c index 6153eea27ce7..bca220f2873c 100644 --- a/trunk/arch/powerpc/platforms/pseries/iommu.c +++ b/trunk/arch/powerpc/platforms/pseries/iommu.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include /* for show_stack */ #include @@ -42,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -99,7 +99,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index, while (npages--) { /* can't move this out since we might cross MEMBLOCK boundary */ - rpn = __pa(uaddr) >> TCE_SHIFT; + rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; uaddr += TCE_PAGE_SIZE; @@ -148,7 +148,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, int ret = 0; long tcenum_start = tcenum, npages_start = npages; - rpn = __pa(uaddr) >> TCE_SHIFT; + rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; proto_tce = TCE_PCI_READ; if (direction != DMA_TO_DEVICE) proto_tce |= TCE_PCI_WRITE; @@ -217,7 +217,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, __get_cpu_var(tce_page) = tcep; } - rpn = __pa(uaddr) >> TCE_SHIFT; + rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; proto_tce = TCE_PCI_READ; if (direction != DMA_TO_DEVICE) proto_tce |= TCE_PCI_WRITE; @@ -237,7 +237,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, rc = plpar_tce_put_indirect((u64)tbl->it_index, (u64)tcenum << 12, - (u64)__pa(tcep), + (u64)virt_to_abs(tcep), limit); npages -= limit; @@ -441,7 +441,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, rc = plpar_tce_put_indirect(liobn, dma_offset, - (u64)__pa(tcep), + (u64)virt_to_abs(tcep), limit); num_tce -= limit; diff --git a/trunk/arch/powerpc/platforms/pseries/lpar.c b/trunk/arch/powerpc/platforms/pseries/lpar.c index 177055d0186b..5f3ef876ded2 100644 --- a/trunk/arch/powerpc/platforms/pseries/lpar.c +++ b/trunk/arch/powerpc/platforms/pseries/lpar.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include diff --git a/trunk/arch/powerpc/platforms/pseries/msi.c b/trunk/arch/powerpc/platforms/pseries/msi.c index d19f4977c834..109fdb75578d 100644 --- a/trunk/arch/powerpc/platforms/pseries/msi.c +++ b/trunk/arch/powerpc/platforms/pseries/msi.c @@ -210,7 +210,6 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) { struct device_node *dn; - struct eeh_dev *edev; /* Found our PE and assume 8 at that point. 
*/ @@ -218,10 +217,7 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) if (!dn) return NULL; - /* Get the top level device in the PE */ - edev = of_node_to_eeh_dev(dn); - edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list); - dn = eeh_dev_to_of_node(edev); + dn = eeh_find_device_pe(dn); if (!dn) return NULL; @@ -391,13 +387,12 @@ static int check_msix_entries(struct pci_dev *pdev) return 0; } -static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) +static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) { struct pci_dn *pdn; int hwirq, virq, i, rc; struct msi_desc *entry; struct msi_msg msg; - int nvec = nvec_in; pdn = get_pdn(pdev); if (!pdn) @@ -406,24 +401,11 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) if (type == PCI_CAP_ID_MSIX && check_msix_entries(pdev)) return -EINVAL; - /* - * Firmware currently refuse any non power of two allocation - * so we round up if the quota will allow it. - */ - if (type == PCI_CAP_ID_MSIX) { - int m = roundup_pow_of_two(nvec); - int quota = msi_quota_for_device(pdev, m); - - if (quota >= m) - nvec = m; - } - /* * Try the new more explicit firmware interface, if that fails fall * back to the old interface. The old interface is known to never * return MSI-Xs. */ -again: if (type == PCI_CAP_ID_MSI) { rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec); @@ -435,10 +417,6 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec); if (rc != nvec) { - if (nvec != nvec_in) { - nvec = nvec_in; - goto again; - } pr_debug("rtas_msi: rtas_change_msi() failed\n"); return rc; } diff --git a/trunk/arch/powerpc/platforms/pseries/pci.c b/trunk/arch/powerpc/platforms/pseries/pci.c index 56b864d777ee..2c6ded29f73d 100644 --- a/trunk/arch/powerpc/platforms/pseries/pci.c +++ b/trunk/arch/powerpc/platforms/pseries/pci.c @@ -73,7 +73,7 @@ void __init pSeries_final_fixup(void) { pSeries_request_regions(); - eeh_addr_cache_build(); + pci_addr_cache_build(); } /* diff --git a/trunk/arch/powerpc/platforms/pseries/setup.c b/trunk/arch/powerpc/platforms/pseries/setup.c index e3cb7ae61658..51ecac920dd8 100644 --- a/trunk/arch/powerpc/platforms/pseries/setup.c +++ b/trunk/arch/powerpc/platforms/pseries/setup.c @@ -388,8 +388,10 @@ static void __init pSeries_setup_arch(void) /* Find and initialize PCI host bridges */ init_pci_config_tokens(); + eeh_pseries_init(); find_and_init_phbs(); pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb); + eeh_init(); pSeries_nvram_init(); @@ -414,20 +416,16 @@ static int __init pSeries_init_panel(void) } machine_arch_initcall(pseries, pSeries_init_panel); -static int pseries_set_dabr(unsigned long dabr, unsigned long dabrx) +static int pseries_set_dabr(unsigned long dabr) { return plpar_hcall_norets(H_SET_DABR, dabr); } -static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx) +static int pseries_set_xdabr(unsigned long dabr) { - /* Have to set at least one bit in the DABRX according to PAPR */ - if (dabrx == 0 && dabr == 0) - dabrx = DABRX_USER; - /* PAPR says we can only set kernel and user bits */ - dabrx &= DABRX_KERNEL | DABRX_USER; - - return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx); + /* We want to catch accesses from kernel and userspace */ + return plpar_hcall_norets(H_SET_XDABR, dabr, + H_DABRX_KERNEL | H_DABRX_USER); } #define CMO_CHARACTERISTICS_TOKEN 44 @@ -531,10 +529,10 @@ static void __init pSeries_init_early(void) if 
(firmware_has_feature(FW_FEATURE_LPAR)) hvc_vio_init_early(); #endif - if (firmware_has_feature(FW_FEATURE_XDABR)) - ppc_md.set_dabr = pseries_set_xdabr; - else if (firmware_has_feature(FW_FEATURE_DABR)) + if (firmware_has_feature(FW_FEATURE_DABR)) ppc_md.set_dabr = pseries_set_dabr; + else if (firmware_has_feature(FW_FEATURE_XDABR)) + ppc_md.set_dabr = pseries_set_xdabr; pSeries_cmo_feature_init(); iommu_init_early_pSeries(); diff --git a/trunk/arch/powerpc/sysdev/dart_iommu.c b/trunk/arch/powerpc/sysdev/dart_iommu.c index 8ef63a01e345..4f2680f431b5 100644 --- a/trunk/arch/powerpc/sysdev/dart_iommu.c +++ b/trunk/arch/powerpc/sysdev/dart_iommu.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include @@ -166,7 +167,7 @@ static int dart_build(struct iommu_table *tbl, long index, */ l = npages; while (l--) { - rpn = __pa(uaddr) >> DART_PAGE_SHIFT; + rpn = virt_to_abs(uaddr) >> DART_PAGE_SHIFT; *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK); @@ -243,7 +244,7 @@ static int __init dart_init(struct device_node *dart_node) panic("DART: Cannot map registers!"); /* Map in DART table */ - dart_vbase = ioremap(__pa(dart_tablebase), dart_tablesize); + dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize); /* Fill initial table */ for (i = 0; i < dart_tablesize/4; i++) @@ -462,7 +463,7 @@ void __init alloc_dart_table(void) * will blow up an entire large page anyway in the kernel mapping */ dart_tablebase = (unsigned long) - __va(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L)); + abs_to_virt(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L)); printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase); } diff --git a/trunk/arch/powerpc/xmon/xmon.c b/trunk/arch/powerpc/xmon/xmon.c index 987f441525cb..9b49c65ee7a4 100644 --- a/trunk/arch/powerpc/xmon/xmon.c +++ b/trunk/arch/powerpc/xmon/xmon.c @@ -740,7 +740,7 @@ static void insert_bpts(void) static void insert_cpu_bpts(void) { if (dabr.enabled) - set_dabr(dabr.address | (dabr.enabled & 7), DABRX_ALL); + set_dabr(dabr.address | (dabr.enabled & 7)); if (iabr && cpu_has_feature(CPU_FTR_IABR)) mtspr(SPRN_IABR, iabr->address | (iabr->enabled & (BP_IABR|BP_IABR_TE))); @@ -768,7 +768,7 @@ static void remove_bpts(void) static void remove_cpu_bpts(void) { - set_dabr(0, 0); + set_dabr(0); if (cpu_has_feature(CPU_FTR_IABR)) mtspr(SPRN_IABR, 0); } diff --git a/trunk/arch/tile/kernel/pci.c b/trunk/arch/tile/kernel/pci.c index 33c10864d2f7..d2292be6fb90 100644 --- a/trunk/arch/tile/kernel/pci.c +++ b/trunk/arch/tile/kernel/pci.c @@ -246,16 +246,13 @@ static void __devinit fixup_read_and_payload_sizes(void) /* Scan for the smallest maximum payload size. */ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { - int pcie_caps_offset; u32 devcap; int max_payload; - pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP); - if (pcie_caps_offset == 0) + if (!pci_is_pcie(dev)) continue; - pci_read_config_dword(dev, pcie_caps_offset + PCI_EXP_DEVCAP, - &devcap); + pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &devcap); max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD; if (max_payload < smallest_max_payload) smallest_max_payload = max_payload; @@ -263,21 +260,10 @@ static void __devinit fixup_read_and_payload_sizes(void) /* Now, set the max_payload_size for all devices to that value. 
*/ new_values = (max_read_size << 12) | (smallest_max_payload << 5); - while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { - int pcie_caps_offset; - u16 devctl; - - pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP); - if (pcie_caps_offset == 0) - continue; - - pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, - &devctl); - devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ); - devctl |= new_values; - pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, - devctl); - } + while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) + pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ, + new_values); } diff --git a/trunk/arch/um/os-Linux/time.c b/trunk/arch/um/os-Linux/time.c index f60238559af3..0748fe0c8a73 100644 --- a/trunk/arch/um/os-Linux/time.c +++ b/trunk/arch/um/os-Linux/time.c @@ -114,7 +114,7 @@ static void deliver_alarm(void) skew += this_tick - last_tick; while (skew >= one_tick) { - alarm_handler(SIGVTALRM, NULL); + alarm_handler(SIGVTALRM, NULL, NULL); skew -= one_tick; } diff --git a/trunk/arch/x86/xen/mmu.c b/trunk/arch/x86/xen/mmu.c index b65a76133f4f..5141d808e751 100644 --- a/trunk/arch/x86/xen/mmu.c +++ b/trunk/arch/x86/xen/mmu.c @@ -1283,7 +1283,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus, cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; - if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) { + if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) { args->op.cmd = MMUEXT_INVLPG_MULTI; args->op.arg1.linear_addr = start; } diff --git a/trunk/arch/x86/xen/p2m.c b/trunk/arch/x86/xen/p2m.c index d4b255463253..76ba0e97e530 100644 --- a/trunk/arch/x86/xen/p2m.c +++ b/trunk/arch/x86/xen/p2m.c @@ -599,7 +599,7 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_ if (p2m_index(set_pfn)) return false; - for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) { + for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) { topidx = p2m_top_index(pfn); if (!p2m_top[topidx]) diff --git a/trunk/drivers/base/dma-contiguous.c b/trunk/drivers/base/dma-contiguous.c index 78efb0306a44..34d94c762a1e 100644 --- a/trunk/drivers/base/dma-contiguous.c +++ b/trunk/drivers/base/dma-contiguous.c @@ -250,7 +250,7 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size, return -EINVAL; /* Sanitise input arguments */ - alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order); + alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order); base = ALIGN(base, alignment); size = ALIGN(size, alignment); limit &= ~(alignment - 1); diff --git a/trunk/drivers/crypto/nx/nx.c b/trunk/drivers/crypto/nx/nx.c index 638110efae9b..d7f179cc2e98 100644 --- a/trunk/drivers/crypto/nx/nx.c +++ b/trunk/drivers/crypto/nx/nx.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -103,10 +104,10 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head, /* determine the start and end for this address range - slightly * different if this is in VMALLOC_REGION */ if (is_vmalloc_addr(start_addr)) - sg_addr = page_to_phys(vmalloc_to_page(start_addr)) + sg_addr = phys_to_abs(page_to_phys(vmalloc_to_page(start_addr))) + offset_in_page(sg_addr); else - sg_addr = __pa(sg_addr); + sg_addr = virt_to_abs(sg_addr); end_addr = sg_addr + len; @@ -264,17 +265,17 @@ void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function) nx_ctx->csbcpb->csb.valid |= 
NX_CSB_VALID_BIT; nx_ctx->op.flags = function; - nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb); - nx_ctx->op.in = __pa(nx_ctx->in_sg); - nx_ctx->op.out = __pa(nx_ctx->out_sg); + nx_ctx->op.csbcpb = virt_to_abs(nx_ctx->csbcpb); + nx_ctx->op.in = virt_to_abs(nx_ctx->in_sg); + nx_ctx->op.out = virt_to_abs(nx_ctx->out_sg); if (nx_ctx->csbcpb_aead) { nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT; nx_ctx->op_aead.flags = function; - nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead); - nx_ctx->op_aead.in = __pa(nx_ctx->in_sg); - nx_ctx->op_aead.out = __pa(nx_ctx->out_sg); + nx_ctx->op_aead.csbcpb = virt_to_abs(nx_ctx->csbcpb_aead); + nx_ctx->op_aead.in = virt_to_abs(nx_ctx->in_sg); + nx_ctx->op_aead.out = virt_to_abs(nx_ctx->out_sg); } } diff --git a/trunk/drivers/gpio/Kconfig b/trunk/drivers/gpio/Kconfig index b16c8a72a2e2..ba7926f5c099 100644 --- a/trunk/drivers/gpio/Kconfig +++ b/trunk/drivers/gpio/Kconfig @@ -294,7 +294,7 @@ config GPIO_MAX732X_IRQ config GPIO_MC9S08DZ60 bool "MX35 3DS BOARD MC9S08DZ60 GPIO functions" - depends on I2C && MACH_MX35_3DS + depends on I2C=y && MACH_MX35_3DS help Select this to enable the MC9S08DZ60 GPIO driver diff --git a/trunk/drivers/gpio/gpio-em.c b/trunk/drivers/gpio/gpio-em.c index ae37181798b3..ec48ed512628 100644 --- a/trunk/drivers/gpio/gpio-em.c +++ b/trunk/drivers/gpio/gpio-em.c @@ -247,9 +247,9 @@ static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p) p->irq_base = irq_alloc_descs(pdata->irq_base, 0, pdata->number_of_pins, numa_node_id()); - if (IS_ERR_VALUE(p->irq_base)) { + if (p->irq_base < 0) { dev_err(&pdev->dev, "cannot get irq_desc\n"); - return -ENXIO; + return p->irq_base; } pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n", pdata->gpio_base, pdata->number_of_pins, p->irq_base); diff --git a/trunk/drivers/gpio/gpio-rdc321x.c b/trunk/drivers/gpio/gpio-rdc321x.c index e97016af6443..b62d443e9a59 100644 --- a/trunk/drivers/gpio/gpio-rdc321x.c +++ b/trunk/drivers/gpio/gpio-rdc321x.c @@ -170,6 +170,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev) rdc321x_gpio_dev->reg2_data_base = r->start + 0x4; rdc321x_gpio_dev->chip.label = "rdc321x-gpio"; + rdc321x_gpio_dev->chip.owner = THIS_MODULE; rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input; rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config; rdc321x_gpio_dev->chip.get = rdc_gpio_get_value; diff --git a/trunk/drivers/gpio/gpiolib-of.c b/trunk/drivers/gpio/gpiolib-of.c index a18c4aa68b1e..f1a45997aea8 100644 --- a/trunk/drivers/gpio/gpiolib-of.c +++ b/trunk/drivers/gpio/gpiolib-of.c @@ -82,7 +82,7 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname, gpiochip_find(&gg_data, of_gpiochip_find_and_xlate); of_node_put(gg_data.gpiospec.np); - pr_debug("%s exited with status %d\n", __func__, ret); + pr_debug("%s exited with status %d\n", __func__, gg_data.out_gpio); return gg_data.out_gpio; } EXPORT_SYMBOL(of_get_named_gpio_flags); diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c index e93b80a6d4e9..ed3340adeb6f 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen.c @@ -77,13 +77,9 @@ void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw, void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) { u16 ctl, v; - int cap, err; + int err; - cap = pci_pcie_cap(rdev->pdev); - if (!cap) - return; - - err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl); + err = 
pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl); if (err) return; @@ -95,7 +91,7 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) if ((v == 0) || (v == 6) || (v == 7)) { ctl &= ~PCI_EXP_DEVCTL_READRQ; ctl |= (2 << 12); - pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl); + pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl); } } diff --git a/trunk/drivers/hid/hid-core.c b/trunk/drivers/hid/hid-core.c index 8bf8a64e5115..8bcd168fffae 100644 --- a/trunk/drivers/hid/hid-core.c +++ b/trunk/drivers/hid/hid-core.c @@ -996,7 +996,8 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field, struct hid_driver *hdrv = hid->driver; int ret; - hid_dump_input(hid, usage, value); + if (!list_empty(&hid->debug_list)) + hid_dump_input(hid, usage, value); if (hdrv && hdrv->event && hid_match_usage(hid, usage)) { ret = hdrv->event(hid, field, usage, value); @@ -1558,7 +1559,9 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, - { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, +#if IS_ENABLED(CONFIG_HID_LENOVO_TPKBD) + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, +#endif { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, diff --git a/trunk/drivers/hid/hid-logitech-dj.c b/trunk/drivers/hid/hid-logitech-dj.c index 0f9c146fc00d..4d524b5f52f5 100644 --- a/trunk/drivers/hid/hid-logitech-dj.c +++ b/trunk/drivers/hid/hid-logitech-dj.c @@ -439,7 +439,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) struct dj_report *dj_report; int retval; - dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL); + dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); if (!dj_report) return -ENOMEM; dj_report->report_id = REPORT_ID_DJ_SHORT; @@ -456,7 +456,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, struct dj_report *dj_report; int retval; - dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL); + dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); if (!dj_report) return -ENOMEM; dj_report->report_id = REPORT_ID_DJ_SHORT; diff --git a/trunk/drivers/hid/usbhid/hid-quirks.c b/trunk/drivers/hid/usbhid/hid-quirks.c index 903eef3d3e10..991e85c7325c 100644 --- a/trunk/drivers/hid/usbhid/hid-quirks.c +++ b/trunk/drivers/hid/usbhid/hid-quirks.c @@ -70,6 +70,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS }, diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_cq.c b/trunk/drivers/infiniband/hw/ehca/ehca_cq.c index 8f5290147e8a..d9b0ebcb67d7 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_cq.c +++ 
b/trunk/drivers/infiniband/hw/ehca/ehca_cq.c @@ -220,7 +220,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, cq = ERR_PTR(-EAGAIN); goto create_cq_exit4; } - rpage = __pa(vpage); + rpage = virt_to_abs(vpage); h_ret = hipz_h_register_rpage_cq(adapter_handle, my_cq->ipz_cq_handle, diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_eq.c b/trunk/drivers/infiniband/hw/ehca/ehca_eq.c index 90da6747d395..818d721fc448 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_eq.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_eq.c @@ -101,7 +101,7 @@ int ehca_create_eq(struct ehca_shca *shca, if (!vpage) goto create_eq_exit2; - rpage = __pa(vpage); + rpage = virt_to_abs(vpage); h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle, eq->ipz_eq_handle, &eq->pf, diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_mrmw.c b/trunk/drivers/infiniband/hw/ehca/ehca_mrmw.c index 87844869dcc2..b781b2cb0624 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_mrmw.c @@ -1136,7 +1136,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca, } if (rnum > 1) { - rpage = __pa(kpage); + rpage = virt_to_abs(kpage); if (!rpage) { ehca_err(&shca->ib_device, "kpage=%p i=%x", kpage, i); @@ -1231,7 +1231,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca, pginfo->num_kpages, pginfo->num_hwpages, kpage); goto ehca_rereg_mr_rereg1_exit1; } - rpage = __pa(kpage); + rpage = virt_to_abs(kpage); if (!rpage) { ehca_err(&shca->ib_device, "kpage=%p", kpage); ret = -EFAULT; @@ -1525,7 +1525,7 @@ static inline void *ehca_calc_sectbase(int top, int dir, int idx) unsigned long ret = idx; ret |= dir << EHCA_DIR_INDEX_SHIFT; ret |= top << EHCA_TOP_INDEX_SHIFT; - return __va(ret << SECTION_SIZE_BITS); + return abs_to_virt(ret << SECTION_SIZE_BITS); } #define ehca_bmap_valid(entry) \ @@ -1537,7 +1537,7 @@ static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage, { u64 h_ret = 0; unsigned long page = 0; - u64 rpage = __pa(kpage); + u64 rpage = virt_to_abs(kpage); int page_count; void *sectbase = ehca_calc_sectbase(top, dir, idx); @@ -1553,7 +1553,7 @@ static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage, for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count); rnum++) { void *pg = sectbase + ((page++) * pginfo->hwpage_size); - kpage[rnum] = __pa(pg); + kpage[rnum] = virt_to_abs(pg); } h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr, @@ -1870,8 +1870,9 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo, for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) { pgaddr = page_to_pfn(sg_page(&chunk->page_list[i])) << PAGE_SHIFT ; - *kpage = pgaddr + (pginfo->next_hwpage * - pginfo->hwpage_size); + *kpage = phys_to_abs(pgaddr + + (pginfo->next_hwpage * + pginfo->hwpage_size)); if ( !(*kpage) ) { ehca_gen_err("pgaddr=%llx " "chunk->page_list[i]=%llx " @@ -1926,7 +1927,7 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list, u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT; if (ehca_debug_level >= 3) ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr, - *(u64 *)__va(pgaddr)); + *(u64 *)abs_to_virt(phys_to_abs(pgaddr))); if (pgaddr - PAGE_SIZE != *prev_pgaddr) { ehca_gen_err("uncontiguous page found pgaddr=%llx " "prev_pgaddr=%llx page_list_i=%x", @@ -1961,7 +1962,7 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo, if (nr_kpages == kpages_per_hwpage) { pgaddr = ( page_to_pfn(sg_page(&chunk->page_list[i])) << PAGE_SHIFT ); - *kpage = pgaddr; + *kpage = 
phys_to_abs(pgaddr); if ( !(*kpage) ) { ehca_gen_err("pgaddr=%llx i=%x", pgaddr, i); @@ -1989,11 +1990,13 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo, (pginfo->hwpage_size - 1)) >> PAGE_SHIFT; nr_kpages -= pginfo->kpage_cnt; - *kpage = pgaddr & - ~(pginfo->hwpage_size - 1); + *kpage = phys_to_abs( + pgaddr & + ~(pginfo->hwpage_size - 1)); } if (ehca_debug_level >= 3) { - u64 val = *(u64 *)__va(pgaddr); + u64 val = *(u64 *)abs_to_virt( + phys_to_abs(pgaddr)); ehca_gen_dbg("kpage=%llx chunk_page=%llx " "value=%016llx", *kpage, pgaddr, val); @@ -2081,8 +2084,9 @@ static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo, pginfo->num_hwpages, i); return -EFAULT; } - *kpage = (pbuf->addr & ~(pginfo->hwpage_size - 1)) + - (pginfo->next_hwpage * pginfo->hwpage_size); + *kpage = phys_to_abs( + (pbuf->addr & ~(pginfo->hwpage_size - 1)) + + (pginfo->next_hwpage * pginfo->hwpage_size)); if ( !(*kpage) && pbuf->addr ) { ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx " "next_hwpage=%llx", pbuf->addr, @@ -2120,8 +2124,8 @@ static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo, /* loop over desired page_list entries */ fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem; for (i = 0; i < number; i++) { - *kpage = (*fmrlist & ~(pginfo->hwpage_size - 1)) + - pginfo->next_hwpage * pginfo->hwpage_size; + *kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) + + pginfo->next_hwpage * pginfo->hwpage_size); if ( !(*kpage) ) { ehca_gen_err("*fmrlist=%llx fmrlist=%p " "next_listelem=%llx next_hwpage=%llx", @@ -2148,7 +2152,8 @@ static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo, u64 prev = *kpage; /* check if adrs are contiguous */ for (j = 1; j < cnt_per_hwpage; j++) { - u64 p = fmrlist[j] & ~(pginfo->hwpage_size - 1); + u64 p = phys_to_abs(fmrlist[j] & + ~(pginfo->hwpage_size - 1)); if (prev + pginfo->u.fmr.fmr_pgsize != p) { ehca_gen_err("uncontiguous fmr pages " "found prev=%llx p=%llx " @@ -2383,8 +2388,8 @@ static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages) memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE); } - start_section = (pfn * PAGE_SIZE) / EHCA_SECTSIZE; - end_section = ((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE; + start_section = phys_to_abs(pfn * PAGE_SIZE) / EHCA_SECTSIZE; + end_section = phys_to_abs((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE; for (i = start_section; i < end_section; i++) { int ret; top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT); @@ -2503,7 +2508,7 @@ static u64 ehca_map_vaddr(void *caddr) if (!ehca_bmap) return EHCA_INVAL_ADDR; - abs_addr = __pa(caddr); + abs_addr = virt_to_abs(caddr); top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT); if (!ehca_bmap_valid(ehca_bmap->top[top])) return EHCA_INVAL_ADDR; diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_qp.c b/trunk/drivers/infiniband/hw/ehca/ehca_qp.c index 149393915ae5..964f85520798 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_qp.c @@ -321,7 +321,7 @@ static inline int init_qp_queue(struct ehca_shca *shca, ret = -EINVAL; goto init_qp_queue1; } - rpage = __pa(vpage); + rpage = virt_to_abs(vpage); h_ret = hipz_h_register_rpage_qp(ipz_hca_handle, my_qp->ipz_qp_handle, @@ -1094,7 +1094,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca, ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p", qp_num, bad_send_wqe_p); /* convert wqe pointer to vadr */ - bad_send_wqe_v = __va((u64)bad_send_wqe_p); + bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p); 
if (ehca_debug_level >= 2) ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num); squeue = &my_qp->ipz_squeue; @@ -1138,7 +1138,7 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue, /* convert real to abs address */ wqe_p = wqe_p & (~(1UL << 63)); - wqe_v = __va(wqe_p); + wqe_v = abs_to_virt(wqe_p); if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) { ehca_gen_err("Invalid offset for calculating left cqes " diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_reqs.c b/trunk/drivers/infiniband/hw/ehca/ehca_reqs.c index 47f94984353d..fd05f48f6b0b 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_reqs.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_reqs.c @@ -135,7 +135,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr) mad_hdr->attr_mod); } for (j = 0; j < send_wr->num_sge; j++) { - u8 *data = __va(sge->addr); + u8 *data = (u8 *)abs_to_virt(sge->addr); ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x " "lkey=%x", idx, j, data, sge->length, sge->lkey); diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_tools.h b/trunk/drivers/infiniband/hw/ehca/ehca_tools.h index d280b12aae64..54c0d23bad92 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_tools.h +++ b/trunk/drivers/infiniband/hw/ehca/ehca_tools.h @@ -59,6 +59,7 @@ #include #include +#include #include #include #include diff --git a/trunk/drivers/infiniband/hw/ehca/hcp_if.c b/trunk/drivers/infiniband/hw/ehca/hcp_if.c index 2d41d04fd959..e6f9cdd94c7a 100644 --- a/trunk/drivers/infiniband/hw/ehca/hcp_if.c +++ b/trunk/drivers/infiniband/hw/ehca/hcp_if.c @@ -396,7 +396,7 @@ u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle, struct hipz_query_port *query_port_response_block) { u64 ret; - u64 r_cb = __pa(query_port_response_block); + u64 r_cb = virt_to_abs(query_port_response_block); if (r_cb & (EHCA_PAGESIZE-1)) { ehca_gen_err("response block not page aligned"); @@ -438,7 +438,7 @@ u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle, u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle, struct hipz_query_hca *query_hca_rblock) { - u64 r_cb = __pa(query_hca_rblock); + u64 r_cb = virt_to_abs(query_hca_rblock); if (r_cb & (EHCA_PAGESIZE-1)) { ehca_gen_err("response_block=%p not page aligned", @@ -577,7 +577,7 @@ u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle, adapter_handle.handle, /* r4 */ qp_handle.handle, /* r5 */ update_mask, /* r6 */ - __pa(mqpcb), /* r7 */ + virt_to_abs(mqpcb), /* r7 */ 0, 0, 0, 0, 0); if (ret == H_NOT_ENOUGH_RESOURCES) @@ -595,7 +595,7 @@ u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle, return ehca_plpar_hcall_norets(H_QUERY_QP, adapter_handle.handle, /* r4 */ qp_handle.handle, /* r5 */ - __pa(qqpcb), /* r6 */ + virt_to_abs(qqpcb), /* r6 */ 0, 0, 0, 0); } @@ -787,7 +787,7 @@ u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle, if (count > 1) { u64 *kpage; int i; - kpage = __va(logical_address_of_page); + kpage = (u64 *)abs_to_virt(logical_address_of_page); for (i = 0; i < count; i++) ehca_gen_dbg("kpage[%d]=%p", i, (void *)kpage[i]); @@ -944,7 +944,7 @@ u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle, void *rblock, unsigned long *byte_count) { - u64 r_cb = __pa(rblock); + u64 r_cb = virt_to_abs(rblock); if (r_cb & (EHCA_PAGESIZE-1)) { ehca_gen_err("rblock not page aligned."); diff --git a/trunk/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/trunk/drivers/infiniband/hw/ehca/ipz_pt_fn.c index 62c71fadb4d9..1898d6e7cce5 100644 --- 
a/trunk/drivers/infiniband/hw/ehca/ipz_pt_fn.c +++ b/trunk/drivers/infiniband/hw/ehca/ipz_pt_fn.c @@ -81,7 +81,7 @@ int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset) { int i; for (i = 0; i < queue->queue_length / queue->pagesize; i++) { - u64 page = __pa(queue->queue_pages[i]); + u64 page = (u64)virt_to_abs(queue->queue_pages[i]); if (addr >= page && addr < page + queue->pagesize) { *q_offset = addr - page + i * queue->pagesize; return 0; diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_reset.c b/trunk/drivers/infiniband/hw/mthca/mthca_reset.c index 4fa3534ec233..74c6a9426047 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_reset.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_reset.c @@ -241,16 +241,16 @@ int mthca_reset(struct mthca_dev *mdev) if (hca_pcie_cap) { devctl = hca_header[(hca_pcie_cap + PCI_EXP_DEVCTL) / 4]; - if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_DEVCTL, - devctl)) { + if (pcie_capability_write_word(mdev->pdev, PCI_EXP_DEVCTL, + devctl)) { err = -ENODEV; mthca_err(mdev, "Couldn't restore HCA PCI Express " "Device Control register, aborting.\n"); goto out; } linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4]; - if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_LNKCTL, - linkctl)) { + if (pcie_capability_write_word(mdev->pdev, PCI_EXP_LNKCTL, + linkctl)) { err = -ENODEV; mthca_err(mdev, "Couldn't restore HCA PCI Express " "Link control register, aborting.\n"); diff --git a/trunk/drivers/infiniband/hw/qib/qib_pcie.c b/trunk/drivers/infiniband/hw/qib/qib_pcie.c index 062c301ebf53..900137173210 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_pcie.c +++ b/trunk/drivers/infiniband/hw/qib/qib_pcie.c @@ -273,10 +273,9 @@ int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent, struct qib_msix_entry *entry) { u16 linkstat, speed; - int pos = 0, pose, ret = 1; + int pos = 0, ret = 1; - pose = pci_pcie_cap(dd->pcidev); - if (!pose) { + if (!pci_is_pcie(dd->pcidev)) { qib_dev_err(dd, "Can't find PCI Express capability!\n"); /* set up something... 
*/ dd->lbus_width = 1; @@ -298,7 +297,7 @@ int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent, if (!pos) qib_enable_intx(dd->pcidev); - pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat); + pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat); /* * speed is bits 0-3, linkwidth is bits 4-8 * no defines for them in headers @@ -516,7 +515,6 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd) { int r; struct pci_dev *parent; - int ppos; u16 devid; u32 mask, bits, val; @@ -529,8 +527,7 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd) qib_devinfo(dd->pcidev, "Parent not root\n"); return 1; } - ppos = pci_pcie_cap(parent); - if (!ppos) + if (!pci_is_pcie(parent)) return 1; if (parent->vendor != 0x8086) return 1; @@ -587,7 +584,6 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd) { int ret = 1; /* Assume the worst */ struct pci_dev *parent; - int ppos, epos; u16 pcaps, pctl, ecaps, ectl; int rc_sup, ep_sup; int rc_cur, ep_cur; @@ -598,19 +594,15 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd) qib_devinfo(dd->pcidev, "Parent not root\n"); goto bail; } - ppos = pci_pcie_cap(parent); - if (ppos) { - pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps); - pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl); - } else + + if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev)) goto bail; + pcie_capability_read_word(parent, PCI_EXP_DEVCAP, &pcaps); + pcie_capability_read_word(parent, PCI_EXP_DEVCTL, &pctl); /* Find out supported and configured values for endpoint (us) */ - epos = pci_pcie_cap(dd->pcidev); - if (epos) { - pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps); - pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl); - } else - goto bail; + pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCAP, &ecaps); + pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl); + ret = 0; /* Find max payload supported by root, endpoint */ rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD); @@ -629,14 +621,14 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd) rc_cur = rc_sup; pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) | val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD); - pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl); + pcie_capability_write_word(parent, PCI_EXP_DEVCTL, pctl); } /* If less than (allowed, supported), bump endpoint payload */ if (rc_sup > ep_cur) { ep_cur = rc_sup; ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) | val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD); - pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl); + pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl); } /* @@ -654,13 +646,13 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd) rc_cur = rc_sup; pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) | val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ); - pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl); + pcie_capability_write_word(parent, PCI_EXP_DEVCTL, pctl); } if (rc_sup > ep_cur) { ep_cur = rc_sup; ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) | val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ); - pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl); + pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl); } bail: return ret; diff --git a/trunk/drivers/input/keyboard/imx_keypad.c b/trunk/drivers/input/keyboard/imx_keypad.c index ff4c0a87a25f..ce68e361558c 100644 --- a/trunk/drivers/input/keyboard/imx_keypad.c +++ b/trunk/drivers/input/keyboard/imx_keypad.c @@ -358,6 +358,7 @@ static void imx_keypad_inhibit(struct imx_keypad *keypad) /* Inhibit KDI and KRI interrupts. 
*/ reg_val = readw(keypad->mmio_base + KPSR); reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE); + reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD; writew(reg_val, keypad->mmio_base + KPSR); /* Colums as open drain and disable all rows */ @@ -515,7 +516,9 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev) input_set_drvdata(input_dev, keypad); /* Ensure that the keypad will stay dormant until opened */ + clk_enable(keypad->clk); imx_keypad_inhibit(keypad); + clk_disable(keypad->clk); error = request_irq(irq, imx_keypad_irq_handler, 0, pdev->name, keypad); diff --git a/trunk/drivers/input/serio/i8042-x86ia64io.h b/trunk/drivers/input/serio/i8042-x86ia64io.h index 5ec774d6c82b..6918773ce024 100644 --- a/trunk/drivers/input/serio/i8042-x86ia64io.h +++ b/trunk/drivers/input/serio/i8042-x86ia64io.h @@ -176,6 +176,20 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"), }, }, + { + /* Gigabyte T1005 - defines wrong chassis type ("Other") */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "T1005"), + }, + }, + { + /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"), + }, + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), diff --git a/trunk/drivers/input/tablet/wacom_wac.c b/trunk/drivers/input/tablet/wacom_wac.c index 002041975de9..532d067a9e07 100644 --- a/trunk/drivers/input/tablet/wacom_wac.c +++ b/trunk/drivers/input/tablet/wacom_wac.c @@ -1848,7 +1848,10 @@ static const struct wacom_features wacom_features_0x2A = { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xF4 = - { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, + { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, + 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; +static const struct wacom_features wacom_features_0xF8 = + { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0x3F = { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023, @@ -2091,6 +2094,7 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0xEF) }, { USB_DEVICE_WACOM(0x47) }, { USB_DEVICE_WACOM(0xF4) }, + { USB_DEVICE_WACOM(0xF8) }, { USB_DEVICE_WACOM(0xFA) }, { USB_DEVICE_LENOVO(0x6004) }, { } diff --git a/trunk/drivers/input/touchscreen/edt-ft5x06.c b/trunk/drivers/input/touchscreen/edt-ft5x06.c index 9afc777a40a7..b06a5e3a665e 100644 --- a/trunk/drivers/input/touchscreen/edt-ft5x06.c +++ b/trunk/drivers/input/touchscreen/edt-ft5x06.c @@ -602,6 +602,7 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata) { if (tsdata->debug_dir) debugfs_remove_recursive(tsdata->debug_dir); + kfree(tsdata->raw_buffer); } #else @@ -843,7 +844,6 @@ static int __devexit edt_ft5x06_ts_remove(struct i2c_client *client) if (gpio_is_valid(pdata->reset_pin)) gpio_free(pdata->reset_pin); - kfree(tsdata->raw_buffer); kfree(tsdata); return 0; diff --git a/trunk/drivers/iommu/intel-iommu.c b/trunk/drivers/iommu/intel-iommu.c index 2297ec193eb4..db820d7dd0bc 100644 --- a/trunk/drivers/iommu/intel-iommu.c +++ b/trunk/drivers/iommu/intel-iommu.c @@ -2351,7 +2351,7 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup) return 0; if 
(pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) return 0; - } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) + } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE) return 0; /* @@ -3546,10 +3546,10 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev) struct pci_dev *bridge = bus->self; if (!bridge || !pci_is_pcie(bridge) || - bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) + pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) return 0; - if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) { + if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) { for (i = 0; i < atsru->devices_cnt; i++) if (atsru->devices[i] == bridge) return 1; diff --git a/trunk/drivers/macintosh/smu.c b/trunk/drivers/macintosh/smu.c index 7d5a6b40b31c..54ac7ffacb40 100644 --- a/trunk/drivers/macintosh/smu.c +++ b/trunk/drivers/macintosh/smu.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #define VERSION "0.7" @@ -501,7 +502,7 @@ int __init smu_init (void) * 32 bits value safely */ smu->cmd_buf_abs = (u32)smu_cmdbuf_abs; - smu->cmd_buf = __va(smu_cmdbuf_abs); + smu->cmd_buf = (struct smu_cmd_buf *)abs_to_virt(smu_cmdbuf_abs); smu->db_node = of_find_node_by_name(NULL, "smu-doorbell"); if (smu->db_node == NULL) { diff --git a/trunk/drivers/mmc/card/block.c b/trunk/drivers/mmc/card/block.c index f1c84decb192..172a768036d8 100644 --- a/trunk/drivers/mmc/card/block.c +++ b/trunk/drivers/mmc/card/block.c @@ -1411,7 +1411,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) /* complete ongoing async transfer before issuing discard */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); - if (req->cmd_flags & REQ_SECURE) + if (req->cmd_flags & REQ_SECURE && + !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) ret = mmc_blk_issue_secdiscard_rq(mq, req); else ret = mmc_blk_issue_discard_rq(mq, req); @@ -1716,6 +1717,7 @@ static int mmc_add_disk(struct mmc_blk_data *md) #define CID_MANFID_SANDISK 0x2 #define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 +#define CID_MANFID_SAMSUNG 0x15 static const struct mmc_fixup blk_fixups[] = { @@ -1752,6 +1754,28 @@ static const struct mmc_fixup blk_fixups[] = MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, MMC_QUIRK_LONG_READ_TIME), + /* + * On these Samsung MoviNAND parts, performing secure erase or + * secure trim can result in unrecoverable corruption due to a + * firmware bug. 
+ */ + MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + END_FIXUP }; diff --git a/trunk/drivers/mmc/host/atmel-mci.c b/trunk/drivers/mmc/host/atmel-mci.c index 322412cec4ee..a53c7c478e05 100644 --- a/trunk/drivers/mmc/host/atmel-mci.c +++ b/trunk/drivers/mmc/host/atmel-mci.c @@ -81,6 +81,7 @@ struct atmel_mci_caps { bool has_bad_data_ordering; bool need_reset_after_xfer; bool need_blksz_mul_4; + bool need_notbusy_for_read_ops; }; struct atmel_mci_dma { @@ -1625,7 +1626,8 @@ static void atmci_tasklet_func(unsigned long priv) __func__); atmci_set_completed(host, EVENT_XFER_COMPLETE); - if (host->data->flags & MMC_DATA_WRITE) { + if (host->caps.need_notbusy_for_read_ops || + (host->data->flags & MMC_DATA_WRITE)) { atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); state = STATE_WAITING_NOTBUSY; } else if (host->mrq->stop) { @@ -2218,6 +2220,7 @@ static void __init atmci_get_cap(struct atmel_mci *host) host->caps.has_bad_data_ordering = 1; host->caps.need_reset_after_xfer = 1; host->caps.need_blksz_mul_4 = 1; + host->caps.need_notbusy_for_read_ops = 0; /* keep only major version number */ switch (version & 0xf00) { @@ -2238,6 +2241,7 @@ static void __init atmci_get_cap(struct atmel_mci *host) case 0x200: host->caps.has_rwproof = 1; host->caps.need_blksz_mul_4 = 0; + host->caps.need_notbusy_for_read_ops = 1; case 0x100: host->caps.has_bad_data_ordering = 0; host->caps.need_reset_after_xfer = 0; diff --git a/trunk/drivers/mmc/host/bfin_sdh.c b/trunk/drivers/mmc/host/bfin_sdh.c index 03666174ca48..a17dd7363ceb 100644 --- a/trunk/drivers/mmc/host/bfin_sdh.c +++ b/trunk/drivers/mmc/host/bfin_sdh.c @@ -49,13 +49,6 @@ #define bfin_write_SDH_CFG bfin_write_RSI_CFG #endif -struct dma_desc_array { - unsigned long start_addr; - unsigned short cfg; - unsigned short x_count; - short x_modify; -} __packed; - struct sdh_host { struct mmc_host *mmc; spinlock_t lock; diff --git a/trunk/drivers/mmc/host/dw_mmc.c b/trunk/drivers/mmc/host/dw_mmc.c index 72dc3cde646d..af40d227bece 100644 --- a/trunk/drivers/mmc/host/dw_mmc.c +++ b/trunk/drivers/mmc/host/dw_mmc.c @@ -627,6 +627,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot) { struct dw_mci *host = slot->host; u32 div; + u32 clk_en_a; if (slot->clock != host->current_speed) { div = host->bus_hz / slot->clock; @@ -659,9 +660,11 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot) mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); - /* enable clock */ - mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE | - SDMMC_CLKEN_LOW_PWR) << slot->id)); + /* enable clock; only low power if no SDIO */ + clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; + if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id))) + clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; + 
mci_writel(host, CLKENA, clk_en_a); /* inform CIU */ mci_send_cmd(slot, @@ -862,6 +865,30 @@ static int dw_mci_get_cd(struct mmc_host *mmc) return present; } +/* + * Disable lower power mode. + * + * Low power mode will stop the card clock when idle. According to the + * description of the CLKENA register we should disable low power mode + * for SDIO cards if we need SDIO interrupts to work. + * + * This function is fast if low power mode is already disabled. + */ +static void dw_mci_disable_low_power(struct dw_mci_slot *slot) +{ + struct dw_mci *host = slot->host; + u32 clk_en_a; + const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; + + clk_en_a = mci_readl(host, CLKENA); + + if (clk_en_a & clken_low_pwr) { + mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr); + mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | + SDMMC_CMD_PRV_DAT_WAIT, 0); + } +} + static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) { struct dw_mci_slot *slot = mmc_priv(mmc); @@ -871,6 +898,14 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) /* Enable/disable Slot Specific SDIO interrupt */ int_mask = mci_readl(host, INTMASK); if (enb) { + /* + * Turn off low power mode if it was enabled. This is a bit of + * a heavy operation and we disable / enable IRQs a lot, so + * we'll leave low power mode disabled and it will get + * re-enabled again in dw_mci_setup_bus(). + */ + dw_mci_disable_low_power(slot); + mci_writel(host, INTMASK, (int_mask | SDMMC_INT_SDIO(slot->id))); } else { @@ -1429,22 +1464,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host) nbytes += len; remain -= len; } while (remain); - sg_miter->consumed = offset; + sg_miter->consumed = offset; status = mci_readl(host, MINTSTS); mci_writel(host, RINTSTS, SDMMC_INT_RXDR); - if (status & DW_MCI_DATA_ERROR_FLAGS) { - host->data_status = status; - data->bytes_xfered += nbytes; - sg_miter_stop(sg_miter); - host->sg = NULL; - smp_wmb(); - - set_bit(EVENT_DATA_ERROR, &host->pending_events); - - tasklet_schedule(&host->tasklet); - return; - } } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ data->bytes_xfered += nbytes; @@ -1497,23 +1520,10 @@ static void dw_mci_write_data_pio(struct dw_mci *host) nbytes += len; remain -= len; } while (remain); - sg_miter->consumed = offset; + sg_miter->consumed = offset; status = mci_readl(host, MINTSTS); mci_writel(host, RINTSTS, SDMMC_INT_TXDR); - if (status & DW_MCI_DATA_ERROR_FLAGS) { - host->data_status = status; - data->bytes_xfered += nbytes; - sg_miter_stop(sg_miter); - host->sg = NULL; - - smp_wmb(); - - set_bit(EVENT_DATA_ERROR, &host->pending_events); - - tasklet_schedule(&host->tasklet); - return; - } } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ data->bytes_xfered += nbytes; @@ -1547,12 +1557,11 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) { struct dw_mci *host = dev_id; - u32 status, pending; + u32 pending; unsigned int pass_count = 0; int i; do { - status = mci_readl(host, RINTSTS); pending = mci_readl(host, MINTSTS); /* read-only mask reg */ /* @@ -1570,7 +1579,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) if (pending & DW_MCI_CMD_ERROR_FLAGS) { mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); - host->cmd_status = status; + host->cmd_status = pending; smp_wmb(); set_bit(EVENT_CMD_COMPLETE, &host->pending_events); } @@ -1578,18 +1587,16 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) if (pending & DW_MCI_DATA_ERROR_FLAGS) { /* if 
there is an error report DATA_ERROR */ mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); - host->data_status = status; + host->data_status = pending; smp_wmb(); set_bit(EVENT_DATA_ERROR, &host->pending_events); - if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC | - SDMMC_INT_SBE | SDMMC_INT_EBE))) - tasklet_schedule(&host->tasklet); + tasklet_schedule(&host->tasklet); } if (pending & SDMMC_INT_DATA_OVER) { mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); if (!host->data_status) - host->data_status = status; + host->data_status = pending; smp_wmb(); if (host->dir_status == DW_MCI_RECV_STATUS) { if (host->sg != NULL) @@ -1613,7 +1620,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) if (pending & SDMMC_INT_CMD_DONE) { mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); - dw_mci_cmd_interrupt(host, status); + dw_mci_cmd_interrupt(host, pending); } if (pending & SDMMC_INT_CD) { diff --git a/trunk/drivers/mmc/host/mxs-mmc.c b/trunk/drivers/mmc/host/mxs-mmc.c index a51f9309ffbb..ad3fcea1269e 100644 --- a/trunk/drivers/mmc/host/mxs-mmc.c +++ b/trunk/drivers/mmc/host/mxs-mmc.c @@ -285,11 +285,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) writel(stat & MXS_MMC_IRQ_BITS, host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR); + spin_unlock(&host->lock); + if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) mmc_signal_sdio_irq(host->mmc); - spin_unlock(&host->lock); - if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) cmd->error = -ETIMEDOUT; else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) @@ -644,11 +644,6 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); writel(BM_SSP_CTRL1_SDIO_IRQ_EN, host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET); - - if (readl(host->base + HW_SSP_STATUS(host)) & - BM_SSP_STATUS_SDIO_IRQ) - mmc_signal_sdio_irq(host->mmc); - } else { writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); @@ -657,6 +652,11 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) } spin_unlock_irqrestore(&host->lock, flags); + + if (enable && readl(host->base + HW_SSP_STATUS(host)) & + BM_SSP_STATUS_SDIO_IRQ) + mmc_signal_sdio_irq(host->mmc); + } static const struct mmc_host_ops mxs_mmc_ops = { diff --git a/trunk/drivers/mmc/host/omap.c b/trunk/drivers/mmc/host/omap.c index 50e08f03aa65..a5999a74496a 100644 --- a/trunk/drivers/mmc/host/omap.c +++ b/trunk/drivers/mmc/host/omap.c @@ -668,7 +668,7 @@ mmc_omap_clk_timer(unsigned long data) static void mmc_omap_xfer_data(struct mmc_omap_host *host, int write) { - int n; + int n, nwords; if (host->buffer_bytes_left == 0) { host->sg_idx++; @@ -678,15 +678,23 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write) n = 64; if (n > host->buffer_bytes_left) n = host->buffer_bytes_left; + + nwords = n / 2; + nwords += n & 1; /* handle odd number of bytes to transfer */ + host->buffer_bytes_left -= n; host->total_bytes_left -= n; host->data->bytes_xfered += n; if (write) { - __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); + __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), + host->buffer, nwords); } else { - __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); + __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), + host->buffer, nwords); } + + host->buffer += nwords; } static inline void mmc_omap_report_irq(u16 status) diff --git a/trunk/drivers/mmc/host/sdhci-esdhc.h b/trunk/drivers/mmc/host/sdhci-esdhc.h index 
b97b2f5dafdb..d25f9ab9a54d 100644 --- a/trunk/drivers/mmc/host/sdhci-esdhc.h +++ b/trunk/drivers/mmc/host/sdhci-esdhc.h @@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) int div = 1; u32 temp; + if (clock == 0) + goto out; + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - if (clock == 0) - goto out; - while (host->max_clk / pre_div / 16 > clock && pre_div < 256) pre_div *= 2; diff --git a/trunk/drivers/mtd/ubi/vtbl.c b/trunk/drivers/mtd/ubi/vtbl.c index 437bc193e170..568307cc7caf 100644 --- a/trunk/drivers/mtd/ubi/vtbl.c +++ b/trunk/drivers/mtd/ubi/vtbl.c @@ -340,7 +340,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai, * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'. */ err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0); - kfree(new_aeb); + kmem_cache_free(ai->aeb_slab_cache, new_aeb); ubi_free_vid_hdr(ubi, vid_hdr); return err; @@ -353,7 +353,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai, list_add(&new_aeb->u.list, &ai->erase); goto retry; } - kfree(new_aeb); + kmem_cache_free(ai->aeb_slab_cache, new_aeb); out_free: ubi_free_vid_hdr(ubi, vid_hdr); return err; diff --git a/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 1bf5bbfe778e..8892e2b64498 100644 --- a/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -149,7 +149,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); /* clear error status */ - pci_write_config_word(pdev, pci_pcie_cap(pdev) + PCI_EXP_DEVSTA, + pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_NFED | PCI_EXP_DEVSTA_FED | PCI_EXP_DEVSTA_CED | diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 21054987257a..605c4574d32d 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -1162,14 +1162,9 @@ static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, static u8 bnx2x_is_pcie_pending(struct pci_dev *dev) { - int pos; u16 status; - pos = pci_pcie_cap(dev); - if (!pos) - return false; - - pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); return status & PCI_EXP_DEVSTA_TRPND; } @@ -6135,8 +6130,7 @@ static void bnx2x_init_pxp(struct bnx2x *bp) u16 devctl; int r_order, w_order; - pci_read_config_word(bp->pdev, - pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl); + pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl); DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); if (bp->mrrs == -1) @@ -9374,7 +9368,7 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp) static int __devinit bnx2x_do_flr(struct bnx2x *bp) { - int i, pos; + int i; u16 status; struct pci_dev *dev = bp->pdev; @@ -9391,16 +9385,12 @@ static int __devinit bnx2x_do_flr(struct bnx2x *bp) return -EINVAL; } - pos = pci_pcie_cap(dev); - if (!pos) - return -ENOTTY; - /* Wait for Transaction Pending bit clean */ for (i = 0; i < 4; i++) { if (i) msleep((1 << (i - 1)) * 
100); - pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); if (!(status & PCI_EXP_DEVSTA_TRPND)) goto clear; } diff --git a/trunk/drivers/net/ethernet/broadcom/tg3.c b/trunk/drivers/net/ethernet/broadcom/tg3.c index bf906c51d82a..8325fd8d4e5b 100644 --- a/trunk/drivers/net/ethernet/broadcom/tg3.c +++ b/trunk/drivers/net/ethernet/broadcom/tg3.c @@ -3653,17 +3653,9 @@ static int tg3_power_down_prepare(struct tg3 *tp) tg3_enable_register_access(tp); /* Restore the CLKREQ setting. */ - if (tg3_flag(tp, CLKREQ_BUG)) { - u16 lnkctl; - - pci_read_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, - &lnkctl); - lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN; - pci_write_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, - lnkctl); - } + if (tg3_flag(tp, CLKREQ_BUG)) + pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); tw32(TG3PCI_MISC_HOST_CTRL, @@ -4434,20 +4426,13 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) /* Prevent send BD corruption. */ if (tg3_flag(tp, CLKREQ_BUG)) { - u16 oldlnkctl, newlnkctl; - - pci_read_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, - &oldlnkctl); if (tp->link_config.active_speed == SPEED_100 || tp->link_config.active_speed == SPEED_10) - newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN; + pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); else - newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN; - if (newlnkctl != oldlnkctl) - pci_write_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + - PCI_EXP_LNKCTL, newlnkctl); + pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); } if (current_link_up != netif_carrier_ok(tp->dev)) { @@ -8054,7 +8039,7 @@ static int tg3_chip_reset(struct tg3 *tp) udelay(120); - if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) { + if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) { u16 val16; if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { @@ -8071,24 +8056,17 @@ static int tg3_chip_reset(struct tg3 *tp) } /* Clear the "no snoop" and "relaxed ordering" bits. */ - pci_read_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, - &val16); - val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN | - PCI_EXP_DEVCTL_NOSNOOP_EN); + val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN; /* * Older PCIe devices only support the 128 byte * MPS setting. Enforce the restriction. 
*/ if (!tg3_flag(tp, CPMU_PRESENT)) - val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; - pci_write_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, - val16); + val16 |= PCI_EXP_DEVCTL_PAYLOAD; + pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16); /* Clear error status */ - pci_write_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA, + pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_CED | PCI_EXP_DEVSTA_NFED | PCI_EXP_DEVSTA_FED | @@ -14565,9 +14543,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tg3_flag_set(tp, PCI_EXPRESS); - pci_read_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, - &lnkctl); + pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl); if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { diff --git a/trunk/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/trunk/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index bff8a3cdd3df..aef45d3113ba 100644 --- a/trunk/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/trunk/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -3289,22 +3289,18 @@ static void config_pcie(struct adapter *adap) unsigned int log2_width, pldsize; unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt; - pci_read_config_word(adap->pdev, - adap->pdev->pcie_cap + PCI_EXP_DEVCTL, - &val); + pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, &val); pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5; pci_read_config_word(adap->pdev, 0x2, &devid); if (devid == 0x37) { - pci_write_config_word(adap->pdev, - adap->pdev->pcie_cap + PCI_EXP_DEVCTL, - val & ~PCI_EXP_DEVCTL_READRQ & - ~PCI_EXP_DEVCTL_PAYLOAD); + pcie_capability_write_word(adap->pdev, PCI_EXP_DEVCTL, + val & ~PCI_EXP_DEVCTL_READRQ & + ~PCI_EXP_DEVCTL_PAYLOAD); pldsize = 0; } - pci_read_config_word(adap->pdev, adap->pdev->pcie_cap + PCI_EXP_LNKCTL, - &val); + pcie_capability_read_word(adap->pdev, PCI_EXP_LNKCTL, &val); fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0)); fst_trn_rx = adap->params.rev == 0 ? 
fst_trn_tx : @@ -3425,15 +3421,13 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params) static void get_pci_mode(struct adapter *adapter, struct pci_params *p) { static unsigned short speed_map[] = { 33, 66, 100, 133 }; - u32 pci_mode, pcie_cap; + u32 pci_mode; - pcie_cap = pci_pcie_cap(adapter->pdev); - if (pcie_cap) { + if (pci_is_pcie(adapter->pdev)) { u16 val; p->variant = PCI_VARIANT_PCIE; - pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA, - &val); + pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val); p->width = (val >> 4) & 0x3f; return; } diff --git a/trunk/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/trunk/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 5ed49af23d6a..4a20821511e7 100644 --- a/trunk/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/trunk/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3694,15 +3694,7 @@ static void __devinit print_port_info(const struct net_device *dev) static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev) { - u16 v; - int pos; - - pos = pci_pcie_cap(dev); - if (pos > 0) { - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v); - v |= PCI_EXP_DEVCTL_RELAX_EN; - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v); - } + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); } /* diff --git a/trunk/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/trunk/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index fa947dfa4c30..af1601323173 100644 --- a/trunk/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/trunk/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2741,11 +2741,9 @@ static void __devinit get_pci_mode(struct adapter *adapter, struct pci_params *p) { u16 val; - u32 pcie_cap = pci_pcie_cap(adapter->pdev); - if (pcie_cap) { - pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA, - &val); + if (pci_is_pcie(adapter->pdev)) { + pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val); p->speed = val & PCI_EXP_LNKSTA_CLS; p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; } diff --git a/trunk/drivers/net/ethernet/ibm/ehea/ehea.h b/trunk/drivers/net/ethernet/ibm/ehea/ehea.h index 6be7b9839f35..b8e46cc31e53 100644 --- a/trunk/drivers/net/ethernet/ibm/ehea/ehea.h +++ b/trunk/drivers/net/ethernet/ibm/ehea/ehea.h @@ -35,6 +35,7 @@ #include #include +#include #include #define DRV_NAME "ehea" diff --git a/trunk/drivers/net/ethernet/ibm/ehea/ehea_phyp.c b/trunk/drivers/net/ethernet/ibm/ehea/ehea_phyp.c index d3a130ccdcc8..30f903332e92 100644 --- a/trunk/drivers/net/ethernet/ibm/ehea/ehea_phyp.c +++ b/trunk/drivers/net/ethernet/ibm/ehea/ehea_phyp.c @@ -141,7 +141,7 @@ u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category, qp_category, /* R5 */ qp_handle, /* R6 */ sel_mask, /* R7 */ - __pa(cb_addr), /* R8 */ + virt_to_abs(cb_addr), /* R8 */ 0, 0); } @@ -415,7 +415,7 @@ u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat, (u64) cat, /* R5 */ qp_handle, /* R6 */ sel_mask, /* R7 */ - __pa(cb_addr), /* R8 */ + virt_to_abs(cb_addr), /* R8 */ 0, 0, 0, 0); /* R9-R12 */ *inv_attr_id = outs[0]; @@ -528,7 +528,7 @@ u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr) { u64 hret, cb_logaddr; - cb_logaddr = __pa(cb_addr); + cb_logaddr = virt_to_abs(cb_addr); hret = ehea_plpar_hcall_norets(H_QUERY_HEA, adapter_handle, /* R4 */ @@ -545,7 +545,7 @@ u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num, void *cb_addr) { u64 port_info; - u64 cb_logaddr = __pa(cb_addr); + u64 cb_logaddr = virt_to_abs(cb_addr); u64 arr_index = 0; port_info = 
EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat) @@ -567,7 +567,7 @@ u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num, unsigned long outs[PLPAR_HCALL9_BUFSIZE]; u64 port_info; u64 arr_index = 0; - u64 cb_logaddr = __pa(cb_addr); + u64 cb_logaddr = virt_to_abs(cb_addr); port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat) | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num); @@ -621,6 +621,6 @@ u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle, return ehea_plpar_hcall_norets(H_ERROR_DATA, adapter_handle, /* R4 */ ressource_handle, /* R5 */ - __pa(rblock), /* R6 */ + virt_to_abs(rblock), /* R6 */ 0, 0, 0, 0); /* R7-R12 */ } diff --git a/trunk/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/trunk/drivers/net/ethernet/ibm/ehea/ehea_qmr.c index 27f881758d16..cb66f574dc97 100644 --- a/trunk/drivers/net/ethernet/ibm/ehea/ehea_qmr.c +++ b/trunk/drivers/net/ethernet/ibm/ehea/ehea_qmr.c @@ -163,7 +163,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, goto out_kill_hwq; } - rpage = __pa(vpage); + rpage = virt_to_abs(vpage); hret = ehea_h_register_rpage(adapter->handle, 0, EHEA_CQ_REGISTER_ORIG, cq->fw_handle, rpage, 1); @@ -290,7 +290,7 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter, goto out_kill_hwq; } - rpage = __pa(vpage); + rpage = virt_to_abs(vpage); hret = ehea_h_register_rpage(adapter->handle, 0, EHEA_EQ_REGISTER_ORIG, @@ -395,7 +395,7 @@ static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, pr_err("hw_qpageit_get_inc failed\n"); goto out_kill_hwq; } - rpage = __pa(vpage); + rpage = virt_to_abs(vpage); hret = ehea_h_register_rpage(adapter->handle, 0, h_call_q_selector, qp->fw_handle, rpage, 1); @@ -790,7 +790,7 @@ u64 ehea_map_vaddr(void *caddr) if (!ehea_bmap) return EHEA_INVAL_ADDR; - index = __pa(caddr) >> SECTION_SIZE_BITS; + index = virt_to_abs(caddr) >> SECTION_SIZE_BITS; top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK; if (!ehea_bmap->top[top]) return EHEA_INVAL_ADDR; @@ -812,7 +812,7 @@ static inline void *ehea_calc_sectbase(int top, int dir, int idx) unsigned long ret = idx; ret |= dir << EHEA_DIR_INDEX_SHIFT; ret |= top << EHEA_TOP_INDEX_SHIFT; - return __va(ret << SECTION_SIZE_BITS); + return abs_to_virt(ret << SECTION_SIZE_BITS); } static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt, @@ -822,7 +822,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt, void *pg; u64 j, m, hret; unsigned long k = 0; - u64 pt_abs = __pa(pt); + u64 pt_abs = virt_to_abs(pt); void *sectbase = ehea_calc_sectbase(top, dir, idx); @@ -830,7 +830,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt, for (m = 0; m < EHEA_MAX_RPAGE; m++) { pg = sectbase + ((k++) * EHEA_PAGESIZE); - pt[m] = __pa(pg); + pt[m] = virt_to_abs(pg); } hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0, 0, pt_abs, EHEA_MAX_RPAGE); diff --git a/trunk/drivers/net/ethernet/intel/e1000e/netdev.c b/trunk/drivers/net/ethernet/intel/e1000e/netdev.c index d01a099475a1..acc030efb67b 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5584,16 +5584,15 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, */ if (adapter->flags & FLAG_IS_QUAD_PORT) { struct pci_dev *us_dev = pdev->bus->self; - int pos = pci_pcie_cap(us_dev); u16 devctl; - pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl); - pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, - (devctl & ~PCI_EXP_DEVCTL_CERE)); + 
pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl); + pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, + (devctl & ~PCI_EXP_DEVCTL_CERE)); e1000_power_off(pdev, sleep, wake); - pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); + pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); } else { e1000_power_off(pdev, sleep, wake); } @@ -5607,25 +5606,15 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) #else static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) { - int pos; - u16 reg16; - /* * Both device and parent should have the same ASPM setting. * Disable ASPM in downstream component first and then upstream. */ - pos = pci_pcie_cap(pdev); - pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); - reg16 &= ~state; - pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); - - if (!pdev->bus->self) - return; + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state); - pos = pci_pcie_cap(pdev->bus->self); - pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, ®16); - reg16 &= ~state; - pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16); + if (pdev->bus->self) + pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL, + state); } #endif static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) diff --git a/trunk/drivers/net/ethernet/intel/igb/igb_main.c b/trunk/drivers/net/ethernet/intel/igb/igb_main.c index 48cc4fb1a307..2036ae365bae 100644 --- a/trunk/drivers/net/ethernet/intel/igb/igb_main.c +++ b/trunk/drivers/net/ethernet/intel/igb/igb_main.c @@ -6538,28 +6538,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) { struct igb_adapter *adapter = hw->back; - u16 cap_offset; - cap_offset = adapter->pdev->pcie_cap; - if (!cap_offset) + if (pcie_capability_read_word(adapter->pdev, reg, value)) return -E1000_ERR_CONFIG; - pci_read_config_word(adapter->pdev, cap_offset + reg, value); - return 0; } s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) { struct igb_adapter *adapter = hw->back; - u16 cap_offset; - cap_offset = adapter->pdev->pcie_cap; - if (!cap_offset) + if (pcie_capability_write_word(adapter->pdev, reg, *value)) return -E1000_ERR_CONFIG; - pci_write_config_word(adapter->pdev, cap_offset + reg, *value); - return 0; } diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 4326f74f7137..976570d4c939 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7527,7 +7527,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, goto skip_bad_vf_detection; bdev = pdev->bus->self; - while (bdev && (bdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT)) + while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) bdev = bdev->bus->self; if (!bdev) diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/reset.c b/trunk/drivers/net/ethernet/mellanox/mlx4/reset.c index 11e7c1cb99bf..dd1b5093d8b1 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/reset.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/reset.c @@ -141,16 +141,16 @@ int mlx4_reset(struct mlx4_dev *dev) /* Now restore the PCI headers */ if (pcie_cap) { devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4]; - if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_DEVCTL, - devctl)) { + if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL, + devctl)) { err = -ENODEV; 
mlx4_err(dev, "Couldn't restore HCA PCI Express " "Device Control register, aborting.\n"); goto out; } linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; - if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_LNKCTL, - linkctl)) { + if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL, + linkctl)) { err = -ENODEV; mlx4_err(dev, "Couldn't restore HCA PCI Express " "Link control register, aborting.\n"); diff --git a/trunk/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/trunk/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index fa85cf1353fd..83516e3369c9 100644 --- a/trunk/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/trunk/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -1078,22 +1078,16 @@ static int myri10ge_reset(struct myri10ge_priv *mgp) #ifdef CONFIG_MYRI10GE_DCA static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on) { - int ret, cap, err; + int ret; u16 ctl; - cap = pci_pcie_cap(pdev); - if (!cap) - return 0; - - err = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl); - if (err) - return 0; + pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &ctl); ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4; if (ret != on) { ctl &= ~PCI_EXP_DEVCTL_RELAX_EN; ctl |= (on << 4); - pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl); + pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, ctl); } return ret; } @@ -3192,18 +3186,13 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp) struct device *dev = &mgp->pdev->dev; int cap; unsigned err_cap; - u16 val; - u8 ext_type; int ret; if (!myri10ge_ecrc_enable || !bridge) return; /* check that the bridge is a root port */ - cap = pci_pcie_cap(bridge); - pci_read_config_word(bridge, cap + PCI_CAP_FLAGS, &val); - ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4; - if (ext_type != PCI_EXP_TYPE_ROOT_PORT) { + if (pci_pcie_type(bridge) != PCI_EXP_TYPE_ROOT_PORT) { if (myri10ge_ecrc_enable > 1) { struct pci_dev *prev_bridge, *old_bridge = bridge; @@ -3218,11 +3207,8 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp) " to force ECRC\n"); return; } - cap = pci_pcie_cap(bridge); - pci_read_config_word(bridge, - cap + PCI_CAP_FLAGS, &val); - ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4; - } while (ext_type != PCI_EXP_TYPE_ROOT_PORT); + } while (pci_pcie_type(bridge) != + PCI_EXP_TYPE_ROOT_PORT); dev_info(dev, "Forcing ECRC on non-root port %s" @@ -3335,11 +3321,10 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp) int overridden = 0; if (myri10ge_force_firmware == 0) { - int link_width, exp_cap; + int link_width; u16 lnk; - exp_cap = pci_pcie_cap(mgp->pdev); - pci_read_config_word(mgp->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk); + pcie_capability_read_word(mgp->pdev, PCI_EXP_LNKSTA, &lnk); link_width = (lnk >> 4) & 0x3f; /* Check to see if Link is less than 8 or if the diff --git a/trunk/drivers/net/ethernet/neterion/vxge/vxge-config.c b/trunk/drivers/net/ethernet/neterion/vxge/vxge-config.c index 32d06824fe3e..c2e420a84d22 100644 --- a/trunk/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/trunk/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -757,7 +757,7 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev) u16 lnk; /* Get the negotiated link width and speed from PCI config space */ - pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk); + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk); if ((lnk & PCI_EXP_LNKSTA_CLS) != 1) return VXGE_HW_ERR_INVALID_PCI_INFO; @@ -1982,7 +1982,7 @@ u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev) struct pci_dev *dev = 
hldev->pdev; u16 lnk; - pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk); + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk); return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4; } diff --git a/trunk/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/trunk/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 342b3a79bd0f..01d6141cedd9 100644 --- a/trunk/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/trunk/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1382,7 +1382,7 @@ static void netxen_mask_aer_correctable(struct netxen_adapter *adapter) adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP) return; - if (root->pcie_type != PCI_EXP_TYPE_ROOT_PORT) + if (pci_pcie_type(root) != PCI_EXP_TYPE_ROOT_PORT) return; aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR); diff --git a/trunk/drivers/net/ethernet/realtek/r8169.c b/trunk/drivers/net/ethernet/realtek/r8169.c index b47d5b35024e..a7cc56007b33 100644 --- a/trunk/drivers/net/ethernet/realtek/r8169.c +++ b/trunk/drivers/net/ethernet/realtek/r8169.c @@ -833,15 +833,8 @@ static void rtl_unlock_work(struct rtl8169_private *tp) static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) { - int cap = pci_pcie_cap(pdev); - - if (cap) { - u16 ctl; - - pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl); - ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force; - pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl); - } + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, force); } struct rtl_cond { @@ -4739,28 +4732,14 @@ static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e, static void rtl_disable_clock_request(struct pci_dev *pdev) { - int cap = pci_pcie_cap(pdev); - - if (cap) { - u16 ctl; - - pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl); - ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN; - pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl); - } + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); } static void rtl_enable_clock_request(struct pci_dev *pdev) { - int cap = pci_pcie_cap(pdev); - - if (cap) { - u16 ctl; - - pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl); - ctl |= PCI_EXP_LNKCTL_CLKREQ_EN; - pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl); - } + pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); } #define R8168_CPCMD_QUIRK_MASK (\ @@ -5405,14 +5384,9 @@ static void rtl_hw_start_8101(struct net_device *dev) tp->event_slow &= ~RxFIFOOver; if (tp->mac_version == RTL_GIGA_MAC_VER_13 || - tp->mac_version == RTL_GIGA_MAC_VER_16) { - int cap = pci_pcie_cap(pdev); - - if (cap) { - pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_NOSNOOP_EN); - } - } + tp->mac_version == RTL_GIGA_MAC_VER_16) + pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_NOSNOOP_EN); RTL_W8(Cfg9346, Cfg9346_Unlock); diff --git a/trunk/drivers/net/ethernet/sun/niu.c b/trunk/drivers/net/ethernet/sun/niu.c index c2a0fe393267..3208dca66758 100644 --- a/trunk/drivers/net/ethernet/sun/niu.c +++ b/trunk/drivers/net/ethernet/sun/niu.c @@ -9762,9 +9762,8 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev, union niu_parent_id parent_id; struct net_device *dev; struct niu *np; - int err, pos; + int err; u64 dma_mask; - u16 val16; niu_driver_version(); @@ -9787,8 +9786,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev, goto err_out_disable_pdev; } - pos = pci_pcie_cap(pdev); - if (pos <= 0) { + if (!pci_is_pcie(pdev)) { dev_err(&pdev->dev, 
"Cannot find PCI Express capability, aborting\n"); goto err_out_free_res; } @@ -9813,14 +9811,11 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev, goto err_out_free_dev; } - pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); - val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; - val16 |= (PCI_EXP_DEVCTL_CERE | - PCI_EXP_DEVCTL_NFERE | - PCI_EXP_DEVCTL_FERE | - PCI_EXP_DEVCTL_URRE | - PCI_EXP_DEVCTL_RELAX_EN); - pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_NOSNOOP_EN, + PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | + PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE | + PCI_EXP_DEVCTL_RELAX_EN); dma_mask = DMA_BIT_MASK(44); err = pci_set_dma_mask(pdev, dma_mask); diff --git a/trunk/drivers/net/wireless/ath/ath9k/pci.c b/trunk/drivers/net/wireless/ath/ath9k/pci.c index a978984d78a5..ef11dc639461 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/pci.c +++ b/trunk/drivers/net/wireless/ath/ath9k/pci.c @@ -113,41 +113,32 @@ static void ath_pci_aspm_init(struct ath_common *common) struct ath_hw *ah = sc->sc_ah; struct pci_dev *pdev = to_pci_dev(sc->dev); struct pci_dev *parent; - int pos; - u8 aspm; + u16 aspm; if (!ah->is_pciexpress) return; - pos = pci_pcie_cap(pdev); - if (!pos) - return; - parent = pdev->bus->self; if (!parent) return; if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { /* Bluetooth coexistance requires disabling ASPM. */ - pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm); - aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); - pci_write_config_byte(pdev, pos + PCI_EXP_LNKCTL, aspm); + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, + PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); /* * Both upstream and downstream PCIe components should * have the same ASPM settings. */ - pos = pci_pcie_cap(parent); - pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm); - aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); - pci_write_config_byte(parent, pos + PCI_EXP_LNKCTL, aspm); + pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, + PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); ath_info(common, "Disabling ASPM since BTCOEX is enabled\n"); return; } - pos = pci_pcie_cap(parent); - pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm); + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm); if (aspm & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) { ah->aspm_enabled = true; /* Initialize PCIe PM and SERDES registers. 
*/ diff --git a/trunk/drivers/net/wireless/iwlegacy/common.h b/trunk/drivers/net/wireless/iwlegacy/common.h index 5f5017767b99..724682669060 100644 --- a/trunk/drivers/net/wireless/iwlegacy/common.h +++ b/trunk/drivers/net/wireless/iwlegacy/common.h @@ -1832,10 +1832,8 @@ int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd); static inline u16 il_pcie_link_ctl(struct il_priv *il) { - int pos; u16 pci_lnk_ctl; - pos = pci_pcie_cap(il->pci_dev); - pci_read_config_word(il->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); + pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &pci_lnk_ctl); return pci_lnk_ctl; } diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c b/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c index 1e86ea2266d4..e316ca4632b1 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -675,13 +675,10 @@ static void iwl_set_pwr_vmain(struct iwl_trans *trans) static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int pos; u16 pci_lnk_ctl; - struct pci_dev *pci_dev = trans_pcie->pci_dev; - - pos = pci_pcie_cap(pci_dev); - pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); + pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, + &pci_lnk_ctl); return pci_lnk_ctl; } diff --git a/trunk/drivers/net/wireless/rtlwifi/pci.c b/trunk/drivers/net/wireless/rtlwifi/pci.c index 80f75d3ba84a..5983631a1b1a 100644 --- a/trunk/drivers/net/wireless/rtlwifi/pci.c +++ b/trunk/drivers/net/wireless/rtlwifi/pci.c @@ -372,13 +372,11 @@ static void rtl_pci_parse_configuration(struct pci_dev *pdev, struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); u8 tmp; - int pos; - u8 linkctrl_reg; + u16 linkctrl_reg; /*Link Control Register */ - pos = pci_pcie_cap(pdev); - pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg); - pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg; + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &linkctrl_reg); + pcipriv->ndis_adapter.linkctrl_reg = (u8)linkctrl_reg; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n", pcipriv->ndis_adapter.linkctrl_reg); diff --git a/trunk/drivers/pci/access.c b/trunk/drivers/pci/access.c index ba91a7e17519..3af0478c057b 100644 --- a/trunk/drivers/pci/access.c +++ b/trunk/drivers/pci/access.c @@ -469,3 +469,205 @@ void pci_cfg_access_unlock(struct pci_dev *dev) raw_spin_unlock_irqrestore(&pci_lock, flags); } EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); + +static inline int pcie_cap_version(const struct pci_dev *dev) +{ + return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS; +} + +static inline bool pcie_cap_has_devctl(const struct pci_dev *dev) +{ + return true; +} + +static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_ENDPOINT || + type == PCI_EXP_TYPE_LEG_END; +} + +static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT); +} + +static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC; +} + +static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos) 
+{ + if (!pci_is_pcie(dev)) + return false; + + switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return pcie_cap_has_devctl(dev); + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return pcie_cap_has_lnkctl(dev); + case PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return pcie_cap_version(dev) > 1; + default: + return false; + } +} + +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) + */ +int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + if (pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). + */ + if (ret) + *val = 0; + return ret; + } + + /* + * For Functions that do not implement the Slot Capabilities, + * Slot Status, and Slot Control registers, these spaces must + * be hardwired to 0b, with the exception of the Presence Detect + * State bit in the Slot Status register of Downstream Ports, + * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) + */ + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} +EXPORT_SYMBOL(pcie_capability_read_word); + +int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) +{ + int ret; + + *val = 0; + if (pos & 3) + return -EINVAL; + + if (pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_dword() fails, it may + * have been written as 0xFFFFFFFF if hardware error happens + * during pci_read_config_dword(). 
+ */ + if (ret) + *val = 0; + return ret; + } + + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} +EXPORT_SYMBOL(pcie_capability_read_dword); + +int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + if (!pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); +} +EXPORT_SYMBOL(pcie_capability_write_word); + +int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) +{ + if (pos & 3) + return -EINVAL; + + if (!pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val); +} +EXPORT_SYMBOL(pcie_capability_write_dword); + +int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) +{ + int ret; + u16 val; + + ret = pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = pcie_capability_write_word(dev, pos, val); + } + + return ret; +} +EXPORT_SYMBOL(pcie_capability_clear_and_set_word); + +int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos, + u32 clear, u32 set) +{ + int ret; + u32 val; + + ret = pcie_capability_read_dword(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = pcie_capability_write_dword(dev, pos, val); + } + + return ret; +} +EXPORT_SYMBOL(pcie_capability_clear_and_set_dword); diff --git a/trunk/drivers/pci/hotplug/pciehp_acpi.c b/trunk/drivers/pci/hotplug/pciehp_acpi.c index 376d70d17176..24d709b7388c 100644 --- a/trunk/drivers/pci/hotplug/pciehp_acpi.c +++ b/trunk/drivers/pci/hotplug/pciehp_acpi.c @@ -81,16 +81,12 @@ static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots); /* Dummy driver for dumplicate name detection */ static int __init dummy_probe(struct pcie_device *dev) { - int pos; u32 slot_cap; acpi_handle handle; struct dummy_slot *slot, *tmp; struct pci_dev *pdev = dev->port; - pos = pci_pcie_cap(pdev); - if (!pos) - return -ENODEV; - pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap); + pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap); slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) return -ENOMEM; diff --git a/trunk/drivers/pci/hotplug/pciehp_hpc.c b/trunk/drivers/pci/hotplug/pciehp_hpc.c index 302451e8289d..13b2eaf7ba43 100644 --- a/trunk/drivers/pci/hotplug/pciehp_hpc.c +++ b/trunk/drivers/pci/hotplug/pciehp_hpc.c @@ -44,25 +44,25 @@ static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) { struct pci_dev *dev = ctrl->pcie->port; - return pci_read_config_word(dev, pci_pcie_cap(dev) + reg, value); + return pcie_capability_read_word(dev, reg, value); } static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value) { struct pci_dev *dev = ctrl->pcie->port; - return pci_read_config_dword(dev, pci_pcie_cap(dev) + reg, value); + return pcie_capability_read_dword(dev, reg, value); } static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value) { struct pci_dev *dev = ctrl->pcie->port; - return pci_write_config_word(dev, pci_pcie_cap(dev) + reg, value); + return pcie_capability_write_word(dev, reg, value); } static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value) { struct pci_dev *dev = ctrl->pcie->port; - return pci_write_config_dword(dev, pci_pcie_cap(dev) + reg, value); + return pcie_capability_write_dword(dev, reg, value); } /* Power Control Command 
*/ @@ -855,10 +855,6 @@ struct controller *pcie_init(struct pcie_device *dev) goto abort; } ctrl->pcie = dev; - if (!pci_pcie_cap(pdev)) { - ctrl_err(ctrl, "Cannot find PCI Express capability\n"); - goto abort_ctrl; - } if (pciehp_readl(ctrl, PCI_EXP_SLTCAP, &slot_cap)) { ctrl_err(ctrl, "Cannot read SLOTCAP register\n"); goto abort_ctrl; diff --git a/trunk/drivers/pci/hotplug/pcihp_slot.c b/trunk/drivers/pci/hotplug/pcihp_slot.c index 8c05a18c9770..fec2d5b75440 100644 --- a/trunk/drivers/pci/hotplug/pcihp_slot.c +++ b/trunk/drivers/pci/hotplug/pcihp_slot.c @@ -96,17 +96,11 @@ static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp) static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) { int pos; - u16 reg16; u32 reg32; if (!hpp) return; - /* Find PCI Express capability */ - pos = pci_pcie_cap(dev); - if (!pos) - return; - if (hpp->revision > 1) { dev_warn(&dev->dev, "PCIe settings rev %d not supported\n", hpp->revision); @@ -114,17 +108,13 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) } /* Initialize Device Control Register */ - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, ®16); - reg16 = (reg16 & hpp->pci_exp_devctl_and) | hpp->pci_exp_devctl_or; - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16); + pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or); /* Initialize Link Control Register */ - if (dev->subordinate) { - pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, ®16); - reg16 = (reg16 & hpp->pci_exp_lnkctl_and) - | hpp->pci_exp_lnkctl_or; - pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, reg16); - } + if (dev->subordinate) + pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, + ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or); /* Find Advanced Error Reporting Enhanced Capability */ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); diff --git a/trunk/drivers/pci/iov.c b/trunk/drivers/pci/iov.c index 74bbaf82638d..aeccc911abb8 100644 --- a/trunk/drivers/pci/iov.c +++ b/trunk/drivers/pci/iov.c @@ -433,8 +433,8 @@ static int sriov_init(struct pci_dev *dev, int pos) struct resource *res; struct pci_dev *pdev; - if (dev->pcie_type != PCI_EXP_TYPE_RC_END && - dev->pcie_type != PCI_EXP_TYPE_ENDPOINT) + if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END && + pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT) return -ENODEV; pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl); @@ -503,7 +503,7 @@ static int sriov_init(struct pci_dev *dev, int pos) iov->self = dev; pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); - if (dev->pcie_type == PCI_EXP_TYPE_RC_END) + if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link); if (pdev) diff --git a/trunk/drivers/pci/pci-driver.c b/trunk/drivers/pci/pci-driver.c index 5270f1a99328..d6fd6b6d9d4b 100644 --- a/trunk/drivers/pci/pci-driver.c +++ b/trunk/drivers/pci/pci-driver.c @@ -280,8 +280,12 @@ static long local_pci_probe(void *_ddi) { struct drv_dev_and_id *ddi = _ddi; struct device *dev = &ddi->dev->dev; + struct device *parent = dev->parent; int rc; + /* The parent bridge must be in active state when probing */ + if (parent) + pm_runtime_get_sync(parent); /* Unbound PCI devices are always set to disabled and suspended. * During probe, the device is set to enabled and active and the * usage count is incremented. 
If the driver supports runtime PM, @@ -298,6 +302,8 @@ static long local_pci_probe(void *_ddi) pm_runtime_set_suspended(dev); pm_runtime_put_noidle(dev); } + if (parent) + pm_runtime_put(parent); return rc; } diff --git a/trunk/drivers/pci/pci-sysfs.c b/trunk/drivers/pci/pci-sysfs.c index 6869009c7393..02d107b15281 100644 --- a/trunk/drivers/pci/pci-sysfs.c +++ b/trunk/drivers/pci/pci-sysfs.c @@ -458,6 +458,40 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf) } struct device_attribute vga_attr = __ATTR_RO(boot_vga); +static void +pci_config_pm_runtime_get(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct device *parent = dev->parent; + + if (parent) + pm_runtime_get_sync(parent); + pm_runtime_get_noresume(dev); + /* + * pdev->current_state is set to PCI_D3cold during suspending, + * so wait until suspending completes + */ + pm_runtime_barrier(dev); + /* + * Only need to resume devices in D3cold, because config + * registers are still accessible for devices suspended but + * not in D3cold. + */ + if (pdev->current_state == PCI_D3cold) + pm_runtime_resume(dev); +} + +static void +pci_config_pm_runtime_put(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct device *parent = dev->parent; + + pm_runtime_put(dev); + if (parent) + pm_runtime_put_sync(parent); +} + static ssize_t pci_read_config(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, @@ -484,6 +518,8 @@ pci_read_config(struct file *filp, struct kobject *kobj, size = count; } + pci_config_pm_runtime_get(dev); + if ((off & 1) && size) { u8 val; pci_user_read_config_byte(dev, off, &val); @@ -529,6 +565,8 @@ pci_read_config(struct file *filp, struct kobject *kobj, --size; } + pci_config_pm_runtime_put(dev); + return count; } @@ -549,6 +587,8 @@ pci_write_config(struct file* filp, struct kobject *kobj, count = size; } + pci_config_pm_runtime_get(dev); + if ((off & 1) && size) { pci_user_write_config_byte(dev, off, data[off - init_off]); off++; @@ -587,6 +627,8 @@ pci_write_config(struct file* filp, struct kobject *kobj, --size; } + pci_config_pm_runtime_put(dev); + return count; } diff --git a/trunk/drivers/pci/pci.c b/trunk/drivers/pci/pci.c index f3ea977a5b1b..292cb2e0ff7f 100644 --- a/trunk/drivers/pci/pci.c +++ b/trunk/drivers/pci/pci.c @@ -253,38 +253,6 @@ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) return pos; } -/** - * pci_pcie_cap2 - query for devices' PCI_CAP_ID_EXP v2 capability structure - * @dev: PCI device to check - * - * Like pci_pcie_cap() but also checks that the PCIe capability version is - * >= 2. Note that v1 capability structures could be sparse in that not - * all register fields were required. v2 requires the entire structure to - * be present size wise, while still allowing for non-implemented registers - * to exist but they must be hardwired to 0. - * - * Due to the differences in the versions of capability structures, one - * must be careful not to try and access non-existant registers that may - * exist in early versions - v1 - of Express devices. - * - * Returns the offset of the PCIe capability structure as long as the - * capability version is >= 2; otherwise 0 is returned. 
- */ -static int pci_pcie_cap2(struct pci_dev *dev) -{ - u16 flags; - int pos; - - pos = pci_pcie_cap(dev); - if (pos) { - pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags); - if ((flags & PCI_EXP_FLAGS_VERS) < 2) - pos = 0; - } - - return pos; -} - /** * pci_find_ext_capability - Find an extended capability * @dev: PCI device to query @@ -854,21 +822,6 @@ EXPORT_SYMBOL(pci_choose_state); #define PCI_EXP_SAVE_REGS 7 -#define pcie_cap_has_devctl(type, flags) 1 -#define pcie_cap_has_lnkctl(type, flags) \ - ((flags & PCI_EXP_FLAGS_VERS) > 1 || \ - (type == PCI_EXP_TYPE_ROOT_PORT || \ - type == PCI_EXP_TYPE_ENDPOINT || \ - type == PCI_EXP_TYPE_LEG_END)) -#define pcie_cap_has_sltctl(type, flags) \ - ((flags & PCI_EXP_FLAGS_VERS) > 1 || \ - ((type == PCI_EXP_TYPE_ROOT_PORT) || \ - (type == PCI_EXP_TYPE_DOWNSTREAM && \ - (flags & PCI_EXP_FLAGS_SLOT)))) -#define pcie_cap_has_rtctl(type, flags) \ - ((flags & PCI_EXP_FLAGS_VERS) > 1 || \ - (type == PCI_EXP_TYPE_ROOT_PORT || \ - type == PCI_EXP_TYPE_RC_EC)) static struct pci_cap_saved_state *pci_find_saved_cap( struct pci_dev *pci_dev, char cap) @@ -885,13 +838,11 @@ static struct pci_cap_saved_state *pci_find_saved_cap( static int pci_save_pcie_state(struct pci_dev *dev) { - int pos, i = 0; + int i = 0; struct pci_cap_saved_state *save_state; u16 *cap; - u16 flags; - pos = pci_pcie_cap(dev); - if (!pos) + if (!pci_is_pcie(dev)) return 0; save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); @@ -899,60 +850,37 @@ static int pci_save_pcie_state(struct pci_dev *dev) dev_err(&dev->dev, "buffer not found in %s\n", __func__); return -ENOMEM; } - cap = (u16 *)&save_state->cap.data[0]; - - pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags); - if (pcie_cap_has_devctl(dev->pcie_type, flags)) - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]); - if (pcie_cap_has_lnkctl(dev->pcie_type, flags)) - pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]); - if (pcie_cap_has_sltctl(dev->pcie_type, flags)) - pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]); - if (pcie_cap_has_rtctl(dev->pcie_type, flags)) - pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]); - - pos = pci_pcie_cap2(dev); - if (!pos) - return 0; + cap = (u16 *)&save_state->cap.data[0]; + pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]); + pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]); + pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]); + pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]); + pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]); + pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]); + pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]); - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]); - pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]); - pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]); return 0; } static void pci_restore_pcie_state(struct pci_dev *dev) { - int i = 0, pos; + int i = 0; struct pci_cap_saved_state *save_state; u16 *cap; - u16 flags; save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); - pos = pci_find_capability(dev, PCI_CAP_ID_EXP); - if (!save_state || pos <= 0) - return; - cap = (u16 *)&save_state->cap.data[0]; - - pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags); - - if (pcie_cap_has_devctl(dev->pcie_type, flags)) - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]); - if (pcie_cap_has_lnkctl(dev->pcie_type, flags)) - pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]); - if (pcie_cap_has_sltctl(dev->pcie_type, 
flags)) - pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]); - if (pcie_cap_has_rtctl(dev->pcie_type, flags)) - pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]); - - pos = pci_pcie_cap2(dev); - if (!pos) + if (!save_state) return; - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]); - pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]); - pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]); + cap = (u16 *)&save_state->cap.data[0]; + pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]); + pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]); + pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]); + pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]); + pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]); + pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]); + pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]); } @@ -1941,6 +1869,7 @@ void pci_pm_init(struct pci_dev *dev) dev->pm_cap = pm; dev->d3_delay = PCI_PM_D3_WAIT; dev->d3cold_delay = PCI_PM_D3COLD_WAIT; + dev->d3cold_allowed = true; dev->d1_support = false; dev->d2_support = false; @@ -2066,35 +1995,24 @@ void pci_free_cap_save_buffers(struct pci_dev *dev) */ void pci_enable_ari(struct pci_dev *dev) { - int pos; u32 cap; - u16 ctrl; struct pci_dev *bridge; if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn) return; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); - if (!pos) + if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) return; bridge = dev->bus->self; if (!bridge) return; - /* ARI is a PCIe cap v2 feature */ - pos = pci_pcie_cap2(bridge); - if (!pos) - return; - - pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap); + pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap); if (!(cap & PCI_EXP_DEVCAP2_ARI)) return; - pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl); - ctrl |= PCI_EXP_DEVCTL2_ARI; - pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl); - + pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ARI); bridge->ari_enabled = 1; } @@ -2109,20 +2027,14 @@ void pci_enable_ari(struct pci_dev *dev) */ void pci_enable_ido(struct pci_dev *dev, unsigned long type) { - int pos; - u16 ctrl; + u16 ctrl = 0; - /* ID-based Ordering is a PCIe cap v2 feature */ - pos = pci_pcie_cap2(dev); - if (!pos) - return; - - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); if (type & PCI_EXP_IDO_REQUEST) ctrl |= PCI_EXP_IDO_REQ_EN; if (type & PCI_EXP_IDO_COMPLETION) ctrl |= PCI_EXP_IDO_CMP_EN; - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); + if (ctrl) + pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl); } EXPORT_SYMBOL(pci_enable_ido); @@ -2133,20 +2045,14 @@ EXPORT_SYMBOL(pci_enable_ido); */ void pci_disable_ido(struct pci_dev *dev, unsigned long type) { - int pos; - u16 ctrl; + u16 ctrl = 0; - /* ID-based Ordering is a PCIe cap v2 feature */ - pos = pci_pcie_cap2(dev); - if (!pos) - return; - - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); if (type & PCI_EXP_IDO_REQUEST) - ctrl &= ~PCI_EXP_IDO_REQ_EN; + ctrl |= PCI_EXP_IDO_REQ_EN; if (type & PCI_EXP_IDO_COMPLETION) - ctrl &= ~PCI_EXP_IDO_CMP_EN; - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); + ctrl |= PCI_EXP_IDO_CMP_EN; + if (ctrl) + pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl); } EXPORT_SYMBOL(pci_disable_ido); @@ -2171,17 +2077,11 @@ EXPORT_SYMBOL(pci_disable_ido); */ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type) { - int pos; u32 cap; u16 ctrl; int ret; - /* OBFF 
is a PCIe cap v2 feature */ - pos = pci_pcie_cap2(dev); - if (!pos) - return -ENOTSUPP; - - pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap); + pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); if (!(cap & PCI_EXP_OBFF_MASK)) return -ENOTSUPP; /* no OBFF support at all */ @@ -2192,7 +2092,7 @@ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type) return ret; } - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); + pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl); if (cap & PCI_EXP_OBFF_WAKE) ctrl |= PCI_EXP_OBFF_WAKE_EN; else { @@ -2210,7 +2110,7 @@ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type) return -ENOTSUPP; } } - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); + pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl); return 0; } @@ -2224,17 +2124,7 @@ EXPORT_SYMBOL(pci_enable_obff); */ void pci_disable_obff(struct pci_dev *dev) { - int pos; - u16 ctrl; - - /* OBFF is a PCIe cap v2 feature */ - pos = pci_pcie_cap2(dev); - if (!pos) - return; - - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); - ctrl &= ~PCI_EXP_OBFF_WAKE_EN; - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); + pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN); } EXPORT_SYMBOL(pci_disable_obff); @@ -2247,15 +2137,9 @@ EXPORT_SYMBOL(pci_disable_obff); */ static bool pci_ltr_supported(struct pci_dev *dev) { - int pos; u32 cap; - /* LTR is a PCIe cap v2 feature */ - pos = pci_pcie_cap2(dev); - if (!pos) - return false; - - pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap); + pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); return cap & PCI_EXP_DEVCAP2_LTR; } @@ -2272,22 +2156,15 @@ static bool pci_ltr_supported(struct pci_dev *dev) */ int pci_enable_ltr(struct pci_dev *dev) { - int pos; - u16 ctrl; int ret; - if (!pci_ltr_supported(dev)) - return -ENOTSUPP; - - /* LTR is a PCIe cap v2 feature */ - pos = pci_pcie_cap2(dev); - if (!pos) - return -ENOTSUPP; - /* Only primary function can enable/disable LTR */ if (PCI_FUNC(dev->devfn) != 0) return -EINVAL; + if (!pci_ltr_supported(dev)) + return -ENOTSUPP; + /* Enable upstream ports first */ if (dev->bus->self) { ret = pci_enable_ltr(dev->bus->self); @@ -2295,11 +2172,7 @@ int pci_enable_ltr(struct pci_dev *dev) return ret; } - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); - ctrl |= PCI_EXP_LTR_EN; - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); - - return 0; + return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN); } EXPORT_SYMBOL(pci_enable_ltr); @@ -2309,24 +2182,14 @@ EXPORT_SYMBOL(pci_enable_ltr); */ void pci_disable_ltr(struct pci_dev *dev) { - int pos; - u16 ctrl; - - if (!pci_ltr_supported(dev)) - return; - - /* LTR is a PCIe cap v2 feature */ - pos = pci_pcie_cap2(dev); - if (!pos) - return; - /* Only primary function can enable/disable LTR */ if (PCI_FUNC(dev->devfn) != 0) return; - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); - ctrl &= ~PCI_EXP_LTR_EN; - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); + if (!pci_ltr_supported(dev)) + return; + + pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN); } EXPORT_SYMBOL(pci_disable_ltr); @@ -2409,9 +2272,6 @@ void pci_enable_acs(struct pci_dev *dev) if (!pci_acs_enable) return; - if (!pci_is_pcie(dev)) - return; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); if (!pos) return; @@ -2459,8 +2319,8 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | 
PCI_ACS_DT); - if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM || - pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT || + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM || + pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || pdev->multifunction) { pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS); if (!pos) @@ -3176,15 +3036,10 @@ EXPORT_SYMBOL(pci_set_dma_seg_boundary); static int pcie_flr(struct pci_dev *dev, int probe) { int i; - int pos; u32 cap; - u16 status, control; - - pos = pci_pcie_cap(dev); - if (!pos) - return -ENOTTY; + u16 status; - pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap); + pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); if (!(cap & PCI_EXP_DEVCAP_FLR)) return -ENOTTY; @@ -3196,7 +3051,7 @@ static int pcie_flr(struct pci_dev *dev, int probe) if (i) msleep((1 << (i - 1)) * 100); - pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); if (!(status & PCI_EXP_DEVSTA_TRPND)) goto clear; } @@ -3205,9 +3060,7 @@ static int pcie_flr(struct pci_dev *dev, int probe) "proceeding with reset anyway\n"); clear: - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control); - control |= PCI_EXP_DEVCTL_BCR_FLR; - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control); + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); msleep(100); @@ -3575,18 +3428,11 @@ EXPORT_SYMBOL(pcix_set_mmrbc); */ int pcie_get_readrq(struct pci_dev *dev) { - int ret, cap; u16 ctl; - cap = pci_pcie_cap(dev); - if (!cap) - return -EINVAL; - - ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); - if (!ret) - ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); + pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); - return ret; + return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); } EXPORT_SYMBOL(pcie_get_readrq); @@ -3600,19 +3446,11 @@ EXPORT_SYMBOL(pcie_get_readrq); */ int pcie_set_readrq(struct pci_dev *dev, int rq) { - int cap, err = -EINVAL; - u16 ctl, v; + u16 v; if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) - goto out; - - cap = pci_pcie_cap(dev); - if (!cap) - goto out; + return -EINVAL; - err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); - if (err) - goto out; /* * If using the "performance" PCIe config, we clamp the * read rq size to the max packet size to prevent the @@ -3630,14 +3468,8 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) v = (ffs(rq) - 8) << 12; - if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) { - ctl &= ~PCI_EXP_DEVCTL_READRQ; - ctl |= v; - err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl); - } - -out: - return err; + return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, v); } EXPORT_SYMBOL(pcie_set_readrq); @@ -3650,18 +3482,11 @@ EXPORT_SYMBOL(pcie_set_readrq); */ int pcie_get_mps(struct pci_dev *dev) { - int ret, cap; u16 ctl; - cap = pci_pcie_cap(dev); - if (!cap) - return -EINVAL; - - ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); - if (!ret) - ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); + pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); - return ret; + return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); } /** @@ -3674,32 +3499,18 @@ int pcie_get_mps(struct pci_dev *dev) */ int pcie_set_mps(struct pci_dev *dev, int mps) { - int cap, err = -EINVAL; - u16 ctl, v; + u16 v; if (mps < 128 || mps > 4096 || !is_power_of_2(mps)) - goto out; + return -EINVAL; v = ffs(mps) - 8; if (v > dev->pcie_mpss) - goto out; + return -EINVAL; v <<= 5; - cap = pci_pcie_cap(dev); - if (!cap) - goto out; - - err = 
pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); - if (err) - goto out; - - if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) { - ctl &= ~PCI_EXP_DEVCTL_PAYLOAD; - ctl |= v; - err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl); - } -out: - return err; + return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_PAYLOAD, v); } /** diff --git a/trunk/drivers/pci/pcie/aer/aer_inject.c b/trunk/drivers/pci/pcie/aer/aer_inject.c index 52229863e9fe..4e24cb8a94ae 100644 --- a/trunk/drivers/pci/pcie/aer/aer_inject.c +++ b/trunk/drivers/pci/pcie/aer/aer_inject.c @@ -288,7 +288,7 @@ static struct pci_dev *pcie_find_root_port(struct pci_dev *dev) while (1) { if (!pci_is_pcie(dev)) break; - if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) return dev; if (!dev->bus->self) break; diff --git a/trunk/drivers/pci/pcie/aer/aerdrv.c b/trunk/drivers/pci/pcie/aer/aerdrv.c index 58ad7917553c..c78778fc0cba 100644 --- a/trunk/drivers/pci/pcie/aer/aerdrv.c +++ b/trunk/drivers/pci/pcie/aer/aerdrv.c @@ -81,10 +81,11 @@ bool pci_aer_available(void) static int set_device_error_reporting(struct pci_dev *dev, void *data) { bool enable = *((bool *)data); + int type = pci_pcie_type(dev); - if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) || - (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) || - (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) { + if ((type == PCI_EXP_TYPE_ROOT_PORT) || + (type == PCI_EXP_TYPE_UPSTREAM) || + (type == PCI_EXP_TYPE_DOWNSTREAM)) { if (enable) pci_enable_pcie_error_reporting(dev); else @@ -121,19 +122,17 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev, static void aer_enable_rootport(struct aer_rpc *rpc) { struct pci_dev *pdev = rpc->rpd->port; - int pos, aer_pos; + int aer_pos; u16 reg16; u32 reg32; - pos = pci_pcie_cap(pdev); /* Clear PCIe Capability's Device Status */ - pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, ®16); - pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16); + pcie_capability_read_word(pdev, PCI_EXP_DEVSTA, ®16); + pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, reg16); /* Disable system error generation in response to error messages */ - pci_read_config_word(pdev, pos + PCI_EXP_RTCTL, ®16); - reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK); - pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16); + pcie_capability_clear_word(pdev, PCI_EXP_RTCTL, + SYSTEM_ERROR_INTR_ON_MESG_MASK); aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); /* Clear error status */ @@ -395,9 +394,8 @@ static void aer_error_resume(struct pci_dev *dev) u16 reg16; /* Clean up Root device status */ - pos = pci_pcie_cap(dev); - pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, ®16); - pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16); + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, ®16); + pcie_capability_write_word(dev, PCI_EXP_DEVSTA, reg16); /* Clean AER Root Error Status */ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); diff --git a/trunk/drivers/pci/pcie/aer/aerdrv_acpi.c b/trunk/drivers/pci/pcie/aer/aerdrv_acpi.c index 124f20ff11b2..5194a7d41730 100644 --- a/trunk/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/trunk/drivers/pci/pcie/aer/aerdrv_acpi.c @@ -60,7 +60,7 @@ static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data) p = (struct acpi_hest_aer_common *)(hest_hdr + 1); if (p->flags & ACPI_HEST_GLOBAL) { if ((pci_is_pcie(info->pci_dev) && - info->pci_dev->pcie_type == pcie_type) || bridge) + pci_pcie_type(info->pci_dev) == pcie_type) || bridge) ff = !!(p->flags & 
ACPI_HEST_FIRMWARE_FIRST); } else if (hest_match_pci(p, info->pci_dev)) diff --git a/trunk/drivers/pci/pcie/aer/aerdrv_core.c b/trunk/drivers/pci/pcie/aer/aerdrv_core.c index 0ca053538146..cefc0ddcacf6 100644 --- a/trunk/drivers/pci/pcie/aer/aerdrv_core.c +++ b/trunk/drivers/pci/pcie/aer/aerdrv_core.c @@ -32,53 +32,28 @@ static bool nosourceid; module_param(forceload, bool, 0); module_param(nosourceid, bool, 0); +#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \ + PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE) + int pci_enable_pcie_error_reporting(struct pci_dev *dev) { - u16 reg16 = 0; - int pos; - if (pcie_aer_get_firmware_first(dev)) return -EIO; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); - if (!pos) - return -EIO; - - pos = pci_pcie_cap(dev); - if (!pos) + if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) return -EIO; - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, ®16); - reg16 |= (PCI_EXP_DEVCTL_CERE | - PCI_EXP_DEVCTL_NFERE | - PCI_EXP_DEVCTL_FERE | - PCI_EXP_DEVCTL_URRE); - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16); - - return 0; + return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS); } EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); int pci_disable_pcie_error_reporting(struct pci_dev *dev) { - u16 reg16 = 0; - int pos; - if (pcie_aer_get_firmware_first(dev)) return -EIO; - pos = pci_pcie_cap(dev); - if (!pos) - return -EIO; - - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, ®16); - reg16 &= ~(PCI_EXP_DEVCTL_CERE | - PCI_EXP_DEVCTL_NFERE | - PCI_EXP_DEVCTL_FERE | - PCI_EXP_DEVCTL_URRE); - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16); - - return 0; + return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, + PCI_EXP_AER_FLAGS); } EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); @@ -151,18 +126,12 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info) */ if (atomic_read(&dev->enable_cnt) == 0) return false; - pos = pci_pcie_cap(dev); - if (!pos) - return false; /* Check if AER is enabled */ - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, ®16); - if (!(reg16 & ( - PCI_EXP_DEVCTL_CERE | - PCI_EXP_DEVCTL_NFERE | - PCI_EXP_DEVCTL_FERE | - PCI_EXP_DEVCTL_URRE))) + pcie_capability_read_word(dev, PCI_EXP_DEVCTL, ®16); + if (!(reg16 & PCI_EXP_AER_FLAGS)) return false; + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (!pos) return false; @@ -465,7 +434,7 @@ static pci_ers_result_t reset_link(struct pci_dev *dev) if (driver && driver->reset_link) { status = driver->reset_link(udev); - } else if (udev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) { + } else if (pci_pcie_type(udev) == PCI_EXP_TYPE_DOWNSTREAM) { status = default_downstream_reset_link(udev); } else { dev_printk(KERN_DEBUG, &dev->dev, diff --git a/trunk/drivers/pci/pcie/aspm.c b/trunk/drivers/pci/pcie/aspm.c index b500840a143b..213753b283a6 100644 --- a/trunk/drivers/pci/pcie/aspm.c +++ b/trunk/drivers/pci/pcie/aspm.c @@ -125,21 +125,16 @@ static int policy_to_clkpm_state(struct pcie_link_state *link) static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable) { - int pos; - u16 reg16; struct pci_dev *child; struct pci_bus *linkbus = link->pdev->subordinate; list_for_each_entry(child, &linkbus->devices, bus_list) { - pos = pci_pcie_cap(child); - if (!pos) - return; - pci_read_config_word(child, pos + PCI_EXP_LNKCTL, ®16); if (enable) - reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN; + pcie_capability_set_word(child, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); else - reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN; - 
pci_write_config_word(child, pos + PCI_EXP_LNKCTL, reg16); + pcie_capability_clear_word(child, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); } link->clkpm_enabled = !!enable; } @@ -157,7 +152,7 @@ static void pcie_set_clkpm(struct pcie_link_state *link, int enable) static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) { - int pos, capable = 1, enabled = 1; + int capable = 1, enabled = 1; u32 reg32; u16 reg16; struct pci_dev *child; @@ -165,16 +160,13 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) /* All functions should have the same cap and state, take the worst */ list_for_each_entry(child, &linkbus->devices, bus_list) { - pos = pci_pcie_cap(child); - if (!pos) - return; - pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, ®32); + pcie_capability_read_dword(child, PCI_EXP_LNKCAP, ®32); if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) { capable = 0; enabled = 0; break; } - pci_read_config_word(child, pos + PCI_EXP_LNKCTL, ®16); + pcie_capability_read_word(child, PCI_EXP_LNKCTL, ®16); if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN)) enabled = 0; } @@ -190,7 +182,7 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) */ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) { - int ppos, cpos, same_clock = 1; + int same_clock = 1; u16 reg16, parent_reg, child_reg[8]; unsigned long start_jiffies; struct pci_dev *child, *parent = link->pdev; @@ -203,46 +195,43 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) BUG_ON(!pci_is_pcie(child)); /* Check downstream component if bit Slot Clock Configuration is 1 */ - cpos = pci_pcie_cap(child); - pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, ®16); + pcie_capability_read_word(child, PCI_EXP_LNKSTA, ®16); if (!(reg16 & PCI_EXP_LNKSTA_SLC)) same_clock = 0; /* Check upstream component if bit Slot Clock Configuration is 1 */ - ppos = pci_pcie_cap(parent); - pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, ®16); + pcie_capability_read_word(parent, PCI_EXP_LNKSTA, ®16); if (!(reg16 & PCI_EXP_LNKSTA_SLC)) same_clock = 0; /* Configure downstream component, all functions */ list_for_each_entry(child, &linkbus->devices, bus_list) { - cpos = pci_pcie_cap(child); - pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, ®16); + pcie_capability_read_word(child, PCI_EXP_LNKCTL, ®16); child_reg[PCI_FUNC(child->devfn)] = reg16; if (same_clock) reg16 |= PCI_EXP_LNKCTL_CCC; else reg16 &= ~PCI_EXP_LNKCTL_CCC; - pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, reg16); + pcie_capability_write_word(child, PCI_EXP_LNKCTL, reg16); } /* Configure upstream component */ - pci_read_config_word(parent, ppos + PCI_EXP_LNKCTL, ®16); + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, ®16); parent_reg = reg16; if (same_clock) reg16 |= PCI_EXP_LNKCTL_CCC; else reg16 &= ~PCI_EXP_LNKCTL_CCC; - pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16); + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); /* Retrain link */ reg16 |= PCI_EXP_LNKCTL_RL; - pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16); + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); /* Wait for link training end. 
Break out after waiting for timeout */ start_jiffies = jiffies; for (;;) { - pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, ®16); + pcie_capability_read_word(parent, PCI_EXP_LNKSTA, ®16); if (!(reg16 & PCI_EXP_LNKSTA_LT)) break; if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) @@ -255,12 +244,10 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) /* Training failed. Restore common clock configurations */ dev_printk(KERN_ERR, &parent->dev, "ASPM: Could not configure common clock\n"); - list_for_each_entry(child, &linkbus->devices, bus_list) { - cpos = pci_pcie_cap(child); - pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, - child_reg[PCI_FUNC(child->devfn)]); - } - pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, parent_reg); + list_for_each_entry(child, &linkbus->devices, bus_list) + pcie_capability_write_word(child, PCI_EXP_LNKCTL, + child_reg[PCI_FUNC(child->devfn)]); + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg); } /* Convert L0s latency encoding to ns */ @@ -305,16 +292,14 @@ struct aspm_register_info { static void pcie_get_aspm_reg(struct pci_dev *pdev, struct aspm_register_info *info) { - int pos; u16 reg16; u32 reg32; - pos = pci_pcie_cap(pdev); - pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, ®32); + pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, ®32); info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; - pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, ®16); info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC; } @@ -412,7 +397,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) * do ASPM for now. 
*/ list_for_each_entry(child, &linkbus->devices, bus_list) { - if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { + if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) { link->aspm_disable = ASPM_STATE_ALL; break; } @@ -420,17 +405,15 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) /* Get and check endpoint acceptable latencies */ list_for_each_entry(child, &linkbus->devices, bus_list) { - int pos; u32 reg32, encoding; struct aspm_latency *acceptable = &link->acceptable[PCI_FUNC(child->devfn)]; - if (child->pcie_type != PCI_EXP_TYPE_ENDPOINT && - child->pcie_type != PCI_EXP_TYPE_LEG_END) + if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT && + pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END) continue; - pos = pci_pcie_cap(child); - pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, ®32); + pcie_capability_read_dword(child, PCI_EXP_DEVCAP, ®32); /* Calculate endpoint L0s acceptable latency */ encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; acceptable->l0s = calc_l0s_acceptable(encoding); @@ -444,13 +427,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) { - u16 reg16; - int pos = pci_pcie_cap(pdev); - - pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, ®16); - reg16 &= ~0x3; - reg16 |= val; - pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); + pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL, 0x3, val); } static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state) @@ -505,7 +482,6 @@ static void free_link_state(struct pcie_link_state *link) static int pcie_aspm_sanity_check(struct pci_dev *pdev) { struct pci_dev *child; - int pos; u32 reg32; /* @@ -513,8 +489,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) * very strange. Disable ASPM for the whole slot */ list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { - pos = pci_pcie_cap(child); - if (!pos) + if (!pci_is_pcie(child)) return -EINVAL; /* @@ -530,7 +505,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) * Disable ASPM for pre-1.1 PCIe device, we follow MS to use * RBER bit to determine if a function is 1.1 version device */ - pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, ®32); + pcie_capability_read_dword(child, PCI_EXP_DEVCAP, ®32); if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) { dev_printk(KERN_INFO, &child->dev, "disabling ASPM" " on pre-1.1 PCIe device. 
You can enable it" @@ -552,7 +527,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) INIT_LIST_HEAD(&link->children); INIT_LIST_HEAD(&link->link); link->pdev = pdev; - if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) { + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM) { struct pcie_link_state *parent; parent = pdev->bus->parent->self->link_state; if (!parent) { @@ -585,12 +560,12 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) if (!pci_is_pcie(pdev) || pdev->link_state) return; - if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && - pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) + if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT && + pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) return; /* VIA has a strange chipset, root port is under a bridge */ - if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT && + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT && pdev->bus->self) return; @@ -647,8 +622,8 @@ static void pcie_update_aspm_capable(struct pcie_link_state *root) if (link->root != root) continue; list_for_each_entry(child, &linkbus->devices, bus_list) { - if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT) && - (child->pcie_type != PCI_EXP_TYPE_LEG_END)) + if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) && + (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)) continue; pcie_aspm_check_latency(child); } @@ -663,8 +638,8 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) if (!pci_is_pcie(pdev) || !parent || !parent->link_state) return; - if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && - (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) + if ((pci_pcie_type(parent) != PCI_EXP_TYPE_ROOT_PORT) && + (pci_pcie_type(parent) != PCI_EXP_TYPE_DOWNSTREAM)) return; down_read(&pci_bus_sem); @@ -704,8 +679,8 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev) if (aspm_disabled || !pci_is_pcie(pdev) || !link) return; - if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && - (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) + if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) && + (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)) return; /* * Devices changed PM state, we should recheck if latency @@ -729,8 +704,8 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev) if (aspm_policy != POLICY_POWERSAVE) return; - if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && - (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) + if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) && + (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)) return; down_read(&pci_bus_sem); @@ -757,8 +732,8 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem, if (!pci_is_pcie(pdev)) return; - if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT || - pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || + pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM) parent = pdev; if (!parent || !parent->link_state) return; @@ -933,8 +908,8 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) struct pcie_link_state *link_state = pdev->link_state; if (!pci_is_pcie(pdev) || - (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && - pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) + (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT && + pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) return; if (link_state->aspm_support) @@ -950,8 +925,8 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) struct pcie_link_state *link_state = pdev->link_state; if (!pci_is_pcie(pdev) || - (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && - 
pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) + (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT && + pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) return; if (link_state->aspm_support) diff --git a/trunk/drivers/pci/pcie/pme.c b/trunk/drivers/pci/pcie/pme.c index 001f1b78f39c..9ca0dc9ffd84 100644 --- a/trunk/drivers/pci/pcie/pme.c +++ b/trunk/drivers/pci/pcie/pme.c @@ -57,17 +57,12 @@ struct pcie_pme_service_data { */ void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable) { - int rtctl_pos; - u16 rtctl; - - rtctl_pos = pci_pcie_cap(dev) + PCI_EXP_RTCTL; - - pci_read_config_word(dev, rtctl_pos, &rtctl); if (enable) - rtctl |= PCI_EXP_RTCTL_PMEIE; + pcie_capability_set_word(dev, PCI_EXP_RTCTL, + PCI_EXP_RTCTL_PMEIE); else - rtctl &= ~PCI_EXP_RTCTL_PMEIE; - pci_write_config_word(dev, rtctl_pos, rtctl); + pcie_capability_clear_word(dev, PCI_EXP_RTCTL, + PCI_EXP_RTCTL_PMEIE); } /** @@ -120,7 +115,7 @@ static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn) if (!dev) return false; - if (pci_is_pcie(dev) && dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { + if (pci_is_pcie(dev) && pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) { down_read(&pci_bus_sem); if (pcie_pme_walk_bus(bus)) found = true; @@ -226,18 +221,15 @@ static void pcie_pme_work_fn(struct work_struct *work) struct pcie_pme_service_data *data = container_of(work, struct pcie_pme_service_data, work); struct pci_dev *port = data->srv->port; - int rtsta_pos; u32 rtsta; - rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA; - spin_lock_irq(&data->lock); for (;;) { if (data->noirq) break; - pci_read_config_dword(port, rtsta_pos, &rtsta); + pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); if (rtsta & PCI_EXP_RTSTA_PME) { /* * Clear PME status of the port. If there are other @@ -276,17 +268,14 @@ static irqreturn_t pcie_pme_irq(int irq, void *context) { struct pci_dev *port; struct pcie_pme_service_data *data; - int rtsta_pos; u32 rtsta; unsigned long flags; port = ((struct pcie_device *)context)->port; data = get_service_data((struct pcie_device *)context); - rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA; - spin_lock_irqsave(&data->lock, flags); - pci_read_config_dword(port, rtsta_pos, &rtsta); + pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); if (!(rtsta & PCI_EXP_RTSTA_PME)) { spin_unlock_irqrestore(&data->lock, flags); @@ -335,13 +324,13 @@ static void pcie_pme_mark_devices(struct pci_dev *port) struct pci_dev *dev; /* Check if this is a root port event collector. 
*/ - if (port->pcie_type != PCI_EXP_TYPE_RC_EC || !bus) + if (pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC || !bus) return; down_read(&pci_bus_sem); list_for_each_entry(dev, &bus->devices, bus_list) if (pci_is_pcie(dev) - && dev->pcie_type == PCI_EXP_TYPE_RC_END) + && pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) pcie_pme_set_native(dev, NULL); up_read(&pci_bus_sem); } diff --git a/trunk/drivers/pci/pcie/portdrv_bus.c b/trunk/drivers/pci/pcie/portdrv_bus.c index 18bf90f748f6..67be55a7f260 100644 --- a/trunk/drivers/pci/pcie/portdrv_bus.c +++ b/trunk/drivers/pci/pcie/portdrv_bus.c @@ -38,7 +38,7 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv) return 0; if ((driver->port_type != PCIE_ANY_PORT) && - (driver->port_type != pciedev->port->pcie_type)) + (driver->port_type != pci_pcie_type(pciedev->port))) return 0; return 1; diff --git a/trunk/drivers/pci/pcie/portdrv_core.c b/trunk/drivers/pci/pcie/portdrv_core.c index 75915b30ad19..aede99171e90 100644 --- a/trunk/drivers/pci/pcie/portdrv_core.c +++ b/trunk/drivers/pci/pcie/portdrv_core.c @@ -246,8 +246,7 @@ static void cleanup_service_irqs(struct pci_dev *dev) */ static int get_port_device_capability(struct pci_dev *dev) { - int services = 0, pos; - u16 reg16; + int services = 0; u32 reg32; int cap_mask = 0; int err; @@ -265,11 +264,9 @@ static int get_port_device_capability(struct pci_dev *dev) return 0; } - pos = pci_pcie_cap(dev); - pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); /* Hot-Plug Capable */ - if ((cap_mask & PCIE_PORT_SERVICE_HP) && (reg16 & PCI_EXP_FLAGS_SLOT)) { - pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, ®32); + if (cap_mask & PCIE_PORT_SERVICE_HP) { + pcie_capability_read_dword(dev, PCI_EXP_SLTCAP, ®32); if (reg32 & PCI_EXP_SLTCAP_HPC) { services |= PCIE_PORT_SERVICE_HP; /* @@ -277,10 +274,8 @@ static int get_port_device_capability(struct pci_dev *dev) * enabled by the BIOS and the hot-plug service driver * is not loaded. 
*/ - pos += PCI_EXP_SLTCTL; - pci_read_config_word(dev, pos, ®16); - reg16 &= ~(PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); - pci_write_config_word(dev, pos, reg16); + pcie_capability_clear_word(dev, PCI_EXP_SLTCTL, + PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); } } /* AER capable */ @@ -298,7 +293,7 @@ static int get_port_device_capability(struct pci_dev *dev) services |= PCIE_PORT_SERVICE_VC; /* Root ports are capable of generating PME too */ if ((cap_mask & PCIE_PORT_SERVICE_PME) - && dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) { + && pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) { services |= PCIE_PORT_SERVICE_PME; /* * Disable PME interrupt on this port in case it's been enabled @@ -336,7 +331,7 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq) device->release = release_pcie_device; /* callback to free pcie dev */ dev_set_name(device, "%s:pcie%02x", pci_name(pdev), - get_descriptor_id(pdev->pcie_type, service)); + get_descriptor_id(pci_pcie_type(pdev), service)); device->parent = &pdev->dev; device_enable_async_suspend(device); diff --git a/trunk/drivers/pci/pcie/portdrv_pci.c b/trunk/drivers/pci/pcie/portdrv_pci.c index 3a7eefcb270a..b0340bc3aae4 100644 --- a/trunk/drivers/pci/pcie/portdrv_pci.c +++ b/trunk/drivers/pci/pcie/portdrv_pci.c @@ -64,14 +64,7 @@ __setup("pcie_ports=", pcie_port_setup); */ void pcie_clear_root_pme_status(struct pci_dev *dev) { - int rtsta_pos; - u32 rtsta; - - rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA; - - pci_read_config_dword(dev, rtsta_pos, &rtsta); - rtsta |= PCI_EXP_RTSTA_PME; - pci_write_config_dword(dev, rtsta_pos, rtsta); + pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME); } static int pcie_portdrv_restore_config(struct pci_dev *dev) @@ -95,7 +88,7 @@ static int pcie_port_resume_noirq(struct device *dev) * which breaks ACPI-based runtime wakeup on PCI Express, so clear those * bits now just in case (shouldn't hurt). */ - if(pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) pcie_clear_root_pme_status(pdev); return 0; } @@ -140,9 +133,17 @@ static int pcie_port_runtime_resume(struct device *dev) { return 0; } + +static int pcie_port_runtime_idle(struct device *dev) +{ + /* Delay for a short while to prevent too frequent suspend/resume */ + pm_schedule_suspend(dev, 10); + return -EBUSY; +} #else #define pcie_port_runtime_suspend NULL #define pcie_port_runtime_resume NULL +#define pcie_port_runtime_idle NULL #endif static const struct dev_pm_ops pcie_portdrv_pm_ops = { @@ -155,6 +156,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { .resume_noirq = pcie_port_resume_noirq, .runtime_suspend = pcie_port_runtime_suspend, .runtime_resume = pcie_port_runtime_resume, + .runtime_idle = pcie_port_runtime_idle, }; #define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) @@ -186,9 +188,9 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev, int status; if (!pci_is_pcie(dev) || - ((dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && - (dev->pcie_type != PCI_EXP_TYPE_UPSTREAM) && - (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))) + ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) && + (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM) && + (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM))) return -ENODEV; if (!dev->irq && dev->pin) { @@ -200,6 +202,11 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev, return status; pci_save_state(dev); + /* + * D3cold may not work properly on some PCIe port, so disable + * it by default. 
+ */ + dev->d3cold_allowed = false; if (!pci_match_id(port_runtime_pm_black_list, dev)) pm_runtime_put_noidle(&dev->dev); diff --git a/trunk/drivers/pci/probe.c b/trunk/drivers/pci/probe.c index 6c143b4497ca..3cdba8b3f816 100644 --- a/trunk/drivers/pci/probe.c +++ b/trunk/drivers/pci/probe.c @@ -144,15 +144,13 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar) case PCI_BASE_ADDRESS_MEM_TYPE_32: break; case PCI_BASE_ADDRESS_MEM_TYPE_1M: - dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n"); + /* 1M mem BAR treated as 32-bit BAR */ break; case PCI_BASE_ADDRESS_MEM_TYPE_64: flags |= IORESOURCE_MEM_64; break; default: - dev_warn(&dev->dev, - "mem unknown type %x treated as 32-bit BAR\n", - mem_type); + /* mem unknown type treated as 32-bit BAR */ break; } return flags; @@ -173,9 +171,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, u32 l, sz, mask; u16 orig_cmd; struct pci_bus_region region; + bool bar_too_big = false, bar_disabled = false; mask = type ? PCI_ROM_ADDRESS_MASK : ~0; + /* No printks while decoding is disabled! */ if (!dev->mmio_always_on) { pci_read_config_word(dev, PCI_COMMAND, &orig_cmd); pci_write_config_word(dev, PCI_COMMAND, @@ -240,8 +240,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, goto fail; if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) { - dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", - pos); + bar_too_big = true; goto fail; } @@ -252,12 +251,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, region.start = 0; region.end = sz64; pcibios_bus_to_resource(dev, res, ®ion); + bar_disabled = true; } else { region.start = l64; region.end = l64 + sz64; pcibios_bus_to_resource(dev, res, ®ion); - dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", - pos, res); } } else { sz = pci_size(l, sz, mask); @@ -268,18 +266,23 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, region.start = l; region.end = l + sz; pcibios_bus_to_resource(dev, res, ®ion); - - dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); } - out: + goto out; + + +fail: + res->flags = 0; +out: if (!dev->mmio_always_on) pci_write_config_word(dev, PCI_COMMAND, orig_cmd); + if (bar_too_big) + dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos); + if (res->flags && !bar_disabled) + dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); + return (res->flags & IORESOURCE_MEM_64) ? 
1 : 0; - fail: - res->flags = 0; - goto out; } static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) @@ -603,10 +606,10 @@ static void pci_set_bus_speed(struct pci_bus *bus) u32 linkcap; u16 linksta; - pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap); + pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap); bus->max_bus_speed = pcie_link_speed[linkcap & 0xf]; - pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta); + pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta); pcie_update_link_speed(bus, linksta); } } @@ -929,24 +932,16 @@ void set_pcie_port_type(struct pci_dev *pdev) pdev->is_pcie = 1; pdev->pcie_cap = pos; pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); - pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; + pdev->pcie_flags_reg = reg16; pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, ®16); pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; } void set_pcie_hotplug_bridge(struct pci_dev *pdev) { - int pos; - u16 reg16; u32 reg32; - pos = pci_pcie_cap(pdev); - if (!pos) - return; - pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); - if (!(reg16 & PCI_EXP_FLAGS_SLOT)) - return; - pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, ®32); + pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, ®32); if (reg32 & PCI_EXP_SLTCAP_HPC) pdev->is_hotplug_bridge = 1; } @@ -1160,8 +1155,7 @@ int pci_cfg_space_size(struct pci_dev *dev) if (class == PCI_CLASS_BRIDGE_HOST) return pci_cfg_space_size_ext(dev); - pos = pci_pcie_cap(dev); - if (!pos) { + if (!pci_is_pcie(dev)) { pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (!pos) goto fail; @@ -1383,9 +1377,9 @@ static int only_one_child(struct pci_bus *bus) if (!parent || !pci_is_pcie(parent)) return 0; - if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT) + if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT) return 1; - if (parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM && + if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM && !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS)) return 1; return 0; @@ -1462,7 +1456,7 @@ static int pcie_find_smpss(struct pci_dev *dev, void *data) */ if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) || (dev->bus->self && - dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT))) + pci_pcie_type(dev->bus->self) != PCI_EXP_TYPE_ROOT_PORT))) *smpss = 0; if (*smpss > dev->pcie_mpss) @@ -1478,7 +1472,8 @@ static void pcie_write_mps(struct pci_dev *dev, int mps) if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { mps = 128 << dev->pcie_mpss; - if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && dev->bus->self) + if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT && + dev->bus->self) /* For "Performance", the assumption is made that * downstream communication will never be larger than * the MRRS. So, the MPS only needs to be configured diff --git a/trunk/drivers/pci/quirks.c b/trunk/drivers/pci/quirks.c index 51553179e967..7a451ff56ecc 100644 --- a/trunk/drivers/pci/quirks.c +++ b/trunk/drivers/pci/quirks.c @@ -3081,17 +3081,36 @@ static int reset_intel_generic_dev(struct pci_dev *dev, int probe) static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe) { - int pos; + int i; + u16 status; - pos = pci_find_capability(dev, PCI_CAP_ID_EXP); - if (!pos) - return -ENOTTY; + /* + * http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf + * + * The 82599 supports FLR on VFs, but FLR support is reported only + * in the PF DEVCAP (sec 9.3.10.4), not in the VF DEVCAP (sec 9.5). 
+ * Therefore, we can't use pcie_flr(), which checks the VF DEVCAP. + */ if (probe) return 0; - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_BCR_FLR); + /* Wait for Transaction Pending bit clean */ + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + goto clear; + } + + dev_err(&dev->dev, "transaction is not cleared; " + "proceeding with reset anyway\n"); + +clear: + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); + msleep(100); return 0; diff --git a/trunk/drivers/pci/search.c b/trunk/drivers/pci/search.c index 993d4a0a2469..621b162ceb69 100644 --- a/trunk/drivers/pci/search.c +++ b/trunk/drivers/pci/search.c @@ -41,7 +41,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev) continue; } /* PCI device should connect to a PCIe bridge */ - if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) { + if (pci_pcie_type(pdev) != PCI_EXP_TYPE_PCI_BRIDGE) { /* Busted hardware? */ WARN_ON_ONCE(1); return NULL; diff --git a/trunk/drivers/pci/setup-bus.c b/trunk/drivers/pci/setup-bus.c index fb506137aaee..1e808ca338f8 100644 --- a/trunk/drivers/pci/setup-bus.c +++ b/trunk/drivers/pci/setup-bus.c @@ -697,6 +697,38 @@ static resource_size_t calculate_memsize(resource_size_t size, return size; } +resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus, + unsigned long type) +{ + return 1; +} + +#define PCI_P2P_DEFAULT_MEM_ALIGN 0x100000 /* 1MiB */ +#define PCI_P2P_DEFAULT_IO_ALIGN 0x1000 /* 4KiB */ +#define PCI_P2P_DEFAULT_IO_ALIGN_1K 0x400 /* 1KiB */ + +static resource_size_t window_alignment(struct pci_bus *bus, + unsigned long type) +{ + resource_size_t align = 1, arch_align; + + if (type & IORESOURCE_MEM) + align = PCI_P2P_DEFAULT_MEM_ALIGN; + else if (type & IORESOURCE_IO) { + /* + * Per spec, I/O windows are 4K-aligned, but some + * bridges have an extension to support 1K alignment. + */ + if (bus->self->io_window_1k) + align = PCI_P2P_DEFAULT_IO_ALIGN_1K; + else + align = PCI_P2P_DEFAULT_IO_ALIGN; + } + + arch_align = pcibios_window_alignment(bus, type); + return max(align, arch_align); +} + /** * pbus_size_io() - size the io window of a given bus * @@ -717,17 +749,12 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); unsigned long size = 0, size0 = 0, size1 = 0; resource_size_t children_add_size = 0; - resource_size_t min_align = 4096, align; + resource_size_t min_align, io_align, align; if (!b_res) return; - /* - * Per spec, I/O windows are 4K-aligned, but some bridges have an - * extension to support 1K alignment. 
- */ - if (bus->self->io_window_1k) - min_align = 1024; + io_align = min_align = window_alignment(bus, IORESOURCE_IO); list_for_each_entry(dev, &bus->devices, bus_list) { int i; @@ -754,8 +781,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, } } - if (min_align > 4096) - min_align = 4096; + if (min_align > io_align) + min_align = io_align; size0 = calculate_iosize(size, min_size, size1, resource_size(b_res), min_align); @@ -785,6 +812,28 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, } } +static inline resource_size_t calculate_mem_align(resource_size_t *aligns, + int max_order) +{ + resource_size_t align = 0; + resource_size_t min_align = 0; + int order; + + for (order = 0; order <= max_order; order++) { + resource_size_t align1 = 1; + + align1 <<= (order + 20); + + if (!align) + min_align = align1; + else if (ALIGN(align + min_align, min_align) < align1) + min_align = align1 >> 1; + align += aligns[order]; + } + + return min_align; +} + /** * pbus_size_mem() - size the memory window of a given bus * @@ -864,19 +913,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, children_add_size += get_res_add_size(realloc_head, r); } } - align = 0; - min_align = 0; - for (order = 0; order <= max_order; order++) { - resource_size_t align1 = 1; - align1 <<= (order + 20); - - if (!align) - min_align = align1; - else if (ALIGN(align + min_align, min_align) < align1) - min_align = align1 >> 1; - align += aligns[order]; - } + min_align = calculate_mem_align(aligns, max_order); + min_align = max(min_align, window_alignment(bus, b_res->flags & mask)); size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); if (children_add_size > add_size) add_size = children_add_size; diff --git a/trunk/drivers/rapidio/devices/tsi721.c b/trunk/drivers/rapidio/devices/tsi721.c index 5d44252b7342..d5e1625bbac2 100644 --- a/trunk/drivers/rapidio/devices/tsi721.c +++ b/trunk/drivers/rapidio/devices/tsi721.c @@ -2219,9 +2219,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct tsi721_device *priv; - int cap; int err; - u32 regval; priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL); if (priv == NULL) { @@ -2330,20 +2328,16 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); } - cap = pci_pcie_cap(pdev); - BUG_ON(cap == 0); + BUG_ON(!pci_is_pcie(pdev)); /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */ - pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, ®val); - regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | - PCI_EXP_DEVCTL_NOSNOOP_EN); - regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT; - pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval); + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | + PCI_EXP_DEVCTL_NOSNOOP_EN, + 0x2 << MAX_READ_REQUEST_SZ_SHIFT); /* Adjust PCIe completion timeout. 
*/ - pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, ®val); - regval &= ~(0x0f); - pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2); + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2); /* * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block diff --git a/trunk/drivers/rtc/rtc-at91sam9.c b/trunk/drivers/rtc/rtc-at91sam9.c index 831868904e02..1dd61f402b04 100644 --- a/trunk/drivers/rtc/rtc-at91sam9.c +++ b/trunk/drivers/rtc/rtc-at91sam9.c @@ -58,6 +58,7 @@ struct sam9_rtc { struct rtc_device *rtcdev; u32 imr; void __iomem *gpbr; + int irq; }; #define rtt_readl(rtc, field) \ @@ -292,7 +293,7 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev) { struct resource *r, *r_gpbr; struct sam9_rtc *rtc; - int ret; + int ret, irq; u32 mr; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -302,10 +303,18 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev) return -ENODEV; } + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "failed to get interrupt resource\n"); + return irq; + } + rtc = kzalloc(sizeof *rtc, GFP_KERNEL); if (!rtc) return -ENOMEM; + rtc->irq = irq; + /* platform setup code should have handled this; sigh */ if (!device_can_wakeup(&pdev->dev)) device_init_wakeup(&pdev->dev, 1); @@ -345,11 +354,10 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev) } /* register irq handler after we know what name we'll use */ - ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt, - IRQF_SHARED, + ret = request_irq(rtc->irq, at91_rtc_interrupt, IRQF_SHARED, dev_name(&rtc->rtcdev->dev), rtc); if (ret) { - dev_dbg(&pdev->dev, "can't share IRQ %d?\n", AT91_ID_SYS); + dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq); rtc_device_unregister(rtc->rtcdev); goto fail_register; } @@ -386,7 +394,7 @@ static int __devexit at91_rtc_remove(struct platform_device *pdev) /* disable all interrupts */ rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); - free_irq(AT91_ID_SYS, rtc); + free_irq(rtc->irq, rtc); rtc_device_unregister(rtc->rtcdev); @@ -423,7 +431,7 @@ static int at91_rtc_suspend(struct platform_device *pdev, rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN); if (rtc->imr) { if (device_may_wakeup(&pdev->dev) && (mr & AT91_RTT_ALMIEN)) { - enable_irq_wake(AT91_ID_SYS); + enable_irq_wake(rtc->irq); /* don't let RTTINC cause wakeups */ if (mr & AT91_RTT_RTTINCIEN) rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); @@ -441,7 +449,7 @@ static int at91_rtc_resume(struct platform_device *pdev) if (rtc->imr) { if (device_may_wakeup(&pdev->dev)) - disable_irq_wake(AT91_ID_SYS); + disable_irq_wake(rtc->irq); mr = rtt_readl(rtc, MR); rtt_writel(rtc, MR, mr | rtc->imr); } diff --git a/trunk/drivers/scsi/ipr.c b/trunk/drivers/scsi/ipr.c index 6077c43edacc..467dc38246f9 100644 --- a/trunk/drivers/scsi/ipr.c +++ b/trunk/drivers/scsi/ipr.c @@ -6304,14 +6304,14 @@ static struct ata_port_info sata_port_info = { #ifdef CONFIG_PPC_PSERIES static const u16 ipr_blocked_processors[] = { - PVR_NORTHSTAR, - PVR_PULSAR, - PVR_POWER4, - PVR_ICESTAR, - PVR_SSTAR, - PVR_POWER4p, - PVR_630, - PVR_630p + PV_NORTHSTAR, + PV_PULSAR, + PV_POWER4, + PV_ICESTAR, + PV_SSTAR, + PV_POWER4p, + PV_630, + PV_630p }; /** @@ -6331,7 +6331,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) { for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){ - if (pvr_version_is(ipr_blocked_processors[i])) + if 
(__is_processor(ipr_blocked_processors[i])) return 1; } } diff --git a/trunk/drivers/scsi/qla2xxx/qla_nx.c b/trunk/drivers/scsi/qla2xxx/qla_nx.c index 9ce3a8f8754f..7cfdf2bd8edb 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_nx.c +++ b/trunk/drivers/scsi/qla2xxx/qla_nx.c @@ -1615,13 +1615,11 @@ qla82xx_get_fw_offs(struct qla_hw_data *ha) char * qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str) { - int pcie_reg; struct qla_hw_data *ha = vha->hw; char lwstr[6]; uint16_t lnk; - pcie_reg = pci_pcie_cap(ha->pdev); - pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk); + pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk); ha->link_width = (lnk >> 4) & 0x3f; strcpy(str, "PCIe ("); @@ -2497,7 +2495,6 @@ qla82xx_load_fw(scsi_qla_host_t *vha) int qla82xx_start_firmware(scsi_qla_host_t *vha) { - int pcie_cap; uint16_t lnk; struct qla_hw_data *ha = vha->hw; @@ -2528,8 +2525,7 @@ qla82xx_start_firmware(scsi_qla_host_t *vha) } /* Negotiated Link width */ - pcie_cap = pci_pcie_cap(ha->pdev); - pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk); + pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk); ha->link_width = (lnk >> 4) & 0x3f; /* Synchronize with Receive peg */ diff --git a/trunk/drivers/scsi/qla4xxx/ql4_nx.c b/trunk/drivers/scsi/qla4xxx/ql4_nx.c index 939d7261c37a..807bf76f1b6a 100644 --- a/trunk/drivers/scsi/qla4xxx/ql4_nx.c +++ b/trunk/drivers/scsi/qla4xxx/ql4_nx.c @@ -1566,7 +1566,6 @@ qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha) static int qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start) { - int pcie_cap; uint16_t lnk; /* scrub dma mask expansion register */ @@ -1590,8 +1589,7 @@ qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start) } /* Negotiated Link width */ - pcie_cap = pci_pcie_cap(ha->pdev); - pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk); + pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk); ha->link_width = (lnk >> 4) & 0x3f; /* Synchronize with Receive peg */ diff --git a/trunk/drivers/staging/et131x/et131x.c b/trunk/drivers/staging/et131x/et131x.c index 029725c89e58..49553f88c7b3 100644 --- a/trunk/drivers/staging/et131x/et131x.c +++ b/trunk/drivers/staging/et131x/et131x.c @@ -3995,16 +3995,14 @@ static void et131x_hwaddr_init(struct et131x_adapter *adapter) static int et131x_pci_init(struct et131x_adapter *adapter, struct pci_dev *pdev) { - int cap = pci_pcie_cap(pdev); u16 max_payload; - u16 ctl; int i, rc; rc = et131x_init_eeprom(adapter); if (rc < 0) goto out; - if (!cap) { + if (!pci_is_pcie(pdev)) { dev_err(&pdev->dev, "Missing PCIe capabilities\n"); goto err_out; } @@ -4012,7 +4010,7 @@ static int et131x_pci_init(struct et131x_adapter *adapter, /* Let's set up the PORT LOGIC Register. 
First we need to know what * the max_payload_size is */ - if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCAP, &max_payload)) { + if (pcie_capability_read_word(pdev, PCI_EXP_DEVCAP, &max_payload)) { dev_err(&pdev->dev, "Could not read PCI config space for Max Payload Size\n"); goto err_out; @@ -4049,17 +4047,10 @@ static int et131x_pci_init(struct et131x_adapter *adapter, } /* Change the max read size to 2k */ - if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl)) { + if (pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, 0x4 << 12)) { dev_err(&pdev->dev, - "Could not read PCI config space for Max read size\n"); - goto err_out; - } - - ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | (0x04 << 12); - - if (pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl)) { - dev_err(&pdev->dev, - "Could not write PCI config space for Max read size\n"); + "Couldn't change PCI config space for Max read size\n"); goto err_out; } diff --git a/trunk/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c b/trunk/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c index ddadcc3e4e7c..5abbee37cdca 100644 --- a/trunk/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c +++ b/trunk/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c @@ -31,12 +31,10 @@ static void rtl8192_parse_pci_configuration(struct pci_dev *pdev, struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); u8 tmp; - int pos; - u8 LinkCtrlReg; + u16 LinkCtrlReg; - pos = pci_find_capability(priv->pdev, PCI_CAP_ID_EXP); - pci_read_config_byte(priv->pdev, pos + PCI_EXP_LNKCTL, &LinkCtrlReg); - priv->NdisAdapter.LinkCtrlReg = LinkCtrlReg; + pcie_capability_read_word(priv->pdev, PCI_EXP_LNKCTL, &LinkCtrlReg); + priv->NdisAdapter.LinkCtrlReg = (u8)LinkCtrlReg; RT_TRACE(COMP_INIT, "Link Control Register =%x\n", priv->NdisAdapter.LinkCtrlReg); diff --git a/trunk/drivers/tty/hvc/hvc_console.c b/trunk/drivers/tty/hvc/hvc_console.c index f1d4d96a4a07..2d691eb7c40a 100644 --- a/trunk/drivers/tty/hvc/hvc_console.c +++ b/trunk/drivers/tty/hvc/hvc_console.c @@ -245,20 +245,6 @@ static void hvc_port_destruct(struct tty_port *port) kfree(hp); } -static void hvc_check_console(int index) -{ - /* Already enabled, bail out */ - if (hvc_console.flags & CON_ENABLED) - return; - - /* If this index is what the user requested, then register - * now (setup won't fail at this point). It's ok to just - * call register again if previously .setup failed. - */ - if (index == hvc_console.index) - register_console(&hvc_console); -} - /* * hvc_instantiate() is an early console discovery method which locates * consoles * prior to the vio subsystem discovering them. Hotplugged @@ -289,8 +275,12 @@ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops) if (last_hvc < index) last_hvc = index; - /* check if we need to re-register the kernel console */ - hvc_check_console(index); + /* if this index is what the user requested, then register + * now (setup won't fail at this point). It's ok to just + * call register again if previously .setup failed. 
+ */ + if (index == hvc_console.index) + register_console(&hvc_console); return 0; } @@ -868,15 +858,10 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data, i = ++last_hvc; hp->index = i; - cons_ops[i] = ops; - vtermnos[i] = vtermno; list_add_tail(&(hp->next), &hvc_structs); spin_unlock(&hvc_structs_lock); - /* check if we need to re-register the kernel console */ - hvc_check_console(i); - return hp; } EXPORT_SYMBOL_GPL(hvc_alloc); @@ -889,12 +874,8 @@ int hvc_remove(struct hvc_struct *hp) tty = tty_port_tty_get(&hp->port); spin_lock_irqsave(&hp->lock, flags); - if (hp->index < MAX_NR_HVC_CONSOLES) { - console_lock(); + if (hp->index < MAX_NR_HVC_CONSOLES) vtermnos[hp->index] = -1; - cons_ops[hp->index] = NULL; - console_unlock(); - } /* Don't whack hp->irq because tty_hangup() will need to free the irq. */ diff --git a/trunk/drivers/tty/hvc/hvc_vio.c b/trunk/drivers/tty/hvc/hvc_vio.c index 070c0ee68642..ee307799271a 100644 --- a/trunk/drivers/tty/hvc/hvc_vio.c +++ b/trunk/drivers/tty/hvc/hvc_vio.c @@ -230,69 +230,6 @@ static const struct hv_ops hvterm_hvsi_ops = { .tiocmset = hvterm_hvsi_tiocmset, }; -static void udbg_hvc_putc(char c) -{ - int count = -1; - - if (!hvterm_privs[0]) - return; - - if (c == '\n') - udbg_hvc_putc('\r'); - - do { - switch(hvterm_privs[0]->proto) { - case HV_PROTOCOL_RAW: - count = hvterm_raw_put_chars(0, &c, 1); - break; - case HV_PROTOCOL_HVSI: - count = hvterm_hvsi_put_chars(0, &c, 1); - break; - } - } while(count == 0); -} - -static int udbg_hvc_getc_poll(void) -{ - int rc = 0; - char c; - - if (!hvterm_privs[0]) - return -1; - - switch(hvterm_privs[0]->proto) { - case HV_PROTOCOL_RAW: - rc = hvterm_raw_get_chars(0, &c, 1); - break; - case HV_PROTOCOL_HVSI: - rc = hvterm_hvsi_get_chars(0, &c, 1); - break; - } - if (!rc) - return -1; - return c; -} - -static int udbg_hvc_getc(void) -{ - int ch; - - if (!hvterm_privs[0]) - return -1; - - for (;;) { - ch = udbg_hvc_getc_poll(); - if (ch == -1) { - /* This shouldn't be needed...but... */ - volatile unsigned long delay; - for (delay=0; delay < 2000000; delay++) - ; - } else { - return ch; - } - } -} - static int __devinit hvc_vio_probe(struct vio_dev *vdev, const struct vio_device_id *id) { @@ -352,13 +289,6 @@ static int __devinit hvc_vio_probe(struct vio_dev *vdev, return PTR_ERR(hp); dev_set_drvdata(&vdev->dev, hp); - /* register udbg if it's not there already for console 0 */ - if (hp->index == 0 && !udbg_putc) { - udbg_putc = udbg_hvc_putc; - udbg_getc = udbg_hvc_getc; - udbg_getc_poll = udbg_hvc_getc_poll; - } - return 0; } @@ -401,6 +331,59 @@ static void __exit hvc_vio_exit(void) } module_exit(hvc_vio_exit); +static void udbg_hvc_putc(char c) +{ + int count = -1; + + if (c == '\n') + udbg_hvc_putc('\r'); + + do { + switch(hvterm_priv0.proto) { + case HV_PROTOCOL_RAW: + count = hvterm_raw_put_chars(0, &c, 1); + break; + case HV_PROTOCOL_HVSI: + count = hvterm_hvsi_put_chars(0, &c, 1); + break; + } + } while(count == 0); +} + +static int udbg_hvc_getc_poll(void) +{ + int rc = 0; + char c; + + switch(hvterm_priv0.proto) { + case HV_PROTOCOL_RAW: + rc = hvterm_raw_get_chars(0, &c, 1); + break; + case HV_PROTOCOL_HVSI: + rc = hvterm_hvsi_get_chars(0, &c, 1); + break; + } + if (!rc) + return -1; + return c; +} + +static int udbg_hvc_getc(void) +{ + int ch; + for (;;) { + ch = udbg_hvc_getc_poll(); + if (ch == -1) { + /* This shouldn't be needed...but... 
*/ + volatile unsigned long delay; + for (delay=0; delay < 2000000; delay++) + ; + } else { + return ch; + } + } +} + void __init hvc_vio_init_early(void) { struct device_node *stdout_node; diff --git a/trunk/drivers/video/auo_k190x.c b/trunk/drivers/video/auo_k190x.c index 77da6a2f43dc..c03ecdd31e4c 100644 --- a/trunk/drivers/video/auo_k190x.c +++ b/trunk/drivers/video/auo_k190x.c @@ -987,7 +987,6 @@ int __devinit auok190x_common_probe(struct platform_device *pdev, fb_dealloc_cmap(&info->cmap); err_cmap: fb_deferred_io_cleanup(info); - kfree(info->fbdefio); err_defio: vfree((void *)info->screen_base); err_irq: @@ -1022,7 +1021,6 @@ int __devexit auok190x_common_remove(struct platform_device *pdev) fb_dealloc_cmap(&info->cmap); fb_deferred_io_cleanup(info); - kfree(info->fbdefio); vfree((void *)info->screen_base); diff --git a/trunk/drivers/video/console/bitblit.c b/trunk/drivers/video/console/bitblit.c index 28b1a834906b..61b182bf32a2 100644 --- a/trunk/drivers/video/console/bitblit.c +++ b/trunk/drivers/video/console/bitblit.c @@ -162,7 +162,7 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info, image.depth = 1; if (attribute) { - buf = kmalloc(cellsize, GFP_KERNEL); + buf = kmalloc(cellsize, GFP_ATOMIC); if (!buf) return; } diff --git a/trunk/drivers/video/console/fbcon.c b/trunk/drivers/video/console/fbcon.c index 88e92041d8f0..fdefa8fd72c4 100644 --- a/trunk/drivers/video/console/fbcon.c +++ b/trunk/drivers/video/console/fbcon.c @@ -449,7 +449,7 @@ static int __init fb_console_setup(char *this_opt) while ((options = strsep(&this_opt, ",")) != NULL) { if (!strncmp(options, "font:", 5)) - strcpy(fontname, options + 5); + strlcpy(fontname, options + 5, sizeof(fontname)); if (!strncmp(options, "scrollback:", 11)) { options += 11; diff --git a/trunk/drivers/video/mb862xx/mb862xxfbdrv.c b/trunk/drivers/video/mb862xx/mb862xxfbdrv.c index 00ce1f34b496..57d940be5f3d 100644 --- a/trunk/drivers/video/mb862xx/mb862xxfbdrv.c +++ b/trunk/drivers/video/mb862xx/mb862xxfbdrv.c @@ -328,6 +328,8 @@ static int mb862xxfb_ioctl(struct fb_info *fbi, unsigned int cmd, case MB862XX_L1_SET_CFG: if (copy_from_user(l1_cfg, argp, sizeof(*l1_cfg))) return -EFAULT; + if (l1_cfg->dh == 0 || l1_cfg->dw == 0) + return -EINVAL; if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) { /* downscaling */ outreg(cap, GC_CAP_CSC, diff --git a/trunk/drivers/video/omap2/dss/sdi.c b/trunk/drivers/video/omap2/dss/sdi.c index 5d31699fbd3c..f43bfe17b3b6 100644 --- a/trunk/drivers/video/omap2/dss/sdi.c +++ b/trunk/drivers/video/omap2/dss/sdi.c @@ -105,6 +105,20 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) sdi_config_lcd_manager(dssdev); + /* + * LCLK and PCLK divisors are located in shadow registers, and we + * normally write them to DISPC registers when enabling the output. + * However, SDI uses pck-free as source clock for its PLL, and pck-free + * is affected by the divisors. And as we need the PLL before enabling + * the output, we need to write the divisors early. + * + * It seems just writing to the DISPC register is enough, and we don't + * need to care about the shadow register mechanism for pck-free. The + * exact reason for this is unknown. 
+ */ + dispc_mgr_set_clock_div(dssdev->manager->id, + &sdi.mgr_config.clock_info); + dss_sdi_init(dssdev->phy.sdi.datapairs); r = dss_sdi_enable(); if (r) diff --git a/trunk/drivers/video/omap2/omapfb/omapfb-main.c b/trunk/drivers/video/omap2/omapfb/omapfb-main.c index 08ec1a7103f2..fc671d3d8004 100644 --- a/trunk/drivers/video/omap2/omapfb/omapfb-main.c +++ b/trunk/drivers/video/omap2/omapfb/omapfb-main.c @@ -1192,7 +1192,7 @@ static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green, break; if (regno < 16) { - u16 pal; + u32 pal; pal = ((red >> (16 - var->red.length)) << var->red.offset) | ((green >> (16 - var->green.length)) << diff --git a/trunk/drivers/video/ps3fb.c b/trunk/drivers/video/ps3fb.c index 4e292f29bf5d..213fbbcf613b 100644 --- a/trunk/drivers/video/ps3fb.c +++ b/trunk/drivers/video/ps3fb.c @@ -31,6 +31,7 @@ #include #include +#include #include #include #include @@ -1140,7 +1141,7 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev) */ fb_start = ps3fb_videomemory.address + GPU_FB_START; info->screen_base = (char __force __iomem *)fb_start; - info->fix.smem_start = __pa(fb_start); + info->fix.smem_start = virt_to_abs(fb_start); info->fix.smem_len = ps3fb_videomemory.size - GPU_FB_START; info->pseudo_palette = par->pseudo_palette; diff --git a/trunk/drivers/xen/swiotlb-xen.c b/trunk/drivers/xen/swiotlb-xen.c index 1afb4fba11b4..4d519488d304 100644 --- a/trunk/drivers/xen/swiotlb-xen.c +++ b/trunk/drivers/xen/swiotlb-xen.c @@ -232,7 +232,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, return ret; if (hwdev && hwdev->coherent_dma_mask) - dma_mask = hwdev->coherent_dma_mask; + dma_mask = dma_alloc_coherent_mask(hwdev, flags); phys = virt_to_phys(ret); dev_addr = xen_phys_to_bus(phys); diff --git a/trunk/drivers/xen/xen-pciback/pci_stub.c b/trunk/drivers/xen/xen-pciback/pci_stub.c index 097e536e8672..03342728bf23 100644 --- a/trunk/drivers/xen/xen-pciback/pci_stub.c +++ b/trunk/drivers/xen/xen-pciback/pci_stub.c @@ -353,16 +353,16 @@ static int __devinit pcistub_init_device(struct pci_dev *dev) if (err) goto config_release; - dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n"); - __pci_reset_function_locked(dev); - /* We need the device active to save the state. */ dev_dbg(&dev->dev, "save state of device\n"); pci_save_state(dev); dev_data->pci_saved_state = pci_store_saved_state(dev); if (!dev_data->pci_saved_state) dev_err(&dev->dev, "Could not store PCI conf saved state!\n"); - + else { + dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n"); + __pci_reset_function_locked(dev); + } /* Now disable the device (this also ensures some private device * data is setup before we export) */ diff --git a/trunk/include/linux/kernel.h b/trunk/include/linux/kernel.h index 604382143bcf..594b419b7d20 100644 --- a/trunk/include/linux/kernel.h +++ b/trunk/include/linux/kernel.h @@ -82,10 +82,18 @@ __x - (__x % (y)); \ } \ ) + +/* + * Divide positive or negative dividend by positive divisor and round + * to closest integer. Result is undefined for negative divisors. + */ #define DIV_ROUND_CLOSEST(x, divisor)( \ { \ - typeof(divisor) __divisor = divisor; \ - (((x) + ((__divisor) / 2)) / (__divisor)); \ + typeof(x) __x = x; \ + typeof(divisor) __d = divisor; \ + (((typeof(x))-1) >= 0 || (__x) >= 0) ? 
\ + (((__x) + ((__d) / 2)) / (__d)) : \ + (((__x) - ((__d) / 2)) / (__d)); \ } \ ) diff --git a/trunk/include/linux/mmc/card.h b/trunk/include/linux/mmc/card.h index 111aca5e97f3..4b27f9f503e4 100644 --- a/trunk/include/linux/mmc/card.h +++ b/trunk/include/linux/mmc/card.h @@ -239,6 +239,7 @@ struct mmc_card { #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ +#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */ /* byte mode */ unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */ #define MMC_NO_POWER_NOTIFICATION 0 diff --git a/trunk/include/linux/pci.h b/trunk/include/linux/pci.h index 5faa8310eec9..2c755243eef2 100644 --- a/trunk/include/linux/pci.h +++ b/trunk/include/linux/pci.h @@ -254,10 +254,10 @@ struct pci_dev { u8 revision; /* PCI revision, low byte of class word */ u8 hdr_type; /* PCI header type (`multi' flag masked out) */ u8 pcie_cap; /* PCI-E capability offset */ - u8 pcie_type:4; /* PCI-E device/port type */ u8 pcie_mpss:3; /* PCI-E Max Payload Size Supported */ u8 rom_base_reg; /* which config register controls the ROM */ u8 pin; /* which interrupt pin this device uses */ + u16 pcie_flags_reg; /* cached PCI-E Capabilities Register */ struct pci_driver *driver; /* which driver has allocated this device */ u64 dma_mask; /* Mask of the bits of bus address this @@ -816,6 +816,39 @@ static inline int pci_write_config_dword(const struct pci_dev *dev, int where, return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); } +int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); +int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); +int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); +int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val); +int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); +int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos, + u32 clear, u32 set); + +static inline int pcie_capability_set_word(struct pci_dev *dev, int pos, + u16 set) +{ + return pcie_capability_clear_and_set_word(dev, pos, 0, set); +} + +static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos, + u32 set) +{ + return pcie_capability_clear_and_set_dword(dev, pos, 0, set); +} + +static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear) +{ + return pcie_capability_clear_and_set_word(dev, pos, clear, 0); +} + +static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos, + u32 clear) +{ + return pcie_capability_clear_and_set_dword(dev, pos, clear, 0); +} + /* user-space driven config access */ int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); @@ -1031,6 +1064,8 @@ int pci_cfg_space_size_ext(struct pci_dev *dev); int pci_cfg_space_size(struct pci_dev *dev); unsigned char pci_bus_max_busnr(struct pci_bus *bus); void pci_setup_bridge(struct pci_bus *bus); +resource_size_t pcibios_window_alignment(struct pci_bus *bus, + unsigned long type); #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0) #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1) @@ -1650,6 +1685,15 @@ static inline bool pci_is_pcie(struct pci_dev *dev) return !!pci_pcie_cap(dev); } +/** + * pci_pcie_type - get the PCIe device/port type + * @dev: PCI 
device + */ +static inline int pci_pcie_type(const struct pci_dev *dev) +{ + return (dev->pcie_flags_reg & PCI_EXP_FLAGS_TYPE) >> 4; +} + void pci_request_acs(void); bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); bool pci_acs_path_enabled(struct pci_dev *start, diff --git a/trunk/include/linux/pci_regs.h b/trunk/include/linux/pci_regs.h index 7fb75b143755..3958f70f3202 100644 --- a/trunk/include/linux/pci_regs.h +++ b/trunk/include/linux/pci_regs.h @@ -549,6 +549,7 @@ #define PCI_EXP_LNKCAP2_SLS_8_0GB 0x04 /* Current Link Speed 8.0GT/s */ #define PCI_EXP_LNKCAP2_CROSSLINK 0x100 /* Crosslink supported */ #define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ +#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ #define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */ /* Extended Capabilities (PCI-X 2.0 and Express) */ diff --git a/trunk/mm/mempolicy.c b/trunk/mm/mempolicy.c index bd92431d4c49..4ada3be6e252 100644 --- a/trunk/mm/mempolicy.c +++ b/trunk/mm/mempolicy.c @@ -2562,7 +2562,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) break; default: - BUG(); + return -EINVAL; } l = strlen(policy_modes[mode]); diff --git a/trunk/net/socket.c b/trunk/net/socket.c index a5471f804d99..edc3c4af9085 100644 --- a/trunk/net/socket.c +++ b/trunk/net/socket.c @@ -2604,7 +2604,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock, err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); set_fs(old_fs); if (!err) - err = compat_put_timeval(up, &ktv); + err = compat_put_timeval(&ktv, up); return err; } @@ -2620,7 +2620,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock, err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); set_fs(old_fs); if (!err) - err = compat_put_timespec(up, &kts); + err = compat_put_timespec(&kts, up); return err; } diff --git a/trunk/scripts/Makefile.fwinst b/trunk/scripts/Makefile.fwinst index 6bf8e87f1dcf..c3f69ae275d1 100644 --- a/trunk/scripts/Makefile.fwinst +++ b/trunk/scripts/Makefile.fwinst @@ -42,7 +42,7 @@ quiet_cmd_install = INSTALL $(subst $(srctree)/,,$@) $(installed-fw-dirs): $(call cmd,mkdir) -$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $(INSTALL_FW_PATH)/$$(dir %) +$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $$(dir $(INSTALL_FW_PATH)/%) $(call cmd,install) PHONY += __fw_install __fw_modinst FORCE diff --git a/trunk/scripts/link-vmlinux.sh b/trunk/scripts/link-vmlinux.sh index 4629038c9e5a..4235a6361fec 100644 --- a/trunk/scripts/link-vmlinux.sh +++ b/trunk/scripts/link-vmlinux.sh @@ -211,7 +211,7 @@ if [ -n "${CONFIG_KALLSYMS}" ]; then if ! cmp -s System.map .tmp_System.map; then echo >&2 Inconsistent kallsyms data - echo >&2 echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround + echo >&2 Try "make KALLSYMS_EXTRA_PASS=1" as a workaround cleanup exit 1 fi diff --git a/trunk/sound/pci/hda/hda_codec.c b/trunk/sound/pci/hda/hda_codec.c index f560051a949e..f25c24c743f9 100644 --- a/trunk/sound/pci/hda/hda_codec.c +++ b/trunk/sound/pci/hda/hda_codec.c @@ -1209,6 +1209,9 @@ static void snd_hda_codec_free(struct hda_codec *codec) kfree(codec); } +static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec, + hda_nid_t fg, unsigned int power_state); + static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg, unsigned int power_state); @@ -1317,6 +1320,10 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, AC_VERB_GET_SUBSYSTEM_ID, 0); } + codec->epss = snd_hda_codec_get_supported_ps(codec, + codec->afg ? 
codec->afg : codec->mfg, + AC_PWRST_EPSS); + /* power-up all before initialization */ hda_set_power_state(codec, codec->afg ? codec->afg : codec->mfg, @@ -3543,8 +3550,7 @@ static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg, /* this delay seems necessary to avoid click noise at power-down */ if (power_state == AC_PWRST_D3) { /* transition time less than 10ms for power down */ - bool epss = snd_hda_codec_get_supported_ps(codec, fg, AC_PWRST_EPSS); - msleep(epss ? 10 : 100); + msleep(codec->epss ? 10 : 100); } /* repeat power states setting at most 10 times*/ diff --git a/trunk/sound/pci/hda/hda_codec.h b/trunk/sound/pci/hda/hda_codec.h index 7fbc1bcaf1a9..e5a7e19a8071 100644 --- a/trunk/sound/pci/hda/hda_codec.h +++ b/trunk/sound/pci/hda/hda_codec.h @@ -862,6 +862,7 @@ struct hda_codec { unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */ unsigned int no_jack_detect:1; /* Machine has no jack-detection */ unsigned int pcm_format_first:1; /* PCM format must be set first */ + unsigned int epss:1; /* supporting EPSS? */ #ifdef CONFIG_SND_HDA_POWER_SAVE unsigned int power_on :1; /* current (global) power-state */ int power_transition; /* power-state in transition */ diff --git a/trunk/sound/pci/hda/patch_sigmatel.c b/trunk/sound/pci/hda/patch_sigmatel.c index ea5775a1a7db..6f806d3e56bb 100644 --- a/trunk/sound/pci/hda/patch_sigmatel.c +++ b/trunk/sound/pci/hda/patch_sigmatel.c @@ -4543,6 +4543,9 @@ static void stac92xx_line_out_detect(struct hda_codec *codec, struct auto_pin_cfg *cfg = &spec->autocfg; int i; + if (cfg->speaker_outs == 0) + return; + for (i = 0; i < cfg->line_outs; i++) { if (presence) break; @@ -5531,6 +5534,7 @@ static int patch_stac92hd83xxx(struct hda_codec *codec) snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e); } + codec->epss = 0; /* longer delay needed for D3 */ codec->no_trigger_sense = 1; codec->spec = spec; diff --git a/trunk/sound/usb/card.c b/trunk/sound/usb/card.c index d5b5c3388e28..4a469f0cb6d4 100644 --- a/trunk/sound/usb/card.c +++ b/trunk/sound/usb/card.c @@ -553,7 +553,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, struct snd_usb_audio *chip) { struct snd_card *card; - struct list_head *p; + struct list_head *p, *n; if (chip == (void *)-1L) return; @@ -570,7 +570,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, snd_usb_stream_disconnect(p); } /* release the endpoint resources */ - list_for_each(p, &chip->ep_list) { + list_for_each_safe(p, n, &chip->ep_list) { snd_usb_endpoint_free(p); } /* release the midi resources */ diff --git a/trunk/sound/usb/endpoint.c b/trunk/sound/usb/endpoint.c index c41181202688..d6e2bb49c59c 100644 --- a/trunk/sound/usb/endpoint.c +++ b/trunk/sound/usb/endpoint.c @@ -141,7 +141,7 @@ int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep) * * For implicit feedback, next_packet_size() is unused. */ -static int next_packet_size(struct snd_usb_endpoint *ep) +int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep) { unsigned long flags; int ret; @@ -177,15 +177,6 @@ static void retire_inbound_urb(struct snd_usb_endpoint *ep, ep->retire_data_urb(ep->data_subs, urb); } -static void prepare_outbound_urb_sizes(struct snd_usb_endpoint *ep, - struct snd_urb_ctx *ctx) -{ - int i; - - for (i = 0; i < ctx->packets; ++i) - ctx->packet_size[i] = next_packet_size(ep); -} - /* * Prepare a PLAYBACK urb for submission to the bus. 
*/ @@ -370,7 +361,6 @@ static void snd_complete_urb(struct urb *urb) goto exit_clear; } - prepare_outbound_urb_sizes(ep, ctx); prepare_outbound_urb(ep, ctx); } else { retire_inbound_urb(ep, ctx); @@ -799,7 +789,9 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, /** * snd_usb_endpoint_start: start an snd_usb_endpoint * - * @ep: the endpoint to start + * @ep: the endpoint to start + * @can_sleep: flag indicating whether the operation is executed in + * non-atomic context * * A call to this function will increment the use count of the endpoint. * In case it is not already running, the URBs for this endpoint will be @@ -809,7 +801,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, * * Returns an error if the URB submission failed, 0 in all other cases. */ -int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) +int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep) { int err; unsigned int i; @@ -821,6 +813,11 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) if (++ep->use_count != 1) return 0; + /* just to be sure */ + deactivate_urbs(ep, 0, can_sleep); + if (can_sleep) + wait_clear_urbs(ep); + ep->active_mask = 0; ep->unlink_mask = 0; ep->phase = 0; @@ -850,7 +847,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) goto __error; if (usb_pipeout(ep->pipe)) { - prepare_outbound_urb_sizes(ep, urb->context); prepare_outbound_urb(ep, urb->context); } else { prepare_inbound_urb(ep, urb->context); diff --git a/trunk/sound/usb/endpoint.h b/trunk/sound/usb/endpoint.h index ee2723fb174f..cbbbdf226d66 100644 --- a/trunk/sound/usb/endpoint.h +++ b/trunk/sound/usb/endpoint.h @@ -13,7 +13,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, struct audioformat *fmt, struct snd_usb_endpoint *sync_ep); -int snd_usb_endpoint_start(struct snd_usb_endpoint *ep); +int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep); void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep, int force, int can_sleep, int wait); int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep); @@ -21,6 +21,7 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep); void snd_usb_endpoint_free(struct list_head *head); int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep); +int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep); void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep, struct snd_usb_endpoint *sender, diff --git a/trunk/sound/usb/pcm.c b/trunk/sound/usb/pcm.c index 62ec808ed792..fd5e982fc98c 100644 --- a/trunk/sound/usb/pcm.c +++ b/trunk/sound/usb/pcm.c @@ -212,7 +212,7 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface, } } -static int start_endpoints(struct snd_usb_substream *subs) +static int start_endpoints(struct snd_usb_substream *subs, int can_sleep) { int err; @@ -225,7 +225,7 @@ static int start_endpoints(struct snd_usb_substream *subs) snd_printdd(KERN_DEBUG "Starting data EP @%p\n", ep); ep->data_subs = subs; - err = snd_usb_endpoint_start(ep); + err = snd_usb_endpoint_start(ep, can_sleep); if (err < 0) { clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags); return err; @@ -236,10 +236,25 @@ static int start_endpoints(struct snd_usb_substream *subs) !test_and_set_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags)) { struct snd_usb_endpoint *ep = subs->sync_endpoint; + if (subs->data_endpoint->iface != subs->sync_endpoint->iface || + subs->data_endpoint->alt_idx != subs->sync_endpoint->alt_idx) { + err = usb_set_interface(subs->dev, + subs->sync_endpoint->iface, + 
subs->sync_endpoint->alt_idx); + if (err < 0) { + snd_printk(KERN_ERR + "%d:%d:%d: cannot set interface (%d)\n", + subs->dev->devnum, + subs->sync_endpoint->iface, + subs->sync_endpoint->alt_idx, err); + return -EIO; + } + } + snd_printdd(KERN_DEBUG "Starting sync EP @%p\n", ep); ep->sync_slave = subs->data_endpoint; - err = snd_usb_endpoint_start(ep); + err = snd_usb_endpoint_start(ep, can_sleep); if (err < 0) { clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags); return err; @@ -544,13 +559,10 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream) subs->last_frame_number = 0; runtime->delay = 0; - /* clear the pending deactivation on the target EPs */ - deactivate_endpoints(subs); - /* for playback, submit the URBs now; otherwise, the first hwptr_done * updates for all URBs would happen at the same time when starting */ if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) - return start_endpoints(subs); + return start_endpoints(subs, 1); return 0; } @@ -1032,6 +1044,7 @@ static void prepare_playback_urb(struct snd_usb_substream *subs, struct urb *urb) { struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; + struct snd_usb_endpoint *ep = subs->data_endpoint; struct snd_urb_ctx *ctx = urb->context; unsigned int counts, frames, bytes; int i, stride, period_elapsed = 0; @@ -1043,7 +1056,11 @@ static void prepare_playback_urb(struct snd_usb_substream *subs, urb->number_of_packets = 0; spin_lock_irqsave(&subs->lock, flags); for (i = 0; i < ctx->packets; i++) { - counts = ctx->packet_size[i]; + if (ctx->packet_size[i]) + counts = ctx->packet_size[i]; + else + counts = snd_usb_endpoint_next_packet_size(ep); + /* set up descriptor */ urb->iso_frame_desc[i].offset = frames * stride; urb->iso_frame_desc[i].length = counts * stride; @@ -1094,7 +1111,16 @@ static void prepare_playback_urb(struct snd_usb_substream *subs, subs->hwptr_done += bytes; if (subs->hwptr_done >= runtime->buffer_size * stride) subs->hwptr_done -= runtime->buffer_size * stride; + + /* update delay with exact number of samples queued */ + runtime->delay = subs->last_delay; runtime->delay += frames; + subs->last_delay = runtime->delay; + + /* realign last_frame_number */ + subs->last_frame_number = usb_get_current_frame_number(subs->dev); + subs->last_frame_number &= 0xFF; /* keep 8 LSBs */ + spin_unlock_irqrestore(&subs->lock, flags); urb->transfer_buffer_length = bytes; if (period_elapsed) @@ -1112,12 +1138,26 @@ static void retire_playback_urb(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; int stride = runtime->frame_bits >> 3; int processed = urb->transfer_buffer_length / stride; + int est_delay; spin_lock_irqsave(&subs->lock, flags); - if (processed > runtime->delay) - runtime->delay = 0; + est_delay = snd_usb_pcm_delay(subs, runtime->rate); + /* update delay with exact number of samples played */ + if (processed > subs->last_delay) + subs->last_delay = 0; else - runtime->delay -= processed; + subs->last_delay -= processed; + runtime->delay = subs->last_delay; + + /* + * Report when delay estimate is off by more than 2ms. + * The error should be lower than 2ms since the estimate relies + * on two reads of a counter updated every ms. 
+	 */
+	if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
+		snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n",
+			est_delay, subs->last_delay);
+
 	spin_unlock_irqrestore(&subs->lock, flags);
 }
@@ -1175,7 +1215,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream

 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
-		err = start_endpoints(subs);
+		err = start_endpoints(subs, 0);
 		if (err < 0)
 			return err;
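
The driver hunks above (et131x, rtl8192e) and the new declarations in the include/linux/pci.h hunk all make the same change: open-coded PCI Express capability accesses via pci_find_capability() plus pci_read_config_word()/pci_write_config_word() at cap + offset are replaced by the pcie_capability_*() accessors, which locate the capability themselves. A minimal before/after sketch, modeled on the et131x change; the helper names and the READRQ value come from the hunks above, while the wrapper functions and their error handling are illustrative only:

	#include <linux/pci.h>

	/* Old style: find the PCIe capability offset, then read-modify-write
	 * the Device Control register by hand. */
	static int set_max_read_request_old(struct pci_dev *pdev)
	{
		int cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
		u16 ctl;

		if (!cap)
			return -ENODEV;
		if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl))
			return -EIO;
		ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | (0x4 << 12);	/* 2048 bytes */
		return pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
	}

	/* New style: one call performs the clear-and-set on the Device
	 * Control register, as in the et131x hunk above. */
	static int set_max_read_request_new(struct pci_dev *pdev)
	{
		return pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
							  PCI_EXP_DEVCTL_READRQ,
							  0x4 << 12);
	}

The read-only accessors follow the same shape: pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &val) replaces the pci_find_capability()/pci_read_config_byte() pair removed from rtl8192e above.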
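
The include/linux/kernel.h hunk changes DIV_ROUND_CLOSEST() so that negative dividends round to the nearest integer instead of toward zero, and its new comment notes that the result stays undefined for negative divisors. A small sketch of the behavioral difference, illustrative only and not part of the patch:

	#include <linux/kernel.h>

	static int demo_div_round_closest(void)
	{
		/* Positive dividends are unchanged: 1.75 rounds up to 2. */
		int up = DIV_ROUND_CLOSEST(7, 4);	/* (7 + 2) / 4 == 2 */

		/* Negative dividends now round to nearest: -1.75 becomes -2.
		 * The old macro computed (-7 + 2) / 4 and returned -1. */
		int down = DIV_ROUND_CLOSEST(-7, 4);	/* (-7 - 2) / 4 == -2 */

		return up + down;	/* 0 */
	}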