From 573cccfa292dc3322ec2b3dd83878fde1e5840ff Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 12 Aug 2008 17:52:51 -0500 Subject: [PATCH] --- yaml --- r: 108221 b: refs/heads/master c: 40c42076ebd362dc69210cccea101ac80b6d4bd4 h: refs/heads/master i: 108219: bf0ef6061bb1ed02257abdd5cf78ba444f9fc850 v: v3 --- [refs] | 2 +- trunk/Documentation/lguest/lguest.c | 23 +- trunk/arch/x86/kernel/apic_32.c | 14 +- trunk/arch/x86/kernel/cpu/bugs.c | 6 +- trunk/arch/x86/kernel/io_apic_32.c | 6 +- trunk/arch/x86/kernel/io_apic_64.c | 25 +- trunk/arch/x86/kernel/mpparse.c | 11 +- trunk/arch/x86/kernel/pci-calgary_64.c | 2 +- trunk/arch/x86/kernel/setup.c | 22 +- trunk/arch/x86/kernel/smpboot.c | 22 +- trunk/arch/x86/kernel/vmi_32.c | 3 +- trunk/arch/x86/mm/pgtable.c | 3 - trunk/drivers/char/agp/agp.h | 3 - trunk/drivers/char/agp/ali-agp.c | 10 +- trunk/drivers/char/agp/amd-k7-agp.c | 10 +- trunk/drivers/char/agp/amd64-agp.c | 51 ++- trunk/drivers/char/agp/ati-agp.c | 7 +- trunk/drivers/char/agp/backend.c | 28 +- trunk/drivers/char/agp/generic.c | 41 +-- trunk/drivers/char/agp/intel-agp.c | 83 +++-- trunk/drivers/char/agp/isoch.c | 37 ++- trunk/drivers/char/agp/sis-agp.c | 17 +- trunk/drivers/char/agp/sworks-agp.c | 25 +- trunk/drivers/char/agp/uninorth-agp.c | 32 +- .../scsi/device_handler/scsi_dh_alua.c | 2 +- .../drivers/scsi/device_handler/scsi_dh_emc.c | 2 +- .../scsi/device_handler/scsi_dh_hp_sw.c | 2 +- .../scsi/device_handler/scsi_dh_rdac.c | 2 +- trunk/fs/jbd/transaction.c | 4 +- trunk/fs/jbd2/transaction.c | 4 +- trunk/include/asm-x86/efi.h | 2 +- trunk/include/asm-x86/hw_irq.h | 12 +- trunk/include/asm-x86/irq_vectors.h | 10 +- trunk/include/linux/agp_backend.h | 5 - trunk/include/linux/lockdep.h | 70 ++--- trunk/include/linux/rcuclassic.h | 2 +- trunk/include/linux/sched.h | 31 +- trunk/include/linux/spinlock.h | 6 - trunk/include/linux/spinlock_api_smp.h | 2 - trunk/kernel/Kconfig.hz | 2 +- trunk/kernel/cpu.c | 5 +- trunk/kernel/lockdep.c | 295 ++++-------------- trunk/kernel/lockdep_internals.h | 6 +- trunk/kernel/lockdep_proc.c | 37 ++- trunk/kernel/posix-timers.c | 19 +- trunk/kernel/sched.c | 23 +- trunk/kernel/sched_clock.c | 178 +++++++---- trunk/kernel/sched_fair.c | 21 +- trunk/kernel/sched_rt.c | 8 +- trunk/kernel/signal.c | 1 - trunk/kernel/smp.c | 54 +--- trunk/kernel/spinlock.c | 11 - trunk/kernel/time/tick-sched.c | 2 + trunk/kernel/workqueue.c | 24 +- trunk/lib/debug_locks.c | 2 - trunk/mm/mmap.c | 20 +- 56 files changed, 521 insertions(+), 826 deletions(-) diff --git a/[refs] b/[refs] index 4758ff17f2ea..891f9f76a479 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 88fa08f67bee1a0c765237bdac106a32872f57d2 +refs/heads/master: 40c42076ebd362dc69210cccea101ac80b6d4bd4 diff --git a/trunk/Documentation/lguest/lguest.c b/trunk/Documentation/lguest/lguest.c index b88b0ea54e90..655414821edc 100644 --- a/trunk/Documentation/lguest/lguest.c +++ b/trunk/Documentation/lguest/lguest.c @@ -1447,21 +1447,6 @@ static void configure_device(int fd, const char *tapif, u32 ipaddr) err(1, "Bringing interface %s up", tapif); } -static void get_mac(int fd, const char *tapif, unsigned char hwaddr[6]) -{ - struct ifreq ifr; - - memset(&ifr, 0, sizeof(ifr)); - strcpy(ifr.ifr_name, tapif); - - /* SIOC stands for Socket I/O Control. G means Get (vs S for Set - * above). IF means Interface, and HWADDR is hardware address. - * Simple! 
*/ - if (ioctl(fd, SIOCGIFHWADDR, &ifr) != 0) - err(1, "getting hw address for %s", tapif); - memcpy(hwaddr, ifr.ifr_hwaddr.sa_data, 6); -} - static int get_tun_device(char tapif[IFNAMSIZ]) { struct ifreq ifr; @@ -1531,11 +1516,8 @@ static void setup_tun_net(char *arg) p = strchr(arg, ':'); if (p) { str2mac(p+1, conf.mac); + add_feature(dev, VIRTIO_NET_F_MAC); *p = '\0'; - } else { - p = arg + strlen(arg); - /* None supplied; query the randomly assigned mac. */ - get_mac(ipfd, tapif, conf.mac); } /* arg is now either an IP address or a bridge name */ @@ -1547,13 +1529,10 @@ static void setup_tun_net(char *arg) /* Set up the tun device. */ configure_device(ipfd, tapif, ip); - /* Tell Guest what MAC address to use. */ - add_feature(dev, VIRTIO_NET_F_MAC); add_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY); /* Expect Guest to handle everything except UFO */ add_feature(dev, VIRTIO_NET_F_CSUM); add_feature(dev, VIRTIO_NET_F_GUEST_CSUM); - add_feature(dev, VIRTIO_NET_F_MAC); add_feature(dev, VIRTIO_NET_F_GUEST_TSO4); add_feature(dev, VIRTIO_NET_F_GUEST_TSO6); add_feature(dev, VIRTIO_NET_F_GUEST_ECN); diff --git a/trunk/arch/x86/kernel/apic_32.c b/trunk/arch/x86/kernel/apic_32.c index 039a8d4aaf62..d6c898358371 100644 --- a/trunk/arch/x86/kernel/apic_32.c +++ b/trunk/arch/x86/kernel/apic_32.c @@ -1720,19 +1720,15 @@ static int __init parse_lapic_timer_c2_ok(char *arg) } early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok); -static int __init apic_set_verbosity(char *arg) +static int __init apic_set_verbosity(char *str) { - if (!arg) - return -EINVAL; - - if (strcmp(arg, "debug") == 0) + if (strcmp("debug", str) == 0) apic_verbosity = APIC_DEBUG; - else if (strcmp(arg, "verbose") == 0) + else if (strcmp("verbose", str) == 0) apic_verbosity = APIC_VERBOSE; - - return 0; + return 1; } -early_param("apic", apic_set_verbosity); +__setup("apic=", apic_set_verbosity); static int __init lapic_insert_resource(void) { diff --git a/trunk/arch/x86/kernel/cpu/bugs.c b/trunk/arch/x86/kernel/cpu/bugs.c index c8e315f1aa83..c9b58a806e85 100644 --- a/trunk/arch/x86/kernel/cpu/bugs.c +++ b/trunk/arch/x86/kernel/cpu/bugs.c @@ -50,8 +50,6 @@ static double __initdata y = 3145727.0; */ static void __init check_fpu(void) { - s32 fdiv_bug; - if (!boot_cpu_data.hard_math) { #ifndef CONFIG_MATH_EMULATION printk(KERN_EMERG "No coprocessor found and no math emulation present.\n"); @@ -76,10 +74,8 @@ static void __init check_fpu(void) "fistpl %0\n\t" "fwait\n\t" "fninit" - : "=m" (*&fdiv_bug) + : "=m" (*&boot_cpu_data.fdiv_bug) : "m" (*&x), "m" (*&y)); - - boot_cpu_data.fdiv_bug = fdiv_bug; if (boot_cpu_data.fdiv_bug) printk("Hmm, FPU with FDIV bug.\n"); } diff --git a/trunk/arch/x86/kernel/io_apic_32.c b/trunk/arch/x86/kernel/io_apic_32.c index 09cddb57bec4..de9aa0e3a9c5 100644 --- a/trunk/arch/x86/kernel/io_apic_32.c +++ b/trunk/arch/x86/kernel/io_apic_32.c @@ -57,7 +57,7 @@ atomic_t irq_mis_count; static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; static DEFINE_SPINLOCK(ioapic_lock); -DEFINE_SPINLOCK(vector_lock); +static DEFINE_SPINLOCK(vector_lock); int timer_through_8259 __initdata; @@ -1209,6 +1209,10 @@ static int assign_irq_vector(int irq) return vector; } +void setup_vector_irq(int cpu) +{ +} + static struct irq_chip ioapic_chip; #define IOAPIC_AUTO -1 diff --git a/trunk/arch/x86/kernel/io_apic_64.c b/trunk/arch/x86/kernel/io_apic_64.c index 61a83b70c18f..8269434d1707 100644 --- a/trunk/arch/x86/kernel/io_apic_64.c +++ b/trunk/arch/x86/kernel/io_apic_64.c @@ -101,7 +101,7 @@ int timer_through_8259 __initdata; 
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; static DEFINE_SPINLOCK(ioapic_lock); -static DEFINE_SPINLOCK(vector_lock); +DEFINE_SPINLOCK(vector_lock); /* * # of IRQ routing registers @@ -697,19 +697,6 @@ static int pin_2_irq(int idx, int apic, int pin) return irq; } -void lock_vector_lock(void) -{ - /* Used to the online set of cpus does not change - * during assign_irq_vector. - */ - spin_lock(&vector_lock); -} - -void unlock_vector_lock(void) -{ - spin_unlock(&vector_lock); -} - static int __assign_irq_vector(int irq, cpumask_t mask) { /* @@ -815,7 +802,7 @@ static void __clear_irq_vector(int irq) cpus_clear(cfg->domain); } -void __setup_vector_irq(int cpu) +static void __setup_vector_irq(int cpu) { /* Initialize vector_irq on a new cpu */ /* This function must be called with vector_lock held */ @@ -838,6 +825,14 @@ void __setup_vector_irq(int cpu) } } +void setup_vector_irq(int cpu) +{ + spin_lock(&vector_lock); + __setup_vector_irq(smp_processor_id()); + spin_unlock(&vector_lock); +} + + static struct irq_chip ioapic_chip; static void ioapic_register_intr(int irq, unsigned long trigger) diff --git a/trunk/arch/x86/kernel/mpparse.c b/trunk/arch/x86/kernel/mpparse.c index 678090508a62..6ae005ccaed8 100644 --- a/trunk/arch/x86/kernel/mpparse.c +++ b/trunk/arch/x86/kernel/mpparse.c @@ -83,7 +83,7 @@ static void __init MP_bus_info(struct mpc_config_bus *m) if (x86_quirks->mpc_oem_bus_info) x86_quirks->mpc_oem_bus_info(m, str); else - apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->mpc_busid, str); + printk(KERN_INFO "Bus #%d is %s\n", m->mpc_busid, str); #if MAX_MP_BUSSES < 256 if (m->mpc_busid >= MAX_MP_BUSSES) { @@ -154,7 +154,7 @@ static void __init MP_ioapic_info(struct mpc_config_ioapic *m) static void print_MP_intsrc_info(struct mpc_config_intsrc *m) { - apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," + printk(KERN_CONT "Int: type %d, pol %d, trig %d, bus %02x," " IRQ %02x, APIC ID %x, APIC INT %02x\n", m->mpc_irqtype, m->mpc_irqflag & 3, (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, @@ -163,7 +163,7 @@ static void print_MP_intsrc_info(struct mpc_config_intsrc *m) static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq) { - apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," + printk(KERN_CONT "Int: type %d, pol %d, trig %d, bus %02x," " IRQ %02x, APIC ID %x, APIC INT %02x\n", mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3, (mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus, @@ -235,7 +235,7 @@ static void __init MP_intsrc_info(struct mpc_config_intsrc *m) static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m) { - apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x," + printk(KERN_INFO "Lint: type %d, pol %d, trig %d, bus %02x," " IRQ %02x, APIC ID %x, APIC LINT %02x\n", m->mpc_irqtype, m->mpc_irqflag & 3, (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid, @@ -695,8 +695,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length, unsigned int *bp = phys_to_virt(base); struct intel_mp_floating *mpf; - apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n", - bp, length); + printk(KERN_DEBUG "Scan SMP from %p for %ld bytes.\n", bp, length); BUILD_BUG_ON(sizeof(*mpf) != 16); while (length > 0) { diff --git a/trunk/arch/x86/kernel/pci-calgary_64.c b/trunk/arch/x86/kernel/pci-calgary_64.c index 02d19328525d..b67a4b1d4eae 100644 --- a/trunk/arch/x86/kernel/pci-calgary_64.c +++ b/trunk/arch/x86/kernel/pci-calgary_64.c @@ -1350,7 +1350,7 @@ static void 
calgary_init_bitmap_from_tce_table(struct iommu_table *tbl) * Function for kdump case. Get the tce tables from first kernel * by reading the contents of the base adress register of calgary iommu */ -static void get_tce_space_from_tar(void) +static void get_tce_space_from_tar() { int bus; void __iomem *target; diff --git a/trunk/arch/x86/kernel/setup.c b/trunk/arch/x86/kernel/setup.c index 68b48e3fbcbd..2d888586385d 100644 --- a/trunk/arch/x86/kernel/setup.c +++ b/trunk/arch/x86/kernel/setup.c @@ -604,14 +604,6 @@ void __init setup_arch(char **cmdline_p) early_cpu_init(); early_ioremap_init(); -#if defined(CONFIG_VMI) && defined(CONFIG_X86_32) - /* - * Must be before kernel pagetables are setup - * or fixmap area is touched. - */ - vmi_init(); -#endif - ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev); screen_info = boot_params.screen_info; edid_info = boot_params.edid_info; @@ -825,6 +817,14 @@ void __init setup_arch(char **cmdline_p) kvmclock_init(); #endif +#if defined(CONFIG_VMI) && defined(CONFIG_X86_32) + /* + * Must be after max_low_pfn is determined, and before kernel + * pagetables are setup. + */ + vmi_init(); +#endif + paravirt_pagetable_setup_start(swapper_pg_dir); paging_init(); paravirt_pagetable_setup_done(swapper_pg_dir); @@ -861,6 +861,12 @@ void __init setup_arch(char **cmdline_p) init_apic_mappings(); ioapic_init_mappings(); +#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC) && defined(CONFIG_X86_32) + if (def_to_bigsmp) + printk(KERN_WARNING "More than 8 CPUs detected and " + "CONFIG_X86_PC cannot handle it.\nUse " + "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n"); +#endif kvm_guest_init(); e820_reserve_resources(); diff --git a/trunk/arch/x86/kernel/smpboot.c b/trunk/arch/x86/kernel/smpboot.c index 91055d7fc1b0..332512767f4f 100644 --- a/trunk/arch/x86/kernel/smpboot.c +++ b/trunk/arch/x86/kernel/smpboot.c @@ -326,16 +326,12 @@ static void __cpuinit start_secondary(void *unused) * for which cpus receive the IPI. Holding this * lock helps us to not include this cpu in a currently in progress * smp_call_function(). - * - * We need to hold vector_lock so there the set of online cpus - * does not change while we are assigning vectors to cpus. Holding - * this lock ensures we don't half assign or remove an irq from a cpu. 
*/ ipi_call_lock_irq(); - lock_vector_lock(); - __setup_vector_irq(smp_processor_id()); +#ifdef CONFIG_X86_IO_APIC + setup_vector_irq(smp_processor_id()); +#endif cpu_set(smp_processor_id(), cpu_online_map); - unlock_vector_lock(); ipi_call_unlock_irq(); per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; @@ -994,17 +990,7 @@ int __cpuinit native_cpu_up(unsigned int cpu) flush_tlb_all(); low_mappings = 1; -#ifdef CONFIG_X86_PC - if (def_to_bigsmp && apicid > 8) { - printk(KERN_WARNING - "More than 8 CPUs detected - skipping them.\n" - "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n"); - err = -1; - } else - err = do_boot_cpu(apicid, cpu); -#else err = do_boot_cpu(apicid, cpu); -#endif zap_low_mappings(); low_mappings = 0; @@ -1350,9 +1336,7 @@ int __cpu_disable(void) remove_siblinginfo(cpu); /* It's now safe to remove this processor from the online map */ - lock_vector_lock(); remove_cpu_from_maps(cpu); - unlock_vector_lock(); fixup_irqs(cpu_online_map); return 0; } diff --git a/trunk/arch/x86/kernel/vmi_32.c b/trunk/arch/x86/kernel/vmi_32.c index 6ca515d6db54..0a1b1a9d922d 100644 --- a/trunk/arch/x86/kernel/vmi_32.c +++ b/trunk/arch/x86/kernel/vmi_32.c @@ -37,7 +37,6 @@ #include #include #include -#include /* Convenient for calling VMI functions indirectly in the ROM */ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void); @@ -684,7 +683,7 @@ void vmi_bringup(void) { /* We must establish the lowmem mapping for MMU ops to work */ if (vmi_ops.set_linear_mapping) - vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0); + vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0); } /* diff --git a/trunk/arch/x86/mm/pgtable.c b/trunk/arch/x86/mm/pgtable.c index d50302774fe2..557b2abceef8 100644 --- a/trunk/arch/x86/mm/pgtable.c +++ b/trunk/arch/x86/mm/pgtable.c @@ -207,9 +207,6 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) unsigned long addr; int i; - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */ - return; - pud = pud_offset(pgd, 0); for (addr = i = 0; i < PREALLOCATED_PMDS; diff --git a/trunk/drivers/char/agp/agp.h b/trunk/drivers/char/agp/agp.h index 4bada0e8b812..81e14bea54bd 100644 --- a/trunk/drivers/char/agp/agp.h +++ b/trunk/drivers/char/agp/agp.h @@ -148,9 +148,6 @@ struct agp_bridge_data { char minor_version; struct list_head list; u32 apbase_config; - /* list of agp_memory mapped to the aperture */ - struct list_head mapped_list; - spinlock_t mapped_lock; }; #define KB(x) ((x) * 1024) diff --git a/trunk/drivers/char/agp/ali-agp.c b/trunk/drivers/char/agp/ali-agp.c index 31dcd9142d54..1ffb381130c3 100644 --- a/trunk/drivers/char/agp/ali-agp.c +++ b/trunk/drivers/char/agp/ali-agp.c @@ -110,8 +110,7 @@ static int ali_configure(void) nlvm_addr+= agp_bridge->gart_bus_addr; nlvm_addr|=(agp_bridge->gart_bus_addr>>12); - dev_info(&agp_bridge->dev->dev, "nlvm top &base = %8x\n", - nlvm_addr); + printk(KERN_INFO PFX "nlvm top &base = %8x\n",nlvm_addr); } #endif @@ -316,8 +315,8 @@ static int __devinit agp_ali_probe(struct pci_dev *pdev, goto found; } - dev_err(&pdev->dev, "unsupported ALi chipset [%04x/%04x])\n", - pdev->vendor, pdev->device); + printk(KERN_ERR PFX "Unsupported ALi chipset (device id: %04x)\n", + pdev->device); return -ENODEV; @@ -362,7 +361,8 @@ static int __devinit agp_ali_probe(struct pci_dev *pdev, bridge->driver = &ali_generic_bridge; } - dev_info(&pdev->dev, "ALi %s chipset\n", devs[j].chipset_name); + printk(KERN_INFO PFX "Detected ALi %s chipset\n", + devs[j].chipset_name); /* Fill 
in the mode register */ pci_read_config_dword(pdev, diff --git a/trunk/drivers/char/agp/amd-k7-agp.c b/trunk/drivers/char/agp/amd-k7-agp.c index e280531843be..39a0718bc616 100644 --- a/trunk/drivers/char/agp/amd-k7-agp.c +++ b/trunk/drivers/char/agp/amd-k7-agp.c @@ -419,8 +419,8 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev, return -ENODEV; j = ent - agp_amdk7_pci_table; - dev_info(&pdev->dev, "AMD %s chipset\n", - amd_agp_device_ids[j].chipset_name); + printk(KERN_INFO PFX "Detected AMD %s chipset\n", + amd_agp_device_ids[j].chipset_name); bridge = agp_alloc_bridge(); if (!bridge) @@ -442,7 +442,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev, while (!cap_ptr) { gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard); if (!gfxcard) { - dev_info(&pdev->dev, "no AGP VGA controller\n"); + printk (KERN_INFO PFX "Couldn't find an AGP VGA controller.\n"); return -ENODEV; } cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP); @@ -453,7 +453,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev, (if necessary at all). */ if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) { agp_bridge->flags |= AGP_ERRATA_1X; - dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n"); + printk (KERN_INFO PFX "AMD 751 chipset with NVidia GeForce detected. Forcing to 1X due to errata.\n"); } pci_dev_put(gfxcard); } @@ -469,7 +469,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev, agp_bridge->flags = AGP_ERRATA_FASTWRITES; agp_bridge->flags |= AGP_ERRATA_SBA; agp_bridge->flags |= AGP_ERRATA_1X; - dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n"); + printk (KERN_INFO PFX "AMD 761 chipset with errata detected - disabling AGP fast writes & SBA and forcing to 1X.\n"); } } diff --git a/trunk/drivers/char/agp/amd64-agp.c b/trunk/drivers/char/agp/amd64-agp.c index 7495c522d8e4..481ffe87c716 100644 --- a/trunk/drivers/char/agp/amd64-agp.c +++ b/trunk/drivers/char/agp/amd64-agp.c @@ -34,7 +34,6 @@ static struct resource *aperture_resource; static int __initdata agp_try_unsupported = 1; -static int agp_bridges_found; static void amd64_tlbflush(struct agp_memory *temp) { @@ -294,13 +293,12 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, * so let double check that order, and lets trust the AMD NB settings */ if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) { - dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n", - 32 << order); + printk(KERN_INFO "Aperture size %u MB is not right, using settings from NB\n", + 32 << order); order = nb_order; } - dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n", - aper, 32 << order); + printk(KERN_INFO PFX "Aperture from AGP @ %Lx size %u MB\n", aper, 32 << order); if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<dev, "no usable aperture found\n"); + printk(KERN_ERR PFX "No usable aperture found.\n"); #ifdef __x86_64__ /* should port this to i386 */ - dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n"); + printk(KERN_ERR PFX "Consider rebooting with iommu=memaper=2 to get a good aperture.\n"); #endif return -1; } @@ -347,14 +345,14 @@ static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data default: revstring="??"; break; } - dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring); + printk (KERN_INFO PFX "Detected AMD 8151 AGP Bridge rev %s\n", revstring); /* * Work around errata. 
* Chips before B2 stepping incorrectly reporting v3.5 */ if (pdev->revision < 0x13) { - dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n"); + printk (KERN_INFO PFX "Correcting AGP revision (reports 3.5, is really 3.0)\n"); bridge->major_version = 3; bridge->minor_version = 0; } @@ -377,11 +375,11 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) struct pci_dev *dev1; int i; unsigned size = amd64_fetch_size(); - - dev_info(&pdev->dev, "setting up ULi AGP\n"); + printk(KERN_INFO "Setting up ULi AGP.\n"); dev1 = pci_get_slot (pdev->bus,PCI_DEVFN(0,0)); if (dev1 == NULL) { - dev_info(&pdev->dev, "can't find ULi secondary device\n"); + printk(KERN_INFO PFX "Detected a ULi chipset, " + "but could not fine the secondary device.\n"); return -ENODEV; } @@ -390,7 +388,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) break; if (i == ARRAY_SIZE(uli_sizes)) { - dev_info(&pdev->dev, "no ULi size found for %d\n", size); + printk(KERN_INFO PFX "No ULi size found for %d\n", size); return -ENODEV; } @@ -435,11 +433,13 @@ static int nforce3_agp_init(struct pci_dev *pdev) int i; unsigned size = amd64_fetch_size(); - dev_info(&pdev->dev, "setting up Nforce3 AGP\n"); + printk(KERN_INFO PFX "Setting up Nforce3 AGP.\n"); dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0)); if (dev1 == NULL) { - dev_info(&pdev->dev, "can't find Nforce3 secondary device\n"); + printk(KERN_INFO PFX "agpgart: Detected an NVIDIA " + "nForce3 chipset, but could not find " + "the secondary device.\n"); return -ENODEV; } @@ -448,7 +448,7 @@ static int nforce3_agp_init(struct pci_dev *pdev) break; if (i == ARRAY_SIZE(nforce3_sizes)) { - dev_info(&pdev->dev, "no NForce3 size found for %d\n", size); + printk(KERN_INFO PFX "No NForce3 size found for %d\n", size); return -ENODEV; } @@ -462,7 +462,7 @@ static int nforce3_agp_init(struct pci_dev *pdev) /* if x86-64 aperture base is beyond 4G, exit here */ if ( (apbase & 0x7fff) >> (32 - 25) ) { - dev_info(&pdev->dev, "aperture base > 4G\n"); + printk(KERN_INFO PFX "aperture base > 4G\n"); return -ENODEV; } @@ -489,7 +489,6 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev, { struct agp_bridge_data *bridge; u8 cap_ptr; - int err; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) @@ -505,8 +504,7 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev, pdev->device == PCI_DEVICE_ID_AMD_8151_0) { amd8151_init(pdev, bridge); } else { - dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n", - pdev->vendor, pdev->device); + printk(KERN_INFO PFX "Detected AGP bridge %x\n", pdev->devfn); } bridge->driver = &amd_8151_driver; @@ -538,12 +536,7 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev, } pci_set_drvdata(pdev, bridge); - err = agp_add_bridge(bridge); - if (err < 0) - return err; - - agp_bridges_found++; - return 0; + return agp_add_bridge(bridge); } static void __devexit agp_amd64_remove(struct pci_dev *pdev) @@ -720,11 +713,7 @@ int __init agp_amd64_init(void) if (agp_off) return -EINVAL; - err = pci_register_driver(&agp_amd64_pci_driver); - if (err < 0) - return err; - - if (agp_bridges_found == 0) { + if (pci_register_driver(&agp_amd64_pci_driver) < 0) { struct pci_dev *dev; if (!agp_try_unsupported && !agp_try_unsupported_boot) { printk(KERN_INFO PFX "No supported AGP bridge found.\n"); diff --git a/trunk/drivers/char/agp/ati-agp.c b/trunk/drivers/char/agp/ati-agp.c index 6ecbcafb34b1..3a4566c0d84f 100644 --- a/trunk/drivers/char/agp/ati-agp.c +++ b/trunk/drivers/char/agp/ati-agp.c @@ -486,8 +486,8 @@ static 
int __devinit agp_ati_probe(struct pci_dev *pdev, goto found; } - dev_err(&pdev->dev, "unsupported Ati chipset [%04x/%04x])\n", - pdev->vendor, pdev->device); + printk(KERN_ERR PFX + "Unsupported Ati chipset (device id: %04x)\n", pdev->device); return -ENODEV; found: @@ -500,7 +500,8 @@ static int __devinit agp_ati_probe(struct pci_dev *pdev, bridge->driver = &ati_generic_bridge; - dev_info(&pdev->dev, "Ati %s chipset\n", devs[j].chipset_name); + printk(KERN_INFO PFX "Detected Ati %s chipset\n", + devs[j].chipset_name); /* Fill in the mode register */ pci_read_config_dword(pdev, diff --git a/trunk/drivers/char/agp/backend.c b/trunk/drivers/char/agp/backend.c index 3a3cc03d401c..1ec87104e68c 100644 --- a/trunk/drivers/char/agp/backend.c +++ b/trunk/drivers/char/agp/backend.c @@ -144,8 +144,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge) void *addr = bridge->driver->agp_alloc_page(bridge); if (!addr) { - dev_err(&bridge->dev->dev, - "can't get memory for scratch page\n"); + printk(KERN_ERR PFX "unable to get memory for scratch page.\n"); return -ENOMEM; } @@ -156,13 +155,13 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge) size_value = bridge->driver->fetch_size(); if (size_value == 0) { - dev_err(&bridge->dev->dev, "can't determine aperture size\n"); + printk(KERN_ERR PFX "unable to determine aperture size.\n"); rc = -EINVAL; goto err_out; } if (bridge->driver->create_gatt_table(bridge)) { - dev_err(&bridge->dev->dev, - "can't get memory for graphics translation table\n"); + printk(KERN_ERR PFX + "unable to get memory for graphics translation table.\n"); rc = -ENOMEM; goto err_out; } @@ -170,8 +169,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge) bridge->key_list = vmalloc(PAGE_SIZE * 4); if (bridge->key_list == NULL) { - dev_err(&bridge->dev->dev, - "can't allocate memory for key lists\n"); + printk(KERN_ERR PFX "error allocating memory for key lists.\n"); rc = -ENOMEM; goto err_out; } @@ -181,12 +179,10 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge) memset(bridge->key_list, 0, PAGE_SIZE * 4); if (bridge->driver->configure()) { - dev_err(&bridge->dev->dev, "error configuring host chipset\n"); + printk(KERN_ERR PFX "error configuring host chipset.\n"); rc = -EINVAL; goto err_out; } - INIT_LIST_HEAD(&bridge->mapped_list); - spin_lock_init(&bridge->mapped_lock); return 0; @@ -273,27 +269,25 @@ int agp_add_bridge(struct agp_bridge_data *bridge) /* Grab reference on the chipset driver. 
*/ if (!try_module_get(bridge->driver->owner)) { - dev_info(&bridge->dev->dev, "can't lock chipset driver\n"); + printk (KERN_INFO PFX "Couldn't lock chipset driver.\n"); return -EINVAL; } error = agp_backend_initialize(bridge); if (error) { - dev_info(&bridge->dev->dev, - "agp_backend_initialize() failed\n"); + printk (KERN_INFO PFX "agp_backend_initialize() failed.\n"); goto err_out; } if (list_empty(&agp_bridges)) { error = agp_frontend_initialize(); if (error) { - dev_info(&bridge->dev->dev, - "agp_frontend_initialize() failed\n"); + printk (KERN_INFO PFX "agp_frontend_initialize() failed.\n"); goto frontend_err; } - dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n", - bridge->driver->fetch_size(), bridge->gart_bus_addr); + printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n", + bridge->driver->fetch_size(), bridge->gart_bus_addr); } diff --git a/trunk/drivers/char/agp/generic.c b/trunk/drivers/char/agp/generic.c index 118dbde25dc7..eaa1a355bb32 100644 --- a/trunk/drivers/char/agp/generic.c +++ b/trunk/drivers/char/agp/generic.c @@ -429,10 +429,6 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start) curr->is_bound = true; curr->pg_start = pg_start; - spin_lock(&agp_bridge->mapped_lock); - list_add(&curr->mapped_list, &agp_bridge->mapped_list); - spin_unlock(&agp_bridge->mapped_lock); - return 0; } EXPORT_SYMBOL(agp_bind_memory); @@ -465,34 +461,10 @@ int agp_unbind_memory(struct agp_memory *curr) curr->is_bound = false; curr->pg_start = 0; - spin_lock(&curr->bridge->mapped_lock); - list_del(&curr->mapped_list); - spin_unlock(&curr->bridge->mapped_lock); return 0; } EXPORT_SYMBOL(agp_unbind_memory); -/** - * agp_rebind_emmory - Rewrite the entire GATT, useful on resume - */ -int agp_rebind_memory(void) -{ - struct agp_memory *curr; - int ret_val = 0; - - spin_lock(&agp_bridge->mapped_lock); - list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) { - ret_val = curr->bridge->driver->insert_memory(curr, - curr->pg_start, - curr->type); - if (ret_val != 0) - break; - } - spin_unlock(&agp_bridge->mapped_lock); - return ret_val; -} -EXPORT_SYMBOL(agp_rebind_memory); - /* End - Routines for handling swapping of agp_memory into the GATT */ @@ -799,8 +771,8 @@ void agp_device_command(u32 bridge_agpstat, bool agp_v3) if (!agp) continue; - dev_info(&device->dev, "putting AGP V%d device into %dx mode\n", - agp_v3 ? 3 : 2, mode); + printk(KERN_INFO PFX "Putting AGP V%d device at %s into %dx mode\n", + agp_v3 ? 
3 : 2, pci_name(device), mode); pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat); } } @@ -828,8 +800,10 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode) get_agp_version(agp_bridge); - dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n", - agp_bridge->major_version, agp_bridge->minor_version); + printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n", + agp_bridge->major_version, + agp_bridge->minor_version, + pci_name(agp_bridge->dev)); pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat); @@ -858,7 +832,8 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode) pci_write_config_dword(bridge->dev, bridge->capndx+AGPCTRL, temp); - dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n"); + printk(KERN_INFO PFX "Device is in legacy mode," + " falling back to 2.x\n"); } } diff --git a/trunk/drivers/char/agp/intel-agp.c b/trunk/drivers/char/agp/intel-agp.c index 016fdf0623a4..df702642ab8f 100644 --- a/trunk/drivers/char/agp/intel-agp.c +++ b/trunk/drivers/char/agp/intel-agp.c @@ -32,8 +32,8 @@ #define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 -#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 -#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 +#define PCI_DEVICE_ID_INTEL_IGD_HB 0x2A40 +#define PCI_DEVICE_ID_INTEL_IGD_IG 0x2A42 #define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00 #define PCI_DEVICE_ID_INTEL_IGD_E_IG 0x2E02 #define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 @@ -55,7 +55,7 @@ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB) + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_HB) #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ @@ -161,7 +161,7 @@ static int intel_i810_fetch_size(void) values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { - dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n"); + printk(KERN_WARNING PFX "i810 is disabled\n"); return 0; } if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { @@ -193,8 +193,7 @@ static int intel_i810_configure(void) intel_private.registers = ioremap(temp, 128 * 4096); if (!intel_private.registers) { - dev_err(&intel_private.pcidev->dev, - "can't remap memory\n"); + printk(KERN_ERR PFX "Unable to remap memory.\n"); return -ENOMEM; } } @@ -202,8 +201,7 @@ static int intel_i810_configure(void) if ((readl(intel_private.registers+I810_DRAM_CTL) & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { /* This will need to be dynamically assigned */ - dev_info(&intel_private.pcidev->dev, - "detected 4MB dedicated video ram\n"); + printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n"); intel_private.num_dcache_entries = 1024; } pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); @@ -502,8 +500,8 @@ static void intel_i830_init_gtt_entries(void) size = 1024 + 512; break; default: - dev_info(&intel_private.pcidev->dev, - "unknown page table size, assuming 512KB\n"); + printk(KERN_INFO PFX "Unknown page table size, " + "assuming 512KB\n"); size = 512; } size += 4; /* add in BIOS popup space */ @@ -517,8 +515,8 @@ static void intel_i830_init_gtt_entries(void) size = 2048; break; default: - 
dev_info(&agp_bridge->dev->dev, - "unknown page table size 0x%x, assuming 512KB\n", + printk(KERN_INFO PFX "Unknown page table size 0x%x, " + "assuming 512KB\n", (gmch_ctrl & G33_PGETBL_SIZE_MASK)); size = 512; } @@ -629,11 +627,11 @@ static void intel_i830_init_gtt_entries(void) } } if (gtt_entries > 0) - dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n", + printk(KERN_INFO PFX "Detected %dK %s memory.\n", gtt_entries / KB(1), local ? "local" : "stolen"); else - dev_info(&agp_bridge->dev->dev, - "no pre-allocated video memory detected\n"); + printk(KERN_INFO PFX + "No pre-allocated video memory detected.\n"); gtt_entries /= KB(4); intel_private.gtt_entries = gtt_entries; @@ -803,12 +801,10 @@ static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, num_entries = A_SIZE_FIX(temp)->num_entries; if (pg_start < intel_private.gtt_entries) { - dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, - "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", - pg_start, intel_private.gtt_entries); + printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n", + pg_start, intel_private.gtt_entries); - dev_info(&intel_private.pcidev->dev, - "trying to insert into local/stolen memory\n"); + printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n"); goto out_err; } @@ -855,8 +851,7 @@ static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, return 0; if (pg_start < intel_private.gtt_entries) { - dev_info(&intel_private.pcidev->dev, - "trying to disable local/stolen memory\n"); + printk(KERN_INFO PFX "Trying to disable local/stolen memory\n"); return -EINVAL; } @@ -962,7 +957,7 @@ static void intel_i9xx_setup_flush(void) if (intel_private.ifp_resource.start) { intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); if (!intel_private.i9xx_flush_page) - dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing"); + printk(KERN_INFO "unable to ioremap flush page - no chipset flushing"); } } @@ -1033,12 +1028,10 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, num_entries = A_SIZE_FIX(temp)->num_entries; if (pg_start < intel_private.gtt_entries) { - dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, - "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", - pg_start, intel_private.gtt_entries); + printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n", + pg_start, intel_private.gtt_entries); - dev_info(&intel_private.pcidev->dev, - "trying to insert into local/stolen memory\n"); + printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n"); goto out_err; } @@ -1085,8 +1078,7 @@ static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, return 0; if (pg_start < intel_private.gtt_entries) { - dev_info(&intel_private.pcidev->dev, - "trying to disable local/stolen memory\n"); + printk(KERN_INFO PFX "Trying to disable local/stolen memory\n"); return -EINVAL; } @@ -1190,7 +1182,7 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) { switch (agp_bridge->dev->device) { - case PCI_DEVICE_ID_INTEL_GM45_HB: + case PCI_DEVICE_ID_INTEL_IGD_HB: case PCI_DEVICE_ID_INTEL_IGD_E_HB: case PCI_DEVICE_ID_INTEL_Q45_HB: case PCI_DEVICE_ID_INTEL_G45_HB: @@ -1387,7 +1379,7 @@ static int intel_815_configure(void) /* the Intel 815 chipset spec. 
says that bits 29-31 in the * ATTBASE register are reserved -> try not to write them */ if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) { - dev_emerg(&agp_bridge->dev->dev, "gatt bus addr too high"); + printk(KERN_EMERG PFX "gatt bus addr too high"); return -EINVAL; } @@ -2125,8 +2117,8 @@ static const struct intel_driver_description { NULL, &intel_g33_driver }, { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", NULL, &intel_g33_driver }, - { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, - "Mobile Intel? GM45 Express", NULL, &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_IGD_HB, PCI_DEVICE_ID_INTEL_IGD_IG, 0, + "Intel Integrated Graphics Device", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0, "Intel Integrated Graphics Device", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0, @@ -2171,8 +2163,8 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, if (intel_agp_chipsets[i].name == NULL) { if (cap_ptr) - dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n", - pdev->vendor, pdev->device); + printk(KERN_WARNING PFX "Unsupported Intel chipset" + "(device id: %04x)\n", pdev->device); agp_put_bridge(bridge); return -ENODEV; } @@ -2180,8 +2172,9 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, if (bridge->driver == NULL) { /* bridge has no AGP and no IGD detected */ if (cap_ptr) - dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n", - intel_agp_chipsets[i].gmch_chip_id); + printk(KERN_WARNING PFX "Failed to find bridge device " + "(chip_id: %04x)\n", + intel_agp_chipsets[i].gmch_chip_id); agp_put_bridge(bridge); return -ENODEV; } @@ -2190,7 +2183,8 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, bridge->capndx = cap_ptr; bridge->dev_private_data = &intel_private; - dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); + printk(KERN_INFO PFX "Detected an Intel %s Chipset.\n", + intel_agp_chipsets[i].name); /* * The following fixes the case where the BIOS has "forgotten" to @@ -2200,7 +2194,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, r = &pdev->resource[0]; if (!r->start && r->end) { if (pci_assign_resource(pdev, 0)) { - dev_err(&pdev->dev, "can't assign resource 0\n"); + printk(KERN_ERR PFX "could not assign resource 0\n"); agp_put_bridge(bridge); return -ENODEV; } @@ -2212,7 +2206,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, * 20030610 - hamish@zot.org */ if (pci_enable_device(pdev)) { - dev_err(&pdev->dev, "can't enable PCI device\n"); + printk(KERN_ERR PFX "Unable to Enable PCI device\n"); agp_put_bridge(bridge); return -ENODEV; } @@ -2244,7 +2238,6 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev) static int agp_intel_resume(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); - int ret_val; pci_restore_state(pdev); @@ -2272,10 +2265,6 @@ static int agp_intel_resume(struct pci_dev *pdev) else if (bridge->driver == &intel_i965_driver) intel_i915_configure(); - ret_val = agp_rebind_memory(); - if (ret_val != 0) - return ret_val; - return 0; } #endif @@ -2326,7 +2315,7 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_G33_HB), ID(PCI_DEVICE_ID_INTEL_Q35_HB), ID(PCI_DEVICE_ID_INTEL_Q33_HB), - ID(PCI_DEVICE_ID_INTEL_GM45_HB), + ID(PCI_DEVICE_ID_INTEL_IGD_HB), ID(PCI_DEVICE_ID_INTEL_IGD_E_HB), ID(PCI_DEVICE_ID_INTEL_Q45_HB), ID(PCI_DEVICE_ID_INTEL_G45_HB), diff --git 
a/trunk/drivers/char/agp/isoch.c b/trunk/drivers/char/agp/isoch.c index c73385cc4b8a..3f9ccde62377 100644 --- a/trunk/drivers/char/agp/isoch.c +++ b/trunk/drivers/char/agp/isoch.c @@ -153,7 +153,7 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, /* Check if this configuration has any chance of working */ if (tot_bw > target.maxbw) { - dev_err(&td->dev, "isochronous bandwidth required " + printk(KERN_ERR PFX "isochronous bandwidth required " "by AGP 3.0 devices exceeds that which is supported by " "the AGP 3.0 bridge!\n"); ret = -ENODEV; @@ -188,7 +188,7 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, /* Exit if the minimal ISOCH_N allocation among the masters is more * than the target can handle. */ if (tot_n > target.n) { - dev_err(&td->dev, "number of isochronous " + printk(KERN_ERR PFX "number of isochronous " "transactions per period required by AGP 3.0 devices " "exceeds that which is supported by the AGP 3.0 " "bridge!\n"); @@ -229,7 +229,7 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, /* Exit if the minimal RQ needs of the masters exceeds what the target * can provide. */ if (tot_rq > rq_isoch) { - dev_err(&td->dev, "number of request queue slots " + printk(KERN_ERR PFX "number of request queue slots " "required by the isochronous bandwidth requested by " "AGP 3.0 devices exceeds the number provided by the " "AGP 3.0 bridge!\n"); @@ -359,9 +359,8 @@ int agp_3_5_enable(struct agp_bridge_data *bridge) case 0x0001: /* Unclassified device */ /* Don't know what this is, but log it for investigation. */ if (mcapndx != 0) { - dev_info(&td->dev, "wacky, found unclassified AGP device %s [%04x/%04x]\n", - pci_name(dev), - dev->vendor, dev->device); + printk (KERN_INFO PFX "Wacky, found unclassified AGP device. %x:%x\n", + dev->vendor, dev->device); } continue; @@ -408,18 +407,17 @@ int agp_3_5_enable(struct agp_bridge_data *bridge) } if (mcapndx == 0) { - dev_err(&td->dev, "woah! Non-AGP device %s on " - "secondary bus of AGP 3.5 bridge!\n", - pci_name(dev)); + printk(KERN_ERR PFX "woah! Non-AGP device " + "found on the secondary bus of an AGP 3.5 bridge!\n"); ret = -ENODEV; goto free_and_exit; } mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf; if (mmajor < 3) { - dev_err(&td->dev, "woah! AGP 2.0 device %s on " - "secondary bus of AGP 3.5 bridge operating " - "with AGP 3.0 electricals!\n", pci_name(dev)); + printk(KERN_ERR PFX "woah! AGP 2.0 device " + "found on the secondary bus of an AGP 3.5 " + "bridge operating with AGP 3.0 electricals!\n"); ret = -ENODEV; goto free_and_exit; } @@ -429,10 +427,10 @@ int agp_3_5_enable(struct agp_bridge_data *bridge) pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus); if (((mstatus >> 3) & 0x1) == 0) { - dev_err(&td->dev, "woah! AGP 3.x device %s not " - "operating in AGP 3.x mode on secondary bus " - "of AGP 3.5 bridge operating with AGP 3.0 " - "electricals!\n", pci_name(dev)); + printk(KERN_ERR PFX "woah! AGP 3.x device " + "not operating in AGP 3.x mode found on the " + "secondary bus of an AGP 3.5 bridge operating " + "with AGP 3.0 electricals!\n"); ret = -ENODEV; goto free_and_exit; } @@ -446,9 +444,9 @@ int agp_3_5_enable(struct agp_bridge_data *bridge) if (isoch) { ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs); if (ret) { - dev_info(&td->dev, "something bad happened setting " - "up isochronous xfers; falling back to " - "non-isochronous xfer mode\n"); + printk(KERN_INFO PFX "Something bad happened setting " + "up isochronous xfers. 
Falling back to " + "non-isochronous xfer mode.\n"); } else { goto free_and_exit; } @@ -468,3 +466,4 @@ int agp_3_5_enable(struct agp_bridge_data *bridge) get_out: return ret; } + diff --git a/trunk/drivers/char/agp/sis-agp.c b/trunk/drivers/char/agp/sis-agp.c index 2587ef96a960..b6791846809f 100644 --- a/trunk/drivers/char/agp/sis-agp.c +++ b/trunk/drivers/char/agp/sis-agp.c @@ -79,8 +79,10 @@ static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode) u32 command; int rate; - dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n", - agp_bridge->major_version, agp_bridge->minor_version); + printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n", + agp_bridge->major_version, + agp_bridge->minor_version, + pci_name(agp_bridge->dev)); pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command); command = agp_collect_device_status(bridge, mode, command); @@ -92,8 +94,8 @@ static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode) if (!agp) continue; - dev_info(&agp_bridge->dev->dev, "putting AGP V3 device at %s into %dx mode\n", - pci_name(device), rate); + printk(KERN_INFO PFX "Putting AGP V3 device at %s into %dx mode\n", + pci_name(device), rate); pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command); @@ -103,7 +105,7 @@ static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode) * cannot be configured */ if (device->device == bridge->dev->device) { - dev_info(&agp_bridge->dev->dev, "SiS delay workaround: giving bridge time to recover\n"); + printk(KERN_INFO PFX "SiS delay workaround: giving bridge time to recover.\n"); msleep(10); } } @@ -188,8 +190,7 @@ static int __devinit agp_sis_probe(struct pci_dev *pdev, return -ENODEV; - dev_info(&pdev->dev, "SiS chipset [%04x/%04x]\n", - pdev->vendor, pdev->device); + printk(KERN_INFO PFX "Detected SiS chipset - id:%i\n", pdev->device); bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; @@ -241,7 +242,7 @@ static struct pci_device_id agp_sis_pci_table[] = { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_SI, - .device = PCI_DEVICE_ID_SI_5591, + .device = PCI_DEVICE_ID_SI_5591_AGP, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, diff --git a/trunk/drivers/char/agp/sworks-agp.c b/trunk/drivers/char/agp/sworks-agp.c index 2fb27fe4c10c..0e054c134490 100644 --- a/trunk/drivers/char/agp/sworks-agp.c +++ b/trunk/drivers/char/agp/sworks-agp.c @@ -241,8 +241,7 @@ static void serverworks_tlbflush(struct agp_memory *temp) while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) { cpu_relax(); if (time_after(jiffies, timeout)) { - dev_err(&serverworks_private.svrwrks_dev->dev, - "TLB post flush took more than 3 seconds\n"); + printk(KERN_ERR PFX "TLB post flush took more than 3 seconds\n"); break; } } @@ -252,8 +251,7 @@ static void serverworks_tlbflush(struct agp_memory *temp) while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) { cpu_relax(); if (time_after(jiffies, timeout)) { - dev_err(&serverworks_private.svrwrks_dev->dev, - "TLB Dir flush took more than 3 seconds\n"); + printk(KERN_ERR PFX "TLB Dir flush took more than 3 seconds\n"); break; } } @@ -273,7 +271,7 @@ static int serverworks_configure(void) temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096); if (!serverworks_private.registers) { - dev_err(&agp_bridge->dev->dev, "can't ioremap(%#x)\n", temp); + printk (KERN_ERR PFX "Unable to ioremap() memory.\n"); return -ENOMEM; } @@ -453,7 
+451,7 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev, switch (pdev->device) { case 0x0006: - dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n"); + printk (KERN_ERR PFX "ServerWorks CNB20HE is unsupported due to lack of documentation.\n"); return -ENODEV; case PCI_DEVICE_ID_SERVERWORKS_HE: @@ -463,8 +461,8 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev, default: if (cap_ptr) - dev_err(&pdev->dev, "unsupported Serverworks chipset " - "[%04x/%04x]\n", pdev->vendor, pdev->device); + printk(KERN_ERR PFX "Unsupported Serverworks chipset " + "(device id: %04x)\n", pdev->device); return -ENODEV; } @@ -472,7 +470,8 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev, bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 1)); if (!bridge_dev) { - dev_info(&pdev->dev, "can't find secondary device\n"); + printk(KERN_INFO PFX "Detected a Serverworks chipset " + "but could not find the secondary device.\n"); return -ENODEV; } @@ -483,8 +482,8 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev, if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2); if (temp2 != 0) { - dev_info(&pdev->dev, "64 bit aperture address, " - "but top bits are not zero; disabling AGP\n"); + printk(KERN_INFO PFX "Detected 64 bit aperture address, " + "but top bits are not zero. Disabling agp\n"); return -ENODEV; } serverworks_private.mm_addr_ofs = 0x18; @@ -496,8 +495,8 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev, pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs + 4, &temp2); if (temp2 != 0) { - dev_info(&pdev->dev, "64 bit MMIO address, but top " - "bits are not zero; disabling AGP\n"); + printk(KERN_INFO PFX "Detected 64 bit MMIO address, " + "but top bits are not zero. 
Disabling agp\n"); return -ENODEV; } } diff --git a/trunk/drivers/char/agp/uninorth-agp.c b/trunk/drivers/char/agp/uninorth-agp.c index eef72709ec53..d2fa3cfca02a 100644 --- a/trunk/drivers/char/agp/uninorth-agp.c +++ b/trunk/drivers/char/agp/uninorth-agp.c @@ -46,8 +46,8 @@ static int uninorth_fetch_size(void) break; if (i == agp_bridge->driver->num_aperture_sizes) { - dev_err(&agp_bridge->dev->dev, "invalid aperture size, " - "using default\n"); + printk(KERN_ERR PFX "Invalid aperture size, using" + " default\n"); size = 0; aperture = NULL; } @@ -108,8 +108,8 @@ static int uninorth_configure(void) current_size = A_SIZE_32(agp_bridge->current_size); - dev_info(&agp_bridge->dev->dev, "configuring for size idx: %d\n", - current_size->size_value); + printk(KERN_INFO PFX "configuring for size idx: %d\n", + current_size->size_value); /* aperture size and gatt addr */ pci_write_config_dword(agp_bridge->dev, @@ -197,9 +197,8 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type) gp = (u32 *) &agp_bridge->gatt_table[pg_start]; for (i = 0; i < mem->page_count; ++i) { if (gp[i]) { - dev_info(&agp_bridge->dev->dev, - "u3_insert_memory: entry 0x%x occupied (%x)\n", - i, gp[i]); + printk("u3_insert_memory: entry 0x%x occupied (%x)\n", + i, gp[i]); return -EBUSY; } } @@ -277,8 +276,8 @@ static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode) &scratch); } while ((scratch & PCI_AGP_COMMAND_AGP) == 0 && ++timeout < 1000); if ((scratch & PCI_AGP_COMMAND_AGP) == 0) - dev_err(&bridge->dev->dev, "can't write UniNorth AGP " - "command register\n"); + printk(KERN_ERR PFX "failed to write UniNorth AGP" + " command register\n"); if (uninorth_rev >= 0x30) { /* This is an AGP V3 */ @@ -331,8 +330,8 @@ static int agp_uninorth_suspend(struct pci_dev *pdev) pci_read_config_dword(device, agp + PCI_AGP_COMMAND, &cmd); if (!(cmd & PCI_AGP_COMMAND_AGP)) continue; - dev_info(&pdev->dev, "disabling AGP on device %s\n", - pci_name(device)); + printk("uninorth-agp: disabling AGP on device %s\n", + pci_name(device)); cmd &= ~PCI_AGP_COMMAND_AGP; pci_write_config_dword(device, agp + PCI_AGP_COMMAND, cmd); } @@ -342,7 +341,8 @@ static int agp_uninorth_suspend(struct pci_dev *pdev) pci_read_config_dword(pdev, agp + PCI_AGP_COMMAND, &cmd); bridge->dev_private_data = (void *)(long)cmd; if (cmd & PCI_AGP_COMMAND_AGP) { - dev_info(&pdev->dev, "disabling AGP on bridge\n"); + printk("uninorth-agp: disabling AGP on bridge %s\n", + pci_name(pdev)); cmd &= ~PCI_AGP_COMMAND_AGP; pci_write_config_dword(pdev, agp + PCI_AGP_COMMAND, cmd); } @@ -591,14 +591,14 @@ static int __devinit agp_uninorth_probe(struct pci_dev *pdev, /* probe for known chipsets */ for (j = 0; devs[j].chipset_name != NULL; ++j) { if (pdev->device == devs[j].device_id) { - dev_info(&pdev->dev, "Apple %s chipset\n", - devs[j].chipset_name); + printk(KERN_INFO PFX "Detected Apple %s chipset\n", + devs[j].chipset_name); goto found; } } - dev_err(&pdev->dev, "unsupported Apple chipset [%04x/%04x]\n", - pdev->vendor, pdev->device); + printk(KERN_ERR PFX "Unsupported Apple chipset (device id: %04x).\n", + pdev->device); return -ENODEV; found: diff --git a/trunk/drivers/scsi/device_handler/scsi_dh_alua.c b/trunk/drivers/scsi/device_handler/scsi_dh_alua.c index 994da56fffed..fcdd73f25625 100644 --- a/trunk/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/trunk/drivers/scsi/device_handler/scsi_dh_alua.c @@ -680,7 +680,7 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req) } -static const struct scsi_dh_devlist 
alua_dev_list[] = { +const struct scsi_dh_devlist alua_dev_list[] = { {"HP", "MSA VOLUME" }, {"HP", "HSV101" }, {"HP", "HSV111" }, diff --git a/trunk/drivers/scsi/device_handler/scsi_dh_emc.c b/trunk/drivers/scsi/device_handler/scsi_dh_emc.c index b9d23e9e9a44..aa46b131b20e 100644 --- a/trunk/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/trunk/drivers/scsi/device_handler/scsi_dh_emc.c @@ -562,7 +562,7 @@ static int clariion_activate(struct scsi_device *sdev) return result; } -static const struct scsi_dh_devlist clariion_dev_list[] = { +const struct scsi_dh_devlist clariion_dev_list[] = { {"DGC", "RAID"}, {"DGC", "DISK"}, {"DGC", "VRAID"}, diff --git a/trunk/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/trunk/drivers/scsi/device_handler/scsi_dh_hp_sw.c index a6a4ef3ad51c..9c7a1f8ebb72 100644 --- a/trunk/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/trunk/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -282,7 +282,7 @@ static int hp_sw_activate(struct scsi_device *sdev) return ret; } -static const struct scsi_dh_devlist hp_sw_dh_data_list[] = { +const struct scsi_dh_devlist hp_sw_dh_data_list[] = { {"COMPAQ", "MSA1000 VOLUME"}, {"COMPAQ", "HSV110"}, {"HP", "HSV100"}, diff --git a/trunk/drivers/scsi/device_handler/scsi_dh_rdac.c b/trunk/drivers/scsi/device_handler/scsi_dh_rdac.c index e7c7b4ebc1fe..b093a501f8ae 100644 --- a/trunk/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/trunk/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -574,7 +574,7 @@ static int rdac_check_sense(struct scsi_device *sdev, return SCSI_RETURN_NOT_HANDLED; } -static const struct scsi_dh_devlist rdac_dev_list[] = { +const struct scsi_dh_devlist rdac_dev_list[] = { {"IBM", "1722"}, {"IBM", "1724"}, {"IBM", "1726"}, diff --git a/trunk/fs/jbd/transaction.c b/trunk/fs/jbd/transaction.c index 0540ca27a446..8dee32007500 100644 --- a/trunk/fs/jbd/transaction.c +++ b/trunk/fs/jbd/transaction.c @@ -291,7 +291,7 @@ handle_t *journal_start(journal_t *journal, int nblocks) goto out; } - lock_map_acquire(&handle->h_lockdep_map); + lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_); out: return handle; @@ -1448,7 +1448,7 @@ int journal_stop(handle_t *handle) spin_unlock(&journal->j_state_lock); } - lock_map_release(&handle->h_lockdep_map); + lock_release(&handle->h_lockdep_map, 1, _THIS_IP_); jbd_free_handle(handle); return err; diff --git a/trunk/fs/jbd2/transaction.c b/trunk/fs/jbd2/transaction.c index e5d540588fa9..4f7cadbb19fa 100644 --- a/trunk/fs/jbd2/transaction.c +++ b/trunk/fs/jbd2/transaction.c @@ -301,7 +301,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks) goto out; } - lock_map_acquire(&handle->h_lockdep_map); + lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_); out: return handle; } @@ -1279,7 +1279,7 @@ int jbd2_journal_stop(handle_t *handle) spin_unlock(&journal->j_state_lock); } - lock_map_release(&handle->h_lockdep_map); + lock_release(&handle->h_lockdep_map, 1, _THIS_IP_); jbd2_free_handle(handle); return err; diff --git a/trunk/include/asm-x86/efi.h b/trunk/include/asm-x86/efi.h index d4f2b0abe929..7ed2bd7a7f51 100644 --- a/trunk/include/asm-x86/efi.h +++ b/trunk/include/asm-x86/efi.h @@ -86,7 +86,7 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3, efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) -extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size); +extern void *efi_ioremap(unsigned long addr, unsigned long size); #endif /* CONFIG_X86_32 */ diff --git 
a/trunk/include/asm-x86/hw_irq.h b/trunk/include/asm-x86/hw_irq.h index edd0b95f14d0..77ba51df5668 100644 --- a/trunk/include/asm-x86/hw_irq.h +++ b/trunk/include/asm-x86/hw_irq.h @@ -98,17 +98,9 @@ extern void (*const interrupt[NR_IRQS])(void); #else typedef int vector_irq_t[NR_VECTORS]; DECLARE_PER_CPU(vector_irq_t, vector_irq); +extern spinlock_t vector_lock; #endif - -#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_X86_64) -extern void lock_vector_lock(void); -extern void unlock_vector_lock(void); -extern void __setup_vector_irq(int cpu); -#else -static inline void lock_vector_lock(void) {} -static inline void unlock_vector_lock(void) {} -static inline void __setup_vector_irq(int cpu) {} -#endif +extern void setup_vector_irq(int cpu); #endif /* !ASSEMBLY_ */ diff --git a/trunk/include/asm-x86/irq_vectors.h b/trunk/include/asm-x86/irq_vectors.h index b95d167b7fb2..90b1d1f12f08 100644 --- a/trunk/include/asm-x86/irq_vectors.h +++ b/trunk/include/asm-x86/irq_vectors.h @@ -109,15 +109,7 @@ #define LAST_VM86_IRQ 15 #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) -#ifdef CONFIG_X86_64 -# if NR_CPUS < MAX_IO_APICS -# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) -# else -# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) -# endif -# define NR_IRQ_VECTORS NR_IRQS - -#elif !defined(CONFIG_X86_VOYAGER) +#if !defined(CONFIG_X86_VOYAGER) # if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) || defined(CONFIG_X86_VISWS) diff --git a/trunk/include/linux/agp_backend.h b/trunk/include/linux/agp_backend.h index 2b8df8b420fd..972b12bcfb36 100644 --- a/trunk/include/linux/agp_backend.h +++ b/trunk/include/linux/agp_backend.h @@ -30,8 +30,6 @@ #ifndef _AGP_BACKEND_H #define _AGP_BACKEND_H 1 -#include - enum chipset_type { NOT_SUPPORTED, SUPPORTED, @@ -80,8 +78,6 @@ struct agp_memory { bool is_bound; bool is_flushed; bool vmalloc_flag; - /* list of agp_memory mapped to the aperture */ - struct list_head mapped_list; }; #define AGP_NORMAL_MEMORY 0 @@ -100,7 +96,6 @@ extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t, extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *); extern int agp_bind_memory(struct agp_memory *, off_t); extern int agp_unbind_memory(struct agp_memory *); -extern int agp_rebind_memory(void); extern void agp_enable(struct agp_bridge_data *, u32); extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); extern void agp_backend_release(struct agp_bridge_data *); diff --git a/trunk/include/linux/lockdep.h b/trunk/include/linux/lockdep.h index 331e5f1c2d8e..2486eb4edbf1 100644 --- a/trunk/include/linux/lockdep.h +++ b/trunk/include/linux/lockdep.h @@ -89,7 +89,6 @@ struct lock_class { struct lockdep_subclass_key *key; unsigned int subclass; - unsigned int dep_gen_id; /* * IRQ/softirq usage tracking bits: @@ -190,14 +189,6 @@ struct lock_chain { u64 chain_key; }; -#define MAX_LOCKDEP_KEYS_BITS 13 -/* - * Subtract one because we offset hlock->class_idx by 1 in order - * to make 0 mean no class. This avoids overflowing the class_idx - * bitfield and hitting the BUG in hlock_class(). - */ -#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1) - struct held_lock { /* * One-way hash of the dependency chain up to this point. 
We @@ -214,14 +205,14 @@ struct held_lock { * with zero), here we store the previous hash value: */ u64 prev_chain_key; + struct lock_class *class; unsigned long acquire_ip; struct lockdep_map *instance; - struct lockdep_map *nest_lock; + #ifdef CONFIG_LOCK_STAT u64 waittime_stamp; u64 holdtime_stamp; #endif - unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; /* * The lock-stack is unified in that the lock chains of interrupt * contexts nest ontop of process context chains, but we 'separate' @@ -235,11 +226,11 @@ struct held_lock { * The following field is used to detect when we cross into an * interrupt context: */ - unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ - unsigned int trylock:1; - unsigned int read:2; /* see lock_acquire() comment */ - unsigned int check:2; /* see lock_acquire() comment */ - unsigned int hardirqs_off:1; + int irq_context; + int trylock; + int read; + int check; + int hardirqs_off; }; /* @@ -303,15 +294,11 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name, * 2: full validation */ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, - int trylock, int read, int check, - struct lockdep_map *nest_lock, unsigned long ip); + int trylock, int read, int check, unsigned long ip); extern void lock_release(struct lockdep_map *lock, int nested, unsigned long ip); -extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass, - unsigned long ip); - # define INIT_LOCKDEP .lockdep_recursion = 0, #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) @@ -326,9 +313,8 @@ static inline void lockdep_on(void) { } -# define lock_acquire(l, s, t, r, c, n, i) do { } while (0) +# define lock_acquire(l, s, t, r, c, i) do { } while (0) # define lock_release(l, n, i) do { } while (0) -# define lock_set_subclass(l, s, i) do { } while (0) # define lockdep_init() do { } while (0) # define lockdep_info() do { } while (0) # define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0) @@ -414,11 +400,9 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) -# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) # else -# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) -# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) # endif # define spin_release(l, n, i) lock_release(l, n, i) #else @@ -428,11 +412,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) -# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i) +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i) # else -# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) -# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i) +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i) # endif # define rwlock_release(l, n, i) lock_release(l, n, i) #else @@ -443,9 +427,9 @@ static inline void print_irqtrace_events(struct 
task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) # else -# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) # endif # define mutex_release(l, n, i) lock_release(l, n, i) #else @@ -455,11 +439,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) -# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i) # else -# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) -# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i) # endif # define rwsem_release(l, n, i) lock_release(l, n, i) #else @@ -468,16 +452,4 @@ static inline void print_irqtrace_events(struct task_struct *curr) # define rwsem_release(l, n, i) do { } while (0) #endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# ifdef CONFIG_PROVE_LOCKING -# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) -# else -# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) -# endif -# define lock_map_release(l) lock_release(l, 1, _THIS_IP_) -#else -# define lock_map_acquire(l) do { } while (0) -# define lock_map_release(l) do { } while (0) -#endif - #endif /* __LINUX_LOCKDEP_H */ diff --git a/trunk/include/linux/rcuclassic.h b/trunk/include/linux/rcuclassic.h index 4ab843622727..8c774905dcfe 100644 --- a/trunk/include/linux/rcuclassic.h +++ b/trunk/include/linux/rcuclassic.h @@ -117,7 +117,7 @@ extern int rcu_needs_cpu(int cpu); #ifdef CONFIG_DEBUG_LOCK_ALLOC extern struct lockdep_map rcu_lock_map; # define rcu_read_acquire() \ - lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) + lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_) # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) #else # define rcu_read_acquire() do { } while (0) diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h index 5850bfb968a8..5270d449ff9d 100644 --- a/trunk/include/linux/sched.h +++ b/trunk/include/linux/sched.h @@ -1551,10 +1551,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) extern unsigned long long sched_clock(void); -extern void sched_clock_init(void); -extern u64 sched_clock_cpu(int cpu); - #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK +static inline void sched_clock_init(void) +{ +} + +static inline u64 sched_clock_cpu(int cpu) +{ + return sched_clock(); +} + static inline void sched_clock_tick(void) { } @@ -1566,11 +1572,28 @@ static inline void sched_clock_idle_sleep_event(void) static inline void sched_clock_idle_wakeup_event(u64 delta_ns) { } -#else + +#ifdef CONFIG_NO_HZ +static inline void sched_clock_tick_stop(int cpu) +{ +} + +static inline void sched_clock_tick_start(int cpu) +{ +} +#endif + +#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ +extern void sched_clock_init(void); +extern u64 sched_clock_cpu(int cpu); extern void sched_clock_tick(void); extern void sched_clock_idle_sleep_event(void); extern void 
sched_clock_idle_wakeup_event(u64 delta_ns); +#ifdef CONFIG_NO_HZ +extern void sched_clock_tick_stop(int cpu); +extern void sched_clock_tick_start(int cpu); #endif +#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ /* * For kernel-internal use: high-speed (but slightly incorrect) per-cpu diff --git a/trunk/include/linux/spinlock.h b/trunk/include/linux/spinlock.h index e0c0fccced46..61e5610ad165 100644 --- a/trunk/include/linux/spinlock.h +++ b/trunk/include/linux/spinlock.h @@ -183,14 +183,8 @@ do { \ #ifdef CONFIG_DEBUG_LOCK_ALLOC # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) -# define spin_lock_nest_lock(lock, nest_lock) \ - do { \ - typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ - _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ - } while (0) #else # define spin_lock_nested(lock, subclass) _spin_lock(lock) -# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) #endif #define write_lock(lock) _write_lock(lock) diff --git a/trunk/include/linux/spinlock_api_smp.h b/trunk/include/linux/spinlock_api_smp.h index d79845d034b5..8a2307ce7296 100644 --- a/trunk/include/linux/spinlock_api_smp.h +++ b/trunk/include/linux/spinlock_api_smp.h @@ -22,8 +22,6 @@ int in_lock_functions(unsigned long addr); void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock); -void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) - __acquires(lock); void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); diff --git a/trunk/kernel/Kconfig.hz b/trunk/kernel/Kconfig.hz index 94fabd534b03..382dd5a8b2d7 100644 --- a/trunk/kernel/Kconfig.hz +++ b/trunk/kernel/Kconfig.hz @@ -55,4 +55,4 @@ config HZ default 1000 if HZ_1000 config SCHED_HRTICK - def_bool HIGH_RES_TIMERS && (!SMP || USE_GENERIC_SMP_HELPERS) + def_bool HIGH_RES_TIMERS && USE_GENERIC_SMP_HELPERS diff --git a/trunk/kernel/cpu.c b/trunk/kernel/cpu.c index c977c339f559..e202a68d1cc1 100644 --- a/trunk/kernel/cpu.c +++ b/trunk/kernel/cpu.c @@ -349,8 +349,6 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) goto out_notify; BUG_ON(!cpu_online(cpu)); - cpu_set(cpu, cpu_active_map); - /* Now call notifier in preparation. 
*/ raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu); @@ -385,6 +383,9 @@ int __cpuinit cpu_up(unsigned int cpu) err = _cpu_up(cpu, 0); + if (cpu_online(cpu)) + cpu_set(cpu, cpu_active_map); + out: cpu_maps_update_done(); return err; diff --git a/trunk/kernel/lockdep.c b/trunk/kernel/lockdep.c index 1aa91fd6b06e..d38a64362973 100644 --- a/trunk/kernel/lockdep.c +++ b/trunk/kernel/lockdep.c @@ -124,15 +124,6 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; unsigned long nr_lock_classes; static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; -static inline struct lock_class *hlock_class(struct held_lock *hlock) -{ - if (!hlock->class_idx) { - DEBUG_LOCKS_WARN_ON(1); - return NULL; - } - return lock_classes + hlock->class_idx - 1; -} - #ifdef CONFIG_LOCK_STAT static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); @@ -231,7 +222,7 @@ static void lock_release_holdtime(struct held_lock *hlock) holdtime = sched_clock() - hlock->holdtime_stamp; - stats = get_lock_stats(hlock_class(hlock)); + stats = get_lock_stats(hlock->class); if (hlock->read) lock_time_inc(&stats->read_holdtime, holdtime); else @@ -381,19 +372,6 @@ unsigned int nr_process_chains; unsigned int max_lockdep_depth; unsigned int max_recursion_depth; -static unsigned int lockdep_dependency_gen_id; - -static bool lockdep_dependency_visit(struct lock_class *source, - unsigned int depth) -{ - if (!depth) - lockdep_dependency_gen_id++; - if (source->dep_gen_id == lockdep_dependency_gen_id) - return true; - source->dep_gen_id = lockdep_dependency_gen_id; - return false; -} - #ifdef CONFIG_DEBUG_LOCKDEP /* * We cannot printk in early bootup code. Not even early_printk() @@ -527,7 +505,7 @@ static void print_lockdep_cache(struct lockdep_map *lock) static void print_lock(struct held_lock *hlock) { - print_lock_name(hlock_class(hlock)); + print_lock_name(hlock->class); printk(", at: "); print_ip_sym(hlock->acquire_ip); } @@ -580,9 +558,6 @@ static void print_lock_dependencies(struct lock_class *class, int depth) { struct lock_list *entry; - if (lockdep_dependency_visit(class, depth)) - return; - if (DEBUG_LOCKS_WARN_ON(depth >= 20)) return; @@ -957,7 +932,7 @@ static noinline int print_circular_bug_tail(void) if (debug_locks_silent) return 0; - this.class = hlock_class(check_source); + this.class = check_source->class; if (!save_trace(&this.trace)) return 0; @@ -984,67 +959,6 @@ static int noinline print_infinite_recursion_bug(void) return 0; } -unsigned long __lockdep_count_forward_deps(struct lock_class *class, - unsigned int depth) -{ - struct lock_list *entry; - unsigned long ret = 1; - - if (lockdep_dependency_visit(class, depth)) - return 0; - - /* - * Recurse this class's dependency list: - */ - list_for_each_entry(entry, &class->locks_after, entry) - ret += __lockdep_count_forward_deps(entry->class, depth + 1); - - return ret; -} - -unsigned long lockdep_count_forward_deps(struct lock_class *class) -{ - unsigned long ret, flags; - - local_irq_save(flags); - __raw_spin_lock(&lockdep_lock); - ret = __lockdep_count_forward_deps(class, 0); - __raw_spin_unlock(&lockdep_lock); - local_irq_restore(flags); - - return ret; -} - -unsigned long __lockdep_count_backward_deps(struct lock_class *class, - unsigned int depth) -{ - struct lock_list *entry; - unsigned long ret = 1; - - if (lockdep_dependency_visit(class, depth)) - return 0; - /* - * Recurse this class's dependency list: - */ - list_for_each_entry(entry, &class->locks_before, entry) - ret += __lockdep_count_backward_deps(entry->class, 
depth + 1); - - return ret; -} - -unsigned long lockdep_count_backward_deps(struct lock_class *class) -{ - unsigned long ret, flags; - - local_irq_save(flags); - __raw_spin_lock(&lockdep_lock); - ret = __lockdep_count_backward_deps(class, 0); - __raw_spin_unlock(&lockdep_lock); - local_irq_restore(flags); - - return ret; -} - /* * Prove that the dependency graph starting at can not * lead to . Print an error and return 0 if it does. @@ -1054,9 +968,6 @@ check_noncircular(struct lock_class *source, unsigned int depth) { struct lock_list *entry; - if (lockdep_dependency_visit(source, depth)) - return 1; - debug_atomic_inc(&nr_cyclic_check_recursions); if (depth > max_recursion_depth) max_recursion_depth = depth; @@ -1066,7 +977,7 @@ check_noncircular(struct lock_class *source, unsigned int depth) * Check this lock's dependency list: */ list_for_each_entry(entry, &source->locks_after, entry) { - if (entry->class == hlock_class(check_target)) + if (entry->class == check_target->class) return print_circular_bug_header(entry, depth+1); debug_atomic_inc(&nr_cyclic_checks); if (!check_noncircular(entry->class, depth+1)) @@ -1100,9 +1011,6 @@ find_usage_forwards(struct lock_class *source, unsigned int depth) struct lock_list *entry; int ret; - if (lockdep_dependency_visit(source, depth)) - return 1; - if (depth > max_recursion_depth) max_recursion_depth = depth; if (depth >= RECURSION_LIMIT) @@ -1142,9 +1050,6 @@ find_usage_backwards(struct lock_class *source, unsigned int depth) struct lock_list *entry; int ret; - if (lockdep_dependency_visit(source, depth)) - return 1; - if (!__raw_spin_is_locked(&lockdep_lock)) return DEBUG_LOCKS_WARN_ON(1); @@ -1159,11 +1064,6 @@ find_usage_backwards(struct lock_class *source, unsigned int depth) return 2; } - if (!source && debug_locks_off_graph_unlock()) { - WARN_ON(1); - return 0; - } - /* * Check this lock's dependency list: */ @@ -1203,9 +1103,9 @@ print_bad_irq_dependency(struct task_struct *curr, printk("\nand this task is already holding:\n"); print_lock(prev); printk("which would create a new lock dependency:\n"); - print_lock_name(hlock_class(prev)); + print_lock_name(prev->class); printk(" ->"); - print_lock_name(hlock_class(next)); + print_lock_name(next->class); printk("\n"); printk("\nbut this new dependency connects a %s-irq-safe lock:\n", @@ -1246,12 +1146,12 @@ check_usage(struct task_struct *curr, struct held_lock *prev, find_usage_bit = bit_backwards; /* fills in */ - ret = find_usage_backwards(hlock_class(prev), 0); + ret = find_usage_backwards(prev->class, 0); if (!ret || ret == 1) return ret; find_usage_bit = bit_forwards; - ret = find_usage_forwards(hlock_class(next), 0); + ret = find_usage_forwards(next->class, 0); if (!ret || ret == 1) return ret; /* ret == 2 */ @@ -1372,32 +1272,18 @@ check_deadlock(struct task_struct *curr, struct held_lock *next, struct lockdep_map *next_instance, int read) { struct held_lock *prev; - struct held_lock *nest = NULL; int i; for (i = 0; i < curr->lockdep_depth; i++) { prev = curr->held_locks + i; - - if (prev->instance == next->nest_lock) - nest = prev; - - if (hlock_class(prev) != hlock_class(next)) + if (prev->class != next->class) continue; - /* * Allow read-after-read recursion of the same * lock class (i.e. read_lock(lock)+read_lock(lock)): */ if ((read == 2) && prev->read) return 2; - - /* - * We're holding the nest_lock, which serializes this lock's - * nesting behaviour. 
- */ - if (nest) - return 2; - return print_deadlock_bug(curr, prev, next); } return 1; @@ -1443,7 +1329,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, */ check_source = next; check_target = prev; - if (!(check_noncircular(hlock_class(next), 0))) + if (!(check_noncircular(next->class, 0))) return print_circular_bug_tail(); if (!check_prev_add_irq(curr, prev, next)) @@ -1467,8 +1353,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, * chains - the second one will be new, but L1 already has * L2 added to its dependency list, due to the first chain.) */ - list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) { - if (entry->class == hlock_class(next)) { + list_for_each_entry(entry, &prev->class->locks_after, entry) { + if (entry->class == next->class) { if (distance == 1) entry->distance = 1; return 2; @@ -1479,28 +1365,26 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, * Ok, all validations passed, add the new lock * to the previous lock's dependency list: */ - ret = add_lock_to_list(hlock_class(prev), hlock_class(next), - &hlock_class(prev)->locks_after, - next->acquire_ip, distance); + ret = add_lock_to_list(prev->class, next->class, + &prev->class->locks_after, next->acquire_ip, distance); if (!ret) return 0; - ret = add_lock_to_list(hlock_class(next), hlock_class(prev), - &hlock_class(next)->locks_before, - next->acquire_ip, distance); + ret = add_lock_to_list(next->class, prev->class, + &next->class->locks_before, next->acquire_ip, distance); if (!ret) return 0; /* * Debugging printouts: */ - if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) { + if (verbose(prev->class) || verbose(next->class)) { graph_unlock(); printk("\n new dependency: "); - print_lock_name(hlock_class(prev)); + print_lock_name(prev->class); printk(" => "); - print_lock_name(hlock_class(next)); + print_lock_name(next->class); printk("\n"); dump_stack(); return graph_lock(); @@ -1597,7 +1481,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, struct held_lock *hlock, u64 chain_key) { - struct lock_class *class = hlock_class(hlock); + struct lock_class *class = hlock->class; struct list_head *hash_head = chainhashentry(chain_key); struct lock_chain *chain; struct held_lock *hlock_curr, *hlock_next; @@ -1670,7 +1554,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { chain->base = cn; for (j = 0; j < chain->depth - 1; j++, i++) { - int lock_id = curr->held_locks[i].class_idx - 1; + int lock_id = curr->held_locks[i].class - lock_classes; chain_hlocks[chain->base + j] = lock_id; } chain_hlocks[chain->base + j] = class - lock_classes; @@ -1766,7 +1650,7 @@ static void check_chain_key(struct task_struct *curr) WARN_ON(1); return; } - id = hlock->class_idx - 1; + id = hlock->class - lock_classes; if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) return; @@ -1811,7 +1695,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this, print_lock(this); printk("{%s} state was registered at:\n", usage_str[prev_bit]); - print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1); + print_stack_trace(this->class->usage_traces + prev_bit, 1); print_irqtrace_events(curr); printk("\nother info that might help us debug this:\n"); @@ -1830,7 +1714,7 @@ static inline int valid_state(struct task_struct *curr, struct held_lock *this, enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) { - if (unlikely(hlock_class(this)->usage_mask & (1 << 
bad_bit))) + if (unlikely(this->class->usage_mask & (1 << bad_bit))) return print_usage_bug(curr, this, bad_bit, new_bit); return 1; } @@ -1869,7 +1753,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, lockdep_print_held_locks(curr); printk("\nthe first lock's dependencies:\n"); - print_lock_dependencies(hlock_class(this), 0); + print_lock_dependencies(this->class, 0); printk("\nthe second lock's dependencies:\n"); print_lock_dependencies(other, 0); @@ -1892,7 +1776,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this, find_usage_bit = bit; /* fills in */ - ret = find_usage_forwards(hlock_class(this), 0); + ret = find_usage_forwards(this->class, 0); if (!ret || ret == 1) return ret; @@ -1911,7 +1795,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this, find_usage_bit = bit; /* fills in */ - ret = find_usage_backwards(hlock_class(this), 0); + ret = find_usage_backwards(this->class, 0); if (!ret || ret == 1) return ret; @@ -1977,7 +1861,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, LOCK_ENABLED_HARDIRQS_READ, "hard-read")) return 0; #endif - if (hardirq_verbose(hlock_class(this))) + if (hardirq_verbose(this->class)) ret = 2; break; case LOCK_USED_IN_SOFTIRQ: @@ -2002,7 +1886,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, LOCK_ENABLED_SOFTIRQS_READ, "soft-read")) return 0; #endif - if (softirq_verbose(hlock_class(this))) + if (softirq_verbose(this->class)) ret = 2; break; case LOCK_USED_IN_HARDIRQ_READ: @@ -2015,7 +1899,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, if (!check_usage_forwards(curr, this, LOCK_ENABLED_HARDIRQS, "hard")) return 0; - if (hardirq_verbose(hlock_class(this))) + if (hardirq_verbose(this->class)) ret = 2; break; case LOCK_USED_IN_SOFTIRQ_READ: @@ -2028,7 +1912,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, if (!check_usage_forwards(curr, this, LOCK_ENABLED_SOFTIRQS, "soft")) return 0; - if (softirq_verbose(hlock_class(this))) + if (softirq_verbose(this->class)) ret = 2; break; case LOCK_ENABLED_HARDIRQS: @@ -2054,7 +1938,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, LOCK_USED_IN_HARDIRQ_READ, "hard-read")) return 0; #endif - if (hardirq_verbose(hlock_class(this))) + if (hardirq_verbose(this->class)) ret = 2; break; case LOCK_ENABLED_SOFTIRQS: @@ -2080,7 +1964,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, LOCK_USED_IN_SOFTIRQ_READ, "soft-read")) return 0; #endif - if (softirq_verbose(hlock_class(this))) + if (softirq_verbose(this->class)) ret = 2; break; case LOCK_ENABLED_HARDIRQS_READ: @@ -2095,7 +1979,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, LOCK_USED_IN_HARDIRQ, "hard")) return 0; #endif - if (hardirq_verbose(hlock_class(this))) + if (hardirq_verbose(this->class)) ret = 2; break; case LOCK_ENABLED_SOFTIRQS_READ: @@ -2110,7 +1994,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, LOCK_USED_IN_SOFTIRQ, "soft")) return 0; #endif - if (softirq_verbose(hlock_class(this))) + if (softirq_verbose(this->class)) ret = 2; break; default: @@ -2426,7 +2310,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, * If already set then do not dirty the cacheline, * nor do any checks: */ - if (likely(hlock_class(this)->usage_mask & new_mask)) + if (likely(this->class->usage_mask & new_mask)) return 1; if (!graph_lock()) @@ 
-2434,14 +2318,14 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, /* * Make sure we didnt race: */ - if (unlikely(hlock_class(this)->usage_mask & new_mask)) { + if (unlikely(this->class->usage_mask & new_mask)) { graph_unlock(); return 1; } - hlock_class(this)->usage_mask |= new_mask; + this->class->usage_mask |= new_mask; - if (!save_trace(hlock_class(this)->usage_traces + new_bit)) + if (!save_trace(this->class->usage_traces + new_bit)) return 0; switch (new_bit) { @@ -2521,7 +2405,7 @@ EXPORT_SYMBOL_GPL(lockdep_init_map); */ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, int trylock, int read, int check, int hardirqs_off, - struct lockdep_map *nest_lock, unsigned long ip) + unsigned long ip) { struct task_struct *curr = current; struct lock_class *class = NULL; @@ -2575,12 +2459,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, return 0; hlock = curr->held_locks + depth; - if (DEBUG_LOCKS_WARN_ON(!class)) - return 0; - hlock->class_idx = class - lock_classes + 1; + + hlock->class = class; hlock->acquire_ip = ip; hlock->instance = lock; - hlock->nest_lock = nest_lock; hlock->trylock = trylock; hlock->read = read; hlock->check = check; @@ -2692,55 +2574,6 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock, return 1; } -static int -__lock_set_subclass(struct lockdep_map *lock, - unsigned int subclass, unsigned long ip) -{ - struct task_struct *curr = current; - struct held_lock *hlock, *prev_hlock; - struct lock_class *class; - unsigned int depth; - int i; - - depth = curr->lockdep_depth; - if (DEBUG_LOCKS_WARN_ON(!depth)) - return 0; - - prev_hlock = NULL; - for (i = depth-1; i >= 0; i--) { - hlock = curr->held_locks + i; - /* - * We must not cross into another context: - */ - if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) - break; - if (hlock->instance == lock) - goto found_it; - prev_hlock = hlock; - } - return print_unlock_inbalance_bug(curr, lock, ip); - -found_it: - class = register_lock_class(lock, subclass, 0); - hlock->class_idx = class - lock_classes + 1; - - curr->lockdep_depth = i; - curr->curr_chain_key = hlock->prev_chain_key; - - for (; i < depth; i++) { - hlock = curr->held_locks + i; - if (!__lock_acquire(hlock->instance, - hlock_class(hlock)->subclass, hlock->trylock, - hlock->read, hlock->check, hlock->hardirqs_off, - hlock->nest_lock, hlock->acquire_ip)) - return 0; - } - - if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) - return 0; - return 1; -} - /* * Remove the lock to the list of currently held locks in a * potentially non-nested (out of order) manner. 
This is a @@ -2791,9 +2624,9 @@ lock_release_non_nested(struct task_struct *curr, for (i++; i < depth; i++) { hlock = curr->held_locks + i; if (!__lock_acquire(hlock->instance, - hlock_class(hlock)->subclass, hlock->trylock, + hlock->class->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, - hlock->nest_lock, hlock->acquire_ip)) + hlock->acquire_ip)) return 0; } @@ -2836,7 +2669,7 @@ static int lock_release_nested(struct task_struct *curr, #ifdef CONFIG_DEBUG_LOCKDEP hlock->prev_chain_key = 0; - hlock->class_idx = 0; + hlock->class = NULL; hlock->acquire_ip = 0; hlock->irq_context = 0; #endif @@ -2905,36 +2738,18 @@ static void check_flags(unsigned long flags) #endif } -void -lock_set_subclass(struct lockdep_map *lock, - unsigned int subclass, unsigned long ip) -{ - unsigned long flags; - - if (unlikely(current->lockdep_recursion)) - return; - - raw_local_irq_save(flags); - current->lockdep_recursion = 1; - check_flags(flags); - if (__lock_set_subclass(lock, subclass, ip)) - check_chain_key(current); - current->lockdep_recursion = 0; - raw_local_irq_restore(flags); -} - -EXPORT_SYMBOL_GPL(lock_set_subclass); - /* * We are not always called with irqs disabled - do that here, * and also avoid lockdep recursion: */ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, - int trylock, int read, int check, - struct lockdep_map *nest_lock, unsigned long ip) + int trylock, int read, int check, unsigned long ip) { unsigned long flags; + if (unlikely(!lock_stat && !prove_locking)) + return; + if (unlikely(current->lockdep_recursion)) return; @@ -2943,7 +2758,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, current->lockdep_recursion = 1; __lock_acquire(lock, subclass, trylock, read, check, - irqs_disabled_flags(flags), nest_lock, ip); + irqs_disabled_flags(flags), ip); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } @@ -2955,6 +2770,9 @@ void lock_release(struct lockdep_map *lock, int nested, { unsigned long flags; + if (unlikely(!lock_stat && !prove_locking)) + return; + if (unlikely(current->lockdep_recursion)) return; @@ -3027,9 +2845,9 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip) found_it: hlock->waittime_stamp = sched_clock(); - point = lock_contention_point(hlock_class(hlock), ip); + point = lock_contention_point(hlock->class, ip); - stats = get_lock_stats(hlock_class(hlock)); + stats = get_lock_stats(hlock->class); if (point < ARRAY_SIZE(stats->contention_point)) stats->contention_point[i]++; if (lock->cpu != smp_processor_id()) @@ -3075,7 +2893,7 @@ __lock_acquired(struct lockdep_map *lock) hlock->holdtime_stamp = now; } - stats = get_lock_stats(hlock_class(hlock)); + stats = get_lock_stats(hlock->class); if (waittime) { if (hlock->read) lock_time_inc(&stats->read_waittime, waittime); @@ -3170,7 +2988,6 @@ static void zap_class(struct lock_class *class) list_del_rcu(&class->hash_entry); list_del_rcu(&class->lock_entry); - class->key = NULL; } static inline int within(const void *addr, void *start, unsigned long size) diff --git a/trunk/kernel/lockdep_internals.h b/trunk/kernel/lockdep_internals.h index 55db193d366d..c3600a091a28 100644 --- a/trunk/kernel/lockdep_internals.h +++ b/trunk/kernel/lockdep_internals.h @@ -17,6 +17,9 @@ */ #define MAX_LOCKDEP_ENTRIES 8192UL +#define MAX_LOCKDEP_KEYS_BITS 11 +#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) + #define MAX_LOCKDEP_CHAINS_BITS 14 #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) @@ -50,9 +53,6 @@ extern unsigned int 
nr_process_chains; extern unsigned int max_lockdep_depth; extern unsigned int max_recursion_depth; -extern unsigned long lockdep_count_forward_deps(struct lock_class *); -extern unsigned long lockdep_count_backward_deps(struct lock_class *); - #ifdef CONFIG_DEBUG_LOCKDEP /* * Various lockdep statistics: diff --git a/trunk/kernel/lockdep_proc.c b/trunk/kernel/lockdep_proc.c index fa19aee604c2..9b0e940e2545 100644 --- a/trunk/kernel/lockdep_proc.c +++ b/trunk/kernel/lockdep_proc.c @@ -63,6 +63,34 @@ static void l_stop(struct seq_file *m, void *v) { } +static unsigned long count_forward_deps(struct lock_class *class) +{ + struct lock_list *entry; + unsigned long ret = 1; + + /* + * Recurse this class's dependency list: + */ + list_for_each_entry(entry, &class->locks_after, entry) + ret += count_forward_deps(entry->class); + + return ret; +} + +static unsigned long count_backward_deps(struct lock_class *class) +{ + struct lock_list *entry; + unsigned long ret = 1; + + /* + * Recurse this class's dependency list: + */ + list_for_each_entry(entry, &class->locks_before, entry) + ret += count_backward_deps(entry->class); + + return ret; +} + static void print_name(struct seq_file *m, struct lock_class *class) { char str[128]; @@ -96,10 +124,10 @@ static int l_show(struct seq_file *m, void *v) #ifdef CONFIG_DEBUG_LOCKDEP seq_printf(m, " OPS:%8ld", class->ops); #endif - nr_forward_deps = lockdep_count_forward_deps(class); + nr_forward_deps = count_forward_deps(class); seq_printf(m, " FD:%5ld", nr_forward_deps); - nr_backward_deps = lockdep_count_backward_deps(class); + nr_backward_deps = count_backward_deps(class); seq_printf(m, " BD:%5ld", nr_backward_deps); get_usage_chars(class, &c1, &c2, &c3, &c4); @@ -201,9 +229,6 @@ static int lc_show(struct seq_file *m, void *v) for (i = 0; i < chain->depth; i++) { class = lock_chain_get_class(chain, i); - if (!class->key) - continue; - seq_printf(m, "[%p] ", class->key); print_name(m, class); seq_puts(m, "\n"); @@ -325,7 +350,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v) if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) nr_hardirq_read_unsafe++; - sum_forward_deps += lockdep_count_forward_deps(class); + sum_forward_deps += count_forward_deps(class); } #ifdef CONFIG_DEBUG_LOCKDEP DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused); diff --git a/trunk/kernel/posix-timers.c b/trunk/kernel/posix-timers.c index e36d5798cbff..9a21681aa80f 100644 --- a/trunk/kernel/posix-timers.c +++ b/trunk/kernel/posix-timers.c @@ -289,29 +289,21 @@ void do_schedule_next_timer(struct siginfo *info) else schedule_next_timer(timr); - info->si_overrun += timr->it_overrun_last; + info->si_overrun = timr->it_overrun_last; } if (timr) unlock_timer(timr, flags); } -int posix_timer_event(struct k_itimer *timr, int si_private) +int posix_timer_event(struct k_itimer *timr,int si_private) { - /* - * FIXME: if ->sigq is queued we can race with - * dequeue_signal()->do_schedule_next_timer(). - * - * If dequeue_signal() sees the "right" value of - * si_sys_private it calls do_schedule_next_timer(). - * We re-queue ->sigq and drop ->it_lock(). - * do_schedule_next_timer() locks the timer - * and re-schedules it while ->sigq is pending. - * Not really bad, but not that we want. 
- */ + memset(&timr->sigq->info, 0, sizeof(siginfo_t)); timr->sigq->info.si_sys_private = si_private; + /* Send signal to the process that owns this timer.*/ timr->sigq->info.si_signo = timr->it_sigev_signo; + timr->sigq->info.si_errno = 0; timr->sigq->info.si_code = SI_TIMER; timr->sigq->info.si_tid = timr->it_id; timr->sigq->info.si_value = timr->it_sigev_value; @@ -443,7 +435,6 @@ static struct k_itimer * alloc_posix_timer(void) kmem_cache_free(posix_timers_cache, tmr); tmr = NULL; } - memset(&tmr->sigq->info, 0, sizeof(siginfo_t)); return tmr; } diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c index d601fb0406ca..04160d277e7a 100644 --- a/trunk/kernel/sched.c +++ b/trunk/kernel/sched.c @@ -600,6 +600,7 @@ struct rq { /* BKL stats */ unsigned int bkl_count; #endif + struct lock_class_key rq_lock_key; }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); @@ -833,7 +834,7 @@ static inline u64 global_rt_period(void) static inline u64 global_rt_runtime(void) { - if (sysctl_sched_rt_runtime < 0) + if (sysctl_sched_rt_period < 0) return RUNTIME_INF; return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; @@ -2758,10 +2759,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2) } else { if (rq1 < rq2) { spin_lock(&rq1->lock); - spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); + spin_lock(&rq2->lock); } else { spin_lock(&rq2->lock); - spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); + spin_lock(&rq1->lock); } } update_rq_clock(rq1); @@ -2804,21 +2805,14 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest) if (busiest < this_rq) { spin_unlock(&this_rq->lock); spin_lock(&busiest->lock); - spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); + spin_lock(&this_rq->lock); ret = 1; } else - spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); + spin_lock(&busiest->lock); } return ret; } -static void double_unlock_balance(struct rq *this_rq, struct rq *busiest) - __releases(busiest->lock) -{ - spin_unlock(&busiest->lock); - lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); -} - /* * If dest_cpu is allowed for this process, migrate the task to it. * This is accomplished by forcing the cpu_allowed mask to only @@ -3643,7 +3637,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, ld_moved = move_tasks(this_rq, this_cpu, busiest, imbalance, sd, CPU_NEWLY_IDLE, &all_pinned); - double_unlock_balance(this_rq, busiest); + spin_unlock(&busiest->lock); if (unlikely(all_pinned)) { cpu_clear(cpu_of(busiest), *cpus); @@ -3758,7 +3752,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) else schedstat_inc(sd, alb_failed); } - double_unlock_balance(busiest_rq, target_rq); + spin_unlock(&target_rq->lock); } #ifdef CONFIG_NO_HZ @@ -8006,6 +8000,7 @@ void __init sched_init(void) rq = cpu_rq(i); spin_lock_init(&rq->lock); + lockdep_set_class(&rq->lock, &rq->rq_lock_key); rq->nr_running = 0; init_cfs_rq(&rq->cfs, rq); init_rt_rq(&rq->rt, rq); diff --git a/trunk/kernel/sched_clock.c b/trunk/kernel/sched_clock.c index 204991a0bfa7..22ed55d1167f 100644 --- a/trunk/kernel/sched_clock.c +++ b/trunk/kernel/sched_clock.c @@ -32,20 +32,14 @@ #include #include -/* - * Scheduler clock - returns current time in nanosec units. - * This is default implementation. - * Architectures and sub-architectures can override this. 
- */ -unsigned long long __attribute__((weak)) sched_clock(void) -{ - return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); -} - -static __read_mostly int sched_clock_running; #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK +#define MULTI_SHIFT 15 +/* Max is double, Min is 1/2 */ +#define MAX_MULTI (2LL << MULTI_SHIFT) +#define MIN_MULTI (1LL << (MULTI_SHIFT-1)) + struct sched_clock_data { /* * Raw spinlock - this is a special case: this might be called @@ -55,9 +49,14 @@ struct sched_clock_data { raw_spinlock_t lock; unsigned long tick_jiffies; + u64 prev_raw; u64 tick_raw; u64 tick_gtod; u64 clock; + s64 multi; +#ifdef CONFIG_NO_HZ + int check_max; +#endif }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data); @@ -72,6 +71,8 @@ static inline struct sched_clock_data *cpu_sdc(int cpu) return &per_cpu(sched_clock_data, cpu); } +static __read_mostly int sched_clock_running; + void sched_clock_init(void) { u64 ktime_now = ktime_to_ns(ktime_get()); @@ -83,39 +84,90 @@ void sched_clock_init(void) scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; scd->tick_jiffies = now_jiffies; + scd->prev_raw = 0; scd->tick_raw = 0; scd->tick_gtod = ktime_now; scd->clock = ktime_now; + scd->multi = 1 << MULTI_SHIFT; +#ifdef CONFIG_NO_HZ + scd->check_max = 1; +#endif } sched_clock_running = 1; } +#ifdef CONFIG_NO_HZ +/* + * The dynamic ticks makes the delta jiffies inaccurate. This + * prevents us from checking the maximum time update. + * Disable the maximum check during stopped ticks. + */ +void sched_clock_tick_stop(int cpu) +{ + struct sched_clock_data *scd = cpu_sdc(cpu); + + scd->check_max = 0; +} + +void sched_clock_tick_start(int cpu) +{ + struct sched_clock_data *scd = cpu_sdc(cpu); + + scd->check_max = 1; +} + +static int check_max(struct sched_clock_data *scd) +{ + return scd->check_max; +} +#else +static int check_max(struct sched_clock_data *scd) +{ + return 1; +} +#endif /* CONFIG_NO_HZ */ + /* * update the percpu scd from the raw @now value * * - filter out backward motion * - use jiffies to generate a min,max window to clip the raw values */ -static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now) +static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time) { unsigned long now_jiffies = jiffies; long delta_jiffies = now_jiffies - scd->tick_jiffies; u64 clock = scd->clock; u64 min_clock, max_clock; - s64 delta = now - scd->tick_raw; + s64 delta = now - scd->prev_raw; WARN_ON_ONCE(!irqs_disabled()); - min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC; + + /* + * At schedule tick the clock can be just under the gtod. We don't + * want to push it too prematurely. + */ + min_clock = scd->tick_gtod + (delta_jiffies * TICK_NSEC); + if (min_clock > TICK_NSEC) + min_clock -= TICK_NSEC / 2; if (unlikely(delta < 0)) { clock++; goto out; } - max_clock = min_clock + TICK_NSEC; + /* + * The clock must stay within a jiffie of the gtod. + * But since we may be at the start of a jiffy or the end of one + * we add another jiffy buffer. 
+ */ + max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC; + + delta *= scd->multi; + delta >>= MULTI_SHIFT; - if (unlikely(clock + delta > max_clock)) { + if (unlikely(clock + delta > max_clock) && check_max(scd)) { if (clock < max_clock) clock = max_clock; else @@ -128,10 +180,12 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now) if (unlikely(clock < min_clock)) clock = min_clock; - scd->tick_jiffies = now_jiffies; - scd->clock = clock; - - return clock; + if (time) + *time = clock; + else { + scd->prev_raw = now; + scd->clock = clock; + } } static void lock_double_clock(struct sched_clock_data *data1, @@ -149,7 +203,7 @@ static void lock_double_clock(struct sched_clock_data *data1, u64 sched_clock_cpu(int cpu) { struct sched_clock_data *scd = cpu_sdc(cpu); - u64 now, clock, this_clock, remote_clock; + u64 now, clock; if (unlikely(!sched_clock_running)) return 0ull; @@ -158,44 +212,43 @@ u64 sched_clock_cpu(int cpu) now = sched_clock(); if (cpu != raw_smp_processor_id()) { + /* + * in order to update a remote cpu's clock based on our + * unstable raw time rebase it against: + * tick_raw (offset between raw counters) + * tick_gotd (tick offset between cpus) + */ struct sched_clock_data *my_scd = this_scd(); lock_double_clock(scd, my_scd); - this_clock = __update_sched_clock(my_scd, now); - remote_clock = scd->clock; + now -= my_scd->tick_raw; + now += scd->tick_raw; - /* - * Use the opportunity that we have both locks - * taken to couple the two clocks: we take the - * larger time as the latest time for both - * runqueues. (this creates monotonic movement) - */ - if (likely(remote_clock < this_clock)) { - clock = this_clock; - scd->clock = clock; - } else { - /* - * Should be rare, but possible: - */ - clock = remote_clock; - my_scd->clock = remote_clock; - } + now += my_scd->tick_gtod; + now -= scd->tick_gtod; __raw_spin_unlock(&my_scd->lock); + + __update_sched_clock(scd, now, &clock); + + __raw_spin_unlock(&scd->lock); + } else { __raw_spin_lock(&scd->lock); - clock = __update_sched_clock(scd, now); + __update_sched_clock(scd, now, NULL); + clock = scd->clock; + __raw_spin_unlock(&scd->lock); } - __raw_spin_unlock(&scd->lock); - return clock; } void sched_clock_tick(void) { struct sched_clock_data *scd = this_scd(); + unsigned long now_jiffies = jiffies; + s64 mult, delta_gtod, delta_raw; u64 now, now_gtod; if (unlikely(!sched_clock_running)) @@ -207,14 +260,29 @@ void sched_clock_tick(void) now = sched_clock(); __raw_spin_lock(&scd->lock); - __update_sched_clock(scd, now); + __update_sched_clock(scd, now, NULL); /* * update tick_gtod after __update_sched_clock() because that will * already observe 1 new jiffy; adding a new tick_gtod to that would * increase the clock 2 jiffies. 
*/ + delta_gtod = now_gtod - scd->tick_gtod; + delta_raw = now - scd->tick_raw; + + if ((long)delta_raw > 0) { + mult = delta_gtod << MULTI_SHIFT; + do_div(mult, delta_raw); + scd->multi = mult; + if (scd->multi > MAX_MULTI) + scd->multi = MAX_MULTI; + else if (scd->multi < MIN_MULTI) + scd->multi = MIN_MULTI; + } else + scd->multi = 1 << MULTI_SHIFT; + scd->tick_raw = now; scd->tick_gtod = now_gtod; + scd->tick_jiffies = now_jiffies; __raw_spin_unlock(&scd->lock); } @@ -233,6 +301,7 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event); void sched_clock_idle_wakeup_event(u64 delta_ns) { struct sched_clock_data *scd = this_scd(); + u64 now = sched_clock(); /* * Override the previous timestamp and ignore all @@ -241,30 +310,27 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) * rq clock: */ __raw_spin_lock(&scd->lock); + scd->prev_raw = now; scd->clock += delta_ns; + scd->multi = 1 << MULTI_SHIFT; __raw_spin_unlock(&scd->lock); touch_softlockup_watchdog(); } EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); -#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ - -void sched_clock_init(void) -{ - sched_clock_running = 1; -} +#endif -u64 sched_clock_cpu(int cpu) +/* + * Scheduler clock - returns current time in nanosec units. + * This is default implementation. + * Architectures and sub-architectures can override this. + */ +unsigned long long __attribute__((weak)) sched_clock(void) { - if (unlikely(!sched_clock_running)) - return 0; - - return sched_clock(); + return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); } -#endif - unsigned long long cpu_clock(int cpu) { unsigned long long clock; diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c index fb8994c6d4bb..cf2cd6ce4cb2 100644 --- a/trunk/kernel/sched_fair.c +++ b/trunk/kernel/sched_fair.c @@ -899,7 +899,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) * doesn't make sense. Rely on vruntime for fairness. 
*/ if (rq->curr != p) - delta = max_t(s64, 10000LL, delta); + delta = max(10000LL, delta); hrtick_start(rq, delta); } @@ -1442,23 +1442,18 @@ __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next) struct task_struct *p = NULL; struct sched_entity *se; - if (next == &cfs_rq->tasks) - return NULL; - - /* Skip over entities that are not tasks */ - do { + while (next != &cfs_rq->tasks) { se = list_entry(next, struct sched_entity, group_node); next = next->next; - } while (next != &cfs_rq->tasks && !entity_is_task(se)); - if (next == &cfs_rq->tasks) - return NULL; + /* Skip over entities that are not tasks */ + if (entity_is_task(se)) { + p = task_of(se); + break; + } + } cfs_rq->balance_iterator = next; - - if (entity_is_task(se)) - p = task_of(se); - return p; } diff --git a/trunk/kernel/sched_rt.c b/trunk/kernel/sched_rt.c index 6163e4cf885b..908c04f9dad0 100644 --- a/trunk/kernel/sched_rt.c +++ b/trunk/kernel/sched_rt.c @@ -861,8 +861,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) #define RT_MAX_TRIES 3 static int double_lock_balance(struct rq *this_rq, struct rq *busiest); -static void double_unlock_balance(struct rq *this_rq, struct rq *busiest); - static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep); static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) @@ -1024,7 +1022,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) break; /* try again */ - double_unlock_balance(rq, lowest_rq); + spin_unlock(&lowest_rq->lock); lowest_rq = NULL; } @@ -1093,7 +1091,7 @@ static int push_rt_task(struct rq *rq) resched_task(lowest_rq->curr); - double_unlock_balance(rq, lowest_rq); + spin_unlock(&lowest_rq->lock); ret = 1; out: @@ -1199,7 +1197,7 @@ static int pull_rt_task(struct rq *this_rq) } skip: - double_unlock_balance(this_rq, src_rq); + spin_unlock(&src_rq->lock); } return ret; diff --git a/trunk/kernel/signal.c b/trunk/kernel/signal.c index c539f60c6f41..954f77d7e3bc 100644 --- a/trunk/kernel/signal.c +++ b/trunk/kernel/signal.c @@ -1304,7 +1304,6 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) q->info.si_overrun++; goto out; } - q->info.si_overrun = 0; signalfd_notify(t, sig); pending = group ? &t->signal->shared_pending : &t->pending; diff --git a/trunk/kernel/smp.c b/trunk/kernel/smp.c index e6084f6efb4d..96fc7c0edc59 100644 --- a/trunk/kernel/smp.c +++ b/trunk/kernel/smp.c @@ -260,41 +260,6 @@ void __smp_call_function_single(int cpu, struct call_single_data *data) generic_exec_single(cpu, data); } -/* Dummy function */ -static void quiesce_dummy(void *unused) -{ -} - -/* - * Ensure stack based data used in call function mask is safe to free. - * - * This is needed by smp_call_function_mask when using on-stack data, because - * a single call function queue is shared by all CPUs, and any CPU may pick up - * the data item on the queue at any time before it is deleted. So we need to - * ensure that all CPUs have transitioned through a quiescent state after - * this call. - * - * This is a very slow function, implemented by sending synchronous IPIs to - * all possible CPUs. For this reason, we have to alloc data rather than use - * stack based data even in the case of synchronous calls. The stack based - * data is then just used for deadlock/oom fallback which will be very rare. - * - * If a faster scheme can be made, we could go back to preferring stack based - * data -- the data allocation/free is non-zero cost. 
- */ -static void smp_call_function_mask_quiesce_stack(cpumask_t mask) -{ - struct call_single_data data; - int cpu; - - data.func = quiesce_dummy; - data.info = NULL; - data.flags = CSD_FLAG_WAIT; - - for_each_cpu_mask(cpu, mask) - generic_exec_single(cpu, &data); -} - /** * smp_call_function_mask(): Run a function on a set of other CPUs. * @mask: The set of cpus to run on. @@ -320,7 +285,6 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, cpumask_t allbutself; unsigned long flags; int cpu, num_cpus; - int slowpath = 0; /* Can deadlock when called with interrupts disabled */ WARN_ON(irqs_disabled()); @@ -342,16 +306,15 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, return smp_call_function_single(cpu, func, info, wait); } - data = kmalloc(sizeof(*data), GFP_ATOMIC); - if (data) { - data->csd.flags = CSD_FLAG_ALLOC; - if (wait) - data->csd.flags |= CSD_FLAG_WAIT; - } else { + if (!wait) { + data = kmalloc(sizeof(*data), GFP_ATOMIC); + if (data) + data->csd.flags = CSD_FLAG_ALLOC; + } + if (!data) { data = &d; data->csd.flags = CSD_FLAG_WAIT; wait = 1; - slowpath = 1; } spin_lock_init(&data->lock); @@ -368,11 +331,8 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, arch_send_call_function_ipi(mask); /* optionally wait for the CPUs to complete */ - if (wait) { + if (wait) csd_flag_wait(&data->csd); - if (unlikely(slowpath)) - smp_call_function_mask_quiesce_stack(allbutself); - } return 0; } diff --git a/trunk/kernel/spinlock.c b/trunk/kernel/spinlock.c index 44baeea94ab9..a1fb54c93cdd 100644 --- a/trunk/kernel/spinlock.c +++ b/trunk/kernel/spinlock.c @@ -292,7 +292,6 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) } EXPORT_SYMBOL(_spin_lock_nested); - unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) { unsigned long flags; @@ -315,16 +314,6 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas EXPORT_SYMBOL(_spin_lock_irqsave_nested); -void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, - struct lockdep_map *nest_lock) -{ - preempt_disable(); - spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); -} - -EXPORT_SYMBOL(_spin_lock_nest_lock); - #endif void __lockfunc _spin_unlock(spinlock_t *lock) diff --git a/trunk/kernel/time/tick-sched.c b/trunk/kernel/time/tick-sched.c index f5da526424a9..825b4c00fe44 100644 --- a/trunk/kernel/time/tick-sched.c +++ b/trunk/kernel/time/tick-sched.c @@ -289,6 +289,7 @@ void tick_nohz_stop_sched_tick(int inidle) ts->tick_stopped = 1; ts->idle_jiffies = last_jiffies; rcu_enter_nohz(); + sched_clock_tick_stop(cpu); } /* @@ -391,6 +392,7 @@ void tick_nohz_restart_sched_tick(void) select_nohz_load_balancer(0); now = ktime_get(); tick_do_update_jiffies64(now); + sched_clock_tick_start(cpu); cpu_clear(cpu, nohz_cpu_mask); /* diff --git a/trunk/kernel/workqueue.c b/trunk/kernel/workqueue.c index 4048e92aa04f..4a26a1382df0 100644 --- a/trunk/kernel/workqueue.c +++ b/trunk/kernel/workqueue.c @@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) BUG_ON(get_wq_data(work) != cwq); work_clear_pending(work); - lock_map_acquire(&cwq->wq->lockdep_map); - lock_map_acquire(&lockdep_map); + lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); + lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_); f(work); - lock_map_release(&lockdep_map); - lock_map_release(&cwq->wq->lockdep_map); + 
lock_release(&lockdep_map, 1, _THIS_IP_); + lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_); if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " @@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq) int cpu; might_sleep(); - lock_map_acquire(&wq->lockdep_map); - lock_map_release(&wq->lockdep_map); + lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); + lock_release(&wq->lockdep_map, 1, _THIS_IP_); for_each_cpu_mask_nr(cpu, *cpu_map) flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); } @@ -441,8 +441,8 @@ int flush_work(struct work_struct *work) if (!cwq) return 0; - lock_map_acquire(&cwq->wq->lockdep_map); - lock_map_release(&cwq->wq->lockdep_map); + lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); + lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_); prev = NULL; spin_lock_irq(&cwq->lock); @@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work) might_sleep(); - lock_map_acquire(&work->lockdep_map); - lock_map_release(&work->lockdep_map); + lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_); + lock_release(&work->lockdep_map, 1, _THIS_IP_); cwq = get_wq_data(work); if (!cwq) @@ -872,8 +872,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) if (cwq->thread == NULL) return; - lock_map_acquire(&cwq->wq->lockdep_map); - lock_map_release(&cwq->wq->lockdep_map); + lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); + lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_); flush_cpu_workqueue(cwq); /* diff --git a/trunk/lib/debug_locks.c b/trunk/lib/debug_locks.c index 0218b4693dd8..0ef01d14727c 100644 --- a/trunk/lib/debug_locks.c +++ b/trunk/lib/debug_locks.c @@ -8,7 +8,6 @@ * * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar */ -#include #include #include #include @@ -38,7 +37,6 @@ int debug_locks_off(void) { if (xchg(&debug_locks, 0)) { if (!debug_locks_silent) { - oops_in_progress = 1; console_verbose(); return 1; } diff --git a/trunk/mm/mmap.c b/trunk/mm/mmap.c index 339cf5c4d5d8..971d0eda754a 100644 --- a/trunk/mm/mmap.c +++ b/trunk/mm/mmap.c @@ -2273,14 +2273,14 @@ int install_special_mapping(struct mm_struct *mm, static DEFINE_MUTEX(mm_all_locks_mutex); -static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) +static void vm_lock_anon_vma(struct anon_vma *anon_vma) { if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) { /* * The LSB of head.next can't change from under us * because we hold the mm_all_locks_mutex. */ - spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem); + spin_lock(&anon_vma->lock); /* * We can safely modify head.next after taking the * anon_vma->lock. 
If some other vma in this mm shares @@ -2296,7 +2296,7 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) } } -static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) +static void vm_lock_mapping(struct address_space *mapping) { if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { /* @@ -2310,7 +2310,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) */ if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) BUG(); - spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem); + spin_lock(&mapping->i_mmap_lock); } } @@ -2355,20 +2355,14 @@ int mm_take_all_locks(struct mm_struct *mm) mutex_lock(&mm_all_locks_mutex); - for (vma = mm->mmap; vma; vma = vma->vm_next) { - if (signal_pending(current)) - goto out_unlock; - if (vma->vm_file && vma->vm_file->f_mapping) - vm_lock_mapping(mm, vma->vm_file->f_mapping); - } - for (vma = mm->mmap; vma; vma = vma->vm_next) { if (signal_pending(current)) goto out_unlock; if (vma->anon_vma) - vm_lock_anon_vma(mm, vma->anon_vma); + vm_lock_anon_vma(vma->anon_vma); + if (vma->vm_file && vma->vm_file->f_mapping) + vm_lock_mapping(vma->vm_file->f_mapping); } - ret = 0; out_unlock:
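
For readers tracking the lockdep API change above: after this revert every annotation site calls the six-argument lock_acquire() directly, the arguments meaning (subclass, trylock, read, check, ip), with check == 2 requesting full validation per the lockdep.h comment, as the jbd and workqueue hunks show. A minimal userspace sketch of that calling convention -- stubs and printf in place of the real lockdep engine, _THIS_IP_ defined with the same GCC label-address trick the kernel uses:

#include <stdio.h>

/* Label-address trick for "current instruction pointer" (GCC/Clang
 * extension, mirroring the kernel's _THIS_IP_). */
#define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })

struct lockdep_map { const char *name; };

/* Stub with the post-revert six-argument signature; the real engine
 * validates lock ordering, this only traces the arguments. */
static void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                         int trylock, int read, int check, unsigned long ip)
{
        printf("acquire %s: subclass=%u trylock=%d read=%d check=%d ip=%#lx\n",
               lock->name, subclass, trylock, read, check, ip);
}

static void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
{
        printf("release %s: nested=%d ip=%#lx\n", lock->name, nested, ip);
}

int main(void)
{
        struct lockdep_map h_lockdep_map = { "jbd_handle" };

        /* The journal_start()/journal_stop() pattern from the patch:
         * subclass 0, no trylock, not a read acquire, check == 2. */
        lock_acquire(&h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
        /* ... handle held for the transaction's lifetime ... */
        lock_release(&h_lockdep_map, 1, _THIS_IP_);
        return 0;
}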
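
The lockdep.h hunk likewise replaces held_lock's packed bitfields (class_idx:13, irq_context:2, trylock:1, read:2, check:2, hardirqs_off:1) with a class pointer and plain ints. A compile-and-run comparison of the two layouts, bookkeeping fields only -- the real held_lock also carries chain keys and timestamps on both sides of the revert, and exact sizes are compiler- and arch-dependent:

#include <stdio.h>

struct held_packed {                    /* pre-revert layout */
        unsigned int class_idx:13;      /* MAX_LOCKDEP_KEYS_BITS */
        unsigned int irq_context:2;
        unsigned int trylock:1;
        unsigned int read:2;
        unsigned int check:2;
        unsigned int hardirqs_off:1;
};

struct held_plain {                     /* post-revert layout */
        void *class;
        int irq_context;
        int trylock;
        int read;
        int check;
        int hardirqs_off;
};

int main(void)
{
        printf("packed: %zu bytes, plain: %zu bytes\n",
               sizeof(struct held_packed), sizeof(struct held_plain));
        return 0;
}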
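
The revert also moves dependency counting back into lockdep_proc.c as plain recursion with no visited marking (the dep_gen_id / lockdep_dependency_visit machinery is deleted from lockdep.c above), so a class reachable along two paths is counted once per path. A toy illustration of that behavior on a three-class graph -- types simplified, and it assumes the graph is acyclic, which lockdep's circular-dependency check guarantees for the real one:

#include <stdio.h>

struct lock_class {
        const char *name;
        struct lock_class *after[4];    /* locks_after, NULL-terminated */
};

/* A class counts itself plus everything reachable through locks_after;
 * with no visited marking, shared classes are counted once per path. */
static unsigned long count_forward_deps(struct lock_class *class)
{
        unsigned long ret = 1;

        for (int i = 0; class->after[i]; i++)
                ret += count_forward_deps(class->after[i]);
        return ret;
}

int main(void)
{
        struct lock_class c = { "C", { NULL } };
        struct lock_class b = { "B", { &c, NULL } };
        struct lock_class a = { "A", { &b, &c, NULL } };

        /* 3 distinct classes, but C is reached twice: FD(A) == 4 */
        printf("FD(%s) = %lu\n", a.name, count_forward_deps(&a));
        return 0;
}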
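
The scheduler hunks drop the spin_lock_nested()/double_unlock_balance() annotations and return double_rq_lock() to plain spin_lock() calls, so deadlock avoidance rests entirely on always taking the lower-addressed runqueue lock first. A self-contained pthreads sketch of that address-ordering discipline (names are illustrative, not kernel API):

#include <stdint.h>
#include <pthread.h>

/* Always take the lower-addressed lock first so two threads locking
 * the same pair in opposite argument order cannot deadlock. */
static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
{
        if (a == b) {
                pthread_mutex_lock(a);
        } else if ((uintptr_t)a < (uintptr_t)b) {
                pthread_mutex_lock(a);
                pthread_mutex_lock(b);
        } else {
                pthread_mutex_lock(b);
                pthread_mutex_lock(a);
        }
}

static void double_unlock(pthread_mutex_t *a, pthread_mutex_t *b)
{
        pthread_mutex_unlock(a);
        if (b != a)
                pthread_mutex_unlock(b);
}

int main(void)
{
        pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

        double_lock(&m1, &m2);      /* same order regardless of arguments */
        double_unlock(&m1, &m2);
        return 0;
}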
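
Finally, the sched_clock.c hunks restore a Q15 fixed-point multiplier: each tick computes the ratio of gtod progress to raw-clock progress scaled by 1 << MULTI_SHIFT, clamps it to [0.5, 2.0], and scales subsequent raw deltas by it. A standalone sketch of the same arithmetic, with plain 64-bit division standing in for the kernel's do_div():

#include <stdint.h>
#include <stdio.h>

#define MULTI_SHIFT 15
#define MAX_MULTI   (2LL << MULTI_SHIFT)        /* 2.0 in Q15 */
#define MIN_MULTI   (1LL << (MULTI_SHIFT - 1))  /* 0.5 in Q15 */

/* Per-tick ratio of gtod progress to raw-clock progress, clamped. */
static int64_t compute_multi(int64_t delta_gtod, int64_t delta_raw)
{
        int64_t mult;

        if (delta_raw <= 0)
                return 1LL << MULTI_SHIFT;      /* fall back to 1.0 */

        mult = (delta_gtod << MULTI_SHIFT) / delta_raw;
        if (mult > MAX_MULTI)
                mult = MAX_MULTI;
        else if (mult < MIN_MULTI)
                mult = MIN_MULTI;
        return mult;
}

int main(void)
{
        /* raw clock ran 25% fast over the tick: deltas scale by ~0.8 */
        int64_t multi = compute_multi(1000000, 1250000);
        int64_t raw_delta = 100000;

        printf("multi=%lld scaled=%lld\n", (long long)multi,
               (long long)((raw_delta * multi) >> MULTI_SHIFT));
        return 0;
}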