From 7702c4fe317124812313b45be44ac46f3c65695a Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 15 Oct 2008 07:45:08 -0200 Subject: [PATCH] --- yaml --- r: 117978 b: refs/heads/master c: 6ad9f15c94822c3f067a7d443f3b414e08b34460 h: refs/heads/master v: v3 --- [refs] | 2 +- .../scheduler/sched-design-CFS.txt | 2 +- trunk/arch/s390/Kconfig | 16 +- trunk/arch/s390/appldata/appldata_base.c | 2 +- trunk/arch/s390/include/asm/kvm_virtio.h | 2 +- trunk/arch/s390/include/asm/mmu.h | 3 +- trunk/arch/s390/include/asm/mmu_context.h | 19 +- trunk/arch/s390/include/asm/pgtable.h | 8 +- trunk/arch/s390/include/asm/thread_info.h | 5 + trunk/arch/s390/kernel/smp.c | 24 ++- trunk/arch/s390/mm/pgtable.c | 16 +- trunk/arch/x86/include/asm/dma-mapping.h | 4 +- trunk/arch/x86/kernel/genx2apic_uv_x.c | 7 +- trunk/arch/x86/kernel/pci-swiotlb_64.c | 14 +- trunk/arch/x86/kvm/mmu.c | 1 + trunk/arch/x86/mm/init_64.c | 14 +- trunk/arch/x86/xen/mmu.c | 18 +- trunk/drivers/ata/ahci.c | 41 ++--- trunk/drivers/ata/ata_generic.c | 2 +- trunk/drivers/ata/ata_piix.c | 2 +- trunk/drivers/ata/libata-core.c | 35 ++-- trunk/drivers/ata/libata-eh.c | 14 +- trunk/drivers/ata/pata_acpi.c | 2 +- trunk/drivers/ata/pata_ali.c | 1 + trunk/drivers/ata/pata_amd.c | 1 + trunk/drivers/ata/pata_artop.c | 2 +- trunk/drivers/ata/pata_atiixp.c | 1 + trunk/drivers/ata/pata_cmd640.c | 1 + trunk/drivers/ata/pata_cmd64x.c | 2 +- trunk/drivers/ata/pata_cs5530.c | 1 + trunk/drivers/ata/pata_cs5535.c | 2 +- trunk/drivers/ata/pata_cypress.c | 2 +- trunk/drivers/ata/pata_efar.c | 2 +- trunk/drivers/ata/pata_isapnp.c | 2 +- trunk/drivers/ata/pata_it821x.c | 7 +- trunk/drivers/ata/pata_jmicron.c | 2 +- trunk/drivers/ata/pata_legacy.c | 2 +- trunk/drivers/ata/pata_marvell.c | 2 +- trunk/drivers/ata/pata_mpiix.c | 2 +- trunk/drivers/ata/pata_netcell.c | 2 +- trunk/drivers/ata/pata_ninja32.c | 44 ++--- trunk/drivers/ata/pata_ns87410.c | 1 + trunk/drivers/ata/pata_ns87415.c | 2 +- trunk/drivers/ata/pata_oldpiix.c | 2 +- trunk/drivers/ata/pata_opti.c | 1 + trunk/drivers/ata/pata_optidma.c | 1 + trunk/drivers/ata/pata_pcmcia.c | 2 +- trunk/drivers/ata/pata_pdc202xx_old.c | 2 +- trunk/drivers/ata/pata_platform.c | 2 +- trunk/drivers/ata/pata_qdi.c | 2 +- trunk/drivers/ata/pata_radisys.c | 2 +- trunk/drivers/ata/pata_sc1200.c | 2 +- trunk/drivers/ata/pata_scc.c | 2 +- trunk/drivers/ata/pata_serverworks.c | 1 + trunk/drivers/ata/pata_sil680.c | 1 + trunk/drivers/ata/pata_sis.c | 2 +- trunk/drivers/ata/pata_sl82c105.c | 1 + trunk/drivers/ata/pata_triflex.c | 2 +- trunk/drivers/ata/pata_via.c | 1 + trunk/drivers/ata/pata_winbond.c | 2 +- trunk/drivers/ata/sata_sil24.c | 5 - trunk/drivers/s390/char/tape_block.c | 6 +- trunk/drivers/s390/char/tape_core.c | 8 +- trunk/drivers/s390/cio/qdio_debug.c | 19 +- trunk/drivers/s390/cio/qdio_main.c | 1 + trunk/drivers/xen/events.c | 2 +- trunk/drivers/xen/manage.c | 2 + trunk/drivers/xen/xencomm.c | 23 ++- trunk/include/linux/sched.h | 12 +- trunk/kernel/irq/proc.c | 2 +- trunk/kernel/lockdep.c | 17 +- trunk/kernel/printk.c | 39 ++++ trunk/kernel/resource.c | 2 +- trunk/kernel/sched.c | 3 +- trunk/kernel/sched_fair.c | 169 +++++------------- trunk/kernel/sched_idletask.c | 5 +- trunk/kernel/sched_rt.c | 5 +- trunk/lib/swiotlb.c | 6 +- 78 files changed, 294 insertions(+), 394 deletions(-) diff --git a/[refs] b/[refs] index b0cd423990c3..08797ef6417a 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 0d8762c9ee40cf83d5dbf3a22843bc566912b592 +refs/heads/master: 
6ad9f15c94822c3f067a7d443f3b414e08b34460 diff --git a/trunk/Documentation/scheduler/sched-design-CFS.txt b/trunk/Documentation/scheduler/sched-design-CFS.txt index eb471c7a905e..9d8eb553884c 100644 --- a/trunk/Documentation/scheduler/sched-design-CFS.txt +++ b/trunk/Documentation/scheduler/sched-design-CFS.txt @@ -92,7 +92,7 @@ other HZ detail. Thus the CFS scheduler has no notion of "timeslices" in the way the previous scheduler had, and has no heuristics whatsoever. There is only one central tunable (you have to switch on CONFIG_SCHED_DEBUG): - /proc/sys/kernel/sched_min_granularity_ns + /proc/sys/kernel/sched_granularity_ns which can be used to tune the scheduler from "desktop" (i.e., low latencies) to "server" (i.e., good batching) workloads. It defaults to a setting suitable diff --git a/trunk/arch/s390/Kconfig b/trunk/arch/s390/Kconfig index 8116a3328a19..70b7645ce745 100644 --- a/trunk/arch/s390/Kconfig +++ b/trunk/arch/s390/Kconfig @@ -241,17 +241,19 @@ config PACK_STACK Say Y if you are unsure. config SMALL_STACK - bool "Use 8kb for kernel stack instead of 16kb" - depends on PACK_STACK && 64BIT && !LOCKDEP + bool "Use 4kb/8kb for kernel stack instead of 8kb/16kb" + depends on PACK_STACK && !LOCKDEP help If you say Y here and the compiler supports the -mkernel-backchain - option the kernel will use a smaller kernel stack size. The reduced - size is 8kb instead of 16kb. This allows to run more threads on a - system and reduces the pressure on the memory management for higher - order page allocations. + option the kernel will use a smaller kernel stack size. For 31 bit + the reduced size is 4kb instead of 8kb and for 64 bit it is 8kb + instead of 16kb. This allows to run more thread on a system and + reduces the pressure on the memory management for higher order + page allocations. Say N if you are unsure. + config CHECK_STACK bool "Detect kernel stack overflow" help @@ -382,7 +384,7 @@ config IPL choice prompt "IPL method generated into head.S" depends on IPL - default IPL_VM + default IPL_TAPE help Select "tape" if you want to IPL the image from a Tape. 
diff --git a/trunk/arch/s390/appldata/appldata_base.c b/trunk/arch/s390/appldata/appldata_base.c index a06a47cdd5e0..a7f8979fb925 100644 --- a/trunk/arch/s390/appldata/appldata_base.c +++ b/trunk/arch/s390/appldata/appldata_base.c @@ -424,7 +424,7 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp, */ int appldata_register_ops(struct appldata_ops *ops) { - if (ops->size > APPLDATA_MAX_REC_SIZE) + if ((ops->size > APPLDATA_MAX_REC_SIZE) || (ops->size < 0)) return -EINVAL; ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL); diff --git a/trunk/arch/s390/include/asm/kvm_virtio.h b/trunk/arch/s390/include/asm/kvm_virtio.h index c13568b9351c..146100224def 100644 --- a/trunk/arch/s390/include/asm/kvm_virtio.h +++ b/trunk/arch/s390/include/asm/kvm_virtio.h @@ -52,7 +52,7 @@ struct kvm_vqconfig { #ifdef __KERNEL__ /* early virtio console setup */ -#ifdef CONFIG_S390_GUEST +#ifdef CONFIG_VIRTIO_CONSOLE extern void s390_virtio_console_init(void); #else static inline void s390_virtio_console_init(void) diff --git a/trunk/arch/s390/include/asm/mmu.h b/trunk/arch/s390/include/asm/mmu.h index d2b4ff831477..5dd5e7b3476f 100644 --- a/trunk/arch/s390/include/asm/mmu.h +++ b/trunk/arch/s390/include/asm/mmu.h @@ -7,8 +7,7 @@ typedef struct { unsigned long asce_bits; unsigned long asce_limit; int noexec; - int has_pgste; /* The mmu context has extended page tables */ - int alloc_pgste; /* cloned contexts will have extended page tables */ + int pgstes; } mm_context_t; #endif diff --git a/trunk/arch/s390/include/asm/mmu_context.h b/trunk/arch/s390/include/asm/mmu_context.h index 28ec870655af..4c2fbf48c9c4 100644 --- a/trunk/arch/s390/include/asm/mmu_context.h +++ b/trunk/arch/s390/include/asm/mmu_context.h @@ -20,25 +20,12 @@ static inline int init_new_context(struct task_struct *tsk, #ifdef CONFIG_64BIT mm->context.asce_bits |= _ASCE_TYPE_REGION3; #endif - if (current->mm->context.alloc_pgste) { - /* - * alloc_pgste indicates, that any NEW context will be created - * with extended page tables. The old context is unchanged. The - * page table allocation and the page table operations will - * look at has_pgste to distinguish normal and extended page - * tables. The only way to create extended page tables is to - * set alloc_pgste and then create a new context (e.g. dup_mm). - * The page table allocation is called after init_new_context - * and if has_pgste is set, it will create extended page - * tables. 
- */ + if (current->mm->context.pgstes) { mm->context.noexec = 0; - mm->context.has_pgste = 1; - mm->context.alloc_pgste = 1; + mm->context.pgstes = 1; } else { mm->context.noexec = s390_noexec; - mm->context.has_pgste = 0; - mm->context.alloc_pgste = 0; + mm->context.pgstes = 0; } mm->context.asce_limit = STACK_TOP_MAX; crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); diff --git a/trunk/arch/s390/include/asm/pgtable.h b/trunk/arch/s390/include/asm/pgtable.h index 7fc76133b3e4..1a928f84afd6 100644 --- a/trunk/arch/s390/include/asm/pgtable.h +++ b/trunk/arch/s390/include/asm/pgtable.h @@ -679,7 +679,7 @@ static inline void pmd_clear(pmd_t *pmd) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - if (mm->context.has_pgste) + if (mm->context.pgstes) ptep_rcp_copy(ptep); pte_val(*ptep) = _PAGE_TYPE_EMPTY; if (mm->context.noexec) @@ -763,7 +763,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm, struct page *page; unsigned int skey; - if (!mm->context.has_pgste) + if (!mm->context.pgstes) return -EINVAL; rcp_lock(ptep); pgste = (unsigned long *) (ptep + PTRS_PER_PTE); @@ -794,7 +794,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, int young; unsigned long *pgste; - if (!vma->vm_mm->context.has_pgste) + if (!vma->vm_mm->context.pgstes) return 0; physpage = pte_val(*ptep) & PAGE_MASK; pgste = (unsigned long *) (ptep + PTRS_PER_PTE); @@ -844,7 +844,7 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep) static inline void ptep_invalidate(struct mm_struct *mm, unsigned long address, pte_t *ptep) { - if (mm->context.has_pgste) { + if (mm->context.pgstes) { rcp_lock(ptep); __ptep_ipte(address, ptep); ptep_rcp_copy(ptep); diff --git a/trunk/arch/s390/include/asm/thread_info.h b/trunk/arch/s390/include/asm/thread_info.h index c1eaf9604da7..de3fad60c682 100644 --- a/trunk/arch/s390/include/asm/thread_info.h +++ b/trunk/arch/s390/include/asm/thread_info.h @@ -15,8 +15,13 @@ * Size of kernel stack for each process */ #ifndef __s390x__ +#ifndef __SMALL_STACK #define THREAD_ORDER 1 #define ASYNC_ORDER 1 +#else +#define THREAD_ORDER 0 +#define ASYNC_ORDER 0 +#endif #else /* __s390x__ */ #ifndef __SMALL_STACK #define THREAD_ORDER 2 diff --git a/trunk/arch/s390/kernel/smp.c b/trunk/arch/s390/kernel/smp.c index b5595688a477..9e8b1f9b8f4d 100644 --- a/trunk/arch/s390/kernel/smp.c +++ b/trunk/arch/s390/kernel/smp.c @@ -1119,7 +1119,9 @@ int __ref smp_rescan_cpus(void) return rc; } -static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf, +static ssize_t __ref rescan_store(struct sys_device *dev, + struct sysdev_attribute *attr, + const char *buf, size_t count) { int rc; @@ -1127,10 +1129,12 @@ static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf, rc = smp_rescan_cpus(); return rc ? 
rc : count; } -static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store); +static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store); #endif /* CONFIG_HOTPLUG_CPU */ -static ssize_t dispatching_show(struct sysdev_class *class, char *buf) +static ssize_t dispatching_show(struct sys_device *dev, + struct sysdev_attribute *attr, + char *buf) { ssize_t count; @@ -1140,8 +1144,9 @@ static ssize_t dispatching_show(struct sysdev_class *class, char *buf) return count; } -static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf, - size_t count) +static ssize_t dispatching_store(struct sys_device *dev, + struct sysdev_attribute *attr, + const char *buf, size_t count) { int val, rc; char delim; @@ -1163,8 +1168,7 @@ static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf, put_online_cpus(); return rc ? rc : count; } -static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show, - dispatching_store); +static SYSDEV_ATTR(dispatching, 0644, dispatching_show, dispatching_store); static int __init topology_init(void) { @@ -1174,11 +1178,13 @@ static int __init topology_init(void) register_cpu_notifier(&smp_cpu_nb); #ifdef CONFIG_HOTPLUG_CPU - rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan); + rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj, + &attr_rescan.attr); if (rc) return rc; #endif - rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching); + rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj, + &attr_dispatching.attr); if (rc) return rc; for_each_present_cpu(cpu) { diff --git a/trunk/arch/s390/mm/pgtable.c b/trunk/arch/s390/mm/pgtable.c index ef3635b52fc0..3d98ba82ea67 100644 --- a/trunk/arch/s390/mm/pgtable.c +++ b/trunk/arch/s390/mm/pgtable.c @@ -169,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) unsigned long *table; unsigned long bits; - bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; + bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL; spin_lock(&mm->page_table_lock); page = NULL; if (!list_empty(&mm->context.pgtable_list)) { @@ -186,7 +186,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) pgtable_page_ctor(page); page->flags &= ~FRAG_MASK; table = (unsigned long *) page_to_phys(page); - if (mm->context.has_pgste) + if (mm->context.pgstes) clear_table_pgstes(table); else clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); @@ -210,7 +210,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) struct page *page; unsigned long bits; - bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; + bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL; bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); page = pfn_to_page(__pa(table) >> PAGE_SHIFT); spin_lock(&mm->page_table_lock); @@ -257,7 +257,7 @@ int s390_enable_sie(void) struct mm_struct *mm, *old_mm; /* Do we have pgstes? 
if yes, we are done */ - if (tsk->mm->context.has_pgste) + if (tsk->mm->context.pgstes) return 0; /* lets check if we are allowed to replace the mm */ @@ -269,14 +269,14 @@ int s390_enable_sie(void) } task_unlock(tsk); - /* we copy the mm and let dup_mm create the page tables with_pgstes */ - tsk->mm->context.alloc_pgste = 1; + /* we copy the mm with pgstes enabled */ + tsk->mm->context.pgstes = 1; mm = dup_mm(tsk); - tsk->mm->context.alloc_pgste = 0; + tsk->mm->context.pgstes = 0; if (!mm) return -ENOMEM; - /* Now lets check again if something happened */ + /* Now lets check again if somebody attached ptrace etc */ task_lock(tsk); if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) { diff --git a/trunk/arch/x86/include/asm/dma-mapping.h b/trunk/arch/x86/include/asm/dma-mapping.h index 7f225a4b2a26..4a5397bfce27 100644 --- a/trunk/arch/x86/include/asm/dma-mapping.h +++ b/trunk/arch/x86/include/asm/dma-mapping.h @@ -255,11 +255,9 @@ static inline unsigned long dma_alloc_coherent_mask(struct device *dev, static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp) { +#ifdef CONFIG_X86_64 unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp); - if (dma_mask <= DMA_24BIT_MASK) - gfp |= GFP_DMA; -#ifdef CONFIG_X86_64 if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) gfp |= GFP_DMA32; #endif diff --git a/trunk/arch/x86/kernel/genx2apic_uv_x.c b/trunk/arch/x86/kernel/genx2apic_uv_x.c index 2c7dbdb98278..680a06557c5e 100644 --- a/trunk/arch/x86/kernel/genx2apic_uv_x.c +++ b/trunk/arch/x86/kernel/genx2apic_uv_x.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -397,16 +398,16 @@ void __init uv_system_init(void) printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); - uv_blade_info = kmalloc(bytes, GFP_KERNEL); + uv_blade_info = alloc_bootmem_pages(bytes); get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes(); - uv_node_to_blade = kmalloc(bytes, GFP_KERNEL); + uv_node_to_blade = alloc_bootmem_pages(bytes); memset(uv_node_to_blade, 255, bytes); bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus(); - uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL); + uv_cpu_to_blade = alloc_bootmem_pages(bytes); memset(uv_cpu_to_blade, 255, bytes); blade = 0; diff --git a/trunk/arch/x86/kernel/pci-swiotlb_64.c b/trunk/arch/x86/kernel/pci-swiotlb_64.c index 3c539d111abb..c4ce0332759e 100644 --- a/trunk/arch/x86/kernel/pci-swiotlb_64.c +++ b/trunk/arch/x86/kernel/pci-swiotlb_64.c @@ -18,21 +18,9 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size, return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction); } -static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags) -{ - void *vaddr; - - vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags); - if (vaddr) - return vaddr; - - return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); -} - struct dma_mapping_ops swiotlb_dma_ops = { .mapping_error = swiotlb_dma_mapping_error, - .alloc_coherent = x86_swiotlb_alloc_coherent, + .alloc_coherent = swiotlb_alloc_coherent, .free_coherent = swiotlb_free_coherent, .map_single = swiotlb_map_single_phys, .unmap_single = swiotlb_unmap_single, diff --git a/trunk/arch/x86/kvm/mmu.c b/trunk/arch/x86/kvm/mmu.c index 99c239c5c0ac..2a5e64881d9b 100644 --- 
a/trunk/arch/x86/kvm/mmu.c +++ b/trunk/arch/x86/kvm/mmu.c @@ -2634,6 +2634,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu, static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu) { kvm_x86_ops->tlb_flush(vcpu); + set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests); return 1; } diff --git a/trunk/arch/x86/mm/init_64.c b/trunk/arch/x86/mm/init_64.c index f79a02f64d10..b8e461d49412 100644 --- a/trunk/arch/x86/mm/init_64.c +++ b/trunk/arch/x86/mm/init_64.c @@ -350,10 +350,8 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end, * pagetable pages as RO. So assume someone who pre-setup * these mappings are more intelligent. */ - if (pte_val(*pte)) { - pages++; + if (pte_val(*pte)) continue; - } if (0) printk(" pte=%p addr=%lx pte=%016lx\n", @@ -420,10 +418,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, * not differ with respect to page frame and * attributes. */ - if (page_size_mask & (1 << PG_LEVEL_2M)) { - pages++; + if (page_size_mask & (1 << PG_LEVEL_2M)) continue; - } new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd)); } @@ -503,10 +499,8 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end, * not differ with respect to page frame and * attributes. */ - if (page_size_mask & (1 << PG_LEVEL_1G)) { - pages++; + if (page_size_mask & (1 << PG_LEVEL_1G)) continue; - } prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud)); } @@ -837,7 +831,7 @@ int arch_add_memory(int nid, u64 start, u64 size) unsigned long nr_pages = size >> PAGE_SHIFT; int ret; - last_mapped_pfn = init_memory_mapping(start, start + size); + last_mapped_pfn = init_memory_mapping(start, start + size-1); if (last_mapped_pfn > max_pfn_mapped) max_pfn_mapped = last_mapped_pfn; diff --git a/trunk/arch/x86/xen/mmu.c b/trunk/arch/x86/xen/mmu.c index aba77b2b7d18..d4d52f5a1cf7 100644 --- a/trunk/arch/x86/xen/mmu.c +++ b/trunk/arch/x86/xen/mmu.c @@ -246,21 +246,11 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr) { unsigned long address = (unsigned long)vaddr; unsigned int level; - pte_t *pte; - unsigned offset; + pte_t *pte = lookup_address(address, &level); + unsigned offset = address & ~PAGE_MASK; - /* - * if the PFN is in the linear mapped vaddr range, we can just use - * the (quick) virt_to_machine() p2m lookup - */ - if (virt_addr_valid(vaddr)) - return virt_to_machine(vaddr); - - /* otherwise we have to do a (slower) full page-table walk */ - - pte = lookup_address(address, &level); BUG_ON(pte == NULL); - offset = address & ~PAGE_MASK; + return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset); } @@ -420,7 +410,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, xen_mc_batch(); - u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; + u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; u.val = pte_val_ma(pte); xen_extend_mmu_update(&u); diff --git a/trunk/drivers/ata/ahci.c b/trunk/drivers/ata/ahci.c index a67b8e7c712d..aeadd00411a1 100644 --- a/trunk/drivers/ata/ahci.c +++ b/trunk/drivers/ata/ahci.c @@ -49,17 +49,6 @@ #define DRV_NAME "ahci" #define DRV_VERSION "3.0" -/* Enclosure Management Control */ -#define EM_CTRL_MSG_TYPE 0x000f0000 - -/* Enclosure Management LED Message Type */ -#define EM_MSG_LED_HBA_PORT 0x0000000f -#define EM_MSG_LED_PMP_SLOT 0x0000ff00 -#define EM_MSG_LED_VALUE 0xffff0000 -#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000 -#define EM_MSG_LED_VALUE_OFF 0xfff80000 -#define EM_MSG_LED_VALUE_ON 0x00010000 - static int ahci_skip_host_reset; module_param_named(skip_host_reset, 
ahci_skip_host_reset, int, 0444); MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)"); @@ -599,9 +588,6 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ - /* Promise */ - { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ - /* Generic, PCI class code for AHCI */ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, @@ -1234,20 +1220,18 @@ static void ahci_sw_activity_blink(unsigned long arg) struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; unsigned long led_message = emp->led_state; u32 activity_led_state; - unsigned long flags; - led_message &= EM_MSG_LED_VALUE; + led_message &= 0xffff0000; led_message |= ap->port_no | (link->pmp << 8); /* check to see if we've had activity. If so, * toggle state of LED and reset timer. If not, * turn LED to desired idle state. */ - spin_lock_irqsave(ap->lock, flags); if (emp->saved_activity != emp->activity) { emp->saved_activity = emp->activity; /* get the current LED state */ - activity_led_state = led_message & EM_MSG_LED_VALUE_ON; + activity_led_state = led_message & 0x00010000; if (activity_led_state) activity_led_state = 0; @@ -1255,18 +1239,17 @@ static void ahci_sw_activity_blink(unsigned long arg) activity_led_state = 1; /* clear old state */ - led_message &= ~EM_MSG_LED_VALUE_ACTIVITY; + led_message &= 0xfff8ffff; /* toggle state */ led_message |= (activity_led_state << 16); mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100)); } else { /* switch to idle */ - led_message &= ~EM_MSG_LED_VALUE_ACTIVITY; + led_message &= 0xfff8ffff; if (emp->blink_policy == BLINK_OFF) led_message |= (1 << 16); } - spin_unlock_irqrestore(ap->lock, flags); ahci_transmit_led_message(ap, led_message, 4); } @@ -1311,7 +1294,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, struct ahci_em_priv *emp; /* get the slot number from the message */ - pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; + pmp = (state & 0x0000ff00) >> 8; if (pmp < MAX_SLOTS) emp = &pp->em_priv[pmp]; else @@ -1336,7 +1319,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, message[0] |= (4 << 8); /* ignore 0:4 of byte zero, fill in port info yourself */ - message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no); + message[1] = ((state & 0xfffffff0) | ap->port_no); /* write message to EM_LOC */ writel(message[0], mmio + hpriv->em_loc); @@ -1379,7 +1362,7 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, state = simple_strtoul(buf, NULL, 0); /* get the slot number from the message */ - pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; + pmp = (state & 0x0000ff00) >> 8; if (pmp < MAX_SLOTS) emp = &pp->em_priv[pmp]; else @@ -1390,7 +1373,7 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, * activity led through em_message */ if (emp->blink_policy) - state &= ~EM_MSG_LED_VALUE_ACTIVITY; + state &= 0xfff8ffff; return ahci_transmit_led_message(ap, state, size); } @@ -1409,16 +1392,16 @@ static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val) link->flags &= ~(ATA_LFLAG_SW_ACTIVITY); /* set the LED to OFF */ - port_led_state &= EM_MSG_LED_VALUE_OFF; + port_led_state &= 0xfff80000; port_led_state |= (ap->port_no | (link->pmp << 8)); ahci_transmit_led_message(ap, port_led_state, 4); } else { link->flags |= ATA_LFLAG_SW_ACTIVITY; if (val == BLINK_OFF) { /* set LED to ON for idle */ - 
port_led_state &= EM_MSG_LED_VALUE_OFF; + port_led_state &= 0xfff80000; port_led_state |= (ap->port_no | (link->pmp << 8)); - port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */ + port_led_state |= 0x00010000; /* check this */ ahci_transmit_led_message(ap, port_led_state, 4); } } @@ -2629,7 +2612,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) u32 em_loc = readl(mmio + HOST_EM_LOC); u32 em_ctl = readl(mmio + HOST_EM_CTL); - messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16; + messages = (em_ctl & 0x000f0000) >> 16; /* we only support LED message type right now */ if ((messages & 0x01) && (ahci_em_messages == 1)) { diff --git a/trunk/drivers/ata/ata_generic.c b/trunk/drivers/ata/ata_generic.c index 5c33767e66de..75a406f5e694 100644 --- a/trunk/drivers/ata/ata_generic.c +++ b/trunk/drivers/ata/ata_generic.c @@ -1,6 +1,6 @@ /* * ata_generic.c - Generic PATA/SATA controller driver. - * Copyright 2005 Red Hat Inc, all rights reserved. + * Copyright 2005 Red Hat Inc , all rights reserved. * * Elements from ide/pci/generic.c * Copyright (C) 2001-2002 Andre Hedrick diff --git a/trunk/drivers/ata/ata_piix.c b/trunk/drivers/ata/ata_piix.c index 52dc2d8b8f22..e9e32ed6b1a3 100644 --- a/trunk/drivers/ata/ata_piix.c +++ b/trunk/drivers/ata/ata_piix.c @@ -14,7 +14,7 @@ * * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer * Copyright (C) 1998-2000 Andre Hedrick - * Copyright (C) 2003 Red Hat Inc + * Copyright (C) 2003 Red Hat Inc * * * This program is free software; you can redistribute it and/or modify diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c index 2ff633c119e2..8cb0b360bfd8 100644 --- a/trunk/drivers/ata/libata-core.c +++ b/trunk/drivers/ata/libata-core.c @@ -4156,33 +4156,29 @@ static int cable_is_40wire(struct ata_port *ap) struct ata_link *link; struct ata_device *dev; - /* If the controller thinks we are 40 wire, we are. */ + /* If the controller thinks we are 40 wire, we are */ if (ap->cbl == ATA_CBL_PATA40) return 1; - - /* If the controller thinks we are 80 wire, we are. */ + /* If the controller thinks we are 80 wire, we are */ if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) return 0; - - /* If the system is known to be 40 wire short cable (eg - * laptop), then we allow 80 wire modes even if the drive - * isn't sure. - */ + /* If the system is known to be 40 wire short cable (eg laptop), + then we allow 80 wire modes even if the drive isn't sure */ if (ap->cbl == ATA_CBL_PATA40_SHORT) return 0; - - /* If the controller doesn't know, we scan. - * - * Note: We look for all 40 wire detects at this point. Any - * 80 wire detect is taken to be 80 wire cable because - * - in many setups only the one drive (slave if present) will - * give a valid detect - * - if you have a non detect capable drive you don't want it - * to colour the choice - */ + /* If the controller doesn't know we scan + + - Note: We look for all 40 wire detects at this point. 
+ Any 80 wire detect is taken to be 80 wire cable + because + - In many setups only the one drive (slave if present) + will give a valid detect + - If you have a non detect capable drive you don't + want it to colour the choice + */ ata_port_for_each_link(link, ap) { ata_link_for_each_dev(dev, link) { - if (ata_dev_enabled(dev) && !ata_is_40wire(dev)) + if (!ata_is_40wire(dev)) return 0; } } @@ -4557,7 +4553,6 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words) /** * ata_qc_new_init - Request an available ATA command, and initialize it * @dev: Device from whom we request an available command structure - * @tag: command tag * * LOCKING: * None. diff --git a/trunk/drivers/ata/libata-eh.c b/trunk/drivers/ata/libata-eh.c index 8077bdf5d30d..5d687d7cffae 100644 --- a/trunk/drivers/ata/libata-eh.c +++ b/trunk/drivers/ata/libata-eh.c @@ -603,9 +603,6 @@ void ata_scsi_error(struct Scsi_Host *host) ata_link_for_each_dev(dev, link) { int devno = dev->devno; - if (!ata_dev_enabled(dev)) - continue; - ehc->saved_xfer_mode[devno] = dev->xfer_mode; if (ata_ncq_enabled(dev)) ehc->saved_ncq_enabled |= 1 << devno; @@ -1164,7 +1161,6 @@ void ata_eh_detach_dev(struct ata_device *dev) { struct ata_link *link = dev->link; struct ata_port *ap = link->ap; - struct ata_eh_context *ehc = &link->eh_context; unsigned long flags; ata_dev_disable(dev); @@ -1178,11 +1174,9 @@ void ata_eh_detach_dev(struct ata_device *dev) ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; } - /* clear per-dev EH info */ + /* clear per-dev EH actions */ ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); - ehc->saved_xfer_mode[dev->devno] = 0; - ehc->saved_ncq_enabled &= ~(1 << dev->devno); spin_unlock_irqrestore(ap->lock, flags); } @@ -2793,9 +2787,6 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ ata_link_for_each_dev(dev, link) { - if (!ata_dev_enabled(dev)) - continue; - if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { struct ata_ering_entry *ent; @@ -2817,9 +2808,6 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); - if (!ata_dev_enabled(dev)) - continue; - if (dev->xfer_mode != saved_xfer_mode || ata_ncq_enabled(dev) != saved_ncq) dev->flags |= ATA_DFLAG_DUBIOUS_XFER; diff --git a/trunk/drivers/ata/pata_acpi.c b/trunk/drivers/ata/pata_acpi.c index e2e332d8ff95..eb919c16a03e 100644 --- a/trunk/drivers/ata/pata_acpi.c +++ b/trunk/drivers/ata/pata_acpi.c @@ -1,7 +1,7 @@ /* * ACPI PATA driver * - * (c) 2007 Red Hat + * (c) 2007 Red Hat */ #include diff --git a/trunk/drivers/ata/pata_ali.c b/trunk/drivers/ata/pata_ali.c index 73c466e452ca..5ca70fa1f587 100644 --- a/trunk/drivers/ata/pata_ali.c +++ b/trunk/drivers/ata/pata_ali.c @@ -1,6 +1,7 @@ /* * pata_ali.c - ALI 15x3 PATA for new ATA layer * (C) 2005 Red Hat Inc + * Alan Cox * * based in part upon * linux/drivers/ide/pci/alim15x3.c Version 0.17 2003/01/02 diff --git a/trunk/drivers/ata/pata_amd.c b/trunk/drivers/ata/pata_amd.c index 0ec9c7d9fe9d..57dd00f463d3 100644 --- a/trunk/drivers/ata/pata_amd.c +++ b/trunk/drivers/ata/pata_amd.c @@ -1,6 +1,7 @@ /* * pata_amd.c - AMD PATA for new ATA layer * (C) 2005-2006 Red Hat Inc + * Alan Cox * * Based on pata-sil680. Errata information is taken from data sheets * and the amd74xx.c driver by Vojtech Pavlik. 
Nvidia SATA devices are diff --git a/trunk/drivers/ata/pata_artop.c b/trunk/drivers/ata/pata_artop.c index 6b3092c75ffe..0f513bc11193 100644 --- a/trunk/drivers/ata/pata_artop.c +++ b/trunk/drivers/ata/pata_artop.c @@ -1,7 +1,7 @@ /* * pata_artop.c - ARTOP ATA controller driver * - * (C) 2006 Red Hat + * (C) 2006 Red Hat * (C) 2007 Bartlomiej Zolnierkiewicz * * Based in part on drivers/ide/pci/aec62xx.c diff --git a/trunk/drivers/ata/pata_atiixp.c b/trunk/drivers/ata/pata_atiixp.c index 0e2cde8f9973..e8a0d99d7356 100644 --- a/trunk/drivers/ata/pata_atiixp.c +++ b/trunk/drivers/ata/pata_atiixp.c @@ -1,6 +1,7 @@ /* * pata_atiixp.c - ATI PATA for new ATA layer * (C) 2005 Red Hat Inc + * Alan Cox * * Based on * diff --git a/trunk/drivers/ata/pata_cmd640.c b/trunk/drivers/ata/pata_cmd640.c index 34a394264c3d..2de30b990278 100644 --- a/trunk/drivers/ata/pata_cmd640.c +++ b/trunk/drivers/ata/pata_cmd640.c @@ -1,6 +1,7 @@ /* * pata_cmd640.c - CMD640 PCI PATA for new ATA layer * (C) 2007 Red Hat Inc + * Alan Cox * * Based upon * linux/drivers/ide/pci/cmd640.c Version 1.02 Sep 01, 1996 diff --git a/trunk/drivers/ata/pata_cmd64x.c b/trunk/drivers/ata/pata_cmd64x.c index 3167d8fed2f2..ddd09b7d98c9 100644 --- a/trunk/drivers/ata/pata_cmd64x.c +++ b/trunk/drivers/ata/pata_cmd64x.c @@ -1,7 +1,7 @@ /* * pata_cmd64x.c - CMD64x PATA for new ATA layer * (C) 2005 Red Hat Inc - * Alan Cox + * Alan Cox * * Based upon * linux/drivers/ide/pci/cmd64x.c Version 1.30 Sept 10, 2002 diff --git a/trunk/drivers/ata/pata_cs5530.c b/trunk/drivers/ata/pata_cs5530.c index bba453381f44..0c4b271a9d5a 100644 --- a/trunk/drivers/ata/pata_cs5530.c +++ b/trunk/drivers/ata/pata_cs5530.c @@ -1,6 +1,7 @@ /* * pata-cs5530.c - CS5530 PATA for new ATA layer * (C) 2005 Red Hat Inc + * Alan Cox * * based upon cs5530.c by Mark Lord. * diff --git a/trunk/drivers/ata/pata_cs5535.c b/trunk/drivers/ata/pata_cs5535.c index 1b2d4a0f5f74..f1b6556f0483 100644 --- a/trunk/drivers/ata/pata_cs5535.c +++ b/trunk/drivers/ata/pata_cs5535.c @@ -1,7 +1,7 @@ /* * pata-cs5535.c - CS5535 PATA for new ATA layer * (C) 2005-2006 Red Hat Inc - * Alan Cox + * Alan Cox * * based upon cs5535.c from AMD as cleaned up and * made readable and Linux style by Wolfgang Zuleger * * Based heavily on * linux/drivers/ide/pci/cy82c693.c Version 0.40 Sep. 10, 2002 diff --git a/trunk/drivers/ata/pata_efar.c b/trunk/drivers/ata/pata_efar.c index ac6392ea35b0..9fba82976ba6 100644 --- a/trunk/drivers/ata/pata_efar.c +++ b/trunk/drivers/ata/pata_efar.c @@ -1,7 +1,7 @@ /* * pata_efar.c - EFAR PIIX clone controller driver * - * (C) 2005 Red Hat + * (C) 2005 Red Hat * * Some parts based on ata_piix.c by Jeff Garzik and others. * diff --git a/trunk/drivers/ata/pata_isapnp.c b/trunk/drivers/ata/pata_isapnp.c index 15cdb9148aab..6a111baab523 100644 --- a/trunk/drivers/ata/pata_isapnp.c +++ b/trunk/drivers/ata/pata_isapnp.c @@ -1,7 +1,7 @@ /* * pata-isapnp.c - ISA PnP PATA controller driver. - * Copyright 2005/2006 Red Hat Inc, all rights reserved. + * Copyright 2005/2006 Red Hat Inc , all rights reserved. 
* * Based in part on ide-pnp.c by Andrey Panin */ diff --git a/trunk/drivers/ata/pata_it821x.c b/trunk/drivers/ata/pata_it821x.c index 860ede526282..0221c9a46769 100644 --- a/trunk/drivers/ata/pata_it821x.c +++ b/trunk/drivers/ata/pata_it821x.c @@ -1,7 +1,7 @@ /* * pata_it821x.c - IT821x PATA for new ATA layer * (C) 2005 Red Hat Inc - * Alan Cox + * Alan Cox * (C) 2007 Bartlomiej Zolnierkiewicz * * based upon @@ -10,7 +10,7 @@ * * linux/drivers/ide/pci/it821x.c Version 0.09 December 2004 * - * Copyright (C) 2004 Red Hat + * Copyright (C) 2004 Red Hat * * May be copied or modified under the terms of the GNU General Public License * Based in part on the ITE vendor provided SCSI driver. @@ -557,8 +557,9 @@ static unsigned int it821x_read_id(struct ata_device *adev, if (strstr(model_num, "Integrated Technology Express")) { /* Set feature bits the firmware neglects */ id[49] |= 0x0300; /* LBA, DMA */ + id[82] |= 0x0400; /* LBA48 */ id[83] &= 0x7FFF; - id[83] |= 0x4400; /* Word 83 is valid and LBA48 */ + id[83] |= 0x4000; /* Word 83 is valid */ id[86] |= 0x0400; /* LBA48 on */ id[ATA_ID_MAJOR_VER] |= 0x1F; } diff --git a/trunk/drivers/ata/pata_jmicron.c b/trunk/drivers/ata/pata_jmicron.c index 38cf1ab2d289..73b7596816b4 100644 --- a/trunk/drivers/ata/pata_jmicron.c +++ b/trunk/drivers/ata/pata_jmicron.c @@ -4,7 +4,7 @@ * driven by AHCI in the usual configuration although * this driver can handle other setups if we need it. * - * (c) 2006 Red Hat + * (c) 2006 Red Hat */ #include diff --git a/trunk/drivers/ata/pata_legacy.c b/trunk/drivers/ata/pata_legacy.c index 930c2208640b..bc037ffce200 100644 --- a/trunk/drivers/ata/pata_legacy.c +++ b/trunk/drivers/ata/pata_legacy.c @@ -1,6 +1,6 @@ /* * pata-legacy.c - Legacy port PATA/SATA controller driver. - * Copyright 2005/2006 Red Hat, all rights reserved. + * Copyright 2005/2006 Red Hat , all rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/trunk/drivers/ata/pata_marvell.c b/trunk/drivers/ata/pata_marvell.c index 76e399bf8c1b..0d87eec84966 100644 --- a/trunk/drivers/ata/pata_marvell.c +++ b/trunk/drivers/ata/pata_marvell.c @@ -5,7 +5,7 @@ * isn't making full use of the device functionality but it is * easy to get working. * - * (c) 2006 Red Hat + * (c) 2006 Red Hat */ #include diff --git a/trunk/drivers/ata/pata_mpiix.c b/trunk/drivers/ata/pata_mpiix.c index 7c8faa48b5f3..7d7e3fdab71f 100644 --- a/trunk/drivers/ata/pata_mpiix.c +++ b/trunk/drivers/ata/pata_mpiix.c @@ -1,7 +1,7 @@ /* * pata_mpiix.c - Intel MPIIX PATA for new ATA layer * (C) 2005-2006 Red Hat Inc - * Alan Cox + * Alan Cox * * The MPIIX is different enough to the PIIX4 and friends that we give it * a separate driver. The old ide/pci code handles this by just not tuning diff --git a/trunk/drivers/ata/pata_netcell.c b/trunk/drivers/ata/pata_netcell.c index 9dc05e1656a8..d9719c8b9dbe 100644 --- a/trunk/drivers/ata/pata_netcell.c +++ b/trunk/drivers/ata/pata_netcell.c @@ -1,7 +1,7 @@ /* * pata_netcell.c - Netcell PATA driver * - * (c) 2006 Red Hat + * (c) 2006 Red Hat */ #include diff --git a/trunk/drivers/ata/pata_ninja32.c b/trunk/drivers/ata/pata_ninja32.c index 4e466eae8b46..565e67cd13fa 100644 --- a/trunk/drivers/ata/pata_ninja32.c +++ b/trunk/drivers/ata/pata_ninja32.c @@ -1,6 +1,7 @@ /* * pata_ninja32.c - Ninja32 PATA for new ATA layer * (C) 2007 Red Hat Inc + * Alan Cox * * Note: The controller like many controllers has shared timings for * PIO and DMA. 
We thus flip to the DMA timings in dma_start and flip back @@ -44,7 +45,7 @@ #include #define DRV_NAME "pata_ninja32" -#define DRV_VERSION "0.1.1" +#define DRV_VERSION "0.0.1" /** @@ -88,17 +89,6 @@ static struct ata_port_operations ninja32_port_ops = { .set_piomode = ninja32_set_piomode, }; -static void ninja32_program(void __iomem *base) -{ - iowrite8(0x05, base + 0x01); /* Enable interrupt lines */ - iowrite8(0xBE, base + 0x02); /* Burst, ?? setup */ - iowrite8(0x01, base + 0x03); /* Unknown */ - iowrite8(0x20, base + 0x04); /* WAIT0 */ - iowrite8(0x8f, base + 0x05); /* Unknown */ - iowrite8(0xa4, base + 0x1c); /* Unknown */ - iowrite8(0x83, base + 0x1d); /* BMDMA control: WAIT0 */ -} - static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) { struct ata_host *host; @@ -144,28 +134,18 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) ap->ioaddr.bmdma_addr = base; ata_sff_std_ports(&ap->ioaddr); - ninja32_program(base); + iowrite8(0x05, base + 0x01); /* Enable interrupt lines */ + iowrite8(0xBE, base + 0x02); /* Burst, ?? setup */ + iowrite8(0x01, base + 0x03); /* Unknown */ + iowrite8(0x20, base + 0x04); /* WAIT0 */ + iowrite8(0x8f, base + 0x05); /* Unknown */ + iowrite8(0xa4, base + 0x1c); /* Unknown */ + iowrite8(0x83, base + 0x1d); /* BMDMA control: WAIT0 */ /* FIXME: Should we disable them at remove ? */ return ata_host_activate(host, dev->irq, ata_sff_interrupt, IRQF_SHARED, &ninja32_sht); } -#ifdef CONFIG_PM - -static int ninja32_reinit_one(struct pci_dev *pdev) -{ - struct ata_host *host = dev_get_drvdata(&pdev->dev); - int rc; - - rc = ata_pci_device_do_resume(pdev); - if (rc) - return rc; - ninja32_program(host->iomap[0]); - ata_host_resume(host); - return 0; -} -#endif - static const struct pci_device_id ninja32[] = { { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, @@ -176,11 +156,7 @@ static struct pci_driver ninja32_pci_driver = { .name = DRV_NAME, .id_table = ninja32, .probe = ninja32_init_one, - .remove = ata_pci_remove_one, -#ifdef CONFIG_PM - .suspend = ata_pci_device_suspend, - .resume = ninja32_reinit_one, -#endif + .remove = ata_pci_remove_one }; static int __init ninja32_init(void) diff --git a/trunk/drivers/ata/pata_ns87410.c b/trunk/drivers/ata/pata_ns87410.c index 40d411c460de..be756b7ef07e 100644 --- a/trunk/drivers/ata/pata_ns87410.c +++ b/trunk/drivers/ata/pata_ns87410.c @@ -1,6 +1,7 @@ /* * pata_ns87410.c - National Semiconductor 87410 PATA for new ATA layer * (C) 2006 Red Hat Inc + * Alan Cox * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/trunk/drivers/ata/pata_ns87415.c b/trunk/drivers/ata/pata_ns87415.c index 89bf5f865d6a..e0aa7eaaee0a 100644 --- a/trunk/drivers/ata/pata_ns87415.c +++ b/trunk/drivers/ata/pata_ns87415.c @@ -1,7 +1,7 @@ /* * pata_ns87415.c - NS87415 (non PARISC) PATA * - * (C) 2005 Red Hat + * (C) 2005 Red Hat * * This is a fairly generic MWDMA controller. It has some limitations * as it requires timing reloads on PIO/DMA transitions but it is otherwise diff --git a/trunk/drivers/ata/pata_oldpiix.c b/trunk/drivers/ata/pata_oldpiix.c index c0dbc46a348e..df64f2443001 100644 --- a/trunk/drivers/ata/pata_oldpiix.c +++ b/trunk/drivers/ata/pata_oldpiix.c @@ -1,7 +1,7 @@ /* * pata_oldpiix.c - Intel PATA/SATA controllers * - * (C) 2005 Red Hat + * (C) 2005 Red Hat * * Some parts based on ata_piix.c by Jeff Garzik and others. 
* diff --git a/trunk/drivers/ata/pata_opti.c b/trunk/drivers/ata/pata_opti.c index e4fa4d565e96..fb2cf661b0e8 100644 --- a/trunk/drivers/ata/pata_opti.c +++ b/trunk/drivers/ata/pata_opti.c @@ -1,6 +1,7 @@ /* * pata_opti.c - ATI PATA for new ATA layer * (C) 2005 Red Hat Inc + * Alan Cox * * Based on * linux/drivers/ide/pci/opti621.c Version 0.7 Sept 10, 2002 diff --git a/trunk/drivers/ata/pata_optidma.c b/trunk/drivers/ata/pata_optidma.c index 93bb6e91973f..4cd744456313 100644 --- a/trunk/drivers/ata/pata_optidma.c +++ b/trunk/drivers/ata/pata_optidma.c @@ -1,6 +1,7 @@ /* * pata_optidma.c - Opti DMA PATA for new ATA layer * (C) 2006 Red Hat Inc + * Alan Cox * * The Opti DMA controllers are related to the older PIO PCI controllers * and indeed the VLB ones. The main differences are that the timing diff --git a/trunk/drivers/ata/pata_pcmcia.c b/trunk/drivers/ata/pata_pcmcia.c index 271cb64d429e..02b596b9cf6a 100644 --- a/trunk/drivers/ata/pata_pcmcia.c +++ b/trunk/drivers/ata/pata_pcmcia.c @@ -1,6 +1,6 @@ /* * pata_pcmcia.c - PCMCIA PATA controller driver. - * Copyright 2005-2006 Red Hat Inc, all rights reserved. + * Copyright 2005-2006 Red Hat Inc , all rights reserved. * PCMCIA ident update Copyright 2006 Marcin Juszkiewicz * * diff --git a/trunk/drivers/ata/pata_pdc202xx_old.c b/trunk/drivers/ata/pata_pdc202xx_old.c index 799a6a098712..d2673060bc8d 100644 --- a/trunk/drivers/ata/pata_pdc202xx_old.c +++ b/trunk/drivers/ata/pata_pdc202xx_old.c @@ -1,7 +1,7 @@ /* * pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer * (C) 2005 Red Hat Inc - * Alan Cox + * Alan Cox * (C) 2007 Bartlomiej Zolnierkiewicz * * Based in part on linux/drivers/ide/pci/pdc202xx_old.c diff --git a/trunk/drivers/ata/pata_platform.c b/trunk/drivers/ata/pata_platform.c index 77e4e3b17f54..8f65ad61b8af 100644 --- a/trunk/drivers/ata/pata_platform.c +++ b/trunk/drivers/ata/pata_platform.c @@ -5,7 +5,7 @@ * * Based on pata_pcmcia: * - * Copyright 2005-2006 Red Hat Inc, all rights reserved. + * Copyright 2005-2006 Red Hat Inc , all rights reserved. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive diff --git a/trunk/drivers/ata/pata_qdi.c b/trunk/drivers/ata/pata_qdi.c index 3080f371222c..63b7a1c165a5 100644 --- a/trunk/drivers/ata/pata_qdi.c +++ b/trunk/drivers/ata/pata_qdi.c @@ -1,6 +1,6 @@ /* * pata_qdi.c - QDI VLB ATA controllers - * (C) 2006 Red Hat + * (C) 2006 Red Hat * * This driver mostly exists as a proof of concept for non PCI devices under * libata. While the QDI6580 was 'neat' in 1993 it is no longer terribly diff --git a/trunk/drivers/ata/pata_radisys.c b/trunk/drivers/ata/pata_radisys.c index 0b0aa452de14..1c0d9fa7ee54 100644 --- a/trunk/drivers/ata/pata_radisys.c +++ b/trunk/drivers/ata/pata_radisys.c @@ -1,7 +1,7 @@ /* * pata_radisys.c - Intel PATA/SATA controllers * - * (C) 2006 Red Hat + * (C) 2006 Red Hat * * Some parts based on ata_piix.c by Jeff Garzik and others. 
* diff --git a/trunk/drivers/ata/pata_sc1200.c b/trunk/drivers/ata/pata_sc1200.c index 9a4bdca54616..0278fd2b8fb1 100644 --- a/trunk/drivers/ata/pata_sc1200.c +++ b/trunk/drivers/ata/pata_sc1200.c @@ -1,5 +1,5 @@ /* - * New ATA layer SC1200 driver Alan Cox + * New ATA layer SC1200 driver Alan Cox * * TODO: Mode selection filtering * TODO: Can't enable second channel until ATA core has serialize diff --git a/trunk/drivers/ata/pata_scc.c b/trunk/drivers/ata/pata_scc.c index cf3707e516a2..16673d168573 100644 --- a/trunk/drivers/ata/pata_scc.c +++ b/trunk/drivers/ata/pata_scc.c @@ -8,7 +8,7 @@ * Copyright 2003-2005 Jeff Garzik * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer * Copyright (C) 1998-2000 Andre Hedrick - * Copyright (C) 2003 Red Hat Inc + * Copyright (C) 2003 Red Hat Inc * * and drivers/ata/ahci.c: * Copyright 2004-2005 Red Hat, Inc. diff --git a/trunk/drivers/ata/pata_serverworks.c b/trunk/drivers/ata/pata_serverworks.c index 72e41c9f969b..ffd26d0dc50d 100644 --- a/trunk/drivers/ata/pata_serverworks.c +++ b/trunk/drivers/ata/pata_serverworks.c @@ -1,6 +1,7 @@ /* * pata_serverworks.c - Serverworks PATA for new ATA layer * (C) 2005 Red Hat Inc + * Alan Cox * * based upon * diff --git a/trunk/drivers/ata/pata_sil680.c b/trunk/drivers/ata/pata_sil680.c index 83580a59db58..a598bb36aafc 100644 --- a/trunk/drivers/ata/pata_sil680.c +++ b/trunk/drivers/ata/pata_sil680.c @@ -1,6 +1,7 @@ /* * pata_sil680.c - SIL680 PATA for new ATA layer * (C) 2005 Red Hat Inc + * Alan Cox * * based upon * diff --git a/trunk/drivers/ata/pata_sis.c b/trunk/drivers/ata/pata_sis.c index d34236611752..26345d7b531c 100644 --- a/trunk/drivers/ata/pata_sis.c +++ b/trunk/drivers/ata/pata_sis.c @@ -1,7 +1,7 @@ /* * pata_sis.c - SiS ATA driver * - * (C) 2005 Red Hat + * (C) 2005 Red Hat * (C) 2007 Bartlomiej Zolnierkiewicz * * Based upon linux/drivers/ide/pci/sis5513.c diff --git a/trunk/drivers/ata/pata_sl82c105.c b/trunk/drivers/ata/pata_sl82c105.c index 1b0e7b6d8ef5..69877bd81815 100644 --- a/trunk/drivers/ata/pata_sl82c105.c +++ b/trunk/drivers/ata/pata_sl82c105.c @@ -1,6 +1,7 @@ /* * pata_sl82c105.c - SL82C105 PATA for new ATA layer * (C) 2005 Red Hat Inc + * Alan Cox * * Based in part on linux/drivers/ide/pci/sl82c105.c * SL82C105/Winbond 553 IDE driver diff --git a/trunk/drivers/ata/pata_triflex.c b/trunk/drivers/ata/pata_triflex.c index ef9597517cdd..b181261f2743 100644 --- a/trunk/drivers/ata/pata_triflex.c +++ b/trunk/drivers/ata/pata_triflex.c @@ -1,7 +1,7 @@ /* * pata_triflex.c - Compaq PATA for new ATA layer * (C) 2005 Red Hat Inc - * Alan Cox + * Alan Cox * * based upon * diff --git a/trunk/drivers/ata/pata_via.c b/trunk/drivers/ata/pata_via.c index 681169c9c640..8fdb2ce73210 100644 --- a/trunk/drivers/ata/pata_via.c +++ b/trunk/drivers/ata/pata_via.c @@ -1,6 +1,7 @@ /* * pata_via.c - VIA PATA for new ATA layer * (C) 2005-2006 Red Hat Inc + * Alan Cox * * Documentation * Most chipset documentation available under NDA only diff --git a/trunk/drivers/ata/pata_winbond.c b/trunk/drivers/ata/pata_winbond.c index 319e164a3d74..a7606b044a61 100644 --- a/trunk/drivers/ata/pata_winbond.c +++ b/trunk/drivers/ata/pata_winbond.c @@ -1,6 +1,6 @@ /* * pata_winbond.c - Winbond VLB ATA controllers - * (C) 2006 Red Hat + * (C) 2006 Red Hat * * Support for the Winbond 83759A when operating in advanced mode. * Multichip mode is not currently supported. 
diff --git a/trunk/drivers/ata/sata_sil24.c b/trunk/drivers/ata/sata_sil24.c index ccee930f1e12..4621807a1a6a 100644 --- a/trunk/drivers/ata/sata_sil24.c +++ b/trunk/drivers/ata/sata_sil24.c @@ -1329,11 +1329,6 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } } - /* Set max read request size to 4096. This slightly increases - * write throughput for pci-e variants. - */ - pcie_set_readrq(pdev, 4096); - sil24_init_controller(host); pci_set_master(pdev); diff --git a/trunk/drivers/s390/char/tape_block.c b/trunk/drivers/s390/char/tape_block.c index ae18baf59f06..023803dbb0c7 100644 --- a/trunk/drivers/s390/char/tape_block.c +++ b/trunk/drivers/s390/char/tape_block.c @@ -76,7 +76,7 @@ tapeblock_trigger_requeue(struct tape_device *device) static void tapeblock_end_request(struct request *req, int error) { - if (blk_end_request(req, error, blk_rq_bytes(req))) + if (__blk_end_request(req, error, blk_rq_bytes(req))) BUG(); } @@ -166,7 +166,7 @@ tapeblock_requeue(struct work_struct *work) { nr_queued++; spin_unlock(get_ccwdev_lock(device->cdev)); - spin_lock_irq(&device->blk_data.request_queue_lock); + spin_lock(&device->blk_data.request_queue_lock); while ( !blk_queue_plugged(queue) && elv_next_request(queue) && @@ -176,9 +176,7 @@ tapeblock_requeue(struct work_struct *work) { if (rq_data_dir(req) == WRITE) { DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); blkdev_dequeue_request(req); - spin_unlock_irq(&device->blk_data.request_queue_lock); tapeblock_end_request(req, -EIO); - spin_lock_irq(&device->blk_data.request_queue_lock); continue; } blkdev_dequeue_request(req); diff --git a/trunk/drivers/s390/char/tape_core.c b/trunk/drivers/s390/char/tape_core.c index f9bb51fa7f5b..d7073dbf825c 100644 --- a/trunk/drivers/s390/char/tape_core.c +++ b/trunk/drivers/s390/char/tape_core.c @@ -1200,7 +1200,7 @@ tape_open(struct tape_device *device) { int rc; - spin_lock_irq(get_ccwdev_lock(device->cdev)); + spin_lock(get_ccwdev_lock(device->cdev)); if (device->tape_state == TS_NOT_OPER) { DBF_EVENT(6, "TAPE:nodev\n"); rc = -ENODEV; @@ -1218,7 +1218,7 @@ tape_open(struct tape_device *device) tape_state_set(device, TS_IN_USE); rc = 0; } - spin_unlock_irq(get_ccwdev_lock(device->cdev)); + spin_unlock(get_ccwdev_lock(device->cdev)); return rc; } @@ -1228,11 +1228,11 @@ tape_open(struct tape_device *device) int tape_release(struct tape_device *device) { - spin_lock_irq(get_ccwdev_lock(device->cdev)); + spin_lock(get_ccwdev_lock(device->cdev)); if (device->tape_state == TS_IN_USE) tape_state_set(device, TS_UNUSED); module_put(device->discipline->owner); - spin_unlock_irq(get_ccwdev_lock(device->cdev)); + spin_unlock(get_ccwdev_lock(device->cdev)); return 0; } diff --git a/trunk/drivers/s390/cio/qdio_debug.c b/trunk/drivers/s390/cio/qdio_debug.c index f05590355be8..b5390821434f 100644 --- a/trunk/drivers/s390/cio/qdio_debug.c +++ b/trunk/drivers/s390/cio/qdio_debug.c @@ -20,7 +20,6 @@ static struct dentry *debugfs_root; #define MAX_DEBUGFS_QUEUES 32 static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL }; static DEFINE_MUTEX(debugfs_mutex); -#define QDIO_DEBUGFS_NAME_LEN 40 void qdio_allocate_do_dbf(struct qdio_initialize *init_data) { @@ -153,6 +152,17 @@ static int qstat_seq_open(struct inode *inode, struct file *filp) filp->f_path.dentry->d_inode->i_private); } +static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name) +{ + memset(name, 0, sizeof(name)); + sprintf(name, "%s", dev_name(&cdev->dev)); + if (q->is_input_q) + sprintf(name + 
strlen(name), "_input"); + else + sprintf(name + strlen(name), "_output"); + sprintf(name + strlen(name), "_%d", q->nr); +} + static void remove_debugfs_entry(struct qdio_q *q) { int i; @@ -179,17 +189,14 @@ static struct file_operations debugfs_fops = { static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev) { int i = 0; - char name[QDIO_DEBUGFS_NAME_LEN]; + char name[40]; while (debugfs_queues[i] != NULL) { i++; if (i >= MAX_DEBUGFS_QUEUES) return; } - snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%s_%d", - dev_name(&cdev->dev), - q->is_input_q ? "input" : "output", - q->nr); + get_queue_name(q, cdev, name); debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR, debugfs_root, q, &debugfs_fops); } diff --git a/trunk/drivers/s390/cio/qdio_main.c b/trunk/drivers/s390/cio/qdio_main.c index 7c8659151993..a50682d2a0fa 100644 --- a/trunk/drivers/s390/cio/qdio_main.c +++ b/trunk/drivers/s390/cio/qdio_main.c @@ -1083,6 +1083,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, case -EIO: sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no); QDIO_DBF_TEXT2(1, setup, dbf_text); + qdio_int_error(cdev); return; case -ETIMEDOUT: sprintf(dbf_text, "qtoh%4x", irq_ptr->schid.sch_no); diff --git a/trunk/drivers/xen/events.c b/trunk/drivers/xen/events.c index 1e3b934a4cf7..9ce1ab6c268d 100644 --- a/trunk/drivers/xen/events.c +++ b/trunk/drivers/xen/events.c @@ -774,7 +774,7 @@ void xen_poll_irq(int irq) poll.nr_ports = 1; poll.timeout = 0; - set_xen_guest_handle(poll.ports, &evtchn); + poll.ports = &evtchn; if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0) BUG(); diff --git a/trunk/drivers/xen/manage.c b/trunk/drivers/xen/manage.c index 9b91617b9582..d0e87cbe157c 100644 --- a/trunk/drivers/xen/manage.c +++ b/trunk/drivers/xen/manage.c @@ -39,6 +39,8 @@ static int xen_suspend(void *data) BUG_ON(!irqs_disabled()); + load_cr3(swapper_pg_dir); + err = device_power_down(PMSG_SUSPEND); if (err) { printk(KERN_ERR "xen_suspend: device_power_down failed: %d\n", diff --git a/trunk/drivers/xen/xencomm.c b/trunk/drivers/xen/xencomm.c index a240b2c20b99..797cb4e31f07 100644 --- a/trunk/drivers/xen/xencomm.c +++ b/trunk/drivers/xen/xencomm.c @@ -23,7 +23,13 @@ #include #include #include -#include /* for xencomm_is_phys_contiguous() */ +#ifdef __ia64__ +#include /* for is_kern_addr() */ +#endif + +#ifdef HAVE_XEN_PLATFORM_COMPAT_H +#include +#endif static int xencomm_init(struct xencomm_desc *desc, void *buffer, unsigned long bytes) @@ -151,11 +157,20 @@ static int xencomm_create(void *buffer, unsigned long bytes, return 0; } +/* check if memory address is within VMALLOC region */ +static int is_phys_contiguous(unsigned long addr) +{ + if (!is_kernel_addr(addr)) + return 0; + + return (addr < VMALLOC_START) || (addr >= VMALLOC_END); +} + static struct xencomm_handle *xencomm_create_inline(void *ptr) { unsigned long paddr; - BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr)); + BUG_ON(!is_phys_contiguous((unsigned long)ptr)); paddr = (unsigned long)xencomm_pa(ptr); BUG_ON(paddr & XENCOMM_INLINE_FLAG); @@ -187,7 +202,7 @@ struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes) int rc; struct xencomm_desc *desc; - if (xencomm_is_phys_contiguous((unsigned long)ptr)) + if (is_phys_contiguous((unsigned long)ptr)) return xencomm_create_inline(ptr); rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL); @@ -204,7 +219,7 @@ struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes, int rc; struct xencomm_desc *desc = NULL; - if 
(xencomm_is_phys_contiguous((unsigned long)ptr))
+	if (is_phys_contiguous((unsigned long)ptr))
 		return xencomm_create_inline(ptr);
 
 	rc = xencomm_create_mini(ptr, bytes, xc_desc,
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index b483f39a7112..8478f334d732 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -936,6 +936,7 @@ struct sched_class {
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
 	void (*yield_task) (struct rq *rq);
+	int (*select_task_rq)(struct task_struct *p, int sync);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
 
@@ -943,8 +944,6 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int (*select_task_rq)(struct task_struct *p, int sync);
-
 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
 			struct rq *busiest, unsigned long max_load_move,
 			struct sched_domain *sd, enum cpu_idle_type idle,
@@ -956,17 +955,16 @@ struct sched_class {
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
+#endif
+	void (*set_curr_task) (struct rq *rq);
+	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
+	void (*task_new) (struct rq *rq, struct task_struct *p);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
 				 const cpumask_t *newmask);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
-#endif
-
-	void (*set_curr_task) (struct rq *rq);
-	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
-	void (*task_new) (struct rq *rq, struct task_struct *p);
 
 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
 			       int running);
diff --git a/trunk/kernel/irq/proc.c b/trunk/kernel/irq/proc.c
index 4d161c70ba55..fac014a81b24 100644
--- a/trunk/kernel/irq/proc.c
+++ b/trunk/kernel/irq/proc.c
@@ -220,7 +220,7 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action)
 	}
 }
 
-static void register_default_affinity_proc(void)
+void register_default_affinity_proc(void)
 {
 #ifdef CONFIG_SMP
 	proc_create("irq/default_smp_affinity", 0600, NULL,
diff --git a/trunk/kernel/lockdep.c b/trunk/kernel/lockdep.c
index 06e157119d2b..dbda475b13bd 100644
--- a/trunk/kernel/lockdep.c
+++ b/trunk/kernel/lockdep.c
@@ -2169,11 +2169,12 @@ void early_boot_irqs_on(void)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on_caller(unsigned long ip)
+void trace_hardirqs_on_caller(unsigned long a0)
 {
 	struct task_struct *curr = current;
+	unsigned long ip;
 
-	time_hardirqs_on(CALLER_ADDR0, ip);
+	time_hardirqs_on(CALLER_ADDR0, a0);
 
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -2187,6 +2188,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	}
 	/* we'll do an OFF -> ON transition: */
 	curr->hardirqs_enabled = 1;
+	ip = (unsigned long) __builtin_return_address(0);
 
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
@@ -2222,11 +2224,11 @@ EXPORT_SYMBOL(trace_hardirqs_on);
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off_caller(unsigned long ip)
+void trace_hardirqs_off_caller(unsigned long a0)
 {
 	struct task_struct *curr = current;
 
-	time_hardirqs_off(CALLER_ADDR0, ip);
+	time_hardirqs_off(CALLER_ADDR0, a0);
 
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -2239,7 +2241,7 @@ void trace_hardirqs_off_caller(unsigned long ip)
 		 * We have done an ON -> OFF transition:
 		 */
 		curr->hardirqs_enabled = 0;
-		curr->hardirq_disable_ip = ip;
+		curr->hardirq_disable_ip = _RET_IP_;
 		curr->hardirq_disable_event = ++curr->irq_events;
 		debug_atomic_inc(&hardirqs_off_events);
 	} else
@@ -3415,10 +3417,9 @@ void debug_show_all_locks(void)
 			}
 			printk(" ignoring it.\n");
 			unlock = 0;
-		} else {
-			if (count != 10)
-				printk(KERN_CONT " locked it.\n");
 		}
+		if (count != 10)
+			printk(" locked it.\n");
 
 	do_each_thread(g, p) {
 		/*
diff --git a/trunk/kernel/printk.c b/trunk/kernel/printk.c
index f492f1583d77..6341af77eb65 100644
--- a/trunk/kernel/printk.c
+++ b/trunk/kernel/printk.c
@@ -232,6 +232,45 @@ static inline void boot_delay_msec(void)
 }
 #endif
 
+/*
+ * Return the number of unread characters in the log buffer.
+ */
+static int log_buf_get_len(void)
+{
+	return logged_chars;
+}
+
+/*
+ * Copy a range of characters from the log buffer.
+ */
+int log_buf_copy(char *dest, int idx, int len)
+{
+	int ret, max;
+	bool took_lock = false;
+
+	if (!oops_in_progress) {
+		spin_lock_irq(&logbuf_lock);
+		took_lock = true;
+	}
+
+	max = log_buf_get_len();
+	if (idx < 0 || idx >= max) {
+		ret = -1;
+	} else {
+		if (len > max)
+			len = max;
+		ret = len;
+		idx += (log_end - max);
+		while (len-- > 0)
+			dest[len] = LOG_BUF(idx + len);
+	}
+
+	if (took_lock)
+		spin_unlock_irq(&logbuf_lock);
+
+	return ret;
+}
+
 /*
  * Commands to do_syslog:
  *
diff --git a/trunk/kernel/resource.c b/trunk/kernel/resource.c
index 7fec0e427234..4089d12af6e0 100644
--- a/trunk/kernel/resource.c
+++ b/trunk/kernel/resource.c
@@ -571,7 +571,7 @@ static void __init __reserve_region_with_split(struct resource *root,
 
 }
 
-void __init reserve_region_with_split(struct resource *root,
+void reserve_region_with_split(struct resource *root,
 		resource_size_t start, resource_size_t end,
 		const char *name)
 {
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index e8819bc6f462..6625c3c4b10d 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -386,6 +386,7 @@ struct cfs_rq {
 
 	u64 exec_clock;
 	u64 min_vruntime;
+	u64 pair_start;
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
@@ -3343,7 +3344,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	} else
 		this_load_per_task = cpu_avg_load_per_task(this_cpu);
 
-	if (max_load - this_load + busiest_load_per_task >=
+	if (max_load - this_load + 2*busiest_load_per_task >=
 				busiest_load_per_task * imbn) {
 		*imbalance = busiest_load_per_task;
 		return busiest;
diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c
index ce514afd78ff..9573c33688b8 100644
--- a/trunk/kernel/sched_fair.c
+++ b/trunk/kernel/sched_fair.c
@@ -143,49 +143,6 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
 	return se->parent;
 }
 
-/* return depth at which a sched entity is present in the hierarchy */
-static inline int depth_se(struct sched_entity *se)
-{
-	int depth = 0;
-
-	for_each_sched_entity(se)
-		depth++;
-
-	return depth;
-}
-
-static void
-find_matching_se(struct sched_entity **se, struct sched_entity **pse)
-{
-	int se_depth, pse_depth;
-
-	/*
-	 * preemption test can be made between sibling entities who are in the
-	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
-	 * both tasks until we find their ancestors who are siblings of common
-	 * parent.
-	 */
-
-	/* First walk up until both entities are at same depth */
-	se_depth = depth_se(*se);
-	pse_depth = depth_se(*pse);
-
-	while (se_depth > pse_depth) {
-		se_depth--;
-		*se = parent_entity(*se);
-	}
-
-	while (pse_depth > se_depth) {
-		pse_depth--;
-		*pse = parent_entity(*pse);
-	}
-
-	while (!is_same_group(*se, *pse)) {
-		*se = parent_entity(*se);
-		*pse = parent_entity(*pse);
-	}
-}
-
 #else /* CONFIG_FAIR_GROUP_SCHED */
 
 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
@@ -236,11 +193,6 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
 	return NULL;
 }
 
-static inline void
-find_matching_se(struct sched_entity **se, struct sched_entity **pse)
-{
-}
-
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
@@ -271,27 +223,6 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return se->vruntime - cfs_rq->min_vruntime;
 }
 
-static void update_min_vruntime(struct cfs_rq *cfs_rq)
-{
-	u64 vruntime = cfs_rq->min_vruntime;
-
-	if (cfs_rq->curr)
-		vruntime = cfs_rq->curr->vruntime;
-
-	if (cfs_rq->rb_leftmost) {
-		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
-						   struct sched_entity,
-						   run_node);
-
-		if (vruntime == cfs_rq->min_vruntime)
-			vruntime = se->vruntime;
-		else
-			vruntime = min_vruntime(vruntime, se->vruntime);
-	}
-
-	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
-}
-
 /*
  * Enqueue an entity into the rb-tree:
  */
@@ -325,8 +256,15 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * Maintain a cache of leftmost tree entries (it is frequently
 	 * used):
 	 */
-	if (leftmost)
+	if (leftmost) {
 		cfs_rq->rb_leftmost = &se->run_node;
+		/*
+		 * maintain cfs_rq->min_vruntime to be a monotonic increasing
+		 * value tracking the leftmost vruntime in the tree.
+		 */
+		cfs_rq->min_vruntime =
+			max_vruntime(cfs_rq->min_vruntime, se->vruntime);
+	}
 
 	rb_link_node(&se->run_node, parent, link);
 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
@@ -336,9 +274,18 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	if (cfs_rq->rb_leftmost == &se->run_node) {
 		struct rb_node *next_node;
+		struct sched_entity *next;
 
 		next_node = rb_next(&se->run_node);
 		cfs_rq->rb_leftmost = next_node;
+
+		if (next_node) {
+			next = rb_entry(next_node,
+					struct sched_entity, run_node);
+			cfs_rq->min_vruntime =
+				max_vruntime(cfs_rq->min_vruntime,
+					     next->vruntime);
+		}
 	}
 
 	if (cfs_rq->next == se)
@@ -477,7 +424,6 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	schedstat_add(cfs_rq, exec_clock, delta_exec);
 	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
 	curr->vruntime += delta_exec_weighted;
-	update_min_vruntime(cfs_rq);
 }
 
 static void update_curr(struct cfs_rq *cfs_rq)
@@ -667,7 +613,13 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
-	u64 vruntime = cfs_rq->min_vruntime;
+	u64 vruntime;
+
+	if (first_fair(cfs_rq)) {
+		vruntime = min_vruntime(cfs_rq->min_vruntime,
+				__pick_next_entity(cfs_rq)->vruntime);
+	} else
+		vruntime = cfs_rq->min_vruntime;
 
 	/*
 	 * The 'current' period is already promised to the current tasks,
@@ -744,7 +696,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	account_entity_dequeue(cfs_rq, se);
-	update_min_vruntime(cfs_rq);
 }
 
 /*
@@ -791,14 +742,16 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
-static int
-wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
-
 static struct sched_entity *
 pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	if (!cfs_rq->next || wakeup_preempt_entity(cfs_rq->next, se) == 1)
+	struct rq *rq = rq_of(cfs_rq);
+	u64 pair_slice = rq->clock - cfs_rq->pair_start;
+
+	if (!cfs_rq->next || pair_slice > sysctl_sched_min_granularity) {
+		cfs_rq->pair_start = rq->clock;
 		return se;
+	}
 
 	return cfs_rq->next;
 }
@@ -1169,9 +1122,10 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
 		return 0;
 
-	if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
-			p->se.avg_overlap > sysctl_sched_migration_cost))
-		sync = 0;
+	if (!sync && sched_feat(SYNC_WAKEUPS) &&
+	    curr->se.avg_overlap < sysctl_sched_migration_cost &&
+	    p->se.avg_overlap < sysctl_sched_migration_cost)
+		sync = 1;
 
 	/*
 	 * If sync wakeup then subtract the (maximum possible)
@@ -1290,41 +1244,12 @@ static unsigned long wakeup_gran(struct sched_entity *se)
 	 * More easily preempt - nice tasks, while not making it harder for
 	 * + nice tasks.
 	 */
-	if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD)
-		gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
+	if (sched_feat(ASYM_GRAN))
+		gran = calc_delta_mine(gran, NICE_0_LOAD, &se->load);
 
 	return gran;
 }
 
-/*
- * Should 'se' preempt 'curr'.
- *
- *             |s1
- *        |s2
- *   |s3
- *         g
- *      |<--->|c
- *
- *  w(c, s1) = -1
- *  w(c, s2) =  0
- *  w(c, s3) =  1
- *
- */
-static int
-wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
-{
-	s64 gran, vdiff = curr->vruntime - se->vruntime;
-
-	if (vdiff <= 0)
-		return -1;
-
-	gran = wakeup_gran(curr);
-	if (vdiff > gran)
-		return 1;
-
-	return 0;
-}
-
 /*
  * Preempt the current task with a newly woken task if needed:
  */
@@ -1333,6 +1258,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	struct task_struct *curr = rq->curr;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	struct sched_entity *se = &curr->se, *pse = &p->se;
+	s64 delta_exec;
 
 	if (unlikely(rt_prio(p->prio))) {
 		update_rq_clock(rq);
@@ -1370,19 +1296,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 		return;
 	}
 
-	find_matching_se(&se, &pse);
-
-	while (se) {
-		BUG_ON(!pse);
-
-		if (wakeup_preempt_entity(se, pse) == 1) {
-			resched_task(curr);
-			break;
-		}
-
-		se = parent_entity(se);
-		pse = parent_entity(pse);
-	}
+	delta_exec = se->sum_exec_runtime - se->prev_sum_exec_runtime;
+	if (delta_exec > wakeup_gran(pse))
+		resched_task(curr);
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
@@ -1678,6 +1594,9 @@ static const struct sched_class fair_sched_class = {
 	.enqueue_task		= enqueue_task_fair,
 	.dequeue_task		= dequeue_task_fair,
 	.yield_task		= yield_task_fair,
+#ifdef CONFIG_SMP
+	.select_task_rq		= select_task_rq_fair,
+#endif /* CONFIG_SMP */
 
 	.check_preempt_curr	= check_preempt_wakeup,
 
@@ -1685,8 +1604,6 @@ static const struct sched_class fair_sched_class = {
 	.put_prev_task		= put_prev_task_fair,
 
 #ifdef CONFIG_SMP
-	.select_task_rq		= select_task_rq_fair,
-
 	.load_balance		= load_balance_fair,
 	.move_one_task		= move_one_task_fair,
 #endif
diff --git a/trunk/kernel/sched_idletask.c b/trunk/kernel/sched_idletask.c
index 8a21a2e28c13..dec4ccabe2f5 100644
--- a/trunk/kernel/sched_idletask.c
+++ b/trunk/kernel/sched_idletask.c
@@ -105,6 +105,9 @@ static const struct sched_class idle_sched_class = {
 	/* dequeue is not valid, we print a debug message there: */
 	.dequeue_task		= dequeue_task_idle,
+#ifdef CONFIG_SMP
+	.select_task_rq		= select_task_rq_idle,
+#endif /* CONFIG_SMP */
 
 	.check_preempt_curr	= check_preempt_curr_idle,
 
@@ -112,8 +115,6 @@ static const struct sched_class idle_sched_class = {
 	.put_prev_task		= put_prev_task_idle,
 
 #ifdef CONFIG_SMP
-	.select_task_rq		= select_task_rq_idle,
-
 	.load_balance		= load_balance_idle,
 	.move_one_task		= move_one_task_idle,
 #endif
diff --git a/trunk/kernel/sched_rt.c b/trunk/kernel/sched_rt.c
index d9ba9d5f99d6..b446dc87494f 100644
--- a/trunk/kernel/sched_rt.c
+++ b/trunk/kernel/sched_rt.c
@@ -1504,6 +1504,9 @@ static const struct sched_class rt_sched_class = {
 	.enqueue_task		= enqueue_task_rt,
 	.dequeue_task		= dequeue_task_rt,
 	.yield_task		= yield_task_rt,
+#ifdef CONFIG_SMP
+	.select_task_rq		= select_task_rq_rt,
+#endif /* CONFIG_SMP */
 
 	.check_preempt_curr	= check_preempt_curr_rt,
 
@@ -1511,8 +1514,6 @@ static const struct sched_class rt_sched_class = {
 	.put_prev_task		= put_prev_task_rt,
 
 #ifdef CONFIG_SMP
-	.select_task_rq		= select_task_rq_rt,
-
 	.load_balance		= load_balance_rt,
 	.move_one_task		= move_one_task_rt,
 	.set_cpus_allowed       = set_cpus_allowed_rt,
diff --git a/trunk/lib/swiotlb.c b/trunk/lib/swiotlb.c
index 78330c37a61b..f8eebd489149 100644
--- a/trunk/lib/swiotlb.c
+++ b/trunk/lib/swiotlb.c
@@ -497,10 +497,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
 		       (unsigned long long)*hwdev->dma_mask,
 		       (unsigned long long)dev_addr);
-
-		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
-		return NULL;
+		panic("swiotlb_alloc_coherent: allocated memory is out of "
+		      "range for device");
 	}
 	*dma_handle = dev_addr;
 	return ret;