
Commit

---
r: 117978
b: refs/heads/master
c: 6ad9f15
h: refs/heads/master
v: v3
Marcelo Tosatti authored and Avi Kivity committed Oct 28, 2008
1 parent bd31e9e commit 7702c4f
Showing 78 changed files with 294 additions and 394 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 0d8762c9ee40cf83d5dbf3a22843bc566912b592
+refs/heads/master: 6ad9f15c94822c3f067a7d443f3b414e08b34460
2 changes: 1 addition & 1 deletion trunk/Documentation/scheduler/sched-design-CFS.txt
@@ -92,7 +92,7 @@ other HZ detail. Thus the CFS scheduler has no notion of "timeslices" in the
 way the previous scheduler had, and has no heuristics whatsoever. There is
 only one central tunable (you have to switch on CONFIG_SCHED_DEBUG):

-   /proc/sys/kernel/sched_min_granularity_ns
+   /proc/sys/kernel/sched_granularity_ns

 which can be used to tune the scheduler from "desktop" (i.e., low latencies) to
 "server" (i.e., good batching) workloads. It defaults to a setting suitable
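Aside (not part of the commit): the tunable this hunk renames is an ordinary procfs file, so it can be read from userspace. A minimal C sketch that tries both names, since which one exists depends on the kernel version:

/* Sketch: read the CFS granularity tunable from procfs.  The two
 * paths are exactly the two names shown in the hunk above. */
#include <stdio.h>

int main(void)
{
	const char *paths[] = {
		"/proc/sys/kernel/sched_granularity_ns",
		"/proc/sys/kernel/sched_min_granularity_ns",
	};

	for (int i = 0; i < 2; i++) {
		FILE *f = fopen(paths[i], "r");
		unsigned long long ns;

		if (!f)
			continue;	/* this kernel uses the other name */
		if (fscanf(f, "%llu", &ns) == 1)
			printf("%s = %llu ns\n", paths[i], ns);
		fclose(f);
		return 0;
	}
	fprintf(stderr, "no CFS granularity sysctl found\n");
	return 1;
}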
16 changes: 9 additions & 7 deletions trunk/arch/s390/Kconfig
@@ -241,17 +241,19 @@ config PACK_STACK
 	  Say Y if you are unsure.

 config SMALL_STACK
-	bool "Use 8kb for kernel stack instead of 16kb"
-	depends on PACK_STACK && 64BIT && !LOCKDEP
+	bool "Use 4kb/8kb for kernel stack instead of 8kb/16kb"
+	depends on PACK_STACK && !LOCKDEP
 	help
 	  If you say Y here and the compiler supports the -mkernel-backchain
-	  option the kernel will use a smaller kernel stack size. The reduced
-	  size is 8kb instead of 16kb. This allows to run more threads on a
-	  system and reduces the pressure on the memory management for higher
-	  order page allocations.
+	  option the kernel will use a smaller kernel stack size. For 31 bit
+	  the reduced size is 4kb instead of 8kb and for 64 bit it is 8kb
+	  instead of 16kb. This allows to run more thread on a system and
+	  reduces the pressure on the memory management for higher order
+	  page allocations.

 	  Say N if you are unsure.

+
 config CHECK_STACK
 	bool "Detect kernel stack overflow"
 	help
@@ -382,7 +384,7 @@ config IPL
 choice
 	prompt "IPL method generated into head.S"
 	depends on IPL
-	default IPL_VM
+	default IPL_TAPE
 	help
 	  Select "tape" if you want to IPL the image from a Tape.
2 changes: 1 addition & 1 deletion trunk/arch/s390/appldata/appldata_base.c
@@ -424,7 +424,7 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
  */
 int appldata_register_ops(struct appldata_ops *ops)
 {
-	if (ops->size > APPLDATA_MAX_REC_SIZE)
+	if ((ops->size > APPLDATA_MAX_REC_SIZE) || (ops->size < 0))
 		return -EINVAL;

 	ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL);
2 changes: 1 addition & 1 deletion trunk/arch/s390/include/asm/kvm_virtio.h
@@ -52,7 +52,7 @@ struct kvm_vqconfig {

 #ifdef __KERNEL__
 /* early virtio console setup */
-#ifdef CONFIG_S390_GUEST
+#ifdef CONFIG_VIRTIO_CONSOLE
 extern void s390_virtio_console_init(void);
 #else
 static inline void s390_virtio_console_init(void)
3 changes: 1 addition & 2 deletions trunk/arch/s390/include/asm/mmu.h
@@ -7,8 +7,7 @@ typedef struct {
 	unsigned long asce_bits;
 	unsigned long asce_limit;
 	int noexec;
-	int has_pgste;	 /* The mmu context has extended page tables */
-	int alloc_pgste; /* cloned contexts will have extended page tables */
+	int pgstes;
 } mm_context_t;

 #endif
19 changes: 3 additions & 16 deletions trunk/arch/s390/include/asm/mmu_context.h
@@ -20,25 +20,12 @@ static inline int init_new_context(struct task_struct *tsk,
 #ifdef CONFIG_64BIT
 	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #endif
-	if (current->mm->context.alloc_pgste) {
-		/*
-		 * alloc_pgste indicates, that any NEW context will be created
-		 * with extended page tables. The old context is unchanged. The
-		 * page table allocation and the page table operations will
-		 * look at has_pgste to distinguish normal and extended page
-		 * tables. The only way to create extended page tables is to
-		 * set alloc_pgste and then create a new context (e.g. dup_mm).
-		 * The page table allocation is called after init_new_context
-		 * and if has_pgste is set, it will create extended page
-		 * tables.
-		 */
+	if (current->mm->context.pgstes) {
 		mm->context.noexec = 0;
-		mm->context.has_pgste = 1;
-		mm->context.alloc_pgste = 1;
+		mm->context.pgstes = 1;
 	} else {
 		mm->context.noexec = s390_noexec;
-		mm->context.has_pgste = 0;
-		mm->context.alloc_pgste = 0;
+		mm->context.pgstes = 0;
 	}
 	mm->context.asce_limit = STACK_TOP_MAX;
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
8 changes: 4 additions & 4 deletions trunk/arch/s390/include/asm/pgtable.h
@@ -679,7 +679,7 @@ static inline void pmd_clear(pmd_t *pmd)

 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	if (mm->context.has_pgste)
+	if (mm->context.pgstes)
 		ptep_rcp_copy(ptep);
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
 	if (mm->context.noexec)
@@ -763,7 +763,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
 	struct page *page;
 	unsigned int skey;

-	if (!mm->context.has_pgste)
+	if (!mm->context.pgstes)
 		return -EINVAL;
 	rcp_lock(ptep);
 	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
@@ -794,7 +794,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 	int young;
 	unsigned long *pgste;

-	if (!vma->vm_mm->context.has_pgste)
+	if (!vma->vm_mm->context.pgstes)
 		return 0;
 	physpage = pte_val(*ptep) & PAGE_MASK;
 	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
@@ -844,7 +844,7 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 static inline void ptep_invalidate(struct mm_struct *mm,
 				   unsigned long address, pte_t *ptep)
 {
-	if (mm->context.has_pgste) {
+	if (mm->context.pgstes) {
 		rcp_lock(ptep);
 		__ptep_ipte(address, ptep);
 		ptep_rcp_copy(ptep);
5 changes: 5 additions & 0 deletions trunk/arch/s390/include/asm/thread_info.h
@@ -15,8 +15,13 @@
  * Size of kernel stack for each process
  */
 #ifndef __s390x__
+#ifndef __SMALL_STACK
 #define THREAD_ORDER 1
 #define ASYNC_ORDER  1
+#else
+#define THREAD_ORDER 0
+#define ASYNC_ORDER  0
+#endif
 #else /* __s390x__ */
 #ifndef __SMALL_STACK
 #define THREAD_ORDER 2
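Aside (not part of the commit): the kernel stack size follows from these orders as PAGE_SIZE << THREAD_ORDER, an order-n allocation being 2^n contiguous pages. That is where the 4kb/8kb and 8kb/16kb figures in the SMALL_STACK help text above come from. A small sketch of the arithmetic:

/* Sketch (not from the commit): how THREAD_ORDER translates into a
 * stack size with 4 KB pages -- order 0 = 4 KB, order 1 = 8 KB,
 * order 2 = 16 KB, matching the SMALL_STACK help text. */
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* 4 KB pages, as on s390 */

static unsigned long thread_size(unsigned int thread_order)
{
	/* an order-n allocation is 2^n contiguous pages */
	return PAGE_SIZE << thread_order;
}

int main(void)
{
	/* 31 bit: __SMALL_STACK drops THREAD_ORDER from 1 to 0 */
	printf("31 bit: %lu -> %lu bytes\n", thread_size(1), thread_size(0));
	/* 64 bit: from order 2; the truncated hunk presumably drops it to 1 */
	printf("64 bit: %lu -> %lu bytes\n", thread_size(2), thread_size(1));
	return 0;
}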
24 changes: 15 additions & 9 deletions trunk/arch/s390/kernel/smp.c
@@ -1119,18 +1119,22 @@ int __ref smp_rescan_cpus(void)
 	return rc;
 }

-static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf,
+static ssize_t __ref rescan_store(struct sys_device *dev,
+				  struct sysdev_attribute *attr,
+				  const char *buf,
 				  size_t count)
 {
 	int rc;

 	rc = smp_rescan_cpus();
 	return rc ? rc : count;
 }
-static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
+static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
 #endif /* CONFIG_HOTPLUG_CPU */

-static ssize_t dispatching_show(struct sysdev_class *class, char *buf)
+static ssize_t dispatching_show(struct sys_device *dev,
+				struct sysdev_attribute *attr,
+				char *buf)
 {
 	ssize_t count;

@@ -1140,8 +1144,9 @@ static ssize_t dispatching_show(struct sysdev_class *class, char *buf)
 	return count;
 }

-static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf,
-				 size_t count)
+static ssize_t dispatching_store(struct sys_device *dev,
+				 struct sysdev_attribute *attr,
+				 const char *buf, size_t count)
 {
 	int val, rc;
 	char delim;
@@ -1163,8 +1168,7 @@ static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf,
 	put_online_cpus();
 	return rc ? rc : count;
 }
-static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
-			 dispatching_store);
+static SYSDEV_ATTR(dispatching, 0644, dispatching_show, dispatching_store);

 static int __init topology_init(void)
 {
@@ -1174,11 +1178,13 @@ static int __init topology_init(void)
 	register_cpu_notifier(&smp_cpu_nb);

 #ifdef CONFIG_HOTPLUG_CPU
-	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
+	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+			       &attr_rescan.attr);
 	if (rc)
 		return rc;
 #endif
-	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
+	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+			       &attr_dispatching.attr);
 	if (rc)
 		return rc;
 	for_each_present_cpu(cpu) {
16 changes: 8 additions & 8 deletions trunk/arch/s390/mm/pgtable.c
@@ -169,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	unsigned long *table;
 	unsigned long bits;

-	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+	bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL;
 	spin_lock(&mm->page_table_lock);
 	page = NULL;
 	if (!list_empty(&mm->context.pgtable_list)) {
@@ -186,7 +186,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 		pgtable_page_ctor(page);
 		page->flags &= ~FRAG_MASK;
 		table = (unsigned long *) page_to_phys(page);
-		if (mm->context.has_pgste)
+		if (mm->context.pgstes)
 			clear_table_pgstes(table);
 		else
 			clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
@@ -210,7 +210,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	struct page *page;
 	unsigned long bits;

-	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+	bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL;
 	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	spin_lock(&mm->page_table_lock);
@@ -257,7 +257,7 @@ int s390_enable_sie(void)
 	struct mm_struct *mm, *old_mm;

 	/* Do we have pgstes? if yes, we are done */
-	if (tsk->mm->context.has_pgste)
+	if (tsk->mm->context.pgstes)
 		return 0;

 	/* lets check if we are allowed to replace the mm */
@@ -269,14 +269,14 @@ int s390_enable_sie(void)
 	}
 	task_unlock(tsk);

-	/* we copy the mm and let dup_mm create the page tables with_pgstes */
-	tsk->mm->context.alloc_pgste = 1;
+	/* we copy the mm with pgstes enabled */
+	tsk->mm->context.pgstes = 1;
 	mm = dup_mm(tsk);
-	tsk->mm->context.alloc_pgste = 0;
+	tsk->mm->context.pgstes = 0;
 	if (!mm)
 		return -ENOMEM;

-	/* Now lets check again if something happened */
+	/* Now lets check again if somebody attached ptrace etc */
 	task_lock(tsk);
 	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
 	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
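Aside (my reading, not from the commit): the `bits` arithmetic in page_table_free() above appears to track which 2 KB fragment of a 4 KB page a page table occupies — 256 entries of 8 bytes each — with `3UL` claiming both halves when noexec or pgstes need the companion half. A standalone sketch of that index calculation:

/* Sketch (my reading of the hunk above, not part of the commit):
 * a 4 KB page holds two 2 KB page-table fragments (256 entries of
 * 8 bytes).  The shift derives the fragment index from the table's
 * offset within its page, so the usage mask lands on the right bits. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long frag_bits(unsigned long table_phys, int two_halves)
{
	/* 3UL when the table claims both halves (noexec or pgstes),
	 * 1UL when it only claims its own 2 KB fragment */
	unsigned long bits = two_halves ? 3UL : 1UL;

	return bits << ((table_phys & (PAGE_SIZE - 1)) / 256
			/ sizeof(unsigned long));
}

int main(void)
{
	assert(frag_bits(0x10000, 0) == 1UL);	/* first half  -> bit 0 */
	assert(frag_bits(0x10800, 0) == 2UL);	/* second half -> bit 1 */
	assert(frag_bits(0x10000, 1) == 3UL);	/* both halves claimed */
	puts("fragment index math checks out");
	return 0;
}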
4 changes: 1 addition & 3 deletions trunk/arch/x86/include/asm/dma-mapping.h
@@ -255,11 +255,9 @@ static inline unsigned long dma_alloc_coherent_mask(struct device *dev,

 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 {
-#ifdef CONFIG_X86_64
 	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

-	if (dma_mask <= DMA_24BIT_MASK)
-		gfp |= GFP_DMA;
+#ifdef CONFIG_X86_64
 	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
 		gfp |= GFP_DMA32;
 #endif
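Aside (not part of the commit): the surviving check steers allocations for devices that can reach 4 GB but not the full 64-bit space into the DMA32 zone. A hedged userspace sketch of that selection logic; only DMA_32BIT_MASK matches the kernel's constant, the flag values here are stand-ins:

/* Sketch (not from the commit): the gfp-flag selection performed by
 * dma_alloc_coherent_gfp_flags() on x86-64.  Flag values are
 * illustrative stand-ins, not the kernel's. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_32BIT_MASK 0xffffffffULL
#define GFP_DMA   0x01u		/* stand-in value */
#define GFP_DMA32 0x04u		/* stand-in value */

static unsigned int gfp_flags_for_mask(uint64_t dma_mask, unsigned int gfp)
{
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
	return gfp;
}

int main(void)
{
	/* 32-bit-only device: steered into the DMA32 zone */
	assert(gfp_flags_for_mask(DMA_32BIT_MASK, 0) & GFP_DMA32);
	/* caller already forced GFP_DMA: left alone */
	assert(!(gfp_flags_for_mask(DMA_32BIT_MASK, GFP_DMA) & GFP_DMA32));
	/* fully 64-bit capable device: no zone restriction added */
	assert(gfp_flags_for_mask(UINT64_MAX, 0) == 0);
	puts("gfp selection behaves as described");
	return 0;
}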
7 changes: 4 additions & 3 deletions trunk/arch/x86/kernel/genx2apic_uv_x.c
@@ -15,6 +15,7 @@
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/sched.h>
+#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
 #include <asm/smp.h>
@@ -397,16 +398,16 @@ void __init uv_system_init(void)
 	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());

 	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
-	uv_blade_info = kmalloc(bytes, GFP_KERNEL);
+	uv_blade_info = alloc_bootmem_pages(bytes);

 	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

 	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
-	uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
+	uv_node_to_blade = alloc_bootmem_pages(bytes);
 	memset(uv_node_to_blade, 255, bytes);

 	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
-	uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
+	uv_cpu_to_blade = alloc_bootmem_pages(bytes);
 	memset(uv_cpu_to_blade, 255, bytes);

 	blade = 0;
14 changes: 1 addition & 13 deletions trunk/arch/x86/kernel/pci-swiotlb_64.c
@@ -18,21 +18,9 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
 	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
 }

-static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flags)
-{
-	void *vaddr;
-
-	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
-	if (vaddr)
-		return vaddr;
-
-	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
-}
-
 struct dma_mapping_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
-	.alloc_coherent = x86_swiotlb_alloc_coherent,
+	.alloc_coherent = swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
 	.map_single = swiotlb_map_single_phys,
 	.unmap_single = swiotlb_unmap_single,
1 change: 1 addition & 0 deletions trunk/arch/x86/kvm/mmu.c
@@ -2634,6 +2634,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	kvm_x86_ops->tlb_flush(vcpu);
+	set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
 	return 1;
 }

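Aside (not part of the commit): the added line uses KVM's per-vcpu request bitmap — a producer sets a bit, and the guest-entry path consumes it before the vcpu runs again. A simplified sketch of that set/consume pattern, with plain bit operations standing in for the kernel's atomic set_bit()/test_and_clear_bit() and an illustrative bit number:

/* Sketch (not from the commit): the deferred-request pattern the
 * added set_bit() relies on.  Atomics are elided for brevity. */
#include <stdio.h>

#define KVM_REQ_MMU_SYNC 7	/* illustrative bit number */

struct vcpu {
	unsigned long requests;
};

static void request_mmu_sync(struct vcpu *v)
{
	v->requests |= 1UL << KVM_REQ_MMU_SYNC;	/* kernel: set_bit() */
}

static void vcpu_enter_guest(struct vcpu *v)
{
	/* kernel: test_and_clear_bit() */
	if (v->requests & (1UL << KVM_REQ_MMU_SYNC)) {
		v->requests &= ~(1UL << KVM_REQ_MMU_SYNC);
		puts("syncing shadow page tables before guest entry");
	}
}

int main(void)
{
	struct vcpu v = { 0 };

	request_mmu_sync(&v);	/* e.g. from kvm_pv_mmu_flush_tlb() */
	vcpu_enter_guest(&v);
	return 0;
}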
14 changes: 4 additions & 10 deletions trunk/arch/x86/mm/init_64.c
@@ -350,10 +350,8 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 		 * pagetable pages as RO. So assume someone who pre-setup
 		 * these mappings are more intelligent.
 		 */
-		if (pte_val(*pte)) {
-			pages++;
+		if (pte_val(*pte))
 			continue;
-		}

 		if (0)
 			printk(" pte=%p addr=%lx pte=%016lx\n",
@@ -420,10 +418,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			 * not differ with respect to page frame and
 			 * attributes.
 			 */
-			if (page_size_mask & (1 << PG_LEVEL_2M)) {
-				pages++;
+			if (page_size_mask & (1 << PG_LEVEL_2M))
 				continue;
-			}
 			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
 		}

@@ -503,10 +499,8 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			 * not differ with respect to page frame and
 			 * attributes.
 			 */
-			if (page_size_mask & (1 << PG_LEVEL_1G)) {
-				pages++;
+			if (page_size_mask & (1 << PG_LEVEL_1G))
 				continue;
-			}
 			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
 		}

@@ -837,7 +831,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;

-	last_mapped_pfn = init_memory_mapping(start, start + size);
+	last_mapped_pfn = init_memory_mapping(start, start + size-1);
 	if (last_mapped_pfn > max_pfn_mapped)
 		max_pfn_mapped = last_mapped_pfn;
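Aside (not part of the commit): the last hunk changes the end argument by exactly one byte. Whether `start + size` (first byte past the region) or `start + size - 1` (last byte of the region) is right depends on whether init_memory_mapping() treats its end as exclusive or inclusive; the sketch below only shows the arithmetic for a 128 MB hotplug region:

/* Sketch (not from the commit): the one-byte difference between the
 * two end-address forms in the arch_add_memory() hunk. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t start = 0x40000000ULL;		/* region base: 1 GB */
	uint64_t size  = 128ULL << 20;		/* 128 MB hotplug region */
	uint64_t nr_pages = size >> PAGE_SHIFT;	/* as in arch_add_memory() */

	printf("nr_pages         = %llu\n", (unsigned long long)nr_pages);
	printf("start + size     = %#llx (first byte past the region)\n",
	       (unsigned long long)(start + size));
	printf("start + size - 1 = %#llx (last byte of the region)\n",
	       (unsigned long long)(start + size - 1));
	return 0;
}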