---
yaml
---
r: 145239
b: refs/heads/master
c: 88dff49
h: refs/heads/master
i:
  145237: 576c10a
  145235: 7589410
  145231: 8c9ad3d
v: v3
Zhang Rui authored and Ingo Molnar committed May 22, 2009
1 parent e63ed17 commit 1df1948
Showing 157 changed files with 463 additions and 9,826 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 93c32483808c5d82c9bd1eef2208854a6d1326db
refs/heads/master: 88dff4936c0a5fa53080cca68dc963a8a2a674b0
2 changes: 1 addition & 1 deletion trunk/Documentation/filesystems/tmpfs.txt
@@ -133,4 +133,4 @@ RAM/SWAP in 10240 inodes and it is only accessible by root.
Author:
Christoph Rohland <cr@sap.com>, 1.12.01
Updated:
Hugh Dickins, 4 June 2007
Hugh Dickins <hugh@veritas.com>, 4 June 2007
15 changes: 3 additions & 12 deletions trunk/Documentation/networking/ip-sysctl.txt
@@ -1266,22 +1266,13 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max
sctp_wmem - vector of 3 INTEGERs: min, default, max
See tcp_wmem for a description.

UNDOCUMENTED:

/proc/sys/net/core/*
dev_weight - INTEGER
The maximum number of packets that kernel can handle on a NAPI
interrupt, it's a Per-CPU variable.

Default: 64
dev_weight FIXME

/proc/sys/net/unix/*
max_dgram_qlen - INTEGER
The maximum length of dgram socket receive queue

Default: 10


UNDOCUMENTED:
max_dgram_qlen FIXME

/proc/sys/net/irda/*
fast_poll_increase FIXME
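For reference, the two entries touched in this hunk, dev_weight and max_dgram_qlen, are ordinary procfs sysctls. A minimal user-space sketch for inspecting them (illustrative only, not part of this commit; it assumes a Linux system that exposes these /proc/sys paths):

    #include <stdio.h>

    static void print_sysctl(const char *path)
    {
        char buf[64];
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("%s = %s", path, buf);   /* the value already ends in '\n' */
        fclose(f);
    }

    int main(void)
    {
        print_sysctl("/proc/sys/net/core/dev_weight");      /* NAPI per-poll packet budget */
        print_sysctl("/proc/sys/net/unix/max_dgram_qlen");  /* AF_UNIX datagram receive-queue limit */
        return 0;
    }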
16 changes: 0 additions & 16 deletions trunk/MAINTAINERS
@@ -1431,14 +1431,6 @@ P: Russell King
M: linux@arm.linux.org.uk
F: include/linux/clk.h

CISCO FCOE HBA DRIVER
P: Abhijeet Joglekar
M: abjoglek@cisco.com
P: Joe Eykholt
M: jeykholt@cisco.com
L: linux-scsi@vger.kernel.org
S: Supported

CODA FILE SYSTEM
P: Jan Harkes
M: jaharkes@cs.cmu.edu
@@ -5587,14 +5579,6 @@ M: ian@mnementh.co.uk
S: Maintained
F: drivers/mmc/host/tmio_mmc.*

TMPFS (SHMEM FILESYSTEM)
P: Hugh Dickins
M: hugh.dickins@tiscali.co.uk
L: linux-mm@kvack.org
S: Maintained
F: include/linux/shmem_fs.h
F: mm/shmem.c

TPM DEVICE DRIVER
P: Debora Velarde
M: debora@linux.vnet.ibm.com
4 changes: 2 additions & 2 deletions trunk/Makefile
@@ -1,8 +1,8 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 30
EXTRAVERSION = -rc7
NAME = Man-Eating Seals of Antiquity
EXTRAVERSION = -rc6
NAME = Vindictive Armadillo

# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
6 changes: 3 additions & 3 deletions trunk/arch/arm/Kconfig
@@ -273,7 +273,6 @@ config ARCH_EP93XX
select HAVE_CLK
select COMMON_CLKDEV
select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_HOLES_MEMORYMODEL
help
This enables support for the Cirrus EP93xx series of CPUs.

@@ -977,9 +976,10 @@ config OABI_COMPAT
UNPREDICTABLE (in fact it can be predicted that it won't work
at all). If in doubt say Y.

config ARCH_HAS_HOLES_MEMORYMODEL
config ARCH_FLATMEM_HAS_HOLES
bool
default n
default y
depends on FLATMEM

# Discontigmem is deprecated
config ARCH_DISCONTIGMEM_ENABLE
4 changes: 2 additions & 2 deletions trunk/arch/arm/common/gic.c
@@ -253,9 +253,9 @@ void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
}

#ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
void gic_raise_softirq(cpumask_t cpumask, unsigned int irq)
{
unsigned long map = *cpus_addr(*mask);
unsigned long map = *cpus_addr(cpumask);

/* this always happens on GIC0 */
writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
2 changes: 1 addition & 1 deletion trunk/arch/arm/include/asm/hardware/gic.h
@@ -36,7 +36,7 @@
void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start);
void gic_cpu_init(unsigned int gic_nr, void __iomem *base);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
void gic_raise_softirq(cpumask_t cpumask, unsigned int irq);
#endif

#endif
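The GIC and SMP hunks here and below switch gic_raise_softirq() and related helpers between two ways of taking a CPU set: a const struct cpumask * pointer versus a cpumask_t passed by value, where the by-value form copies the entire bitmap at each call. A hypothetical user-space sketch of that cost difference, using a stand-in mask type rather than the real <linux/cpumask.h> definitions:

    #include <stdio.h>
    #include <string.h>

    #define NR_CPUS 1024    /* illustrative; the real value is a kernel config option */

    struct mask {
        unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
    };

    /* cpumask_t-style: the whole bitmap is copied onto the stack per call. */
    static size_t by_value(struct mask m)
    {
        return sizeof(m);   /* size of the copy */
    }

    /* const struct cpumask *-style: only a pointer crosses the call. */
    static size_t by_pointer(const struct mask *m)
    {
        return sizeof(m);   /* size of the pointer */
    }

    int main(void)
    {
        struct mask m;

        memset(&m, 0, sizeof(m));
        printf("by value  : %zu bytes copied per call\n", by_value(m));     /* 128 with NR_CPUS=1024 */
        printf("by pointer: %zu bytes passed per call\n", by_pointer(&m));  /* 8 on LP64 */
        return 0;
    }

Passing large masks by reference is the direction the kernel's cpumask rework was heading, since on-stack copies grow with NR_CPUS; the sketch only illustrates why the two signatures are not interchangeable in cost.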
12 changes: 8 additions & 4 deletions trunk/arch/arm/include/asm/smp.h
@@ -53,12 +53,17 @@ extern void smp_store_cpu_info(unsigned int cpuid);
/*
* Raise an IPI cross call on CPUs in callmap.
*/
extern void smp_cross_call(const struct cpumask *mask);
extern void smp_cross_call(cpumask_t callmap);

/*
* Broadcast a timer interrupt to the other CPUs.
*/
extern void smp_send_timer(void);

/*
* Broadcast a clock event to other CPUs.
*/
extern void smp_timer_broadcast(const struct cpumask *mask);
extern void smp_timer_broadcast(cpumask_t mask);

/*
* Boot a secondary CPU, and assign it the specified idle task.
@@ -97,8 +102,7 @@ extern int platform_cpu_kill(unsigned int cpu);
extern void platform_cpu_enable(unsigned int cpu);

extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
extern void arch_send_call_function_ipi(cpumask_t mask);

/*
* Local timer interrupt handling function (can be IPI'ed).
46 changes: 30 additions & 16 deletions trunk/arch/arm/kernel/smp.c
@@ -326,14 +326,14 @@ void __init smp_prepare_boot_cpu(void)
per_cpu(cpu_data, cpu).idle = current;
}

static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
{
unsigned long flags;
unsigned int cpu;

local_irq_save(flags);

for_each_cpu(cpu, mask) {
for_each_cpu_mask(cpu, callmap) {
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

spin_lock(&ipi->lock);
@@ -344,19 +344,19 @@ static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
/*
* Call the platform specific cross-CPU call function.
*/
smp_cross_call(mask);
smp_cross_call(callmap);

local_irq_restore(flags);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
void arch_send_call_function_ipi(cpumask_t mask)
{
send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
}

void show_ipi_list(struct seq_file *p)
@@ -498,10 +498,17 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs)

void smp_send_reschedule(int cpu)
{
send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void smp_timer_broadcast(const struct cpumask *mask)
void smp_send_timer(void)
{
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);
send_ipi_message(mask, IPI_TIMER);
}

void smp_timer_broadcast(cpumask_t mask)
{
send_ipi_message(mask, IPI_TIMER);
}
@@ -510,7 +517,7 @@ void smp_send_stop(void)
{
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);
send_ipi_message(&mask, IPI_CPU_STOP);
send_ipi_message(mask, IPI_CPU_STOP);
}

/*
@@ -521,17 +528,20 @@ int setup_profiling_timer(unsigned int multiplier)
return -EINVAL;
}

static void
on_each_cpu_mask(void (*func)(void *), void *info, int wait,
const struct cpumask *mask)
static int
on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask)
{
int ret = 0;

preempt_disable();

smp_call_function_many(mask, func, info, wait);
if (cpumask_test_cpu(smp_processor_id(), mask))
ret = smp_call_function_mask(mask, func, info, wait);
if (cpu_isset(smp_processor_id(), mask))
func(info);

preempt_enable();

return ret;
}

/**********************************************************************/
@@ -592,17 +602,20 @@ void flush_tlb_all(void)

void flush_tlb_mm(struct mm_struct *mm)
{
on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
cpumask_t mask = mm->cpu_vm_mask;

on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
cpumask_t mask = vma->vm_mm->cpu_vm_mask;
struct tlb_args ta;

ta.ta_vma = vma;
ta.ta_start = uaddr;

on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask);
}

void flush_tlb_kernel_page(unsigned long kaddr)
@@ -617,13 +630,14 @@ void flush_tlb_kernel_page(unsigned long kaddr)
void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
cpumask_t mask = vma->vm_mm->cpu_vm_mask;
struct tlb_args ta;

ta.ta_vma = vma;
ta.ta_start = start;
ta.ta_end = end;

on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
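Both versions of on_each_cpu_mask() in the smp.c hunks above follow the same control flow: cross-call the remote CPUs in the mask, then run the function directly on the local CPU if it is part of the mask, with preemption disabled around the whole sequence. A hypothetical user-space sketch of that flow (stand-in names, a single 64-bit mask, no real IPIs):

    #include <stdio.h>

    typedef unsigned long mask_t;   /* stand-in for cpumask_t, up to 64 CPUs */

    static const int this_cpu = 2;  /* stand-in for smp_processor_id() */

    /* Stand-in for the cross-call (smp_call_function_many()/smp_call_function_mask()):
     * here the "remote" work simply runs in-process, once per remote CPU in the mask. */
    static void cross_call(mask_t mask, void (*func)(void *), void *info)
    {
        int cpu;

        for (cpu = 0; cpu < 64; cpu++) {
            if ((mask & (1UL << cpu)) && cpu != this_cpu) {
                printf("IPI -> cpu%d\n", cpu);
                func(info);
            }
        }
    }

    static void on_each_cpu_mask(void (*func)(void *), void *info, mask_t mask)
    {
        /* The kernel brackets this with preempt_disable()/preempt_enable()
         * so the current CPU cannot change underneath; no analogue here. */
        cross_call(mask, func, info);
        if (mask & (1UL << this_cpu))   /* cf. cpu_isset()/cpumask_test_cpu() */
            func(info);                 /* local CPU runs the function directly */
    }

    static void flush(void *info)
    {
        printf("flush for %s\n", (const char *)info);
    }

    int main(void)
    {
        on_each_cpu_mask(flush, (void *)"mm A", 0x0dUL);    /* CPUs 0, 2, 3 */
        return 0;
    }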