Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Fix large MCA bootmem allocation
  [IA64] Simplify cpu_idle_wait
  [IA64] Synchronize RBS on PTRACE_ATTACH
  [IA64] Synchronize kernel RSE to user-space and back
  [IA64] Rename TIF_PERFMON_WORK back to TIF_NOTIFY_RESUME
  [IA64] Wire up timerfd_{create,settime,gettime} syscalls
Linus Torvalds committed Feb 8, 2008
2 parents 3bf8f5a + 785285f commit 6a306e8
Showing 8 changed files with 225 additions and 81 deletions.
5 changes: 4 additions & 1 deletion arch/ia64/kernel/entry.S
@@ -1573,7 +1573,7 @@ sys_call_table:
 	data8 sys_fchmodat
 	data8 sys_faccessat
 	data8 sys_pselect6
-	data8 sys_ppoll
+	data8 sys_ppoll				// 1295
 	data8 sys_unshare
 	data8 sys_splice
 	data8 sys_set_robust_list
@@ -1588,5 +1588,8 @@ sys_call_table:
 	data8 sys_signalfd
 	data8 sys_ni_syscall
 	data8 sys_eventfd
+	data8 sys_timerfd_create	// 1310
+	data8 sys_timerfd_settime
+	data8 sys_timerfd_gettime

 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
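The trailing .org directive is the table's size guard: if entries are added without also bumping NR_syscalls, the assembler would have to move the location counter backwards past code it already emitted, and the build fails. A rough C analogue of that build-time check, purely illustrative (NR_SYSCALLS_DEMO and sys_call_table_demo are made-up names, not kernel ones):

/* Hypothetical C analogue of the .org guard; the real check is the
 * assembler refusing to move the location counter backwards. */
#include <assert.h>

#define NR_SYSCALLS_DEMO 3                 /* made-up value for illustration */
static long sys_call_table_demo[] = { 101, 102, 103 };

static_assert(sizeof(sys_call_table_demo) / sizeof(long) == NR_SYSCALLS_DEMO,
              "syscall table entries must match NR_SYSCALLS_DEMO");

int main(void) { return 0; }

Either way the mismatch is caught at build time rather than showing up as stray syscall numbers at run time.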
55 changes: 26 additions & 29 deletions arch/ia64/kernel/mca.c
@@ -17,7 +17,7 @@
  * Copyright (C) 2000 Intel
  * Copyright (C) Chuck Fleckenstein <cfleck@co.intel.com>
  *
- * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
+ * Copyright (C) 1999, 2004-2008 Silicon Graphics, Inc.
  * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
  *
  * Copyright (C) 2006 FUJITSU LIMITED
@@ -1762,45 +1762,42 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 /* Caller prevents this from being called after init */
 static void * __init_refok mca_bootmem(void)
 {
-	void *p;
-
-	p = alloc_bootmem(sizeof(struct ia64_mca_cpu) * NR_CPUS +
-			  KERNEL_STACK_SIZE);
-	return (void *)ALIGN((unsigned long)p, KERNEL_STACK_SIZE);
+	return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
+			KERNEL_STACK_SIZE, 0);
 }

 /* Do per-CPU MCA-related initialization. */
 void __cpuinit
 ia64_mca_cpu_init(void *cpu_data)
 {
 	void *pal_vaddr;
+	void *data;
+	long sz = sizeof(struct ia64_mca_cpu);
+	int cpu = smp_processor_id();
 	static int first_time = 1;

-	if (first_time) {
-		void *mca_data;
-		int cpu;
-
-		first_time = 0;
-		mca_data = mca_bootmem();
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-			format_mca_init_stack(mca_data,
-					offsetof(struct ia64_mca_cpu, mca_stack),
-					"MCA", cpu);
-			format_mca_init_stack(mca_data,
-					offsetof(struct ia64_mca_cpu, init_stack),
-					"INIT", cpu);
-			__per_cpu_mca[cpu] = __pa(mca_data);
-			mca_data += sizeof(struct ia64_mca_cpu);
-		}
-	}
-
 	/*
-	 * The MCA info structure was allocated earlier and its
-	 * physical address saved in __per_cpu_mca[cpu].  Copy that
-	 * address to ia64_mca_data so we can access it as a per-CPU
-	 * variable.
+	 * Structure will already be allocated if cpu has been online,
+	 * then offlined.
 	 */
-	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
+	if (__per_cpu_mca[cpu]) {
+		data = __va(__per_cpu_mca[cpu]);
+	} else {
+		if (first_time) {
+			data = mca_bootmem();
+			first_time = 0;
+		} else
+			data = page_address(alloc_pages_node(numa_node_id(),
+					GFP_KERNEL, get_order(sz)));
+		if (!data)
+			panic("Could not allocate MCA memory for cpu %d\n",
+					cpu);
+	}
+	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack),
+		"MCA", cpu);
+	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
+		"INIT", cpu);
+	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[cpu] = __pa(data);

 	/*
 	 * Stash away a copy of the PTE needed to map the per-CPU page.
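The rewritten ia64_mca_cpu_init() allocates per-CPU MCA space lazily: bootmem for the very first CPU brought up, the page allocator for CPUs onlined later, and the physical address cached in __per_cpu_mca[] is reused when a CPU is offlined and brought back. A minimal user-space sketch of that allocate-once-and-cache policy, with stand-in names (percpu_mca, mca_cpu_init_demo, and malloc in place of the bootmem/page allocators):

/* Sketch of the new allocation policy, not the kernel code itself:
 * allocate a CPU's block on first online, cache it, and reuse the
 * cached block when the CPU comes back online. */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS  4
#define MCA_SIZE 4096

static void *percpu_mca[NR_CPUS];       /* stands in for __per_cpu_mca[] */

static void *mca_cpu_init_demo(int cpu)
{
	if (percpu_mca[cpu])            /* CPU was online before: reuse */
		return percpu_mca[cpu];
	percpu_mca[cpu] = malloc(MCA_SIZE); /* stands in for bootmem/page alloc */
	if (!percpu_mca[cpu]) {
		fprintf(stderr, "Could not allocate MCA memory for cpu %d\n", cpu);
		exit(1);
	}
	return percpu_mca[cpu];
}

int main(void)
{
	void *first = mca_cpu_init_demo(1); /* CPU 1 comes online */
	void *again = mca_cpu_init_demo(1); /* CPU 1 offlined, then online again */
	printf("block reused: %s\n", first == again ? "yes" : "no");
	return 0;
}

The payoff over the old code is the same as in the patch: nothing is allocated up front for CPUs that never come online, instead of reserving NR_CPUS blocks of bootmem at boot.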
21 changes: 3 additions & 18 deletions arch/ia64/kernel/perfmon.c
@@ -585,21 +585,6 @@ pfm_put_task(struct task_struct *task)
 	if (task != current) put_task_struct(task);
 }

-static inline void
-pfm_set_task_notify(struct task_struct *task)
-{
-	struct thread_info *info;
-
-	info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
-	set_bit(TIF_PERFMON_WORK, &info->flags);
-}
-
-static inline void
-pfm_clear_task_notify(void)
-{
-	clear_thread_flag(TIF_PERFMON_WORK);
-}
-
 static inline void
 pfm_reserve_page(unsigned long a)
 {
@@ -3724,7 +3709,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)

 	PFM_SET_WORK_PENDING(task, 1);

-	pfm_set_task_notify(task);
+	tsk_set_notify_resume(task);

 	/*
 	 * XXX: send reschedule if task runs on another CPU
@@ -5082,7 +5067,7 @@ pfm_handle_work(void)

 	PFM_SET_WORK_PENDING(current, 0);

-	pfm_clear_task_notify();
+	tsk_clear_notify_resume(current);

 	regs = task_pt_regs(current);

@@ -5450,7 +5435,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
 	 * when coming from ctxsw, current still points to the
 	 * previous task, therefore we must work with task and not current.
 	 */
-	pfm_set_task_notify(task);
+	tsk_set_notify_resume(task);
 }
 /*
  * defer until state is changed (shorten spin window). the context is locked
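With the perfmon-private helpers gone, perfmon now shares TIF_NOTIFY_RESUME with the RSE-sync code, so clearing the flag must first check every remaining user (see tsk_clear_notify_resume() added in process.c below). A minimal sketch of that shared-flag pattern, assuming simplified stand-ins for the thread-info flag operations:

/* Sketch of the shared notify-resume flag, not kernel code: several
 * subsystems share one "call me before returning to user mode" bit,
 * so clearing it must check every other user first. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { NOTIFY_RESUME = 1 << 0, RESTORE_RSE = 1 << 1 };

struct task {
	atomic_int flags;          /* stands in for thread_info->flags */
	bool pfm_needs_checking;   /* perfmon still has deferred work */
};

static void set_notify_resume_demo(struct task *t)
{
	atomic_fetch_or(&t->flags, NOTIFY_RESUME);
}

static void clear_notify_resume_demo(struct task *t)
{
	if (t->pfm_needs_checking)                /* perfmon still needs it */
		return;
	if (atomic_load(&t->flags) & RESTORE_RSE) /* RSE sync still pending */
		return;
	atomic_fetch_and(&t->flags, ~NOTIFY_RESUME); /* no users left */
}

int main(void)
{
	struct task t = { .pfm_needs_checking = false };
	set_notify_resume_demo(&t);
	clear_notify_resume_demo(&t);   /* no other users: really clears */
	printf("flag still set: %d\n",
	       (atomic_load(&t.flags) & NOTIFY_RESUME) != 0);
	return 0;
}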
59 changes: 30 additions & 29 deletions arch/ia64/kernel/process.c
Expand Up @@ -52,7 +52,6 @@
#include "sigframe.h"

void (*ia64_mark_idle)(int);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);
@@ -157,6 +156,17 @@ show_regs (struct pt_regs *regs)
 	show_stack(NULL, NULL);
 }

+void tsk_clear_notify_resume(struct task_struct *tsk)
+{
+#ifdef CONFIG_PERFMON
+	if (tsk->thread.pfm_needs_checking)
+		return;
+#endif
+	if (test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_RSE))
+		return;
+	clear_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME);
+}
+
 void
 do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall)
 {
@@ -175,6 +185,10 @@ do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall)
 	/* deal with pending signal delivery */
 	if (test_thread_flag(TIF_SIGPENDING)||test_thread_flag(TIF_RESTORE_SIGMASK))
 		ia64_do_signal(scr, in_syscall);
+
+	/* copy user rbs to kernel rbs */
+	if (unlikely(test_thread_flag(TIF_RESTORE_RSE)))
+		ia64_sync_krbs();
 }

 static int pal_halt = 1;
@@ -239,33 +253,23 @@ static inline void play_dead(void)
 }
 #endif /* CONFIG_HOTPLUG_CPU */

-void cpu_idle_wait(void)
+static void do_nothing(void *unused)
 {
-	unsigned int cpu, this_cpu = get_cpu();
-	cpumask_t map;
-	cpumask_t tmp = current->cpus_allowed;
-
-	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-	put_cpu();
-
-	cpus_clear(map);
-	for_each_online_cpu(cpu) {
-		per_cpu(cpu_idle_state, cpu) = 1;
-		cpu_set(cpu, map);
-	}
-
-	__get_cpu_var(cpu_idle_state) = 0;
+}

-	wmb();
-	do {
-		ssleep(1);
-		for_each_online_cpu(cpu) {
-			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
-				cpu_clear(cpu, map);
-		}
-		cpus_and(map, map, cpu_online_map);
-	} while (!cpus_empty(map));
-	set_cpus_allowed(current, tmp);
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
+ * pm_idle and update to new pm_idle value. Required while changing pm_idle
+ * handler on SMP systems.
+ *
+ * Caller must have changed pm_idle to the new value before the call. Old
+ * pm_idle value will not be used by any CPU after the return of this function.
+ */
+void cpu_idle_wait(void)
+{
+	smp_mb();
+	/* kick all the CPUs so that they exit out of pm_idle */
+	smp_call_function(do_nothing, NULL, 0, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);

@@ -293,9 +297,6 @@ cpu_idle (void)
 #ifdef CONFIG_SMP
 			min_xtp();
 #endif
-			if (__get_cpu_var(cpu_idle_state))
-				__get_cpu_var(cpu_idle_state) = 0;
-
 			rmb();
 			if (mark_idle)
 				(*mark_idle)(1);
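The new cpu_idle_wait() leans on the fact that cpu_idle() re-reads pm_idle on every loop pass: after a memory barrier, synchronously running a no-op on every CPU guarantees each has finished any in-flight invocation of the old handler before the function returns. A user-space sketch of the same publish-then-kick idea, assuming pthreads stand in for CPUs and a per-worker kick flag plus an ack counter stand in for the synchronous smp_call_function(do_nothing, NULL, 0, 1):

/* User-space sketch (not kernel code) of the new cpu_idle_wait():
 * publish the new idle handler, then make every worker run a no-op
 * once and wait for all acknowledgements. */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 4

static void old_idle(void) { sched_yield(); }
static void new_idle(void) { sched_yield(); }

static void (*_Atomic pm_idle)(void) = old_idle;
static atomic_bool kick[NWORKERS];
static atomic_int acks;
static atomic_bool stop;

static void *worker(void *arg)
{
	int id = *(int *)arg;
	while (!atomic_load(&stop)) {
		void (*idle)(void) = atomic_load(&pm_idle); /* reloaded every
							       pass, like cpu_idle() */
		idle();
		if (atomic_exchange(&kick[id], false))      /* the "do_nothing" run */
			atomic_fetch_add(&acks, 1);
	}
	return NULL;
}

static void cpu_idle_wait_demo(void)
{
	atomic_store(&acks, 0);
	for (int i = 0; i < NWORKERS; i++)
		atomic_store(&kick[i], true);
	while (atomic_load(&acks) < NWORKERS)               /* wait == 1 */
		sched_yield();
	/* Every worker has finished a loop pass since the kick, so none
	 * can still be inside the old handler. */
}

int main(void)
{
	pthread_t t[NWORKERS];
	int id[NWORKERS];

	for (int i = 0; i < NWORKERS; i++) {
		id[i] = i;
		pthread_create(&t[i], NULL, worker, &id[i]);
	}
	atomic_store(&pm_idle, new_idle);  /* publish the new handler first */
	cpu_idle_wait_demo();              /* then kick everyone once */
	atomic_store(&stop, true);
	for (int i = 0; i < NWORKERS; i++)
		pthread_join(t[i], NULL);
	puts("all workers past the old idle handler");
	return 0;
}

The old implementation polled per-CPU state with a one-second sleep per round; the kick-based version returns as soon as every CPU has acknowledged, and drops the per-CPU bookkeeping entirely.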
