
Commit a5536b1

---
yaml
---
r: 107628
b: refs/heads/master
c: cd5bc89
h: refs/heads/master
v: v3
David S. Miller committed Aug 4, 2008
1 parent 737a9ec commit a5536b1
Showing 2 changed files with 22 additions and 19 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 622824dbb536f7bdc241eefc3e1ae31c463b4eb8
+refs/heads/master: cd5bc89debb4045d55eeffe325b97f2dfba4ddea
39 changes: 21 additions & 18 deletions trunk/arch/sparc64/kernel/smp.c
@@ -459,28 +459,31 @@ static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, u
 	}
 }
 
-static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
 {
 	u64 pstate;
 	int i;
 
 	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
-	for_each_cpu_mask(i, mask)
+	for_each_cpu_mask_nr(i, *mask)
 		spitfire_xcall_helper(data0, data1, data2, pstate, i);
 }
 
 /* Cheetah now allows to send the whole 64-bytes of data in the interrupt
  * packet, but we have no use for that. However we do take advantage of
  * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
  */
-static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask_p)
 {
 	u64 pstate, ver, busy_mask;
 	int nack_busy_id, is_jbus, need_more;
+	cpumask_t mask;
 
-	if (cpus_empty(mask))
+	if (cpus_empty(*mask_p))
 		return;
 
+	mask = *mask_p;
+
 	/* Unfortunately, someone at Sun had the brilliant idea to make the
 	 * busy/nack fields hard-coded by ITID number for this Ultra-III
 	 * derivative processor.
@@ -511,7 +514,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
 	{
 		int i;
 
-		for_each_cpu_mask(i, mask) {
+		for_each_cpu_mask_nr(i, mask) {
 			u64 target = (i << 14) | 0x70;
 
 			if (is_jbus) {
@@ -550,7 +553,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
 					     : : "r" (pstate));
 			if (unlikely(need_more)) {
 				int i, cnt = 0;
-				for_each_cpu_mask(i, mask) {
+				for_each_cpu_mask_nr(i, mask) {
 					cpu_clear(i, mask);
 					cnt++;
 					if (cnt == 32)
@@ -584,7 +587,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
 			/* Clear out the mask bits for cpus which did not
 			 * NACK us.
 			 */
-			for_each_cpu_mask(i, mask) {
+			for_each_cpu_mask_nr(i, mask) {
 				u64 check_mask;
 
 				if (is_jbus)
@@ -605,16 +608,16 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
 }
 
 /* Multi-cpu list version. */
-static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
 {
+	int cnt, retries, this_cpu, prev_sent, i;
+	unsigned long flags, status;
+	cpumask_t error_mask;
 	struct trap_per_cpu *tb;
 	u16 *cpu_list;
 	u64 *mondo;
-	cpumask_t error_mask;
-	unsigned long flags, status;
-	int cnt, retries, this_cpu, prev_sent, i;
 
-	if (cpus_empty(mask))
+	if (cpus_empty(*mask))
 		return;
 
 	/* We have to do this whole thing with interrupts fully disabled.
@@ -642,7 +645,7 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
 
 	/* Setup the initial cpu list. */
 	cnt = 0;
-	for_each_cpu_mask(i, mask)
+	for_each_cpu_mask_nr(i, *mask)
 		cpu_list[cnt++] = i;
 
 	cpus_clear(error_mask);
@@ -729,7 +732,7 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
 	       "were in error state\n",
 	       this_cpu);
 	printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
-	for_each_cpu_mask(i, error_mask)
+	for_each_cpu_mask_nr(i, error_mask)
 		printk("%d ", i);
 	printk("]\n");
 	return;
@@ -756,7 +759,7 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
 	printk("]\n");
 }
 
-static void (*xcall_deliver)(u64, u64, u64, cpumask_t);
+static void (*xcall_deliver)(u64, u64, u64, const cpumask_t *);
 
 /* Send cross call to all processors mentioned in MASK
  * except self.
@@ -769,7 +772,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
 	cpus_and(mask, mask, cpu_online_map);
 	cpu_clear(this_cpu, mask);
 
-	xcall_deliver(data0, data1, data2, mask);
+	xcall_deliver(data0, data1, data2, &mask);
 	/* NOTE: Caller runs local copy on master. */
 
 	put_cpu();
@@ -903,7 +906,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 	}
 	if (data0) {
 		xcall_deliver(data0, __pa(pg_addr),
-			      (u64) pg_addr, mask);
+			      (u64) pg_addr, &mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
 		atomic_inc(&dcpage_flushes_xcall);
 #endif
@@ -945,7 +948,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	}
 	if (data0) {
 		xcall_deliver(data0, __pa(pg_addr),
-			      (u64) pg_addr, mask);
+			      (u64) pg_addr, &mask);
#ifdef CONFIG_DEBUG_DCFLUSH
 		atomic_inc(&dcpage_flushes_xcall);
#endif
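Every hunk above applies the same change: the xcall_deliver implementations now take a const cpumask_t * rather than a cpumask_t by value, so an NR_CPUS-sized bitmap is no longer copied onto the stack at each call, and the iterators switch to for_each_cpu_mask_nr(). cheetah_xcall_deliver() keeps a private copy (mask = *mask_p) only because its retry path clears bits out of the mask with cpu_clear(). The fragment below is a minimal userspace sketch of that by-value versus by-pointer trade-off, not kernel code; mask_t, deliver_by_value() and deliver_by_pointer() are made-up names used purely for illustration.

/* Userspace sketch of the calling-convention change in this commit.
 * mask_t stands in for cpumask_t; nothing here is a real kernel API. */
#include <stdio.h>
#include <string.h>

#define NR_CPUS 1024			/* a large mask makes the copy costly */

typedef struct {
	unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
} mask_t;				/* 128 bytes with 64-bit longs */

static void mask_set(mask_t *m, int cpu)
{
	m->bits[cpu / (8 * sizeof(unsigned long))] |=
		1UL << (cpu % (8 * sizeof(unsigned long)));
}

static int mask_test(const mask_t *m, int cpu)
{
	return (m->bits[cpu / (8 * sizeof(unsigned long))] >>
		(cpu % (8 * sizeof(unsigned long)))) & 1;
}

/* Old style: the whole mask structure is copied for every call. */
static void deliver_by_value(unsigned long data, mask_t mask)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask_test(&mask, cpu))
			printf("deliver %#lx to cpu %d\n", data, cpu);
}

/* New style: only a pointer is passed; the callee makes its own copy
 * only if it needs to modify the mask, as cheetah_xcall_deliver() does
 * with mask = *mask_p. */
static void deliver_by_pointer(unsigned long data, const mask_t *mask)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask_test(mask, cpu))
			printf("deliver %#lx to cpu %d\n", data, cpu);
}

int main(void)
{
	mask_t mask;

	memset(&mask, 0, sizeof(mask));
	mask_set(&mask, 3);
	mask_set(&mask, 17);

	deliver_by_value(0xcafe, mask);		/* copies sizeof(mask_t) bytes */
	deliver_by_pointer(0xcafe, &mask);	/* copies one pointer */
	return 0;
}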
