x86/apic/x2apic: Limit the vector reservation to the user specified mask
In x2apic cluster mode, the vector for an interrupt is currently
reserved on all the CPUs that are part of the x2apic cluster. But
the interrupt will be routed only to the cluster members (the
cluster being derived from the first CPU in the mask) that are
specified in the mask, so there is no need to reserve the vector
on the unused cluster members.
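
In sketch form, the per-cluster allocation domain becomes the
intersection of the caller-supplied affinity mask and the cluster
membership, instead of the whole cluster (see the x2apic_cluster.c
hunk below):

        /* reserve the vector only on cluster members present in 'mask' */
        cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));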

Modify __assign_irq_vector() to reserve the vectors based on the
user-specified irq destination mask. If the new mask is a proper
subset of the currently used mask, clean up the vector allocation
on the CPUs that are no longer used.
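
Sketched against the io_apic.c hunk below: when the newly computed
domain (tmp_mask) is a proper subset of cfg->domain, the vector is
kept and the no-longer-used members are released instead of doing a
fresh allocation:

        if (cpumask_subset(tmp_mask, cfg->domain)) {
                err = 0;
                if (cpumask_equal(tmp_mask, cfg->domain))
                        break;
                /* proper subset: drop the vector on the unused members */
                cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
                cfg->move_in_progress = 1;
                cpumask_and(cfg->domain, cfg->domain, tmp_mask);
                break;
        }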

Also, allow the apic driver to tune the vector domain based on
the affinity mask (which in most cases is the user-specified
mask).
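
For this, the vector_allocation_domain() callback gains the affinity
mask as an extra argument (see the apic.h hunk below):

        void (*vector_allocation_domain)(int cpu, struct cpumask *retmask,
                                         const struct cpumask *mask);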

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
Acked-by: Alexander Gordeev <agordeev@redhat.com>
Acked-by: Cyrill Gorcunov <gorcunov@openvz.org>
Link: http://lkml.kernel.org/r/1340656709-11423-3-git-send-email-suresh.b.siddha@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Suresh Siddha authored and Ingo Molnar committed Jul 6, 2012
1 parent b39f25a commit 1ac322d
Showing 5 changed files with 28 additions and 24 deletions.
9 changes: 6 additions & 3 deletions arch/x86/include/asm/apic.h

@@ -306,7 +306,8 @@ struct apic {
 	unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
 	unsigned long (*check_apicid_present)(int apicid);
 
-	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask,
+					 const struct cpumask *mask);
 	void (*init_apic_ldr)(void);
 
 	void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
@@ -615,7 +616,8 @@ default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 			       unsigned int *apicid);
 
 static inline void
-flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
+flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
+			      const struct cpumask *mask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -630,7 +632,8 @@ flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 }
 
 static inline void
-default_vector_allocation_domain(int cpu, struct cpumask *retmask)
+default_vector_allocation_domain(int cpu, struct cpumask *retmask,
+				 const struct cpumask *mask)
 {
 	cpumask_copy(retmask, cpumask_of(cpu));
 }
3 changes: 2 additions & 1 deletion arch/x86/kernel/apic/apic_noop.c

@@ -100,7 +100,8 @@ static unsigned long noop_check_apicid_present(int bit)
 	return physid_isset(bit, phys_cpu_present_map);
 }
 
-static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
+static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask,
+					  const struct cpumask *mask)
 {
 	if (cpu != 0)
 		pr_warning("APIC: Vector allocated for non-BSP cpu\n");
31 changes: 15 additions & 16 deletions arch/x86/kernel/apic/io_apic.c

@@ -1113,7 +1113,6 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	 */
 	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
 	static int current_offset = VECTOR_OFFSET_START % 16;
-	unsigned int old_vector;
 	int cpu, err;
 	cpumask_var_t tmp_mask;
 
@@ -1123,28 +1122,28 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
 		return -ENOMEM;
 
-	old_vector = cfg->vector;
-	if (old_vector) {
-		cpumask_and(tmp_mask, mask, cpu_online_mask);
-		if (cpumask_subset(tmp_mask, cfg->domain)) {
-			free_cpumask_var(tmp_mask);
-			return 0;
-		}
-	}
-
 	/* Only try and allocate irqs on cpus that are present */
 	err = -ENOSPC;
+	cpumask_clear(cfg->old_domain);
 	cpu = cpumask_first_and(mask, cpu_online_mask);
 	while (cpu < nr_cpu_ids) {
-		int new_cpu;
-		int vector, offset;
+		int new_cpu, vector, offset;
 
-		apic->vector_allocation_domain(cpu, tmp_mask);
+		apic->vector_allocation_domain(cpu, tmp_mask, mask);
 
 		if (cpumask_subset(tmp_mask, cfg->domain)) {
-			free_cpumask_var(tmp_mask);
-			return 0;
+			err = 0;
+			if (cpumask_equal(tmp_mask, cfg->domain))
+				break;
+			/*
+			 * New cpumask using the vector is a proper subset of
+			 * the current in use mask. So cleanup the vector
+			 * allocation for the members that are not used anymore.
+			 */
+			cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
+			cfg->move_in_progress = 1;
+			cpumask_and(cfg->domain, cfg->domain, tmp_mask);
+			break;
 		}
 
 		vector = current_vector;
@@ -1172,7 +1171,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 		/* Found one! */
 		current_vector = vector;
 		current_offset = offset;
-		if (old_vector) {
+		if (cfg->vector) {
 			cfg->move_in_progress = 1;
 			cpumask_copy(cfg->old_domain, cfg->domain);
 		}
6 changes: 3 additions & 3 deletions arch/x86/kernel/apic/x2apic_cluster.c

@@ -212,10 +212,10 @@ static int x2apic_cluster_probe(void)
 /*
  * Each x2apic cluster is an allocation domain.
  */
-static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask)
+static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
+					     const struct cpumask *mask)
 {
-	cpumask_clear(retmask);
-	cpumask_copy(retmask, per_cpu(cpus_in_cluster, cpu));
+	cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
 }
 
 static struct apic apic_x2apic_cluster = {
3 changes: 2 additions & 1 deletion arch/x86/kernel/vsmp_64.c

@@ -208,7 +208,8 @@ static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
  * In vSMP, all cpus should be capable of handling interrupts, regardless of
  * the APIC used.
  */
-static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask)
+static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
+					  const struct cpumask *mask)
 {
 	cpumask_setall(retmask);
 }
