x86, PAT: Add support for struct page pointer array in cpa set_clr
Add a struct page array pointer to the cpa struct, along with a new CPA_PAGES_ARRAY flag.

With that, change_page_attr_set_clr() gains a parameter for passing a
struct page array pointer, which the underlying cpa code can then
handle.

cpa_flush_array() is also changed to accept either an address array or
a struct page pointer array, depending on the flag.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: arjan@infradead.org
Cc: eric@anholt.net
Cc: airlied@redhat.com
LKML-Reference: <20090319215358.758513000@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
venkatesh.pallipadi@intel.com authored and Ingo Molnar committed Mar 20, 2009
1 parent 728c951 commit 9ae2847
Showing 1 changed file with 50 additions and 29 deletions.
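
The new pages parameter is only consumed when the caller also sets CPA_PAGES_ARRAY in in_flag; every caller touched by this patch still passes NULL. A minimal sketch of how a follow-up helper inside pageattr.c might drive the new path (the helper name and the _PAGE_CACHE_UC_MINUS mask are illustrative, not part of this commit):

static int cpa_set_pages_array_uc(struct page **pages, int numpages)
{
	/*
	 * addr is unused in CPA_PAGES_ARRAY mode, so NULL is fine: the
	 * cpa code derives each address from page_address(pages[i])
	 * instead of walking an address array.
	 */
	return change_page_attr_set_clr(NULL, numpages,
					__pgprot(_PAGE_CACHE_UC_MINUS),
					__pgprot(0), 0,
					CPA_PAGES_ARRAY, pages);
}

Since change_page_attr_set_clr() is static, such a helper has to live in arch/x86/mm/pageattr.c and be exposed through a set_pages_*-style wrapper.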
79 changes: 50 additions & 29 deletions arch/x86/mm/pageattr.c
@@ -34,6 +34,7 @@ struct cpa_data {
unsigned long pfn;
unsigned force_split : 1;
int curpage;
struct page **pages;
};

/*
@@ -46,6 +47,7 @@ static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];
@@ -202,10 +204,10 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
}
}

static void cpa_flush_array(unsigned long *start, int numpages, int cache)
static void cpa_flush_array(unsigned long *start, int numpages, int cache,
int in_flags, struct page **pages)
{
unsigned int i, level;
unsigned long *addr;

BUG_ON(irqs_disabled());

@@ -226,14 +228,22 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache)
* will cause all other CPUs to flush the same
* cachelines:
*/
for (i = 0, addr = start; i < numpages; i++, addr++) {
pte_t *pte = lookup_address(*addr, &level);
for (i = 0; i < numpages; i++) {
unsigned long addr;
pte_t *pte;

if (in_flags & CPA_PAGES_ARRAY)
addr = (unsigned long)page_address(pages[i]);
else
addr = start[i];

pte = lookup_address(addr, &level);

/*
* Only flush present addresses:
*/
if (pte && (pte_val(*pte) & _PAGE_PRESENT))
clflush_cache_range((void *) *addr, PAGE_SIZE);
clflush_cache_range((void *)addr, PAGE_SIZE);
}
}

@@ -585,7 +595,9 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
unsigned int level;
pte_t *kpte, old_pte;

if (cpa->flags & CPA_ARRAY)
if (cpa->flags & CPA_PAGES_ARRAY)
address = (unsigned long)page_address(cpa->pages[cpa->curpage]);
else if (cpa->flags & CPA_ARRAY)
address = cpa->vaddr[cpa->curpage];
else
address = *cpa->vaddr;
@@ -688,7 +700,9 @@ static int cpa_process_alias(struct cpa_data *cpa)
* No need to redo, when the primary call touched the direct
* mapping already:
*/
if (cpa->flags & CPA_ARRAY)
if (cpa->flags & CPA_PAGES_ARRAY)
vaddr = (unsigned long)page_address(cpa->pages[cpa->curpage]);
else if (cpa->flags & CPA_ARRAY)
vaddr = cpa->vaddr[cpa->curpage];
else
vaddr = *cpa->vaddr;
@@ -699,7 +713,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
alias_cpa = *cpa;
temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
alias_cpa.vaddr = &temp_cpa_vaddr;
alias_cpa.flags &= ~CPA_ARRAY;
alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);


ret = __change_page_attr_set_clr(&alias_cpa, 0);
@@ -725,7 +739,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
alias_cpa = *cpa;
temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
alias_cpa.vaddr = &temp_cpa_vaddr;
alias_cpa.flags &= ~CPA_ARRAY;
alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

/*
* The high mapping range is imprecise, so ignore the return value.
@@ -746,7 +760,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
*/
cpa->numpages = numpages;
/* for array changes, we can't use large page */
if (cpa->flags & CPA_ARRAY)
if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
cpa->numpages = 1;

if (!debug_pagealloc)
@@ -770,7 +784,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
*/
BUG_ON(cpa->numpages > numpages);
numpages -= cpa->numpages;
if (cpa->flags & CPA_ARRAY)
if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
cpa->curpage++;
else
*cpa->vaddr += cpa->numpages * PAGE_SIZE;
@@ -787,7 +801,8 @@ static inline int cache_attr(pgprot_t attr)

static int change_page_attr_set_clr(unsigned long *addr, int numpages,
pgprot_t mask_set, pgprot_t mask_clr,
int force_split, int in_flag)
int force_split, int in_flag,
struct page **pages)
{
struct cpa_data cpa;
int ret, cache, checkalias;
@@ -802,22 +817,26 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
return 0;

/* Ensure we are PAGE_SIZE aligned */
if (!(in_flag & CPA_ARRAY)) {
if (*addr & ~PAGE_MASK) {
*addr &= PAGE_MASK;
/*
* People should not be passing in unaligned addresses:
*/
WARN_ON_ONCE(1);
}
} else {
if (in_flag & CPA_ARRAY) {
int i;
for (i = 0; i < numpages; i++) {
if (addr[i] & ~PAGE_MASK) {
addr[i] &= PAGE_MASK;
WARN_ON_ONCE(1);
}
}
} else if (!(in_flag & CPA_PAGES_ARRAY)) {
/*
* in_flag of CPA_PAGES_ARRAY implies it is aligned.
* No need to check in that case
*/
if (*addr & ~PAGE_MASK) {
*addr &= PAGE_MASK;
/*
* People should not be passing in unaligned addresses:
*/
WARN_ON_ONCE(1);
}
}

/* Must avoid aliasing mappings in the highmem code */
@@ -833,15 +852,16 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
arch_flush_lazy_mmu_mode();

cpa.vaddr = addr;
cpa.pages = pages;
cpa.numpages = numpages;
cpa.mask_set = mask_set;
cpa.mask_clr = mask_clr;
cpa.flags = 0;
cpa.curpage = 0;
cpa.force_split = force_split;

if (in_flag & CPA_ARRAY)
cpa.flags |= CPA_ARRAY;
if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
cpa.flags |= in_flag;

/* No alias checking for _NX bit modifications */
checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
@@ -867,9 +887,10 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
* wbinvd):
*/
if (!ret && cpu_has_clflush) {
if (cpa.flags & CPA_ARRAY)
cpa_flush_array(addr, numpages, cache);
else
if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
cpa_flush_array(addr, numpages, cache,
cpa.flags, pages);
} else
cpa_flush_range(*addr, numpages, cache);
} else
cpa_flush_all(cache);
@@ -889,14 +910,14 @@ static inline int change_page_attr_set(unsigned long *addr, int numpages,
pgprot_t mask, int array)
{
return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
(array ? CPA_ARRAY : 0));
(array ? CPA_ARRAY : 0), NULL);
}

static inline int change_page_attr_clear(unsigned long *addr, int numpages,
pgprot_t mask, int array)
{
return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
(array ? CPA_ARRAY : 0));
(array ? CPA_ARRAY : 0), NULL);
}

int _set_memory_uc(unsigned long addr, int numpages)
@@ -1044,7 +1065,7 @@ int set_memory_np(unsigned long addr, int numpages)
int set_memory_4k(unsigned long addr, int numpages)
{
return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
__pgprot(0), 1, 0);
__pgprot(0), 1, 0, NULL);
}

int set_pages_uc(struct page *page, int numpages)
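
After this change, the same three-way address selection (struct page array, address array, or a single address) appears in __change_page_attr() and cpa_process_alias(), and in flattened form in cpa_flush_array(). A hypothetical helper, not part of this patch, that would capture the shared logic for the cpa_data-based callers:

static unsigned long cpa_address(struct cpa_data *cpa, int idx)
{
	/* CPA_PAGES_ARRAY: the address comes from the page itself. */
	if (cpa->flags & CPA_PAGES_ARRAY)
		return (unsigned long)page_address(cpa->pages[idx]);
	/* CPA_ARRAY: a plain array of virtual addresses. */
	if (cpa->flags & CPA_ARRAY)
		return cpa->vaddr[idx];
	/* Default: a single (advancing) virtual address. */
	return *cpa->vaddr;
}

Note that page_address() only yields a usable address for lowmem pages; highmem pages would need separate handling, which this patch does not attempt.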
