Commit

---
r: 99496
b: refs/heads/master
c: 6cf514f
h: refs/heads/master
v: v3
Hugh Dickins authored and Ingo Molnar committed Jun 18, 2008
1 parent e1b23c9 commit 5565222
Showing 2 changed files with 13 additions and 36 deletions.
[refs] (2 changes: 1 addition & 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: faeca31d068090285b77c39574d2bda14b079c50
+refs/heads/master: 6cf514fce18589ea1e0521c5f2d7c2bb280fefc7
trunk/arch/x86/mm/pat.c (47 changes: 12 additions & 35 deletions)
@@ -159,47 +159,31 @@ static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */
  * The intersection is based on "Effective Memory Type" tables in IA-32
  * SDM vol 3a
  */
-static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
-                           unsigned long *ret_prot)
+static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
 {
-        unsigned long pat_type;
         u8 mtrr_type;
 
-        pat_type = prot & _PAGE_CACHE_MASK;
-        prot &= (~_PAGE_CACHE_MASK);
-
         /*
          * We return the PAT request directly for types where PAT takes
          * precedence with respect to MTRR and for UC_MINUS.
          * Consistency checks with other PAT requests is done later
          * while going through memtype list.
          */
-        if (pat_type == _PAGE_CACHE_WC) {
-                *ret_prot = prot | _PAGE_CACHE_WC;
-                return 0;
-        } else if (pat_type == _PAGE_CACHE_UC_MINUS) {
-                *ret_prot = prot | _PAGE_CACHE_UC_MINUS;
-                return 0;
-        } else if (pat_type == _PAGE_CACHE_UC) {
-                *ret_prot = prot | _PAGE_CACHE_UC;
-                return 0;
-        }
+        if (req_type == _PAGE_CACHE_WC ||
+            req_type == _PAGE_CACHE_UC_MINUS ||
+            req_type == _PAGE_CACHE_UC)
+                return req_type;
 
         /*
          * Look for MTRR hint to get the effective type in case where PAT
          * request is for WB.
          */
         mtrr_type = mtrr_type_lookup(start, end);
 
-        if (mtrr_type == MTRR_TYPE_UNCACHABLE) {
-                *ret_prot = prot | _PAGE_CACHE_UC;
-        } else if (mtrr_type == MTRR_TYPE_WRCOMB) {
-                *ret_prot = prot | _PAGE_CACHE_WC;
-        } else {
-                *ret_prot = prot | _PAGE_CACHE_WB;
-        }
-
-        return 0;
+        if (mtrr_type == MTRR_TYPE_UNCACHABLE)
+                return _PAGE_CACHE_UC;
+        if (mtrr_type == MTRR_TYPE_WRCOMB)
+                return _PAGE_CACHE_WC;
+        return _PAGE_CACHE_WB;
 }
 
 /*
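
Note on the hunk above: the rewritten helper now returns the effective cache type directly instead of writing it through a *ret_prot out-parameter and returning an error code that, as the removed lines show, could only ever be zero. The following is a minimal, compilable sketch of that new contract, outside the kernel tree: the _PAGE_CACHE_* and MTRR_TYPE_* values are stand-ins, mtrr_type_lookup() is stubbed, and effective_type() is a hypothetical name standing in for pat_x_mtrr_type().

#include <stdio.h>

/*
 * Stand-in values purely for illustration; the real constants live in the
 * kernel headers (asm/pgtable.h, asm/mtrr.h) and differ from these.
 */
#define _PAGE_CACHE_WB          0x0UL
#define _PAGE_CACHE_WC          0x1UL
#define _PAGE_CACHE_UC_MINUS    0x2UL
#define _PAGE_CACHE_UC          0x3UL
#define MTRR_TYPE_UNCACHABLE    0
#define MTRR_TYPE_WRCOMB        1
#define MTRR_TYPE_WRBACK        6

typedef unsigned long long u64;
typedef unsigned char u8;

/* Stubbed MTRR lookup: pretend the whole range is covered by a write-back MTRR. */
static u8 mtrr_type_lookup(u64 start, u64 end)
{
        (void)start;
        (void)end;
        return MTRR_TYPE_WRBACK;
}

/* Hypothetical stand-in mirroring the simplified pat_x_mtrr_type() contract. */
static unsigned long effective_type(u64 start, u64 end, unsigned long req_type)
{
        u8 mtrr_type;

        /* WC, UC- and UC requests win outright; only WB defers to the MTRRs. */
        if (req_type == _PAGE_CACHE_WC ||
            req_type == _PAGE_CACHE_UC_MINUS ||
            req_type == _PAGE_CACHE_UC)
                return req_type;

        mtrr_type = mtrr_type_lookup(start, end);
        if (mtrr_type == MTRR_TYPE_UNCACHABLE)
                return _PAGE_CACHE_UC;
        if (mtrr_type == MTRR_TYPE_WRCOMB)
                return _PAGE_CACHE_WC;
        return _PAGE_CACHE_WB;
}

int main(void)
{
        /* A WB request over a WB range stays WB; a WC request is returned as-is. */
        printf("WB request -> %#lx\n", effective_type(0, 0x1000, _PAGE_CACHE_WB));
        printf("WC request -> %#lx\n", effective_type(0, 0x1000, _PAGE_CACHE_WC));
        return 0;
}

Dropping the out-parameter lets callers treat the result as a plain value, which is exactly what the later hunks in this commit exploit.
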
@@ -232,7 +216,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                         if (req_type == -1) {
                                 *ret_type = _PAGE_CACHE_WB;
                         } else {
-                                *ret_type = req_type;
+                                *ret_type = req_type & _PAGE_CACHE_MASK;
                         }
                 }
                 return 0;
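
The hunk above masks the request with _PAGE_CACHE_MASK on the non-PAT path, so *ret_type reports only the cache-attribute bits of req_type rather than whatever else the caller passed in. A small illustrative program, using made-up bit values rather than the real kernel definitions:

#include <stdio.h>

/* Made-up bit layout for illustration only; the real values are in asm/pgtable.h. */
#define _PAGE_RW                0x002UL
#define _PAGE_PWT               0x008UL
#define _PAGE_PCD               0x010UL
#define _PAGE_CACHE_MASK        (_PAGE_PWT | _PAGE_PCD)
#define _PAGE_CACHE_UC_MINUS    _PAGE_PCD

int main(void)
{
        /* A request that also carries a non-cache protection bit. */
        unsigned long req_type = _PAGE_CACHE_UC_MINUS | _PAGE_RW;

        /* Without the mask, the stray _PAGE_RW bit would leak into *ret_type. */
        printf("unmasked: %#lx\n", req_type);
        printf("masked:   %#lx\n", req_type & _PAGE_CACHE_MASK);
        return 0;
}
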
@@ -264,14 +248,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                 }
         } else {
                 req_type &= _PAGE_CACHE_MASK;
-                err = pat_x_mtrr_type(start, end, req_type, &actual_type);
-        }
-
-        if (err) {
-                if (ret_type)
-                        *ret_type = actual_type;
-
-                return -EINVAL;
+                actual_type = pat_x_mtrr_type(start, end, req_type);
         }
 
         new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
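
The last hunk is the caller-side payoff in reserve_memtype(): because pat_x_mtrr_type() no longer reports an error, the err / -EINVAL plumbing disappears and the call collapses to a single assignment. A self-contained sketch of the before-and-after calling pattern, with both helpers stubbed; pat_x_mtrr_type_old/new are illustrative names, not kernel symbols.

#include <stdio.h>

typedef unsigned long long u64;

#define _PAGE_CACHE_WB 0x0UL    /* stand-in value */

/* Old shape: write the result through an out-parameter, return an error code. */
static int pat_x_mtrr_type_old(u64 start, u64 end, unsigned long req_type,
                               unsigned long *ret_prot)
{
        (void)start;
        (void)end;
        (void)req_type;
        *ret_prot = _PAGE_CACHE_WB;     /* stubbed result */
        return 0;                       /* in practice it could only return 0 */
}

/* New shape: return the effective type directly. */
static unsigned long pat_x_mtrr_type_new(u64 start, u64 end, unsigned long req_type)
{
        (void)start;
        (void)end;
        (void)req_type;
        return _PAGE_CACHE_WB;          /* stubbed result */
}

int main(void)
{
        unsigned long actual_type;
        int err;

        /* Before: an error path that can never trigger. */
        err = pat_x_mtrr_type_old(0, 0x1000, _PAGE_CACHE_WB, &actual_type);
        if (err)
                return 1;

        /* After: one line, no dead error handling. */
        actual_type = pat_x_mtrr_type_new(0, 0x1000, _PAGE_CACHE_WB);

        printf("actual_type = %#lx\n", actual_type);
        return 0;
}

The only difference visible to callers is the removal of that dead error path; the computed type is the same either way.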
