Commit 184f416

---
r: 63038
b: refs/heads/master
c: 46b2835
h: refs/heads/master
v: v3

Paul Mackerras committed Jul 26, 2007
1 parent 6adb395 commit 184f416
Showing 152 changed files with 1,288 additions and 756 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 141707892e92dca69b7b8af65b9367da2d1f8120
+refs/heads/master: 46b2835771ad8ef19b8e081e8c90439408c7645f
2 changes: 1 addition & 1 deletion trunk/Makefile
@@ -299,7 +299,7 @@ CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ -Wbitwise $(C
 MODFLAGS = -DMODULE
 CFLAGS_MODULE = $(MODFLAGS)
 AFLAGS_MODULE = $(MODFLAGS)
-LDFLAGS_MODULE = -r
+LDFLAGS_MODULE =
 CFLAGS_KERNEL =
 AFLAGS_KERNEL =
7 changes: 4 additions & 3 deletions trunk/arch/i386/boot/apm.c
@@ -40,14 +40,15 @@ int query_apm_bios(void)
         if (bx != 0x504d)       /* "PM" signature */
                 return -1;
 
-        if (cx & 0x02)          /* 32 bits supported? */
+        if (!(cx & 0x02))       /* 32 bits supported? */
                 return -1;
 
         /* Disconnect first, just in case */
         ax = 0x5304;
+        bx = 0;
         asm volatile("pushl %%ebp ; int $0x15 ; popl %%ebp"
-                     : "+a" (ax)
-                     : : "ebx", "ecx", "edx", "esi", "edi");
+                     : "+a" (ax), "+b" (bx)
+                     : : "ecx", "edx", "esi", "edi");
 
         /* Paranoia */
         ebx = esi = 0;
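
A note on the apm.c hunk above: the old code listed ebx as a clobber but never loaded a value into it, so %ebx could hold garbage when int $0x15 ran; the fix initializes bx and declares it as a "+b" read-write operand so GCC ties the variable to %ebx on entry and exit. The following is a minimal standalone sketch of that constraint pattern (assumes GCC on x86; the nop stands in for the real int $0x15; values are hypothetical), not the kernel's code:

/* Minimal sketch of the "+b" read-write operand vs. a bare clobber. */
#include <stdio.h>

int main(void)
{
        unsigned int ax = 0x5304;       /* hypothetical BIOS function code */
        unsigned int bx = 0;            /* must be in %ebx when the call runs */

        /* "+a"/"+b" bind ax/bx to %eax/%ebx as inputs AND outputs;
         * registers the call may trash remain in the clobber list. */
        asm volatile("nop"              /* stand-in for "int $0x15" */
                     : "+a" (ax), "+b" (bx)
                     : : "ecx", "edx", "esi", "edi");

        printf("ax=%#x bx=%#x\n", ax, bx);
        return 0;
}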
16 changes: 8 additions & 8 deletions trunk/arch/i386/boot/main.c
@@ -73,15 +73,15 @@ static void keyboard_set_repeat(void)
 }
 
 /*
- * Get Intel SpeedStep IST information.
+ * Get Intel SpeedStep (IST) information.
  */
-static void query_speedstep_ist(void)
+static void query_ist(void)
 {
         asm("int $0x15"
-            : "=a" (boot_params.speedstep_info[0]),
-              "=b" (boot_params.speedstep_info[1]),
-              "=c" (boot_params.speedstep_info[2]),
-              "=d" (boot_params.speedstep_info[3])
+            : "=a" (boot_params.ist_info.signature),
+              "=b" (boot_params.ist_info.command),
+              "=c" (boot_params.ist_info.event),
+              "=d" (boot_params.ist_info.perf_level)
             : "a" (0x0000e980),  /* IST Support */
              "d" (0x47534943)); /* Request value */
 }
@@ -144,8 +144,8 @@ void main(void)
         query_voyager();
 #endif
 
-        /* Query SpeedStep IST information */
-        query_speedstep_ist();
+        /* Query Intel SpeedStep (IST) information */
+        query_ist();
 
         /* Query APM information */
 #if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE)
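
For reference, the "=a".."=d" output constraints in query_ist() map the four BIOS return registers onto named fields instead of an anonymous array. A hedged sketch of the layout those constraints imply is below; the field names come straight from the diff, but the exact ist_info definition inside boot_params is an assumption here:

#include <stdint.h>

/* Hypothetical layout implied by the constraints above; the real
 * struct lives in the kernel's boot_params definition. */
struct ist_info {
        uint32_t signature;     /* returned in %eax */
        uint32_t command;       /* returned in %ebx */
        uint32_t event;         /* returned in %ecx */
        uint32_t perf_level;    /* returned in %edx */
};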
2 changes: 1 addition & 1 deletion trunk/arch/i386/kernel/acpi/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_ACPI) += boot.o
 ifneq ($(CONFIG_PCI),)
 obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o
 endif
-obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
+obj-$(CONFIG_ACPI) += sleep.o wakeup.o
 
 ifneq ($(CONFIG_ACPI_PROCESSOR),)
 obj-y += cstate.o processor.o
2 changes: 1 addition & 1 deletion trunk/arch/i386/kernel/setup.c
@@ -422,7 +422,7 @@ void __init setup_bootmem_allocator(void)
  */
         reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
 #endif
-#ifdef CONFIG_ACPI_SLEEP
+#ifdef CONFIG_ACPI
         /*
          * Reserve low memory region for sleep support.
          */
2 changes: 1 addition & 1 deletion trunk/arch/i386/mm/init.c
@@ -432,7 +432,7 @@ static void __init pagetable_init (void)
         paravirt_pagetable_setup_done(pgd_base);
 }
 
-#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
+#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI)
 /*
  * Swap suspend & friends need this for resume because things like the intel-agp
  * driver might have split up a kernel 4MB mapping.
8 changes: 4 additions & 4 deletions trunk/arch/ia64/ia32/ia32_support.c
@@ -249,11 +249,11 @@ ia32_init (void)
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
         {
-                extern struct kmem_cache *partial_page_cachep;
+                extern struct kmem_cache *ia64_partial_page_cachep;
 
-                partial_page_cachep = kmem_cache_create("partial_page_cache",
-                                        sizeof(struct partial_page),
-                                        0, SLAB_PANIC, NULL);
+                ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache",
+                                        sizeof(struct ia64_partial_page),
+                                        0, SLAB_PANIC, NULL);
         }
 #endif
         return 0;
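
The rename above touches all three stages of a slab cache's lifecycle: create once at init, then carve objects out with kmem_cache_alloc() and return them with kmem_cache_free(). As a hedged reminder of the pattern, mirroring the 2.6-era five-argument kmem_cache_create() used in the diff, with illustrative names rather than the kernel's:

#include <linux/init.h>
#include <linux/slab.h>

static struct kmem_cache *example_cachep;       /* hypothetical cache */

struct example_obj {
        unsigned long bitmap;
};

static int __init example_init(void)
{
        /* SLAB_PANIC makes kmem_cache_create() panic on failure,
         * so the result needs no NULL check here. */
        example_cachep = kmem_cache_create("example_cache",
                                sizeof(struct example_obj),
                                0, SLAB_PANIC, NULL);
        return 0;
}

static void example_use(void)
{
        struct example_obj *obj;

        obj = kmem_cache_alloc(example_cachep, GFP_KERNEL);
        if (!obj)
                return;
        obj->bitmap = 0;
        kmem_cache_free(example_cachep, obj);
}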
12 changes: 6 additions & 6 deletions trunk/arch/ia64/ia32/ia32priv.h
@@ -25,26 +25,26 @@
  * partially mapped pages provide precise accounting of which 4k sub pages
  * are mapped and which ones are not, thereby improving IA-32 compatibility.
  */
-struct partial_page {
-        struct partial_page *next;      /* linked list, sorted by address */
+struct ia64_partial_page {
+        struct ia64_partial_page *next; /* linked list, sorted by address */
         struct rb_node pp_rb;
         /* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64
          * should suffice.*/
         unsigned long bitmap;
         unsigned int base;
 };
 
-struct partial_page_list {
-        struct partial_page *pp_head;   /* list head, points to the lowest
+struct ia64_partial_page_list {
+        struct ia64_partial_page *pp_head; /* list head, points to the lowest
                                            * addressed partial page */
         struct rb_root ppl_rb;
-        struct partial_page *pp_hint;   /* pp_hint->next is the last
+        struct ia64_partial_page *pp_hint; /* pp_hint->next is the last
                                            * accessed partial page */
         atomic_t pp_count;              /* reference count */
 };
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
-struct partial_page_list* ia32_init_pp_list (void);
+struct ia64_partial_page_list* ia32_init_pp_list (void);
 #else
 # define ia32_init_pp_list() 0
 #endif
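
The bitmap field above is the heart of the partial-page accounting: one bit per 4K IA-32 subpage of a larger ia64 page. A standalone sketch of the bit arithmetic follows; the helper and its signature are hypothetical, invented for illustration, not the kernel's:

/* Hypothetical sketch: bit i of the bitmap says whether the 4K
 * subpage at base + i*4K is mapped. */
#include <stdio.h>

#define IA32_PAGE_SHIFT 12      /* 4K subpages */

/* Mark subpages covering [start, end) of a page at 'base' as mapped. */
static unsigned long set_subpages(unsigned long bitmap, unsigned int base,
                                  unsigned int start, unsigned int end)
{
        unsigned int first = (start - base) >> IA32_PAGE_SHIFT;
        unsigned int last  = (end - base) >> IA32_PAGE_SHIFT;
        unsigned int i;

        for (i = first; i < last; i++)
                bitmap |= 1UL << i;
        return bitmap;
}

int main(void)
{
        /* Map the second and third 4K subpages of a page at 0x10000. */
        unsigned long bm = set_subpages(0, 0x10000, 0x11000, 0x13000);
        printf("bitmap = %#lx\n", bm);  /* prints 0x6: bits 1 and 2 set */
        return 0;
}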
81 changes: 41 additions & 40 deletions trunk/arch/ia64/ia32/sys_ia32.c
@@ -253,17 +253,17 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
         return ret;
 }
 
-/* SLAB cache for partial_page structures */
-struct kmem_cache *partial_page_cachep;
+/* SLAB cache for ia64_partial_page structures */
+struct kmem_cache *ia64_partial_page_cachep;
 
 /*
- * init partial_page_list.
+ * init ia64_partial_page_list.
  * return 0 means kmalloc fail.
  */
-struct partial_page_list*
+struct ia64_partial_page_list*
 ia32_init_pp_list(void)
 {
-        struct partial_page_list *p;
+        struct ia64_partial_page_list *p;
 
         if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
                 return p;
@@ -280,12 +280,12 @@ ia32_init_pp_list(void)
  * Else, return 0 and provide @pprev, @rb_link, @rb_parent to
  * be used by later __ia32_insert_pp().
  */
-static struct partial_page *
-__ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
-        struct partial_page **pprev, struct rb_node ***rb_link,
+static struct ia64_partial_page *
+__ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start,
+        struct ia64_partial_page **pprev, struct rb_node ***rb_link,
         struct rb_node **rb_parent)
 {
-        struct partial_page *pp;
+        struct ia64_partial_page *pp;
         struct rb_node **__rb_link, *__rb_parent, *rb_prev;
 
         pp = ppl->pp_hint;
@@ -297,7 +297,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
 
         while (*__rb_link) {
                 __rb_parent = *__rb_link;
-                pp = rb_entry(__rb_parent, struct partial_page, pp_rb);
+                pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb);
 
                 if (pp->base == start) {
                         ppl->pp_hint = pp;
@@ -314,17 +314,17 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
         *rb_parent = __rb_parent;
         *pprev = NULL;
         if (rb_prev)
-                *pprev = rb_entry(rb_prev, struct partial_page, pp_rb);
+                *pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb);
         return NULL;
 }
 
 /*
  * insert @pp into @ppl.
  */
 static void
-__ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
-        struct partial_page *prev, struct rb_node **rb_link,
-        struct rb_node *rb_parent)
+__ia32_insert_pp(struct ia64_partial_page_list *ppl,
+        struct ia64_partial_page *pp, struct ia64_partial_page *prev,
+        struct rb_node **rb_link, struct rb_node *rb_parent)
 {
         /* link list */
         if (prev) {
@@ -334,7 +334,7 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
                 ppl->pp_head = pp;
                 if (rb_parent)
                         pp->next = rb_entry(rb_parent,
-                                struct partial_page, pp_rb);
+                                struct ia64_partial_page, pp_rb);
                 else
                         pp->next = NULL;
         }
@@ -350,8 +350,8 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
  * delete @pp from partial page list @ppl.
  */
 static void
-__ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
-        struct partial_page *prev)
+__ia32_delete_pp(struct ia64_partial_page_list *ppl,
+        struct ia64_partial_page *pp, struct ia64_partial_page *prev)
 {
         if (prev) {
                 prev->next = pp->next;
@@ -363,15 +363,15 @@ __ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
                         ppl->pp_hint = pp->next;
         }
         rb_erase(&pp->pp_rb, &ppl->ppl_rb);
-        kmem_cache_free(partial_page_cachep, pp);
+        kmem_cache_free(ia64_partial_page_cachep, pp);
 }
 
-static struct partial_page *
-__pp_prev(struct partial_page *pp)
+static struct ia64_partial_page *
+__pp_prev(struct ia64_partial_page *pp)
 {
         struct rb_node *prev = rb_prev(&pp->pp_rb);
         if (prev)
-                return rb_entry(prev, struct partial_page, pp_rb);
+                return rb_entry(prev, struct ia64_partial_page, pp_rb);
         else
                 return NULL;
 }
@@ -383,7 +383,7 @@ __pp_prev(struct partial_page *pp)
 static void
 __ia32_delete_pp_range(unsigned int start, unsigned int end)
 {
-        struct partial_page *pp, *prev;
+        struct ia64_partial_page *pp, *prev;
         struct rb_node **rb_link, *rb_parent;
 
         if (start >= end)
@@ -401,7 +401,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
         }
 
         while (pp && pp->base < end) {
-                struct partial_page *tmp = pp->next;
+                struct ia64_partial_page *tmp = pp->next;
                 __ia32_delete_pp(current->thread.ppl, pp, prev);
                 pp = tmp;
         }
@@ -414,7 +414,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
 static int
 __ia32_set_pp(unsigned int start, unsigned int end, int flags)
 {
-        struct partial_page *pp, *prev;
+        struct ia64_partial_page *pp, *prev;
         struct rb_node ** rb_link, *rb_parent;
         unsigned int pstart, start_bit, end_bit, i;
 
@@ -450,8 +450,8 @@ __ia32_set_pp(unsigned int start, unsigned int end, int flags)
                 return 0;
         }
 
-        /* new a partial_page */
-        pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+        /* new a ia64_partial_page */
+        pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
         if (!pp)
                 return -ENOMEM;
         pp->base = pstart;
@@ -504,7 +504,7 @@ ia32_set_pp(unsigned int start, unsigned int end, int flags)
 static int
 __ia32_unset_pp(unsigned int start, unsigned int end)
 {
-        struct partial_page *pp, *prev;
+        struct ia64_partial_page *pp, *prev;
         struct rb_node ** rb_link, *rb_parent;
         unsigned int pstart, start_bit, end_bit, i;
         struct vm_area_struct *vma;
@@ -532,8 +532,8 @@ __ia32_unset_pp(unsigned int start, unsigned int end)
                 return -ENOMEM;
         }
 
-        /* new a partial_page */
-        pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+        /* new a ia64_partial_page */
+        pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
         if (!pp)
                 return -ENOMEM;
         pp->base = pstart;
@@ -605,7 +605,7 @@ ia32_unset_pp(unsigned int *startp, unsigned int *endp)
 static int
 __ia32_compare_pp(unsigned int start, unsigned int end)
 {
-        struct partial_page *pp, *prev;
+        struct ia64_partial_page *pp, *prev;
         struct rb_node ** rb_link, *rb_parent;
         unsigned int pstart, start_bit, end_bit, size;
         unsigned int first_bit, next_zero_bit;  /* the first range in bitmap */
@@ -682,23 +682,23 @@ ia32_compare_pp(unsigned int *startp, unsigned int *endp)
 }
 
 static void
-__ia32_drop_pp_list(struct partial_page_list *ppl)
+__ia32_drop_pp_list(struct ia64_partial_page_list *ppl)
 {
-        struct partial_page *pp = ppl->pp_head;
+        struct ia64_partial_page *pp = ppl->pp_head;
 
         while (pp) {
-                struct partial_page *next = pp->next;
-                kmem_cache_free(partial_page_cachep, pp);
+                struct ia64_partial_page *next = pp->next;
+                kmem_cache_free(ia64_partial_page_cachep, pp);
                 pp = next;
         }
 
         kfree(ppl);
 }
 
 void
-ia32_drop_partial_page_list(struct task_struct *task)
+ia32_drop_ia64_partial_page_list(struct task_struct *task)
 {
-        struct partial_page_list* ppl = task->thread.ppl;
+        struct ia64_partial_page_list* ppl = task->thread.ppl;
 
         if (ppl && atomic_dec_and_test(&ppl->pp_count))
                 __ia32_drop_pp_list(ppl);
@@ -708,9 +708,9 @@ ia32_drop_partial_page_list(struct task_struct *task)
  * Copy current->thread.ppl to ppl (already initialized).
  */
 static int
-__ia32_copy_pp_list(struct partial_page_list *ppl)
+__ia32_copy_pp_list(struct ia64_partial_page_list *ppl)
 {
-        struct partial_page *pp, *tmp, *prev;
+        struct ia64_partial_page *pp, *tmp, *prev;
         struct rb_node **rb_link, *rb_parent;
 
         ppl->pp_head = NULL;
@@ -721,7 +721,7 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
         prev = NULL;
 
         for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
-                tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+                tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
                 if (!tmp)
                         return -ENOMEM;
                 *tmp = *pp;
@@ -734,7 +734,8 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
 }
 
 int
-ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags)
+ia32_copy_ia64_partial_page_list(struct task_struct *p,
+                unsigned long clone_flags)
 {
         int retval = 0;
 
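
Beyond the rename, the __ia32_find_pp() hunks show the kernel's standard rbtree search idiom: walk down from the root comparing keys, and if the key is absent, hand back the insertion point (rb_link/rb_parent) so the caller can insert without a second search. A hedged sketch of that idiom with illustrative names, not the kernel's:

#include <linux/rbtree.h>

struct node {
        struct rb_node rb;
        unsigned int base;      /* sort key */
};

/* Return the matching node, or NULL with *rb_link/*rb_parent set to
 * where a new node for 'key' would be linked. */
static struct node *find_or_link(struct rb_root *root, unsigned int key,
                                 struct rb_node ***rb_link,
                                 struct rb_node **rb_parent)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;

        while (*link) {
                struct node *n = rb_entry(*link, struct node, rb);

                parent = *link;
                if (key == n->base)
                        return n;               /* already present */
                else if (key < n->base)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }
        *rb_link = link;        /* where to hang a new rb_node */
        *rb_parent = parent;
        return NULL;
}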