linux / arch / mips / mm / init.c @ a21b069
510 lines (437 loc) · 12.4 KB
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>

#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
        { \
        unsigned int mvpflags; \
        local_irq_save(flags);\
        mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
        evpe(mvpflags); \
        local_irq_restore(flags); \
        }
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

unsigned long highstart_pfn, highend_pfn;

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;

/*
 * Not static inline because used by IP27 special magic initialization code
 */
unsigned long setup_zero_pages(void)
{
        unsigned int order;
        unsigned long size;
        struct page *page;

        if (cpu_has_vce)
                order = 3;
        else
                order = 0;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");

        page = virt_to_page((void *)empty_zero_page);
        split_page(page, order);
        while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
                SetPageReserved(page);
                page++;
        }

        size = PAGE_SIZE << order;
        zero_page_mask = (size - 1) & PAGE_MASK;

        return 1UL << order;
}

/*
 * These are almost like kmap_atomic / kunmap_atomic except they take an
 * additional address argument as the hint.
 */
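/*
 * kmap_get_fixmap_pte() walks the kernel page tables
 * (pgd -> pud -> pmd -> pte) down to the pte slot that backs a fixmap
 * virtual address.
 */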
#define kmap_get_fixmap_pte(vaddr)					\
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
        unsigned long vaddr;

        /* cache the first coherent kmap pte */
        vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
        kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
        enum fixed_addresses idx;
        unsigned long vaddr, flags, entrylo;
        unsigned long old_ctx;
        pte_t pte;
        int tlbidx;

        inc_preempt_count();
        idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
        idx += FIX_N_COLOURS * smp_processor_id();
#endif
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
        entrylo = pte.pte_high;
#else
        entrylo = pte_val(pte) >> 6;
#endif

        ENTER_CRITICAL(flags);
        old_ctx = read_c0_entryhi();
        write_c0_entryhi(vaddr & (PAGE_MASK << 1));
        write_c0_entrylo0(entrylo);
        write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
        set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
        /* preload TLB instead of local_flush_tlb_one() */
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        tlbidx = read_c0_index();
        mtc0_tlbw_hazard();
        if (tlbidx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
#else
        tlbidx = read_c0_wired();
        write_c0_wired(tlbidx + 1);
        write_c0_index(tlbidx);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
#endif
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        EXIT_CRITICAL(flags);

        return (void *) vaddr;
}

#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

static inline void kunmap_coherent(struct page *page)
{
#ifndef CONFIG_MIPS_MT_SMTC
        unsigned int wired;
        unsigned long flags, old_ctx;

        ENTER_CRITICAL(flags);
        old_ctx = read_c0_entryhi();
        wired = read_c0_wired() - 1;
        write_c0_wired(wired);
        write_c0_index(wired);
        write_c0_entryhi(UNIQUE_ENTRYHI(wired));
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        EXIT_CRITICAL(flags);
#endif
        dec_preempt_count();
        preempt_check_resched();
}
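/*
 * Copy helpers for aliasing D-caches: the kernel-side mapping is set up
 * with kmap_coherent() so it has the same cache colour as the user
 * address, which keeps the copy from leaving stale alias lines behind.
 */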
void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to, KM_USER1);
        if (cpu_has_dc_aliases) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent(from);
        } else {
                vfrom = kmap_atomic(from, KM_USER0);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom, KM_USER0);
        }
        if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
            pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                flush_data_cache_page((unsigned long)vto);
        kunmap_atomic(vto, KM_USER1);
        /* Make sure this page is cleared on other CPU's too before using it */
        smp_wmb();
}

EXPORT_SYMBOL(copy_user_highpage);

void copy_to_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(page);
        } else
                memcpy(dst, src, len);
        if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

EXPORT_SYMBOL(copy_to_user_page);

void copy_from_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases) {
                void *vfrom =
                        kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(page);
        } else
                memcpy(dst, src, len);
}

EXPORT_SYMBOL(copy_from_user_page);

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */

void __init fixrange_init(unsigned long start, unsigned long end,
        pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
                        pmd = (pmd_t *)pud;
                        for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                        set_pmd(pmd, __pmd((unsigned long)pte));
                                        if (pte != pte_offset_kernel(pmd, 0))
                                                BUG();
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
extern void pagetable_init(void);

static int __init page_is_ram(unsigned long pagenr)
{
        int i;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long addr, end;

                if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
                        /* not usable memory */
                        continue;

                addr = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr +
                               boot_mem_map.map[i].size);

                if (pagenr >= addr && pagenr < end)
                        return 1;
        }

        return 0;
}

void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = { 0, };
        unsigned long max_dma, low;
#ifndef CONFIG_FLATMEM
        unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
        unsigned long i, j, pfn;
#endif

        pagetable_init();

#ifdef CONFIG_HIGHMEM
        kmap_init();
#endif
        kmap_coherent_init();

        max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        low = max_low_pfn;

#ifdef CONFIG_ISA
        if (low < max_dma)
                zones_size[ZONE_DMA] = low;
        else {
                zones_size[ZONE_DMA] = max_dma;
                zones_size[ZONE_NORMAL] = low - max_dma;
        }
#else
        zones_size[ZONE_DMA] = low;
#endif
#ifdef CONFIG_HIGHMEM
        zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;

        if (cpu_has_dc_aliases && zones_size[ZONE_HIGHMEM]) {
                printk(KERN_WARNING "This processor doesn't support highmem."
                       " %ldk highmem ignored\n", zones_size[ZONE_HIGHMEM]);
                zones_size[ZONE_HIGHMEM] = 0;
        }
#endif

#ifdef CONFIG_FLATMEM
        free_area_init(zones_size);
#else
        pfn = 0;
        for (i = 0; i < MAX_NR_ZONES; i++)
                for (j = 0; j < zones_size[i]; j++, pfn++)
                        if (!page_is_ram(pfn))
                                zholes_size[i]++;
        free_area_init_node(0, NODE_DATA(0), zones_size, 0, zholes_size);
#endif
}

static struct kcore_list kcore_mem, kcore_vmalloc;
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif
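/*
 * mem_init() hands the bootmem (and any highmem) pages to the buddy
 * allocator, counts RAM and reserved pages, registers /proc/kcore
 * regions and prints the memory summary line.
 */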
void __init mem_init(void)
{
        unsigned long codesize, reservedpages, datasize, initsize;
        unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
#endif
        max_mapnr = highend_pfn;
#else
        max_mapnr = max_low_pfn;
#endif
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

        totalram_pages += free_all_bootmem();
        totalram_pages -= setup_zero_pages();	/* Setup zeroed pages.  */

        reservedpages = ram = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                if (page_is_ram(tmp)) {
                        ram++;
                        if (PageReserved(pfn_to_page(tmp)))
                                reservedpages++;
                }
        num_physpages = ram;

#ifdef CONFIG_HIGHMEM
        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
                struct page *page = mem_map + tmp;

                if (!page_is_ram(tmp)) {
                        SetPageReserved(page);
                        continue;
                }
                ClearPageReserved(page);
#ifdef CONFIG_LIMITED_DMA
                set_page_address(page, lowmem_page_address(page));
#endif
                init_page_count(page);
                __free_page(page);
                totalhigh_pages++;
        }
        totalram_pages += totalhigh_pages;
        num_physpages += totalhigh_pages;
#endif

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
        if ((unsigned long) &_text > (unsigned long) CKSEG0)
                /* The -4 is a hack so that user tools don't have to handle
                   the overflow.  */
                kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
#endif
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
               "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               ram << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10,
               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long pfn;

        for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
                struct page *page = pfn_to_page(pfn);
                void *addr = phys_to_virt(PFN_PHYS(pfn));

                ClearPageReserved(page);
                init_page_count(page);
                memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
                __free_page(page);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory",
                        virt_to_phys((void *)start),
                        virt_to_phys((void *)end));
}
#endif

extern unsigned long prom_free_prom_memory(void);

void free_initmem(void)
{
        unsigned long freed;

        freed = prom_free_prom_memory();
        if (freed)
                printk(KERN_INFO "Freeing firmware memory: %ldkb freed\n",
                       freed >> 10);
        free_init_pages("unused kernel memory",
                        __pa_symbol(&__init_begin),
                        __pa_symbol(&__init_end));
}