summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorjdike <jdike>2003-08-29 16:46:42 +0000
committerjdike <jdike>2003-08-29 16:46:42 +0000
commit672808176907ae1de8af6d2a62f25f6ab0a8c073 (patch)
treef7f85e66f4fbf7d506a430715fd3b4a06eeaab7e
parentccb9dfde59193b67239438f473ab6a7043bec33f (diff)
downloaduml-history-672808176907ae1de8af6d2a62f25f6ab0a8c073.tar.gz
Replaced the old low-level physmem stuff to allow ubd-mmap support. Mainly,
this involved adding support for page-by-page mapping of physical memory.
-rw-r--r--arch/um/include/mem.h13
-rw-r--r--arch/um/include/mem_kern.h30
-rw-r--r--arch/um/include/mem_user.h20
-rw-r--r--arch/um/kernel/Makefile10
-rw-r--r--arch/um/kernel/ksyms.c4
-rw-r--r--arch/um/kernel/mem.c688
-rw-r--r--arch/um/kernel/mem_user.c142
-rw-r--r--arch/um/kernel/physmem.c349
-rw-r--r--arch/um/kernel/skas/mem_user.c11
-rw-r--r--arch/um/kernel/um_arch.c23
-rw-r--r--include/asm-um/page.h33
-rw-r--r--include/asm-um/pgtable.h46
-rw-r--r--include/linux/mm.h4
13 files changed, 544 insertions, 829 deletions
diff --git a/arch/um/include/mem.h b/arch/um/include/mem.h
index bad6b30..053375d 100644
--- a/arch/um/include/mem.h
+++ b/arch/um/include/mem.h
@@ -1,19 +1,18 @@
/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2002, 2003 Jeff Dike (jdike@addtoit.com)
* Licensed under the GPL
*/
#ifndef __MEM_H__
#define __MEM_H__
-struct vm_reserved {
- struct list_head list;
- unsigned long start;
- unsigned long end;
-};
+#include "linux/types.h"
-extern void set_usable_vm(unsigned long start, unsigned long end);
extern void set_kmem_end(unsigned long new);
+extern int phys_mapping(unsigned long phys, __u64 *offset_out);
+extern int physmem_subst_mapping(void *virt, int fd, __u64 offset, int w);
+extern int is_remapped(void *virt);
+extern int physmem_remove_mapping(void *virt);
#endif
diff --git a/arch/um/include/mem_kern.h b/arch/um/include/mem_kern.h
new file mode 100644
index 0000000..b39f03d
--- /dev/null
+++ b/arch/um/include/mem_kern.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2003 Jeff Dike (jdike@addtoit.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __MEM_KERN_H__
+#define __MEM_KERN_H__
+
+#include "linux/list.h"
+#include "linux/types.h"
+
+struct remapper {
+ struct list_head list;
+ int (*proc)(int, unsigned long, int, __u64);
+};
+
+extern void register_remapper(struct remapper *info);
+
+#endif
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
diff --git a/arch/um/include/mem_user.h b/arch/um/include/mem_user.h
index cecece9..b0e01a9 100644
--- a/arch/um/include/mem_user.h
+++ b/arch/um/include/mem_user.h
@@ -32,17 +32,17 @@
#ifndef _MEM_USER_H
#define _MEM_USER_H
-struct mem_region {
+struct iomem_region {
+ struct iomem_region *next;
char *driver;
- unsigned long start_pfn;
- unsigned long start;
- unsigned long len;
- void *mem_map;
int fd;
+ int size;
+ unsigned long phys;
+ unsigned long virt;
};
-extern struct mem_region *regions[];
-extern struct mem_region physmem_region;
+extern struct iomem_region *iomem_regions;
+extern int iomem_size;
#define ROUND_4M(n) ((((unsigned long) (n)) + (1 << 22)) & ~((1 << 22) - 1))
@@ -53,15 +53,11 @@ extern int init_mem_user(void);
extern int create_mem_file(unsigned long len);
extern void setup_memory(void *entry);
extern unsigned long find_iomem(char *driver, unsigned long *len_out);
-extern int init_maps(struct mem_region *region);
-extern int nregions(void);
-extern int reserve_vm(unsigned long start, unsigned long end, void *e);
+extern int init_maps(unsigned long len);
extern unsigned long get_vm(unsigned long len);
extern void setup_physmem(unsigned long start, unsigned long usable,
unsigned long len);
-extern int setup_region(struct mem_region *region, void *entry);
extern void add_iomem(char *name, int fd, unsigned long size);
-extern struct mem_region *phys_region(unsigned long phys);
extern unsigned long phys_offset(unsigned long phys);
extern void unmap_physmem(void);
extern int map_memory(unsigned long virt, unsigned long phys,
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 867821c..d49c83f 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -7,11 +7,11 @@ O_TARGET = built-in.o
obj-y = config.o checksum.o exec_kern.o exitcode.o frame_kern.o frame.o \
helper.o init_task.o irq.o irq_user.o ksyms.o mem.o mem_user.o \
- process.o process_kern.o ptrace.o reboot.o resource.o sigio_user.o \
- sigio_kern.o signal_kern.o signal_user.o smp.o syscall_kern.o \
- syscall_user.o sysrq.o sys_call_table.o tempfile.o time.o \
- time_kern.o tlb.o trap_kern.o trap_user.o uaccess_user.o um_arch.o \
- umid.o user_syms.o user_util.o
+ physmem.o process.o process_kern.o ptrace.o reboot.o resource.o \
+ sigio_user.o sigio_kern.o signal_kern.o signal_user.o smp.o \
+ syscall_kern.o syscall_user.o sysrq.o sys_call_table.o tempfile.o \
+ time.o time_kern.o tlb.o trap_kern.o trap_user.o uaccess_user.o \
+ um_arch.o umid.o user_syms.o user_util.o
obj-$(CONFIG_BLK_DEV_INITRD) += initrd_kern.o initrd_user.o
obj-$(CONFIG_GPROF) += gprof_syms.o
diff --git a/arch/um/kernel/ksyms.c b/arch/um/kernel/ksyms.c
index 0a36065..f2bd3fe 100644
--- a/arch/um/kernel/ksyms.c
+++ b/arch/um/kernel/ksyms.c
@@ -34,10 +34,6 @@ EXPORT_SYMBOL(flush_tlb_range);
EXPORT_SYMBOL(host_task_size);
EXPORT_SYMBOL(arch_validate);
-EXPORT_SYMBOL(region_pa);
-EXPORT_SYMBOL(region_va);
-EXPORT_SYMBOL(phys_mem_map);
-EXPORT_SYMBOL(page_mem_map);
EXPORT_SYMBOL(high_physmem);
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(um_virt_to_phys);
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index fca646b..aba6258 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -1,78 +1,44 @@
/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
* Licensed under the GPL
*/
-#include "linux/config.h"
-#include "linux/types.h"
+#include "linux/stddef.h"
+#include "linux/kernel.h"
#include "linux/mm.h"
-#include "linux/fs.h"
-#include "linux/init.h"
#include "linux/bootmem.h"
-#include "linux/swap.h"
-#include "linux/slab.h"
-#include "linux/vmalloc.h"
#include "linux/highmem.h"
#include "asm/page.h"
-#include "asm/pgtable.h"
+#include "asm/fixmap.h"
#include "asm/pgalloc.h"
-#include "asm/bitops.h"
-#include "asm/uaccess.h"
-#include "asm/tlb.h"
#include "user_util.h"
#include "kern_util.h"
-#include "mem_user.h"
-#include "mem.h"
#include "kern.h"
-#include "init.h"
-#include "os.h"
-#include "mode_kern.h"
+#include "mem_user.h"
#include "uml_uaccess.h"
+#include "os.h"
+
+extern char __binary_start;
/* Changed during early boot */
-pgd_t swapper_pg_dir[1024];
-unsigned long high_physmem;
-unsigned long vm_start;
-unsigned long vm_end;
-unsigned long highmem;
unsigned long *empty_zero_page = NULL;
unsigned long *empty_bad_page = NULL;
-
-/* Not modified */
-const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
-
-/* Changed during early boot */
-static unsigned long totalram_pages = 0;
-
-extern char __init_begin, __init_end;
-extern long physmem_size;
-
-#ifdef CONFIG_SMP
-/* Not changed by UML */
-mmu_gather_t mmu_gathers[NR_CPUS];
-#endif
-
-/* Changed during early boot */
+pgd_t swapper_pg_dir[1024];
+unsigned long highmem;
int kmalloc_ok = 0;
-#define NREGIONS (phys_region_index(0xffffffff) - phys_region_index(0x0) + 1)
-struct mem_region *regions[NREGIONS] = { [ 0 ... NREGIONS - 1 ] = NULL };
-#define REGION_SIZE ((0xffffffff & ~REGION_MASK) + 1)
-
-/* Changed during early boot */
static unsigned long brk_end;
-
-static void map_cb(void *unused)
-{
- map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
-}
+static unsigned long totalram_pages = 0;
void unmap_physmem(void)
{
os_unmap_memory((void *) brk_end, uml_reserved - brk_end);
}
-extern char __binary_start;
+static void map_cb(void *unused)
+{
+ map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
+}
void mem_init(void)
{
@@ -104,48 +70,12 @@ void mem_init(void)
/* this will put all low memory onto the freelists */
totalram_pages = free_all_bootmem();
totalram_pages += highmem >> PAGE_SHIFT;
- max_mapnr = totalram_pages;
num_physpages = totalram_pages;
printk(KERN_INFO "Memory: %luk available\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10));
kmalloc_ok = 1;
}
-/* Changed during early boot */
-static unsigned long kmem_top = 0;
-
-unsigned long get_kmem_end(void)
-{
- if(kmem_top == 0)
- kmem_top = CHOOSE_MODE(kmem_end_tt, kmem_end_skas);
- return(kmem_top);
-}
-
-void set_kmem_end(unsigned long new)
-{
- kmem_top = new;
-}
-
-#if CONFIG_HIGHMEM
-/* Changed during early boot */
-pte_t *kmap_pte;
-pgprot_t kmap_prot;
-
-#define kmap_get_fixmap_pte(vaddr) \
- pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
-
-void __init kmap_init(void)
-{
- unsigned long kmap_vstart;
-
- /* cache the first kmap pte */
- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-
- kmap_prot = PAGE_KERNEL;
-}
-#endif /* CONFIG_HIGHMEM */
-
static void __init fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
{
@@ -176,138 +106,10 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
}
}
-int init_maps(struct mem_region *region)
-{
- struct page *p, *map;
- int i, n, len;
-
- if(region == &physmem_region){
- region->mem_map = mem_map;
- return(0);
- }
- else if(region->mem_map != NULL) return(0);
-
- n = region->len >> PAGE_SHIFT;
- len = n * sizeof(struct page);
- if(kmalloc_ok){
- map = kmalloc(len, GFP_KERNEL);
- if(map == NULL) map = vmalloc(len);
- }
- else map = alloc_bootmem_low_pages(len);
-
- if(map == NULL)
- return(-ENOMEM);
- for(i = 0; i < n; i++){
- p = &map[i];
- set_page_count(p, 0);
- SetPageReserved(p);
- INIT_LIST_HEAD(&p->list);
- }
- region->mem_map = map;
- return(0);
-}
-
-DECLARE_MUTEX(regions_sem);
-
-static int setup_one_range(int fd, char *driver, unsigned long start,
- unsigned long pfn, int len,
- struct mem_region *region)
-{
- int i;
-
- down(&regions_sem);
- for(i = 0; i < NREGIONS; i++){
- if(regions[i] == NULL) break;
- }
- if(i == NREGIONS){
- printk("setup_one_range : no free regions\n");
- i = -1;
- goto out;
- }
-
- if(fd == -1)
- fd = create_mem_file(len);
-
- if(region == NULL){
- if(kmalloc_ok)
- region = kmalloc(sizeof(*region), GFP_KERNEL);
- else region = alloc_bootmem_low_pages(sizeof(*region));
- if(region == NULL)
- panic("Failed to allocating mem_region");
- }
-
- *region = ((struct mem_region) { .driver = driver,
- .start_pfn = pfn,
- .start = start,
- .len = len,
- .fd = fd } );
- regions[i] = region;
- out:
- up(&regions_sem);
- return(i);
-}
-
-#ifdef CONFIG_HIGHMEM
-static void init_highmem(void)
-{
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
- unsigned long vaddr;
-
- /*
- * Permanent kmaps:
- */
- vaddr = PKMAP_BASE;
- fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);
-
- pgd = swapper_pg_dir + __pgd_offset(vaddr);
- pmd = pmd_offset(pgd, vaddr);
- pte = pte_offset(pmd, vaddr);
- pkmap_page_table = pte;
-
- kmap_init();
-}
-
-void setup_highmem(unsigned long len)
-{
- struct mem_region *region;
- struct page *page, *map;
- unsigned long phys;
- int i, cur, index;
-
- phys = physmem_size;
- do {
- cur = min(len, (unsigned long) REGION_SIZE);
- i = setup_one_range(-1, NULL, -1, phys >> PAGE_SHIFT, cur,
- NULL);
- if(i == -1){
- printk("setup_highmem - setup_one_range failed\n");
- return;
- }
- region = regions[i];
- index = phys / PAGE_SIZE;
- region->mem_map = &mem_map[index];
-
- map = region->mem_map;
- for(i = 0; i < (cur >> PAGE_SHIFT); i++){
- page = &map[i];
- ClearPageReserved(page);
- set_bit(PG_highmem, &page->flags);
- atomic_set(&page->count, 1);
- __free_page(page);
- }
- phys += cur;
- len -= cur;
- } while(len > 0);
-}
-#endif
-
void paging_init(void)
{
- struct mem_region *region;
- unsigned long zones_size[MAX_NR_ZONES], start, end, vaddr;
- int i, index;
+ unsigned long zones_size[MAX_NR_ZONES], vaddr;
+ int i;
empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
@@ -317,14 +119,6 @@ void paging_init(void)
(uml_physmem >> PAGE_SHIFT);
zones_size[2] = highmem >> PAGE_SHIFT;
free_area_init(zones_size);
- start = phys_region_index(__pa(uml_physmem));
- end = phys_region_index(__pa(high_physmem - 1));
- for(i = start; i <= end; i++){
- region = regions[i];
- index = (region->start - uml_physmem) / PAGE_SIZE;
- region->mem_map = &mem_map[index];
- if(i > start) free_bootmem(__pa(region->start), region->len);
- }
/*
* Fixed mappings, only the page table structure has to be
@@ -339,39 +133,30 @@ void paging_init(void)
#endif
}
-/* Changed by meminfo_compat, which is a setup */
-static int meminfo_22 = 0;
-
-static int meminfo_compat(char *str)
+struct page *arch_validate(struct page *page, int mask, int order)
{
- meminfo_22 = 1;
- return(1);
-}
+ unsigned long addr, zero = 0;
+ int i;
-__setup("22_meminfo", meminfo_compat);
+ again:
+ if(page == NULL) return(page);
+ if(PageHighMem(page)) return(page);
-void si_meminfo(struct sysinfo *val)
-{
- val->totalram = totalram_pages;
- val->sharedram = 0;
- val->freeram = nr_free_pages();
- val->bufferram = atomic_read(&buffermem_pages);
- val->totalhigh = highmem >> PAGE_SHIFT;
- val->freehigh = nr_free_highpages();
- val->mem_unit = PAGE_SIZE;
- if(meminfo_22){
- val->freeram <<= PAGE_SHIFT;
- val->bufferram <<= PAGE_SHIFT;
- val->totalram <<= PAGE_SHIFT;
- val->sharedram <<= PAGE_SHIFT;
+ addr = (unsigned long) page_address(page);
+ for(i = 0; i < (1 << order); i++){
+ current->thread.fault_addr = (void *) addr;
+ if(__do_copy_to_user((void *) addr, &zero,
+ sizeof(zero),
+ &current->thread.fault_addr,
+ &current->thread.fault_catcher)){
+ if(!(mask & __GFP_WAIT)) return(NULL);
+ else break;
+ }
+ addr += PAGE_SIZE;
}
-}
-
-pte_t __bad_page(void)
-{
- clear_page(empty_bad_page);
- return pte_mkdirty(mk_pte((struct page *) empty_bad_page,
- PAGE_SHARED));
+ if(i == (1 << order)) return(page);
+ page = _alloc_pages(mask, order);
+ goto again;
}
/* This can't do anything because nothing in the kernel image can be freed
@@ -451,395 +236,34 @@ void show_mem(void)
show_buffers();
}
-static int __init uml_mem_setup(char *line, int *add)
-{
- char *retptr;
- physmem_size = memparse(line,&retptr);
- return 0;
-}
-__uml_setup("mem=", uml_mem_setup,
-"mem=<Amount of desired ram>\n"
-" This controls how much \"physical\" memory the kernel allocates\n"
-" for the system. The size is specified as a number followed by\n"
-" one of 'k', 'K', 'm', 'M', which have the obvious meanings.\n"
-" This is not related to the amount of memory in the physical\n"
-" machine. It can be more, and the excess, if it's ever used, will\n"
-" just be swapped out.\n Example: mem=64M\n\n"
-);
-
-struct page *arch_validate(struct page *page, int mask, int order)
-{
- unsigned long addr, zero = 0;
- int i;
-
- again:
- if(page == NULL) return(page);
- if(PageHighMem(page)) return(page);
-
- addr = (unsigned long) page_address(page);
- for(i = 0; i < (1 << order); i++){
- current->thread.fault_addr = (void *) addr;
- if(__do_copy_to_user((void *) addr, &zero,
- sizeof(zero),
- &current->thread.fault_addr,
- &current->thread.fault_catcher)){
- if(!(mask & __GFP_WAIT)) return(NULL);
- else break;
- }
- addr += PAGE_SIZE;
- }
- if(i == (1 << order)) return(page);
- page = _alloc_pages(mask, order);
- goto again;
-}
-
-DECLARE_MUTEX(vm_reserved_sem);
-static struct list_head vm_reserved = LIST_HEAD_INIT(vm_reserved);
-
-/* Static structures, linked in to the list in early boot */
-static struct vm_reserved head = {
- .list = LIST_HEAD_INIT(head.list),
- .start = 0,
- .end = 0xffffffff
-};
-
-static struct vm_reserved tail = {
- .list = LIST_HEAD_INIT(tail.list),
- .start = 0,
- .end = 0xffffffff
-};
-
-void set_usable_vm(unsigned long start, unsigned long end)
-{
- list_add(&head.list, &vm_reserved);
- list_add(&tail.list, &head.list);
- head.end = start;
- tail.start = end;
-}
-
-int reserve_vm(unsigned long start, unsigned long end, void *e)
-
-{
- struct vm_reserved *entry = e, *reserved, *prev;
- struct list_head *ele;
- int err;
-
- down(&vm_reserved_sem);
- list_for_each(ele, &vm_reserved){
- reserved = list_entry(ele, struct vm_reserved, list);
- if(reserved->start >= end) goto found;
- }
- panic("Reserved vm out of range");
- found:
- prev = list_entry(ele->prev, struct vm_reserved, list);
- if(prev->end > start)
- panic("Can't reserve vm");
- if(entry == NULL)
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if(entry == NULL){
- printk("reserve_vm : Failed to allocate entry\n");
- err = -ENOMEM;
- goto out;
- }
- *entry = ((struct vm_reserved)
- { .list = LIST_HEAD_INIT(entry->list),
- .start = start,
- .end = end });
- list_add(&entry->list, &prev->list);
- err = 0;
- out:
- up(&vm_reserved_sem);
- return(0);
-}
-
-unsigned long get_vm(unsigned long len)
-{
- struct vm_reserved *this, *next;
- struct list_head *ele;
- unsigned long start;
- int err;
-
- down(&vm_reserved_sem);
- list_for_each(ele, &vm_reserved){
- this = list_entry(ele, struct vm_reserved, list);
- next = list_entry(ele->next, struct vm_reserved, list);
- if((this->start < next->start) &&
- (this->end + len + PAGE_SIZE <= next->start))
- goto found;
- }
- up(&vm_reserved_sem);
- return(0);
- found:
- up(&vm_reserved_sem);
- start = (unsigned long) UML_ROUND_UP(this->end) + PAGE_SIZE;
- err = reserve_vm(start, start + len, NULL);
- if(err) return(0);
- return(start);
-}
-
-int nregions(void)
-{
- return(NREGIONS);
-}
-
-static void setup_range(int fd, char *driver, unsigned long start,
- unsigned long pfn, unsigned long len, int need_vm,
- struct mem_region *region, void *reserved)
-{
- int i, cur;
-
- do {
- cur = min(len, (unsigned long) REGION_SIZE);
- i = setup_one_range(fd, driver, start, pfn, cur, region);
- region = regions[i];
- if(need_vm && setup_region(region, reserved)){
- kfree(region);
- regions[i] = NULL;
- return;
- }
- start += cur;
- if(pfn != -1) pfn += cur;
- len -= cur;
- } while(len > 0);
-}
-
-struct iomem {
- char *name;
- int fd;
- unsigned long size;
-};
-
-/* iomem regions can only be added on the command line at the moment.
- * Locking will be needed when they can be added via mconsole.
- */
-
-struct iomem iomem_regions[NREGIONS] = { [ 0 ... NREGIONS - 1 ] =
- { .name = NULL,
- .fd = -1,
- .size = 0 } };
-
-int num_iomem_regions = 0;
-
-void add_iomem(char *name, int fd, unsigned long size)
-{
- if(num_iomem_regions == sizeof(iomem_regions)/sizeof(iomem_regions[0]))
- return;
- size = (size + PAGE_SIZE - 1) & PAGE_MASK;
- iomem_regions[num_iomem_regions++] =
- ((struct iomem) { .name = name,
- .fd = fd,
- .size = size } );
-}
-
-int setup_iomem(void)
-{
- struct iomem *iomem;
- int i;
-
- for(i = 0; i < num_iomem_regions; i++){
- iomem = &iomem_regions[i];
- setup_range(iomem->fd, iomem->name, -1, -1, iomem->size, 1,
- NULL, NULL);
- }
- return(0);
-}
-
-__initcall(setup_iomem);
-
-#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
-#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
-
-/* Changed during early boot */
-static struct mem_region physmem_region;
-static struct vm_reserved physmem_reserved;
-
-void setup_physmem(unsigned long start, unsigned long reserve_end,
- unsigned long len)
-{
- struct mem_region *region = &physmem_region;
- struct vm_reserved *reserved = &physmem_reserved;
- unsigned long cur, pfn = 0;
- int do_free = 1, bootmap_size;
-
- do {
- cur = min(len, (unsigned long) REGION_SIZE);
- if(region == NULL)
- region = alloc_bootmem_low_pages(sizeof(*region));
- if(reserved == NULL)
- reserved = alloc_bootmem_low_pages(sizeof(*reserved));
- if((region == NULL) || (reserved == NULL))
- panic("Couldn't allocate physmem region or vm "
- "reservation\n");
- setup_range(-1, NULL, start, pfn, cur, 1, region, reserved);
-
- if(do_free){
- unsigned long reserve = reserve_end - start;
- int pfn = PFN_UP(__pa(reserve_end));
- int delta = (len - reserve) >> PAGE_SHIFT;
-
- bootmap_size = init_bootmem(pfn, pfn + delta);
- free_bootmem(__pa(reserve_end) + bootmap_size,
- cur - bootmap_size - reserve);
- do_free = 0;
- }
- start += cur;
- pfn += cur >> PAGE_SHIFT;
- len -= cur;
- region = NULL;
- reserved = NULL;
- } while(len > 0);
-}
-
-struct mem_region *phys_region(unsigned long phys)
-{
- unsigned int n = phys_region_index(phys);
-
- if(regions[n] == NULL)
- panic("Physical address in uninitialized region");
- return(regions[n]);
-}
-
-unsigned long phys_offset(unsigned long phys)
-{
- return(phys_addr(phys));
-}
-
-struct page *phys_mem_map(unsigned long phys)
-{
- return((struct page *) phys_region(phys)->mem_map);
-}
-
-struct page *pte_mem_map(pte_t pte)
-{
- return(phys_mem_map(pte_val(pte)));
-}
-
-struct mem_region *page_region(struct page *page, int *index_out)
-{
- int i;
- struct mem_region *region;
- struct page *map;
-
- for(i = 0; i < NREGIONS; i++){
- region = regions[i];
- if(region == NULL) continue;
- map = region->mem_map;
- if((page >= map) && (page < &map[region->len >> PAGE_SHIFT])){
- if(index_out != NULL) *index_out = i;
- return(region);
- }
- }
- panic("No region found for page");
- return(NULL);
-}
-
-unsigned long page_to_pfn(struct page *page)
-{
- struct mem_region *region = page_region(page, NULL);
-
- return(region->start_pfn + (page - (struct page *) region->mem_map));
-}
-
-struct mem_region *pfn_to_region(unsigned long pfn, int *index_out)
-{
- struct mem_region *region;
- int i;
-
- for(i = 0; i < NREGIONS; i++){
- region = regions[i];
- if(region == NULL)
- continue;
-
- if((region->start_pfn <= pfn) &&
- (region->start_pfn + (region->len >> PAGE_SHIFT) > pfn)){
- if(index_out != NULL)
- *index_out = i;
- return(region);
- }
- }
- return(NULL);
-}
-
-struct page *pfn_to_page(unsigned long pfn)
-{
- struct mem_region *region = pfn_to_region(pfn, NULL);
- struct page *mem_map = (struct page *) region->mem_map;
-
- return(&mem_map[pfn - region->start_pfn]);
-}
-
-unsigned long phys_to_pfn(unsigned long p)
-{
- struct mem_region *region = regions[phys_region_index(p)];
-
- return(region->start_pfn + (phys_addr(p) >> PAGE_SHIFT));
-}
-
-unsigned long pfn_to_phys(unsigned long pfn)
-{
- int n;
- struct mem_region *region = pfn_to_region(pfn, &n);
-
- return(mk_phys((pfn - region->start_pfn) << PAGE_SHIFT, n));
-}
-
-struct page *page_mem_map(struct page *page)
-{
- return((struct page *) page_region(page, NULL)->mem_map);
-}
-
-extern unsigned long region_pa(void *virt)
-{
- struct mem_region *region;
- unsigned long addr = (unsigned long) virt;
- int i;
-
- for(i = 0; i < NREGIONS; i++){
- region = regions[i];
- if(region == NULL) continue;
- if((region->start <= addr) &&
- (addr <= region->start + region->len))
- return(mk_phys(addr - region->start, i));
- }
- panic("region_pa : no region for virtual address");
- return(0);
-}
-
-extern void *region_va(unsigned long phys)
-{
- return((void *) (phys_region(phys)->start + phys_addr(phys)));
-}
+/* Changed by meminfo_compat, which is a setup */
+static int meminfo_22 = 0;
-unsigned long page_to_phys(struct page *page)
+static int meminfo_compat(char *str)
{
- int n;
- struct mem_region *region = page_region(page, &n);
- struct page *map = region->mem_map;
- return(mk_phys((page - map) << PAGE_SHIFT, n));
+ meminfo_22 = 1;
+ return(1);
}
-struct page *phys_to_page(unsigned long phys)
-{
- struct page *mem_map;
-
- mem_map = phys_mem_map(phys);
- return(mem_map + (phys_offset(phys) >> PAGE_SHIFT));
-}
+__setup("22_meminfo", meminfo_compat);
-static int setup_mem_maps(void)
+void si_meminfo(struct sysinfo *val)
{
- struct mem_region *region;
- int i;
-
- for(i = 0; i < NREGIONS; i++){
- region = regions[i];
- if((region != NULL) && (region->fd > 0)) init_maps(region);
+ val->totalram = totalram_pages;
+ val->sharedram = 0;
+ val->freeram = nr_free_pages();
+ val->bufferram = atomic_read(&buffermem_pages);
+ val->totalhigh = highmem >> PAGE_SHIFT;
+ val->freehigh = nr_free_highpages();
+ val->mem_unit = PAGE_SIZE;
+ if(meminfo_22){
+ val->freeram <<= PAGE_SHIFT;
+ val->bufferram <<= PAGE_SHIFT;
+ val->totalram <<= PAGE_SHIFT;
+ val->sharedram <<= PAGE_SHIFT;
}
- return(0);
}
-__initcall(setup_mem_maps);
-
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
diff --git a/arch/um/kernel/mem_user.c b/arch/um/kernel/mem_user.c
index cf17c0a..116c338 100644
--- a/arch/um/kernel/mem_user.c
+++ b/arch/um/kernel/mem_user.c
@@ -47,6 +47,7 @@
#include "init.h"
#include "os.h"
#include "tempfile.h"
+#include "kern_constants.h"
extern struct mem_region physmem_region;
@@ -76,57 +77,12 @@ int create_mem_file(unsigned long len)
return(fd);
}
-int setup_region(struct mem_region *region, void *entry)
-{
- void *loc, *start;
- char *driver;
- int err, offset;
-
- if(region->start != -1){
- err = reserve_vm(region->start,
- region->start + region->len, entry);
- if(err){
- printk("setup_region : failed to reserve "
- "0x%x - 0x%x for driver '%s'\n",
- region->start,
- region->start + region->len,
- region->driver);
- return(-1);
- }
- }
- else region->start = get_vm(region->len);
- if(region->start == 0){
- if(region->driver == NULL) driver = "physmem";
- else driver = region->driver;
- printk("setup_region : failed to find vm for "
- "driver '%s' (length %d)\n", driver, region->len);
- return(-1);
- }
- if(region->start == uml_physmem){
- start = (void *) uml_reserved;
- offset = uml_reserved - uml_physmem;
- }
- else {
- start = (void *) region->start;
- offset = 0;
- }
-
- if(offset >= region->len){
- printf("%d bytes of physical memory is insufficient\n",
- region->len);
- exit(1);
- }
- loc = mmap(start, region->len - offset, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_FIXED, region->fd, offset);
- if(loc != start){
- perror("Mapping memory");
- exit(1);
- }
- return(0);
-}
+struct iomem_region *iomem_regions = NULL;
+int iomem_size = 0;
static int __init parse_iomem(char *str, int *add)
{
+ struct iomem_region *new;
struct stat64 buf;
char *file, *driver;
int fd;
@@ -135,22 +91,40 @@ static int __init parse_iomem(char *str, int *add)
file = strchr(str,',');
if(file == NULL){
printf("parse_iomem : failed to parse iomem\n");
- return(1);
+ goto out;
}
*file = '\0';
file++;
fd = os_open_file(file, of_rdwr(OPENFLAGS()), 0);
if(fd < 0){
- printf("parse_iomem - Couldn't open io file, errno = %d\n",
- errno);
- return(1);
+ perror("parse_iomem - Couldn't open io file");
+ goto out;
}
- if(fstat64(fd, &buf) < 0) {
- printf("parse_iomem - cannot fstat file, errno = %d\n", errno);
- return(1);
+ if(fstat64(fd, &buf) < 0){
+ perror("parse_iomem - cannot fstat file");
+ goto out_close;
}
- add_iomem(driver, fd, buf.st_size);
+
+ new = malloc(sizeof(*new));
+ if(new == NULL){
+ perror("Couldn't allocate iomem_region struct");
+ goto out_close;
+ }
+
+ *new = ((struct iomem_region) { .next = iomem_regions,
+ .driver = driver,
+ .fd = fd,
+ .size = buf.st_size,
+ .phys = 0,
+ .virt = 0 });
+ iomem_regions = new;
+ iomem_size += new->size + UM_KERN_PAGE_SIZE;
+
return(0);
+ out_close:
+ close(fd);
+ out:
+ return(1);
}
__uml_setup("iomem=", parse_iomem,
@@ -158,43 +132,6 @@ __uml_setup("iomem=", parse_iomem,
" Configure <file> as an IO memory region named <name>.\n\n"
);
-#ifdef notdef
-int logging = 0;
-int logging_fd = -1;
-
-int logging_line = 0;
-char logging_buf[256];
-
-void log(char *fmt, ...)
-{
- va_list ap;
- struct timeval tv;
- struct openflags flags;
-
- if(logging == 0) return;
- if(logging_fd < 0){
- flags = of_create(of_trunc(of_rdrw(OPENFLAGS())));
- logging_fd = os_open_file("log", flags, 0644);
- }
- gettimeofday(&tv, NULL);
- sprintf(logging_buf, "%d\t %u.%u ", logging_line++, tv.tv_sec,
- tv.tv_usec);
- va_start(ap, fmt);
- vsprintf(&logging_buf[strlen(logging_buf)], fmt, ap);
- va_end(ap);
- write(logging_fd, logging_buf, strlen(logging_buf));
-}
-#endif
-
-int map_memory(unsigned long virt, unsigned long phys, unsigned long len,
- int r, int w, int x)
-{
- struct mem_region *region = phys_region(phys);
-
- return(os_map_memory((void *) virt, region->fd, phys_offset(phys), len,
- r, w, x));
-}
-
int protect_memory(unsigned long addr, unsigned long len, int r, int w, int x,
int must_succeed)
{
@@ -206,25 +143,6 @@ int protect_memory(unsigned long addr, unsigned long len, int r, int w, int x,
return(0);
}
-unsigned long find_iomem(char *driver, unsigned long *len_out)
-{
- struct mem_region *region;
- int i, n;
-
- n = nregions();
- for(i = 0; i < n; i++){
- region = regions[i];
- if(region == NULL) continue;
- if((region->driver != NULL) &&
- !strcmp(region->driver, driver)){
- *len_out = region->len;
- return(region->start);
- }
- }
- *len_out = 0;
- return 0;
-}
-
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
new file mode 100644
index 0000000..4bae24c
--- /dev/null
+++ b/arch/um/kernel/physmem.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
+ * Licensed under the GPL
+ */
+
+#include "linux/mm.h"
+#include "linux/ghash.h"
+#include "linux/slab.h"
+#include "linux/vmalloc.h"
+#include "linux/bootmem.h"
+#include "asm/types.h"
+#include "asm/pgtable.h"
+#include "kern_util.h"
+#include "user_util.h"
+#include "mode_kern.h"
+#include "mem.h"
+#include "mem_user.h"
+#include "os.h"
+#include "kern.h"
+#include "init.h"
+
+#define PHYS_HASHSIZE (8192)
+
+struct phys_desc;
+
+DEF_HASH_STRUCTS(virtmem, PHYS_HASHSIZE, struct phys_desc);
+DEF_HASH_STRUCTS(physmem, PHYS_HASHSIZE, struct phys_desc);
+
+struct phys_desc {
+ struct virtmem_ptrs virt_ptrs;
+ struct physmem_ptrs phys_ptrs;
+ int fd;
+ __u64 offset;
+ void *virt;
+ unsigned long phys;
+};
+
+struct virtmem_table virtmem_hash;
+
+/* Hash-table comparator for virtual addresses: 0 means equal. */
+static int virt_cmp(void *virt1, void *virt2)
+{
+	return(virt1 != virt2);
+}
+
+/* Hash a virtual address by its page frame, folded into the table size. */
+static int virt_hash(void *virt)
+{
+	unsigned long addr = ((unsigned long) virt) >> PAGE_SHIFT;
+	return(addr % PHYS_HASHSIZE);
+}
+
+DEF_HASH(static, virtmem, struct phys_desc, virt_ptrs, void *, virt, virt_cmp,
+ virt_hash);
+
+struct physmem_table physmem_hash;
+
+/* Hash-table comparator for physical addresses: 0 means equal. */
+static int phys_cmp(unsigned long phys1, unsigned long phys2)
+{
+	return(phys1 != phys2);
+}
+
+/* Hash a physical address by its page frame, folded into the table size. */
+static int phys_hash(unsigned long phys)
+{
+	return((phys >> PAGE_SHIFT) % PHYS_HASHSIZE);
+}
+
+DEF_HASH(static, physmem, struct phys_desc, phys_ptrs, unsigned long, phys,
+ phys_cmp, phys_hash);
+
+/* Host fd backing normal "physical" memory; set by setup_physmem(). */
+static int physmem_fd = -1;
+
+/* Map a page of the device given by fd/offset over the physical page
+ * backing 'virt', and record the substitution in both hashes so that
+ * phys_mapping() and physmem_remove_mapping() can find it later.
+ * 'w' makes the mapping writable.  Returns 0 on success, a negative
+ * error code otherwise.  Panics if the page is already substituted.
+ */
+int physmem_subst_mapping(void *virt, int fd, __u64 offset, int w)
+{
+	struct phys_desc *desc;
+	unsigned long phys;
+	int err;
+
+	virt = (void *) ((unsigned long) virt & PAGE_MASK);
+	err = os_map_memory(virt, fd, offset, PAGE_SIZE, 1, w, 0);
+	if(err)
+		goto out;
+
+	phys = __pa(virt);
+	if((find_physmem_hash(&physmem_hash, phys) != NULL) ||
+	   (find_virtmem_hash(&virtmem_hash, virt) != NULL))
+		panic("Address 0x%p is already substituted\n", virt);
+
+	err = -ENOMEM;
+	desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
+	if(desc == NULL)
+		goto out_unmap;
+
+	*desc = ((struct phys_desc) { .virt_ptrs	= { NULL, NULL },
+				      .phys_ptrs	= { NULL, NULL },
+				      .fd 		= fd,
+				      .offset		= offset,
+				      .virt		= virt,
+				      .phys		= phys });
+	insert_physmem_hash(&physmem_hash, desc);
+	insert_virtmem_hash(&virtmem_hash, desc);
+	return(0);
+ out_unmap:
+	/* BUG FIX: previously a failed descriptor allocation left the page
+	 * mapped to the device with no record of the substitution.  Restore
+	 * the normal physmem mapping before failing.
+	 */
+	os_map_memory(virt, physmem_fd, phys, PAGE_SIZE, 1, 1, 0);
+ out:
+	return(err);
+}
+
+/* Undo a physmem_subst_mapping(): remove the hash entries and remap the
+ * page from the physmem file so it contains ordinary memory again.
+ * Returns 1 if a substitution was removed, 0 if the page wasn't
+ * substituted.  Panics if the physical memory can't be remapped.
+ */
+int physmem_remove_mapping(void *virt)
+{
+	struct phys_desc *desc;
+	int err;
+
+	virt = (void *) ((unsigned long) virt & PAGE_MASK);
+	desc = find_virtmem_hash(&virtmem_hash, virt);
+	if(desc == NULL)
+		return(0);
+
+	remove_physmem_hash(&physmem_hash, desc);
+	remove_virtmem_hash(&virtmem_hash, desc);
+	kfree(desc);
+
+	/* The offset into the physmem file equals the physical address. */
+	err = os_map_memory(virt, physmem_fd, __pa(virt), PAGE_SIZE, 1, 1, 0);
+	if(err)
+		panic("Failed to unmap block device page from physical memory, "
+		      "errno = %d", err);
+	return(1);
+}
+
+/* Hook called when pages are freed (HAVE_ARCH_FREE_PAGE): tear down any
+ * device substitution on each page of the 2^order block so freed pages
+ * go back to being plain physical memory.
+ */
+void arch_free_page(struct page *page, int order)
+{
+	void *virt;
+	int i;
+
+	for(i = 0; i < 1 << order; i++){
+		virt = __va(page_to_phys(page + i));
+		physmem_remove_mapping(virt);
+	}
+}
+
+/* Return nonzero if 'virt' currently has a device page substituted over it. */
+int is_remapped(void *virt)
+{
+	return(find_virtmem_hash(&virtmem_hash, virt) != NULL);
+}
+
+/* Changed during early boot */
+unsigned long high_physmem;
+
+extern unsigned long physmem_size;
+
+/* Physical-to-virtual translation: physical memory is one contiguous
+ * mapping starting at uml_physmem, so the conversion is a plain offset.
+ */
+void *to_virt(unsigned long phys)
+{
+	return((void *) uml_physmem + phys);
+}
+
+/* Virtual-to-physical translation, inverse of to_virt(). */
+unsigned long to_phys(void *virt)
+{
+	return(((unsigned long) virt) - uml_physmem);
+}
+
+/* Allocate and initialize the mem_map array for 'len' bytes of physical
+ * memory, marking every page reserved.  Uses kmalloc/vmalloc once the
+ * slab is up (kmalloc_ok), the bootmem allocator otherwise.  Returns 0
+ * on success, -ENOMEM on allocation failure.
+ */
+int init_maps(unsigned long len)
+{
+	struct page *p, *map;
+	int i, n;
+
+	n = len >> PAGE_SHIFT;
+	len = n * sizeof(struct page);
+
+	if(kmalloc_ok){
+		map = kmalloc(len, GFP_KERNEL);
+		/* Fall back to vmalloc - the array may be too big for kmalloc. */
+		if(map == NULL) map = vmalloc(len);
+	}
+	else map = alloc_bootmem_low_pages(len);
+
+	if(map == NULL)
+		return(-ENOMEM);
+
+	for(i = 0; i < n; i++){
+		p = &map[i];
+		set_page_count(p, 0);
+		SetPageReserved(p);
+		INIT_LIST_HEAD(&p->list);
+	}
+
+	mem_map = map;
+	max_mapnr = n;
+	return(0);
+}
+
+/* Back-end for the virt_to_page() macro: index into mem_map by the page
+ * frame offset from the start of physical memory.  Assumes 'virt' is a
+ * direct-mapped kernel address - no bounds check is performed.
+ */
+struct page *__virt_to_page(const unsigned long virt)
+{
+	int index = (virt - uml_physmem) >> PAGE_SHIFT;
+
+	return(&mem_map[index]);
+}
+
+/* Inverse of __virt_to_page(): physical address of the page's memory. */
+unsigned long page_to_phys(struct page *page)
+{
+	int index = page - mem_map;
+
+	return(virt_to_phys((void *) (uml_physmem + (index << PAGE_SHIFT))));
+}
+
+/* Build a pte for 'page' with protections 'pgprot'.  Present ptes are
+ * additionally marked new-page/new-prot so UML's flush machinery knows
+ * the host mapping must be (re)established.
+ */
+pte_t mk_pte(struct page *page, pgprot_t pgprot)
+{
+	pte_t pte;
+
+	pte_val(pte) = page_to_phys(page) + pgprot_val(pgprot);
+	/* BUG FIX: pte_mknewprot()/pte_mknewpage() take the pte by value and
+	 * return the modified copy; the old code discarded the result, so
+	 * the NEWPAGE/NEWPROT bits were never actually set.
+	 */
+	if(pte_present(pte))
+		pte = pte_mknewprot(pte_mknewpage(pte));
+	return(pte);
+}
+
+/* Changed during early boot */
+static unsigned long kmem_top = 0;
+
+/* Return the end of kernel memory, computing it lazily on first call
+ * from the tt- or skas-mode value depending on the running mode.
+ */
+unsigned long get_kmem_end(void)
+{
+	if(kmem_top == 0)
+		kmem_top = CHOOSE_MODE(kmem_end_tt, kmem_end_skas);
+	return(kmem_top);
+}
+
+/* Map 'len' bytes at physical address 'phys' to virtual address 'virt'
+ * with the given r/w/x permissions, using whatever host fd/offset backs
+ * that physical address.  NOTE(review): if phys_mapping() finds no
+ * backing it returns fd == -1, which os_map_memory presumably rejects -
+ * the error is propagated to the caller.
+ */
+int map_memory(unsigned long virt, unsigned long phys, unsigned long len,
+	       int r, int w, int x)
+{
+	__u64 offset;
+	int fd;
+
+	fd = phys_mapping(phys, &offset);
+	return(os_map_memory((void *) virt, fd, offset, len, r, w, x));
+}
+
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+
+/* Create the host file backing physical memory, map the non-reserved part
+ * of it at uml_reserved, and hand the free pages above reserve_end to the
+ * bootmem allocator.  Exits on mapping failure.
+ * NOTE(review): the create_mem_file() return value is stored unchecked -
+ * assumed to abort internally on failure; confirm against its definition.
+ */
+void setup_physmem(unsigned long start, unsigned long reserve_end,
+		   unsigned long len)
+{
+	unsigned long reserve = reserve_end - start;
+	int pfn = PFN_UP(__pa(reserve_end));
+	int delta = (len - reserve) >> PAGE_SHIFT;
+	int err, offset, bootmap_size;
+
+	physmem_fd = create_mem_file(len);
+
+	/* The low 'offset' bytes (kernel text etc.) are already in place;
+	 * map only the remainder of the file over [uml_reserved, start+len).
+	 */
+	offset = uml_reserved - uml_physmem;
+	err = os_map_memory((void *) uml_reserved, physmem_fd, offset,
+			    len - offset, 1, 1, 0);
+	if(err){
+		perror("Mapping memory");
+		exit(1);
+	}
+
+	/* The bootmem bitmap itself sits at reserve_end; don't free it. */
+	bootmap_size = init_bootmem(pfn, pfn + delta);
+	free_bootmem(__pa(reserve_end) + bootmap_size,
+		     len - bootmap_size - reserve);
+}
+
+/* Translate a physical address into the host fd and file offset backing
+ * it: substituted pages map to their device, ordinary physical memory to
+ * the physmem file, and anything else is searched in the iomem regions.
+ * Returns the fd, or -1 if nothing backs the address (in which case
+ * *offset_out is untouched).
+ */
+int phys_mapping(unsigned long phys, __u64 *offset_out)
+{
+	struct phys_desc *desc = find_virtmem_hash(&virtmem_hash,
+						   __va(phys & PAGE_MASK));
+	int fd = -1;
+
+	if(desc != NULL){
+		fd = desc->fd;
+		*offset_out = desc->offset;
+	}
+	/* 'phys >= 0' dropped - phys is unsigned, so the test was always
+	 * true and only provoked a compiler warning.
+	 */
+	else if(phys < physmem_size){
+		fd = physmem_fd;
+		*offset_out = phys;
+	}
+	else {
+		struct iomem_region *region = iomem_regions;
+
+		while(region != NULL){
+			if((phys >= region->phys) &&
+			   (phys < region->phys + region->size)){
+				fd = region->fd;
+				*offset_out = phys - region->phys;
+				break;
+			}
+			region = region->next;
+		}
+	}
+	return(fd);
+}
+
+/* "mem=" command-line handler: parse the requested physical memory size
+ * (memparse understands k/K/m/M suffixes) into physmem_size.
+ */
+static int __init uml_mem_setup(char *line, int *add)
+{
+	char *retptr;
+	physmem_size = memparse(line,&retptr);
+	return 0;
+}
+__uml_setup("mem=", uml_mem_setup,
+"mem=<Amount of desired ram>\n"
+" This controls how much \"physical\" memory the kernel allocates\n"
+" for the system. The size is specified as a number followed by\n"
+" one of 'k', 'K', 'm', 'M', which have the obvious meanings.\n"
+" This is not related to the amount of memory in the host. It can\n"
+" be more, and the excess, if it's ever used, will just be swapped out.\n"
+" Example: mem=64M\n\n"
+);
+
+/* Return the virtual address of the iomem region registered under
+ * 'driver', storing its length in *len_out.  Returns 0 if no region
+ * matches (*len_out is then left untouched).
+ */
+unsigned long find_iomem(char *driver, unsigned long *len_out)
+{
+	struct iomem_region *region;
+
+	/* BUG FIX: the original while loop never advanced 'region', so any
+	 * lookup that didn't match the first region looped forever.
+	 */
+	for(region = iomem_regions; region != NULL; region = region->next){
+		if(!strcmp(region->driver, driver)){
+			*len_out = region->size;
+			return(region->virt);
+		}
+	}
+
+	return(0);
+}
+
+/* Initcall: map every registered iomem region's host file into the
+ * address space just above high_physmem, leaving a one-page gap between
+ * regions, and record each region's virtual and physical addresses.
+ * NOTE(review): on a mapping failure the region's virt/phys stay zero
+ * but iomem_start still advances - later lookups for that driver will
+ * see virt == 0; confirm callers tolerate this.
+ */
+int setup_iomem(void)
+{
+	struct iomem_region *region = iomem_regions;
+	unsigned long iomem_start = high_physmem + PAGE_SIZE;
+	int err;
+
+	while(region != NULL){
+		err = os_map_memory((void *) iomem_start, region->fd, 0,
+				    region->size, 1, 1, 0);
+		if(err)
+			printk("Mapping iomem region for driver '%s' failed, "
+			       "errno = %d\n", region->driver, err);
+		else {
+			region->virt = iomem_start;
+			region->phys = __pa(region->virt);
+		}
+
+		iomem_start += region->size + PAGE_SIZE;
+		region = region->next;
+	}
+
+	return(0);
+}
+
+__initcall(setup_iomem);
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
diff --git a/arch/um/kernel/skas/mem_user.c b/arch/um/kernel/skas/mem_user.c
index d163090..f6f2957 100644
--- a/arch/um/kernel/skas/mem_user.c
+++ b/arch/um/kernel/skas/mem_user.c
@@ -7,6 +7,7 @@
#include <sys/mman.h>
#include <sys/ptrace.h>
#include "mem_user.h"
+#include "mem.h"
#include "user.h"
#include "os.h"
#include "proc_mm.h"
@@ -15,12 +16,12 @@ void map(int fd, unsigned long virt, unsigned long phys, unsigned long len,
int r, int w, int x)
{
struct proc_mm_op map;
- struct mem_region *region;
- int prot, n;
+ __u64 offset;
+ int prot, n, phys_fd;
prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
(x ? PROT_EXEC : 0);
- region = phys_region(phys);
+ phys_fd = phys_mapping(phys, &offset);
map = ((struct proc_mm_op) { .op = MM_MMAP,
.u =
@@ -30,8 +31,8 @@ void map(int fd, unsigned long virt, unsigned long phys, unsigned long len,
.prot = prot,
.flags = MAP_SHARED |
MAP_FIXED,
- .fd = region->fd,
- .offset = phys_offset(phys)
+ .fd = phys_fd,
+ .offset = offset
} } } );
n = os_write_file(fd, &map, sizeof(map));
if(n != sizeof(map))
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 50de3cb..57b42ba 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -57,10 +57,10 @@ unsigned long thread_saved_pc(struct thread_struct *thread)
static int show_cpuinfo(struct seq_file *m, void *v)
{
- int index;
+ int index = 0;
- index = (struct cpuinfo_um *)v - cpu_data;
#ifdef CONFIG_SMP
+ index = (struct cpuinfo_um *)v - cpu_data;
if (!(cpu_online_map & (1 << index)))
return 0;
#endif
@@ -298,7 +298,7 @@ static void __init uml_postsetup(void)
/* Set during early boot */
unsigned long brk_start;
-static struct vm_reserved kernel_vm_reserved;
+unsigned long end_iomem;
#define MIN_VMALLOC (32 * 1024 * 1024)
@@ -306,7 +306,7 @@ int linux_main(int argc, char **argv)
{
unsigned long avail;
unsigned long virtmem_size, max_physmem;
- unsigned int i, add, err;
+ unsigned int i, add;
for (i = 1; i < argc; i++){
if((i == 1) && (argv[i][0] == ' ')) continue;
@@ -335,8 +335,6 @@ int linux_main(int argc, char **argv)
argv1_end = &argv[1][strlen(argv[1])];
#endif
- set_usable_vm(uml_physmem, get_kmem_end());
-
highmem = 0;
max_physmem = get_kmem_end() - uml_physmem - MIN_VMALLOC;
if(physmem_size > max_physmem){
@@ -351,10 +349,17 @@ int linux_main(int argc, char **argv)
high_physmem = uml_physmem + physmem_size;
high_memory = (void *) high_physmem;
+ end_iomem = high_physmem + PAGE_SIZE + iomem_size;
start_vm = VMALLOC_START;
setup_physmem(uml_physmem, uml_reserved, physmem_size);
+ if(init_maps(physmem_size)){
+ printf("Failed to allocate mem_map for %ld bytes of physical "
+ "memory\n", physmem_size);
+ exit(1);
+ }
+
virtmem_size = physmem_size;
avail = get_kmem_end() - start_vm;
if(physmem_size > avail) virtmem_size = avail;
@@ -364,12 +369,6 @@ int linux_main(int argc, char **argv)
printf("Kernel virtual memory size shrunk to %ld bytes\n",
virtmem_size);
- err = reserve_vm(high_physmem, end_vm, &kernel_vm_reserved);
- if(err){
- printf("Failed to reserve VM area for kernel VM\n");
- exit(1);
- }
-
uml_postsetup();
init_task.thread.kernel_stack = (unsigned long) &init_task +
diff --git a/include/asm-um/page.h b/include/asm-um/page.h
index 35c8aac..809b6bd 100644
--- a/include/asm-um/page.h
+++ b/include/asm-um/page.h
@@ -1,3 +1,8 @@
+/*
+ * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
+ * Licensed under the GPL
+ */
+
#ifndef __UM_PAGE_H
#define __UM_PAGE_H
@@ -35,19 +40,29 @@ extern void stop(void);
#define __va_space (8*1024*1024)
-extern unsigned long region_pa(void *virt);
-extern void *region_va(unsigned long phys);
-
-#define __pa(virt) region_pa((void *) (virt))
-#define __va(phys) region_va((unsigned long) (phys))
-
-extern struct page *page_mem_map(struct page *page);
+extern unsigned long to_phys(void *virt);
+extern void *to_virt(unsigned long phys);
-extern struct page *pfn_to_page(unsigned long pfn);
+#define __pa(virt) to_phys((void *) virt)
+#define __va(phys) to_virt((unsigned long) phys)
-#define VALID_PAGE(page) (page_mem_map(page) != NULL)
+#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
extern struct page *arch_validate(struct page *page, int mask, int order);
#define HAVE_ARCH_VALIDATE
+extern void arch_free_page(struct page *page, int order);
+#define HAVE_ARCH_FREE_PAGE
+
#endif
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h
index 1222762..a226925 100644
--- a/include/asm-um/pgtable.h
+++ b/include/asm-um/pgtable.h
@@ -82,10 +82,10 @@ extern unsigned long *empty_zero_page;
* area for the same reason. ;)
*/
-extern unsigned long high_physmem;
+extern unsigned long end_iomem;
#define VMALLOC_OFFSET (__va_space)
-#define VMALLOC_START (((unsigned long) high_physmem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
+#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#if CONFIG_HIGHMEM
@@ -161,7 +161,8 @@ extern pte_t * __bad_pagetable(void);
#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))
@@ -182,9 +183,6 @@ extern pte_t * __bad_pagetable(void);
#define pte_clear(xp) do { pte_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
-#define phys_region_index(x) (((x) & REGION_MASK) >> REGION_SHIFT)
-#define pte_region_index(x) phys_region_index(pte_val(x))
-
#define pmd_none(x) (!(pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
@@ -205,18 +203,11 @@ static inline void pgd_clear(pgd_t * pgdp) { }
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-extern struct page *pte_mem_map(pte_t pte);
-extern struct page *phys_mem_map(unsigned long phys);
-extern unsigned long phys_to_pfn(unsigned long p);
+#define pte_page(pte) virt_to_page(__va(pte_val(pte)))
+#define pmd_page(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_address(x) (__va(pte_val(x) & PAGE_MASK))
-#define mk_phys(a, r) ((a) + (r << REGION_SHIFT))
-#define phys_addr(p) ((p) & ~REGION_MASK)
-#define phys_page(p) (phys_mem_map(p) + ((phys_addr(p)) >> PAGE_SHIFT))
-#define virt_to_page(kaddr) \
- (phys_mem_map(__pa(kaddr)) + (phys_addr(__pa(kaddr)) >> PAGE_SHIFT))
-#define pte_pfn(x) phys_to_pfn(pte_val(x))
+extern struct page *__virt_to_page(const unsigned long virt);
+#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
static inline pte_t pte_mknewprot(pte_t pte)
{
@@ -350,18 +341,13 @@ extern unsigned long page_to_phys(struct page *page);
* and a page entry and page directory to the page they refer to.
*/
-#define mk_pte(page, pgprot) \
-({ \
- pte_t __pte; \
- \
- pte_val(__pte) = page_to_phys(page) + pgprot_val(pgprot);\
- if(pte_present(__pte)) pte_mknewprot(pte_mknewpage(__pte)); \
- __pte; \
-})
+extern pte_t mk_pte(struct page *page, pgprot_t pgprot);
-/* This takes a physical page address that is used by the remapping functions */
-#define mk_pte_phys(physpage, pgprot) \
- pte_mknewpage(mk_pte(phys_page(physpage), pgprot))
+/* This takes a physical page address that is used by the remapping
+ * functions
+ */
+#define mk_pte_phys(phys, pgprot) \
+ (pte_mknewpage(mk_pte(virt_to_page(__va(phys)), pgprot)))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
@@ -370,8 +356,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
return pte;
}
-#define pmd_page(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
/* to find an entry in a page-table-directory. */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address) pgd_index(address)
@@ -394,7 +378,7 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
/* Find an entry in the third-level page table.. */
#define pte_offset(pmd, address) \
-((pte_t *) (pmd_page(*pmd) + ((address>>10) & ((PTRS_PER_PTE-1)<<2))))
+ ((pte_t *) (pmd_page(*pmd) + ((address>>10) & ((PTRS_PER_PTE-1)<<2))))
#define update_mmu_cache(vma,address,pte) do ; while (0)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6fbaba6..5096382 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -433,6 +433,10 @@ static inline struct page *arch_validate(struct page *page,
}
#endif
+#ifndef HAVE_ARCH_FREE_PAGE
+static inline void arch_free_page(struct page *page, int order) { }
+#endif
+
static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order)
{
/*