aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorClaudio Imbrenda <imbrenda@linux.ibm.com>2020-07-06 18:43:24 +0200
committerPaolo Bonzini <pbonzini@redhat.com>2020-07-06 13:03:52 -0400
commit0d622fcb2ba0ac8e9c7f7a282badac694a3592c5 (patch)
tree47663daadf773b746f457363355dc435f169ea21
parent9e801bd9d2d4b37bfad26123bcd58548adb0d82c (diff)
downloadkvm-unit-tests-0d622fcb2ba0ac8e9c7f7a282badac694a3592c5.tar.gz
lib/vmalloc: allow vm_memalign with alignment > PAGE_SIZE
Allow allocating aligned virtual memory with alignment larger than only one page. Add a check that the backing pages were actually allocated. Export the alloc_vpages_aligned function to allow users to allocate non-backed aligned virtual addresses. Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com> Reviewed-by: Andrew Jones <drjones@redhat.com> Message-Id: <20200706164324.81123-5-imbrenda@linux.ibm.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--lib/vmalloc.c35
-rw-r--r--lib/vmalloc.h3
2 files changed, 30 insertions, 8 deletions
diff --git a/lib/vmalloc.c b/lib/vmalloc.c
index 9237a0f..e0c7b6b 100644
--- a/lib/vmalloc.c
+++ b/lib/vmalloc.c
@@ -12,19 +12,28 @@
#include "alloc.h"
#include "alloc_phys.h"
#include "alloc_page.h"
+#include <bitops.h>
#include "vmalloc.h"
static struct spinlock lock;
static void *vfree_top = 0;
static void *page_root;
-void *alloc_vpages(ulong nr)
+/*
+ * Allocate a certain number of pages from the virtual address space (without
+ * physical backing).
+ *
+ * nr is the number of pages to allocate
+ * align_order is the log2 (order) of the required alignment, in pages;
+ * the returned address is aligned to 2^align_order pages
+ */
+void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
{
uintptr_t ptr;
spin_lock(&lock);
ptr = (uintptr_t)vfree_top;
ptr -= PAGE_SIZE * nr;
+ ptr &= GENMASK_ULL(63, PAGE_SHIFT + align_order);
vfree_top = (void *)ptr;
spin_unlock(&lock);
@@ -32,6 +41,11 @@ void *alloc_vpages(ulong nr)
return (void *)ptr;
}
+void *alloc_vpages(ulong nr)
+{
+ return alloc_vpages_aligned(nr, 0);
+}
+
void *alloc_vpage(void)
{
return alloc_vpages(1);
@@ -55,17 +69,22 @@ void *vmap(phys_addr_t phys, size_t size)
return mem;
}
+/*
+ * Allocate virtual memory with the specified minimum alignment.
+ */
static void *vm_memalign(size_t alignment, size_t size)
{
+ phys_addr_t pa;
void *mem, *p;
- size_t pages;
- assert(alignment <= PAGE_SIZE);
- size = PAGE_ALIGN(size);
- pages = size / PAGE_SIZE;
- mem = p = alloc_vpages(pages);
- while (pages--) {
- phys_addr_t pa = virt_to_phys(alloc_page());
+ assert(is_power_of_2(alignment));
+
+ size = PAGE_ALIGN(size) / PAGE_SIZE;
+ alignment = get_order(PAGE_ALIGN(alignment) / PAGE_SIZE);
+ mem = p = alloc_vpages_aligned(size, alignment);
+ while (size--) {
+ pa = virt_to_phys(alloc_page());
+ assert(pa);
install_page(page_root, pa, p);
p += PAGE_SIZE;
}
diff --git a/lib/vmalloc.h b/lib/vmalloc.h
index 2b563f4..8b158f5 100644
--- a/lib/vmalloc.h
+++ b/lib/vmalloc.h
@@ -5,6 +5,9 @@
/* Allocate consecutive virtual pages (without backing) */
extern void *alloc_vpages(ulong nr);
+/* Allocate consecutive and aligned virtual pages (without backing) */
+extern void *alloc_vpages_aligned(ulong nr, unsigned int align_order);
+
/* Allocate one virtual page (without backing) */
extern void *alloc_vpage(void);
/* Set the top of the virtual address space */