author		Luis R. Rodriguez <mcgrof@kernel.org>	2016-06-03 11:58:05 -0700
committer	Luis R. Rodriguez <mcgrof@kernel.org>	2016-06-23 15:23:28 -0700
commit		5fe2db96e27cc2c651b4937603faf3dd78c730ed (patch)
tree		0286a12422d57b6c8f05158705dd9ac321821edc
parent		1487d7f18655be4bb39c3b667bd66509b1cef35d (diff)
download	linker-tables-5fe2db96e27cc2c651b4937603faf3dd78c730ed.tar.gz
x86: update to account for feedback
This updates the x86 init tables to account for some initial feedback
after the v2 series. In particular, some highlights:

o Removed run time sorting: this is not needed yet.

o Since we now have only 2 declarers:

	DECLARE_LINKTABLE()      -- const
	DECLARE_LINKTABLE_DATA() -- non-const

  we use DECLARE_LINKTABLE_DATA() for the table, and the definition
  actually pegs it to the right section.

o We drop x86_init_fn_setup_arch() and x86_init_fn_late_init() as these
  are better evolved over time with upstream. The point is made and I
  think it's understood what the goal is here.

o We drop the char *name from struct x86_init_fn to match the upstream
  kernel use. The kernel can get the name using %pF, but we can't since
  libc stdio printf does not support this. Maybe we can port this
  somehow... but I can't see how. For now just comment out the %pF uses
  and deal with it by cluttering our init calls so we know what
  triggered where. For instance the kernel can use the following, but
  we'll comment this sort of thing out:

	pr_err("Init sequence fails to declare any supported subarchs: %pF\n",
	       fn->early_init);

o The X86_INIT_EARLY_*() macros are simplified to only provide what we
  need, and we also use lower case, so x86_init_early_pc(), etc.

o The Xen macro x86_init_early_xen() is added but won't be part of the
  v3 upstream submission. It will be used later, but we have to
  reconsider a different order level for it, perhaps a hypervisor order
  level. That can only happen *iff* we can get the load_idt() issue
  addressed. As it stands we can't use the subarch in
  x86_64_start_kernel() prior to load_idt(); our userspace demo here
  uses it as that's the goal, but in practice this doesn't work yet on
  x86 kernels -- we can only access the subarch after load_idt(), so
  the upstream v3 submission will call x86_init_fn_early_init() from
  x86_64_start_reservations().

Signed-off-by: Luis R. Rodriguez <mcgrof@kernel.org>
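For reference, a minimal sketch of what registration looks like under the new
scheme, mirroring the alpha.c demo in this patch (all names below come from
the patch itself):

	#include <linux/kernel.h>
	#include <asm/x86_init_fn.h>

	/* Early init routine for a feature that only supports the PC subarch. */
	static void early_init_alpha(void)
	{
		pr_info("Initializing alpha ...\n");
	}

	/* Pegs the routine into the x86_init_fns linker table at
	 * X86_INIT_ORDER_EARLY, restricted to BIT(X86_SUBARCH_PC).
	 */
	x86_init_early_pc(early_init_alpha);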
-rw-r--r--	Makefile				  4
-rw-r--r--	alpha.c					  7
-rw-r--r--	arch/x86/include/asm/x86_init_fn.h	256
-rw-r--r--	arch/x86/kernel/head64.c		  6
-rw-r--r--	arch/x86/kernel/init.c			 56
-rw-r--r--	arch/x86/kernel/sort-init.c		114
-rw-r--r--	beta.c					  7
-rw-r--r--	driver.c				  9
-rw-r--r--	kasan.c					 15
-rw-r--r--	kprobes.c				  7
-rw-r--r--	memory.c				 14
-rw-r--r--	pci.c					 10
-rw-r--r--	xen-driver.c				  6
-rw-r--r--	xen.c					  5
14 files changed, 144 insertions, 372 deletions
diff --git a/Makefile b/Makefile
index 45a28b5..a984c59 100644
--- a/Makefile
+++ b/Makefile
@@ -38,10 +38,10 @@ parse-bzimage: parse-bzimage.c
# *(SORT(SECTION_TBL_ALL(SECTION_INIT_DATA)))
#
# The name of the struct places no effect to the actual linker order.
-OBJS = arch/x86/kernel/sort-init.o \
+OBJS = \
arch/x86/kernel/head64.c \
- memory.o \
kasan.o\
+ memory.o \
arch/x86/kernel/init.o \
kernel/locking/mutex.o \
kernel/locking/spinlock.o \
diff --git a/alpha.c b/alpha.c
index 1130def..0f9d457 100644
--- a/alpha.c
+++ b/alpha.c
@@ -1,10 +1,9 @@
-#include <stdio.h>
-#include <unistd.h>
-
#include <linux/kernel.h>
#include <asm/x86_init_fn.h>
static void early_init_alpha(void) {
+ pr_info("Initializing alpha ...\n");
+ pr_info("Completed initializing alpha !\n");
}
-X86_INIT_EARLY_PC(alpha, NULL, NULL, early_init_alpha, NULL, NULL);
+x86_init_early_pc(early_init_alpha);
diff --git a/arch/x86/include/asm/x86_init_fn.h b/arch/x86/include/asm/x86_init_fn.h
index 1651815..c035a89 100644
--- a/arch/x86/include/asm/x86_init_fn.h
+++ b/arch/x86/include/asm/x86_init_fn.h
@@ -2,21 +2,20 @@
#define __X86_INIT_TABLES_H
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/tables.h>
-#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
#include <asm/bootparam.h>
/**
* struct x86_init_fn - x86 generic kernel init call
*
- * Linux x86 features vary in complexity, features may require work
- * done at different levels of the full x86 init sequence. Today there
- * are also two different possible entry points for Linux on x86, one for
- * bare metal, KVM and Xen HVM, and another for Xen PV guests / dom0.
- * Assuming bootloader has set up 64-bit mode, roughly the x86 init sequence
- * follows this path:
+ * Linux x86 features vary in complexity, features may require work done at
+ * different levels of the full x86 init sequence. Today there are also two
+ * different possible entry points for Linux on x86, one for bare metal, KVM
+ * and Xen HVM, and another for Xen PV guests / dom0. Assuming a bootloader
+ * has set up 64-bit mode, roughly the x86 init sequence follows this path:
*
* Bare metal, KVM, Xen HVM Xen PV / dom0
* startup_64() startup_xen()
@@ -34,28 +33,43 @@
* x86_64_start_kernel() and xen_start_kernel() are the respective first C code
* entry starting points. The different entry points exist to enable Xen to
* skip a lot of hardware setup already done and managed on behalf of the
- * hypervisor. The different levels of init calls exist to account for these
- * slight differences but also share a common entry x86 specific path,
- * x86_64_start_reservations().
+ * hypervisor; we refer to this as "paravirtualization yielding". The different
+ * levels of init calls on the x86 init sequence exist to account for these
+ * slight differences and requirements. These different entry points also share
+ * a common entry x86 specific path, x86_64_start_reservations().
*
- * A generic x86 feature can have different initialization calls, one on
- * each different init sequence, but must also address both entry points in
- * order to work properly across the board on all supported x86
- * subarchitectures. x86 features can also have dependencies on other setup
- * code or features. Using struct x86_init_fn x86 feature developers must
- * annotate supported subarchitectures, dependencies and also declare
- * a two-digit decimal number to impose an ordering relative to other
- * features when required.
+ * A generic x86 feature can have different initialization calls, one on each
+ * of the main x86 init sequences, but must also address both entry points in
+ * order to work properly across the board on all supported x86
+ * subarchitectures. Since x86 features can also have dependencies on other
+ * setup code or features, x86 features can at times be subordinate to other
+ * x86 features or conditions. struct x86_init_fn enables feature developers
+ * to annotate dependency relationships to ensure subsequent init calls only
+ * run once a subordinate's dependencies have run. When needed, custom
+ * dependency requirements can also be spelled out through a custom dependency
+ * checker. To account for the dual entry point nature of x86-64 Linux for
+ * "paravirtualization yielding", and to make annotations of support for these
+ * explicit, each struct x86_init_fn must specify its supported
+ * subarchitectures. The earliest x86-64 code can read the subarchitecture,
+ * though, is after load_idt(); as such the earliest we can currently rely on
+ * the subarchitecture for semantics and a common init sequence is the shared
+ * x86_64_start_reservations(). Each struct x86_init_fn is associated with a
+ * specific link order number which has been carefully thought out by x86
+ * maintainers. You should pick the link order level associated with the
+ * specific directory your code lies in; a respective macro is used to
+ * associate a routine with a link order, so you should use one of the
+ * provided x86_init_*() macros. You should not use __x86_init() directly.
*
- * @order_level: linker order level, this corresponds to the table
- * section sub-table index, we record this only for semantic
- * validation purposes.
- * @supp_hardware_subarch: Must be set, it represents the bitmask of supported
+ * x86_init_fn enables strong semantics and dependencies to be defined and
+ * implemented on the full x86 initialization sequence.
+ *
+ * @supp_hardware_subarch: must be set, it represents the bitmask of supported
* subarchitectures. We require each struct x86_init_fn to have this set
* to require developer considerations for each supported x86
* subarchitecture and to build strong annotations of different possible
* run time states particularly in consideration for the two main
- * different entry points for x86 Linux.
+ * different entry points for x86 Linux, to account for paravirtualization
+ * yielding.
*
* The subarchitecture is read by the kernel at early boot from the
* struct boot_params hardware_subarch. Support for the subarchitecture
@@ -84,155 +98,79 @@
*
* BIT(X86_SUBARCH_PC) |
* BIT(X86_SUBARCH_XEN);
- * @detect: if set returns true if the feature has been detected to be
- * required, it returns false if the feature has been detected to
- * not be required.
- * @depend: if set this set of init routines must be called prior to the
- * init routine who's respective detect routine we have set this
- * depends callback to. This is only used for sorting purposes.
- * If you do not have a depend callback set its assumed the order level
- * (__x86_init_fn(level)) set by the init routine suffices to set the order
- * for when the feature's respective callbacks are called with respect to
- * other calls. Sorting of init calls between on the same order level is
- * determined by linker order, determined by order listed on the Makefile.
- * @early_init: if set would be run during before x86_64_start_reservations().
- * Memory is not yet available.
- * @setup_arch: if set would be run during setup_arch().
- * @late_init: if set would be run right before init is spawned. You can count
- * on memory being set up.
- * @flags: private internal flags
+ *
+ * @early_init: required, routine which will run in x86_64_start_reservations()
+ * after we ensure boot_params.hdr.hardware_subarch is accessible and
+ * properly set. Memory is not yet available. This is the earliest we can
+ * currently define a common shared callback since all callbacks need to
+ * check boot_params.hdr.hardware_subarch, and this only becomes
+ * accessible on x86-64 after load_idt().
*/
struct x86_init_fn {
- __u32 order_level;
__u32 supp_hardware_subarch;
- bool (*detect)(void);
- bool (*depend)(void);
void (*early_init)(void);
- void (*setup_arch)(void);
- void (*late_init)(void);
- const char *name;
- __u32 flags;
};
-/**
- * enum x86_init_fn_flags: private flags for init sequences
+DECLARE_LINKTABLE_DATA(struct x86_init_fn, x86_init_fns);
+
+/* Init order levels, we can start at 0000 but reserve 0000-0999 for now */
+
+/*
+ * X86_INIT_ORDER_EARLY - early kernel init code
*
- * INIT_FINISH_IF_DETECTED: tells the core that once this init sequence
- * has completed it can break out of the loop for init sequences on
- * its own level.
- * INIT_DETECTED: the x86 core has determined that this
- * init sequence has been detected and it all of its callbacks
- * must be run during initialization.
+ * This consists of the first parts of the Linux kernel executed.
*/
-enum x86_init_fn_flags {
- INIT_FINISH_IF_DETECTED = BIT(0),
- INIT_DETECTED = BIT(1),
-};
+#define X86_INIT_ORDER_EARLY 1000
-DECLARE_LINKTABLE_DATA(struct x86_init_fn, x86_init_fns);
+/* X86_INIT_ORDER_PLATFORM - platform kernel code
+ *
+ * Code the kernel needs to initialize under arch/x86/platform/
+ * early in boot.
+ */
+#define X86_INIT_ORDER_PLATFORM 3000
-/* Init order levels, we can start at 01 but reserve 01-09 for now */
-#define X86_INIT_ORDER_EARLY 10
-#define X86_INIT_ORDER_NORMAL 30
-#define X86_INIT_ORDER_LATE 50
+/*
+ * Use LTO_REFERENCE_INITCALL just in case of issues with old versions of gcc.
+ * This might not be needed for linker tables due to how we compartmentalize
+ * sections and then order them at linker time, but just in case.
+ */
-#define X86_INIT(__name, \
- __level, \
- __supp_hardware_subarch, \
- __detect, \
- __depend, \
- __early_init, \
- __setup_arch, \
- __late_init) \
- static LINKTABLE_INIT_DATA(x86_init_fns, __level) \
+#define __x86_init(__level, \
+ __supp_hardware_subarch, \
+ __early_init) \
+ static LINKTABLE_INIT_DATA(x86_init_fns, __level) \
__x86_init_fn_##__early_init = { \
- .order_level = __level, \
.supp_hardware_subarch = __supp_hardware_subarch, \
- .detect = __detect, \
- .depend = __depend, \
.early_init = __early_init, \
- .setup_arch = __setup_arch, \
- .late_init = __late_init, \
- .name = #__name, \
- };
-
-#define X86_INIT_EARLY(__name, \
- __supp_hardware_subarch, \
- __detect, \
- __depend, \
- __early_init, \
- __setup_arch, \
- __late_init) \
- X86_INIT(__name, X86_INIT_ORDER_EARLY, __supp_hardware_subarch, \
- __detect, __depend, \
- __early_init, __setup_arch, __late_init);
-
-#define X86_INIT_NORMAL(__name, \
- __supp_hardware_subarch, \
- __detect, \
- __depend, \
- __early_init, \
- __setup_arch, \
- __late_init) \
- X86_INIT(__name, X86_INIT_ORDER_NORMAL, __supp_hardware_subarch,\
- __detect, __depend, \
- __early_init, __setup_arch, __late_init);
-
-#define X86_INIT_EARLY_ALL(__name, \
- __detect, \
- __depend, \
- __early_init, \
- __setup_arch, \
- __late_init) \
- X86_INIT_EARLY(__name, X86_SUBARCH_ALL_SUBARCHS, \
- __detect, __depend, \
- __early_init, __setup_arch, __late_init);
-
-#define X86_INIT_EARLY_PC(__name, \
- __detect, \
- __depend, \
- __early_init, \
- __setup_arch, \
- __late_init) \
- X86_INIT_EARLY(__name, BIT(X86_SUBARCH_PC), \
- __detect, __depend, \
- __early_init, __setup_arch, __late_init);
-
-#define X86_INIT_NORMAL_ALL(__name, \
- __detect, \
- __depend, \
- __early_init, \
- __setup_arch, \
- __late_init) \
- X86_INIT_NORMAL(__name, X86_SUBARCH_ALL_SUBARCHS, \
- __detect, __depend, \
- __early_init, __setup_arch, __late_init);
-
-#define X86_INIT_NORMAL_PC(__name, \
- __detect, \
- __depend, \
- __early_init, \
- __setup_arch, \
- __late_init) \
- X86_INIT_NORMAL(__name, BIT(X86_SUBARCH_PC), \
- __detect, __depend, \
- __early_init, __setup_arch, __late_init);
-
-
-#define X86_INIT_NORMAL_XEN(__name, \
- __detect, \
- __depend, \
- __early_init, \
- __setup_arch, \
- __late_init) \
- X86_INIT_NORMAL(__name, BIT(X86_SUBARCH_XEN), \
- __detect, __depend, \
- __early_init, __setup_arch, __late_init);
-
+ }; \
+ LTO_REFERENCE_INITCALL(__x86_init_fn_##__early_init);
+
+#define x86_init_early(__supp_hardware_subarch, \
+ __early_init) \
+ __x86_init(X86_INIT_ORDER_EARLY, __supp_hardware_subarch, \
+ __early_init);
+
+#define x86_init_platform(__supp_hardware_subarch, \
+ __early_init) \
+	__x86_init(X86_INIT_ORDER_PLATFORM, __supp_hardware_subarch,	\
+ __early_init);
+
+#define x86_init_early_all(__early_init) \
+ x86_init_early(X86_SUBARCH_ALL_SUBARCHS, \
+ __early_init);
+
+#define x86_init_early_pc(__early_init) \
+ x86_init_early(BIT(X86_SUBARCH_PC), \
+ __early_init);
+
+#define x86_init_early_xen(__early_init) \
+ x86_init_early(BIT(X86_SUBARCH_XEN), \
+ __early_init);
+/**
+ * x86_init_fn_early_init: call all early_init() callbacks
+ *
+ * This calls all early_init() callbacks on the x86_init_fns linker table.
+ */
void x86_init_fn_early_init(void);
-void x86_init_fn_setup_arch(void);
-void x86_init_fn_late_init(void);
-
-void x86_init_fn_init_tables(void);
#endif /* __X86_INIT_TABLES_H */
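A usage note beyond the per-subarch helpers above: a feature that must run on
more than one subarch can OR the BIT(X86_SUBARCH_*) values together and use
x86_init_early() directly, as the kernel-doc example suggests. A minimal
sketch, with early_init_foo() being a hypothetical routine:

	#include <linux/kernel.h>
	#include <asm/x86_init_fn.h>

	static void early_init_foo(void)
	{
		pr_info("foo: early init\n");
	}

	/* Runs on both the PC and Xen PV subarchs. */
	x86_init_early(BIT(X86_SUBARCH_PC) | BIT(X86_SUBARCH_XEN),
		       early_init_foo);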
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index a3eb8de..d14dcdf 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -1,3 +1,4 @@
+#include <linux/kernel.h>
#include <linux/tables.h>
#include <asm/x86_init_fn.h>
@@ -35,7 +36,6 @@ void x86_64_start_reservations(void)
static void x86_64_start_kernel(void)
{
- x86_init_fn_init_tables();
x86_init_fn_early_init();
x86_64_start_reservations();
@@ -49,10 +49,10 @@ void startup_64(void)
void setup_arch(void)
{
- x86_init_fn_setup_arch();
+ /* TODO: x86_init_fn_setup_arch(); */
}
void late_init(void)
{
- x86_init_fn_late_init();
+ /* TODO: x86_init_fn_late_init(); */
}
diff --git a/arch/x86/kernel/init.c b/arch/x86/kernel/init.c
index e7ed26f..2270236 100644
--- a/arch/x86/kernel/init.c
+++ b/arch/x86/kernel/init.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) "x86-init: " fmt
+
#include <linux/bug.h>
#include <linux/kernel.h>
@@ -11,7 +13,7 @@ DEFINE_LINKTABLE_INIT_DATA(struct x86_init_fn, x86_init_fns);
static bool x86_init_fn_supports_subarch(struct x86_init_fn *fn)
{
if (!fn->supp_hardware_subarch) {
- pr_info("Init sequence fails to declares supported subarchs: %s\n", fn->name);
+		//pr_err("Init sequence fails to declare any supported subarchs: %pF\n", fn->early_init);
WARN_ON(1);
}
if (BIT(boot_params.hdr.hardware_subarch) & fn->supp_hardware_subarch)
@@ -19,58 +21,22 @@ static bool x86_init_fn_supports_subarch(struct x86_init_fn *fn)
return false;
}
-void x86_init_fn_early_init(void)
+void __ref x86_init_fn_early_init(void)
{
- int ret;
struct x86_init_fn *init_fn;
-
unsigned int num_inits = LINUX_SECTION_SIZE(x86_init_fns);
- pr_info("Number of init entries: %d\n", num_inits);
+ if (!num_inits)
+ return;
+
+ pr_debug("Number of init entries: %d\n", num_inits);
LINKTABLE_FOR_EACH(init_fn, x86_init_fns) {
if (!x86_init_fn_supports_subarch(init_fn))
continue;
- if (!init_fn->detect)
- init_fn->flags |= INIT_DETECTED;
- else {
- ret = init_fn->detect();
- if (ret > 0)
- init_fn->flags |= INIT_DETECTED;
- }
-
- if (init_fn->flags & INIT_DETECTED) {
- init_fn->flags |= INIT_DETECTED;
- pr_info("Initializing %s ...\n", init_fn->name);
- init_fn->early_init();
- pr_info("Completed initializing %s !\n", init_fn->name);
- if (init_fn->flags & INIT_FINISH_IF_DETECTED)
- break;
- }
- }
-}
-
-void x86_init_fn_late_init(void)
-{
- struct x86_init_fn *init_fn;
- LINKTABLE_FOR_EACH(init_fn, x86_init_fns) {
- if ((init_fn->flags & INIT_DETECTED) && init_fn->late_init) {
- pr_info("Running late init for %s ...\n", init_fn->name);
- init_fn->late_init();
- pr_info("Completed late initializing of %s !\n", init_fn->name);
- }
- }
-}
-
-void x86_init_fn_setup_arch(void)
-{
- struct x86_init_fn *init_fn;
-
- LINKTABLE_FOR_EACH(init_fn, x86_init_fns) {
- if ((init_fn->flags & INIT_DETECTED) && init_fn->setup_arch) {
- pr_info("Running setup_arch for %s ...\n", init_fn->name);
- init_fn->setup_arch();
- }
+ //pr_debug("Running early init %pF ...\n", init_fn->early_init);
+ init_fn->early_init();
+ //pr_debug("Completed early init %pF\n", init_fn->early_init);
}
}
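A minimal sketch of the check x86_init_fn_supports_subarch() performs above,
using a hypothetical helper name (boot_params and the X86_SUBARCH_* values
come from asm/bootparam.h as in the patch):

	/* An entry runs only if the bit for the subarch the kernel booted on
	 * is set in its supp_hardware_subarch mask.
	 */
	static bool subarch_allows(__u32 supp_hardware_subarch)
	{
		return !!(BIT(boot_params.hdr.hardware_subarch) &
			  supp_hardware_subarch);
	}

On a standard PC boot, where boot_params.hdr.hardware_subarch is
X86_SUBARCH_PC, this means entries registered with x86_init_early_xen() are
skipped while x86_init_early_pc() and x86_init_early_all() entries run.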
diff --git a/arch/x86/kernel/sort-init.c b/arch/x86/kernel/sort-init.c
deleted file mode 100644
index 6bf5eac..0000000
--- a/arch/x86/kernel/sort-init.c
+++ /dev/null
@@ -1,114 +0,0 @@
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <asm/x86_init_fn.h>
-
-static struct x86_init_fn *x86_init_fn_find_dep(struct x86_init_fn *start,
- struct x86_init_fn *finish,
- struct x86_init_fn *q)
-{
- struct x86_init_fn *p;
-
- if (!q)
- return NULL;
-
- for (p = start; p < finish; p++)
- if (p->detect == q->depend)
- return p;
-
- return NULL;
-}
-
-static void x86_init_fn_sort(struct x86_init_fn *start,
- struct x86_init_fn *finish)
-{
-
- struct x86_init_fn *p, *q, tmp;
-
- for (p = start; p < finish; p++) {
-again:
- q = x86_init_fn_find_dep(start, finish, p);
- /*
- * We are bit sneaky here. We use the memory address to figure
- * out if the node we depend on is past our point, if so, swap.
- */
- if (q > p) {
- tmp = *p;
- memmove(p, q, sizeof(*p));
- *q = tmp;
- goto again;
- }
- }
-
-}
-
-static void x86_init_fn_check(struct x86_init_fn *start,
- struct x86_init_fn *finish)
-{
- struct x86_init_fn *p, *q, *x;
-
- /* Simple cyclic dependency checker. */
- for (p = start; p < finish; p++) {
- if (!p->depend)
- continue;
- q = x86_init_fn_find_dep(start, finish, p);
- x = x86_init_fn_find_dep(start, finish, q);
- if (p == x) {
- pr_info("CYCLIC DEPENDENCY FOUND! %pS depends on %pS and vice-versa. BREAKING IT.\n",
- p->name, q->name);
- /* Heavy handed way..*/
- x->depend = 0;
- }
- }
-
- /*
- * Validate sorting semantics.
- *
- * p depends on q so:
- * - q must run first, so q < p. If q > p that's an issue
- * as its saying p must run prior to q. We already sorted
- * this table, this is a problem.
- *
- * - q's order level must be <= than p's as it should run first
- */
- for (p = start; p < finish; p++) {
- if (!p->depend)
- continue;
- /*
- * Be pedantic and do a full search on the entire table,
- * if we need further validation, after this is called
- * one could use an optimized version which just searches
- * on x86_init_fn_find_dep(p, finish, p), as we would have
- * guarantee on proper ordering both at the dependency level
- * and by order level.
- */
- q = x86_init_fn_find_dep(start, finish, p);
- if (q && q > p) {
- pr_info("EXECUTION ORDER INVALID! %s should be called before %s!\n",
- p->name, q->name);
- }
-
- /*
- * Technically this would still work as the memmove() would
- * have forced the dependency to run first, however we want
- * strong semantics, so lets avoid these.
- */
- if (q && q->order_level > p->order_level) {
- pr_info("INVALID ORDER LEVEL! %s should have an order level <= be called before %s!\n",
- p->name, q->name);
- }
- }
-}
-
-void __ref x86_init_fn_init_tables(void)
-{
- unsigned int num_inits = LINUX_SECTION_SIZE(x86_init_fns);
-
- if (!num_inits)
- return;
-
- x86_init_fn_sort(LINUX_SECTION_START(x86_init_fns),
- LINUX_SECTION_END(x86_init_fns));
- x86_init_fn_check(LINUX_SECTION_START(x86_init_fns),
- LINUX_SECTION_END(x86_init_fns));
-}
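With sort-init.c gone, ordering is now determined solely by the order level
plus link order within that level, per the Makefile comment (the struct or
symbol name has no effect). A minimal sketch, assuming two hypothetical
objects listed in this order in OBJS:

	/* a.c -- listed first in the Makefile */
	x86_init_early_pc(early_init_a);	/* called first  */

	/* b.c -- listed second in the Makefile */
	x86_init_early_pc(early_init_b);	/* called second */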
diff --git a/beta.c b/beta.c
index 8646c0d..2bdbf49 100644
--- a/beta.c
+++ b/beta.c
@@ -1,10 +1,9 @@
-#include <stdio.h>
-#include <unistd.h>
-
#include <linux/kernel.h>
#include <asm/x86_init_fn.h>
static void early_init_beta(void) {
+ pr_info("Initializing beta ...\n");
+ pr_info("Completed initializing beta !\n");
}
-X86_INIT_EARLY_PC(beta, NULL, NULL, early_init_beta, NULL, NULL);
+x86_init_early_pc(early_init_beta);
diff --git a/driver.c b/driver.c
index c506b9b..2a076aa 100644
--- a/driver.c
+++ b/driver.c
@@ -3,12 +3,9 @@
#include <asm/x86_init_fn.h>
static void early_init_driver(void) {
+ pr_info("Initializing acme ...\n");
sleep(2);
+ pr_info("Completed initializing acme !\n");
}
-static bool detect_driver(void) {
- return true;
-}
-
-X86_INIT_NORMAL_PC(acme, detect_driver, detect_pci,
- early_init_driver, NULL, NULL);
+x86_init_early_pc(early_init_driver);
diff --git a/kasan.c b/kasan.c
index bb85728..c012067 100644
--- a/kasan.c
+++ b/kasan.c
@@ -1,19 +1,10 @@
-#include <stdio.h>
-#include <unistd.h>
-#include <errno.h>
-
#include <linux/kernel.h>
-
#include <asm/x86_init_fn.h>
-#include <asm/bootparam.h>
void kasan_early_init(void) {
+ pr_info("Initializing kasan ...\n");
pr_info("Early init for Kasan...\n");
+ pr_info("Completed initializing kasan !\n");
}
-void kasan_init(void)
-{
- pr_info("Calling setup_arch work for Kasan...\n");
-}
-
-X86_INIT_EARLY_PC(kasan, NULL, NULL, kasan_early_init, kasan_init, NULL);
+x86_init_early_pc(kasan_early_init);
diff --git a/kprobes.c b/kprobes.c
index 3d4b024..1d5a74b 100644
--- a/kprobes.c
+++ b/kprobes.c
@@ -1,3 +1,4 @@
+#include <linux/kernel.h>
#include <linux/tables.h>
#include <asm/x86_init_fn.h>
#include <linux/ranges.h>
@@ -34,6 +35,8 @@ void early_init_kprobes(void)
{
unsigned long addr;
+ pr_info("Initializing kprobes ...\n");
+
addr = (unsigned long) &test_kprobe_0001;
test_kprobe_addr("test_kprobe_0001", addr, true);
@@ -41,6 +44,8 @@ void early_init_kprobes(void)
addr = (unsigned long) &test_kprobe_0002;
test_kprobe_addr("test_kprobe_0002", addr, false);
+
+ pr_info("Completed initializing kprobes !\n");
}
-X86_INIT_EARLY_ALL(kprobes, NULL, NULL, early_init_kprobes, NULL, NULL);
+x86_init_early_all(early_init_kprobes);
diff --git a/memory.c b/memory.c
index 7a63fab..dbf1a85 100644
--- a/memory.c
+++ b/memory.c
@@ -1,16 +1,10 @@
-#include <stdio.h>
-#include <unistd.h>
-#include <errno.h>
-
+#include <linux/kernel.h>
#include <asm/x86_init_fn.h>
static void early_init_memory(void) {
+ pr_info("Initializing memory ...\n");
sleep(1);
+ pr_info("Completed initializing memory !\n");
}
-static bool detect_memory(void) {
- return true;
-}
-
-X86_INIT_EARLY_ALL(memory, detect_memory, NULL,
- early_init_memory, NULL, NULL);
+x86_init_early_all(early_init_memory);
diff --git a/pci.c b/pci.c
index 0279a32..d2a5770 100644
--- a/pci.c
+++ b/pci.c
@@ -1,7 +1,7 @@
+#include <linux/kernel.h>
#include <linux/tables.h>
#include <asm/x86_init_fn.h>
#include <asm/bootparam.h>
-
#include <linux/pci.h>
DECLARE_LINKTABLE(struct pci_fixup, pci_fixup_early);
@@ -11,6 +11,8 @@ void early_init_pci(void) {
const struct pci_fixup *fixup;
unsigned int tbl_size = LINUX_SECTION_SIZE(pci_fixup_early);
+ pr_info("Initializing pci ...\n");
+
pr_info("PCI fixup size: %d\n", tbl_size);
sleep(1);
@@ -20,10 +22,8 @@ void early_init_pci(void) {
pr_info("Demo: Using LINKTABLE_RUN_ALL\n");
LINKTABLE_RUN_ALL(pci_fixup_early, hook,);
-}
-bool detect_pci(void) {
- return true;
+ pr_info("Completed initializing pci !\n");
}
-X86_INIT_EARLY_ALL(pci, detect_pci, NULL, early_init_pci, NULL, NULL);
+x86_init_early_all(early_init_pci);
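pci.c also illustrates the two declarers called out in the commit message: the
read-only PCI fixup table uses the const declarer while the x86 init table
uses the non-const _DATA variant. A sketch of the distinction as used in this
tree:

	/* const table, read-only entries */
	DECLARE_LINKTABLE(struct pci_fixup, pci_fixup_early);

	/* non-const table, entries live in init data */
	DECLARE_LINKTABLE_DATA(struct x86_init_fn, x86_init_fns);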
diff --git a/xen-driver.c b/xen-driver.c
index e81042d..79d229f 100644
--- a/xen-driver.c
+++ b/xen-driver.c
@@ -1,11 +1,11 @@
#include <asm/x86_init_fn.h>
-
+#include <linux/kernel.h>
#include <linux/pci.h>
#include <xen/xen.h>
static void early_xen_init_driver(void) {
+ pr_info("Initializing xen driver\n");
sleep(2);
}
-X86_INIT_NORMAL_XEN(xen_driver, NULL, detect_pci,
- early_xen_init_driver, NULL, NULL);
+x86_init_early_xen(early_xen_init_driver);
diff --git a/xen.c b/xen.c
index f5ccdcb..40ad493 100644
--- a/xen.c
+++ b/xen.c
@@ -1,6 +1,4 @@
-#include <stdio.h>
-#include <stdbool.h>
-
+#include <linux/kernel.h>
#include <linux/tables.h>
#include <asm/x86_init_fn.h>
#include <asm/x86.h>
@@ -9,7 +7,6 @@ void startup_xen(void)
{
pr_info("Initializing Xen guest\n");
- x86_init_fn_init_tables();
x86_init_fn_early_init();
x86_64_start_reservations();