[syslinux:firmware] efi: Support booting 32-bit kernels from 64-bit EFI
syslinux-bot for Matt Fleming
matt.fleming at intel.com
Mon Jul 8 09:30:04 PDT 2013
Commit-ID: c07dbea2b90c97839edde5a32e80d3d9d47f8dc4
Gitweb: http://www.syslinux.org/commit/c07dbea2b90c97839edde5a32e80d3d9d47f8dc4
Author: Matt Fleming <matt.fleming at intel.com>
AuthorDate: Mon, 24 Jun 2013 20:09:58 +0100
Committer: Matt Fleming <matt.fleming at intel.com>
CommitDate: Tue, 25 Jun 2013 16:27:14 +0100
efi: Support booting 32-bit kernels from 64-bit EFI
The default scheme for booting Linux kernels should be to switch to
32-bit protected mode and jump to the start of the kernel image. The
kernel has always had the know-how to switch 64-bit capable CPUs into
64-bit mode if necessary. By using this scheme, we can transparently
boot either 32-bit or 64-bit kernels.
This change necessitated moving kernel_jump() to a .S file for both i386
and x86-64. Writing inline assembly is fun for about 5 minutes, but then
becomes monstrously tedious.
Cc: H. Peter Anvin <hpa at zytor.com>
Signed-off-by: Matt Fleming <matt.fleming at intel.com>
---
efi/Makefile | 8 +++-
com32/modules/cmd.c => efi/i386/linux.S | 24 +++++-------
efi/main.c | 66 +--------------------------------
core/path.c => efi/x86_64/linux.S | 63 ++++++++++++++++---------------
4 files changed, 51 insertions(+), 110 deletions(-)
diff --git a/efi/Makefile b/efi/Makefile
index c89ca06..8e575ae 100644
--- a/efi/Makefile
+++ b/efi/Makefile
@@ -38,7 +38,13 @@ LIB_OBJS = $(addprefix $(objdir)/com32/lib/,$(CORELIBOBJS))
CSRC = $(wildcard $(SRC)/*.c)
OBJS = $(subst $(SRC)/,,$(filter-out %wrapper.o, $(patsubst %.c,%.o,$(CSRC))))
-OBJS += $(objdir)/core/codepage.o
+OBJS += $(objdir)/core/codepage.o $(ARCH)/linux.o
+
+.PHONY: subdirs
+subdirs:
+ mkdir -p $(ARCH)
+
+$(OBJS): subdirs
# The targets to build in this directory
BTARGET = syslinux.efi
diff --git a/com32/modules/cmd.c b/efi/i386/linux.S
similarity index 69%
copy from com32/modules/cmd.c
copy to efi/i386/linux.S
index 233c7ca..557d3e2 100644
--- a/com32/modules/cmd.c
+++ b/efi/i386/linux.S
@@ -1,6 +1,6 @@
/* ----------------------------------------------------------------------- *
*
- * Copyright 2008 Michael Brown - All Rights Reserved
+ * Copyright 2013 Intel Corporation; author: Matt Fleming
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -10,17 +10,11 @@
*
* ----------------------------------------------------------------------- */
-/*
- * cmd.c
- *
- * Execute arbitrary commands
- */
-
-#include <com32.h>
-#include <syslinux/boot.h>
-
-int main(void)
-{
- syslinux_run_command(com32_cmdline());
- return -1;
-}
+ .globl kernel_jump
+	.type kernel_jump,@function
+ .text
+kernel_jump:
+ cli
+ movl 0x8(%esp), %esi
+ movl 0x4(%esp), %ecx
+ jmp *%ecx
diff --git a/efi/main.c b/efi/main.c
index 71333a4..0bae399 100644
--- a/efi/main.c
+++ b/efi/main.c
@@ -417,74 +417,12 @@ struct boot_params {
* allocate_pool()/free_pool()
* memory_map()
*/
+extern void kernel_jump(EFI_PHYSICAL_ADDRESS kernel_start,
+ struct boot_params *boot_params);
#if __SIZEOF_POINTER__ == 4
#define EFI_LOAD_SIG "EL32"
-static inline void kernel_jump(EFI_PHYSICAL_ADDRESS kernel_start,
- struct boot_params *boot_params)
-{
- asm volatile ("cli \n"
- "movl %0, %%esi \n"
- "movl %1, %%ecx \n"
- "jmp *%%ecx \n"
- :: "m" (boot_params), "m" (kernel_start));
-}
-
-static inline void handover_jump(EFI_HANDLE image, struct boot_params *bp,
- EFI_PHYSICAL_ADDRESS kernel_start)
-{
- /* handover protocol not implemented yet; the linux header needs to be updated */
-#if 0
- kernel_start += hdr->handover_offset;
-
- asm volatile ("cli \n"
- "pushl %0 \n"
- "pushl %1 \n"
- "pushl %2 \n"
- "movl %3, %%ecx \n"
- "jmp *%%ecx \n"
- :: "m" (bp), "m" (ST),
- "m" (image), "m" (kernel_start));
-#endif
-}
#elif __SIZEOF_POINTER__ == 8
#define EFI_LOAD_SIG "EL64"
-typedef void(*kernel_func)(void *, struct boot_params *);
-typedef void(*handover_func)(void *, EFI_SYSTEM_TABLE *, struct boot_params *);
-static inline void kernel_jump(EFI_PHYSICAL_ADDRESS kernel_start,
- struct boot_params *boot_params)
-{
- kernel_func kf;
-
- asm volatile ("cli");
-
- /* The 64-bit kernel entry is 512 bytes after the start. */
- kf = (kernel_func)kernel_start + 512;
-
- /*
- * The first parameter is a dummy because the kernel expects
- * boot_params in %[re]si.
- */
- kf(NULL, boot_params);
-}
-
-static inline void handover_jump(EFI_HANDLE image, struct boot_params *bp,
- EFI_PHYSICAL_ADDRESS kernel_start)
-{
-#if 0
- /* handover protocol not implemented yet the linux header needs to be updated */
-
- UINT32 offset = bp->hdr.handover_offset;
- handover_func hf;
-
- asm volatile ("cli");
-
- /* The 64-bit kernel entry is 512 bytes after the start. */
- kernel_start += 512;
-
- hf = (handover_func)(kernel_start + offset);
- hf(image, ST, bp);
-#endif
-}
#else
#error "unsupported architecture"
#endif
diff --git a/core/path.c b/efi/x86_64/linux.S
similarity index 50%
copy from core/path.c
copy to efi/x86_64/linux.S
index 8e517ca..4b1b88b 100644
--- a/core/path.c
+++ b/efi/x86_64/linux.S
@@ -10,33 +10,36 @@
*
* ----------------------------------------------------------------------- */
-#include <klibc/compiler.h>
-#include <linux/list.h>
-#include <fs.h>
-#include <string.h>
-
-__export LIST_HEAD(PATH);
-
-__export struct path_entry *path_add(const char *str)
-{
- struct path_entry *entry;
-
- if (!strlen(str))
- return NULL;
-
- entry = malloc(sizeof(*entry));
- if (!entry)
- return NULL;
-
- entry->str = strdup(str);
- if (!entry->str)
- goto bail;
-
- list_add(&entry->list, &PATH);
-
- return entry;
-
-bail:
- free(entry);
- return NULL;
-}
+#define CR0_PG_FLAG 0x80000000
+#define MSR_EFER 0xc0000080
+
+ .globl kernel_jump
+	.type kernel_jump,@function
+ .code64
+kernel_jump:
+ cli
+
+ /*
+ * Setup our segment selector (0x10) and return address (%rdi)
+ * on the stack in preparation for the far return below.
+ */
+ mov $0x1000000000, %rcx
+ addq %rcx, %rdi
+ pushq %rdi
+
+ .code32
+pm_code:
+
+ /* Disable IA-32e mode by clearing IA32_EFER.LME */
+ xorl %eax, %eax
+ xorl %edx, %edx
+ movl $MSR_EFER, %ecx
+ wrmsr
+
+ /* Turn off paging to disable long mode */
+ movl %cr0, %eax
+ andl $~CR0_PG_FLAG, %eax
+ movl %eax, %cr0
+
+ /* Far return */
+ lret
More information about the Syslinux-commits
mailing list