OcAfterBootCompatLib: Start using call gate for kernel interception

vit9696 2020-11-08 04:23:35 +03:00
parent 80fa9dbb44
commit 05dc52acd2
9 changed files with 108 additions and 753 deletions

View File

@ -27,14 +27,6 @@
#include <Protocol/LoadedImage.h>
#include <Protocol/OcFirmwareRuntime.h>
#if defined(MDE_CPU_X64)
#include "X64/ContextSwitch.h"
#elif defined(MDE_CPU_IA32)
#include <Ia32/ContextSwitch.h>
#else
#error "Unsupported architecture!"
#endif
//
// The kernel is normally allocated at base 0x100000 + slide address.
//
@ -73,7 +65,12 @@
/**
Kernel physical base address.
**/
#define KERNEL_BASE_PADDR (KERNEL_TEXT_VADDR - KERNEL_HIB_VADDR)
#define KERNEL_BASE_PADDR ((UINT32) KERNEL_HIB_VADDR)
/**
Kernel text physical base address.
**/
#define KERNEL_TEXT_PADDR ((UINT32) KERNEL_TEXT_VADDR)
/**
Slide offset per slide entry
@ -103,6 +100,39 @@
**/
#define ESTIMATED_KERNEL_SIZE ((UINTN) (200 * SIZE_1MB))
/**
Assume call gate (normally a little over 100 bytes) can be up to 256 bytes.
It is allocated in its own page and is relocatable.
**/
#define ESTIMATED_CALL_GATE_SIZE 256
/**
Size of the jump inserted in front of the original call gate code to transfer control to our code.
**/
#define CALL_GATE_JUMP_SIZE (sizeof (CALL_GATE_JUMP))
/**
Command used to perform an absolute 64-bit jump from the call gate to our code.
**/
#pragma pack(push,1)
typedef struct CALL_GATE_JUMP_ {
UINT16 Command;
UINT32 Argument;
UINT64 Address;
} CALL_GATE_JUMP;
STATIC_ASSERT (sizeof (CALL_GATE_JUMP) == 14, "Invalid CALL_GATE_JUMP size");
#pragma pack(pop)
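For reference, these three fields together encode a single x86-64 construct: FF 25 00000000 is jmp qword [rip+0], an indirect jump through the 8-byte absolute address that immediately follows the instruction. The standalone sketch below (plain host-side C, not part of the commit; the target address is a placeholder) prints the resulting 14-byte layout on a little-endian machine.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#pragma pack(push, 1)
typedef struct {
  uint16_t Command;   /* 0x25FF, stored little-endian => bytes FF 25 (jmp [rip+disp32]) */
  uint32_t Argument;  /* disp32 = 0: the target is read from the 8 bytes that follow    */
  uint64_t Address;   /* absolute 64-bit jump target                                    */
} CallGateJumpSketch;
#pragma pack(pop)

int main (void) {
  /* Placeholder target; in the library this would be AppleMapPrepareKernelState. */
  CallGateJumpSketch  Jump = { 0x25FF, 0, 0x0000000012345678ULL };
  uint8_t             Bytes[sizeof (Jump)];
  size_t              Index;

  memcpy (Bytes, &Jump, sizeof (Jump));
  for (Index = 0; Index < sizeof (Bytes); ++Index) {
    printf ("%02X ", Bytes[Index]);  /* FF 25 00 00 00 00 78 56 34 12 00 00 00 00 */
  }
  printf ("\n");
  return 0;
}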
/**
Kernel call gate prototype.
**/
typedef
UINTN
(EFIAPI *KERNEL_CALL_GATE) (
IN UINTN Args,
IN UINTN EntryPoint
);
/**
Preserved relocation entry.
**/
@ -209,6 +239,17 @@ typedef struct SERVICES_OVERRIDE_STATE_ {
///
EFI_PHYSICAL_ADDRESS HibernateImageAddress;
///
/// The kernel call gate is an assembly routine that takes the boot arguments (rcx)
/// and the kernel entry point (rdx) and jumps to the kernel (pstart) in 32-bit mode.
///
/// It is only used for normal booting, so for general interception we do not
/// use the call gate but patch the kernel entry point instead. However, when
/// booting with a relocation block (which does not support hibernation) we need
/// to update the kernel entry point with the relocation block offset, and that
/// can only be done in the call gate, as the kernel would otherwise jump to lower memory.
///
EFI_PHYSICAL_ADDRESS KernelCallGate;
///
/// Last descriptor size obtained from GetMemoryMap.
///
UINTN MemoryMapDescriptorSize;
@ -238,18 +279,6 @@ typedef struct SERVICES_OVERRIDE_STATE_ {
Apple kernel support internal state.
**/
typedef struct KERNEL_SUPPORT_STATE_ {
///
/// Assembly support internal state.
///
ASM_SUPPORT_STATE AsmState;
///
/// Kernel jump trampoline.
///
ASM_KERNEL_JUMP KernelJump;
///
/// Original kernel memory.
///
UINT8 KernelOrg[sizeof (ASM_KERNEL_JUMP)];
///
/// Custom kernel UEFI System Table.
///
@ -409,30 +438,16 @@ AppleMapPrepareBooterState (
IN EFI_GET_MEMORY_MAP GetMemoryMap OPTIONAL
);
/**
Save UEFI environment state in implementation specific way.
@param[in,out] AsmState Assembly state to update, can be preserved.
@param[out] KernelJump Kernel jump trampoline to fill.
**/
VOID
AppleMapPlatformSaveState (
IN OUT ASM_SUPPORT_STATE *AsmState,
OUT ASM_KERNEL_JUMP *KernelJump
);
/**
Patch the kernel call gate to later land in AppleMapPrepareKernelState.
@param[in,out] BootCompat Boot compatibility context.
@param[in] ImageAddress Kernel or hibernation image address.
@param[in] AppleHibernateWake TRUE when ImageAddress points to hibernation image.
@param[in] CallGate Kernel call gate address.
**/
VOID
AppleMapPrepareKernelJump (
IN OUT BOOT_COMPAT_CONTEXT *BootCompat,
IN UINTN ImageAddress,
IN BOOLEAN AppleHibernateWake
IN EFI_PHYSICAL_ADDRESS CallGate
);
/**
@ -455,18 +470,20 @@ AppleMapPrepareMemState (
/**
Prepare environment for Apple kernel bootloader in boot or wake cases.
This callback arrives when boot.efi jumps to kernel.
This callback arrives when boot.efi jumps to the kernel call gate.
It should transfer control to the kernel call gate + CALL_GATE_JUMP_SIZE
with the same arguments.
@param[in] Args Case-specific kernel argument handle.
@param[in] ModeX64 Debug flag about kernel context type, TRUE when X64.
@param[in] Args Case-specific kernel argument handle.
@param[in] EntryPoint Case-specific kernel entry point.
@retval Args must be returned with the necessary modifications if any.
@returns Case-specific value if any.
**/
UINTN
EFIAPI
AppleMapPrepareKernelState (
IN UINTN Args,
IN BOOLEAN ModeX64
IN UINTN EntryPoint
);
/**

View File

@ -1,49 +0,0 @@
/** @file
Copyright (C) 2019, vit9696. All rights reserved.
All rights reserved.
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#ifndef CONTEXT_SWITCH_H
#define CONTEXT_SWITCH_H
//
// Structure definitions shared with ASM code.
// Keep these definitions in sync with ContextSwitch.nasm!
//
#pragma pack(push, 1)
/**
Assembly support state.
This state is used as an intermediate structure to hold UEFI environment
context and kernel environment context for switching between 32-bit
and 64-bit modes during booting as normal XNU boot still happens in 32-bit.
**/
typedef PACKED struct ASM_SUPPORT_STATE_ {
VOID *KernelEntry;
} ASM_SUPPORT_STATE;
/**
Assembly kernel trampoline.
This structure contains encoded assembly to jump from kernel
code to UEFI code through AsmAppleMapPlatformPrepareKernelState
intermediate handler.
**/
typedef PACKED struct ASM_KERNEL_JUMP_ {
UINT8 MovInst;
UINT32 Addr;
UINT16 CallInst;
} ASM_KERNEL_JUMP;
#pragma pack(pop)
#endif // CONTEXT_SWITCH_H

View File

@ -1,30 +0,0 @@
/** @file
Copyright (C) 2019, vit9696. All rights reserved.
All rights reserved.
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "../BootCompatInternal.h"
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
VOID
AppleMapPlatformSaveState (
IN OUT ASM_SUPPORT_STATE *AsmState,
OUT ASM_KERNEL_JUMP *KernelJump
)
{
//
// Currently unsupported as not required.
// The warning is present in AppleMapPrepareKernelJump when quirks are accidentally enabled.
//
}

View File

@ -521,14 +521,6 @@ AppleMapPrepareBooterState (
BootCompat
);
//
// This function may be called twice, do not redo in this case.
//
AppleMapPlatformSaveState (
&BootCompat->KernelState.AsmState,
&BootCompat->KernelState.KernelJump
);
if (BootCompat->Settings.AvoidRuntimeDefrag) {
if (BootCompat->KernelState.SysTableRtArea == 0) {
//
@ -581,13 +573,10 @@ AppleMapPrepareBooterState (
VOID
AppleMapPrepareKernelJump (
IN OUT BOOT_COMPAT_CONTEXT *BootCompat,
IN UINTN ImageAddress,
IN BOOLEAN AppleHibernateWake
IN EFI_PHYSICAL_ADDRESS CallGate
)
{
UINT64 KernelEntryVaddr;
UINT32 KernelEntry;
IOHibernateImageHeader *ImageHeader;
CALL_GATE_JUMP *CallGateJump;
//
// There is no reason to patch the kernel when we do not need it.
@ -602,65 +591,27 @@ AppleMapPrepareKernelJump (
#endif
//
// Check whether we have image address and abort if not.
// Check whether we have the call gate address and abort if not.
//
if (ImageAddress == 0) {
RUNTIME_DEBUG ((DEBUG_ERROR, "OCABC: Failed to find image address, hibernate %d\n", AppleHibernateWake));
if (CallGate == 0) {
RUNTIME_DEBUG ((DEBUG_ERROR, "OCABC: Failed to find call gate address\n"));
return;
}
if (!AppleHibernateWake) {
//
// ImageAddress points to the first kernel segment, __HIB.
// Kernel image header is located in __TEXT, which follows __HIB.
//
ImageAddress += KERNEL_BASE_PADDR;
//
// Cut higher virtual address bits.
//
KernelEntryVaddr = MachoRuntimeGetEntryAddress (
(VOID*) ImageAddress
);
if (KernelEntryVaddr == 0) {
RUNTIME_DEBUG ((DEBUG_ERROR, "OCABC: Kernel entry point was not found!"));
return;
}
//
// Perform virtual to physical address conversion by subtracting __TEXT base
// and adding current physical kernel location.
//
KernelEntry = (UINT32) (KernelEntryVaddr - KERNEL_TEXT_VADDR + ImageAddress);
} else {
//
// Read kernel entry from hibernation image and patch it with jump.
// At this stage the HIB section is not yet copied from the sleep image to its
// proper memory destination, so we patch the entry point in the sleep image.
// Note the virtual -> physical conversion through truncation.
//
ImageHeader = (IOHibernateImageHeader *) ImageAddress;
KernelEntry = ((UINT32)(UINTN) &ImageHeader->fileExtentMap[0])
+ ImageHeader->fileExtentMapSize + ImageHeader->restore1CodeOffset;
}
CallGateJump = (VOID *)(UINTN) CallGate;
//
// Save original kernel entry code.
// Shift the original call gate code forward to make room for our jump.
//
CopyMem (
&BootCompat->KernelState.KernelOrg[0],
(VOID *)(UINTN) KernelEntry,
sizeof (BootCompat->KernelState.KernelOrg)
CallGateJump + 1,
CallGateJump,
ESTIMATED_CALL_GATE_SIZE
);
//
// Copy kernel jump code to kernel entry address.
//
CopyMem (
(VOID *)(UINTN) KernelEntry,
&BootCompat->KernelState.KernelJump,
sizeof (BootCompat->KernelState.KernelJump)
);
CallGateJump->Command = 0x25FF;
CallGateJump->Argument = 0x0;
CallGateJump->Address = (UINTN) AppleMapPrepareKernelState;
}
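In effect, the routine above turns the page boot.efi allocated for its call gate into a detour: the original gate code is shifted forward by sizeof (CALL_GATE_JUMP) bytes and a 14-byte absolute jump to AppleMapPrepareKernelState is written at the original entry. The hypothetical standalone helper below restates that idea; names are illustrative, and memmove stands in for EDK II's CopyMem (which also tolerates the overlapping buffers used here).

#include <stdint.h>
#include <string.h>

#define ESTIMATED_CALL_GATE_SIZE_SKETCH  256  /* mirrors ESTIMATED_CALL_GATE_SIZE */

#pragma pack(push, 1)
typedef struct {
  uint16_t Command;
  uint32_t Argument;
  uint64_t Address;
} CallGateJumpSketch;
#pragma pack(pop)

/* Hypothetical helper illustrating the patch; not the library code. */
static void
PatchCallGateSketch (
  uint8_t   *CallGate,    /* page boot.efi allocated for its call gate      */
  uint64_t  Interceptor   /* address of the interception routine to jump to */
  )
{
  CallGateJumpSketch  Jump;

  /* Shift the original gate code forward to make room for the jump stub.
     The regions overlap, so memmove is required here. */
  memmove (CallGate + sizeof (Jump), CallGate, ESTIMATED_CALL_GATE_SIZE_SKETCH);

  /* Write jmp qword [rip+0] followed by the absolute interceptor address. */
  Jump.Command  = 0x25FF;
  Jump.Argument = 0;
  Jump.Address  = Interceptor;
  memcpy (CallGate, &Jump, sizeof (Jump));
}

/* Resulting layout:
     CallGate + 0   : FF 25 00 00 00 00 <Interceptor>  (14-byte detour)
     CallGate + 14  : original call gate code (shifted copy)            */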
EFI_STATUS
@ -726,10 +677,11 @@ UINTN
EFIAPI
AppleMapPrepareKernelState (
IN UINTN Args,
IN BOOLEAN ModeX64
IN UINTN EntryPoint
)
{
BOOT_COMPAT_CONTEXT *BootCompatContext;
KERNEL_CALL_GATE CallGate;
BootCompatContext = GetBootCompatContext ();
@ -745,14 +697,8 @@ AppleMapPrepareKernelState (
);
}
//
// Restore original kernel entry code.
//
CopyMem (
BootCompatContext->KernelState.AsmState.KernelEntry,
&BootCompatContext->KernelState.KernelOrg[0],
sizeof (BootCompatContext->KernelState.KernelOrg)
CallGate = (KERNEL_CALL_GATE)(UINTN) (
BootCompatContext->ServiceState.KernelCallGate + CALL_GATE_JUMP_SIZE
);
return Args;
return CallGate (Args, EntryPoint);
}
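The interceptor thus regains control right before the kernel starts, performs its fixups, and falls through to the untouched copy of Apple's call gate located CALL_GATE_JUMP_SIZE bytes past the patched entry. A minimal sketch of that hand-off pattern, with hypothetical names and the fixups elided:

#include <stdint.h>

typedef uintptr_t (*KernelCallGateSketch) (uintptr_t Args, uintptr_t EntryPoint);

/* Hypothetical interceptor mirroring AppleMapPrepareKernelState's tail call. */
static uintptr_t
InterceptKernelStartSketch (
  uintptr_t  GateBase,    /* call gate address recorded at allocation time */
  uintptr_t  Args,        /* boot arguments handed over by boot.efi        */
  uintptr_t  EntryPoint   /* kernel entry point handed over by boot.efi    */
  )
{
  KernelCallGateSketch  OriginalGate;

  /* ...memory map / runtime fixups would happen here... */

  /* The original gate code now starts 14 bytes (sizeof (CALL_GATE_JUMP))
     past the patched entry; chain into it with the unchanged arguments. */
  OriginalGate = (KernelCallGateSketch)(GateBase + 14);
  return OriginalGate (Args, EntryPoint);
}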

View File

@ -36,16 +36,6 @@
KernelSupport.c
OcAfterBootCompatLib.c
ServiceOverrides.c
# All headers should go to [Sources]; this sounds like a bug in the EDK II Build System.
Ia32/ContextSwitch.h
X64/ContextSwitch.h
[Sources.Ia32]
Ia32/ContextSwitchSupport.c
[Sources.X64]
X64/ContextSwitch.nasm
X64/ContextSwitchSupport.c
[Packages]
MdePkg/MdePkg.dec

View File

@ -334,9 +334,18 @@ OcAllocatePages (
EFI_STATUS Status;
BOOT_COMPAT_CONTEXT *BootCompat;
BOOLEAN IsPerfAlloc;
BOOLEAN IsCallGateAlloc;
BootCompat = GetBootCompatContext ();
IsPerfAlloc = FALSE;
//
// Filter out garbage right away.
//
if (Memory == NULL) {
return EFI_INVALID_PARAMETER;
}
BootCompat = GetBootCompatContext ();
IsPerfAlloc = FALSE;
IsCallGateAlloc = FALSE;
if (BootCompat->ServiceState.AwaitingPerfAlloc) {
if (BootCompat->ServiceState.AppleBootNestedCount > 0) {
@ -350,6 +359,14 @@ OcAllocatePages (
}
}
if (BootCompat->ServiceState.AppleBootNestedCount > 0
&& Type == AllocateMaxAddress
&& MemoryType == EfiLoaderCode
&& *Memory == BASE_4GB - 1
&& NumberOfPages == 1) {
IsCallGateAlloc = TRUE;
}
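For context, the four checks above single out the one-page EfiLoaderCode allocation that boot.efi requests for its call gate just below 4 GB. boot.efi sources are not available, so the sketch below only illustrates the kind of AllocatePages request this heuristic would match; the helper name is hypothetical.

#include <Uefi.h>
#include <Library/UefiBootServicesTableLib.h>

/* Illustrative request shape inferred from the checks above; not boot.efi code. */
STATIC
EFI_STATUS
AllocateCallGatePageSketch (
  OUT EFI_PHYSICAL_ADDRESS  *CallGate
  )
{
  *CallGate = BASE_4GB - 1;   /* upper bound: keep the gate 32-bit addressable */

  return gBS->AllocatePages (
    AllocateMaxAddress,       /* Type          */
    EfiLoaderCode,            /* MemoryType    */
    1,                        /* NumberOfPages */
    CallGate
    );
}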
Status = BootCompat->ServicePtrs.AllocatePages (
Type,
MemoryType,
@ -361,7 +378,13 @@ OcAllocatePages (
FixRuntimeAttributes (BootCompat, MemoryType);
if (BootCompat->ServiceState.AppleBootNestedCount > 0) {
if (IsPerfAlloc) {
if (IsCallGateAlloc) {
//
// Called from boot.efi.
// Memory allocated for the boot.efi-to-kernel call gate (trampoline).
//
BootCompat->ServiceState.KernelCallGate = *Memory;
} else if (IsPerfAlloc) {
//
// Called from boot.efi.
// New perf data, it can be reallocated multiple times.
@ -632,6 +655,7 @@ OcStartImage (
// Clear monitoring vars
//
BootCompat->ServiceState.MinAllocatedAddr = 0;
BootCompat->ServiceState.KernelCallGate = 0;
if (AppleLoadedImage != NULL) {
//
@ -827,19 +851,10 @@ OcExitBootServices (
return Status;
}
if (!BootCompat->ServiceState.AppleHibernateWake) {
AppleMapPrepareKernelJump (
BootCompat,
(UINTN) BootCompat->ServiceState.MinAllocatedAddr,
FALSE
);
} else {
AppleMapPrepareKernelJump (
BootCompat,
(UINTN) BootCompat->ServiceState.HibernateImageAddress,
TRUE
);
}
AppleMapPrepareKernelJump (
BootCompat,
(UINTN) BootCompat->ServiceState.KernelCallGate
);
return Status;
}

View File

@ -1,95 +0,0 @@
/** @file
Copyright (C) 2019, vit9696. All rights reserved.
All rights reserved.
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#ifndef CONTEXT_SWITCH_H
#define CONTEXT_SWITCH_H
//
// Structure definitions shared with ASM code.
// Keep these definitions in sync with ContextSwitch.nasm!
//
#pragma pack(push, 1)
/**
Assembly support state.
This state is used as an intermediate structure to hold UEFI environment
context and kernel environment context for switching between 32-bit
and 64-bit modes during booting as normal XNU boot still happens in 32-bit.
**/
typedef PACKED struct ASM_SUPPORT_STATE_ {
UINT64 SavedGDTR;
UINT16 SavedGDTRLimit;
UINT64 SavedIDTR;
UINT16 SavedIDTRLimit;
UINT64 SavedCR3;
UINT16 SavedCS;
UINT16 SavedDS;
UINT16 SavedES;
UINT16 SavedFS;
UINT16 SavedGS;
UINT64 SavedGDTR32;
UINT16 SavedGDTR32Limit;
UINT64 SavedIDTR32;
UINT16 SavedIDTR32Limit;
UINT16 SavedCS32;
UINT16 SavedDS32;
UINT16 SavedES32;
UINT16 SavedFS32;
UINT16 SavedGS32;
VOID *KernelEntry;
} ASM_SUPPORT_STATE;
/**
Assembly kernel trampoline.
This structure contains encoded assembly to jump from kernel
code to UEFI code through AsmAppleMapPlatformPrepareKernelState
intermediate handler.
**/
typedef PACKED struct ASM_KERNEL_JUMP_ {
UINT8 MovInst;
UINT32 Addr;
UINT16 CallInst;
} ASM_KERNEL_JUMP;
#pragma pack(pop)
/**
Assembly interface to save UEFI environment state in specific way.
@param[in,out] AsmState Assembly state to update, can be preserved.
**/
VOID
EFIAPI
AsmAppleMapPlatformSaveState (
IN OUT ASM_SUPPORT_STATE *AsmState
);
/**
Assembly interface for backjump from kernel code.
Takes kernel arguments through RAX or EAX register.
**/
VOID
AsmAppleMapPlatformPrepareKernelState (
);
/**
Assembly global variable containing ASM_SUPPORT_STATE address.
The stored ASM_SUPPORT_STATE address must fit into the lower 32 bits, as it is accessed from 32-bit code.
**/
extern UINT32 gOcAbcAsmStateAddr32;
#endif // CONTEXT_SWITCH_H

View File

@ -1,391 +0,0 @@
;------------------------------------------------------------------------------
; @file
; Copyright (C) 2013, dmazar. All rights reserved.
; Copyright (C) 2019, vit9696. All rights reserved.
;
; All rights reserved.
;
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
; http://opensource.org/licenses/bsd-license.php
;
; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
;------------------------------------------------------------------------------
BITS 64
DEFAULT REL
;------------------------------------------------------------------------------
; Structure definitions shared with C code.
; Keep these definitions in sync with ContextSwitch.h!
;------------------------------------------------------------------------------
struc ASM_SUPPORT_STATE
;------------------------------------------------------------------------------
; 64-bit state
;------------------------------------------------------------------------------
.SavedGDTR resq 1
.SavedGDTRLimit resw 1
.SavedIDTR resq 1
.SavedIDTRLimit resw 1
.SavedCR3 resq 1
.SavedCS resw 1
.SavedDS resw 1
.SavedES resw 1
.SavedFS resw 1
.SavedGS resw 1
;------------------------------------------------------------------------------
; 32-bit state
;------------------------------------------------------------------------------
.SavedGDTR32 resq 1
.SavedGDTR32Limit resw 1
.SavedIDTR32 resq 1
.SavedIDTR32Limit resw 1
.SavedCS32 resw 1
.SavedDS32 resw 1
.SavedES32 resw 1
.SavedFS32 resw 1
.SavedGS32 resw 1
;------------------------------------------------------------------------------
; Kernel entry address.
;------------------------------------------------------------------------------
.KernelEntry resq 1
.Size:
endstruc
struc ASM_KERNEL_JUMP
.MovInst resb 1
.Addr resd 1
.CallInst resw 1
.Size:
endstruc
;------------------------------------------------------------------------------
; C callback method called on jump to kernel after boot.efi finishes.
;------------------------------------------------------------------------------
extern ASM_PFX(AppleMapPrepareKernelState)
SECTION .text
;------------------------------------------------------------------------------
; VOID
; EFIAPI
; AsmAppleMapPlatformSaveState (
; OUT ASM_SUPPORT_STATE *AsmState
; );
;------------------------------------------------------------------------------
align 8
global ASM_PFX(AsmAppleMapPlatformSaveState)
ASM_PFX(AsmAppleMapPlatformSaveState):
BITS 64
sgdt [rcx + ASM_SUPPORT_STATE.SavedGDTR]
sidt [rcx + ASM_SUPPORT_STATE.SavedIDTR]
mov rax, cr3
mov [rcx + ASM_SUPPORT_STATE.SavedCR3], rax
mov word [rcx + ASM_SUPPORT_STATE.SavedCS], cs
mov word [rcx + ASM_SUPPORT_STATE.SavedDS], ds
mov word [rcx + ASM_SUPPORT_STATE.SavedES], es
mov word [rcx + ASM_SUPPORT_STATE.SavedFS], fs
mov word [rcx + ASM_SUPPORT_STATE.SavedGS], gs
ret
;------------------------------------------------------------------------------
; Apple kernel starts through call gate, an assembly structure allocated in
; 32-bit high memory, that transitions to 32-bit mode and then calls the kernel
; with 32-bit GDT and UEFI stack.
;
; KernelCallGate:
; lea rax, StartKernelIn32Bit
; mov cs:gKernelBooter32, eax
; lea rax, gKernelGdtTable
; mov cs:gKernelGdtBase, rax
; lgdt fword ptr cs:gKernelGdtLimit
; mov ax, 10h
; mov ds, ax
; mov es, ax
; mov gs, ax
; mov fs, ax
; lea rax, gKernelBooter32
; jmp fword ptr [rax]
;
; StartKernelIn32Bit:
; mov rax, cr0
; btr eax, 1Fh
; mov cr0, rax
; mov ebx, ecx ; ebx = boot-args
; mov edi, edx
; ; Disable long mode
; mov ecx, 0C0000080h ; EFER MSR number.
; rdmsr
; btr eax, 8 ; Set LME=0.
; wrmsr
; jmp short SwitchTo32Bit
;
; SwitchTo32Bit:
; mov eax, ebx
; jmp rdi ; Jump to kernel
; hlt
; ret
;
; gKernelBooter32:
; dd 0
; dw 8 ; New CS value
; gKernelGdtLimit: ; IA32_DESCRIPTOR
; dw 18h
; gKernelGdtBase:
; dq 0
; gKernelGdtTable: ; Array of IA32_GDT.
; dw 0 ; [0] = LimitLow
; dw 0 ; [0] = BaseLow
; db 0 ; [0] = BaseMid
; dw 0 ; [0] = Flags
; db 0 ; [0] = BaseHigh
; dw 0FFFFh ; [1] = LimitLow
; dw 0 ; [1] = BaseLow
; db 0 ; [1] = BaseMid
; dw 0CF9Eh ; [1] = Flags
; db 0 ; [1] = BaseHigh
; dw 0FFFFh ; [2] = LimitLow
; dw 0 ; [2] = BaseLow
; db 0 ; [2] = BaseMid
; dw 0CF92h ; [2] = Flags
; db 0 ; [2] = BaseHigh
;------------------------------------------------------------------------------
;------------------------------------------------------------------------------
; Long (far) return.
; retfq (lretq) - 64-bit encoding 48 CB
; retf (lret) - 32-bit encoding CB
;------------------------------------------------------------------------------
LONG_RET64:
db 048h
LONG_RET32:
db 0CBh
;------------------------------------------------------------------------------
; AsmAppleMapPlatformPrepareKernelState
;
; Callback from boot.efi - this is where we jump when boot.efi jumps to kernel.
; eax register contains boot arguments for the kernel.
;
; - test if we are in 32 bit or in 64 bit
; - if 64 bit, then jump to AsmJumpFromKernel64
; - else just continue with AsmJumpFromKernel32
;------------------------------------------------------------------------------
global ASM_PFX(AsmAppleMapPlatformPrepareKernelState)
ASM_PFX(AsmAppleMapPlatformPrepareKernelState):
BITS 32
push eax ; save bootArgs pointer to stack
mov dword ecx, 0C0000080h ; EFER MSR number.
rdmsr ; Read EFER.
bt eax, 8 ; Check if LME==1 -> CF=1.
pop eax
jc AsmJumpFromKernel64 ; LME==1 -> jump to 64 bit code
; otherwise, continue with AsmJumpFromKernel32
; The 32-bit code above must produce opcodes equivalent to the following 64-bit code.
;BITS 64
; push rax ; save bootArgs pointer to stack
; mov ecx, C0000080h ; EFER MSR number.
; rdmsr ; Read EFER.
; bt eax, 8 ; Check if LME==1 -> CF=1.
; pop rax
; jc AsmJumpFromKernel64 ; LME==1 -> jump to 64 bit code
;------------------------------------------------------------------------------
; AsmJumpFromKernel32
;
; Callback from boot.efi in 32 bit mode.
;------------------------------------------------------------------------------
AsmJumpFromKernel32:
BITS 32
; Save bootArgs pointer to edi.
mov edi, eax
; Load ebx with AsmState - we'll access our saved data with it.
db 0BBh ; mov ebx, OFFSET DataBase
;------------------------------------------------------------------------------
; 32-bit pointer to AsmState used to reduce global variable access.
; Defined here because 32-bit mode does not support relative addressing.
; As both jumps can happen from 64-bit kernel, the address must fit in 4 bytes.
;------------------------------------------------------------------------------
global ASM_PFX(gOcAbcAsmStateAddr32)
ASM_PFX(gOcAbcAsmStateAddr32):
dd 0
; Store the kernel entry point, recovered by stepping back over the patched jump code.
pop ecx
sub ecx, ASM_KERNEL_JUMP.Size
mov dword [ebx + ASM_SUPPORT_STATE.KernelEntry], ecx
; Store 32-bit state to be able to recover it later.
sgdt [ebx + ASM_SUPPORT_STATE.SavedGDTR32]
sidt [ebx + ASM_SUPPORT_STATE.SavedIDTR32]
mov word [ebx + ASM_SUPPORT_STATE.SavedCS32], cs
mov word [ebx + ASM_SUPPORT_STATE.SavedDS32], ds
mov word [ebx + ASM_SUPPORT_STATE.SavedES32], es
mov word [ebx + ASM_SUPPORT_STATE.SavedFS32], fs
mov word [ebx + ASM_SUPPORT_STATE.SavedGS32], gs
;
; Transition to 64-bit mode...
; boot.efi disables interrupts for us, so we are safe.
;
; Load saved UEFI GDT and IDT.
; They will become active after code segment is changed in long jump.
lgdt [ebx + ASM_SUPPORT_STATE.SavedGDTR]
lidt [ebx + ASM_SUPPORT_STATE.SavedIDTR]
; Enable the 64-bit page-translation-table entries by setting CR4.PAE=1.
mov eax, cr4
bts eax, 5
mov cr4, eax
; Set the long-mode page tables by reusing saved UEFI tables.
mov eax, dword [ebx + ASM_SUPPORT_STATE.SavedCR3]
mov cr3, eax
; Enable long mode (set EFER.LME=1).
mov ecx, 0C0000080h ; EFER MSR number.
rdmsr ; Read EFER.
bts eax, 8 ; Set LME=1.
wrmsr ; Write EFER.
; Enable paging to activate long mode (set CR0.PG=1).
mov eax, cr0 ; Read CR0.
bts eax, 31 ; Set PG=1.
mov cr0, eax ; Write CR0.
; Jump to the 64-bit code segment.
mov ax, word [ebx + ASM_SUPPORT_STATE.SavedCS]
push eax
call LONG_RET32
BITS 64
;
; Done transitioning to 64-bit code.
;
; Set other segment selectors. In most types of firmware, all segment registers
; apart from cs point to the same selector, but we must not rely on it.
mov ax, word [rbx + ASM_SUPPORT_STATE.SavedDS]
mov ds, ax
mov ax, word [rbx + ASM_SUPPORT_STATE.SavedES]
mov es, ax
mov ax, word [rbx + ASM_SUPPORT_STATE.SavedFS]
mov fs, ax
mov ax, word [rbx + ASM_SUPPORT_STATE.SavedGS]
mov gs, ax
; boot.efi preserves ss selector from UEFI GDT to just use UEFI stack (and memory) as is.
; For this reason just assume the stack is usable, but align it for 64-bit compatibility.
and rsp, 0FFFFFFFFFFFFFFF0h
; Call AppleMapPrepareKernelState (rcx = rax = bootArgs, rdx = 0 = 32 bit kernel jump).
mov rcx, rdi
xor edx, edx
push rdx
push rdx
push rdx
push rcx
call ASM_PFX(AppleMapPrepareKernelState)
; Return value in rax is bootArgs pointer again.
mov rdi, rax
;
; Transition back to 32-bit.
;
; Load saved 32-bit GDT.
lgdt [rbx + ASM_SUPPORT_STATE.SavedGDTR32]
; Jump to the 32-bit code segment.
mov ax, word [rbx + ASM_SUPPORT_STATE.SavedCS32]
push rax
call LONG_RET64
BITS 32
;
; Done transitioning to 32-bit code.
;
; Disable paging (set CR0.PG=0).
mov eax, cr0 ; Read CR0.
btr eax, 31 ; Set PG=0.
mov cr0, eax ; Write CR0.
; Disable long mode (set EFER.LME=0).
mov ecx, 0C0000080h ; EFER MSR number.
rdmsr ; Read EFER.
btr eax, 8 ; Set LME=0.
wrmsr ; Write EFER.
jmp AsmJumpFromKernel32Compatibility
AsmJumpFromKernel32Compatibility:
;
; We are in 32-bit protected mode, no paging.
;
; Reload saved 32 bit state data.
; Since boot.efi relies on the segment registers' shadow parts and preserves the ss value,
; which contains previously loaded GDT data, we are not allowed to update it later.
lidt [ebx + ASM_SUPPORT_STATE.SavedIDTR32]
mov ax, word [ebx + ASM_SUPPORT_STATE.SavedDS32]
mov ds, ax
mov ax, word [ebx + ASM_SUPPORT_STATE.SavedES32]
mov es, ax
mov ax, word [ebx + ASM_SUPPORT_STATE.SavedFS32]
mov fs, ax
mov ax, word [ebx + ASM_SUPPORT_STATE.SavedGS32]
mov gs, ax
; Jump back to the kernel passing boot arguments in eax.
mov eax, edi
mov edx, dword [ebx + ASM_SUPPORT_STATE.KernelEntry]
jmp edx
;------------------------------------------------------------------------------
; AsmJumpFromKernel64
;
; Callback from boot.efi in 64 bit mode.
; State is prepared for kernel: 64 bit, pointer to bootArgs in rax.
;------------------------------------------------------------------------------
AsmJumpFromKernel64:
BITS 64
; Load rbx with AsmState - we'll access our saved data with it.
mov ebx, dword [ASM_PFX(gOcAbcAsmStateAddr32)]
; Store the kernel entry point, recovered by stepping back over the patched jump code.
pop rcx
sub rcx, ASM_KERNEL_JUMP.Size
mov qword [rbx + ASM_SUPPORT_STATE.KernelEntry], rcx
; Call AppleMapPrepareKernelState (rcx = rax = bootArgs, rdx = 1 = 64-bit kernel jump).
mov rcx, rax
xor edx, edx
push rdx
push rdx
push rdx
push rcx
inc edx
call ASM_PFX(AppleMapPrepareKernelState)
; Jump back to the kernel passing boot arguments in rax.
mov rdx, [rbx + ASM_SUPPORT_STATE.KernelEntry]
jmp rdx

View File

@ -1,48 +0,0 @@
/** @file
Copyright (C) 2019, vit9696. All rights reserved.
All rights reserved.
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "../BootCompatInternal.h"
#include <Library/DebugLib.h>
VOID
AppleMapPlatformSaveState (
IN OUT ASM_SUPPORT_STATE *AsmState,
OUT ASM_KERNEL_JUMP *KernelJump
)
{
//
// Save current 64-bit state - will be used later in callback from kernel jump
// to be able to transition to 64-bit in case 32-bit kernel startup code is used.
//
AsmAppleMapPlatformSaveState (AsmState);
//
// Assembly state must fit into a 32-bit address as we may jump from 32-bit kernel
// startup code. This is used instead of GetBootCompatContext.
//
ASSERT ((UINT32)(UINTN) AsmState == (UINTN) AsmState);
gOcAbcAsmStateAddr32 = (UINT32)(UINTN) AsmState;
//
// Kernel trampoline for jumping to kernel.
//
ASSERT (
(UINT32)(UINTN) AsmAppleMapPlatformPrepareKernelState
== (UINTN) AsmAppleMapPlatformPrepareKernelState
);
KernelJump->MovInst = 0xB9;
KernelJump->Addr = (UINT32)(UINTN) AsmAppleMapPlatformPrepareKernelState;
KernelJump->CallInst = 0xD1FF;
}
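For comparison with the new 14-byte CALL_GATE_JUMP: the trampoline removed here overwrote the first seven bytes of the kernel entry point with mov ecx, imm32 followed by call rcx, which both limited the handler address to 32 bits and required saving and restoring the clobbered kernel bytes. A standalone sketch of that legacy layout (not library code):

#include <stdint.h>

#pragma pack(push, 1)
typedef struct {
  uint8_t  MovInst;    /* 0xB9            mov ecx, imm32                          */
  uint32_t Addr;       /* handler address, had to fit into 32 bits                */
  uint16_t CallInst;   /* 0xD1FF => FF D1: call rcx (call ecx in 32-bit mode);
                          the pushed return address lets the handler recover
                          the patched kernel entry point                          */
} LegacyKernelJumpSketch;
#pragma pack(pop)

_Static_assert (sizeof (LegacyKernelJumpSketch) == 7, "legacy trampoline is 7 bytes");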