NCE: C++ ftw

MrPurple666 2025-05-23 21:47:37 -03:00
parent 2e422ab082
commit df3c5036d7


@@ -7,47 +7,48 @@
 namespace Core {
 HaltReason ArmNce::ReturnToRunCodeByTrampoline(void* tpidr, GuestContext* ctx, uint64_t trampoline_addr) {
     // This function needs to be implemented with inline assembly because it
     // involves saving/restoring registers and modifying the stack pointer
     HaltReason result;
     asm volatile(
         // Back up host sp and tpidr_el0
-        "mov x3, sp\n"
-        "mrs x4, tpidr_el0\n"
+        "mov x3, sp\n"                     // Save current stack pointer
+        "mrs x4, tpidr_el0\n"              // Save current thread-local storage pointer
         // Load guest sp
-        "ldr x5, [%1, %4]\n"
-        "mov sp, x5\n"
-        // Offset GuestContext pointer to the host member
-        "add x5, %1, %5\n"
+        "ldr x5, [%1, %4]\n"               // Load guest's stack pointer from GuestContext
+        "mov sp, x5\n"                     // Switch to guest's stack
+        // Setup pointer to host context for saving registers
+        "add x5, %1, %5\n"                 // Calculate pointer to HostContext
         // Save original host sp and tpidr_el0 to host context
-        "stp x3, x4, [x5, %6]\n"
-        // Save all callee-saved host GPRs
-        "stp x19, x20, [x5, #0x0]\n"
-        "stp x21, x22, [x5, #0x10]\n"
-        "stp x23, x24, [x5, #0x20]\n"
-        "stp x25, x26, [x5, #0x30]\n"
-        "stp x27, x28, [x5, #0x40]\n"
-        "stp x29, x30, [x5, #0x50]\n"
-        // Save all callee-saved host FPRs
-        "stp q8, q9, [x5, #0x0 + %7]\n"
-        "stp q10, q11, [x5, #0x20 + %7]\n"
-        "stp q12, q13, [x5, #0x40 + %7]\n"
-        "stp q14, q15, [x5, #0x60 + %7]\n"
-        // Load guest tpidr_el0 from argument
-        "msr tpidr_el0, %0\n"
-        // Tail call the trampoline to restore guest state
-        "blr %2\n"
-        // Return value will be in x0, store it in result
-        "mov %3, x0\n"
+        "stp x3, x4, [x5, %6]\n"           // Store sp and tpidr_el0 pair
+        // Save all callee-saved host GPRs (x19-x30)
+        "stp x19, x20, [x5, #0x0]\n"       // host_saved_regs[0-1]
+        "stp x21, x22, [x5, #0x10]\n"      // host_saved_regs[2-3]
+        "stp x23, x24, [x5, #0x20]\n"      // host_saved_regs[4-5]
+        "stp x25, x26, [x5, #0x30]\n"      // host_saved_regs[6-7]
+        "stp x27, x28, [x5, #0x40]\n"      // host_saved_regs[8-9]
+        "stp x29, x30, [x5, #0x50]\n"      // host_saved_regs[10-11] (fp and lr)
+        // Save all callee-saved host FPRs (q8-q15)
+        "stp q8, q9, [x5, #0x0 + %7]\n"    // host_saved_vregs[0-1]
+        "stp q10, q11, [x5, #0x20 + %7]\n" // host_saved_vregs[2-3]
+        "stp q12, q13, [x5, #0x40 + %7]\n" // host_saved_vregs[4-5]
+        "stp q14, q15, [x5, #0x60 + %7]\n" // host_saved_vregs[6-7]
+        // Setup guest context - switch thread-local storage pointer
+        "msr tpidr_el0, %0\n"              // Set tpidr_el0 to the thread parameters
+        // Call the trampoline which will restore guest registers and return to guest code
+        "blr %2\n"                         // Jump to trampoline function
+        // When control returns here, save the result
+        "mov %3, x0\n"                     // Save halt reason to result
         : "+r"(tpidr), "+r"(ctx), "+r"(trampoline_addr), "=r"(result)
         : "i"(GuestContextSp), "i"(GuestContextHostContext),
@@ -59,20 +60,29 @@ HaltReason ArmNce::ReturnToRunCodeByTrampoline(void* tpidr, GuestContext* ctx, u
 }
 
 HaltReason ArmNce::ReturnToRunCodeByExceptionLevelChange(int tid, void* tpidr) {
-    // This function uses syscall to send a signal
-    register long x8 asm("x8") = __NR_tkill;
-    register long x0 asm("x0") = tid;
-    register long x1 asm("x1") = ReturnToRunCodeByExceptionLevelChangeSignal;
-    register void* x9 asm("x9") = tpidr; // Preserve tpidr
-    asm volatile(
-        "svc #0\n"
-        : "+r"(x0)
-        : "r"(x8), "r"(x1), "r"(x9)
-        : "memory", "cc"
-    );
-    // Should never reach here, but if it does, return BreakLoop
+    // This function sends a signal to the specified thread using the tkill syscall
+    // The thread-local storage pointer is preserved across the syscall
+
+    // The original assembly implementation was:
+    // register long x8 asm("x8") = __NR_tkill;
+    // register long x0 asm("x0") = tid;
+    // register long x1 asm("x1") = ReturnToRunCodeByExceptionLevelChangeSignal;
+    // register void* x9 asm("x9") = tpidr; // Preserve tpidr
+    // asm volatile("svc #0\n" : "+r"(x0) : "r"(x8), "r"(x1), "r"(x9) : "memory", "cc");
+    // We can achieve the same with a direct syscall in C++
+
+    // Store the tpidr_el0 value so we can restore it after the syscall
+    uint64_t current_tpidr;
+    asm volatile("mrs %0, tpidr_el0" : "=r"(current_tpidr) :: "memory");
+
+    // Call the tkill syscall to send the signal
+    syscall(__NR_tkill, tid, ReturnToRunCodeByExceptionLevelChangeSignal);
+
+    // Restore tpidr_el0 after the syscall
+    asm volatile("msr tpidr_el0, %0" :: "r"(current_tpidr) : "memory");
+
+    // Should never reach here (the signal handler should take over),
+    // but if it does, return BreakLoop
     return HaltReason::BreakLoop;
 }
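
The rewrite above hinges on one detail: libc's syscall() wrapper may touch host TLS, while tpidr_el0 still points at the guest thread parameters at this point, so the register is saved and restored around the call. A minimal sketch of that pattern as a reusable guard (a hypothetical helper, not part of this commit):

    #include <cstdint>
    #include <sys/syscall.h>
    #include <unistd.h>

    // Saves tpidr_el0 on construction and restores it on destruction, so any
    // TLS-dependent host code in between runs without corrupting the pointer.
    class ScopedTpidrEl0 {
    public:
        ScopedTpidrEl0() { asm volatile("mrs %0, tpidr_el0" : "=r"(m_saved) :: "memory"); }
        ~ScopedTpidrEl0() { asm volatile("msr tpidr_el0, %0" :: "r"(m_saved) : "memory"); }
    private:
        uint64_t m_saved{};
    };

    // Usage mirroring the function above:
    long SignalThread(int tid, int sig) {
        ScopedTpidrEl0 guard; // preserve the current thread pointer across libc
        return syscall(__NR_tkill, tid, sig);
    }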
@@ -85,14 +95,14 @@ void ArmNce::ReturnToRunCodeByExceptionLevelChangeSignalHandler(int sig, void* i
     auto* guest_ctx = static_cast<GuestContext*>(params->native_context);
 
     // Save the old value of tpidr_el0
-    uint64_t old_tpidr;
+    uintptr_t old_tpidr;
     asm volatile("mrs %0, tpidr_el0" : "=r"(old_tpidr));
 
     // Store it in the guest context
     guest_ctx->host_ctx.host_tpidr_el0 = reinterpret_cast<void*>(old_tpidr);
 
     // Set our new tpidr_el0
-    asm volatile("msr tpidr_el0, %0" : : "r"(tpidr) : "memory");
+    asm volatile("msr tpidr_el0, %0" : : "r"(tpidr));
 
     // Unlock the context
     UnlockThreadParameters(tpidr);
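
The single-line mrs/msr idiom used here recurs throughout the file. As a sketch, it is equivalent to a pair of small helpers like these (hypothetical names, not in the commit):

    #include <cstdint>

    // Read the current EL0 thread pointer.
    inline uintptr_t GetTpidrEl0() {
        uintptr_t value;
        asm volatile("mrs %0, tpidr_el0" : "=r"(value));
        return value;
    }

    // Install a new EL0 thread pointer; the "memory" clobber keeps the compiler
    // from reordering TLS-dependent accesses across the switch.
    inline void SetTpidrEl0(uintptr_t value) {
        asm volatile("msr tpidr_el0, %0" :: "r"(value) : "memory");
    }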
@@ -100,135 +110,72 @@ void ArmNce::ReturnToRunCodeByExceptionLevelChangeSignalHandler(int sig, void* i
     // Returning from here will enter the guest
 }
 
-void ArmNce::BreakFromRunCodeSignalHandler(int sig, void* info, void* raw_context) {
-    // Check if we have the correct TLS magic
-    uint64_t tpidr_value;
-    uint32_t magic_value;
-    asm volatile(
-        "mrs %0, tpidr_el0\n"
-        : "=r"(tpidr_value)
-        :
-        : "memory"
-    );
-    auto* tpidr = reinterpret_cast<Kernel::KThread::NativeExecutionParameters*>(tpidr_value);
-    magic_value = tpidr->magic;
-    if (magic_value != TlsMagic) {
-        // Incorrect TLS magic, so this is a spurious signal
+void ArmNce::BreakFromRunCodeSignalHandler(int sig, void* raw_info, void* raw_context) {
+    // Extract the guest context from tpidr_el0
+    uint64_t tpidr_el0;
+    asm volatile("mrs %0, tpidr_el0" : "=r"(tpidr_el0));
+    auto* tpidr = reinterpret_cast<Kernel::KThread::NativeExecutionParameters*>(tpidr_el0);
+
+    // Check if the magic value is correct
+    if (tpidr->magic != TpidrEl0TlsMagic) {
         return;
     }
-    // Correct TLS magic, so this is a guest interrupt
-    // Restore host tpidr_el0
+
+    // Save the guest context and unlock the thread
     auto* guest_ctx = static_cast<GuestContext*>(tpidr->native_context);
-    uint64_t host_tpidr = reinterpret_cast<uint64_t>(guest_ctx->host_ctx.host_tpidr_el0);
-    asm volatile(
-        "msr tpidr_el0, %0\n"
-        :
-        : "r"(host_tpidr)
-        : "memory"
-    );
-    // Save the guest context
     SaveGuestContext(guest_ctx, raw_context);
+    UnlockThreadParameters(tpidr);
+
+    // Exit from running guest code by marking is_running as false
+    tpidr->is_running = false;
 }
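
For context, handlers with this (int, void*, void*) shape are registered elsewhere in the NCE setup via sigaction with SA_SIGINFO. A hedged sketch of such a registration, using a placeholder handler and signal number:

    #include <csignal>

    extern "C" void GuestBreakHandler(int sig, siginfo_t* info, void* raw_context); // placeholder

    void InstallHandler(int signum) {
        struct sigaction sa{};
        sa.sa_sigaction = GuestBreakHandler;
        sa.sa_flags = SA_SIGINFO | SA_ONSTACK; // run on the alternate signal stack
        sigemptyset(&sa.sa_mask);
        sigaction(signum, &sa, nullptr);
    }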
-void ArmNce::GuestAlignmentFaultSignalHandler(int sig, void* info, void* raw_context) {
-    // Check if we have the correct TLS magic
-    uint64_t tpidr_value;
-    uint32_t magic_value;
-    asm volatile(
-        "mrs %0, tpidr_el0\n"
-        : "=r"(tpidr_value)
-        :
-        : "memory"
-    );
-    auto* tpidr = reinterpret_cast<Kernel::KThread::NativeExecutionParameters*>(tpidr_value);
-    magic_value = tpidr->magic;
-    if (magic_value != TlsMagic) {
-        // Incorrect TLS magic, so this is a host fault
-        HandleHostAlignmentFault(sig, info, raw_context);
+void ArmNce::GuestAlignmentFaultSignalHandler(int sig, void* raw_info, void* raw_context) {
+    // Extract the guest context from tpidr_el0
+    uint64_t tpidr_el0;
+    asm volatile("mrs %0, tpidr_el0" : "=r"(tpidr_el0));
+    auto* tpidr = reinterpret_cast<Kernel::KThread::NativeExecutionParameters*>(tpidr_el0);
+
+    // Check if the magic value is correct and the context is locked
+    if (tpidr->magic != TpidrEl0TlsMagic || tpidr->lock.load(std::memory_order_relaxed) != SpinLockLocked) {
+        // Not our context, call original handler
+        HandleHostAlignmentFault(sig, raw_info, raw_context);
         return;
     }
-    // Correct TLS magic, so this is a guest fault
+
     auto* guest_ctx = static_cast<GuestContext*>(tpidr->native_context);
-    uint64_t guest_tpidr = tpidr_value;
-    uint64_t host_tpidr = reinterpret_cast<uint64_t>(guest_ctx->host_ctx.host_tpidr_el0);
-    // Restore host tpidr_el0
-    asm volatile(
-        "msr tpidr_el0, %0\n"
-        :
-        : "r"(host_tpidr)
-        : "memory"
-    );
-    // Call the handler
-    bool restore_guest = HandleGuestAlignmentFault(guest_ctx, info, raw_context);
-    // If the handler returned true, restore guest tpidr_el0
-    if (restore_guest) {
-        asm volatile(
-            "msr tpidr_el0, %0\n"
-            :
-            : "r"(guest_tpidr)
-            : "memory"
-        );
+
+    // Call the handler and check if we should restore the guest context
+    if (HandleGuestAlignmentFault(guest_ctx, raw_info, raw_context)) {
+        // Restore the guest context
+        tpidr_el0 = reinterpret_cast<uint64_t>(RestoreGuestContext(raw_context));
+        asm volatile("msr tpidr_el0, %0" :: "r"(tpidr_el0));
    }
 }
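
Both fault handlers dispatch on the same two fields, magic and lock, before trusting tpidr_el0 at all. A sketch of the assumed shape of Kernel::KThread::NativeExecutionParameters and the shared test (field types and ordering are assumptions based on the usage above):

    #include <atomic>
    #include <cstdint>

    struct NativeExecutionParametersSketch {
        uint32_t magic;              // compared against TpidrEl0TlsMagic
        std::atomic<uint32_t> lock;  // SpinLockLocked while guest code is running
        void* native_context;        // the GuestContext for this thread
        bool is_running;             // cleared by BreakFromRunCodeSignalHandler
    };

    // True only when the faulting thread was genuinely executing guest code.
    inline bool IsGuestFault(const NativeExecutionParametersSketch* tpidr,
                             uint32_t tls_magic, uint32_t locked_value) {
        return tpidr->magic == tls_magic &&
               tpidr->lock.load(std::memory_order_relaxed) == locked_value;
    }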
-void ArmNce::GuestAccessFaultSignalHandler(int sig, void* info, void* raw_context) {
-    // Check if we have the correct TLS magic
-    uint64_t tpidr_value;
-    uint32_t magic_value;
-    asm volatile(
-        "mrs %0, tpidr_el0\n"
-        : "=r"(tpidr_value)
-        :
-        : "memory"
-    );
-    auto* tpidr = reinterpret_cast<Kernel::KThread::NativeExecutionParameters*>(tpidr_value);
-    magic_value = tpidr->magic;
-    if (magic_value != TlsMagic) {
-        // Incorrect TLS magic, so this is a host fault
-        HandleHostAccessFault(sig, info, raw_context);
+void ArmNce::GuestAccessFaultSignalHandler(int sig, void* raw_info, void* raw_context) {
+    // Extract the guest context from tpidr_el0
+    uint64_t tpidr_el0;
+    asm volatile("mrs %0, tpidr_el0" : "=r"(tpidr_el0) :: "memory");
+    auto* tpidr = reinterpret_cast<Kernel::KThread::NativeExecutionParameters*>(tpidr_el0);
+
+    // Check if the magic value is correct and the context is locked
+    if (tpidr->magic != TpidrEl0TlsMagic || tpidr->lock.load(std::memory_order_relaxed) != SpinLockLocked) {
+        // Not our context, call original handler
+        HandleHostAccessFault(sig, raw_info, raw_context);
         return;
     }
-    // Correct TLS magic, so this is a guest fault
+
     auto* guest_ctx = static_cast<GuestContext*>(tpidr->native_context);
-    uint64_t guest_tpidr = tpidr_value;
-    uint64_t host_tpidr = reinterpret_cast<uint64_t>(guest_ctx->host_ctx.host_tpidr_el0);
-    // Restore host tpidr_el0
-    asm volatile(
-        "msr tpidr_el0, %0\n"
-        :
-        : "r"(host_tpidr)
-        : "memory"
-    );
-    // Call the handler
-    bool restore_guest = HandleGuestAccessFault(guest_ctx, info, raw_context);
-    // If the handler returned true, restore guest tpidr_el0
-    if (restore_guest) {
-        asm volatile(
-            "msr tpidr_el0, %0\n"
-            :
-            : "r"(guest_tpidr)
-            : "memory"
-        );
+
+    // Call the handler and check if we should restore the guest context
+    if (HandleGuestAccessFault(guest_ctx, raw_info, raw_context)) {
+        // Restore the guest context
+        tpidr_el0 = reinterpret_cast<uint64_t>(RestoreGuestContext(raw_context));
+        asm volatile("msr tpidr_el0, %0" :: "r"(tpidr_el0) : "memory");
    }
 }
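
One subtle asymmetry against the alignment-fault handler above: this access-fault path adds a "memory" clobber to both the mrs and the msr. Illustratively, the clobber is what stops the optimizer from moving loads and stores across the asm (a sketch, not code from the commit):

    #include <cstdint>

    inline uint64_t ReadTpidrRelaxed() {
        uint64_t v;
        asm volatile("mrs %0, tpidr_el0" : "=r"(v)); // no clobber: surrounding accesses may be reordered
        return v;
    }

    inline uint64_t ReadTpidrOrdered() {
        uint64_t v;
        asm volatile("mrs %0, tpidr_el0" : "=r"(v) :: "memory"); // acts as a compiler barrier
        return v;
    }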