-
Notifications
You must be signed in to change notification settings - Fork 41
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
[libunwind] Support rtld-c18n as the runtime linker.
This commit adds support for backtrace and exception handling in libunwind when the process is running with the compartmentalization runtime linker. The unwinding process remains the same until a trampoline is encountered as the return address. This means that we are crossing compartment boundaries and we need to gather the unwind information from the runtime linker. We do this by reading information from the executive stack that the runtime linker populates for us in unw_getcontext. In order to implement this correctly, however, an additional class called CompartmentInfo is needed. This class abstracts away a thread-local hash map that maintains restricted stack mappings during unwinding without modifying anything on the executive stack. Currently, the hash map uses the heap, making it impossible to compile this code without heap support. Furthermore, the hash map can possibly leak memory at the end of the process. This happens because libunwind does not have a public API equivalent of unw_teardown(), making it impossible to enforce a reset of the table once a thread has finished unwinding. This problem can appear with actions like _Unwind_Backtrace(). This memory leak will only exist up until the next call to unw_init_local() by the same thread, at which point the table will get reset and anything that might have been allocated will get freed. Exception handling code does not have this problem because unw_resume() can ensure that the table gets freed before resuming. There are two ways to compile this code:
- LIBUNWIND_SANDBOX_OTYPES only;
- LIBUNWIND_SANDBOX_OTYPES and LIBUNWIND_SANDBOX_HARDENED.
When LIBUNWIND_SANDBOX_HARDENED is specified, every stack pointer, frame pointer and callee-saved register will be sealed in the unwind register context. This is to prevent leakage of capabilities through the register context as much as possible.
There are two exceptions to this:
- When unw_set_reg() is called from a libunwind consumer, the caller might expect to be able to retrieve the capability it stored in the context, and sealing it will break the API semantics;
- When the capability that is in the context is a sentry, since sentries can't be sealed using an otype.
The otype allocated to libunwind is given to libunwind by the runtime linker via the _rtld_unw_getsealer function.
- Loading branch information
Showing
12 changed files
with
566 additions
and
35 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,131 @@ | ||
//===----------------------------------------------------------------------===// | ||
// | ||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||
// See https://llvm.org/LICENSE.txt for license information. | ||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||
// | ||
// | ||
// Abstracts unwind information when used with a compartmentalizing runtime | ||
// linker. | ||
// | ||
//===----------------------------------------------------------------------===// | ||
|
||
#ifndef __COMPARTMENT_INFO_HPP__ | ||
#define __COMPARTMENT_INFO_HPP__ | ||
|
||
#include "AddressSpace.hpp" | ||
|
||
#if defined(__CHERI_PURE_CAPABILITY__) && defined(_LIBUNWIND_SANDBOX_OTYPES) | ||
namespace libunwind { | ||
class _LIBUNWIND_HIDDEN CompartmentInfo { | ||
// CompartmentInfo understands how compartments are represented when running | ||
// with a sandboxing runtime linker and is responsible for simulating how the | ||
// runtime linker juggles restricted stacks. | ||
// | ||
// XXX: Have this call into rtld to get a notion of a "compartent ID" as | ||
// opposed to understanding how rtld juggles stacks under the hood? | ||
struct StackTableEntry { | ||
uintcap_t key = kInvalidRCSP; | ||
uintcap_t value = kInvalidRCSP; | ||
StackTableEntry *next = nullptr; | ||
}; | ||
static const uint32_t kStackTableSize = 1 << 10; // XXX: Is this a good size? | ||
static const uint32_t kStackTableMask = kStackTableSize - 1; | ||
// This stack table is "eventually not leaky". If a thread T1 calls into | ||
// unw_step(), e.g. via _Unwind_Backtrace(), the heap-allocated data will | ||
// remain allocated until the next call from T1 into _Unwind_Backtrace(), | ||
// which will call unw_init_local() and reset the table. | ||
// | ||
// For exceptions, the table will be reset before we resume execution in the | ||
// catch and will not leak any memory. | ||
// | ||
// stackTable : start of restricted stack -> top of next caller's stack | ||
StackTableEntry stackTable[kStackTableSize]; | ||
|
||
static uint32_t stackHash(uintcap_t stack) { | ||
ptraddr_t stackAddr = (ptraddr_t)stack; | ||
return stackAddr & kStackTableMask; | ||
} | ||
StackTableEntry *findStack(uintcap_t startOfRCSP) { | ||
uint32_t hashIndex = stackHash(startOfRCSP); | ||
CHERI_DBG("findStack(): hashIndex = %u\n", hashIndex); | ||
assert(hashIndex < kStackTableSize); | ||
StackTableEntry *entry = &stackTable[hashIndex]; | ||
assert(entry != nullptr); | ||
CHERI_DBG("findStack(): looking for 0x%lx\n", (ptraddr_t)startOfRCSP); | ||
while (entry && entry->key != startOfRCSP) { | ||
CHERI_DBG("findStack(): entry->key = 0x%lx\n", (ptraddr_t)entry->key); | ||
entry = entry->next; | ||
} | ||
return entry; | ||
} | ||
bool insertNewStack(uintcap_t k, uintcap_t v) { | ||
uint32_t hashIndex = stackHash(k); | ||
StackTableEntry *entry = &stackTable[hashIndex]; | ||
assert(entry != nullptr); | ||
if (entry->key == kInvalidRCSP) { | ||
entry->key = k; | ||
entry->value = v; | ||
return true; | ||
} | ||
while (entry->next) { | ||
entry = entry->next; | ||
} | ||
StackTableEntry *newEntry = | ||
(StackTableEntry *)malloc(sizeof(StackTableEntry)); | ||
newEntry->key = k; | ||
newEntry->value = v; | ||
newEntry->next = nullptr; | ||
entry->next = newEntry; | ||
CHERI_DBG("insertNewStack(): 0x%lx ==> 0x%lx\n", (ptraddr_t)k, | ||
(ptraddr_t)v); | ||
return true; | ||
} | ||
|
||
public: | ||
static const uintcap_t kInvalidRCSP = (uintcap_t)0; | ||
static thread_local CompartmentInfo sThisCompartmentInfo; | ||
uintcap_t getAndUpdateRestrictedStack(uintcap_t startOfRCSP, | ||
uintcap_t oldCallerSPTop, | ||
uintcap_t nextRCSP) { | ||
CHERI_DBG("getAndUpdateRestrictedStack(0x%lx, 0x%lx)\n", | ||
(ptraddr_t)startOfRCSP, (ptraddr_t)nextRCSP); | ||
StackTableEntry *entry = findStack(startOfRCSP); | ||
if (entry == nullptr) { | ||
// If there is no entry in our table for a given restricted stack, we will | ||
// simply return nextRCSP which the runtime linker gave us. | ||
CHERI_DBG("stack not found in compartment info, adding 0x%lx ==> 0x%lx\n", | ||
(ptraddr_t)startOfRCSP, (ptraddr_t)oldCallerSPTop); | ||
insertNewStack(startOfRCSP, oldCallerSPTop); | ||
return nextRCSP; | ||
} | ||
// There's already an entry for the restricted stack. Return the next | ||
// restricted stack we expect to unwind or resume from and update the value | ||
// to the next one. | ||
uintcap_t stackToReturn = entry->value; | ||
entry->value = oldCallerSPTop; | ||
CHERI_DBG("getAndUpdateRestrictedStack(): return 0x%lx\n", | ||
(ptraddr_t)stackToReturn); | ||
return stackToReturn; | ||
} | ||
void reset(void) { | ||
// Reinitialize the table to 0 and free anything we might have allocated on | ||
// the heap. | ||
for (size_t i = 0; i < kStackTableSize; i++) { | ||
StackTableEntry *entry = &stackTable[i]; | ||
assert(entry != nullptr); | ||
StackTableEntry *heapEntry = entry->next; | ||
while (heapEntry) { | ||
StackTableEntry *temp = heapEntry; | ||
heapEntry = heapEntry->next; | ||
free(temp); | ||
} | ||
entry->value = kInvalidRCSP; | ||
entry->key = kInvalidRCSP; | ||
entry->next = nullptr; | ||
} | ||
} | ||
}; | ||
} // namespace libunwind | ||
#endif // __CHERI_PURE_CAPABILITY__ && _LIBUNWIND_SANDBOX_OTYPES | ||
#endif // __COMPARTMENT_INFO_HPP__ |
Oops, something went wrong.