[libunwind] Support rtld-c18n as the runtime linker.
This commit adds support for backtrace and exception handling in
libunwind when the process is running under the compartmentalization
runtime linker. Unwinding proceeds as usual until a trampoline is
encountered as the return address; this means that we are crossing a
compartment boundary and need to gather the unwind information from the
runtime linker. We do this by reading information from the executive
stack that the runtime linker populates for us in unw_getcontext.
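
For libunwind consumers nothing changes at the API level. A minimal
sketch of a standard local unwind loop, using only long-standing
libunwind calls (nothing below is specific to this commit), which now
transparently steps across compartment boundaries:

#include <libunwind.h>
#include <stdio.h>

// Walk the current thread's stack and print each frame's instruction
// pointer. Under rtld-c18n, frames whose return address is a
// compartment-switch trampoline are resolved via the executive stack.
static void print_backtrace(void) {
  unw_context_t context;
  unw_cursor_t cursor;
  unw_getcontext(&context);          // captures register (and stack) state
  unw_init_local(&cursor, &context); // also resets the per-thread stack table
  while (unw_step(&cursor) > 0) {
    unw_word_t ip;
    unw_get_reg(&cursor, UNW_REG_IP, &ip);
    printf("ip: %#lx\n", (unsigned long)ip);
  }
}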

To implement this correctly, however, an additional class called
CompartmentInfo is needed. This class abstracts a thread-local hash map
that maintains restricted-stack mappings during unwinding without
modifying anything on the executive stack. Currently, the hash map uses
the heap, making it impossible to compile this code without heap
support. Furthermore, the hash map may still hold allocations at the
end of the process. This happens because libunwind has no public API
equivalent of unw_teardown(), making it impossible to enforce a reset
of the table once a thread has finished unwinding; the problem can
appear with actions like _Unwind_Backtrace(). The leak only persists
until the same thread's next call to unw_init_local(), at which point
the table is reset and anything that might have been allocated is
freed. Exception handling code does not have this problem because
unw_resume() can ensure that the table is freed before resuming.
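
As an illustration of the backtrace case, the walk below (a sketch
using only the standard C unwind ABI) can allocate table entries when
it crosses compartments, and nothing frees them until this thread next
enters unw_init_local():

#include <unwind.h>
#include <stdio.h>

static _Unwind_Reason_Code trace_cb(struct _Unwind_Context *ctx, void *arg) {
  int *frame = (int *)arg;
  // _Unwind_GetIP() yields the instruction pointer for this frame.
  printf("#%d ip=%#lx\n", (*frame)++, (unsigned long)_Unwind_GetIP(ctx));
  return _URC_NO_REASON;
}

static void backtrace_now(void) {
  int frame = 0;
  // No teardown hook runs after this returns, so any stack-table entries
  // allocated while unwinding across compartments remain until this
  // thread's next unw_init_local().
  _Unwind_Backtrace(trace_cb, &frame);
}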

There are two ways to compile this code:
 - LIBUNWIND_SANDBOX_OTYPES only;
 - LIBUNWIND_SANDBOX_OTYPES and LIBUNWIND_SANDBOX_HARDENED.

When LIBUNWIND_SANDBOX_HARDENED is specified, every stack pointer, frame
pointer, and callee-saved register is sealed in the unwind register
context. This is to prevent leakage of capabilities through the register
context as much as possible. There are two exceptions to this:
 - When unw_set_reg() is called by a libunwind consumer, the caller
   might expect to be able to retrieve the capability it stored in the
   context, and sealing it would break the API semantics;
 - When the capability in the context is a sentry, as sentries cannot
   be sealed using an otype.

The otype allocated to libunwind is provided by the runtime linker via
the _rtld_unw_getsealer function.
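
As a rough illustration of the hardened mode, here is a minimal sketch
of the sealing decision. The helper name is hypothetical, the real
logic lives in the register-context code, and the sketch assumes the
CHERI Clang builtins __builtin_cheri_seal and __builtin_cheri_sealed_get:

// Hypothetical helper (not this commit's actual code): seal a capability
// before it is stored into the unwind register context under
// _LIBUNWIND_SANDBOX_HARDENED.
static uintcap_t sealIfPossible(uintcap_t cap, uintcap_t sealer) {
  // The in-tree stub returns (uintptr_t)-1 when no runtime-linker sealer
  // exists, in which case sealing is skipped entirely.
  if (sealer == (uintcap_t)-1)
    return cap;
  // Sentries are already sealed and cannot be re-sealed with an otype.
  if (__builtin_cheri_sealed_get((void *)cap))
    return cap;
  return (uintcap_t)__builtin_cheri_seal((void *)cap, (void *)sealer);
}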
dstolfa committed Mar 4, 2024
1 parent fa821e3 commit cfdbc19
Showing 12 changed files with 566 additions and 35 deletions.
14 changes: 11 additions & 3 deletions libunwind/include/__libunwind_config.h
@@ -11,6 +11,14 @@

#define _LIBUNWIND_VERSION 15000

+#if defined(_LIBUNWIND_SANDBOX_HARDENED) && !defined(_LIBUNWIND_SANDBOX_OTYPES)
+#error "_LIBUNWIND_SANDBOX_HARDENED is invalid without a sandboxing mechanism"
+#endif
+
+#if defined(_LIBUNWIND_SANDBOX_OTYPES) && defined(_LIBUNWIND_NO_HEAP)
+#error "_LIBUNWIND_NO_HEAP cannot be used with _LIBUNWIND_SANDBOX_OTYPES"
+#endif

#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
!defined(__ARM_DWARF_EH__) && !defined(__SEH__)
#define _LIBUNWIND_ARM_EHABI
@@ -20,7 +28,7 @@
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_X86_64 32
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_PPC 112
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_PPC64 116
-#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_MORELLO 229
+#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_MORELLO 230
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_ARM64 95
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_ARM 287
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_OR1K 32
@@ -76,11 +84,11 @@
# elif defined(__aarch64__)
# define _LIBUNWIND_TARGET_AARCH64 1
# if defined(__CHERI_PURE_CAPABILITY__)
-# define _LIBUNWIND_CONTEXT_SIZE 100
+# define _LIBUNWIND_CONTEXT_SIZE 102
# if defined(__SEH__)
# error "Pure-capability aarch64 SEH not supported"
# else
-# define _LIBUNWIND_CURSOR_SIZE 124
+# define _LIBUNWIND_CURSOR_SIZE 126
# endif
# define _LIBUNWIND_HIGHEST_DWARF_REGISTER _LIBUNWIND_HIGHEST_DWARF_REGISTER_MORELLO
# else
3 changes: 2 additions & 1 deletion libunwind/include/libunwind.h
@@ -678,7 +678,8 @@ enum {
UNW_ARM64_C30 = 228,
UNW_ARM64_CLR = 228,
UNW_ARM64_C31 = 229,
-UNW_ARM64_CSP = 229
+UNW_ARM64_CSP = 229,
+UNW_ARM64_ECSP = 230,
};

// 32-bit ARM registers. Numbers match DWARF for ARM spec #3.1 Table 1.
26 changes: 25 additions & 1 deletion libunwind/src/AddressSpace.hpp
@@ -22,6 +22,7 @@
#include "dwarf2.h"
#include "EHHeaderParser.hpp"
#include "Registers.hpp"
#include "unwind_cheri.h"

// We can no longer include C++ headers so duplicate std::min() here
template<typename T> T uw_min(T a, T b) { return a < b ? a : b; }
@@ -320,6 +321,9 @@ class _LIBUNWIND_HIDDEN LocalAddressSpace {
return get<v128>(addr);
}
capability_t getCapability(pint_t addr) { return get<capability_t>(addr); }
+#if defined(__CHERI_PURE_CAPABILITY__) && defined(_LIBUNWIND_SANDBOX_OTYPES)
+static uintcap_t getUnwindSealer();
+#endif // __CHERI_PURE_CAPABILITY__ && _LIBUNWIND_SANDBOX_OTYPES
__attribute__((always_inline))
uintptr_t getP(pint_t addr);
uint64_t getRegister(pint_t addr);
@@ -408,6 +412,25 @@ inline uint64_t LocalAddressSpace::getRegister(pint_t addr) {
#endif
}

+#if defined(__CHERI_PURE_CAPABILITY__) && defined(_LIBUNWIND_SANDBOX_OTYPES)
+extern "C" {
+/// Call into the RTLD to get a sealer capability. This sealer will be used to
+/// seal information in the unwinding context if _LIBUNWIND_SANDBOX_HARDENED is
+/// specified.
+uintptr_t _rtld_unw_getsealer(void);
+uintptr_t __rtld_unw_getsealer();
+_LIBUNWIND_HIDDEN uintptr_t __rtld_unw_getsealer() {
+  return (uintptr_t)-1;
+}
+_LIBUNWIND_WEAK_ALIAS(__rtld_unw_getsealer, _rtld_unw_getsealer)
+}
+
+/// C++ wrapper for calling into RTLD.
+inline uintcap_t LocalAddressSpace::getUnwindSealer() {
+  return _rtld_unw_getsealer();
+}
+#endif // __CHERI_PURE_CAPABILITY__ && _LIBUNWIND_SANDBOX_OTYPES

/// Read a ULEB128 into a 64-bit word.
inline uint64_t LocalAddressSpace::getULEB128(pint_t &addr, pint_t end) {
const uint8_t *p = (uint8_t *)addr;
@@ -932,7 +955,8 @@ inline bool LocalAddressSpace::findUnwindSections(pc_t targetAddr,
return true;
#elif defined(_LIBUNWIND_USE_DL_ITERATE_PHDR)
dl_iterate_cb_data cb_data = {this, &info, targetAddr};
CHERI_DBG("Calling dl_iterate_phdr()\n");
CHERI_DBG("Calling dl_iterate_phdr(0x%jx)\n",
(uintmax_t)targetAddr.address());
int found = dl_iterate_phdr(findUnwindSectionsByPhdr, &cb_data);
return static_cast<bool>(found);
#endif
1 change: 1 addition & 0 deletions libunwind/src/CMakeLists.txt
@@ -80,6 +80,7 @@ set(LIBUNWIND_HEADERS
Registers.hpp
RWMutex.hpp
Unwind-EHABI.h
+unwind_cheri.h
UnwindCursor.hpp
../include/libunwind.h
../include/unwind.h
131 changes: 131 additions & 0 deletions libunwind/src/CompartmentInfo.hpp
@@ -0,0 +1,131 @@
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//
// Abstracts unwind information when used with a compartmentalizing runtime
// linker.
//
//===----------------------------------------------------------------------===//

#ifndef __COMPARTMENT_INFO_HPP__
#define __COMPARTMENT_INFO_HPP__

#include "AddressSpace.hpp"

#if defined(__CHERI_PURE_CAPABILITY__) && defined(_LIBUNWIND_SANDBOX_OTYPES)
namespace libunwind {
class _LIBUNWIND_HIDDEN CompartmentInfo {
  // CompartmentInfo understands how compartments are represented when running
  // with a sandboxing runtime linker and is responsible for simulating how the
  // runtime linker juggles restricted stacks.
  //
  // XXX: Have this call into rtld to get a notion of a "compartment ID" as
  // opposed to understanding how rtld juggles stacks under the hood?
  struct StackTableEntry {
    uintcap_t key = kInvalidRCSP;
    uintcap_t value = kInvalidRCSP;
    StackTableEntry *next = nullptr;
  };
  static const uint32_t kStackTableSize = 1 << 10; // XXX: Is this a good size?
  static const uint32_t kStackTableMask = kStackTableSize - 1;
  // This stack table is "eventually not leaky". If a thread T1 calls into
  // unw_step(), e.g. via _Unwind_Backtrace(), the heap-allocated data will
  // remain allocated until the next call from T1 into _Unwind_Backtrace(),
  // which will call unw_init_local() and reset the table.
  //
  // For exceptions, the table will be reset before we resume execution in the
  // catch and will not leak any memory.
  //
  // stackTable : start of restricted stack -> top of next caller's stack
  StackTableEntry stackTable[kStackTableSize];

  static uint32_t stackHash(uintcap_t stack) {
    ptraddr_t stackAddr = (ptraddr_t)stack;
    return stackAddr & kStackTableMask;
  }
  StackTableEntry *findStack(uintcap_t startOfRCSP) {
    uint32_t hashIndex = stackHash(startOfRCSP);
    CHERI_DBG("findStack(): hashIndex = %u\n", hashIndex);
    assert(hashIndex < kStackTableSize);
    StackTableEntry *entry = &stackTable[hashIndex];
    assert(entry != nullptr);
    CHERI_DBG("findStack(): looking for 0x%lx\n", (ptraddr_t)startOfRCSP);
    while (entry && entry->key != startOfRCSP) {
      CHERI_DBG("findStack(): entry->key = 0x%lx\n", (ptraddr_t)entry->key);
      entry = entry->next;
    }
    return entry;
  }
  bool insertNewStack(uintcap_t k, uintcap_t v) {
    uint32_t hashIndex = stackHash(k);
    StackTableEntry *entry = &stackTable[hashIndex];
    assert(entry != nullptr);
    if (entry->key == kInvalidRCSP) {
      entry->key = k;
      entry->value = v;
      return true;
    }
    while (entry->next) {
      entry = entry->next;
    }
    StackTableEntry *newEntry =
        (StackTableEntry *)malloc(sizeof(StackTableEntry));
    newEntry->key = k;
    newEntry->value = v;
    newEntry->next = nullptr;
    entry->next = newEntry;
    CHERI_DBG("insertNewStack(): 0x%lx ==> 0x%lx\n", (ptraddr_t)k,
              (ptraddr_t)v);
    return true;
  }

public:
  static const uintcap_t kInvalidRCSP = (uintcap_t)0;
  static thread_local CompartmentInfo sThisCompartmentInfo;
  uintcap_t getAndUpdateRestrictedStack(uintcap_t startOfRCSP,
                                        uintcap_t oldCallerSPTop,
                                        uintcap_t nextRCSP) {
    CHERI_DBG("getAndUpdateRestrictedStack(0x%lx, 0x%lx)\n",
              (ptraddr_t)startOfRCSP, (ptraddr_t)nextRCSP);
    StackTableEntry *entry = findStack(startOfRCSP);
    if (entry == nullptr) {
      // If there is no entry in our table for a given restricted stack, we
      // will simply return nextRCSP, which the runtime linker gave us.
      CHERI_DBG("stack not found in compartment info, adding 0x%lx ==> 0x%lx\n",
                (ptraddr_t)startOfRCSP, (ptraddr_t)oldCallerSPTop);
      insertNewStack(startOfRCSP, oldCallerSPTop);
      return nextRCSP;
    }
    // There's already an entry for the restricted stack. Return the next
    // restricted stack we expect to unwind or resume from and update the
    // value to the next one.
    uintcap_t stackToReturn = entry->value;
    entry->value = oldCallerSPTop;
    CHERI_DBG("getAndUpdateRestrictedStack(): return 0x%lx\n",
              (ptraddr_t)stackToReturn);
    return stackToReturn;
  }
  void reset(void) {
    // Reinitialize the table to 0 and free anything we might have allocated
    // on the heap.
    for (size_t i = 0; i < kStackTableSize; i++) {
      StackTableEntry *entry = &stackTable[i];
      assert(entry != nullptr);
      StackTableEntry *heapEntry = entry->next;
      while (heapEntry) {
        StackTableEntry *temp = heapEntry;
        heapEntry = heapEntry->next;
        free(temp);
      }
      entry->value = kInvalidRCSP;
      entry->key = kInvalidRCSP;
      entry->next = nullptr;
    }
  }
};
} // namespace libunwind
#endif // __CHERI_PURE_CAPABILITY__ && _LIBUNWIND_SANDBOX_OTYPES
#endif // __COMPARTMENT_INFO_HPP__
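
To tie the pieces together, a hedged sketch of how a cursor might use
this class; the function names below (other than CompartmentInfo's own
members) are illustrative, not the commit's actual call sites:

#include "CompartmentInfo.hpp"

namespace libunwind {
// Illustrative only: starting a new walk resets the thread-local table,
// freeing any chained heap entries left over from a previous backtrace.
inline void onInitLocal() {
  CompartmentInfo::sThisCompartmentInfo.reset();
}

// Illustrative only: when a step returns through a compartment-switch
// trampoline, map the current restricted stack to the stack unwinding
// should continue on. The first crossing records the mapping and uses
// the rtld-provided nextRCSP; later visits return the remembered stack.
inline uintcap_t onTrampolineCrossing(uintcap_t startOfRCSP,
                                      uintcap_t oldCallerSPTop,
                                      uintcap_t nextRCSP) {
  CompartmentInfo &ci = CompartmentInfo::sThisCompartmentInfo;
  return ci.getAndUpdateRestrictedStack(startOfRCSP, oldCallerSPTop,
                                        nextRCSP);
}
} // namespace libunwind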