Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[3.x -> 4.0] EOS-VM: merge memory issue fix from release/3.x #14

Merged
merged 3 commits into from
Jul 12, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
110 changes: 78 additions & 32 deletions include/eosio/vm/allocator.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -214,9 +214,13 @@ namespace eosio { namespace vm {

blocks_by_size_t::iterator allocate_segment(std::size_t min_size) {
std::size_t size = std::max(min_size, segment_size);
void* base = mmap(nullptr, size, PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
segment s{base, size};
// To avoid additional memory mappings being created during permission changes
// from PROT_EXEC to PROT_READ | PROT_WRITE, and back to PROT_EXEC,
// set permissions to PROT_READ | PROT_WRITE initially.
// The permissions will be changed to PROT_EXEC after executable code is copied.
void* base = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
EOS_VM_ASSERT(base != MAP_FAILED, wasm_bad_alloc, "failed to allocate jit segment");
segment s{base, size};
_segments.emplace_back(std::move(s));
bool success = false;
auto guard_1 = scope_guard{[&] { if(!success) { _segments.pop_back(); } }};
Expand Down Expand Up @@ -267,7 +271,6 @@ namespace eosio { namespace vm {
class growable_allocator {
public:
static constexpr size_t max_memory_size = 1024 * 1024 * 1024; // 1GB
static constexpr size_t chunk_size = 128 * 1024; // 128KB
template<std::size_t align_amt>
static constexpr size_t align_offset(size_t offset) { return (offset + align_amt - 1) & ~(align_amt - 1); }

Expand All @@ -277,22 +280,51 @@ namespace eosio { namespace vm {
return (offset + pagesize - 1) & ~(pagesize - 1);
}

growable_allocator() {}

// size in bytes
growable_allocator(size_t size) {
explicit growable_allocator(size_t size) {
EOS_VM_ASSERT(size <= max_memory_size, wasm_bad_alloc, "Too large initial memory size");
_base = (char*)mmap(NULL, max_memory_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
EOS_VM_ASSERT(_base != MAP_FAILED, wasm_bad_alloc, "mmap failed.");
if (size != 0) {
size_t chunks_to_alloc = (align_offset<chunk_size>(size) / chunk_size);
_size += (chunk_size * chunks_to_alloc);
mprotect((char*)_base, _size, PROT_READ | PROT_WRITE);
}
use_default_memory();
}

// Reserves the default backing memory for module data.
// Maps the full max_memory_size (1GB) of anonymous read/write memory in a
// single mmap call; the destructor releases it with munmap (_mmap_used).
// Asserts if a backing memory (default or fixed) was already set up.
void use_default_memory() {
EOS_VM_ASSERT(_base == nullptr, wasm_bad_alloc, "default memory already allocated");

// uses mmap for big memory allocation
_base = (char*)mmap(NULL, max_memory_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
EOS_VM_ASSERT(_base != MAP_FAILED, wasm_bad_alloc, "failed to mmap for default memory.");
_mmap_used = true; // tells the destructor to munmap rather than free
_capacity = max_memory_size;
}

// size in bytes
// Allocates a fixed-size backing memory of exactly `size` bytes for module
// data (used by the second pass of two-pass parsing, where the required
// size is already known).
// For JIT, zero-initialized heap memory from std::calloc is used; otherwise
// anonymous read/write mmap memory is used. _mmap_used records which, so the
// destructor releases it with the matching call (std::free vs munmap).
// Asserts if size is 0, too large, or a backing memory was already set up.
// size in bytes
void use_fixed_memory(bool is_jit, size_t size) {
EOS_VM_ASSERT(0 < size && size <= max_memory_size, wasm_bad_alloc, "Too large or 0 fixed memory size");
EOS_VM_ASSERT(_base == nullptr, wasm_bad_alloc, "Fixed memory already allocated");

_is_jit = is_jit;
if (_is_jit) {
_base = (char*)std::calloc(size, sizeof(char));
// Fixed message: the allocation above uses calloc, not malloc.
EOS_VM_ASSERT(_base != nullptr, wasm_bad_alloc, "calloc in use_fixed_memory failed.");
_mmap_used = false;
} else {
_base = (char*)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
// Fixed message: added the missing space ("use_fixed_memoryfailed").
EOS_VM_ASSERT(_base != MAP_FAILED, wasm_bad_alloc, "mmap in use_fixed_memory failed.");
_mmap_used = true;
}
_capacity = size;
}

~growable_allocator() {
munmap(_base, _capacity);
if (is_jit) {
if (_base != nullptr) {
if (_mmap_used) {
munmap(_base, _capacity);
} else {
std::free(_base);
}
}
if (_is_jit && _code_base) {
jit_allocator::instance().free(_code_base);
}
}
Expand All @@ -301,19 +333,19 @@ namespace eosio { namespace vm {
template <typename T>
T* alloc(size_t size = 0) {
static_assert(max_memory_size % alignof(T) == 0, "alignment must divide max_memory_size.");
EOS_VM_ASSERT(_capacity % alignof(T) == 0, wasm_bad_alloc, "alignment must divide _capacity.");
_offset = align_offset<alignof(T)>(_offset);
// Evaluating the inequality in this form cannot cause integer overflow.
// Once this assertion passes, the rest of the function is safe.
EOS_VM_ASSERT ((max_memory_size - _offset) / sizeof(T) >= size, wasm_bad_alloc, "Allocated too much memory");
EOS_VM_ASSERT ((_capacity - _offset) / sizeof(T) >= size, wasm_bad_alloc, "Allocated too much memory");
size_t aligned = (sizeof(T) * size) + _offset;
if (aligned > _size) {
size_t chunks_to_alloc = align_offset<chunk_size>(aligned - _size) / chunk_size;
mprotect((char*)_base + _size, (chunk_size * chunks_to_alloc), PROT_READ | PROT_WRITE);
_size += (chunk_size * chunks_to_alloc);
}
EOS_VM_ASSERT (aligned <= _capacity, wasm_bad_alloc, "Allocated too much memory after aligned");

T* ptr = (T*)(_base + _offset);
_offset = aligned;
if (_offset > _largest_offset) {
_largest_offset = _offset;
}
return ptr;
}

Expand All @@ -334,18 +366,24 @@ namespace eosio { namespace vm {
int err = mprotect(executable_code, _code_size, PROT_READ | PROT_WRITE);
EOS_VM_ASSERT(err == 0, wasm_bad_alloc, "mprotect failed");
std::memcpy(executable_code, _code_base, _code_size);
is_jit = true;
_code_base = (char*)executable_code;
enable_code(IsJit);
_is_jit = true;
_offset = (char*)code_base - _base;
}
enable_code(IsJit);
}

// Records an externally produced code buffer (base address and byte size)
// so enable_code()/disable_code() and the destructor can operate on it.
// Used by two-pass parsing to hand the JIT code generated in a temporary
// allocator over to the instantiated module's allocator.
void set_code_base_and_size(char* code_base, size_t code_size) {
   _code_size = code_size;
   _code_base = code_base;
}

// Sets protection on code pages to allow them to be executed:
// PROT_EXEC for JIT-generated machine code, PROT_READ | PROT_WRITE for
// interpreter code (which is read, not executed directly).
void enable_code(bool is_jit) {
   const int protection = is_jit ? PROT_EXEC : (PROT_READ | PROT_WRITE);
   mprotect(_code_base, _code_size, protection);
}
// Make code pages inaccessible (PROT_NONE) so the deadline timer can kill an
// execution (in both JIT and Interpreter): any further access to the code
// faults until enable_code() restores the permissions.
void disable_code() {
mprotect(_code_base, _code_size, PROT_NONE);
}
Expand All @@ -363,28 +401,36 @@ namespace eosio { namespace vm {
_offset = ((char*)ptr - _base);
}

// Returns the high-water mark of memory used by this allocator, rounded up
// to a page boundary. Used after a first parsing pass to size the fixed
// memory (use_fixed_memory) for the second pass.
size_t largest_used_size() {
return align_to_page(_largest_offset);
}

/*
* Finalize the memory by unmapping any excess pages, this means that the allocator will no longer grow
*/
void finalize() {
if(_capacity != _offset) {
if(_mmap_used && _capacity != _offset) {
std::size_t final_size = align_to_page(_offset);
EOS_VM_ASSERT(munmap(_base + final_size, _capacity - final_size) == 0, wasm_bad_alloc, "failed to finalize growable_allocator");
_capacity = _size = _offset = final_size;
if (final_size < _capacity) { // final_size can grow to _capacity after align_to_page.
// make sure no 0 size passed to munmap
EOS_VM_ASSERT(munmap(_base + final_size, _capacity - final_size) == 0, wasm_bad_alloc, "failed to finalize growable_allocator");
}
_capacity = _offset = final_size;
}
}

void free() { EOS_VM_ASSERT(false, wasm_bad_alloc, "unimplemented"); }

void reset() { _offset = 0; }

size_t _offset = 0;
size_t _size = 0;
std::size_t _capacity = 0;
char* _base;
char* _code_base = nullptr;
size_t _code_size = 0;
bool is_jit = false;
size_t _offset = 0;
size_t _largest_offset = 0;
size_t _capacity = 0;
char* _base = nullptr;
char* _code_base = nullptr;
size_t _code_size = 0;
bool _is_jit = false;
bool _mmap_used = false;
};

template <typename T>
Expand Down
56 changes: 49 additions & 7 deletions include/eosio/vm/backend.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -74,36 +74,78 @@ namespace eosio { namespace vm {
}
public:
backend(wasm_code&& code, host_t& host, wasm_allocator* alloc, const Options& options = Options{})
: memory_alloc(alloc), ctx(parser_t{ mod.allocator, options }.parse_module(code, mod, debug), detail::get_max_call_depth(options)) {
: memory_alloc(alloc), ctx(parse_module(code, options), detail::get_max_call_depth(options)) {
ctx.set_max_pages(detail::get_max_pages(options));
construct(&host);
}
backend(wasm_code&& code, wasm_allocator* alloc, const Options& options = Options{})
: memory_alloc(alloc), ctx(parser_t{ mod.allocator, options }.parse_module(code, mod, debug), detail::get_max_call_depth(options)) {
: memory_alloc(alloc), ctx(parse_module(code, options), detail::get_max_call_depth(options)) {
ctx.set_max_pages(detail::get_max_pages(options));
construct();
}
backend(wasm_code& code, host_t& host, wasm_allocator* alloc, const Options& options = Options{})
: memory_alloc(alloc), ctx(parser_t{ mod.allocator, options }.parse_module(code, mod, debug), detail::get_max_call_depth(options)) {
: memory_alloc(alloc), ctx(parse_module(code, options), detail::get_max_call_depth(options)) {
ctx.set_max_pages(detail::get_max_pages(options));
construct(&host);
}
backend(wasm_code& code, wasm_allocator* alloc, const Options& options = Options{})
: memory_alloc(alloc), ctx(parser_t{ mod.allocator, options }.parse_module(code, mod, debug), detail::get_max_call_depth(options)) {
: memory_alloc(alloc), ctx(parse_module(code, options), detail::get_max_call_depth(options)) {
ctx.set_max_pages(detail::get_max_pages(options));
construct();
}
backend(wasm_code_ptr& ptr, size_t sz, host_t& host, wasm_allocator* alloc, const Options& options = Options{})
: memory_alloc(alloc), ctx(parser_t{ mod.allocator, options }.parse_module2(ptr, sz, mod, debug), detail::get_max_call_depth(options)) {
: memory_alloc(alloc), ctx(parse_module2(ptr, sz, options, true), detail::get_max_call_depth(options)) { // single parsing. original behavior
ctx.set_max_pages(detail::get_max_pages(options));
construct(&host);
}
backend(wasm_code_ptr& ptr, size_t sz, wasm_allocator* alloc, const Options& options = Options{})
: memory_alloc(alloc), ctx(parser_t{ mod.allocator, options }.parse_module2(ptr, sz, mod, debug), detail::get_max_call_depth(options)) {
// Leap:
// * Contract validation only needs single parsing as the instantiated module is not cached.
// * Contract execution requires two-passes parsing to prevent memory mappings exhaustion
backend(wasm_code_ptr& ptr, size_t sz, wasm_allocator* alloc, const Options& options = Options{}, bool single_parsing = true)
: memory_alloc(alloc), ctx(parse_module2(ptr, sz, options, single_parsing), detail::get_max_call_depth(options)) {
ctx.set_max_pages(detail::get_max_pages(options));
construct();
}

// Parses `code` into this backend's module using the allocator's default
// (large reserved) memory. Returns the populated module.
module& parse_module(wasm_code& code, const Options& options) {
mod.allocator.use_default_memory();
return parser_t{ mod.allocator, options }.parse_module(code, mod, debug);
}

// Parses the wasm pointed to by `ptr` into this backend's module.
// When single_parsing is true, parses once into default (large reserved)
// memory — sufficient when the instantiated module is not cached.
// Otherwise performs two-pass parsing to keep the number of live memory
// mappings small (see comments below). Returns the populated module.
module& parse_module2(wasm_code_ptr& ptr, size_t sz, const Options& options, bool single_parsing) {
if (single_parsing) {
mod.allocator.use_default_memory();
return parser_t{ mod.allocator, options }.parse_module2(ptr, sz, mod, debug);
}

// To prevent a large number of memory mappings being used, use two-pass parsing.
// The first pass finds the max size of memory required for parsing;
// that memory is released after parsing.
// The second pass uses a fixed allocation of exactly the required size.
wasm_code_ptr orig_ptr = ptr;
size_t largest_size = 0;

// First pass: finds the max size of memory required by parsing.
// Memory used by parsing is freed when the scope is left.
{
module first_pass_module;
// For JIT, skip code generation as it is not needed here and
// does not count toward the module memory size.
detail::code_generate_mode code_gen_mode = Impl::is_jit ? detail::code_generate_mode::skip : detail::code_generate_mode::use_same_allocator;
first_pass_module.allocator.use_default_memory();
parser_t{ first_pass_module.allocator, options }.parse_module2(ptr, sz, first_pass_module, debug, code_gen_mode);
first_pass_module.finalize();
largest_size = first_pass_module.allocator.largest_used_size();
}

// Second pass: uses largest_size of memory for the actual parsing.
mod.allocator.use_fixed_memory(Impl::is_jit, largest_size);
// For JIT, use a separate allocator for code generation as the module's
// fixed memory does not include room for generated code.
detail::code_generate_mode code_gen_mode = Impl::is_jit ? detail::code_generate_mode::use_seperate_allocator : detail::code_generate_mode::use_same_allocator;
return parser_t{ mod.allocator, options }.parse_module2(orig_ptr, sz, mod, debug, code_gen_mode);
}

template <typename... Args>
inline auto operator()(host_t& host, const std::string_view& mod, const std::string_view& func, Args... args) {
return call(host, mod, func, args...);
Expand Down
7 changes: 6 additions & 1 deletion include/eosio/vm/constants.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,12 @@ namespace eosio { namespace vm {
id_size = sizeof(uint8_t),
varuint32_size = 5,
max_call_depth = 250,
initial_stack_size = 8*1024,
// initial_stack_size is used for reserving the initial memory of the operand stack.
// For JIT, the operand stack is only used for host function calls, where the
// number of elements required can never be big.
// For the Interpreter, performance is not a concern.
// Intentionally set to a small number.
initial_stack_size = 8,
initial_module_size = 1 * 1024 * 1024,
max_memory = 4ull << 31,
max_useable_memory = (1ull << 32), //4GiB
Expand Down
4 changes: 4 additions & 0 deletions include/eosio/vm/execution_context.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,10 @@ namespace eosio { namespace vm {
inline void reset() {
EOS_VM_ASSERT(_mod.error == nullptr, wasm_interpreter_exception, _mod.error);

// Reset the capacity of underlying memory used by operand stack if it is
// greater than initial_stack_size
_os.reset_capacity();

_linear_memory = _wasm_alloc->get_base_ptr<char>();
if(_mod.memories.size()) {
EOS_VM_ASSERT(_mod.memories[0].limits.initial <= _max_pages, wasm_bad_alloc, "Cannot allocate initial linear memory.");
Expand Down
38 changes: 32 additions & 6 deletions include/eosio/vm/parser.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,12 @@ namespace eosio { namespace vm {

namespace detail {

// Controls whether and where executable code is generated while parsing the
// code section (see parser's code_section handling). Note: the identifier
// spelling "use_seperate_allocator" is part of the existing API and is kept.
enum class code_generate_mode {
use_same_allocator = 0, // uses the same allocator as in module for code generation
use_seperate_allocator = 1, // uses a different temporary allocator for code generation
skip = 2 // skip code generation
};

static constexpr unsigned get_size_for_type(uint8_t type) {
switch(type) {
case types::i32:
Expand Down Expand Up @@ -295,12 +301,12 @@ namespace eosio { namespace vm {
return mod;
}

inline module& parse_module2(wasm_code_ptr& code_ptr, size_t sz, module& mod, DebugInfo& debug) {
parse_module(code_ptr, sz, mod, debug);
inline module& parse_module2(wasm_code_ptr& code_ptr, size_t sz, module& mod, DebugInfo& debug, detail::code_generate_mode mode = detail::code_generate_mode::use_same_allocator) {
parse_module(code_ptr, sz, mod, debug, mode);
return mod;
}

void parse_module(wasm_code_ptr& code_ptr, size_t sz, module& mod, DebugInfo& debug) {
void parse_module(wasm_code_ptr& code_ptr, size_t sz, module& mod, DebugInfo& debug, detail::code_generate_mode mode = detail::code_generate_mode::use_same_allocator) {
_mod = &mod;
EOS_VM_ASSERT(parse_magic(code_ptr) == constants::magic, wasm_parse_exception, "magic number did not match");
EOS_VM_ASSERT(parse_version(code_ptr) == constants::version, wasm_parse_exception,
Expand Down Expand Up @@ -338,7 +344,7 @@ namespace eosio { namespace vm {
case section_id::element_section:
parse_section<section_id::element_section>(code_ptr, mod.elements);
break;
case section_id::code_section: parse_section<section_id::code_section>(code_ptr, mod.code); break;
case section_id::code_section: parse_section<section_id::code_section>(code_ptr, mod.code, mode); break;
case section_id::data_section: parse_section<section_id::data_section>(code_ptr, mod.data); break;
default: EOS_VM_ASSERT(false, wasm_parse_exception, "error invalid section id");
}
Expand Down Expand Up @@ -1309,12 +1315,31 @@ namespace eosio { namespace vm {
}
template <uint8_t id>
inline void parse_section(wasm_code_ptr& code,
vec<typename std::enable_if_t<id == section_id::code_section, function_body>>& elems) {
vec<typename std::enable_if_t<id == section_id::code_section, function_body>>& elems, detail::code_generate_mode mode) {
const void* code_start = code.raw() - code.offset();
parse_section_impl(code, elems, detail::get_max_function_section_elements(_options),
[&](wasm_code_ptr& code, function_body& fb, std::size_t idx) { parse_function_body(code, fb, idx); });
EOS_VM_ASSERT( elems.size() == _mod->functions.size(), wasm_parse_exception, "code section must have the same size as the function section" );
Writer code_writer(_allocator, code.bounds() - code.offset(), *_mod);

if (mode == detail::code_generate_mode::skip) {
return;
} else if (mode == detail::code_generate_mode::use_seperate_allocator) {
// Leap: in 2-pass parsing, save the temporary JIT executable in a
// separate allocator so the executable will not be part of the
// instantiated module's allocator and won't be cached.
growable_allocator allocator;
allocator.use_default_memory();
write_code_out(allocator, code, code_start);
// pass the code base address and size to the main module's allocator
_mod->allocator.set_code_base_and_size(allocator._code_base, allocator._code_size);
allocator._code_base = nullptr; // make sure code_base won't be freed when going out of current scope by allocator's destructor
} else {
write_code_out(_allocator, code, code_start);
}
}

void write_code_out(growable_allocator& allocator, wasm_code_ptr& code, const void* code_start) {
Writer code_writer(allocator, code.bounds() - code.offset(), *_mod);
imap.on_code_start(code_writer.get_base_addr(), code_start);
for (size_t i = 0; i < _function_bodies.size(); i++) {
function_body& fb = _mod->code[i];
Expand All @@ -1328,6 +1353,7 @@ namespace eosio { namespace vm {
}
imap.on_code_end(code_writer.get_addr(), code.raw());
}

template <uint8_t id>
inline void parse_section(wasm_code_ptr& code,
vec<typename std::enable_if_t<id == section_id::data_section, data_segment>>& elems) {
Expand Down
Loading