diff --git a/.clang-format b/.clang-format new file mode 100644 index 000000000..d64128aa8 --- /dev/null +++ b/.clang-format @@ -0,0 +1,9 @@ +--- +BasedOnStyle: LLVM +Language: Cpp +IndentWidth: 2 +BreakConstructorInitializersBeforeComma: 'true' +PointerAlignment: Left +IncludeBlocks: Preserve +SortIncludes: false +ColumnLimit: 120 \ No newline at end of file diff --git a/.github/workflows/check-diff.yml b/.github/workflows/check-diff.yml deleted file mode 100644 index ecb22827d..000000000 --- a/.github/workflows/check-diff.yml +++ /dev/null @@ -1,46 +0,0 @@ -name: Check diff from main - -on: - workflow_call: - outputs: - run_arduino: - description: 'Return true if Arduino tests should be run' - value: ${{ jobs.check.outputs.run_arduino == 'true' }} - run_macos: - description: 'Return true if macOS tests should be run' - value: ${{ jobs.check.outputs.run_macos == 'true' }} - run_python: - description: 'Return true if Python tests should be run' - value: ${{ jobs.check.outputs.run_python == 'true' }} - run_windows: - description: 'Return true if Windows tests should be run' - value: ${{ jobs.check.outputs.run_windows == 'true' }} - run_zephyr: - description: 'Return true if Zephyr tests should be run' - value: ${{ jobs.check.outputs.run_zephyr == 'true' }} - -jobs: - check: - runs-on: ubuntu-latest - outputs: - run_arduino: ${{ steps.do.outputs.changed_arduino == 1 || github.ref == 'refs/heads/main' }} - run_macos: ${{ steps.do.outputs.changed_macos == 1 || github.ref == 'refs/heads/main' }} - run_python: ${{ steps.do.outputs.changed_python == 1 || github.ref == 'refs/heads/main' }} - run_windows: ${{ steps.do.outputs.changed_windows == 1 || github.ref == 'refs/heads/main' }} - run_zephyr: ${{ steps.do.outputs.changed_zephyr == 1 || github.ref == 'refs/heads/main' }} - - steps: - - name: Check out reactor-c repository - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - id: do - name: Check which targets have changes - run: | - ./check-diff.sh "arduino" arduino 
- ./check-diff.sh "macos|unix" macos - ./check-diff.sh "python" python - ./check-diff.sh "windows" windows - ./check-diff.sh "zephyr" zephyr - shell: bash - working-directory: .github/scripts diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 26795c446..39267f34f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -5,10 +5,19 @@ on: branches: - main pull_request: - # Also allow running this workflow manually from the Actions tab. + types: [synchronize, opened, reopened, ready_for_review, converted_to_draft] workflow_dispatch: + merge_group: + +concurrency: + group: ci-${{ github.ref }}-${{ github.event_path }} + cancel-in-progress: ${{ github.ref != 'refs/heads/master' }} jobs: + check-labels: + uses: lf-lang/lingua-franca/.github/workflows/check-labels.yml@master + if: ${{ github.event_name == 'pull_request' }} + unit-tests-single: uses: ./.github/workflows/unit-tests.yml with: @@ -25,60 +34,59 @@ jobs: build-trace-tools: uses: ./.github/workflows/build-trace-tools.yml - check-diff: - uses: ./.github/workflows/check-diff.yml - fetch-lf: uses: lf-lang/lingua-franca/.github/workflows/extract-ref.yml@master with: file: 'lingua-franca-ref.txt' lf-default-arduino: - needs: [fetch-lf, check-diff] + needs: [fetch-lf] uses: lf-lang/lingua-franca/.github/workflows/c-arduino-tests.yml@master with: runtime-ref: ${{ github.ref }} compiler-ref: ${{ needs.fetch-lf.outputs.ref }} - if: ${{ needs.check-diff.outputs.run_arduino == 'true' }} + if: ${{ !github.event.pull_request.draft || contains( github.event.pull_request.labels.*.name, 'arduino') }} lf-default-zephyr: - needs: [fetch-lf, check-diff] + needs: [fetch-lf] uses: lf-lang/lingua-franca/.github/workflows/c-zephyr-tests.yml@master with: runtime-ref: ${{ github.ref }} compiler-ref: ${{ needs.fetch-lf.outputs.ref }} - if: ${{ needs.check-diff.outputs.run_zephyr == 'true' }} + if: ${{ !github.event.pull_request.draft ||contains( github.event.pull_request.labels.*.name, 'zephyr') }} 
lf-default: - needs: [fetch-lf, check-diff] + needs: [fetch-lf] uses: lf-lang/lingua-franca/.github/workflows/c-tests.yml@master with: runtime-ref: ${{ github.ref }} compiler-ref: ${{ needs.fetch-lf.outputs.ref }} - all-platforms: ${{ needs.check-diff.outputs.run_macos == 'true' || needs.check-diff.outputs.run_windows == 'true' }} + all-platforms: ${{ !github.event.pull_request.draft || contains( github.event.pull_request.labels.*.name, 'mac') || contains( github.event.pull_request.labels.*.name, 'windows') }} lf-python: - needs: [fetch-lf, check-diff] + needs: [fetch-lf] uses: lf-lang/lingua-franca/.github/workflows/py-tests.yml@master with: reactor-c-ref: ${{ github.ref }} compiler-ref: ${{ needs.fetch-lf.outputs.ref }} - if: ${{ needs.check-diff.outputs.run_python == 'true' }} + if: ${{ !github.event.pull_request.draft || contains( github.event.pull_request.labels.*.name, 'python') }} lf-gedf-np: - needs: [fetch-lf, check-diff] + needs: [fetch-lf] uses: lf-lang/lingua-franca/.github/workflows/c-tests.yml@master with: runtime-ref: ${{ github.ref }} compiler-ref: ${{ needs.fetch-lf.outputs.ref }} scheduler: GEDF_NP - all-platforms: ${{ needs.check-diff.outputs.run_macos == 'true' || needs.check-diff.outputs.run_windows == 'true' }} + all-platforms: ${{ !github.event.pull_request.draft || contains( github.event.pull_request.labels.*.name, 'mac') || contains( github.event.pull_request.labels.*.name, 'windows') }} + if: ${{ !github.event.pull_request.draft || contains( github.event.pull_request.labels.*.name, 'schedulers') }} lf-adaptive: - needs: [fetch-lf, check-diff] + needs: [fetch-lf] uses: lf-lang/lingua-franca/.github/workflows/c-tests.yml@master with: runtime-ref: ${{ github.ref }} compiler-ref: ${{ needs.fetch-lf.outputs.ref }} scheduler: ADAPTIVE - all-platforms: ${{ needs.check-diff.outputs.run_macos == 'true' || needs.check-diff.outputs.run_windows == 'true' }} + all-platforms: ${{ !github.event.pull_request.draft || contains( 
github.event.pull_request.labels.*.name, 'mac') || contains( github.event.pull_request.labels.*.name, 'windows') }} + if: ${{ !github.event.pull_request.draft || contains( github.event.pull_request.labels.*.name, 'schedulers') }} \ No newline at end of file diff --git a/.github/workflows/clang-format.yml b/.github/workflows/clang-format.yml new file mode 100644 index 000000000..11028dd38 --- /dev/null +++ b/.github/workflows/clang-format.yml @@ -0,0 +1,16 @@ +name: clang-format-review + +# You can be more specific, but it currently only works on pull requests +on: [pull_request] + +jobs: + clang-format: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + - name: Install clang-tidy + run: | + sudo apt-get update + sudo apt-get install -y clang-tidy + - name: Analyze + run: make format-check diff --git a/.gitignore b/.gitignore index 55234644a..742de2ac4 100644 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,8 @@ /docs/_build /docs/api **/.vscode/ -/build/ +**/build/ +**/lib/ **/.DS_Store /core/federated/RTI/build/ /cmake-build-debug/ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 96a244dc1..000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,264 +0,0 @@ -## Style guidelines - -We follow the guidelines proposed in the small book, [_Embedded C Coding -Standard_](https://barrgroup.com/sites/default/files/barr_c_coding_standard_2018.pdf) by Michael -Barr, with some exceptions. This standard was chosen because it is available for free and because -it was similar in some superficial ways to the style that we already prefer. - -### Project-specific exceptions to the standard -We use some arbitrary exceptions to the Barr standard. These serve -* to minimize the version history noise required to come into compliance, and -* to avoid unnecessary differences between our C style guidelines and our Java style guidelines. - -#### Braces (1.3) - -We use K&R-style braces. 
Example: -``` -if (NULL == p_event) { - lf_print_error_and_exit("Out of memory!"); -} -``` - -#### The pointer operator * in declarations (3.1g) - -We only use a space on the variable name side (RHS) of *. Example: -``` -int* ptr; -``` - -#### Function declarations (3.1j) - -When defining a function, we do not add a space between the function name and the opening -parenthesis. - -#### Horizontal alignment (3.2a, c) -Tabs are always forbidden. The use of multiple consecutive spaces is forbidden except as indentation -at the beginning of a line or as required by rule 3.2d. - -#### Blank lines separating natural blocks of code (3.3b) - -The only natural blocks of code that must be preceded and followed by a blank line are procedure -definitions. Elsewhere, blank lines should only be used where they aid readability. - -#### End-of-file comments (3.3c) - -We do not print out code for the purpose of code review. We do not require end-of-file comments. - -#### Header files (4.2a) - -Multiple source files corresponding to one header file are permitted if and only if they represent -alternative functionality that depends on a preprocessor definition. - -#### Names of public data types (5.1c) - -The names of all public data types shall be prefixed with `lf_`. - -#### Names of public functions (6.1i) - -The names of all public functions shall be prefixed with `lf_`. - -#### Macro capitalization (6.1f) - -The only acceptable use of macros containing lowercase letters is macros prefixed by `lf_` that -appear in the `reactor-c` public API. - -#### Printed pages (6.2 and elsewhere) - -We do not print code for the purpose of code review. In cases where Barr refers to the size of a -printed page, consider the size of a typical computer monitor instead. In particular, all reasonable -effort shall be taken to keep the length of each function limited to a maximum of **40 lines**. 
- -#### Short variable names (7.1e) - -We place no restrictions on the length of variable names; however, recommendations against cryptic -abbreviations and abbreviations not in a version-controlled table still apply. - -#### Hungarian notation (7.1 j, k, l, m, n, o) - -Hungarian notation is never required and is only permitted in unusual cases where it aids -readability. The abbreviations listed in the "Abbreviations" table remain valid. Example: The `p_` -and `pp_` prefixes are permitted in the navigation of potentially confusing data structures. - -### Addenda - -#### Inlining-related performance optimizations - -Before attempting to encourage or force inlining, e.g. using the `inline` keyword and/or -parameterized macros, consider whether similar performance could be realized using link-time -optimization instead. - -#### Return types in function declarations - -The return type of a function must appear on the same line as the function name. - -#### Line breaks - -If the parenthesized expression(s) in a parameter list, `if` statement,`for` loop, `while loop`, or -similar is too long to fit on one line, then the opening parenthesis must be immediately followed by -a line feed, and the closing parenthesis and opening bracket must appear one their own line. -Example: - -``` -while ( - NULL != ( - current_reaction_to_execute = lf_sched_get_ready_reaction(worker_number) - ) -) { - // Do something -} -``` - -#### Section headers in files - -Sections within files shall not be marked by explicit section headers. They shall be made clear by -adhering to the sectioning suggested by the source and header file templates. - -#### Documentation comment format - -We use the Javadoc-style `/**` to mark documentation comments, and we precede any Doxygen commands -with an `@` sign. Example: - -```c -/** - * @brief Enqueue port absent reactions that will send a PORT_ABSENT - * message to downstream federates if a given network output port is not present. 
- */ -``` - -The opening `/**` marker must be immediately followed by a line feed. - -#### Documentation comment placement -Documentation comments of public procedures must be provided in the corresponding header files. - -Documentation comments for nontrivial private procedures must be provided where those procedures are -implemented. - -Duplication of multiline comments or of sigificant parts of multiline comments is forbidden. - -## Abbreviations - -The following is an extended version of the table provided by the Barr standard. - -| Abbreviation | Meaning | -| ------------ | ---------------------- | -| adc | analog-to-digital converter | -| addr | address | -| argc | argument count | -| argv | argument vector | -| avg | average | -| b_ | boolean | -| buf | buffer | -| cfg | configuration | -| cond | condition variable | -| ctor | constructor | -| curr | current (item in a list) | -| dac | digital-to-analog converter | -| dtor | destructor | -| ee | EEPROM | -| err | error | -| fed | federate/federated | -| g_ | global | -| gpio | general purpose I/0 pins | -| h_ | handle (to) | -| id | ID | -| init | initialize | -| io | input/output | -| ip | Internet Protocol | -| isr | interrupt service routine | -| lcd | liquid crystal display | -| led | light-emitting diode | -| lf | Lingua Franca | -| max | maximum | -| min | minumum | -| msec | millisecond | -| msg | message | -| net | network | -| next | next (item in a list) | -| nsec | nanosecond | -| num | number (of) | -| p_ | pointer (to) | -| param | parameter | -| pp_ | pointer to a pointer (to) | -| pqueue | priority queue | -| prev | previous (item in a list) | -| prio | priority | -| ptag | provisional tag | -| pwm | pulse width modulation | -| q | queue | -| ref | reference | -| reg | register | -| ret | return value | -| rti | runtime infrastructure | -| rx | receive | -| sched | scheduler | -| sem | semaphore | -| sta | safe to advance (time) | -| staa | safe to assume absent | -| stp | safe to process | -| 
str | string (null-terminated) | -| sync | synchronize | -| tcp | transmission control protocol | -| temp | temperature | -| tmp | temporary | -| tx | transmit | -| udp | User Datagram Protocol | -| usec | microsecond | -| util | utilities | - -## Source file template - -```c -/** - * @file - * @author - * @copyright See "LICENSE.md." - */ - -<#includes for standard libraries and files associated with the target platform> - -<#includes for header files belonging to our project> - - - - - - - - - - - - - - - - -``` - -## Header file template - -```c -/** - * @file - * @brief - * @author - * @copyright See "LICENSE.md." - */ - -#ifdef -#define - -<#includes for standard libraries and files associated with the target platform> - -<#includes for header files belonging to our project> - - - - - - - - - -#endif // -``` diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..ec6a1b1eb --- /dev/null +++ b/Makefile @@ -0,0 +1,9 @@ +# This file lets you format the code-base with a single command. +FILES := $(shell find . -name '*.c' -o -name '*.h') +.PHONY: format +format: + clang-format -i -style=file $(FILES) + +.PHONY: format-check +format-check: + clang-format --dry-run --Werror -style=file $(FILES) diff --git a/README.md b/README.md index d5d8c3cee..3d1313a1f 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,19 @@ ## Documentation To generate and view documentation, see [docs/README.md](docs/README.md). +## Code-formatting +We use clang-format to format our codebase. To run the formatter on all source and header files in reactor-c: +``` +make format +``` +The CI will do a "dry-run" of the formatter to verify that all files are correctly formatted. + +VSCode can be configured to run clang-format on files as they are saved. 
To achieve this set the following settings: +- editor.formatOnSave: true +- C_Cpp.formatting: clang-format +- C_Cpp.clang_format_style: file + + ## Testing The Github Actions tests for this repo will automatically run all the C Lingua Franca tests with each of the available schedulers. The version of the lingua-franca repo that is used to do this is specified by the lingua-franca-ref.txt file in this directory. diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index 73d8e4b93..8eec31eb8 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -1,4 +1,17 @@ set(CORE_ROOT ${CMAKE_CURRENT_SOURCE_DIR}) +set(LF_ROOT ${CMAKE_CURRENT_LIST_DIR}/..) + +if(${CMAKE_SYSTEM_NAME} STREQUAL "Windows") + set(CMAKE_SYSTEM_VERSION 10.0) + message("Using Windows SDK version ${CMAKE_VS_WINDOWS_TARGET_PLATFORM_VERSION}") +elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Nrf52") + list(APPEND REACTORC_COMPILE_DEFS PLATFORM_NRF52) +elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Zephyr") + list(APPEND REACTORC_COMPILE_DEFS PLATFORM_ZEPHYR) + set(PLATFORM_ZEPHYR true) +elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Rp2040") + list(APPEND REACTORC_COMPILE_DEFS PLATFORM_RP2040) +endif() # Get the general common sources for reactor-c list(APPEND GENERAL_SOURCES tag.c clock.c port.c mixed_radix.c reactor_common.c lf_token.c environment.c) @@ -6,7 +19,7 @@ list(APPEND GENERAL_SOURCES tag.c clock.c port.c mixed_radix.c reactor_common.c # Add tracing support if requested if (DEFINED LF_TRACE) message(STATUS "Including sources specific to tracing.") - list(APPEND GENERAL_SOURCES trace.c) + list(APPEND GENERAL_SOURCES tracepoint.c) endif() # Add the general sources to the list of REACTORC_SOURCES @@ -38,7 +51,6 @@ endif() # Include sources from subdirectories include(utils/CMakeLists.txt) include(modal_models/CMakeLists.txt) -include(platform/CMakeLists.txt) # Print sources used for compilation list(JOIN REACTORC_SOURCES ", " PRINTABLE_SOURCE_LIST) @@ -52,9 +64,40 @@ if(PLATFORM_ZEPHYR) 
zephyr_library_sources(${REACTORC_SOURCES}) zephyr_library_link_libraries(kernel) else() - add_library(reactor-c ${REACTORC_SOURCES}) + add_library(reactor-c) + target_sources(reactor-c PRIVATE ${REACTORC_SOURCES}) +endif() + +if (DEFINED LF_TRACE) + include(${LF_ROOT}/trace/api/CMakeLists.txt) + if(NOT LF_TRACE_PLUGIN) + set(LF_TRACE_PLUGIN lf::trace-impl) + include(${LF_ROOT}/trace/impl/CMakeLists.txt) + endif() + message(STATUS "linking trace plugin library ${LF_TRACE_PLUGIN}") + target_link_libraries(reactor-c PUBLIC lf::trace-api) + target_link_libraries(reactor-c PRIVATE "${LF_TRACE_PLUGIN}") endif() +include(${LF_ROOT}/version/api/CMakeLists.txt) +target_link_libraries(reactor-c PUBLIC lf::version-api) + +include(${LF_ROOT}/logging/api/CMakeLists.txt) +target_link_libraries(reactor-c PUBLIC lf::logging-api) + +include(${LF_ROOT}/tag/api/CMakeLists.txt) +target_link_libraries(reactor-c PUBLIC lf::tag-api) + +include(${LF_ROOT}/low_level_platform/api/CMakeLists.txt) +include(${LF_ROOT}/low_level_platform/impl/CMakeLists.txt) +target_link_libraries(reactor-c PUBLIC lf::low-level-platform-api) +target_link_libraries(reactor-c PRIVATE lf::low-level-platform-impl) + +include(${LF_ROOT}/platform/api/CMakeLists.txt) +include(${LF_ROOT}/platform/impl/CMakeLists.txt) +target_link_libraries(reactor-c PUBLIC lf::platform-api) +target_link_libraries(reactor-c PRIVATE lf::platform-impl) + # Apply compile definitions to the reactor-c library. target_compile_definitions(reactor-c PUBLIC ${REACTORC_COMPILE_DEFS}) @@ -128,6 +171,7 @@ define(FEDERATED_CENTRALIZED) define(FEDERATED_DECENTRALIZED) define(FEDERATED) define(FEDERATED_AUTHENTICATED) +define(FEDERATE_ID) define(LF_REACTION_GRAPH_BREADTH) define(LF_TRACE) define(LF_SINGLE_THREADED) diff --git a/core/clock.c b/core/clock.c index 0620c6ccd..d5b250fc3 100644 --- a/core/clock.c +++ b/core/clock.c @@ -4,9 +4,9 @@ * @copyright (c) 2020-2024, The University of California at Berkeley. 
* License: BSD 2-clause * @brief Implementations of functions in clock.h. -*/ + */ #include "clock.h" -#include "platform.h" +#include "low_level_platform.h" #if defined(_LF_CLOCK_SYNC_ON) #include "clock-sync.h" @@ -14,47 +14,47 @@ static instant_t last_read_physical_time = NEVER; -int lf_clock_gettime(instant_t *now) { - instant_t last_read_local; - int res = _lf_clock_gettime(now); - if (res != 0) { - return -1; +int lf_clock_gettime(instant_t* now) { + instant_t last_read_local; + int res = _lf_clock_gettime(now); + if (res != 0) { + return -1; + } +#if defined(_LF_CLOCK_SYNC_ON) + clock_sync_apply_offset(now); +#endif + do { + // Atomically fetch the last read value. This is done with + // atomics to guarantee that it works on 32bit platforms as well. + last_read_local = lf_atomic_fetch_add64(&last_read_physical_time, 0); + + // Ensure monotonicity. + if (*now < last_read_local) { + *now = last_read_local + 1; } - #if defined (_LF_CLOCK_SYNC_ON) - clock_sync_apply_offset(now); - #endif - do { - // Atomically fetch the last read value. This is done with - // atomics to guarantee that it works on 32bit platforms as well. - last_read_local = lf_atomic_fetch_add64(&last_read_physical_time, 0); - - // Ensure monotonicity. - if (*now < last_read_local) { - *now = last_read_local+1; - } - - // Update the last read value, atomically and also make sure that another - // thread has not been here in between and changed it. If so. We must redo - // the monotonicity calculation. - } while(!lf_atomic_bool_compare_and_swap64(&last_read_physical_time, last_read_local, *now)); - - return 0; + + // Update the last read value, atomically and also make sure that another + // thread has not been here in between and changed it. If so. We must redo + // the monotonicity calculation. 
+ } while (!lf_atomic_bool_compare_and_swap64(&last_read_physical_time, last_read_local, *now)); + + return 0; } -int lf_clock_interruptable_sleep_until_locked(environment_t *env, instant_t wakeup_time) { - #if defined (_LF_CLOCK_SYNC_ON) - // Remove any clock sync offset and call the Platform API. - clock_sync_remove_offset(&wakeup_time); - #endif - return _lf_interruptable_sleep_until_locked(env, wakeup_time); +int lf_clock_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_time) { +#if defined(_LF_CLOCK_SYNC_ON) + // Remove any clock sync offset and call the Platform API. + clock_sync_remove_offset(&wakeup_time); +#endif + return _lf_interruptable_sleep_until_locked(env, wakeup_time); } #if !defined(LF_SINGLE_THREADED) -int lf_clock_cond_timedwait(lf_cond_t *cond, instant_t wakeup_time) { - #if defined (_LF_CLOCK_SYNC_ON) - // Remove any clock sync offset and call the Platform API. - clock_sync_remove_offset(&wakeup_time); - #endif - return _lf_cond_timedwait(cond, wakeup_time); +int lf_clock_cond_timedwait(lf_cond_t* cond, instant_t wakeup_time) { +#if defined(_LF_CLOCK_SYNC_ON) + // Remove any clock sync offset and call the Platform API. + clock_sync_remove_offset(&wakeup_time); +#endif + return _lf_cond_timedwait(cond, wakeup_time); } #endif diff --git a/core/environment.c b/core/environment.c index 701614dfd..7189090e4 100644 --- a/core/environment.c +++ b/core/environment.c @@ -33,7 +33,7 @@ #include "util.h" #include "lf_types.h" #include -#include "trace.h" +#include "tracepoint.h" #if !defined(LF_SINGLE_THREADED) #include "scheduler.h" #endif @@ -43,16 +43,16 @@ */ static void environment_init_threaded(environment_t* env, int num_workers) { #if !defined(LF_SINGLE_THREADED) - env->num_workers = num_workers; - env->thread_ids = (lf_thread_t*)calloc(num_workers, sizeof(lf_thread_t)); - LF_ASSERT_NON_NULL(env->thread_ids); - env->barrier.requestors = 0; - env->barrier.horizon = FOREVER_TAG; - - // Initialize synchronization objects. 
- LF_MUTEX_INIT(&env->mutex); - LF_COND_INIT(&env->event_q_changed, &env->mutex); - LF_COND_INIT(&env->global_tag_barrier_requestors_reached_zero, &env->mutex); + env->num_workers = num_workers; + env->thread_ids = (lf_thread_t*)calloc(num_workers, sizeof(lf_thread_t)); + LF_ASSERT_NON_NULL(env->thread_ids); + env->barrier.requestors = 0; + env->barrier.horizon = FOREVER_TAG; + + // Initialize synchronization objects. + LF_MUTEX_INIT(&env->mutex); + LF_COND_INIT(&env->event_q_changed, &env->mutex); + LF_COND_INIT(&env->global_tag_barrier_requestors_reached_zero, &env->mutex); #endif } @@ -61,11 +61,11 @@ static void environment_init_threaded(environment_t* env, int num_workers) { */ static void environment_init_single_threaded(environment_t* env) { #ifdef LF_SINGLE_THREADED - // Reaction queue ordered first by deadline, then by level. - // The index of the reaction holds the deadline in the 48 most significant bits, - // the level in the 16 least significant bits. - env->reaction_q = pqueue_init(INITIAL_REACT_QUEUE_SIZE, in_reverse_order, get_reaction_index, - get_reaction_position, set_reaction_position, reaction_matches, print_reaction); + // Reaction queue ordered first by deadline, then by level. + // The index of the reaction holds the deadline in the 48 most significant bits, + // the level in the 16 least significant bits. 
+ env->reaction_q = pqueue_init(INITIAL_REACT_QUEUE_SIZE, in_reverse_order, get_reaction_index, get_reaction_position, + set_reaction_position, reaction_matches, print_reaction); #endif } @@ -75,27 +75,28 @@ static void environment_init_single_threaded(environment_t* env) { */ static void environment_init_modes(environment_t* env, int num_modes, int num_state_resets) { #ifdef MODAL_REACTORS - if (num_modes > 0) { - mode_environment_t* modes = (mode_environment_t *) calloc(1, sizeof(mode_environment_t)); - LF_ASSERT_NON_NULL(modes); - modes->modal_reactor_states = (reactor_mode_state_t**) calloc(num_modes, sizeof(reactor_mode_state_t*)); - LF_ASSERT_NON_NULL(modes->modal_reactor_states); - modes->modal_reactor_states_size = num_modes; - modes->triggered_reactions_request = 0; - - modes->state_resets_size = num_state_resets; - if (modes->state_resets_size > 0) { - modes->state_resets = (mode_state_variable_reset_data_t *) calloc(modes->state_resets_size, sizeof(mode_state_variable_reset_data_t)); - LF_ASSERT_NON_NULL(modes->state_resets); - } else { - modes->state_resets = NULL; - } - - env->modes = modes; + if (num_modes > 0) { + mode_environment_t* modes = (mode_environment_t*)calloc(1, sizeof(mode_environment_t)); + LF_ASSERT_NON_NULL(modes); + modes->modal_reactor_states = (reactor_mode_state_t**)calloc(num_modes, sizeof(reactor_mode_state_t*)); + LF_ASSERT_NON_NULL(modes->modal_reactor_states); + modes->modal_reactor_states_size = num_modes; + modes->triggered_reactions_request = 0; + modes->state_resets_size = num_state_resets; + if (modes->state_resets_size > 0) { + modes->state_resets = + (mode_state_variable_reset_data_t*)calloc(modes->state_resets_size, sizeof(mode_state_variable_reset_data_t)); + LF_ASSERT_NON_NULL(modes->state_resets); } else { - env->modes = NULL; + modes->state_resets = NULL; } + + env->modes = modes; + + } else { + env->modes = NULL; + } #endif } @@ -104,171 +105,155 @@ static void environment_init_modes(environment_t* env, int 
num_modes, int num_st */ static void environment_init_federated(environment_t* env, int num_is_present_fields) { #ifdef FEDERATED_DECENTRALIZED - if (num_is_present_fields > 0) { - env->_lf_intended_tag_fields = (tag_t**) calloc(num_is_present_fields, sizeof(tag_t*)); - LF_ASSERT_NON_NULL(env->_lf_intended_tag_fields); - env->_lf_intended_tag_fields_size = num_is_present_fields; - } else { - env->_lf_intended_tag_fields = NULL; - env->_lf_intended_tag_fields_size = 0; - } + if (num_is_present_fields > 0) { + env->_lf_intended_tag_fields = (tag_t**)calloc(num_is_present_fields, sizeof(tag_t*)); + LF_ASSERT_NON_NULL(env->_lf_intended_tag_fields); + env->_lf_intended_tag_fields_size = num_is_present_fields; + } else { + env->_lf_intended_tag_fields = NULL; + env->_lf_intended_tag_fields_size = 0; + } #endif } -void environment_init_tags( environment_t *env, instant_t start_time, interval_t duration) { - env->current_tag = (tag_t){.time = start_time, .microstep = 0u}; - - tag_t stop_tag = FOREVER_TAG_INITIALIZER; - if (duration >= 0LL) { - // A duration has been specified. Calculate the stop time. - stop_tag.time = env->current_tag.time + duration; - stop_tag.microstep = 0; - } - env->stop_tag = stop_tag; +void environment_init_tags(environment_t* env, instant_t start_time, interval_t duration) { + env->current_tag = (tag_t){.time = start_time, .microstep = 0u}; + + tag_t stop_tag = FOREVER_TAG_INITIALIZER; + if (duration >= 0LL) { + // A duration has been specified. Calculate the stop time. 
+ stop_tag.time = env->current_tag.time + duration; + stop_tag.microstep = 0; + } + env->stop_tag = stop_tag; } static void environment_free_threaded(environment_t* env) { #if !defined(LF_SINGLE_THREADED) - free(env->thread_ids); - lf_sched_free(env->scheduler); + free(env->thread_ids); + lf_sched_free(env->scheduler); #endif } static void environment_free_single_threaded(environment_t* env) { #ifdef LF_SINGLE_THREADED - pqueue_free(env->reaction_q); + pqueue_free(env->reaction_q); #endif } static void environment_free_modes(environment_t* env) { #ifdef MODAL_REACTORS - if (env->modes) { - free(env->modes->modal_reactor_states); - free(env->modes->state_resets); - free(env->modes); - } + if (env->modes) { + free(env->modes->modal_reactor_states); + free(env->modes->state_resets); + free(env->modes); + } #endif } static void environment_free_federated(environment_t* env) { #ifdef FEDERATED_DECENTRALIZED - free(env->_lf_intended_tag_fields); + free(env->_lf_intended_tag_fields); #endif } void environment_free(environment_t* env) { - free(env->name); - free(env->timer_triggers); - free(env->startup_reactions); - free(env->shutdown_reactions); - free(env->reset_reactions); - free(env->is_present_fields); - free(env->is_present_fields_abbreviated); - pqueue_free(env->event_q); - pqueue_free(env->recycle_q); - pqueue_free(env->next_q); - - environment_free_threaded(env); - environment_free_single_threaded(env); - environment_free_modes(env); - environment_free_federated(env); - trace_free(env->trace); + free(env->name); + free(env->timer_triggers); + free(env->startup_reactions); + free(env->shutdown_reactions); + free(env->reset_reactions); + free(env->is_present_fields); + free(env->is_present_fields_abbreviated); + pqueue_free(env->event_q); + pqueue_free(env->recycle_q); + pqueue_free(env->next_q); + + environment_free_threaded(env); + environment_free_single_threaded(env); + environment_free_modes(env); + environment_free_federated(env); } +int 
environment_init(environment_t* env, const char* name, int id, int num_workers, int num_timers, + int num_startup_reactions, int num_shutdown_reactions, int num_reset_reactions, + int num_is_present_fields, int num_modes, int num_state_resets, int num_watchdogs, + const char* trace_file_name) { -int environment_init( - environment_t* env, - const char *name, - int id, - int num_workers, - int num_timers, - int num_startup_reactions, - int num_shutdown_reactions, - int num_reset_reactions, - int num_is_present_fields, - int num_modes, - int num_state_resets, - int num_watchdogs, - const char * trace_file_name -) { - - env->name = malloc(strlen(name) + 1); // +1 for the null terminator - LF_ASSERT_NON_NULL(env->name); - strcpy(env->name, name); - - env->id = id; - env->stop_tag = FOREVER_TAG; - - env->timer_triggers_size=num_timers; - if(env->timer_triggers_size > 0) { - env->timer_triggers = (trigger_t **) calloc(num_timers, sizeof(trigger_t)); - LF_ASSERT_NON_NULL(env->timer_triggers); - } else { - env->timer_triggers = NULL; - } + env->name = malloc(strlen(name) + 1); // +1 for the null terminator + LF_ASSERT_NON_NULL(env->name); + strcpy(env->name, name); - env->startup_reactions_size=num_startup_reactions; - if (env->startup_reactions_size > 0) { - env->startup_reactions = (reaction_t **) calloc(num_startup_reactions, sizeof(reaction_t)); - LF_ASSERT_NON_NULL(env->startup_reactions); - } else { - env->startup_reactions = NULL; - } + env->id = id; + env->stop_tag = FOREVER_TAG; - env->shutdown_reactions_size=num_shutdown_reactions; - if(env->shutdown_reactions_size > 0) { - env->shutdown_reactions = (reaction_t **) calloc(num_shutdown_reactions, sizeof(reaction_t)); - LF_ASSERT_NON_NULL(env->shutdown_reactions); - } else { - env->shutdown_reactions = NULL; - } + env->timer_triggers_size = num_timers; + if (env->timer_triggers_size > 0) { + env->timer_triggers = (trigger_t**)calloc(num_timers, sizeof(trigger_t)); + LF_ASSERT_NON_NULL(env->timer_triggers); + } else 
{ + env->timer_triggers = NULL; + } - env->reset_reactions_size=num_reset_reactions; - if (env->reset_reactions_size > 0) { - env->reset_reactions = (reaction_t **) calloc(num_reset_reactions, sizeof(reaction_t)); - LF_ASSERT_NON_NULL(env->reset_reactions); - } else { - env->reset_reactions = NULL; - } + env->startup_reactions_size = num_startup_reactions; + if (env->startup_reactions_size > 0) { + env->startup_reactions = (reaction_t**)calloc(num_startup_reactions, sizeof(reaction_t)); + LF_ASSERT_NON_NULL(env->startup_reactions); + } else { + env->startup_reactions = NULL; + } - env->is_present_fields_size = num_is_present_fields; - env->is_present_fields_abbreviated_size = 0; + env->shutdown_reactions_size = num_shutdown_reactions; + if (env->shutdown_reactions_size > 0) { + env->shutdown_reactions = (reaction_t**)calloc(num_shutdown_reactions, sizeof(reaction_t)); + LF_ASSERT_NON_NULL(env->shutdown_reactions); + } else { + env->shutdown_reactions = NULL; + } - if (env->is_present_fields_size > 0) { - env->is_present_fields = (bool**)calloc(num_is_present_fields, sizeof(bool*)); - LF_ASSERT_NON_NULL(env->is_present_fields); - env->is_present_fields_abbreviated = (bool**)calloc(num_is_present_fields, sizeof(bool*)); - LF_ASSERT_NON_NULL(env->is_present_fields_abbreviated); - } else { - env->is_present_fields = NULL; - env->is_present_fields_abbreviated = NULL; - } + env->reset_reactions_size = num_reset_reactions; + if (env->reset_reactions_size > 0) { + env->reset_reactions = (reaction_t**)calloc(num_reset_reactions, sizeof(reaction_t)); + LF_ASSERT_NON_NULL(env->reset_reactions); + } else { + env->reset_reactions = NULL; + } - env->watchdogs_size = num_watchdogs; - if(env->watchdogs_size > 0) { - env->watchdogs = (watchdog_t** )calloc(env->watchdogs_size, sizeof(watchdog_t*)); - LF_ASSERT(env->watchdogs, "Out of memory"); - } + env->is_present_fields_size = num_is_present_fields; + env->is_present_fields_abbreviated_size = 0; + + if (env->is_present_fields_size 
> 0) { + env->is_present_fields = (bool**)calloc(num_is_present_fields, sizeof(bool*)); + LF_ASSERT_NON_NULL(env->is_present_fields); + env->is_present_fields_abbreviated = (bool**)calloc(num_is_present_fields, sizeof(bool*)); + LF_ASSERT_NON_NULL(env->is_present_fields_abbreviated); + } else { + env->is_present_fields = NULL; + env->is_present_fields_abbreviated = NULL; + } + + env->watchdogs_size = num_watchdogs; + if (env->watchdogs_size > 0) { + env->watchdogs = (watchdog_t**)calloc(env->watchdogs_size, sizeof(watchdog_t*)); + LF_ASSERT(env->watchdogs, "Out of memory"); + } + + env->_lf_handle = 1; + + // Initialize our priority queues. + env->event_q = pqueue_init(INITIAL_EVENT_QUEUE_SIZE, in_reverse_order, get_event_time, get_event_position, + set_event_position, event_matches, print_event); + env->recycle_q = pqueue_init(INITIAL_EVENT_QUEUE_SIZE, in_no_particular_order, get_event_time, get_event_position, + set_event_position, event_matches, print_event); + env->next_q = pqueue_init(INITIAL_EVENT_QUEUE_SIZE, in_no_particular_order, get_event_time, get_event_position, + set_event_position, event_matches, print_event); + + // Initialize functionality depending on target properties. + environment_init_threaded(env, num_workers); + environment_init_single_threaded(env); + environment_init_modes(env, num_modes, num_state_resets); + environment_init_federated(env, num_is_present_fields); - env->_lf_handle=1; - - // Initialize our priority queues. - env->event_q = pqueue_init(INITIAL_EVENT_QUEUE_SIZE, in_reverse_order, get_event_time, - get_event_position, set_event_position, event_matches, print_event); - env->recycle_q = pqueue_init(INITIAL_EVENT_QUEUE_SIZE, in_no_particular_order, get_event_time, - get_event_position, set_event_position, event_matches, print_event); - env->next_q = pqueue_init(INITIAL_EVENT_QUEUE_SIZE, in_no_particular_order, get_event_time, - get_event_position, set_event_position, event_matches, print_event); - - // If tracing is enabled. 
Initialize a tracing struct on the env struct. - env->trace = trace_new(env, trace_file_name); - - // Initialize functionality depending on target properties. - environment_init_threaded(env, num_workers); - environment_init_single_threaded(env); - environment_init_modes(env, num_modes, num_state_resets); - environment_init_federated(env, num_is_present_fields); - - env->initialized = true; - return 0; + env->initialized = true; + return 0; } diff --git a/core/federated/RTI/CMakeLists.txt b/core/federated/RTI/CMakeLists.txt index 3f36b7060..9c4f996d2 100644 --- a/core/federated/RTI/CMakeLists.txt +++ b/core/federated/RTI/CMakeLists.txt @@ -25,7 +25,7 @@ # To enable simple HMAC-based authentication of federates, # add `-DAUTH=ON` option to the cmake command as shown below: -# +# # $> mkdir build && cd build # $> cmake -DAUTH=ON ../ # $> make @@ -38,23 +38,14 @@ cmake_minimum_required(VERSION 3.12) project(RTI VERSION 1.0.0 LANGUAGES C) set(CoreLib ../../../core) - -# Check which system we are running on to select the correct platform support -# file and assign the file's path to LF_PLATFORM_FILE -if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") - set(LF_PLATFORM_FILE ${CoreLib}/platform/lf_linux_support.c) -elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin") - set(LF_PLATFORM_FILE ${CoreLib}/platform/lf_macos_support.c) -else() - message(FATAL_ERROR "Your platform is not supported! RTI supports Linux and MacOS.") -endif() +set(LF_ROOT ${CMAKE_CURRENT_LIST_DIR}/../../..) 
set(IncludeDir ../../../include/core) +include_directories(../../../include) include_directories(${IncludeDir}) include_directories(${IncludeDir}/federated) include_directories(${IncludeDir}/federated/network) include_directories(${IncludeDir}/modal_models) -include_directories(${IncludeDir}/platform) include_directories(${IncludeDir}/utils) @@ -64,10 +55,7 @@ add_executable( main.c rti_common.c rti_remote.c - ${CoreLib}/trace.c - ${LF_PLATFORM_FILE} - ${CoreLib}/platform/lf_atomic_gcc_clang.c - ${CoreLib}/platform/lf_unix_clock_support.c + ${CoreLib}/tracepoint.c ${CoreLib}/utils/util.c ${CoreLib}/tag.c ${CoreLib}/clock.c @@ -76,12 +64,43 @@ add_executable( ${CoreLib}/utils/pqueue_tag.c ${CoreLib}/utils/pqueue.c ) +if (NOT DEFINED LOG_LEVEL) + set(LOG_LEVEL 0) +ENDIF(NOT DEFINED LOG_LEVEL) IF(CMAKE_BUILD_TYPE MATCHES DEBUG) # Set the LOG_LEVEL to 4 to get DEBUG messages message("-- Building RTI with DEBUG messages enabled") - target_compile_definitions(RTI PUBLIC LOG_LEVEL=4) + set(LOG_LEVEL 4) ENDIF(CMAKE_BUILD_TYPE MATCHES DEBUG) +target_compile_definitions(RTI PUBLIC LOG_LEVEL=${LOG_LEVEL}) + +include(${LF_ROOT}/version/api/CMakeLists.txt) +target_link_libraries(RTI lf::version-api) + +include(${LF_ROOT}/logging/api/CMakeLists.txt) +target_link_libraries(RTI lf::logging-api) + +include(${LF_ROOT}/tag/api/CMakeLists.txt) +target_link_libraries(RTI lf::tag-api) + +include(${LF_ROOT}/platform/api/CMakeLists.txt) +target_link_libraries(RTI lf::platform-api) + +include(${LF_ROOT}/platform/impl/CMakeLists.txt) +target_link_libraries(RTI lf::platform-impl) + +include(${LF_ROOT}/trace/api/CMakeLists.txt) +target_link_libraries(RTI lf::trace-api) + +include(${LF_ROOT}/trace/impl/CMakeLists.txt) +target_link_libraries(RTI lf::trace-impl) + +include(${LF_ROOT}/low_level_platform/impl/CMakeLists.txt) +target_link_libraries(RTI lf::low-level-platform-impl) + +include(${LF_ROOT}/low_level_platform/api/CMakeLists.txt) +target_link_libraries(RTI lf::low-level-platform-api) # 
Set the STANDALONE_RTI flag to include the rti_remote and rti_common. target_compile_definitions(RTI PUBLIC STANDALONE_RTI=1) diff --git a/core/federated/RTI/main.c b/core/federated/RTI/main.c index dc492a7e8..294dd1f2f 100644 --- a/core/federated/RTI/main.c +++ b/core/federated/RTI/main.c @@ -49,10 +49,9 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "rti_remote.h" #include "net_util.h" -#include // To trap ctrl-c and invoke a clean stop to save the trace file, if needed. +#include // To trap ctrl-c and invoke a clean stop to save the trace file, if needed. #include - /** * The tracing mechanism uses the number of workers variable `_lf_number_of_workers`. * For RTI tracing, the number of workers is set as the number of federates. @@ -66,7 +65,7 @@ static rti_remote_t rti; /** * RTI trace file name */ -const char *rti_trace_file_name = "rti.lft"; +const char* rti_trace_file_name = "rti.lft"; /** Indicator that normal termination of the RTI has occurred. */ bool normal_termination = false; @@ -75,23 +74,23 @@ bool normal_termination = false; * Send a failed signal to the specified federate. 
*/ static void send_failed_signal(federate_info_t* fed) { - size_t bytes_to_write = 1; - unsigned char buffer[bytes_to_write]; - buffer[0] = MSG_TYPE_FAILED; - if (rti.base.tracing_enabled) { - tracepoint_rti_to_federate(rti.base.trace, send_FAILED, fed->enclave.id, NULL); - } - int failed = write_to_socket(fed->socket, bytes_to_write, &(buffer[0])); - if (failed == 0) { - LF_PRINT_LOG("RTI has sent failed signal to federate %d due to abnormal termination.", fed->enclave.id); - } else { - lf_print_error("RTI failed to send failed signal to federate %d on socket ID %d.", fed->enclave.id, fed->socket); - } + size_t bytes_to_write = 1; + unsigned char buffer[bytes_to_write]; + buffer[0] = MSG_TYPE_FAILED; + if (rti.base.tracing_enabled) { + tracepoint_rti_to_federate(send_FAILED, fed->enclave.id, NULL); + } + int failed = write_to_socket(fed->socket, bytes_to_write, &(buffer[0])); + if (failed == 0) { + LF_PRINT_LOG("RTI has sent failed signal to federate %d due to abnormal termination.", fed->enclave.id); + } else { + lf_print_error("RTI failed to send failed signal to federate %d on socket ID %d.", fed->enclave.id, fed->socket); + } } /** * @brief Function to run upon termination. - * + * * This function will be invoked both after main() returns and when a signal * that results in terminating the process, such as SIGINT. In the former * case, it should do nothing. In the latter case, it will send a MSG_TYPE_FAILED @@ -101,256 +100,256 @@ static void send_failed_signal(federate_info_t* fed) { * collected so far. 
*/ void termination() { - if (!normal_termination) { - for (int i = 0; i < rti.base.number_of_scheduling_nodes; i++) { - federate_info_t *f = (federate_info_t*)rti.base.scheduling_nodes[i]; - if (!f || f->enclave.state == NOT_CONNECTED) continue; - send_failed_signal(f); - } - if (rti.base.tracing_enabled) { - stop_trace_locked(rti.base.trace); - lf_print("RTI trace file saved."); - } - lf_print("RTI is exiting abnormally."); - } + if (!normal_termination) { + for (int i = 0; i < rti.base.number_of_scheduling_nodes; i++) { + federate_info_t* f = (federate_info_t*)rti.base.scheduling_nodes[i]; + if (!f || f->enclave.state == NOT_CONNECTED) + continue; + send_failed_signal(f); + } + if (rti.base.tracing_enabled) { + lf_tracing_global_shutdown(); + lf_print("RTI trace file saved."); + } + lf_print("RTI is exiting abnormally."); + } } void usage(int argc, const char* argv[]) { - lf_print("\nCommand-line arguments: \n"); - lf_print(" -i, --id "); - lf_print(" The ID of the federation that this RTI will control.\n"); - lf_print(" -n, --number_of_federates "); - lf_print(" The number of federates in the federation that this RTI will control.\n"); - lf_print(" -p, --port "); - lf_print(" The port number to use for the RTI. Must be larger than 0 and smaller than %d. Default is %d.\n", UINT16_MAX, DEFAULT_PORT); - lf_print(" -c, --clock_sync [off|init|on] [period ] [exchanges-per-interval ]"); - lf_print(" The status of clock synchronization for this federate."); - lf_print(" - off: Clock synchronization is off."); - lf_print(" - init (default): Clock synchronization is done only during startup."); - lf_print(" - on: Clock synchronization is done both at startup and during the execution."); - lf_print(" Relevant parameters that can be set: "); - lf_print(" - period (in nanoseconds): Controls how often a clock synchronization attempt is made"); - lf_print(" (period in nanoseconds, default is 5 msec). 
Only applies to 'on'."); - lf_print(" - exchanges-per-interval : Controls the number of messages that are exchanged for each"); - lf_print(" clock sync attempt (default is 10). Applies to 'init' and 'on'.\n"); - lf_print(" -a, --auth Turn on HMAC authentication options.\n"); - lf_print(" -t, --tracing Turn on tracing.\n"); + lf_print("\nCommand-line arguments: \n"); + lf_print(" -i, --id "); + lf_print(" The ID of the federation that this RTI will control.\n"); + lf_print(" -n, --number_of_federates "); + lf_print(" The number of federates in the federation that this RTI will control.\n"); + lf_print(" -p, --port "); + lf_print(" The port number to use for the RTI. Must be larger than 0 and smaller than %d. Default is %d.\n", + UINT16_MAX, DEFAULT_PORT); + lf_print(" -c, --clock_sync [off|init|on] [period ] [exchanges-per-interval ]"); + lf_print(" The status of clock synchronization for this federate."); + lf_print(" - off: Clock synchronization is off."); + lf_print(" - init (default): Clock synchronization is done only during startup."); + lf_print(" - on: Clock synchronization is done both at startup and during the execution."); + lf_print(" Relevant parameters that can be set: "); + lf_print(" - period (in nanoseconds): Controls how often a clock synchronization attempt is made"); + lf_print(" (period in nanoseconds, default is 5 msec). Only applies to 'on'."); + lf_print(" - exchanges-per-interval : Controls the number of messages that are exchanged for each"); + lf_print(" clock sync attempt (default is 10). 
Applies to 'init' and 'on'.\n"); + lf_print(" -a, --auth Turn on HMAC authentication options.\n"); + lf_print(" -t, --tracing Turn on tracing.\n"); - lf_print("Command given:"); - for (int i = 0; i < argc; i++) { - lf_print("%s ", argv[i]); - } - lf_print("\n"); + lf_print("Command given:"); + for (int i = 0; i < argc; i++) { + lf_print("%s ", argv[i]); + } + lf_print("\n"); } int process_clock_sync_args(int argc, const char* argv[]) { - for (int i = 0; i < argc; i++) { - if (strcmp(argv[i], "off") == 0) { - rti.clock_sync_global_status = clock_sync_off; - lf_print("RTI: Clock sync: off"); - } else if (strcmp(argv[i], "init") == 0 || strcmp(argv[i], "initial") == 0) { - rti.clock_sync_global_status = clock_sync_init; - lf_print("RTI: Clock sync: init"); - } else if (strcmp(argv[i], "on") == 0) { - rti.clock_sync_global_status = clock_sync_on; - lf_print("RTI: Clock sync: on"); - } else if (strcmp(argv[i], "period") == 0) { - if (rti.clock_sync_global_status != clock_sync_on) { - lf_print_error("clock sync period can only be set if --clock-sync is set to on."); - usage(argc, argv); - i++; - continue; // Try to parse the rest of the arguments as clock sync args. - } else if (argc < i + 2) { - lf_print_error("clock sync period needs a time (in nanoseconds) argument."); - usage(argc, argv); - continue; - } - i++; - long long period_ns = strtoll(argv[i], NULL, 10); - if (period_ns == 0LL || period_ns == LLONG_MAX || period_ns == LLONG_MIN) { - lf_print_error("clock sync period value is invalid."); - continue; // Try to parse the rest of the arguments as clock sync args. 
- } - rti.clock_sync_period_ns = (int64_t)period_ns; - lf_print("RTI: Clock sync period: %lld", (long long int)rti.clock_sync_period_ns); - } else if (strcmp(argv[i], "exchanges-per-interval") == 0) { - if (rti.clock_sync_global_status != clock_sync_on && rti.clock_sync_global_status != clock_sync_init) { - lf_print_error("clock sync exchanges-per-interval can only be set if\n" - "--clock-sync is set to on or init."); - usage(argc, argv); - continue; // Try to parse the rest of the arguments as clock sync args. - } else if (argc < i + 2) { - lf_print_error("clock sync exchanges-per-interval needs an integer argument."); - usage(argc, argv); - continue; // Try to parse the rest of the arguments as clock sync args. - } - i++; - long exchanges = (long)strtol(argv[i], NULL, 10); - if (exchanges == 0L || exchanges == LONG_MAX || exchanges == LONG_MIN) { - lf_print_error("clock sync exchanges-per-interval value is invalid."); - continue; // Try to parse the rest of the arguments as clock sync args. - } - rti.clock_sync_exchanges_per_interval = (int32_t)exchanges; // FIXME: Loses numbers on 64-bit machines - lf_print("RTI: Clock sync exchanges per interval: %d", rti.clock_sync_exchanges_per_interval); - } else if (strcmp(argv[i], " ") == 0) { - // Tolerate spaces - continue; - } else { - // Either done with the clock sync args or there is an invalid - // character. 
In either case, let the parent function deal with - // the rest of the characters; - return i; - } + for (int i = 0; i < argc; i++) { + if (strcmp(argv[i], "off") == 0) { + rti.clock_sync_global_status = clock_sync_off; + lf_print("RTI: Clock sync: off"); + } else if (strcmp(argv[i], "init") == 0 || strcmp(argv[i], "initial") == 0) { + rti.clock_sync_global_status = clock_sync_init; + lf_print("RTI: Clock sync: init"); + } else if (strcmp(argv[i], "on") == 0) { + rti.clock_sync_global_status = clock_sync_on; + lf_print("RTI: Clock sync: on"); + } else if (strcmp(argv[i], "period") == 0) { + if (rti.clock_sync_global_status != clock_sync_on) { + lf_print_error("clock sync period can only be set if --clock-sync is set to on."); + usage(argc, argv); + i++; + continue; // Try to parse the rest of the arguments as clock sync args. + } else if (argc < i + 2) { + lf_print_error("clock sync period needs a time (in nanoseconds) argument."); + usage(argc, argv); + continue; + } + i++; + long long period_ns = strtoll(argv[i], NULL, 10); + if (period_ns == 0LL || period_ns == LLONG_MAX || period_ns == LLONG_MIN) { + lf_print_error("clock sync period value is invalid."); + continue; // Try to parse the rest of the arguments as clock sync args. + } + rti.clock_sync_period_ns = (int64_t)period_ns; + lf_print("RTI: Clock sync period: %lld", (long long int)rti.clock_sync_period_ns); + } else if (strcmp(argv[i], "exchanges-per-interval") == 0) { + if (rti.clock_sync_global_status != clock_sync_on && rti.clock_sync_global_status != clock_sync_init) { + lf_print_error("clock sync exchanges-per-interval can only be set if\n" + "--clock-sync is set to on or init."); + usage(argc, argv); + continue; // Try to parse the rest of the arguments as clock sync args. + } else if (argc < i + 2) { + lf_print_error("clock sync exchanges-per-interval needs an integer argument."); + usage(argc, argv); + continue; // Try to parse the rest of the arguments as clock sync args. 
+ } + i++; + long exchanges = (long)strtol(argv[i], NULL, 10); + if (exchanges == 0L || exchanges == LONG_MAX || exchanges == LONG_MIN) { + lf_print_error("clock sync exchanges-per-interval value is invalid."); + continue; // Try to parse the rest of the arguments as clock sync args. + } + rti.clock_sync_exchanges_per_interval = (int32_t)exchanges; // FIXME: Loses numbers on 64-bit machines + lf_print("RTI: Clock sync exchanges per interval: %d", rti.clock_sync_exchanges_per_interval); + } else if (strcmp(argv[i], " ") == 0) { + // Tolerate spaces + continue; + } else { + // Either done with the clock sync args or there is an invalid + // character. In either case, let the parent function deal with + // the rest of the characters; + return i; } - return argc; + } + return argc; } int process_args(int argc, const char* argv[]) { - for (int i = 1; i < argc; i++) { - if (strcmp(argv[i], "-i") == 0 || strcmp(argv[i], "--id") == 0) { - if (argc < i + 2) { - lf_print_error("--id needs a string argument."); - usage(argc, argv); - return 0; - } - i++; - lf_print("RTI: Federation ID: %s", argv[i]); - rti.federation_id = argv[i]; - } else if (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "--number_of_federates") == 0) { - if (argc < i + 2) { - lf_print_error("--number_of_federates needs an integer argument."); - usage(argc, argv); - return 0; - } - i++; - long num_federates = strtol(argv[i], NULL, 10); - if (num_federates == 0L || num_federates == LONG_MAX || num_federates == LONG_MIN) { - lf_print_error("--number_of_federates needs a valid positive integer argument."); - usage(argc, argv); - return 0; - } - rti.base.number_of_scheduling_nodes = (int32_t)num_federates; // FIXME: Loses numbers on 64-bit machines - lf_print("RTI: Number of federates: %d", rti.base.number_of_scheduling_nodes); - } else if (strcmp(argv[i], "-p") == 0 || strcmp(argv[i], "--port") == 0) { - if (argc < i + 2) { - lf_print_error( - "--port needs a short unsigned integer argument ( > 0 and < %d).", - 
UINT16_MAX - ); - usage(argc, argv); - return 0; - } - i++; - uint32_t RTI_port = (uint32_t)strtoul(argv[i], NULL, 10); - if (RTI_port <= 0 || RTI_port >= UINT16_MAX) { - lf_print_error( - "--port needs a short unsigned integer argument ( > 0 and < %d).", - UINT16_MAX - ); - usage(argc, argv); - return 0; - } - rti.user_specified_port = (uint16_t)RTI_port; - } else if (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--clock_sync") == 0) { - if (argc < i + 2) { - lf_print_error("--clock-sync needs off|init|on."); - usage(argc, argv); - return 0; - } - i++; - i += process_clock_sync_args((argc-i), &argv[i]); - } else if (strcmp(argv[i], "-a") == 0 || strcmp(argv[i], "--auth") == 0) { - #ifndef __RTI_AUTH__ - lf_print_error("--auth requires the RTI to be built with the -DAUTH=ON option."); - usage(argc, argv); - return 0; - #endif - rti.authentication_enabled = true; - } else if (strcmp(argv[i], "-t") == 0 || strcmp(argv[i], "--tracing") == 0) { - rti.base.tracing_enabled = true; - } else if (strcmp(argv[i], " ") == 0) { - // Tolerate spaces - continue; - } else { - lf_print_error("Unrecognized command-line argument: %s", argv[i]); - usage(argc, argv); - return 0; - } - } - if (rti.base.number_of_scheduling_nodes == 0) { + for (int i = 1; i < argc; i++) { + if (strcmp(argv[i], "-i") == 0 || strcmp(argv[i], "--id") == 0) { + if (argc < i + 2) { + lf_print_error("--id needs a string argument."); + usage(argc, argv); + return 0; + } + i++; + lf_print("RTI: Federation ID: %s", argv[i]); + rti.federation_id = argv[i]; + } else if (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "--number_of_federates") == 0) { + if (argc < i + 2) { + lf_print_error("--number_of_federates needs an integer argument."); + usage(argc, argv); + return 0; + } + i++; + long num_federates = strtol(argv[i], NULL, 10); + if (num_federates == 0L || num_federates == LONG_MAX || num_federates == LONG_MIN) { lf_print_error("--number_of_federates needs a valid positive integer argument."); usage(argc, 
argv); return 0; + } + rti.base.number_of_scheduling_nodes = (int32_t)num_federates; // FIXME: Loses numbers on 64-bit machines + lf_print("RTI: Number of federates: %d", rti.base.number_of_scheduling_nodes); + } else if (strcmp(argv[i], "-p") == 0 || strcmp(argv[i], "--port") == 0) { + if (argc < i + 2) { + lf_print_error("--port needs a short unsigned integer argument ( > 0 and < %d).", UINT16_MAX); + usage(argc, argv); + return 0; + } + i++; + uint32_t RTI_port = (uint32_t)strtoul(argv[i], NULL, 10); + if (RTI_port <= 0 || RTI_port >= UINT16_MAX) { + lf_print_error("--port needs a short unsigned integer argument ( > 0 and < %d).", UINT16_MAX); + usage(argc, argv); + return 0; + } + rti.user_specified_port = (uint16_t)RTI_port; + } else if (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--clock_sync") == 0) { + if (argc < i + 2) { + lf_print_error("--clock-sync needs off|init|on."); + usage(argc, argv); + return 0; + } + i++; + i += process_clock_sync_args((argc - i), &argv[i]); + } else if (strcmp(argv[i], "-a") == 0 || strcmp(argv[i], "--auth") == 0) { +#ifndef __RTI_AUTH__ + lf_print_error("--auth requires the RTI to be built with the -DAUTH=ON option."); + usage(argc, argv); + return 0; +#endif + rti.authentication_enabled = true; + } else if (strcmp(argv[i], "-t") == 0 || strcmp(argv[i], "--tracing") == 0) { + rti.base.tracing_enabled = true; + } else if (strcmp(argv[i], " ") == 0) { + // Tolerate spaces + continue; + } else { + lf_print_error("Unrecognized command-line argument: %s", argv[i]); + usage(argc, argv); + return 0; } - return 1; + } + if (rti.base.number_of_scheduling_nodes == 0) { + lf_print_error("--number_of_federates needs a valid positive integer argument."); + usage(argc, argv); + return 0; + } + return 1; } int main(int argc, const char* argv[]) { + initialize_lf_thread_id(); + initialize_RTI(&rti); - initialize_RTI(&rti); - - // Catch the Ctrl-C signal, for a clean exit that does not lose the trace information - signal(SIGINT, exit); + // 
Catch the Ctrl-C signal, for a clean exit that does not lose the trace information + signal(SIGINT, exit); #ifdef SIGPIPE - // Ignore SIGPIPE errors, which terminate the entire application if - // socket write() fails because the reader has closed the socket. - // Instead, cause an EPIPE error to be set when write() fails. - // NOTE: The reason for a broken socket causing a SIGPIPE signal - // instead of just having write() return an error is to robutly - // a foo | bar pipeline where bar crashes. The default behavior - // is for foo to also exit. - signal(SIGPIPE, SIG_IGN); + // Ignore SIGPIPE errors, which terminate the entire application if + // socket write() fails because the reader has closed the socket. + // Instead, cause an EPIPE error to be set when write() fails. + // NOTE: The reason for a broken socket causing a SIGPIPE signal + // instead of just having write() return an error is to robutly + // a foo | bar pipeline where bar crashes. The default behavior + // is for foo to also exit. + signal(SIGPIPE, SIG_IGN); #endif // SIGPIPE - if (atexit(termination) != 0) { - lf_print_warning("Failed to register termination function!"); - } + if (atexit(termination) != 0) { + lf_print_warning("Failed to register termination function!"); + } - if (!process_args(argc, argv)) { - // Processing command-line arguments failed. - return -1; - } + if (!process_args(argc, argv)) { + // Processing command-line arguments failed. + return -1; + } - if (rti.base.tracing_enabled) { - _lf_number_of_workers = rti.base.number_of_scheduling_nodes; - rti.base.trace = trace_new(NULL, rti_trace_file_name); - LF_ASSERT_NON_NULL(rti.base.trace); - start_trace(rti.base.trace); - lf_print("Tracing the RTI execution in %s file.", rti_trace_file_name); - } + if (rti.base.tracing_enabled) { + _lf_number_of_workers = rti.base.number_of_scheduling_nodes; + // One thread communicating to each federate. 
Add 1 for 1 ephemeral + // timeout thread for each federate (this should be created only once + // per federate because shutdown only occurs once). Add 1 for the clock + // sync thread. Add 1 for the thread that responds to erroneous + // connections attempted after initialization phase has completed. Add 1 + // for the main thread. + lf_tracing_global_init("rti", -1, _lf_number_of_workers * 2 + 3); + lf_print("Tracing the RTI execution in %s file.", rti_trace_file_name); + } + + lf_print("Starting RTI for %d federates in federation ID %s.", rti.base.number_of_scheduling_nodes, + rti.federation_id); + assert(rti.base.number_of_scheduling_nodes < UINT16_MAX); - lf_print("Starting RTI for %d federates in federation ID %s.", rti.base.number_of_scheduling_nodes, rti.federation_id); - assert(rti.base.number_of_scheduling_nodes < UINT16_MAX); - - // Allocate memory for the federates - int n = rti.base.number_of_scheduling_nodes; - rti.base.scheduling_nodes = (scheduling_node_t**)calloc(n, sizeof(scheduling_node_t*)); - // Allocate memory for min_delays. - rti.base.min_delays = (tag_t*)calloc((n*n), sizeof(tag_t)); - for (uint16_t i = 0; i < n; i++) { - for (uint16_t j = 0; j < n; j++) { - rti.base.min_delays[i+j*n] = FOREVER_TAG; - } - federate_info_t *fed_info = (federate_info_t *) calloc(1, sizeof(federate_info_t)); - initialize_federate(fed_info, i); - rti.base.scheduling_nodes[i] = (scheduling_node_t *) fed_info; + // Allocate memory for the federates + int n = rti.base.number_of_scheduling_nodes; + rti.base.scheduling_nodes = (scheduling_node_t**)calloc(n, sizeof(scheduling_node_t*)); + // Allocate memory for min_delays. 
+ rti.base.min_delays = (tag_t*)calloc((n * n), sizeof(tag_t)); + for (uint16_t i = 0; i < n; i++) { + for (uint16_t j = 0; j < n; j++) { + rti.base.min_delays[i + j * n] = FOREVER_TAG; } + federate_info_t* fed_info = (federate_info_t*)calloc(1, sizeof(federate_info_t)); + initialize_federate(fed_info, i); + rti.base.scheduling_nodes[i] = (scheduling_node_t*)fed_info; + } - int socket_descriptor = start_rti_server(rti.user_specified_port); - if (socket_descriptor >= 0) { - wait_for_federates(socket_descriptor); - normal_termination = true; - if (rti.base.tracing_enabled) { - // No need for a mutex lock because all threads have exited. - stop_trace_locked(rti.base.trace); - lf_print("RTI trace file saved."); - } + int socket_descriptor = start_rti_server(rti.user_specified_port); + if (socket_descriptor >= 0) { + wait_for_federates(socket_descriptor); + normal_termination = true; + if (rti.base.tracing_enabled) { + // No need for a mutex lock because all threads have exited. + lf_tracing_global_shutdown(); + lf_print("RTI trace file saved."); } + } - lf_print("RTI is exiting."); // Do this before freeing scheduling nodes. - free_scheduling_nodes(rti.base.scheduling_nodes, rti.base.number_of_scheduling_nodes); + lf_print("RTI is exiting."); // Do this before freeing scheduling nodes. + free_scheduling_nodes(rti.base.scheduling_nodes, rti.base.number_of_scheduling_nodes); - // Even if the RTI is exiting normally, it should report an error code if one of the - // federates has reported an error. - return (int)_lf_federate_reports_error; + // Even if the RTI is exiting normally, it should report an error code if one of the + // federates has reported an error. 
+ return (int)_lf_federate_reports_error; } #endif // STANDALONE_RTI - diff --git a/core/federated/RTI/rti_common.c b/core/federated/RTI/rti_common.c index 519719645..dfd37ac36 100644 --- a/core/federated/RTI/rti_common.c +++ b/core/federated/RTI/rti_common.c @@ -18,525 +18,518 @@ static rti_common_t* rti_common = NULL; // Global variables defined in tag.c: extern instant_t start_time; - -void initialize_rti_common(rti_common_t * _rti_common) { - rti_common = _rti_common; - rti_common->max_stop_tag = NEVER_TAG; - rti_common->number_of_scheduling_nodes = 0; - rti_common->num_scheduling_nodes_handling_stop = 0; +void initialize_rti_common(rti_common_t* _rti_common) { + rti_common = _rti_common; + rti_common->max_stop_tag = NEVER_TAG; + rti_common->number_of_scheduling_nodes = 0; + rti_common->num_scheduling_nodes_handling_stop = 0; } -// FIXME: Should scheduling_nodes tracing use the same mechanism as federates? +// FIXME: Should scheduling_nodes tracing use the same mechanism as federates? // It needs to account a federate having itself a number of scheduling_nodes. 
-// Currently, all calls to tracepoint_from_federate() and +// Currently, all calls to tracepoint_from_federate() and // tracepoint_to_federate() are in rti_lib.c #define IS_IN_ZERO_DELAY_CYCLE 1 #define IS_IN_CYCLE 2 void invalidate_min_delays_upstream(scheduling_node_t* node) { - if(node->all_upstreams != NULL) { - free(node->all_upstreams); - for (int i = 0; i < rti_common->number_of_scheduling_nodes; i++) { - rti_common->min_delays[i*rti_common->number_of_scheduling_nodes + node->id] = FOREVER_TAG; - } + if (node->all_upstreams != NULL) { + free(node->all_upstreams); + for (int i = 0; i < rti_common->number_of_scheduling_nodes; i++) { + rti_common->min_delays[i * rti_common->number_of_scheduling_nodes + node->id] = FOREVER_TAG; } - if(node->all_downstreams != NULL) free(node->all_downstreams); - node->all_upstreams = NULL; - node->num_all_upstreams = 0; - node->all_downstreams = NULL; - node->num_all_downstreams = 0; - node->flags = 0; // All flags cleared because they get set lazily. + } + if (node->all_downstreams != NULL) + free(node->all_downstreams); + node->all_upstreams = NULL; + node->num_all_upstreams = 0; + node->all_downstreams = NULL; + node->num_all_downstreams = 0; + node->flags = 0; // All flags cleared because they get set lazily. 
} void initialize_scheduling_node(scheduling_node_t* e, uint16_t id) { - e->id = id; - e->completed = NEVER_TAG; - e->last_granted = NEVER_TAG; - e->last_provisionally_granted = NEVER_TAG; - e->next_event = NEVER_TAG; - e->last_DNET = NEVER_TAG; - e->state = NOT_CONNECTED; - e->immediate_upstreams = NULL; - e->immediate_upstream_delays = NULL; - e->num_immediate_upstreams = 0; - e->immediate_downstreams = NULL; - e->num_immediate_downstreams = 0; - e->mode = REALTIME; - invalidate_min_delays_upstream(e); + e->id = id; + e->completed = NEVER_TAG; + e->last_granted = NEVER_TAG; + e->last_provisionally_granted = NEVER_TAG; + e->next_event = NEVER_TAG; + e->last_DNET = NEVER_TAG; + e->state = NOT_CONNECTED; + e->immediate_upstreams = NULL; + e->immediate_upstream_delays = NULL; + e->num_immediate_upstreams = 0; + e->immediate_downstreams = NULL; + e->num_immediate_downstreams = 0; + e->mode = REALTIME; + invalidate_min_delays_upstream(e); } void _logical_tag_complete(scheduling_node_t* enclave, tag_t completed) { - // FIXME: Consolidate this message with NET to get NMR (Next Message Request). - // Careful with handling startup and shutdown. - LF_MUTEX_LOCK(rti_common->mutex); - - enclave->completed = completed; - - LF_PRINT_LOG("RTI received from federate/enclave %d the latest tag complete (LTC) " PRINTF_TAG ".", - enclave->id, enclave->completed.time - start_time, enclave->completed.microstep); - - // Check downstream scheduling_nodes to see whether they should now be granted a TAG. - for (int i = 0; i < enclave->num_immediate_downstreams; i++) { - scheduling_node_t *downstream = rti_common->scheduling_nodes[enclave->immediate_downstreams[i]]; - // Notify downstream enclave if appropriate. - notify_advance_grant_if_safe(downstream); - bool *visited = (bool *)calloc(rti_common->number_of_scheduling_nodes, sizeof(bool)); // Initializes to 0. - // Notify scheduling_nodes downstream of downstream if appropriate. 
- notify_downstream_advance_grant_if_safe(downstream, visited); - free(visited); - } + // FIXME: Consolidate this message with NET to get NMR (Next Message Request). + // Careful with handling startup and shutdown. + LF_MUTEX_LOCK(rti_common->mutex); + + enclave->completed = completed; + + LF_PRINT_LOG("RTI received from federate/enclave %d the latest tag complete (LTC) " PRINTF_TAG ".", enclave->id, + enclave->completed.time - start_time, enclave->completed.microstep); + + // Check downstream scheduling_nodes to see whether they should now be granted a TAG. + for (int i = 0; i < enclave->num_immediate_downstreams; i++) { + scheduling_node_t* downstream = rti_common->scheduling_nodes[enclave->immediate_downstreams[i]]; + // Notify downstream enclave if appropriate. + notify_advance_grant_if_safe(downstream); + bool* visited = (bool*)calloc(rti_common->number_of_scheduling_nodes, sizeof(bool)); // Initializes to 0. + // Notify scheduling_nodes downstream of downstream if appropriate. + notify_downstream_advance_grant_if_safe(downstream, visited); + free(visited); + } - LF_MUTEX_UNLOCK(rti_common->mutex); + LF_MUTEX_UNLOCK(rti_common->mutex); } tag_t earliest_future_incoming_message_tag(scheduling_node_t* e) { - // First, we need to find the shortest path (minimum delay) path to each upstream node - // and then find the minimum of the node's recorded NET plus the minimum path delay. - // Update the shortest paths, if necessary. - update_min_delays_upstream(e); - update_all_downstreams(e); - - // Next, find the tag of the earliest possible incoming message from upstream enclaves or - // federates, which will be the smallest upstream NET plus the least delay. - // This could be NEVER_TAG if the RTI has not seen a NET from some upstream node. 
- tag_t t_d = FOREVER_TAG; - int n = rti_common->number_of_scheduling_nodes; - for (int i = 0; i < e->num_all_upstreams; i++) { - // Node e->all_upstreams[i] is upstream of e with - // min delay rti_common->min_delays[e->all_upstreams[i]*n + e->id] - scheduling_node_t* upstream = rti_common->scheduling_nodes[e->all_upstreams[i]]; - // If we haven't heard from the upstream node, then assume it can send an event at the start time. - if (lf_tag_compare(upstream->next_event, NEVER_TAG) == 0) { - tag_t start_tag = {.time = start_time, .microstep = 0}; - upstream->next_event = start_tag; - } - // The min_delay here is a tag_t, not an interval_t because it may account for more than - // one connection. No delay at all is represented by (0,0). A delay of 0 is represented - // by (0,1). If the time part of the delay is greater than 0, then we want to ignore - // the microstep in upstream->next_event because that microstep will have been lost. - // Otherwise, we want preserve it and add to it. This is handled by lf_tag_add(). - tag_t earliest_tag_from_upstream = lf_tag_add(upstream->next_event, rti_common->min_delays[e->all_upstreams[i]*n + e->id]); - - /* Following debug message is too verbose for normal use: - LF_PRINT_DEBUG("RTI: Earliest next event upstream of fed/encl %d at fed/encl %d has tag " PRINTF_TAG ".", - e->id, - upstream->id, - earliest_tag_from_upstream.time - start_time, earliest_tag_from_upstream.microstep); - */ - if (lf_tag_compare(earliest_tag_from_upstream, t_d) < 0) { - t_d = earliest_tag_from_upstream; - } + // First, we need to find the shortest path (minimum delay) path to each upstream node + // and then find the minimum of the node's recorded NET plus the minimum path delay. + // Update the shortest paths, if necessary. 
+ update_min_delays_upstream(e); + update_all_downstreams(e); + + // Next, find the tag of the earliest possible incoming message from upstream enclaves or + // federates, which will be the smallest upstream NET plus the least delay. + // This could be NEVER_TAG if the RTI has not seen a NET from some upstream node. + tag_t t_d = FOREVER_TAG; + int n = rti_common->number_of_scheduling_nodes; + for (int i = 0; i < e->num_all_upstreams; i++) { + // Node e->all_upstreams[i] is upstream of e with + // min delay rti_common->min_delays[e->all_upstreams[i]*n + e->id] + scheduling_node_t* upstream = rti_common->scheduling_nodes[e->all_upstreams[i]]; + // If we haven't heard from the upstream node, then assume it can send an event at the start time. + if (lf_tag_compare(upstream->next_event, NEVER_TAG) == 0) { + tag_t start_tag = {.time = start_time, .microstep = 0}; + upstream->next_event = start_tag; + } + // The min_delay here is a tag_t, not an interval_t because it may account for more than + // one connection. No delay at all is represented by (0,0). A delay of 0 is represented + // by (0,1). If the time part of the delay is greater than 0, then we want to ignore + // the microstep in upstream->next_event because that microstep will have been lost. + // Otherwise, we want preserve it and add to it. This is handled by lf_tag_add(). 
+ tag_t earliest_tag_from_upstream = + lf_tag_add(upstream->next_event, rti_common->min_delays[e->all_upstreams[i] * n + e->id]); + + /* Following debug message is too verbose for normal use: + LF_PRINT_DEBUG("RTI: Earliest next event upstream of fed/encl %d at fed/encl %d has tag " PRINTF_TAG ".", + e->id, + upstream->id, + earliest_tag_from_upstream.time - start_time, earliest_tag_from_upstream.microstep); + */ + if (lf_tag_compare(earliest_tag_from_upstream, t_d) < 0) { + t_d = earliest_tag_from_upstream; } - return t_d; + } + return t_d; } tag_t eimt_strict(scheduling_node_t* e) { - // Find the tag of the earliest possible incoming message from immediately upstream - // enclaves or federates that are not part of a zero-delay cycle. - // This will be the smallest upstream NET plus the least delay. - // This could be NEVER_TAG if the RTI has not seen a NET from some upstream node. - tag_t t_d = FOREVER_TAG; - for (int i = 0; i < e->num_immediate_upstreams; i++) { - scheduling_node_t* upstream = rti_common->scheduling_nodes[e->immediate_upstreams[i]]; - // Skip this node if it is part of a zero-delay cycle. - if (is_in_zero_delay_cycle(upstream)) continue; - // If we haven't heard from the upstream node, then assume it can send an event at the start time. - if (lf_tag_compare(upstream->next_event, NEVER_TAG) == 0) { - tag_t start_tag = {.time = start_time, .microstep = 0}; - upstream->next_event = start_tag; - } - // Need to consider nodes that are upstream of the upstream node because those - // nodes may send messages to the upstream node. - tag_t earliest = earliest_future_incoming_message_tag(upstream); - // If the next event of the upstream node is earlier, then use that. 
- if (lf_tag_compare(upstream->next_event, earliest) < 0) { - earliest = upstream->next_event; - } - tag_t earliest_tag_from_upstream = lf_delay_tag(earliest, e->immediate_upstream_delays[i]); - LF_PRINT_DEBUG("RTI: Strict EIMT of fed/encl %d at fed/encl %d has tag " PRINTF_TAG ".", - e->id, - upstream->id, - earliest_tag_from_upstream.time - start_time, earliest_tag_from_upstream.microstep); - if (lf_tag_compare(earliest_tag_from_upstream, t_d) < 0) { - t_d = earliest_tag_from_upstream; - } + // Find the tag of the earliest possible incoming message from immediately upstream + // enclaves or federates that are not part of a zero-delay cycle. + // This will be the smallest upstream NET plus the least delay. + // This could be NEVER_TAG if the RTI has not seen a NET from some upstream node. + tag_t t_d = FOREVER_TAG; + for (int i = 0; i < e->num_immediate_upstreams; i++) { + scheduling_node_t* upstream = rti_common->scheduling_nodes[e->immediate_upstreams[i]]; + // Skip this node if it is part of a zero-delay cycle. + if (is_in_zero_delay_cycle(upstream)) + continue; + // If we haven't heard from the upstream node, then assume it can send an event at the start time. + if (lf_tag_compare(upstream->next_event, NEVER_TAG) == 0) { + tag_t start_tag = {.time = start_time, .microstep = 0}; + upstream->next_event = start_tag; } - return t_d; + // Need to consider nodes that are upstream of the upstream node because those + // nodes may send messages to the upstream node. + tag_t earliest = earliest_future_incoming_message_tag(upstream); + // If the next event of the upstream node is earlier, then use that. 
+ if (lf_tag_compare(upstream->next_event, earliest) < 0) { + earliest = upstream->next_event; + } + tag_t earliest_tag_from_upstream = lf_delay_tag(earliest, e->immediate_upstream_delays[i]); + LF_PRINT_DEBUG("RTI: Strict EIMT of fed/encl %d at fed/encl %d has tag " PRINTF_TAG ".", e->id, upstream->id, + earliest_tag_from_upstream.time - start_time, earliest_tag_from_upstream.microstep); + if (lf_tag_compare(earliest_tag_from_upstream, t_d) < 0) { + t_d = earliest_tag_from_upstream; + } + } + return t_d; } tag_advance_grant_t tag_advance_grant_if_safe(scheduling_node_t* e) { - tag_advance_grant_t result = {.tag = NEVER_TAG, .is_provisional = false}; + tag_advance_grant_t result = {.tag = NEVER_TAG, .is_provisional = false}; - // Find the earliest LTC of upstream scheduling_nodes (M). - tag_t min_upstream_completed = FOREVER_TAG; + // Find the earliest LTC of upstream scheduling_nodes (M). + tag_t min_upstream_completed = FOREVER_TAG; - for (int j = 0; j < e->num_immediate_upstreams; j++) { - scheduling_node_t *upstream = rti_common->scheduling_nodes[e->immediate_upstreams[j]]; + for (int j = 0; j < e->num_immediate_upstreams; j++) { + scheduling_node_t* upstream = rti_common->scheduling_nodes[e->immediate_upstreams[j]]; - // Ignore this enclave/federate if it is not connected. - if (upstream->state == NOT_CONNECTED) continue; + // Ignore this enclave/federate if it is not connected. + if (upstream->state == NOT_CONNECTED) + continue; - // Adjust by the "after" delay. - // Note that "no delay" is encoded as NEVER, - // whereas one microstep delay is encoded as 0LL. - tag_t candidate = lf_delay_strict(upstream->completed, e->immediate_upstream_delays[j]); + // Adjust by the "after" delay. + // Note that "no delay" is encoded as NEVER, + // whereas one microstep delay is encoded as 0LL. 
+ tag_t candidate = lf_delay_strict(upstream->completed, e->immediate_upstream_delays[j]); - if (lf_tag_compare(candidate, min_upstream_completed) < 0) { - min_upstream_completed = candidate; - } - } - LF_PRINT_LOG("RTI: Minimum upstream LTC for federate/enclave %d is " PRINTF_TAG - "(adjusted by after delay).", - e->id, - min_upstream_completed.time - start_time, min_upstream_completed.microstep); - if (lf_tag_compare(min_upstream_completed, e->last_granted) > 0 - && lf_tag_compare(min_upstream_completed, e->next_event) >= 0 // The enclave has to advance its tag - ) { - result.tag = min_upstream_completed; - return result; - } - - // Can't make progress based only on upstream LTCs. - // If all (transitive) upstream scheduling_nodes of the enclave - // have earliest event tags such that the - // enclave can now advance its tag, then send it a TAG message. - // Find the tag of the earliest event that may be later received from an upstream enclave - // or federate (which includes any after delays on the connections). - tag_t t_d = earliest_future_incoming_message_tag(e); - // Non-ZDC version of the above. This is a tag that must be strictly greater than - // that of the next granted PTAG. - tag_t t_d_strict = eimt_strict(e); - - LF_PRINT_LOG("RTI: Earliest next event upstream of node %d has tag " PRINTF_TAG ".", - e->id, t_d.time - start_time, t_d.microstep); - - // Given an EIMT (earliest incoming message tag) there are these possible scenarios: - // 1) The EIMT is greater than the NET we want to advance to. Grant a TAG. - // 2) The EIMT is equal to the NET and the strict EIMT is greater than the net - // and the federate is part of a zero-delay cycle (ZDC). Grant a PTAG. - // 3) Otherwise, grant nothing and wait for further updates. 
- - if ( // Scenario (1) above - lf_tag_compare(t_d, e->next_event) > 0 // EIMT greater than NET - && lf_tag_compare(e->next_event, NEVER_TAG) > 0 // NET is not NEVER_TAG - && lf_tag_compare(t_d, e->last_provisionally_granted) >= 0 // The grant is not redundant - // (equal is important to override any previous - // PTAGs). - && lf_tag_compare(t_d, e->last_granted) > 0 // The grant is not redundant. - ) { - // No upstream node can send events that will be received with a tag less than or equal to - // e->next_event, so it is safe to send a TAG. - LF_PRINT_LOG("RTI: Earliest upstream message time for fed/encl %d is " PRINTF_TAG - "(adjusted by after delay). Granting tag advance (TAG) for " PRINTF_TAG, - e->id, - t_d.time - lf_time_start(), t_d.microstep, - e->next_event.time - lf_time_start(), - e->next_event.microstep); - result.tag = e->next_event; - } else if( // Scenario (2) above - lf_tag_compare(t_d, e->next_event) == 0 // EIMT equal to NET - && is_in_zero_delay_cycle(e) // The node is part of a ZDC - && lf_tag_compare(t_d_strict, e->next_event) > 0 // The strict EIMT is greater than the NET - && lf_tag_compare(t_d, e->last_provisionally_granted) > 0 // The grant is not redundant - && lf_tag_compare(t_d, e->last_granted) > 0 // The grant is not redundant. - ) { - // Some upstream node may send an event that has the same tag as this node's next event, - // so we can only grant a PTAG. - LF_PRINT_LOG("RTI: Earliest upstream message time for fed/encl %d is " PRINTF_TAG - " (adjusted by after delay). 
Granting provisional tag advance (PTAG) for " PRINTF_TAG, - e->id, - t_d.time - start_time, t_d.microstep, - e->next_event.time - lf_time_start(), - e->next_event.microstep); - result.tag = e->next_event; - result.is_provisional = true; + if (lf_tag_compare(candidate, min_upstream_completed) < 0) { + min_upstream_completed = candidate; } + } + LF_PRINT_LOG("RTI: Minimum upstream LTC for federate/enclave %d is " PRINTF_TAG "(adjusted by after delay).", e->id, + min_upstream_completed.time - start_time, min_upstream_completed.microstep); + if (lf_tag_compare(min_upstream_completed, e->last_granted) > 0 && + lf_tag_compare(min_upstream_completed, e->next_event) >= 0 // The enclave has to advance its tag + ) { + result.tag = min_upstream_completed; return result; + } + + // Can't make progress based only on upstream LTCs. + // If all (transitive) upstream scheduling_nodes of the enclave + // have earliest event tags such that the + // enclave can now advance its tag, then send it a TAG message. + // Find the tag of the earliest event that may be later received from an upstream enclave + // or federate (which includes any after delays on the connections). + tag_t t_d = earliest_future_incoming_message_tag(e); + // Non-ZDC version of the above. This is a tag that must be strictly greater than + // that of the next granted PTAG. + tag_t t_d_strict = eimt_strict(e); + + LF_PRINT_LOG("RTI: Earliest next event upstream of node %d has tag " PRINTF_TAG ".", e->id, t_d.time - start_time, + t_d.microstep); + + // Given an EIMT (earliest incoming message tag) there are these possible scenarios: + // 1) The EIMT is greater than the NET we want to advance to. Grant a TAG. + // 2) The EIMT is equal to the NET and the strict EIMT is greater than the net + // and the federate is part of a zero-delay cycle (ZDC). Grant a PTAG. + // 3) Otherwise, grant nothing and wait for further updates. 
+ + if ( // Scenario (1) above + lf_tag_compare(t_d, e->next_event) > 0 // EIMT greater than NET + && lf_tag_compare(e->next_event, NEVER_TAG) > 0 // NET is not NEVER_TAG + && lf_tag_compare(t_d, e->last_provisionally_granted) >= 0 // The grant is not redundant + // (equal is important to override any previous + // PTAGs). + && lf_tag_compare(t_d, e->last_granted) > 0 // The grant is not redundant. + ) { + // No upstream node can send events that will be received with a tag less than or equal to + // e->next_event, so it is safe to send a TAG. + LF_PRINT_LOG("RTI: Earliest upstream message time for fed/encl %d is " PRINTF_TAG + "(adjusted by after delay). Granting tag advance (TAG) for " PRINTF_TAG, + e->id, t_d.time - lf_time_start(), t_d.microstep, e->next_event.time - lf_time_start(), + e->next_event.microstep); + result.tag = e->next_event; + } else if ( // Scenario (2) above + lf_tag_compare(t_d, e->next_event) == 0 // EIMT equal to NET + && is_in_zero_delay_cycle(e) // The node is part of a ZDC + && lf_tag_compare(t_d_strict, e->next_event) > 0 // The strict EIMT is greater than the NET + && lf_tag_compare(t_d, e->last_provisionally_granted) > 0 // The grant is not redundant + && lf_tag_compare(t_d, e->last_granted) > 0 // The grant is not redundant. + ) { + // Some upstream node may send an event that has the same tag as this node's next event, + // so we can only grant a PTAG. + LF_PRINT_LOG("RTI: Earliest upstream message time for fed/encl %d is " PRINTF_TAG + " (adjusted by after delay). 
Granting provisional tag advance (PTAG) for " PRINTF_TAG, + e->id, t_d.time - start_time, t_d.microstep, e->next_event.time - lf_time_start(), + e->next_event.microstep); + result.tag = e->next_event; + result.is_provisional = true; + } + return result; } void notify_downstream_advance_grant_if_safe(scheduling_node_t* e, bool visited[]) { - visited[e->id] = true; - for (int i = 0; i < e->num_immediate_downstreams; i++) { - scheduling_node_t* downstream = rti_common->scheduling_nodes[e->immediate_downstreams[i]]; - if (visited[downstream->id]) continue; - notify_advance_grant_if_safe(downstream); - notify_downstream_advance_grant_if_safe(downstream, visited); - } + visited[e->id] = true; + for (int i = 0; i < e->num_immediate_downstreams; i++) { + scheduling_node_t* downstream = rti_common->scheduling_nodes[e->immediate_downstreams[i]]; + if (visited[downstream->id]) + continue; + notify_advance_grant_if_safe(downstream); + notify_downstream_advance_grant_if_safe(downstream, visited); + } } void update_scheduling_node_next_event_tag_locked(scheduling_node_t* e, tag_t next_event_tag) { - e->next_event = next_event_tag; - - LF_PRINT_DEBUG( - "RTI: Updated the recorded next event tag for federate/enclave %d to " PRINTF_TAG, - e->id, - next_event_tag.time - lf_time_start(), - next_event_tag.microstep - ); - - // Check to see whether we can reply now with a tag advance grant. - // If the enclave has no upstream scheduling_nodes, then it does not wait for - // nor expect a reply. It just proceeds to advance time. - if (e->num_immediate_upstreams > 0) { - notify_advance_grant_if_safe(e); - } else { - // Even though there was no grant, mark the tag as if there was. - e->last_granted = next_event_tag; - } - // Check downstream scheduling_nodes to see whether they should now be granted a TAG. - // To handle cycles, need to create a boolean array to keep - // track of which downstream scheduling_nodes have been visited. 
- // FIXME: As we have all_downstreams field now, we don't need the function notify_downstream_davnace_grnat_if_safe. - update_all_downstreams(e); - bool *visited = (bool *)calloc(rti_common->number_of_scheduling_nodes, sizeof(bool)); // Initializes to 0. - notify_downstream_advance_grant_if_safe(e, visited); - free(visited); - - // Send DNET to the node's upstreams if needed - for (int i = 0; i < e->num_all_upstreams; i++) { - int target_upstream_id = e->all_upstreams[i]; - if (target_upstream_id == e->id) { - // FIXME: This shouldn't be entered, but currently, it's entered. - continue; - } - send_downstream_next_event_tag_if_needed(rti_common->scheduling_nodes[target_upstream_id], e->id); + e->next_event = next_event_tag; + + LF_PRINT_DEBUG("RTI: Updated the recorded next event tag for federate/enclave %d to " PRINTF_TAG, e->id, + next_event_tag.time - lf_time_start(), next_event_tag.microstep); + + // Check to see whether we can reply now with a tag advance grant. + // If the enclave has no upstream scheduling_nodes, then it does not wait for + // nor expect a reply. It just proceeds to advance time. + if (e->num_immediate_upstreams > 0) { + notify_advance_grant_if_safe(e); + } else { + // Even though there was no grant, mark the tag as if there was. + e->last_granted = next_event_tag; + } + // Check downstream scheduling_nodes to see whether they should now be granted a TAG. + // To handle cycles, need to create a boolean array to keep + // track of which downstream scheduling_nodes have been visited. + // FIXME: As we have all_downstreams field now, we don't need the function notify_downstream_davnace_grnat_if_safe. + update_all_downstreams(e); + bool* visited = (bool*)calloc(rti_common->number_of_scheduling_nodes, sizeof(bool)); // Initializes to 0. 
+ notify_downstream_advance_grant_if_safe(e, visited); + free(visited); + + // Send DNET to the node's upstreams if needed + for (int i = 0; i < e->num_all_upstreams; i++) { + int target_upstream_id = e->all_upstreams[i]; + if (target_upstream_id == e->id) { + // FIXME: This shouldn't be entered, but currently, it's entered. + continue; } + send_downstream_next_event_tag_if_needed(rti_common->scheduling_nodes[target_upstream_id], e->id); + } } void notify_advance_grant_if_safe(scheduling_node_t* e) { - tag_advance_grant_t grant = tag_advance_grant_if_safe(e); - if (lf_tag_compare(grant.tag, NEVER_TAG) != 0) { - if (grant.is_provisional) { - notify_provisional_tag_advance_grant(e, grant.tag); - } else { - notify_tag_advance_grant(e, grant.tag); - } + tag_advance_grant_t grant = tag_advance_grant_if_safe(e); + if (lf_tag_compare(grant.tag, NEVER_TAG) != 0) { + if (grant.is_provisional) { + notify_provisional_tag_advance_grant(e, grant.tag); + } else { + notify_tag_advance_grant(e, grant.tag); } + } } // Local function used recursively to find minimum delays upstream. // Return in count the number of non-FOREVER_TAG entries in path_delays[]. -static void _update_min_delays_upstream( - scheduling_node_t* end, - scheduling_node_t* intermediate, - tag_t path_delays[], - size_t* count) { - // On first call, intermediate will be NULL, so the path delay is initialized to zero. - tag_t delay_from_intermediate_so_far = ZERO_TAG; - if (intermediate == NULL) { - intermediate = end; - } else { - // Not the first call, so intermediate is upstream of end. - delay_from_intermediate_so_far = path_delays[intermediate->id]; - } - if (intermediate->state == NOT_CONNECTED) { - // Enclave or federate is not connected. - // No point in checking upstream scheduling_nodes. - return; - } - // Check nodes upstream of intermediate (or end on first call). 
- // NOTE: It would be better to iterate through these sorted by minimum delay, - // but for most programs, the gain might be negligible since there are relatively few - // upstream nodes. - for (int i = 0; i < intermediate->num_immediate_upstreams; i++) { - // Add connection delay to path delay so far. Because tag addition is not commutative, - // the calculation order should be carefully handled. Specifically, we should calculate - // intermediate->upstream_delay[i] + delay_from_intermediate_so_far, - // NOT delay_from_intermediate_so_far + intermediate->upstream_delay[i]. - // Before calculating path delay, convert intermediate->upstream_delay[i] to a tag - // cause there is no function that adds a tag to an interval. - tag_t connection_delay = lf_delay_tag(ZERO_TAG, intermediate->immediate_upstream_delays[i]); - tag_t path_delay = lf_tag_add(connection_delay, delay_from_intermediate_so_far); - // If the path delay is less than the so-far recorded path delay from upstream, update upstream. - if (lf_tag_compare(path_delay, path_delays[intermediate->immediate_upstreams[i]]) < 0) { - if (path_delays[intermediate->immediate_upstreams[i]].time == FOREVER) { - // Found a finite path. - *count = *count + 1; - } - path_delays[intermediate->immediate_upstreams[i]] = path_delay; - // Since the path delay to upstream has changed, recursively update those upstream of it. - // Do not do this, however, if the upstream node is the end node because this means we have - // completed a cycle. - if (end->id != intermediate->immediate_upstreams[i]) { - _update_min_delays_upstream(end, rti_common->scheduling_nodes[intermediate->immediate_upstreams[i]], path_delays, count); - } else { - // Found a cycle. - end->flags = end->flags | IS_IN_CYCLE; - // Is it a zero-delay cycle? - if (lf_tag_compare(path_delay, ZERO_TAG) == 0 && intermediate->immediate_upstream_delays[i] < 0) { - end->flags = end->flags | IS_IN_ZERO_DELAY_CYCLE; - } else { - // Clear the flag. 
- end->flags = end->flags & ~IS_IN_ZERO_DELAY_CYCLE; - } - } +static void _update_min_delays_upstream(scheduling_node_t* end, scheduling_node_t* intermediate, tag_t path_delays[], + size_t* count) { + // On first call, intermediate will be NULL, so the path delay is initialized to zero. + tag_t delay_from_intermediate_so_far = ZERO_TAG; + if (intermediate == NULL) { + intermediate = end; + } else { + // Not the first call, so intermediate is upstream of end. + delay_from_intermediate_so_far = path_delays[intermediate->id]; + } + if (intermediate->state == NOT_CONNECTED) { + // Enclave or federate is not connected. + // No point in checking upstream scheduling_nodes. + return; + } + // Check nodes upstream of intermediate (or end on first call). + // NOTE: It would be better to iterate through these sorted by minimum delay, + // but for most programs, the gain might be negligible since there are relatively few + // upstream nodes. + for (int i = 0; i < intermediate->num_immediate_upstreams; i++) { + // Add connection delay to path delay so far. Because tag addition is not commutative, + // the calculation order should be carefully handled. Specifically, we should calculate + // intermediate->upstream_delay[i] + delay_from_intermediate_so_far, + // NOT delay_from_intermediate_so_far + intermediate->upstream_delay[i]. + // Before calculating path delay, convert intermediate->upstream_delay[i] to a tag + // cause there is no function that adds a tag to an interval. + tag_t connection_delay = lf_delay_tag(ZERO_TAG, intermediate->immediate_upstream_delays[i]); + tag_t path_delay = lf_tag_add(connection_delay, delay_from_intermediate_so_far); + // If the path delay is less than the so-far recorded path delay from upstream, update upstream. + if (lf_tag_compare(path_delay, path_delays[intermediate->immediate_upstreams[i]]) < 0) { + if (path_delays[intermediate->immediate_upstreams[i]].time == FOREVER) { + // Found a finite path. 
+ *count = *count + 1; + } + path_delays[intermediate->immediate_upstreams[i]] = path_delay; + // Since the path delay to upstream has changed, recursively update those upstream of it. + // Do not do this, however, if the upstream node is the end node because this means we have + // completed a cycle. + if (end->id != intermediate->immediate_upstreams[i]) { + _update_min_delays_upstream(end, rti_common->scheduling_nodes[intermediate->immediate_upstreams[i]], + path_delays, count); + } else { + // Found a cycle. + end->flags = end->flags | IS_IN_CYCLE; + // Is it a zero-delay cycle? + if (lf_tag_compare(path_delay, ZERO_TAG) == 0 && intermediate->immediate_upstream_delays[i] < 0) { + end->flags = end->flags | IS_IN_ZERO_DELAY_CYCLE; + } else { + // Clear the flag. + end->flags = end->flags & ~IS_IN_ZERO_DELAY_CYCLE; } + } } + } } void update_min_delays_upstream(scheduling_node_t* node) { - // Check whether cached result is valid. - if (node->all_upstreams == NULL) { + // Check whether cached result is valid. + if (node->all_upstreams == NULL) { - // This is not Dijkstra's algorithm, but rather one optimized for sparse upstream nodes. - // There must be a name for this algorithm. + // This is not Dijkstra's algorithm, but rather one optimized for sparse upstream nodes. + // There must be a name for this algorithm. - // Array of results on the stack: - tag_t path_delays[rti_common->number_of_scheduling_nodes]; - // This will be the number of non-FOREVER entries put into path_delays. - size_t count = 0; + // Array of results on the stack: + tag_t path_delays[rti_common->number_of_scheduling_nodes]; + // This will be the number of non-FOREVER entries put into path_delays. + size_t count = 0; - for (int i = 0; i < rti_common->number_of_scheduling_nodes; i++) { - path_delays[i] = FOREVER_TAG; - } - _update_min_delays_upstream(node, NULL, path_delays, &count); - - // Put the results onto the matrix. 
- node->num_all_upstreams = count; - node->all_upstreams = (uint16_t*)calloc(count, sizeof(uint16_t)); - LF_PRINT_DEBUG("++++ Node %hu is in ZDC: %d", node->id, is_in_zero_delay_cycle(node)); - int k = 0; - for (int i = 0; i < rti_common->number_of_scheduling_nodes; i++) { - if (lf_tag_compare(path_delays[i], FOREVER_TAG) < 0) { - // Node i is upstream. - if (k >= count) { - lf_print_error_and_exit("Internal error! Count of upstream nodes %zu for node %d is wrong!", count, i); - } - rti_common->min_delays[node->id + i*rti_common->number_of_scheduling_nodes] = path_delays[i]; - node->all_upstreams[k++] = i; - // N^2 debug statement could be a problem with large benchmarks. - // LF_PRINT_DEBUG("++++ Node %hu is upstream with delay" PRINTF_TAG "\n", i, path_delays[i].time, path_delays[i].microstep); - } + for (int i = 0; i < rti_common->number_of_scheduling_nodes; i++) { + path_delays[i] = FOREVER_TAG; + } + _update_min_delays_upstream(node, NULL, path_delays, &count); + + // Put the results onto the matrix. + node->num_all_upstreams = count; + node->all_upstreams = (uint16_t*)calloc(count, sizeof(uint16_t)); + LF_PRINT_DEBUG("++++ Node %hu is in ZDC: %d", node->id, is_in_zero_delay_cycle(node)); + int k = 0; + for (int i = 0; i < rti_common->number_of_scheduling_nodes; i++) { + if (lf_tag_compare(path_delays[i], FOREVER_TAG) < 0) { + // Node i is upstream. + if (k >= count) { + lf_print_error_and_exit("Internal error! Count of upstream nodes %zu for node %d is wrong!", count, i); } + rti_common->min_delays[node->id + i * rti_common->number_of_scheduling_nodes] = path_delays[i]; + node->all_upstreams[k++] = i; + // N^2 debug statement could be a problem with large benchmarks. 
+ // LF_PRINT_DEBUG("++++ Node %hu is upstream with delay" PRINTF_TAG "\n", i, path_delays[i].time, + // path_delays[i].microstep); + } } + } } void update_all_downstreams(scheduling_node_t* node) { - if (node->all_downstreams == NULL) { - bool visited[rti_common->number_of_scheduling_nodes]; - for (int i = 0; i < rti_common->number_of_scheduling_nodes; i++) { - visited[i] = false; - } + if (node->all_downstreams == NULL) { + bool visited[rti_common->number_of_scheduling_nodes]; + for (int i = 0; i < rti_common->number_of_scheduling_nodes; i++) { + visited[i] = false; + } - uint16_t queue[rti_common->number_of_scheduling_nodes]; - int front = 0, rear = 0; - - visited[node->id] = true; - queue[rear++] = node->id; - - size_t count = 0; - while (front != rear) { - int current_id = queue[front++]; - scheduling_node_t* current_node = rti_common->scheduling_nodes[current_id]; - for (uint16_t i = 0; i < current_node->num_immediate_downstreams; i++) { - uint16_t downstream_id = current_node->immediate_downstreams[i]; - if (visited[downstream_id] == false) { - visited[downstream_id] = true; - queue[rear++] = downstream_id; - count++; - } - } + uint16_t queue[rti_common->number_of_scheduling_nodes]; + int front = 0, rear = 0; + + visited[node->id] = true; + queue[rear++] = node->id; + + size_t count = 0; + while (front != rear) { + int current_id = queue[front++]; + scheduling_node_t* current_node = rti_common->scheduling_nodes[current_id]; + for (uint16_t i = 0; i < current_node->num_immediate_downstreams; i++) { + uint16_t downstream_id = current_node->immediate_downstreams[i]; + if (visited[downstream_id] == false) { + visited[downstream_id] = true; + queue[rear++] = downstream_id; + count++; } + } + } - int k = 0; - node->all_downstreams = (uint16_t*)calloc(count, sizeof(uint16_t)); - node->num_all_downstreams = count; - for (uint16_t i = 0; i < rti_common->number_of_scheduling_nodes; i++) { - if (visited[i] == true && i != node->id) { - if (k >= count) { - 
lf_print_error_and_exit("Internal error! Count of downstream nodes %zu for node %d is wrong!", count, i); - } - node->all_downstreams[k++] = i; - } + int k = 0; + node->all_downstreams = (uint16_t*)calloc(count, sizeof(uint16_t)); + node->num_all_downstreams = count; + for (uint16_t i = 0; i < rti_common->number_of_scheduling_nodes; i++) { + if (visited[i] == true && i != node->id) { + if (k >= count) { + lf_print_error_and_exit("Internal error! Count of downstream nodes %zu for node %d is wrong!", count, i); } + node->all_downstreams[k++] = i; + } } + } } tag_t get_DNET_candidate(tag_t received_tag, tag_t minimum_delay) { - // FIXME: FOREVER - FOREVER is not handled. - // (A.t, A.m) - (B.t - B.m) - // The least B is (0, 0) which indicates NEVER delay. - // 1) If A.t = NEVER, return NEVER. - // 2) If A.t = FOREVER, return FOREVER. - // 3) A.t is not NEVER neither FOREVER - // a) If A < B (A.t < B.t or A.t == B.t and A.m < B.m) return NEVER - // b) A >= B - // i) If A.m < B.m return (A.t - B.t - 1, UINT_MAX) - // ii) If A.m >= B.m - // If B.t is 0 return (A.t, A.m - B.m) - // Else, return (A.t - B.t, UINT_MAX) - if (received_tag.time == NEVER || lf_tag_compare(received_tag, minimum_delay) < 0) return NEVER_TAG; - if (received_tag.time == FOREVER) return FOREVER_TAG; - tag_t result = {.time = received_tag.time - minimum_delay.time, .microstep = received_tag.microstep - minimum_delay.microstep}; - if (received_tag.microstep < minimum_delay.microstep) { - result.time -= 1; - result.microstep = UINT_MAX; + // FIXME: FOREVER - FOREVER is not handled. + // (A.t, A.m) - (B.t - B.m) + // The least B is (0, 0) which indicates NEVER delay. + // 1) If A.t = NEVER, return NEVER. + // 2) If A.t = FOREVER, return FOREVER. 
+ // 3) A.t is not NEVER neither FOREVER + // a) If A < B (A.t < B.t or A.t == B.t and A.m < B.m) return NEVER + // b) A >= B + // i) If A.m < B.m return (A.t - B.t - 1, UINT_MAX) + // ii) If A.m >= B.m + // If B.t is 0 return (A.t, A.m - B.m) + // Else, return (A.t - B.t, UINT_MAX) + if (received_tag.time == NEVER || lf_tag_compare(received_tag, minimum_delay) < 0) + return NEVER_TAG; + if (received_tag.time == FOREVER) + return FOREVER_TAG; + tag_t result = {.time = received_tag.time - minimum_delay.time, + .microstep = received_tag.microstep - minimum_delay.microstep}; + if (received_tag.microstep < minimum_delay.microstep) { + result.time -= 1; + result.microstep = UINT_MAX; + } else { + if (minimum_delay.time == 0) { + // } else { - if (minimum_delay.time == 0) { - // - } else { - result.microstep = UINT_MAX; - } + result.microstep = UINT_MAX; } - return result; + } + return result; } // It should be static because it's used only in this file. Remove from the header file. void send_downstream_next_event_tag_if_needed(scheduling_node_t* node, uint16_t new_NET_source_federate_id) { - if (is_in_zero_delay_cycle(node)) { + if (is_in_zero_delay_cycle(node)) { + return; + } + + tag_t DNET = FOREVER_TAG; + scheduling_node_t* new_NET_source_federate = rti_common->scheduling_nodes[new_NET_source_federate_id]; + if (is_in_zero_delay_cycle(new_NET_source_federate)) { + return; + } + + int index = node->id * rti_common->number_of_scheduling_nodes + new_NET_source_federate_id; + tag_t DNET_candidate = get_DNET_candidate(new_NET_source_federate->next_event, rti_common->min_delays[index]); + + if (lf_tag_compare(node->last_DNET, DNET_candidate) >= 0) { + DNET = DNET_candidate; + } else { + for (int i = 0; i < node->num_all_downstreams; i++) { + uint16_t target_downstream_id = node->all_downstreams[i]; + scheduling_node_t* target_dowstream = rti_common->scheduling_nodes[target_downstream_id]; + + if (is_in_zero_delay_cycle(target_dowstream)) { + // This node is an upstream of 
ZDC. Do not send DNET to this node. return; - } + } - tag_t DNET = FOREVER_TAG; - scheduling_node_t* new_NET_source_federate = rti_common->scheduling_nodes[new_NET_source_federate_id]; - if (is_in_zero_delay_cycle(new_NET_source_federate)) { - return; - } + index = node->id * rti_common->number_of_scheduling_nodes + target_downstream_id; + DNET_candidate = get_DNET_candidate(target_dowstream->next_event, rti_common->min_delays[index]); - int index = node->id * rti_common->number_of_scheduling_nodes + new_NET_source_federate_id; - tag_t DNET_candidate = get_DNET_candidate(new_NET_source_federate->next_event, rti_common->min_delays[index]); - - if (lf_tag_compare(node->last_DNET, DNET_candidate) >= 0) { + if (lf_tag_compare(DNET, DNET_candidate) > 0) { DNET = DNET_candidate; - } else { - for (int i = 0; i < node->num_all_downstreams; i++) { - uint16_t target_downstream_id = node->all_downstreams[i]; - scheduling_node_t* target_dowstream = rti_common->scheduling_nodes[target_downstream_id]; - - if (is_in_zero_delay_cycle(target_dowstream)) { - // This node is an upstream of ZDC. Do not send DNET to this node. - return; - } - - index = node->id * rti_common->number_of_scheduling_nodes + target_downstream_id; - DNET_candidate = get_DNET_candidate(target_dowstream->next_event, rti_common->min_delays[index]); - - if (lf_tag_compare(DNET, DNET_candidate) > 0) { - DNET = DNET_candidate; - } - } - } - if (DNET.time < start_time) { - // DNET is NEVER. - DNET = NEVER_TAG; - } - - if (lf_tag_compare(node->last_DNET, DNET) != 0 - && lf_tag_compare(node->completed, DNET) < 0 - && lf_tag_compare(node->next_event, DNET) <= 0) { - send_downstream_next_event_tag(node, DNET); + } } + } + if (DNET.time < start_time) { + // DNET is NEVER. 
+ DNET = NEVER_TAG; + } + + if (lf_tag_compare(node->last_DNET, DNET) != 0 && lf_tag_compare(node->completed, DNET) < 0 && + lf_tag_compare(node->next_event, DNET) <= 0) { + send_downstream_next_event_tag(node, DNET); + } } bool is_in_zero_delay_cycle(scheduling_node_t* node) { - update_min_delays_upstream(node); - return node->flags & IS_IN_ZERO_DELAY_CYCLE; + update_min_delays_upstream(node); + return node->flags & IS_IN_ZERO_DELAY_CYCLE; } bool is_in_cycle(scheduling_node_t* node) { - update_min_delays_upstream(node); - return node->flags & IS_IN_CYCLE; + update_min_delays_upstream(node); + return node->flags & IS_IN_CYCLE; } #endif diff --git a/core/federated/RTI/rti_common.h b/core/federated/RTI/rti_common.h index 810b3cfd8..1c2bed1e4 100644 --- a/core/federated/RTI/rti_common.h +++ b/core/federated/RTI/rti_common.h @@ -13,30 +13,27 @@ #ifndef RTI_COMMON_H #define RTI_COMMON_H -#include // Defines perror(), errno +#include // Defines perror(), errno #include -#include "platform.h" // Platform-specific types and functions -#include "util.h" // Defines print functions (e.g., lf_print). -#include "tag.h" // Time-related types and functions. -#include "trace.h" // Tracing related functions +#include "low_level_platform.h" // Platform-specific types and functions +#include "util.h" // Defines print functions (e.g., lf_print). +#include "tag.h" // Time-related types and functions. +#include "tracepoint.h" // Tracing related functions /** Mode of execution of a federate. */ -typedef enum execution_mode_t { - FAST, - REALTIME -} execution_mode_t; +typedef enum execution_mode_t { FAST, REALTIME } execution_mode_t; /** State of the scheduling node during execution. */ typedef enum scheduling_node_state_t { - NOT_CONNECTED, // The scheduling node has not connected. - GRANTED, // Most recent MSG_TYPE_NEXT_EVENT_TAG has been granted. - PENDING // Waiting for upstream scheduling nodes. + NOT_CONNECTED, // The scheduling node has not connected. 
+ GRANTED, // Most recent MSG_TYPE_NEXT_EVENT_TAG has been granted. + PENDING // Waiting for upstream scheduling nodes. } scheduling_node_state_t; /** Struct for minimum delays from upstream nodes. */ typedef struct minimum_delay_t { - int id; // ID of the upstream node. - tag_t min_delay; // Minimum delay from upstream. + int id; // ID of the upstream node. + tag_t min_delay; // Minimum delay from upstream. } minimum_delay_t; /** @@ -50,91 +47,88 @@ typedef struct minimum_delay_t { * any scheduling constraints. */ typedef struct scheduling_node_t { - uint16_t id; // ID of this scheduling node. - tag_t completed; // The largest logical tag completed by the scheduling node - // (or NEVER if no LTC has been received). - tag_t last_granted; // The maximum TAG that has been granted so far (or NEVER if none granted) - tag_t last_provisionally_granted; // The maximum PTAG that has been provisionally granted (or NEVER if none granted) - tag_t next_event; // Most recent NET received from the scheduling node (or NEVER if none received). - tag_t last_DNET; // Most recent DNET. - scheduling_node_state_t state; // State of the scheduling node. - uint16_t* immediate_upstreams; // Array of immediate upstream scheduling node ids. - interval_t* immediate_upstream_delays; // Minimum delay on connections from immdediate upstream scheduling nodes. - // Here, NEVER encodes no delay. 0LL is a microstep delay. - uint16_t num_immediate_upstreams; // Size of the array of immediate upstream scheduling nodes and delays. - uint16_t* immediate_downstreams; // Array of immediate downstream scheduling node ids. - uint16_t num_immediate_downstreams; // Size of the array of immediate downstream scheduling nodes. - execution_mode_t mode; // FAST or REALTIME. - uint16_t* all_upstreams; // Array of all upstream scheduling node ids. - uint16_t num_all_upstreams; // Size of the array of all upstream scheduling nodes and delays. 
- uint16_t* all_downstreams; // Array of all downstream scheduling node ids. - uint16_t num_all_downstreams; // Size of the array of all downstream scheduling nodes. - int flags; // Or of IS_IN_ZERO_DELAY_CYCLE, IS_IN_CYCLE + uint16_t id; // ID of this scheduling node. + tag_t completed; // The largest logical tag completed by the scheduling node + // (or NEVER if no LTC has been received). + tag_t last_granted; // The maximum TAG that has been granted so far (or NEVER if none granted) + tag_t last_provisionally_granted; // The maximum PTAG that has been provisionally granted (or NEVER if none granted) + tag_t next_event; // Most recent NET received from the scheduling node (or NEVER if none received). + tag_t last_DNET; // Most recent DNET. + scheduling_node_state_t state; // State of the scheduling node. + uint16_t* immediate_upstreams; // Array of immediate upstream scheduling node ids. + interval_t* immediate_upstream_delays; // Minimum delay on connections from immdediate upstream scheduling nodes. + // Here, NEVER encodes no delay. 0LL is a microstep delay. + uint16_t num_immediate_upstreams; // Size of the array of immediate upstream scheduling nodes and delays. + uint16_t* immediate_downstreams; // Array of immediate downstream scheduling node ids. + uint16_t num_immediate_downstreams; // Size of the array of immediate downstream scheduling nodes. + execution_mode_t mode; // FAST or REALTIME. + uint16_t* all_upstreams; // Array of all upstream scheduling node ids. + uint16_t num_all_upstreams; // Size of the array of all upstream scheduling nodes and delays. + uint16_t* all_downstreams; // Array of all downstream scheduling node ids. + uint16_t num_all_downstreams; // Size of the array of all downstream scheduling nodes. + int flags; // Or of IS_IN_ZERO_DELAY_CYCLE, IS_IN_CYCLE } scheduling_node_t; /** * Data structure which is common to both the remote standalone RTI and the local RTI used in enclaved execution. 
- * rti_remote_t and rti_local_t will "inherit" from this data structure. The first field is an array of pointers - * to scheduling nodes. These will be scheduling nodes for the local RTI and federates for the remote RTI + * rti_remote_t and rti_local_t will "inherit" from this data structure. The first field is an array of pointers + * to scheduling nodes. These will be scheduling nodes for the local RTI and federates for the remote RTI */ typedef struct rti_common_t { - // The scheduling nodes. - scheduling_node_t **scheduling_nodes; + // The scheduling nodes. + scheduling_node_t** scheduling_nodes; - // Number of scheduling nodes - uint16_t number_of_scheduling_nodes; + // Number of scheduling nodes + uint16_t number_of_scheduling_nodes; - // Matrix of minimum delays between each nodes - // Rows represent upstreams and Columns represent downstreams. - // FOREVER_TAG means there is no path, ZERO_TAG means there is no delay. - tag_t* min_delays; + // Matrix of minimum delays between each nodes + // Rows represent upstreams and Columns represent downstreams. + // FOREVER_TAG means there is no path, ZERO_TAG means there is no delay. + tag_t* min_delays; - // RTI's decided stop tag for the scheduling nodes - tag_t max_stop_tag; + // RTI's decided stop tag for the scheduling nodes + tag_t max_stop_tag; - // Number of scheduling nodes handling stop - int num_scheduling_nodes_handling_stop; + // Number of scheduling nodes handling stop + int num_scheduling_nodes_handling_stop; - // Boolean indicating that tracing is enabled. - bool tracing_enabled; - - // Pointer to a tracing object - trace_t* trace; + // Boolean indicating that tracing is enabled. + bool tracing_enabled; - // The RTI mutex for making thread-safe access to the shared state. - lf_mutex_t* mutex; + // The RTI mutex for making thread-safe access to the shared state. + lf_mutex_t* mutex; } rti_common_t; typedef struct { - tag_t tag; // NEVER if there is no tag advance grant. 
- bool is_provisional; // True for PTAG, false for TAG. + tag_t tag; // NEVER if there is no tag advance grant. + bool is_provisional; // True for PTAG, false for TAG. } tag_advance_grant_t; /** * @brief Initialize the fields of the rti_common struct. It also stores * the pointer to the struct and uses it internally. - * - * @param The rti_common_t struct to initialize. + * + * @param The rti_common_t struct to initialize. */ -void initialize_rti_common(rti_common_t * rti_common); +void initialize_rti_common(rti_common_t* rti_common); /** * @brief Update the completed tag for the specified node. - * + * * This checks whether any downstream nodes become eligible to receive TAG * or PTAG, and sends those signals if appropriate. - * + * * The function is prepended with an underscore because a function called * `logical_tag_complete` is code-generated by the compiler. - * + * * @param e The scheduling node. * @param completed The completed tag of the scheduling node. */ void _logical_tag_complete(scheduling_node_t* e, tag_t completed); -/** +/** * Initialize the scheduling node with the specified ID. - * + * * @param e The scheduling node. * @param id The scheduling node ID. */ @@ -161,7 +155,7 @@ void notify_downstream_advance_grant_if_safe(scheduling_node_t* e, bool visited[ * field. * * This function assumes that the caller holds the RTI mutex. - * + * * @param e The scheduling node. * @param tag The tag to grant. */ @@ -171,9 +165,9 @@ void notify_tag_advance_grant(scheduling_node_t* e, tag_t tag); * @brief Either send to a federate or unblock an enclave to give it a tag. * This function requires two different implementations, one for enclaves * and one for federates. - * + * * This assumes the caller holds the RTI mutex. - * + * * @param e The scheduling node. 
*/ void notify_advance_grant_if_safe(scheduling_node_t* e); @@ -193,9 +187,10 @@ void notify_advance_grant_if_safe(scheduling_node_t* e); void notify_provisional_tag_advance_grant(scheduling_node_t* e, tag_t tag); /** - * Determine whether the specified scheduling node is eligible for a tag advance grant, - * (TAG) and, if so, return the details. This is called upon receiving a LTC, NET - * or resign from an upstream node. + * @brief Determine whether the specified scheduling node is eligible for a tag advance grant, + * (TAG) and, if so, return the details. + * + * This is called upon receiving a LTC, NET or resign from an upstream node. * * This function calculates the minimum M over * all upstream scheduling nodes of the "after" delay plus the most recently @@ -219,7 +214,7 @@ void notify_provisional_tag_advance_grant(scheduling_node_t* e, tag_t tag); * This function assumes that the caller holds the RTI mutex. * * @param e The scheduling node. - * @return If granted, return the tag value and whether it is provisional. + * @return If granted, return the tag value and whether it is provisional. * Otherwise, return the NEVER_TAG. */ tag_advance_grant_t tag_advance_grant_if_safe(scheduling_node_t* e); @@ -284,7 +279,7 @@ void update_min_delays_upstream(scheduling_node_t* node); * `num_all_downstreams`. These fields will be updated only if they have not been previously updated * or if invalidate_min_delays_upstream has been called since they were last updated. * @param node The node. -*/ + */ void update_all_downstreams(scheduling_node_t* node); /** @@ -292,20 +287,20 @@ void update_all_downstreams(scheduling_node_t* node); * minimum_delay cannot be NEVER. * @param received_tag * @param minimum_delay -*/ + */ tag_t get_DNET_candidate(tag_t received_tag, tag_t minimum_delay); /** * FIXME: Add this function to rti_local either. * @param e The target node. * @param tag The downstream next event tag for e. 
-*/ -void send_downstream_next_event_tag(scheduling_node_t *e, tag_t tag); + */ +void send_downstream_next_event_tag(scheduling_node_t* e, tag_t tag); /** * @param node * @param new_NET -*/ + */ void send_downstream_next_event_tag_if_needed(scheduling_node_t* node, uint16_t new_NET_source_federate_id); /** diff --git a/core/federated/RTI/rti_local.c b/core/federated/RTI/rti_local.c index 3f4a30b85..416c81557 100644 --- a/core/federated/RTI/rti_local.c +++ b/core/federated/RTI/rti_local.c @@ -6,7 +6,7 @@ * @author Soroush Bateni (soroush@utdallas.edu) * @copyright (c) 2020-2023, The University of California at Berkeley * License in [BSD 2-clause](https://github.com/lf-lang/reactor-c/blob/main/LICENSE.md) - * + * * This file implements the enclave coordination logic. * Here we are dealing with multiple mutexes. To avoid deadlocking we follow the * following rules: @@ -26,144 +26,145 @@ #include "util.h" #include "platform.h" #include "environment.h" -#include "trace.h" +#include "tracepoint.h" #include "reactor.h" // Static global pointer to the RTI object. -static rti_local_t * rti_local; +static rti_local_t* rti_local; // The RTI mutex. 
A pointer to this mutex will be put on the rti_local struct lf_mutex_t rti_mutex; -void initialize_local_rti(environment_t *envs, int num_envs) { - rti_local = (rti_local_t*)calloc(1, sizeof(rti_local_t)); - LF_ASSERT_NON_NULL(rti_local); - - initialize_rti_common(&rti_local->base); - LF_MUTEX_INIT(&rti_mutex); - rti_local->base.mutex = &rti_mutex; - rti_local->base.number_of_scheduling_nodes = num_envs; - rti_local->base.tracing_enabled = (envs[0].trace != NULL); - - // Allocate memory for the enclave_info objects - rti_local->base.scheduling_nodes = (scheduling_node_t**)calloc(num_envs, sizeof(scheduling_node_t*)); - for (int i = 0; i < num_envs; i++) { - enclave_info_t *enclave_info = (enclave_info_t *) calloc(1, sizeof(enclave_info_t)); - initialize_enclave_info(enclave_info, i, &envs[i]); - rti_local->base.scheduling_nodes[i] = (scheduling_node_t *) enclave_info; - - // Encode the connection topology into the enclave_info object. - enclave_info->base.num_immediate_downstreams = _lf_get_downstream_of(i, &enclave_info->base.immediate_downstreams); - enclave_info->base.num_immediate_upstreams = _lf_get_upstream_of(i, &enclave_info->base.immediate_upstreams); - _lf_get_upstream_delay_of(i, &enclave_info->base.immediate_upstream_delays); - - enclave_info->base.state = GRANTED; - } +void initialize_local_rti(environment_t* envs, int num_envs) { + rti_local = (rti_local_t*)calloc(1, sizeof(rti_local_t)); + LF_ASSERT_NON_NULL(rti_local); + + initialize_rti_common(&rti_local->base); + LF_MUTEX_INIT(&rti_mutex); + rti_local->base.mutex = &rti_mutex; + rti_local->base.number_of_scheduling_nodes = num_envs; + rti_local->base.tracing_enabled = (envs[0].trace != NULL); + + // Allocate memory for the enclave_info objects + rti_local->base.scheduling_nodes = (scheduling_node_t**)calloc(num_envs, sizeof(scheduling_node_t*)); + for (int i = 0; i < num_envs; i++) { + enclave_info_t* enclave_info = (enclave_info_t*)calloc(1, sizeof(enclave_info_t)); + 
initialize_enclave_info(enclave_info, i, &envs[i]); + rti_local->base.scheduling_nodes[i] = (scheduling_node_t*)enclave_info; + + // Encode the connection topology into the enclave_info object. + enclave_info->base.num_immediate_downstreams = _lf_get_downstream_of(i, &enclave_info->base.immediate_downstreams); + enclave_info->base.num_immediate_upstreams = _lf_get_upstream_of(i, &enclave_info->base.immediate_upstreams); + _lf_get_upstream_delay_of(i, &enclave_info->base.immediate_upstream_delays); + + enclave_info->base.state = GRANTED; + } } void free_local_rti() { - free_scheduling_nodes(rti_local->base.scheduling_nodes, rti_local->base.number_of_scheduling_nodes); - free(rti_local); + free_scheduling_nodes(rti_local->base.scheduling_nodes, rti_local->base.number_of_scheduling_nodes); + free(rti_local); } -void initialize_enclave_info(enclave_info_t* enclave, int idx, environment_t * env) { - initialize_scheduling_node(&enclave->base, idx); +void initialize_enclave_info(enclave_info_t* enclave, int idx, environment_t* env) { + initialize_scheduling_node(&enclave->base, idx); + + env->enclave_info = enclave; + enclave->env = env; - env->enclave_info = enclave; - enclave->env = env; - - // Initialize the next event condition variable. - LF_COND_INIT(&enclave->next_event_condition, &rti_mutex); + // Initialize the next event condition variable. + LF_COND_INIT(&enclave->next_event_condition, &rti_mutex); } tag_t rti_next_event_tag_locked(enclave_info_t* e, tag_t next_event_tag) { - LF_PRINT_LOG("RTI: enclave %u sends NET of " PRINTF_TAG " ", - e->base.id, next_event_tag.time - lf_time_start(), next_event_tag.microstep); - - // Return early if there are only a single enclave in the program. - if (rti_local->base.number_of_scheduling_nodes == 1) { - return next_event_tag; - } - // This is called from a critical section within the source enclave. Leave - // this critical section and acquire the RTI mutex. 
- LF_MUTEX_UNLOCK(&e->env->mutex); - LF_MUTEX_LOCK(rti_local->base.mutex); - tracepoint_federate_to_rti(e->env->trace, send_NET, e->base.id, &next_event_tag); - // First, update the enclave data structure to record this next_event_tag, - // and notify any downstream scheduling_nodes, and unblock them if appropriate. - tag_advance_grant_t result; - - tag_t previous_tag = e->base.last_granted; - tag_t previous_ptag = e->base.last_provisionally_granted; - - update_scheduling_node_next_event_tag_locked(&e->base, next_event_tag); - - // Return early if we already have been granted past the NET. - if (lf_tag_compare(e->base.last_granted, next_event_tag) >= 0) { - LF_PRINT_LOG("RTI: enclave %u has already been granted a TAG to" PRINTF_TAG ". Returning with a TAG to" PRINTF_TAG " ", - e->base.id, e->base.last_granted.time - lf_time_start(), e->base.last_granted.microstep, - next_event_tag.time - lf_time_start(), next_event_tag.microstep); - tracepoint_federate_from_rti(e->env->trace, receive_TAG, e->base.id, &next_event_tag); - // Release RTI mutex and re-enter the critical section of the source enclave before returning. - LF_MUTEX_UNLOCK(rti_local->base.mutex); - LF_MUTEX_LOCK(&e->env->mutex); - return next_event_tag; - } - - // If this enclave has no upstream, then we give a TAG till forever straight away. - if (e->base.num_immediate_upstreams == 0) { - LF_PRINT_LOG("RTI: enclave %u has no upstream. Giving it a to the NET", e->base.id); - e->base.last_granted = next_event_tag; - } - - while(true) { - // Determine whether the above call notified a TAG. - // If so, return that value. Note that we dont care about PTAGs as we - // have disallowed zero-delay enclave loops. - if (lf_tag_compare(previous_tag, e->base.last_granted) < 0) { - result.tag = e->base.last_granted; - result.is_provisional = false; - break; - } - // If not, block. 
- LF_PRINT_LOG("RTI: enclave %u sleeps waiting for TAG to" PRINTF_TAG " ", - e->base.id, e->base.next_event.time - lf_time_start(), e->base.next_event.microstep); - LF_ASSERT(lf_cond_wait(&e->next_event_condition) == 0, "Could not wait for cond var"); - } - - // At this point we have gotten a new TAG. - LF_PRINT_LOG("RTI: enclave %u returns with TAG to" PRINTF_TAG " ", - e->base.id, e->base.next_event.time - lf_time_start(), e->base.next_event.microstep); - tracepoint_federate_from_rti(e->env->trace, receive_TAG, e->base.id, &result.tag); - // Release RTI mutex and re-enter the critical section of the source enclave. + LF_PRINT_LOG("RTI: enclave %u sends NET of " PRINTF_TAG " ", e->base.id, next_event_tag.time - lf_time_start(), + next_event_tag.microstep); + + // Return early if there are only a single enclave in the program. + if (rti_local->base.number_of_scheduling_nodes == 1) { + return next_event_tag; + } + // This is called from a critical section within the source enclave. Leave + // this critical section and acquire the RTI mutex. + LF_MUTEX_UNLOCK(&e->env->mutex); + LF_MUTEX_LOCK(rti_local->base.mutex); + tracepoint_federate_to_rti(send_NET, e->base.id, &next_event_tag); + // First, update the enclave data structure to record this next_event_tag, + // and notify any downstream scheduling_nodes, and unblock them if appropriate. + tag_advance_grant_t result; + + tag_t previous_tag = e->base.last_granted; + tag_t previous_ptag = e->base.last_provisionally_granted; + + update_scheduling_node_next_event_tag_locked(&e->base, next_event_tag); + + // Return early if we already have been granted past the NET. + if (lf_tag_compare(e->base.last_granted, next_event_tag) >= 0) { + LF_PRINT_LOG("RTI: enclave %u has already been granted a TAG to" PRINTF_TAG ". 
Returning with a TAG to" PRINTF_TAG + " ", + e->base.id, e->base.last_granted.time - lf_time_start(), e->base.last_granted.microstep, + next_event_tag.time - lf_time_start(), next_event_tag.microstep); + tracepoint_federate_from_rti(receive_TAG, e->base.id, &next_event_tag); + // Release RTI mutex and re-enter the critical section of the source enclave before returning. LF_MUTEX_UNLOCK(rti_local->base.mutex); LF_MUTEX_LOCK(&e->env->mutex); - return result.tag; + return next_event_tag; + } + + // If this enclave has no upstream, then we give a TAG till forever straight away. + if (e->base.num_immediate_upstreams == 0) { + LF_PRINT_LOG("RTI: enclave %u has no upstream. Giving it a to the NET", e->base.id); + e->base.last_granted = next_event_tag; + } + + while (true) { + // Determine whether the above call notified a TAG. + // If so, return that value. Note that we dont care about PTAGs as we + // have disallowed zero-delay enclave loops. + if (lf_tag_compare(previous_tag, e->base.last_granted) < 0) { + result.tag = e->base.last_granted; + result.is_provisional = false; + break; + } + // If not, block. + LF_PRINT_LOG("RTI: enclave %u sleeps waiting for TAG to" PRINTF_TAG " ", e->base.id, + e->base.next_event.time - lf_time_start(), e->base.next_event.microstep); + LF_ASSERT(lf_cond_wait(&e->next_event_condition) == 0, "Could not wait for cond var"); + } + + // At this point we have gotten a new TAG. + LF_PRINT_LOG("RTI: enclave %u returns with TAG to" PRINTF_TAG " ", e->base.id, + e->base.next_event.time - lf_time_start(), e->base.next_event.microstep); + tracepoint_federate_from_rti(receive_TAG, e->base.id, &result.tag); + // Release RTI mutex and re-enter the critical section of the source enclave. 
+ LF_MUTEX_UNLOCK(rti_local->base.mutex); + LF_MUTEX_LOCK(&e->env->mutex); + return result.tag; } void rti_logical_tag_complete_locked(enclave_info_t* enclave, tag_t completed) { - if (rti_local->base.number_of_scheduling_nodes == 1) { - return; - } - // Release the enclave mutex while doing the local RTI work. - LF_MUTEX_UNLOCK(&enclave->env->mutex); - tracepoint_federate_to_rti(enclave->env->trace, send_LTC, enclave->base.id, &completed); - _logical_tag_complete(&enclave->base, completed); - // Acquire the enclave mutex again before returning. - LF_MUTEX_LOCK(&enclave->env->mutex); + if (rti_local->base.number_of_scheduling_nodes == 1) { + return; + } + // Release the enclave mutex while doing the local RTI work. + LF_MUTEX_UNLOCK(&enclave->env->mutex); + tracepoint_federate_to_rti(send_LTC, enclave->base.id, &completed); + _logical_tag_complete(&enclave->base, completed); + // Acquire the enclave mutex again before returning. + LF_MUTEX_LOCK(&enclave->env->mutex); } -void rti_update_other_net_locked(enclave_info_t* src, enclave_info_t * target, tag_t net) { - // Here we do NOT leave the critical section of the target enclave before we - // acquire the RTI mutex. This means that we cannot block within this function. - LF_MUTEX_LOCK(rti_local->base.mutex); - tracepoint_federate_to_federate(src->env->trace, send_TAGGED_MSG, src->base.id, target->base.id, &net); - - // If our proposed NET is less than the current NET, update it. - if (lf_tag_compare(net, target->base.next_event) < 0) { - target->base.next_event = net; - } - LF_MUTEX_UNLOCK(rti_local->base.mutex); +void rti_update_other_net_locked(enclave_info_t* src, enclave_info_t* target, tag_t net) { + // Here we do NOT leave the critical section of the target enclave before we + // acquire the RTI mutex. This means that we cannot block within this function. 
+ LF_MUTEX_LOCK(rti_local->base.mutex); + tracepoint_federate_to_federate(send_TAGGED_MSG, src->base.id, target->base.id, &net); + + // If our proposed NET is less than the current NET, update it. + if (lf_tag_compare(net, target->base.next_event) < 0) { + target->base.next_event = net; + } + LF_MUTEX_UNLOCK(rti_local->base.mutex); } /////////////////////////////////////////////////////////////////////////////// @@ -171,30 +172,27 @@ void rti_update_other_net_locked(enclave_info_t* src, enclave_info_t * target, t /////////////////////////////////////////////////////////////////////////////// void notify_tag_advance_grant(scheduling_node_t* e, tag_t tag) { - if (e->state == NOT_CONNECTED - || lf_tag_compare(tag, e->last_granted) <= 0 - || lf_tag_compare(tag, e->last_provisionally_granted) < 0 - ) { - return; - } - if (rti_local->base.tracing_enabled) { - tracepoint_rti_to_federate(e->env->trace, send_TAG, e->id, &tag); - } - e->last_granted = tag; - // TODO: Here we can consider adding a flag to the RTI struct and only signal the cond var if we have - // sleeping enclaves. - LF_ASSERT(lf_cond_signal(&((enclave_info_t *)e)->next_event_condition) == 0, "Could not signal cond var"); + if (e->state == NOT_CONNECTED || lf_tag_compare(tag, e->last_granted) <= 0 || + lf_tag_compare(tag, e->last_provisionally_granted) < 0) { + return; + } + if (rti_local->base.tracing_enabled) { + tracepoint_rti_to_federate(send_TAG, e->id, &tag); + } + e->last_granted = tag; + // TODO: Here we can consider adding a flag to the RTI struct and only signal the cond var if we have + // sleeping enclaves. + LF_ASSERT(lf_cond_signal(&((enclave_info_t*)e)->next_event_condition) == 0, "Could not signal cond var"); } // We currently ignore the PTAGs, because they are only relevant with zero // delay enclave loops. 
void notify_provisional_tag_advance_grant(scheduling_node_t* e, tag_t tag) { - LF_PRINT_LOG("RTI: enclave %u callback with PTAG " PRINTF_TAG " ", - e->id, tag.time - lf_time_start(), tag.microstep); + LF_PRINT_LOG("RTI: enclave %u callback with PTAG " PRINTF_TAG " ", e->id, tag.time - lf_time_start(), tag.microstep); } void free_scheduling_nodes(scheduling_node_t** scheduling_nodes, uint16_t number_of_scheduling_nodes) { - // Nothing to do here. + // Nothing to do here. } -#endif //LF_ENCLAVES +#endif // LF_ENCLAVES diff --git a/core/federated/RTI/rti_local.h b/core/federated/RTI/rti_local.h index 30c255c42..0d3eef96c 100644 --- a/core/federated/RTI/rti_local.h +++ b/core/federated/RTI/rti_local.h @@ -6,9 +6,9 @@ * @author Soroush Bateni (soroush@utdallas.edu) * @copyright (c) 2020-2024, The University of California at Berkeley * License in [BSD 2-clause](https://github.com/lf-lang/reactor-c/blob/main/LICENSE.md) - * + * * @brief This file declares functions used to implement scheduling enclaves. - * + * * A scheduling enclave is portion of the runtime system that maintains its own event * and reaction queues and has its own scheduler. It uses a local runtime infrastructure (RTI) * to coordinate the advancement of tags across enclaves. @@ -19,27 +19,26 @@ #ifdef LF_ENCLAVES - #include "lf_types.h" #include "rti_common.h" /** * @brief Structure holding information about each enclave in the program. - * + * * The first field is the generic scheduling_node_info struct */ typedef struct enclave_info_t { - scheduling_node_t base; - environment_t * env; // A pointer to the environment of the enclave - lf_cond_t next_event_condition; // Condition variable used by scheduling_nodes to notify an enclave - // that it's call to next_event_tag() should unblock. 
+ scheduling_node_t base; + environment_t* env; // A pointer to the environment of the enclave + lf_cond_t next_event_condition; // Condition variable used by scheduling_nodes to notify an enclave + // that it's call to next_event_tag() should unblock. } enclave_info_t; /** * @brief Structure holding information about the local RTI */ typedef struct { - rti_common_t base; + rti_common_t base; } rti_local_t; /** @@ -60,37 +59,37 @@ void free_local_rti(); * @param idx The index of the enclave. * @param env The environment of the enclave. */ -void initialize_enclave_info(enclave_info_t* enclave, int idx, environment_t *env); +void initialize_enclave_info(enclave_info_t* enclave, int idx, environment_t* env); /** * @brief Notify the local RTI of a next event tag (NET). - * - * This function call may block. A call to this function serves two purposes. + * + * This function call may block. A call to this function serves two purposes. * 1) It is a promise that, unless receiving events from other enclaves, this * enclave will not produce any event until the next_event_tag (NET) argument. * 2) It is a request for permission to advance the logical tag of the enclave * until the NET. - * + * * This function call will block until the enclave has been granted a TAG, * which might not be the tag requested. - * + * * This assumes the caller is holding the environment mutex of the source enclave. - * + * * @param enclave The enclave requesting to advance to the NET. * @param next_event_tag The tag of the next event in the enclave - * @return tag_t A tag which the enclave can safely advance its time to. It + * @return tag_t A tag which the enclave can safely advance its time to. It * might be smaller than the requested tag. */ tag_t rti_next_event_tag_locked(enclave_info_t* enclave, tag_t next_event_tag); /** * @brief Inform the local RTI that `enclave` has completed tag `completed`. - * + * * This will update the data structures and can release other * enclaves waiting on a TAG. 
- * + * * This assumes the caller is holding the environment mutex of the source enclave. - * + * * @param enclave The enclave * @param completed The tag just completed by the enclave. */ @@ -98,13 +97,13 @@ void rti_logical_tag_complete_locked(enclave_info_t* enclave, tag_t completed); /** * @brief Notify the local RTI to update the next event tag (NET) of a target enclave. - * - * This function is called after scheduling an event onto the event queue of another enclave. + * + * This function is called after scheduling an event onto the event queue of another enclave. * The source enclave must call this function to potentially update - * the NET of the target enclave. - * + * the NET of the target enclave. + * * This assumes the caller is holding the environment mutex of the target enclave. - * + * * @param src The enclave that has scheduled an event. * @param target The enclave of which we want to update the NET of. * @param net The proposed next event tag. @@ -113,10 +112,10 @@ void rti_update_other_net_locked(enclave_info_t* src, enclave_info_t* target, ta /** * @brief Get the array of ids of enclaves directly upstream of the specified enclave. - * + * * This updates the specified result pointer to point to a statically allocated array of IDs * and returns the length of the array. The implementation is code-generated. - * + * * @param enclave_id The enclave for which to report upstream IDs. * @param result The pointer to dereference and update to point to the resulting array. * @return The number of direct upstream enclaves. @@ -125,10 +124,10 @@ int lf_get_upstream_of(int enclave_id, int** result); /** * @brief Get the array of ids of enclaves directly downstream of the specified enclave. - * + * * This updates the specified result pointer to point to a statically allocated array of IDs * and returns the length of the array. The implementation is code-generated. - * + * * @param enclave_id The enclave for which to report downstream IDs. 
* @param result The pointer to dereference and update to point to the resulting array. * @return The number of direct downstream enclaves. @@ -137,10 +136,10 @@ int lf_get_downstream_of(int enclave_id, int** result); /** * @brief Retrieve the delays on the connections to direct upstream enclaves. - * + * * This updates the result pointer to point to a statically allocated array of delays. * The implementation is code-generated. - * + * * @param enclave_id The enclave for which to search for upstream delays. * @param result The pointer to dereference and update to point to the resulting array. * @return int The number of direct upstream enclaves. diff --git a/core/federated/RTI/rti_remote.c b/core/federated/RTI/rti_remote.c index efbfc045b..69eabbde6 100644 --- a/core/federated/RTI/rti_remote.c +++ b/core/federated/RTI/rti_remote.c @@ -36,25 +36,21 @@ extern instant_t start_time; /** * Local reference to the rti_remote object */ -static rti_remote_t *rti_remote; +static rti_remote_t* rti_remote; bool _lf_federate_reports_error = false; // A convenient macro for getting the `federate_info_t *` at index `_idx` // and casting it. -#define GET_FED_INFO(_idx) (federate_info_t *)rti_remote->base.scheduling_nodes[_idx] +#define GET_FED_INFO(_idx) (federate_info_t*)rti_remote->base.scheduling_nodes[_idx] lf_mutex_t rti_mutex; lf_cond_t received_start_times; lf_cond_t sent_start_time; -extern int lf_critical_section_enter(environment_t *env) { - return lf_mutex_lock(&rti_mutex); -} +extern int lf_critical_section_enter(environment_t* env) { return lf_mutex_lock(&rti_mutex); } -extern int lf_critical_section_exit(environment_t *env) { - return lf_mutex_unlock(&rti_mutex); -} +extern int lf_critical_section_exit(environment_t* env) { return lf_mutex_unlock(&rti_mutex); } /** * Create a server and enable listening for socket connections. 
@@ -72,516 +68,481 @@ extern int lf_critical_section_exit(environment_t *env) { * @return The socket descriptor on which to accept connections. */ static int create_rti_server(uint16_t port, socket_type_t socket_type) { - // Timeout time for the communications of the server - struct timeval timeout_time = { - .tv_sec = TCP_TIMEOUT_TIME / BILLION, - .tv_usec = (TCP_TIMEOUT_TIME % BILLION) / 1000 - }; - // Create an IPv4 socket for TCP (not UDP) communication over IP (0). - int socket_descriptor = -1; - if (socket_type == TCP) { - socket_descriptor = create_real_time_tcp_socket_errexit(); - } else if (socket_type == UDP) { - socket_descriptor = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); - // Set the appropriate timeout time - timeout_time = (struct timeval){ - .tv_sec = UDP_TIMEOUT_TIME / BILLION, - .tv_usec = (UDP_TIMEOUT_TIME % BILLION) / 1000 - }; - } - if (socket_descriptor < 0) { - lf_print_error_system_failure("Failed to create RTI socket."); - } - - // Set the option for this socket to reuse the same address - int true_variable = 1; // setsockopt() requires a reference to the value assigned to an option - if (setsockopt( - socket_descriptor, - SOL_SOCKET, - SO_REUSEADDR, - &true_variable, - sizeof(int32_t)) < 0) { - lf_print_error("RTI failed to set SO_REUSEADDR option on the socket: %s.", strerror(errno)); - } - // Set the timeout on the socket so that read and write operations don't block for too long - if (setsockopt( - socket_descriptor, - SOL_SOCKET, - SO_RCVTIMEO, - (const char *)&timeout_time, - sizeof(timeout_time)) < 0) { - lf_print_error("RTI failed to set SO_RCVTIMEO option on the socket: %s.", strerror(errno)); - } - if (setsockopt( - socket_descriptor, - SOL_SOCKET, - SO_SNDTIMEO, - (const char *)&timeout_time, - sizeof(timeout_time)) < 0) { - lf_print_error("RTI failed to set SO_SNDTIMEO option on the socket: %s.", strerror(errno)); - } - - /* - * The following used to permit reuse of a port that an RTI has previously - * used that has not been 
released. We no longer do this, and instead retry - * some number of times after waiting. - - // SO_REUSEPORT (since Linux 3.9) - // Permits multiple AF_INET or AF_INET6 sockets to be bound to an - // identical socket address. This option must be set on each - // socket (including the first socket) prior to calling bind(2) - // on the socket. To prevent port hijacking, all of the - // processes binding to the same address must have the same - // effective UID. This option can be employed with both TCP and - // UDP sockets. - - int reuse = 1; - #ifdef SO_REUSEPORT - if (setsockopt(socket_descriptor, SOL_SOCKET, SO_REUSEPORT, - (const char*)&reuse, sizeof(reuse)) < 0) { - perror("setsockopt(SO_REUSEPORT) failed"); - } - #endif - */ - - // Server file descriptor. - struct sockaddr_in server_fd; - // Zero out the server address structure. - bzero((char *)&server_fd, sizeof(server_fd)); - - uint16_t specified_port = port; - if (specified_port == 0) port = DEFAULT_PORT; - - server_fd.sin_family = AF_INET; // IPv4 - server_fd.sin_addr.s_addr = INADDR_ANY; // All interfaces, 0.0.0.0. - // Convert the port number from host byte order to network byte order. - server_fd.sin_port = htons(port); - - int result = bind( - socket_descriptor, - (struct sockaddr *)&server_fd, - sizeof(server_fd)); - - // Try repeatedly to bind to a port. If no specific port is specified, then - // increment the port number each time. - - int count = 1; - while (result != 0 && count++ < PORT_BIND_RETRY_LIMIT) { - if (specified_port == 0) { - lf_print_warning("RTI failed to get port %d.", port); - port++; - if (port >= DEFAULT_PORT + MAX_NUM_PORT_ADDRESSES) port = DEFAULT_PORT; - lf_print_warning("RTI will try again with port %d.", port); - server_fd.sin_port = htons(port); - // Do not sleep. - } else { - lf_print("RTI failed to get port %d. 
Will try again.", port); - lf_sleep(PORT_BIND_RETRY_INTERVAL); - } - result = bind( - socket_descriptor, - (struct sockaddr *)&server_fd, - sizeof(server_fd)); - } - if (result != 0) { - lf_print_error_and_exit("Failed to bind the RTI socket. Port %d is not available. ", port); - } - char *type = "TCP"; - if (socket_type == UDP) { - type = "UDP"; - } - lf_print("RTI using %s port %d for federation %s.", type, port, rti_remote->federation_id); - - if (socket_type == TCP) { - rti_remote->final_port_TCP = port; - // Enable listening for socket connections. - // The second argument is the maximum number of queued socket requests, - // which according to the Mac man page is limited to 128. - listen(socket_descriptor, 128); - } else if (socket_type == UDP) { - rti_remote->final_port_UDP = port; - // No need to listen on the UDP socket - } - - return socket_descriptor; -} - -void notify_tag_advance_grant(scheduling_node_t *e, tag_t tag) { - if (e->state == NOT_CONNECTED - || lf_tag_compare(tag, e->last_granted) <= 0 - || lf_tag_compare(tag, e->last_provisionally_granted) < 0) { - return; - } - // Need to make sure that the destination federate's thread has already - // sent the starting MSG_TYPE_TIMESTAMP message. - while (e->state == PENDING) { - // Need to wait here. - lf_cond_wait(&sent_start_time); - } - size_t message_length = 1 + sizeof(int64_t) + sizeof(uint32_t); - unsigned char buffer[message_length]; - buffer[0] = MSG_TYPE_TAG_ADVANCE_GRANT; - encode_int64(tag.time, &(buffer[1])); - encode_int32((int32_t)tag.microstep, &(buffer[1 + sizeof(int64_t)])); - - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_to_federate(rti_remote->base.trace, send_TAG, e->id, &tag); - } - // This function is called in notify_advance_grant_if_safe(), which is a long - // function. During this call, the socket might close, causing the following write_to_socket - // to fail. Consider a failure here a soft failure and update the federate's status. 
- if (write_to_socket(((federate_info_t *)e)->socket, message_length, buffer)) { - lf_print_error("RTI failed to send tag advance grant to federate %d.", e->id); - e->state = NOT_CONNECTED; + // Timeout time for the communications of the server + struct timeval timeout_time = {.tv_sec = TCP_TIMEOUT_TIME / BILLION, .tv_usec = (TCP_TIMEOUT_TIME % BILLION) / 1000}; + // Create an IPv4 socket for TCP (not UDP) communication over IP (0). + int socket_descriptor = -1; + if (socket_type == TCP) { + socket_descriptor = create_real_time_tcp_socket_errexit(); + } else if (socket_type == UDP) { + socket_descriptor = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); + // Set the appropriate timeout time + timeout_time = + (struct timeval){.tv_sec = UDP_TIMEOUT_TIME / BILLION, .tv_usec = (UDP_TIMEOUT_TIME % BILLION) / 1000}; + } + if (socket_descriptor < 0) { + lf_print_error_system_failure("Failed to create RTI socket."); + } + + // Set the option for this socket to reuse the same address + int true_variable = 1; // setsockopt() requires a reference to the value assigned to an option + if (setsockopt(socket_descriptor, SOL_SOCKET, SO_REUSEADDR, &true_variable, sizeof(int32_t)) < 0) { + lf_print_error("RTI failed to set SO_REUSEADDR option on the socket: %s.", strerror(errno)); + } + // Set the timeout on the socket so that read and write operations don't block for too long + if (setsockopt(socket_descriptor, SOL_SOCKET, SO_RCVTIMEO, (const char*)&timeout_time, sizeof(timeout_time)) < 0) { + lf_print_error("RTI failed to set SO_RCVTIMEO option on the socket: %s.", strerror(errno)); + } + if (setsockopt(socket_descriptor, SOL_SOCKET, SO_SNDTIMEO, (const char*)&timeout_time, sizeof(timeout_time)) < 0) { + lf_print_error("RTI failed to set SO_SNDTIMEO option on the socket: %s.", strerror(errno)); + } + + /* + * The following used to permit reuse of a port that an RTI has previously + * used that has not been released. 
We no longer do this, and instead retry + * some number of times after waiting. + + // SO_REUSEPORT (since Linux 3.9) + // Permits multiple AF_INET or AF_INET6 sockets to be bound to an + // identical socket address. This option must be set on each + // socket (including the first socket) prior to calling bind(2) + // on the socket. To prevent port hijacking, all of the + // processes binding to the same address must have the same + // effective UID. This option can be employed with both TCP and + // UDP sockets. + + int reuse = 1; + #ifdef SO_REUSEPORT + if (setsockopt(socket_descriptor, SOL_SOCKET, SO_REUSEPORT, + (const char*)&reuse, sizeof(reuse)) < 0) { + perror("setsockopt(SO_REUSEPORT) failed"); + } + #endif + */ + + // Server file descriptor. + struct sockaddr_in server_fd; + // Zero out the server address structure. + bzero((char*)&server_fd, sizeof(server_fd)); + + uint16_t specified_port = port; + if (specified_port == 0) + port = DEFAULT_PORT; + + server_fd.sin_family = AF_INET; // IPv4 + server_fd.sin_addr.s_addr = INADDR_ANY; // All interfaces, 0.0.0.0. + // Convert the port number from host byte order to network byte order. + server_fd.sin_port = htons(port); + + int result = bind(socket_descriptor, (struct sockaddr*)&server_fd, sizeof(server_fd)); + + // Try repeatedly to bind to a port. If no specific port is specified, then + // increment the port number each time. + + int count = 1; + while (result != 0 && count++ < PORT_BIND_RETRY_LIMIT) { + if (specified_port == 0) { + lf_print_warning("RTI failed to get port %d.", port); + port++; + if (port >= DEFAULT_PORT + MAX_NUM_PORT_ADDRESSES) + port = DEFAULT_PORT; + lf_print_warning("RTI will try again with port %d.", port); + server_fd.sin_port = htons(port); + // Do not sleep. } else { - e->last_granted = tag; - LF_PRINT_LOG("RTI sent to federate %d the tag advance grant (TAG) " PRINTF_TAG ".", - e->id, tag.time - start_time, tag.microstep); - } + lf_print("RTI failed to get port %d. 
Will try again.", port); + lf_sleep(PORT_BIND_RETRY_INTERVAL); + } + result = bind(socket_descriptor, (struct sockaddr*)&server_fd, sizeof(server_fd)); + } + if (result != 0) { + lf_print_error_and_exit("Failed to bind the RTI socket. Port %d is not available. ", port); + } + char* type = "TCP"; + if (socket_type == UDP) { + type = "UDP"; + } + lf_print("RTI using %s port %d for federation %s.", type, port, rti_remote->federation_id); + + if (socket_type == TCP) { + rti_remote->final_port_TCP = port; + // Enable listening for socket connections. + // The second argument is the maximum number of queued socket requests, + // which according to the Mac man page is limited to 128. + listen(socket_descriptor, 128); + } else if (socket_type == UDP) { + rti_remote->final_port_UDP = port; + // No need to listen on the UDP socket + } + + return socket_descriptor; } -void notify_provisional_tag_advance_grant(scheduling_node_t *e, tag_t tag) { - if (e->state == NOT_CONNECTED - || lf_tag_compare(tag, e->last_granted) <= 0 - || lf_tag_compare(tag, e->last_provisionally_granted) <= 0) { - return; - } - // Need to make sure that the destination federate's thread has already - // sent the starting MSG_TYPE_TIMESTAMP message. - while (e->state == PENDING) { - // Need to wait here. - lf_cond_wait(&sent_start_time); - } - size_t message_length = 1 + sizeof(int64_t) + sizeof(uint32_t); - unsigned char buffer[message_length]; - buffer[0] = MSG_TYPE_PROVISIONAL_TAG_ADVANCE_GRANT; - encode_int64(tag.time, &(buffer[1])); - encode_int32((int32_t)tag.microstep, &(buffer[1 + sizeof(int64_t)])); +void notify_tag_advance_grant(scheduling_node_t* e, tag_t tag) { + if (e->state == NOT_CONNECTED || lf_tag_compare(tag, e->last_granted) <= 0 || + lf_tag_compare(tag, e->last_provisionally_granted) < 0) { + return; + } + // Need to make sure that the destination federate's thread has already + // sent the starting MSG_TYPE_TIMESTAMP message. + while (e->state == PENDING) { + // Need to wait here. 
+ lf_cond_wait(&sent_start_time); + } + size_t message_length = 1 + sizeof(int64_t) + sizeof(uint32_t); + unsigned char buffer[message_length]; + buffer[0] = MSG_TYPE_TAG_ADVANCE_GRANT; + encode_int64(tag.time, &(buffer[1])); + encode_int32((int32_t)tag.microstep, &(buffer[1 + sizeof(int64_t)])); + + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_to_federate(send_TAG, e->id, &tag); + } + // This function is called in notify_advance_grant_if_safe(), which is a long + // function. During this call, the socket might close, causing the following write_to_socket + // to fail. Consider a failure here a soft failure and update the federate's status. + if (write_to_socket(((federate_info_t*)e)->socket, message_length, buffer)) { + lf_print_error("RTI failed to send tag advance grant to federate %d.", e->id); + e->state = NOT_CONNECTED; + } else { + e->last_granted = tag; + LF_PRINT_LOG("RTI sent to federate %d the tag advance grant (TAG) " PRINTF_TAG ".", e->id, tag.time - start_time, + tag.microstep); + } +} - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_to_federate(rti_remote->base.trace, send_PTAG, e->id, &tag); - } - // This function is called in notify_advance_grant_if_safe(), which is a long - // function. During this call, the socket might close, causing the following write_to_socket - // to fail. Consider a failure here a soft failure and update the federate's status. - if (write_to_socket(((federate_info_t *)e)->socket, message_length, buffer)) { - lf_print_error("RTI failed to send tag advance grant to federate %d.", e->id); - e->state = NOT_CONNECTED; - } else { - e->last_provisionally_granted = tag; - LF_PRINT_LOG("RTI sent to federate %d the Provisional Tag Advance Grant (PTAG) " PRINTF_TAG ".", - e->id, tag.time - start_time, tag.microstep); - - // Send PTAG to all upstream federates, if they have not had - // a later or equal PTAG or TAG sent previously and if their transitive - // NET is greater than or equal to the tag. 
- // This is needed to stimulate absent messages from upstream and break deadlocks. - // The scenario this deals with is illustrated in `test/C/src/federated/FeedbackDelay2.lf` - // and `test/C/src/federated/FeedbackDelay4.lf`. - // Note that this is transitive. - // NOTE: This is not needed for enclaves because zero-delay loops are prohibited. - // It's only needed for federates, which is why this is implemented here. - for (int j = 0; j < e->num_immediate_upstreams; j++) { - scheduling_node_t *upstream = rti_remote->base.scheduling_nodes[e->immediate_upstreams[j]]; - - // Ignore this federate if it has resigned. - if (upstream->state == NOT_CONNECTED) - continue; - - tag_t earliest = earliest_future_incoming_message_tag(upstream); - tag_t strict_earliest = eimt_strict(upstream); // Non-ZDC version. - - // If these tags are equal, then a TAG or PTAG should have already been granted, - // in which case, another will not be sent. But it may not have been already granted. - if (lf_tag_compare(earliest, tag) > 0) { - notify_tag_advance_grant(upstream, tag); - } else if (lf_tag_compare(earliest, tag) == 0 && lf_tag_compare(strict_earliest, tag) > 0) { - notify_provisional_tag_advance_grant(upstream, tag); - } - } - } +void notify_provisional_tag_advance_grant(scheduling_node_t* e, tag_t tag) { + if (e->state == NOT_CONNECTED || lf_tag_compare(tag, e->last_granted) <= 0 || + lf_tag_compare(tag, e->last_provisionally_granted) <= 0) { + return; + } + // Need to make sure that the destination federate's thread has already + // sent the starting MSG_TYPE_TIMESTAMP message. + while (e->state == PENDING) { + // Need to wait here. 
+ lf_cond_wait(&sent_start_time); + } + size_t message_length = 1 + sizeof(int64_t) + sizeof(uint32_t); + unsigned char buffer[message_length]; + buffer[0] = MSG_TYPE_PROVISIONAL_TAG_ADVANCE_GRANT; + encode_int64(tag.time, &(buffer[1])); + encode_int32((int32_t)tag.microstep, &(buffer[1 + sizeof(int64_t)])); + + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_to_federate(send_PTAG, e->id, &tag); + } + // This function is called in notify_advance_grant_if_safe(), which is a long + // function. During this call, the socket might close, causing the following write_to_socket + // to fail. Consider a failure here a soft failure and update the federate's status. + if (write_to_socket(((federate_info_t*)e)->socket, message_length, buffer)) { + lf_print_error("RTI failed to send tag advance grant to federate %d.", e->id); + e->state = NOT_CONNECTED; + } else { + e->last_provisionally_granted = tag; + LF_PRINT_LOG("RTI sent to federate %d the Provisional Tag Advance Grant (PTAG) " PRINTF_TAG ".", e->id, + tag.time - start_time, tag.microstep); + + // Send PTAG to all upstream federates, if they have not had + // a later or equal PTAG or TAG sent previously and if their transitive + // NET is greater than or equal to the tag. + // This is needed to stimulate absent messages from upstream and break deadlocks. + // The scenario this deals with is illustrated in `test/C/src/federated/FeedbackDelay2.lf` + // and `test/C/src/federated/FeedbackDelay4.lf`. + // Note that this is transitive. + // NOTE: This is not needed for enclaves because zero-delay loops are prohibited. + // It's only needed for federates, which is why this is implemented here. + for (int j = 0; j < e->num_immediate_upstreams; j++) { + scheduling_node_t* upstream = rti_remote->base.scheduling_nodes[e->immediate_upstreams[j]]; + + // Ignore this federate if it has resigned. 
+      if (upstream->state == NOT_CONNECTED)
+        continue;
+
+      tag_t earliest = earliest_future_incoming_message_tag(upstream);
+      tag_t strict_earliest = eimt_strict(upstream); // Non-ZDC version.
+
+      // If these tags are equal, then a TAG or PTAG should have already been granted,
+      // in which case, another will not be sent. But it may not have been already granted.
+      if (lf_tag_compare(earliest, tag) > 0) {
+        notify_tag_advance_grant(upstream, tag);
+      } else if (lf_tag_compare(earliest, tag) == 0 && lf_tag_compare(strict_earliest, tag) > 0) {
+        notify_provisional_tag_advance_grant(upstream, tag);
+      }
+    }
+  }
 }
 
-void send_downstream_next_event_tag(scheduling_node_t *e, tag_t tag) {
-    size_t message_length = 1 + sizeof(int64_t) + sizeof(uint32_t);
-    unsigned char buffer[message_length];
-    buffer[0] = MSG_TYPE_DOWNSTREAM_NEXT_EVENT_TAG;
-    encode_int64(tag.time, &(buffer[1]));
-    encode_int32((int32_t)tag.microstep, &(buffer[1 + sizeof(int64_t)]));
-
-    if (rti_remote->base.tracing_enabled) {
-        tracepoint_rti_to_federate(rti_remote->base.trace, send_DNET, e->id, &tag);
-    }
-    if (write_to_socket(((federate_info_t *)e)->socket, message_length,
buffer)) {
-        lf_print_error("RTI failed to send downstream next event tag to federate %d.", e->id);
-        e->state = NOT_CONNECTED;
-    } else {
-        e->last_DNET = tag;
-        LF_PRINT_LOG("RTI sent to federate %d the Downstream Next Event Tag (DNET) " PRINTF_TAG ".",
-                e->id, tag.time - start_time, tag.microstep);
-    }
+void send_downstream_next_event_tag(scheduling_node_t* e, tag_t tag) {
+  size_t message_length = 1 + sizeof(int64_t) + sizeof(uint32_t);
+  unsigned char buffer[message_length];
+  buffer[0] = MSG_TYPE_DOWNSTREAM_NEXT_EVENT_TAG;
+  encode_int64(tag.time, &(buffer[1]));
+  encode_int32((int32_t)tag.microstep, &(buffer[1 + sizeof(int64_t)]));
+
+  if (rti_remote->base.tracing_enabled) {
+    tracepoint_rti_to_federate(send_DNET, e->id, &tag);
+  }
+  if (write_to_socket(((federate_info_t*)e)->socket, message_length, buffer)) {
+    lf_print_error("RTI failed to send downstream next event tag to federate %d.", e->id);
+    e->state = NOT_CONNECTED;
+  } else {
+    e->last_DNET = tag;
+    LF_PRINT_LOG("RTI sent to federate %d the Downstream Next Event Tag (DNET) " PRINTF_TAG ".", e->id,
+                 tag.time - start_time, tag.microstep);
+  }
 }
 
 void update_federate_next_event_tag_locked(uint16_t federate_id, tag_t next_event_tag) {
-    federate_info_t *fed = GET_FED_INFO(federate_id);
-    tag_t min_in_transit_tag = pqueue_tag_peek_tag(fed->in_transit_message_tags);
-    if (lf_tag_compare(min_in_transit_tag, next_event_tag) < 0) {
-        next_event_tag = min_in_transit_tag;
-    }
-    update_scheduling_node_next_event_tag_locked(&(fed->enclave), next_event_tag);
+  federate_info_t* fed = GET_FED_INFO(federate_id);
+  tag_t min_in_transit_tag = pqueue_tag_peek_tag(fed->in_transit_message_tags);
+  if (lf_tag_compare(min_in_transit_tag, next_event_tag) < 0) {
+    next_event_tag = min_in_transit_tag;
+  }
+  update_scheduling_node_next_event_tag_locked(&(fed->enclave), next_event_tag);
 }
 
-void handle_port_absent_message(federate_info_t *sending_federate, unsigned char *buffer) {
-    size_t message_size
= sizeof(uint16_t) + sizeof(uint16_t) + sizeof(int64_t) + sizeof(uint32_t); +void handle_port_absent_message(federate_info_t* sending_federate, unsigned char* buffer) { + size_t message_size = sizeof(uint16_t) + sizeof(uint16_t) + sizeof(int64_t) + sizeof(uint32_t); - read_from_socket_fail_on_error( - &sending_federate->socket, message_size, &(buffer[1]), NULL, - " RTI failed to read port absent message from federate %u.", - sending_federate->enclave.id); + read_from_socket_fail_on_error(&sending_federate->socket, message_size, &(buffer[1]), NULL, + " RTI failed to read port absent message from federate %u.", + sending_federate->enclave.id); - uint16_t reactor_port_id = extract_uint16(&(buffer[1])); - uint16_t federate_id = extract_uint16(&(buffer[1 + sizeof(uint16_t)])); - tag_t tag = extract_tag(&(buffer[1 + 2 * sizeof(uint16_t)])); + uint16_t reactor_port_id = extract_uint16(&(buffer[1])); + uint16_t federate_id = extract_uint16(&(buffer[1 + sizeof(uint16_t)])); + tag_t tag = extract_tag(&(buffer[1 + 2 * sizeof(uint16_t)])); - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_from_federate(rti_remote->base.trace, receive_PORT_ABS, sending_federate->enclave.id, &tag); - } - - // Need to acquire the mutex lock to ensure that the thread handling - // messages coming from the socket connected to the destination does not - // issue a TAG before this message has been forwarded. - LF_MUTEX_LOCK(&rti_mutex); - - // If the destination federate is no longer connected, issue a warning - // and return. - federate_info_t *fed = GET_FED_INFO(federate_id); - if (fed->enclave.state == NOT_CONNECTED) { - LF_MUTEX_UNLOCK(&rti_mutex); - lf_print_warning("RTI: Destination federate %d is no longer connected. 
Dropping message.", - federate_id); - LF_PRINT_LOG("Fed status: next_event " PRINTF_TAG ", " - "completed " PRINTF_TAG ", " - "last_granted " PRINTF_TAG ", " - "last_provisionally_granted " PRINTF_TAG ".", - fed->enclave.next_event.time - start_time, - fed->enclave.next_event.microstep, - fed->enclave.completed.time - start_time, - fed->enclave.completed.microstep, - fed->enclave.last_granted.time - start_time, - fed->enclave.last_granted.microstep, - fed->enclave.last_provisionally_granted.time - start_time, - fed->enclave.last_provisionally_granted.microstep); - return; - } - - LF_PRINT_LOG("RTI forwarding port absent message for port %u to federate %u.", - reactor_port_id, - federate_id); - - // Need to make sure that the destination federate's thread has already - // sent the starting MSG_TYPE_TIMESTAMP message. - while (fed->enclave.state == PENDING) { - // Need to wait here. - lf_cond_wait(&sent_start_time); - } - - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_to_federate(rti_remote->base.trace, send_PORT_ABS, federate_id, &tag); - } + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_from_federate(receive_PORT_ABS, sending_federate->enclave.id, &tag); + } - // Forward the message. - write_to_socket_fail_on_error(&fed->socket, message_size + 1, buffer, &rti_mutex, - "RTI failed to forward message to federate %d.", federate_id); + // Need to acquire the mutex lock to ensure that the thread handling + // messages coming from the socket connected to the destination does not + // issue a TAG before this message has been forwarded. + LF_MUTEX_LOCK(&rti_mutex); + // If the destination federate is no longer connected, issue a warning + // and return. + federate_info_t* fed = GET_FED_INFO(federate_id); + if (fed->enclave.state == NOT_CONNECTED) { LF_MUTEX_UNLOCK(&rti_mutex); + lf_print_warning("RTI: Destination federate %d is no longer connected. 
Dropping message.", federate_id); + LF_PRINT_LOG("Fed status: next_event " PRINTF_TAG ", " + "completed " PRINTF_TAG ", " + "last_granted " PRINTF_TAG ", " + "last_provisionally_granted " PRINTF_TAG ".", + fed->enclave.next_event.time - start_time, fed->enclave.next_event.microstep, + fed->enclave.completed.time - start_time, fed->enclave.completed.microstep, + fed->enclave.last_granted.time - start_time, fed->enclave.last_granted.microstep, + fed->enclave.last_provisionally_granted.time - start_time, + fed->enclave.last_provisionally_granted.microstep); + return; + } + + LF_PRINT_LOG("RTI forwarding port absent message for port %u to federate %u.", reactor_port_id, federate_id); + + // Need to make sure that the destination federate's thread has already + // sent the starting MSG_TYPE_TIMESTAMP message. + while (fed->enclave.state == PENDING) { + // Need to wait here. + lf_cond_wait(&sent_start_time); + } + + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_to_federate(send_PORT_ABS, federate_id, &tag); + } + + // Forward the message. + write_to_socket_fail_on_error(&fed->socket, message_size + 1, buffer, &rti_mutex, + "RTI failed to forward message to federate %d.", federate_id); + + LF_MUTEX_UNLOCK(&rti_mutex); } -void handle_timed_message(federate_info_t *sending_federate, unsigned char *buffer) { - size_t header_size = 1 + sizeof(uint16_t) + sizeof(uint16_t) + sizeof(int32_t) - + sizeof(int64_t) + sizeof(uint32_t); - // Read the header, minus the first byte which has already been read. - read_from_socket_fail_on_error( - &sending_federate->socket, header_size - 1, &(buffer[1]), NULL, - "RTI failed to read the timed message header from remote federate."); - // Extract the header information. of the sender - uint16_t reactor_port_id; - uint16_t federate_id; - size_t length; - tag_t intended_tag; - // Extract information from the header. 
- extract_timed_header(&(buffer[1]), &reactor_port_id, &federate_id, &length, &intended_tag); - - size_t total_bytes_to_read = length + header_size; - size_t bytes_to_read = length; - - if (FED_COM_BUFFER_SIZE < header_size + 1) { - lf_print_error_and_exit("Buffer size (%d) is not large enough to " - "read the header plus one byte.", - FED_COM_BUFFER_SIZE); - } - - // Cut up the payload in chunks. - if (bytes_to_read > FED_COM_BUFFER_SIZE - header_size) { - bytes_to_read = FED_COM_BUFFER_SIZE - header_size; - } - - LF_PRINT_LOG("RTI received message from federate %d for federate %u port %u with intended tag " PRINTF_TAG ". Forwarding.", - sending_federate->enclave.id, federate_id, reactor_port_id, - intended_tag.time - lf_time_start(), intended_tag.microstep); - - read_from_socket_fail_on_error( - &sending_federate->socket, bytes_to_read, &(buffer[header_size]), NULL, - "RTI failed to read timed message from federate %d.", federate_id); - size_t bytes_read = bytes_to_read + header_size; - // Following only works for string messages. - // LF_PRINT_DEBUG("Message received by RTI: %s.", buffer + header_size); - - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_from_federate(rti_remote->base.trace, receive_TAGGED_MSG, sending_federate->enclave.id, &intended_tag); - } - - // Need to acquire the mutex lock to ensure that the thread handling - // messages coming from the socket connected to the destination does not - // issue a TAG before this message has been forwarded. - LF_MUTEX_LOCK(&rti_mutex); - - // If the destination federate is no longer connected, issue a warning - // and return. - federate_info_t *fed = GET_FED_INFO(federate_id); - if (fed->enclave.state == NOT_CONNECTED) { - LF_MUTEX_UNLOCK(&rti_mutex); - lf_print_warning("RTI: Destination federate %d is no longer connected. 
Dropping message.", - federate_id); - LF_PRINT_LOG("Fed status: next_event " PRINTF_TAG ", " - "completed " PRINTF_TAG ", " - "last_granted " PRINTF_TAG ", " - "last_provisionally_granted " PRINTF_TAG ".", - fed->enclave.next_event.time - start_time, - fed->enclave.next_event.microstep, - fed->enclave.completed.time - start_time, - fed->enclave.completed.microstep, - fed->enclave.last_granted.time - start_time, - fed->enclave.last_granted.microstep, - fed->enclave.last_provisionally_granted.time - start_time, - fed->enclave.last_provisionally_granted.microstep); - return; - } - - LF_PRINT_DEBUG( - "RTI forwarding message to port %d of federate %hu of length %zu.", - reactor_port_id, - federate_id, - length); - - // Need to make sure that the destination federate's thread has already - // sent the starting MSG_TYPE_TIMESTAMP message. - while (fed->enclave.state == PENDING) { - // Need to wait here. - lf_cond_wait(&sent_start_time); - } - - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_to_federate(rti_remote->base.trace, send_TAGGED_MSG, federate_id, &intended_tag); - } - - write_to_socket_fail_on_error(&fed->socket, bytes_read, buffer, &rti_mutex, - "RTI failed to forward message to federate %d.", federate_id); - - // The message length may be longer than the buffer, - // in which case we have to handle it in chunks. - size_t total_bytes_read = bytes_read; - while (total_bytes_read < total_bytes_to_read) { - LF_PRINT_DEBUG("Forwarding message in chunks."); - bytes_to_read = total_bytes_to_read - total_bytes_read; - if (bytes_to_read > FED_COM_BUFFER_SIZE) { - bytes_to_read = FED_COM_BUFFER_SIZE; - } - read_from_socket_fail_on_error(&sending_federate->socket, bytes_to_read, buffer, NULL, - "RTI failed to read message chunks."); - total_bytes_read += bytes_to_read; - - // FIXME: a mutex needs to be held for this so that other threads - // do not write to destination_socket and cause interleaving. However, - // holding the rti_mutex might be very expensive. 
Instead, each outgoing - // socket should probably have its own mutex. - write_to_socket_fail_on_error(&fed->socket, bytes_to_read, buffer, &rti_mutex, - "RTI failed to send message chunks."); - } - - // Record this in-transit message in federate's in-transit message queue. - if (lf_tag_compare(fed->enclave.completed, intended_tag) < 0) { - // Add a record of this message to the list of in-transit messages to this federate. - pqueue_tag_insert_if_no_match( - fed->in_transit_message_tags, - intended_tag); - LF_PRINT_DEBUG( - "RTI: Adding a message with tag " PRINTF_TAG " to the list of in-transit messages for federate %d.", - intended_tag.time - lf_time_start(), - intended_tag.microstep, - federate_id); - } else { - lf_print_error( - "RTI: Federate %d has already completed tag " PRINTF_TAG - ", but there is an in-transit message with tag " PRINTF_TAG " from federate %hu. " - "This is going to cause an STP violation under centralized coordination.", - federate_id, - fed->enclave.completed.time - lf_time_start(), - fed->enclave.completed.microstep, - intended_tag.time - lf_time_start(), - intended_tag.microstep, - sending_federate->enclave.id); - // FIXME: Drop the federate? - } - - // If the message tag is less than the most recently received NET from the federate, - // then update the federate's next event tag to match the message tag. - if (lf_tag_compare(intended_tag, fed->enclave.next_event) < 0) { - update_federate_next_event_tag_locked(federate_id, intended_tag); - } - +void handle_timed_message(federate_info_t* sending_federate, unsigned char* buffer) { + size_t header_size = 1 + sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t) + sizeof(int64_t) + sizeof(uint32_t); + // Read the header, minus the first byte which has already been read. + read_from_socket_fail_on_error(&sending_federate->socket, header_size - 1, &(buffer[1]), NULL, + "RTI failed to read the timed message header from remote federate."); + // Extract the header information. 
of the sender + uint16_t reactor_port_id; + uint16_t federate_id; + size_t length; + tag_t intended_tag; + // Extract information from the header. + extract_timed_header(&(buffer[1]), &reactor_port_id, &federate_id, &length, &intended_tag); + + size_t total_bytes_to_read = length + header_size; + size_t bytes_to_read = length; + + if (FED_COM_BUFFER_SIZE < header_size + 1) { + lf_print_error_and_exit("Buffer size (%d) is not large enough to " + "read the header plus one byte.", + FED_COM_BUFFER_SIZE); + } + + // Cut up the payload in chunks. + if (bytes_to_read > FED_COM_BUFFER_SIZE - header_size) { + bytes_to_read = FED_COM_BUFFER_SIZE - header_size; + } + + LF_PRINT_LOG("RTI received message from federate %d for federate %u port %u with intended tag " PRINTF_TAG + ". Forwarding.", + sending_federate->enclave.id, federate_id, reactor_port_id, intended_tag.time - lf_time_start(), + intended_tag.microstep); + + read_from_socket_fail_on_error(&sending_federate->socket, bytes_to_read, &(buffer[header_size]), NULL, + "RTI failed to read timed message from federate %d.", federate_id); + size_t bytes_read = bytes_to_read + header_size; + // Following only works for string messages. + // LF_PRINT_DEBUG("Message received by RTI: %s.", buffer + header_size); + + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_from_federate(receive_TAGGED_MSG, sending_federate->enclave.id, &intended_tag); + } + + // Need to acquire the mutex lock to ensure that the thread handling + // messages coming from the socket connected to the destination does not + // issue a TAG before this message has been forwarded. + LF_MUTEX_LOCK(&rti_mutex); + + // If the destination federate is no longer connected, issue a warning + // and return. + federate_info_t* fed = GET_FED_INFO(federate_id); + if (fed->enclave.state == NOT_CONNECTED) { LF_MUTEX_UNLOCK(&rti_mutex); + lf_print_warning("RTI: Destination federate %d is no longer connected. 
Dropping message.", federate_id); + LF_PRINT_LOG("Fed status: next_event " PRINTF_TAG ", " + "completed " PRINTF_TAG ", " + "last_granted " PRINTF_TAG ", " + "last_provisionally_granted " PRINTF_TAG ".", + fed->enclave.next_event.time - start_time, fed->enclave.next_event.microstep, + fed->enclave.completed.time - start_time, fed->enclave.completed.microstep, + fed->enclave.last_granted.time - start_time, fed->enclave.last_granted.microstep, + fed->enclave.last_provisionally_granted.time - start_time, + fed->enclave.last_provisionally_granted.microstep); + return; + } + + LF_PRINT_DEBUG("RTI forwarding message to port %d of federate %hu of length %zu.", reactor_port_id, federate_id, + length); + + // Need to make sure that the destination federate's thread has already + // sent the starting MSG_TYPE_TIMESTAMP message. + while (fed->enclave.state == PENDING) { + // Need to wait here. + lf_cond_wait(&sent_start_time); + } + + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_to_federate(send_TAGGED_MSG, federate_id, &intended_tag); + } + + write_to_socket_fail_on_error(&fed->socket, bytes_read, buffer, &rti_mutex, + "RTI failed to forward message to federate %d.", federate_id); + + // The message length may be longer than the buffer, + // in which case we have to handle it in chunks. + size_t total_bytes_read = bytes_read; + while (total_bytes_read < total_bytes_to_read) { + LF_PRINT_DEBUG("Forwarding message in chunks."); + bytes_to_read = total_bytes_to_read - total_bytes_read; + if (bytes_to_read > FED_COM_BUFFER_SIZE) { + bytes_to_read = FED_COM_BUFFER_SIZE; + } + read_from_socket_fail_on_error(&sending_federate->socket, bytes_to_read, buffer, NULL, + "RTI failed to read message chunks."); + total_bytes_read += bytes_to_read; + + // FIXME: a mutex needs to be held for this so that other threads + // do not write to destination_socket and cause interleaving. However, + // holding the rti_mutex might be very expensive. 
Instead, each outgoing + // socket should probably have its own mutex. + write_to_socket_fail_on_error(&fed->socket, bytes_to_read, buffer, &rti_mutex, + "RTI failed to send message chunks."); + } + + // Record this in-transit message in federate's in-transit message queue. + if (lf_tag_compare(fed->enclave.completed, intended_tag) < 0) { + // Add a record of this message to the list of in-transit messages to this federate. + pqueue_tag_insert_if_no_match(fed->in_transit_message_tags, intended_tag); + LF_PRINT_DEBUG("RTI: Adding a message with tag " PRINTF_TAG " to the list of in-transit messages for federate %d.", + intended_tag.time - lf_time_start(), intended_tag.microstep, federate_id); + } else { + lf_print_error("RTI: Federate %d has already completed tag " PRINTF_TAG + ", but there is an in-transit message with tag " PRINTF_TAG " from federate %hu. " + "This is going to cause an STP violation under centralized coordination.", + federate_id, fed->enclave.completed.time - lf_time_start(), fed->enclave.completed.microstep, + intended_tag.time - lf_time_start(), intended_tag.microstep, sending_federate->enclave.id); + // FIXME: Drop the federate? + } + + // If the message tag is less than the most recently received NET from the federate, + // then update the federate's next event tag to match the message tag. 
+ if (lf_tag_compare(intended_tag, fed->enclave.next_event) < 0) { + update_federate_next_event_tag_locked(federate_id, intended_tag); + } + + LF_MUTEX_UNLOCK(&rti_mutex); } -void handle_latest_tag_complete(federate_info_t *fed) { - unsigned char buffer[sizeof(int64_t) + sizeof(uint32_t)]; - read_from_socket_fail_on_error(&fed->socket, sizeof(int64_t) + sizeof(uint32_t), buffer, NULL, - "RTI failed to read the content of the logical tag complete from federate %d.", - fed->enclave.id); - tag_t completed = extract_tag(buffer); - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_from_federate(rti_remote->base.trace, receive_LTC, fed->enclave.id, &completed); - } - _logical_tag_complete(&(fed->enclave), completed); - - // FIXME: Should this function be in the enclave version? - LF_MUTEX_LOCK(&rti_mutex); - // See if we can remove any of the recorded in-transit messages for this. - pqueue_tag_remove_up_to(fed->in_transit_message_tags, completed); - LF_MUTEX_UNLOCK(&rti_mutex); +void handle_latest_tag_complete(federate_info_t* fed) { + unsigned char buffer[sizeof(int64_t) + sizeof(uint32_t)]; + read_from_socket_fail_on_error(&fed->socket, sizeof(int64_t) + sizeof(uint32_t), buffer, NULL, + "RTI failed to read the content of the logical tag complete from federate %d.", + fed->enclave.id); + tag_t completed = extract_tag(buffer); + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_from_federate(receive_LTC, fed->enclave.id, &completed); + } + _logical_tag_complete(&(fed->enclave), completed); + + // FIXME: Should this function be in the enclave version? + LF_MUTEX_LOCK(&rti_mutex); + // See if we can remove any of the recorded in-transit messages for this. 
+ pqueue_tag_remove_up_to(fed->in_transit_message_tags, completed); + LF_MUTEX_UNLOCK(&rti_mutex); } -void handle_next_event_tag(federate_info_t *fed) { - unsigned char buffer[sizeof(int64_t) + sizeof(uint32_t)]; - read_from_socket_fail_on_error(&fed->socket, sizeof(int64_t) + sizeof(uint32_t), buffer, NULL, - "RTI failed to read the content of the next event tag from federate %d.", - fed->enclave.id); - - // Acquire a mutex lock to ensure that this state does not change while a - // message is in transport or being used to determine a TAG. - LF_MUTEX_LOCK(&rti_mutex); // FIXME: Instead of using a mutex, it might be more efficient to use a - // select() mechanism to read and process federates' buffers in an orderly fashion. - - tag_t intended_tag = extract_tag(buffer); - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_from_federate(rti_remote->base.trace, receive_NET, fed->enclave.id, &intended_tag); - } - LF_PRINT_LOG("RTI received from federate %d the Next Event Tag (NET) " PRINTF_TAG, - fed->enclave.id, intended_tag.time - start_time, - intended_tag.microstep); - update_federate_next_event_tag_locked( - fed->enclave.id, - intended_tag); - LF_MUTEX_UNLOCK(&rti_mutex); +void handle_next_event_tag(federate_info_t* fed) { + unsigned char buffer[sizeof(int64_t) + sizeof(uint32_t)]; + read_from_socket_fail_on_error(&fed->socket, sizeof(int64_t) + sizeof(uint32_t), buffer, NULL, + "RTI failed to read the content of the next event tag from federate %d.", + fed->enclave.id); + + // Acquire a mutex lock to ensure that this state does not change while a + // message is in transport or being used to determine a TAG. + LF_MUTEX_LOCK(&rti_mutex); // FIXME: Instead of using a mutex, it might be more efficient to use a + // select() mechanism to read and process federates' buffers in an orderly fashion. 
+ + tag_t intended_tag = extract_tag(buffer); + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_from_federate(receive_NET, fed->enclave.id, &intended_tag); + } + LF_PRINT_LOG("RTI received from federate %d the Next Event Tag (NET) " PRINTF_TAG, fed->enclave.id, + intended_tag.time - start_time, intended_tag.microstep); + update_federate_next_event_tag_locked(fed->enclave.id, intended_tag); + LF_MUTEX_UNLOCK(&rti_mutex); } /////////////////// STOP functions //////////////////// @@ -601,36 +562,34 @@ bool stop_granted_already_sent_to_federates = false; * This function assumes the caller holds the rti_mutex lock. */ static void broadcast_stop_time_to_federates_locked() { - if (stop_granted_already_sent_to_federates == true) { - return; + if (stop_granted_already_sent_to_federates == true) { + return; + } + stop_granted_already_sent_to_federates = true; + + // Reply with a stop granted to all federates + unsigned char outgoing_buffer[MSG_TYPE_STOP_GRANTED_LENGTH]; + ENCODE_STOP_GRANTED(outgoing_buffer, rti_remote->base.max_stop_tag.time, rti_remote->base.max_stop_tag.microstep); + + // Iterate over federates and send each the message. + for (int i = 0; i < rti_remote->base.number_of_scheduling_nodes; i++) { + federate_info_t* fed = GET_FED_INFO(i); + if (fed->enclave.state == NOT_CONNECTED) { + continue; } - stop_granted_already_sent_to_federates = true; - - // Reply with a stop granted to all federates - unsigned char outgoing_buffer[MSG_TYPE_STOP_GRANTED_LENGTH]; - ENCODE_STOP_GRANTED(outgoing_buffer, rti_remote->base.max_stop_tag.time, rti_remote->base.max_stop_tag.microstep); - - // Iterate over federates and send each the message. - for (int i = 0; i < rti_remote->base.number_of_scheduling_nodes; i++) { - federate_info_t *fed = GET_FED_INFO(i); - if (fed->enclave.state == NOT_CONNECTED) { - continue; - } - if (lf_tag_compare(fed->enclave.next_event, rti_remote->base.max_stop_tag) >= 0) { - // Need the next_event to be no greater than the stop tag. 
- fed->enclave.next_event = rti_remote->base.max_stop_tag; - } - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_to_federate(rti_remote->base.trace, send_STOP_GRN, fed->enclave.id, &rti_remote->base.max_stop_tag); - } - write_to_socket_fail_on_error( - &fed->socket, MSG_TYPE_STOP_GRANTED_LENGTH, outgoing_buffer, &rti_mutex, - "RTI failed to send MSG_TYPE_STOP_GRANTED message to federate %d.", fed->enclave.id); + if (lf_tag_compare(fed->enclave.next_event, rti_remote->base.max_stop_tag) >= 0) { + // Need the next_event to be no greater than the stop tag. + fed->enclave.next_event = rti_remote->base.max_stop_tag; } + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_to_federate(send_STOP_GRN, fed->enclave.id, &rti_remote->base.max_stop_tag); + } + write_to_socket_fail_on_error(&fed->socket, MSG_TYPE_STOP_GRANTED_LENGTH, outgoing_buffer, &rti_mutex, + "RTI failed to send MSG_TYPE_STOP_GRANTED message to federate %d.", fed->enclave.id); + } - LF_PRINT_LOG("RTI sent to federates MSG_TYPE_STOP_GRANTED with tag " PRINTF_TAG, - rti_remote->base.max_stop_tag.time - start_time, - rti_remote->base.max_stop_tag.microstep); + LF_PRINT_LOG("RTI sent to federates MSG_TYPE_STOP_GRANTED with tag " PRINTF_TAG, + rti_remote->base.max_stop_tag.time - start_time, rti_remote->base.max_stop_tag.microstep); } /** @@ -640,424 +599,410 @@ static void broadcast_stop_time_to_federates_locked() { * @param fed The federate that has requested a stop. * @return 1 if stop time has been sent to all federates and 0 otherwise. */ -static int mark_federate_requesting_stop(federate_info_t *fed) { - if (!fed->requested_stop) { - rti_remote->base.num_scheduling_nodes_handling_stop++; - fed->requested_stop = true; - } - if (rti_remote->base.num_scheduling_nodes_handling_stop - == rti_remote->base.number_of_scheduling_nodes) { - // We now have information about the stop time of all - // federates. 
- broadcast_stop_time_to_federates_locked(); - return 1; - } - return 0; +static int mark_federate_requesting_stop(federate_info_t* fed) { + if (!fed->requested_stop) { + rti_remote->base.num_scheduling_nodes_handling_stop++; + fed->requested_stop = true; + } + if (rti_remote->base.num_scheduling_nodes_handling_stop == rti_remote->base.number_of_scheduling_nodes) { + // We now have information about the stop time of all + // federates. + broadcast_stop_time_to_federates_locked(); + return 1; + } + return 0; } /** * Thread to time out if federates do not reply to stop request. */ static void* wait_for_stop_request_reply(void* args) { - // Divide the time into small chunks and check periodically. - interval_t chunk = MAX_TIME_FOR_REPLY_TO_STOP_REQUEST/30; - int count = 0; - while (count++ < 30) { - if (stop_granted_already_sent_to_federates) return NULL; - lf_sleep(chunk); - } - // If we reach here, then error out. - lf_print_error_and_exit("Received only %d stop request replies within timeout " - PRINTF_TIME "ns. RTI is exiting.", - rti_remote->base.num_scheduling_nodes_handling_stop, - MAX_TIME_FOR_REPLY_TO_STOP_REQUEST - ); - return NULL; + initialize_lf_thread_id(); + // Divide the time into small chunks and check periodically. + interval_t chunk = MAX_TIME_FOR_REPLY_TO_STOP_REQUEST / 30; + int count = 0; + while (count++ < 30) { + if (stop_granted_already_sent_to_federates) + return NULL; + lf_sleep(chunk); + } + // If we reach here, then error out. + lf_print_error_and_exit("Received only %d stop request replies within timeout " PRINTF_TIME "ns. 
RTI is exiting.", + rti_remote->base.num_scheduling_nodes_handling_stop, MAX_TIME_FOR_REPLY_TO_STOP_REQUEST); + return NULL; } -void handle_stop_request_message(federate_info_t *fed) { - LF_PRINT_DEBUG("RTI handling stop_request from federate %d.", fed->enclave.id); - - size_t bytes_to_read = MSG_TYPE_STOP_REQUEST_LENGTH - 1; - unsigned char buffer[bytes_to_read]; - read_from_socket_fail_on_error(&fed->socket, bytes_to_read, buffer, NULL, - "RTI failed to read the MSG_TYPE_STOP_REQUEST payload from federate %d.", - fed->enclave.id); - - // Extract the proposed stop tag for the federate - tag_t proposed_stop_tag = extract_tag(buffer); - - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_from_federate(rti_remote->base.trace, receive_STOP_REQ, fed->enclave.id, &proposed_stop_tag); - } - - LF_PRINT_LOG("RTI received from federate %d a MSG_TYPE_STOP_REQUEST message with tag " PRINTF_TAG ".", - fed->enclave.id, proposed_stop_tag.time - start_time, proposed_stop_tag.microstep); +void handle_stop_request_message(federate_info_t* fed) { + LF_PRINT_DEBUG("RTI handling stop_request from federate %d.", fed->enclave.id); - // Acquire a mutex lock to ensure that this state does change while a - // message is in transport or being used to determine a TAG. - LF_MUTEX_LOCK(&rti_mutex); + size_t bytes_to_read = MSG_TYPE_STOP_REQUEST_LENGTH - 1; + unsigned char buffer[bytes_to_read]; + read_from_socket_fail_on_error(&fed->socket, bytes_to_read, buffer, NULL, + "RTI failed to read the MSG_TYPE_STOP_REQUEST payload from federate %d.", + fed->enclave.id); - // Check whether we have already received a stop_tag - // from this federate - if (fed->requested_stop) { - // If stop request messages have already been broadcast, treat this as if it were a reply. 
- if (rti_remote->stop_in_progress) { - mark_federate_requesting_stop(fed); - } - LF_MUTEX_UNLOCK(&rti_mutex); - return; - } + // Extract the proposed stop tag for the federate + tag_t proposed_stop_tag = extract_tag(buffer); - // Update the maximum stop tag received from federates - if (lf_tag_compare(proposed_stop_tag, rti_remote->base.max_stop_tag) > 0) { - rti_remote->base.max_stop_tag = proposed_stop_tag; - } + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_from_federate(receive_STOP_REQ, fed->enclave.id, &proposed_stop_tag); + } - // If all federates have replied, send stop request granted. - if (mark_federate_requesting_stop(fed)) { - // Have send stop request granted to all federates. Nothing more to do. - LF_MUTEX_UNLOCK(&rti_mutex); - return; - } + LF_PRINT_LOG("RTI received from federate %d a MSG_TYPE_STOP_REQUEST message with tag " PRINTF_TAG ".", + fed->enclave.id, proposed_stop_tag.time - start_time, proposed_stop_tag.microstep); - // Forward the stop request to all other federates that have not - // also issued a stop request. - unsigned char stop_request_buffer[MSG_TYPE_STOP_REQUEST_LENGTH]; - ENCODE_STOP_REQUEST(stop_request_buffer, - rti_remote->base.max_stop_tag.time, rti_remote->base.max_stop_tag.microstep); + // Acquire a mutex lock to ensure that this state does change while a + // message is in transport or being used to determine a TAG. + LF_MUTEX_LOCK(&rti_mutex); - // Iterate over federates and send each the MSG_TYPE_STOP_REQUEST message - // if we do not have a stop_time already for them. Do not do this more than once. + // Check whether we have already received a stop_tag + // from this federate + if (fed->requested_stop) { + // If stop request messages have already been broadcast, treat this as if it were a reply. if (rti_remote->stop_in_progress) { - LF_MUTEX_UNLOCK(&rti_mutex); - return; - } - rti_remote->stop_in_progress = true; - // Need a timeout here in case a federate never replies. 
- lf_thread_t timeout_thread; - lf_thread_create(&timeout_thread, wait_for_stop_request_reply, NULL); - - for (int i = 0; i < rti_remote->base.number_of_scheduling_nodes; i++) { - federate_info_t *f = GET_FED_INFO(i); - if (f->enclave.id != fed->enclave.id && f->requested_stop == false) { - if (f->enclave.state == NOT_CONNECTED) { - mark_federate_requesting_stop(f); - continue; - } - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_to_federate(rti_remote->base.trace, send_STOP_REQ, f->enclave.id, &rti_remote->base.max_stop_tag); - } - write_to_socket_fail_on_error(&f->socket, MSG_TYPE_STOP_REQUEST_LENGTH, stop_request_buffer, &rti_mutex, - "RTI failed to forward MSG_TYPE_STOP_REQUEST message to federate %d.", f->enclave.id); - } + mark_federate_requesting_stop(fed); } - LF_PRINT_LOG("RTI forwarded to federates MSG_TYPE_STOP_REQUEST with tag (" PRINTF_TIME ", %u).", - rti_remote->base.max_stop_tag.time - start_time, - rti_remote->base.max_stop_tag.microstep); LF_MUTEX_UNLOCK(&rti_mutex); -} - -void handle_stop_request_reply(federate_info_t *fed) { - size_t bytes_to_read = MSG_TYPE_STOP_REQUEST_REPLY_LENGTH - 1; - unsigned char buffer_stop_time[bytes_to_read]; - read_from_socket_fail_on_error(&fed->socket, bytes_to_read, buffer_stop_time, NULL, - "RTI failed to read the reply to MSG_TYPE_STOP_REQUEST message from federate %d.", - fed->enclave.id); + return; + } - tag_t federate_stop_tag = extract_tag(buffer_stop_time); + // Update the maximum stop tag received from federates + if (lf_tag_compare(proposed_stop_tag, rti_remote->base.max_stop_tag) > 0) { + rti_remote->base.max_stop_tag = proposed_stop_tag; + } - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_from_federate(rti_remote->base.trace, receive_STOP_REQ_REP, fed->enclave.id, &federate_stop_tag); - } + // If all federates have replied, send stop request granted. + if (mark_federate_requesting_stop(fed)) { + // Have send stop request granted to all federates. Nothing more to do. 
+ LF_MUTEX_UNLOCK(&rti_mutex); + return; + } - LF_PRINT_LOG("RTI received from federate %d STOP reply tag " PRINTF_TAG ".", fed->enclave.id, - federate_stop_tag.time - start_time, - federate_stop_tag.microstep); + // Forward the stop request to all other federates that have not + // also issued a stop request. + unsigned char stop_request_buffer[MSG_TYPE_STOP_REQUEST_LENGTH]; + ENCODE_STOP_REQUEST(stop_request_buffer, rti_remote->base.max_stop_tag.time, rti_remote->base.max_stop_tag.microstep); - // Acquire the mutex lock so that we can change the state of the RTI - LF_MUTEX_LOCK(&rti_mutex); - // If the federate has not requested stop before, count the reply - if (lf_tag_compare(federate_stop_tag, rti_remote->base.max_stop_tag) > 0) { - rti_remote->base.max_stop_tag = federate_stop_tag; - } - mark_federate_requesting_stop(fed); + // Iterate over federates and send each the MSG_TYPE_STOP_REQUEST message + // if we do not have a stop_time already for them. Do not do this more than once. + if (rti_remote->stop_in_progress) { LF_MUTEX_UNLOCK(&rti_mutex); + return; + } + rti_remote->stop_in_progress = true; + // Need a timeout here in case a federate never replies. 
+ lf_thread_t timeout_thread; + lf_thread_create(&timeout_thread, wait_for_stop_request_reply, NULL); + + for (int i = 0; i < rti_remote->base.number_of_scheduling_nodes; i++) { + federate_info_t* f = GET_FED_INFO(i); + if (f->enclave.id != fed->enclave.id && f->requested_stop == false) { + if (f->enclave.state == NOT_CONNECTED) { + mark_federate_requesting_stop(f); + continue; + } + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_to_federate(send_STOP_REQ, f->enclave.id, &rti_remote->base.max_stop_tag); + } + write_to_socket_fail_on_error(&f->socket, MSG_TYPE_STOP_REQUEST_LENGTH, stop_request_buffer, &rti_mutex, + "RTI failed to forward MSG_TYPE_STOP_REQUEST message to federate %d.", + f->enclave.id); + } + } + LF_PRINT_LOG("RTI forwarded to federates MSG_TYPE_STOP_REQUEST with tag (" PRINTF_TIME ", %u).", + rti_remote->base.max_stop_tag.time - start_time, rti_remote->base.max_stop_tag.microstep); + LF_MUTEX_UNLOCK(&rti_mutex); +} + +void handle_stop_request_reply(federate_info_t* fed) { + size_t bytes_to_read = MSG_TYPE_STOP_REQUEST_REPLY_LENGTH - 1; + unsigned char buffer_stop_time[bytes_to_read]; + read_from_socket_fail_on_error(&fed->socket, bytes_to_read, buffer_stop_time, NULL, + "RTI failed to read the reply to MSG_TYPE_STOP_REQUEST message from federate %d.", + fed->enclave.id); + + tag_t federate_stop_tag = extract_tag(buffer_stop_time); + + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_from_federate(receive_STOP_REQ_REP, fed->enclave.id, &federate_stop_tag); + } + + LF_PRINT_LOG("RTI received from federate %d STOP reply tag " PRINTF_TAG ".", fed->enclave.id, + federate_stop_tag.time - start_time, federate_stop_tag.microstep); + + // Acquire the mutex lock so that we can change the state of the RTI + LF_MUTEX_LOCK(&rti_mutex); + // If the federate has not requested stop before, count the reply + if (lf_tag_compare(federate_stop_tag, rti_remote->base.max_stop_tag) > 0) { + rti_remote->base.max_stop_tag = federate_stop_tag; + } + 
mark_federate_requesting_stop(fed); + LF_MUTEX_UNLOCK(&rti_mutex); } ////////////////////////////////////////////////// void handle_address_query(uint16_t fed_id) { - federate_info_t *fed = GET_FED_INFO(fed_id); - // Use buffer both for reading and constructing the reply. - // The length is what is needed for the reply. - unsigned char buffer[1 + sizeof(int32_t)]; - read_from_socket_fail_on_error(&fed->socket, sizeof(uint16_t), (unsigned char *)buffer, NULL, - "Failed to read address query."); - uint16_t remote_fed_id = extract_uint16(buffer); - - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_from_federate(rti_remote->base.trace, receive_ADR_QR, fed_id, NULL); - } - - LF_PRINT_DEBUG("RTI received address query from %d for %d.", fed_id, remote_fed_id); - - // NOTE: server_port initializes to -1, which means the RTI does not know - // the port number because it has not yet received an MSG_TYPE_ADDRESS_ADVERTISEMENT message - // from this federate. In that case, it will respond by sending -1. - - // Response message is also of type MSG_TYPE_ADDRESS_QUERY. - buffer[0] = MSG_TYPE_ADDRESS_QUERY; - - // Encode the port number. - federate_info_t *remote_fed = GET_FED_INFO(remote_fed_id); - - // Send the port number (which could be -1). - LF_MUTEX_LOCK(&rti_mutex); - encode_int32(remote_fed->server_port, (unsigned char *)&buffer[1]); - write_to_socket_fail_on_error( - &fed->socket, sizeof(int32_t) + 1, (unsigned char *)buffer, &rti_mutex, - "Failed to write port number to socket of federate %d.", fed_id); - - // Send the server IP address to federate. 
- write_to_socket_fail_on_error( - &fed->socket, sizeof(remote_fed->server_ip_addr), - (unsigned char *)&remote_fed->server_ip_addr, &rti_mutex, - "Failed to write ip address to socket of federate %d.", fed_id); - LF_MUTEX_UNLOCK(&rti_mutex); - - LF_PRINT_DEBUG("Replied to address query from federate %d with address %s:%d.", - fed_id, remote_fed->server_hostname, remote_fed->server_port); + federate_info_t* fed = GET_FED_INFO(fed_id); + // Use buffer both for reading and constructing the reply. + // The length is what is needed for the reply. + unsigned char buffer[1 + sizeof(int32_t)]; + read_from_socket_fail_on_error(&fed->socket, sizeof(uint16_t), (unsigned char*)buffer, NULL, + "Failed to read address query."); + uint16_t remote_fed_id = extract_uint16(buffer); + + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_from_federate(receive_ADR_QR, fed_id, NULL); + } + + LF_PRINT_DEBUG("RTI received address query from %d for %d.", fed_id, remote_fed_id); + + // NOTE: server_port initializes to -1, which means the RTI does not know + // the port number because it has not yet received an MSG_TYPE_ADDRESS_ADVERTISEMENT message + // from this federate. In that case, it will respond by sending -1. + + // Response message is MSG_TYPE_ADDRESS_QUERY_REPLY. + buffer[0] = MSG_TYPE_ADDRESS_QUERY_REPLY; + + // Encode the port number. + federate_info_t* remote_fed = GET_FED_INFO(remote_fed_id); + + // Send the port number (which could be -1). + LF_MUTEX_LOCK(&rti_mutex); + encode_int32(remote_fed->server_port, (unsigned char*)&buffer[1]); + write_to_socket_fail_on_error(&fed->socket, sizeof(int32_t) + 1, (unsigned char*)buffer, &rti_mutex, + "Failed to write port number to socket of federate %d.", fed_id); + + // Send the server IP address to federate. 
+ write_to_socket_fail_on_error(&fed->socket, sizeof(remote_fed->server_ip_addr), + (unsigned char*)&remote_fed->server_ip_addr, &rti_mutex, + "Failed to write ip address to socket of federate %d.", fed_id); + LF_MUTEX_UNLOCK(&rti_mutex); + + LF_PRINT_DEBUG("Replied to address query from federate %d with address %s:%d.", fed_id, remote_fed->server_hostname, + remote_fed->server_port); } void handle_address_ad(uint16_t federate_id) { - federate_info_t *fed = GET_FED_INFO(federate_id); - // Read the port number of the federate that can be used for physical - // connections to other federates - int32_t server_port = -1; - unsigned char buffer[sizeof(int32_t)]; - read_from_socket_fail_on_error(&fed->socket, sizeof(int32_t), (unsigned char *)buffer, NULL, - "Error reading port data from federate %d.", federate_id); - - server_port = extract_int32(buffer); - - assert(server_port < 65536); - - LF_MUTEX_LOCK(&rti_mutex); - fed->server_port = server_port; - LF_MUTEX_UNLOCK(&rti_mutex); - - LF_PRINT_LOG("Received address advertisement with port %d from federate %d.", server_port, federate_id); - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_from_federate(rti_remote->base.trace, receive_ADR_AD, federate_id, NULL); - } -} - -void handle_timestamp(federate_info_t *my_fed) { - unsigned char buffer[sizeof(int64_t)]; - // Read bytes from the socket. We need 8 bytes. 
- read_from_socket_fail_on_error(&my_fed->socket, sizeof(int64_t), (unsigned char *)&buffer, NULL, - "ERROR reading timestamp from federate %d.\n", my_fed->enclave.id); - - int64_t timestamp = swap_bytes_if_big_endian_int64(*((int64_t *)(&buffer))); - if (rti_remote->base.tracing_enabled) { - tag_t tag = {.time = timestamp, .microstep = 0}; - tracepoint_rti_from_federate(rti_remote->base.trace, receive_TIMESTAMP, my_fed->enclave.id, &tag); - } - LF_PRINT_DEBUG("RTI received timestamp message with time: " PRINTF_TIME ".", timestamp); - - LF_MUTEX_LOCK(&rti_mutex); - rti_remote->num_feds_proposed_start++; - if (timestamp > rti_remote->max_start_time) { - rti_remote->max_start_time = timestamp; - } - if (rti_remote->num_feds_proposed_start == rti_remote->base.number_of_scheduling_nodes) { - // All federates have proposed a start time. - lf_cond_broadcast(&received_start_times); - } else { - // Some federates have not yet proposed a start time. - // wait for a notification. - while (rti_remote->num_feds_proposed_start < rti_remote->base.number_of_scheduling_nodes) { - // FIXME: Should have a timeout here? - lf_cond_wait(&received_start_times); - } - } - - LF_MUTEX_UNLOCK(&rti_mutex); - - // Send back to the federate the maximum time plus an offset on a TIMESTAMP - // message. - unsigned char start_time_buffer[MSG_TYPE_TIMESTAMP_LENGTH]; - start_time_buffer[0] = MSG_TYPE_TIMESTAMP; - // Add an offset to this start time to get everyone starting together. 
- start_time = rti_remote->max_start_time + DELAY_START; - encode_int64(swap_bytes_if_big_endian_int64(start_time), &start_time_buffer[1]); - - if (rti_remote->base.tracing_enabled) { - tag_t tag = {.time = start_time, .microstep = 0}; - tracepoint_rti_to_federate(rti_remote->base.trace, send_TIMESTAMP, my_fed->enclave.id, &tag); - } - if (write_to_socket(my_fed->socket, MSG_TYPE_TIMESTAMP_LENGTH, start_time_buffer)) { - lf_print_error("Failed to send the starting time to federate %d.", my_fed->enclave.id); - } - - LF_MUTEX_LOCK(&rti_mutex); - // Update state for the federate to indicate that the MSG_TYPE_TIMESTAMP - // message has been sent. That MSG_TYPE_TIMESTAMP message grants time advance to - // the federate to the start time. - my_fed->enclave.state = GRANTED; - lf_cond_broadcast(&sent_start_time); - LF_PRINT_LOG("RTI sent start time " PRINTF_TIME " to federate %d.", start_time, my_fed->enclave.id); - LF_MUTEX_UNLOCK(&rti_mutex); + federate_info_t* fed = GET_FED_INFO(federate_id); + // Read the port number of the federate that can be used for physical + // connections to other federates + int32_t server_port = -1; + unsigned char buffer[sizeof(int32_t)]; + read_from_socket_fail_on_error(&fed->socket, sizeof(int32_t), (unsigned char*)buffer, NULL, + "Error reading port data from federate %d.", federate_id); + + server_port = extract_int32(buffer); + + assert(server_port < 65536); + + LF_MUTEX_LOCK(&rti_mutex); + fed->server_port = server_port; + LF_MUTEX_UNLOCK(&rti_mutex); + + LF_PRINT_LOG("Received address advertisement with port %d from federate %d.", server_port, federate_id); + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_from_federate(receive_ADR_AD, federate_id, NULL); + } } -void send_physical_clock(unsigned char message_type, federate_info_t *fed, socket_type_t socket_type) { - if (fed->enclave.state == NOT_CONNECTED) { - lf_print_warning("Clock sync: RTI failed to send physical time to federate %d. 
Socket not connected.\n", - fed->enclave.id); - return; - } - unsigned char buffer[sizeof(int64_t) + 1]; - buffer[0] = message_type; - int64_t current_physical_time = lf_time_physical(); - encode_int64(current_physical_time, &(buffer[1])); - - // Send the message - if (socket_type == UDP) { - // FIXME: UDP_addr is never initialized. - LF_PRINT_DEBUG("Clock sync: RTI sending UDP message type %u.", buffer[0]); - ssize_t bytes_written = sendto(rti_remote->socket_descriptor_UDP, buffer, 1 + sizeof(int64_t), 0, - (struct sockaddr *)&fed->UDP_addr, sizeof(fed->UDP_addr)); - if (bytes_written < (ssize_t)sizeof(int64_t) + 1) { - lf_print_warning("Clock sync: RTI failed to send physical time to federate %d: %s\n", - fed->enclave.id, - strerror(errno)); - return; - } - } - else if (socket_type == TCP) { - LF_PRINT_DEBUG("Clock sync: RTI sending TCP message type %u.", buffer[0]); - LF_MUTEX_LOCK(&rti_mutex); - write_to_socket_fail_on_error(&fed->socket, 1 + sizeof(int64_t), buffer, &rti_mutex, - "Clock sync: RTI failed to send physical time to federate %d.", - fed->enclave.id); - LF_MUTEX_UNLOCK(&rti_mutex); - } - LF_PRINT_DEBUG("Clock sync: RTI sent PHYSICAL_TIME_SYNC_MESSAGE with timestamp " PRINTF_TIME - " to federate %d.", - current_physical_time, - fed->enclave.id); +void handle_timestamp(federate_info_t* my_fed) { + unsigned char buffer[sizeof(int64_t)]; + // Read bytes from the socket. We need 8 bytes. 
+ read_from_socket_fail_on_error(&my_fed->socket, sizeof(int64_t), (unsigned char*)&buffer, NULL, + "ERROR reading timestamp from federate %d.\n", my_fed->enclave.id); + + int64_t timestamp = swap_bytes_if_big_endian_int64(*((int64_t*)(&buffer))); + if (rti_remote->base.tracing_enabled) { + tag_t tag = {.time = timestamp, .microstep = 0}; + tracepoint_rti_from_federate(receive_TIMESTAMP, my_fed->enclave.id, &tag); + } + LF_PRINT_DEBUG("RTI received timestamp message with time: " PRINTF_TIME ".", timestamp); + + LF_MUTEX_LOCK(&rti_mutex); + rti_remote->num_feds_proposed_start++; + if (timestamp > rti_remote->max_start_time) { + rti_remote->max_start_time = timestamp; + } + if (rti_remote->num_feds_proposed_start == rti_remote->base.number_of_scheduling_nodes) { + // All federates have proposed a start time. + lf_cond_broadcast(&received_start_times); + } else { + // Some federates have not yet proposed a start time. + // wait for a notification. + while (rti_remote->num_feds_proposed_start < rti_remote->base.number_of_scheduling_nodes) { + // FIXME: Should have a timeout here? + lf_cond_wait(&received_start_times); + } + } + + LF_MUTEX_UNLOCK(&rti_mutex); + + // Send back to the federate the maximum time plus an offset on a TIMESTAMP + // message. + unsigned char start_time_buffer[MSG_TYPE_TIMESTAMP_LENGTH]; + start_time_buffer[0] = MSG_TYPE_TIMESTAMP; + // Add an offset to this start time to get everyone starting together. 
+ start_time = rti_remote->max_start_time + DELAY_START; + lf_tracing_set_start_time(start_time); + encode_int64(swap_bytes_if_big_endian_int64(start_time), &start_time_buffer[1]); + + if (rti_remote->base.tracing_enabled) { + tag_t tag = {.time = start_time, .microstep = 0}; + tracepoint_rti_to_federate(send_TIMESTAMP, my_fed->enclave.id, &tag); + } + if (write_to_socket(my_fed->socket, MSG_TYPE_TIMESTAMP_LENGTH, start_time_buffer)) { + lf_print_error("Failed to send the starting time to federate %d.", my_fed->enclave.id); + } + + LF_MUTEX_LOCK(&rti_mutex); + // Update state for the federate to indicate that the MSG_TYPE_TIMESTAMP + // message has been sent. That MSG_TYPE_TIMESTAMP message grants time advance to + // the federate to the start time. + my_fed->enclave.state = GRANTED; + lf_cond_broadcast(&sent_start_time); + LF_PRINT_LOG("RTI sent start time " PRINTF_TIME " to federate %d.", start_time, my_fed->enclave.id); + LF_MUTEX_UNLOCK(&rti_mutex); } -void handle_physical_clock_sync_message(federate_info_t *my_fed, socket_type_t socket_type) { - // Lock the mutex to prevent interference between sending the two - // coded probe messages. +void send_physical_clock(unsigned char message_type, federate_info_t* fed, socket_type_t socket_type) { + if (fed->enclave.state == NOT_CONNECTED) { + lf_print_warning("Clock sync: RTI failed to send physical time to federate %d. Socket not connected.\n", + fed->enclave.id); + return; + } + unsigned char buffer[sizeof(int64_t) + 1]; + buffer[0] = message_type; + int64_t current_physical_time = lf_time_physical(); + encode_int64(current_physical_time, &(buffer[1])); + + // Send the message + if (socket_type == UDP) { + // FIXME: UDP_addr is never initialized. 
+ LF_PRINT_DEBUG("Clock sync: RTI sending UDP message type %u.", buffer[0]); + ssize_t bytes_written = sendto(rti_remote->socket_descriptor_UDP, buffer, 1 + sizeof(int64_t), 0, + (struct sockaddr*)&fed->UDP_addr, sizeof(fed->UDP_addr)); + if (bytes_written < (ssize_t)sizeof(int64_t) + 1) { + lf_print_warning("Clock sync: RTI failed to send physical time to federate %d: %s\n", fed->enclave.id, + strerror(errno)); + return; + } + } else if (socket_type == TCP) { + LF_PRINT_DEBUG("Clock sync: RTI sending TCP message type %u.", buffer[0]); LF_MUTEX_LOCK(&rti_mutex); - // Reply with a T4 type message - send_physical_clock(MSG_TYPE_CLOCK_SYNC_T4, my_fed, socket_type); - // Send the corresponding coded probe immediately after, - // but only if this is a UDP channel. - if (socket_type == UDP) { - send_physical_clock(MSG_TYPE_CLOCK_SYNC_CODED_PROBE, my_fed, socket_type); - } + write_to_socket_fail_on_error(&fed->socket, 1 + sizeof(int64_t), buffer, &rti_mutex, + "Clock sync: RTI failed to send physical time to federate %d.", fed->enclave.id); LF_MUTEX_UNLOCK(&rti_mutex); + } + LF_PRINT_DEBUG("Clock sync: RTI sent PHYSICAL_TIME_SYNC_MESSAGE with timestamp " PRINTF_TIME " to federate %d.", + current_physical_time, fed->enclave.id); } -void *clock_synchronization_thread(void *noargs) { - - // Wait until all federates have been notified of the start time. - // FIXME: Use lf_ version of this when merged with master. - LF_MUTEX_LOCK(&rti_mutex); - while (rti_remote->num_feds_proposed_start < rti_remote->base.number_of_scheduling_nodes) { - lf_cond_wait(&received_start_times); - } - LF_MUTEX_UNLOCK(&rti_mutex); - - // Wait until the start time before starting clock synchronization. - // The above wait ensures that start_time has been set. 
- interval_t ns_to_wait = start_time - lf_time_physical(); - - if (ns_to_wait > 0LL) { - lf_sleep(ns_to_wait); - } +void handle_physical_clock_sync_message(federate_info_t* my_fed, socket_type_t socket_type) { + // Lock the mutex to prevent interference between sending the two + // coded probe messages. + LF_MUTEX_LOCK(&rti_mutex); + // Reply with a T4 type message + send_physical_clock(MSG_TYPE_CLOCK_SYNC_T4, my_fed, socket_type); + // Send the corresponding coded probe immediately after, + // but only if this is a UDP channel. + if (socket_type == UDP) { + send_physical_clock(MSG_TYPE_CLOCK_SYNC_CODED_PROBE, my_fed, socket_type); + } + LF_MUTEX_UNLOCK(&rti_mutex); +} - // Initiate a clock synchronization every rti->clock_sync_period_ns - // Initiate a clock synchronization every rti->clock_sync_period_ns - struct timespec sleep_time = {(time_t)rti_remote->clock_sync_period_ns / BILLION, - rti_remote->clock_sync_period_ns % BILLION}; - struct timespec remaining_time; - - bool any_federates_connected = true; - while (any_federates_connected) { - // Sleep - lf_sleep(rti_remote->clock_sync_period_ns); // Can be interrupted - any_federates_connected = false; - for (int fed_id = 0; fed_id < rti_remote->base.number_of_scheduling_nodes; fed_id++) { - federate_info_t *fed = GET_FED_INFO(fed_id); - if (fed->enclave.state == NOT_CONNECTED) { - // FIXME: We need better error handling here, but clock sync failure - // should not stop execution. - lf_print_error("Clock sync failed with federate %d. Not connected.", fed_id); - continue; - } else if (!fed->clock_synchronization_enabled) { - continue; - } - // Send the RTI's current physical time to the federate - // Send on UDP. - LF_PRINT_DEBUG("RTI sending T1 message to initiate clock sync round."); - send_physical_clock(MSG_TYPE_CLOCK_SYNC_T1, fed, UDP); - - // Listen for reply message, which should be T3. 
- size_t message_size = 1 + sizeof(int32_t); - unsigned char buffer[message_size]; - // Maximum number of messages that we discard before giving up on this cycle. - // If the T3 message from this federate does not arrive and we keep receiving - // other messages, then give up on this federate and move to the next federate. - int remaining_attempts = 5; - while (remaining_attempts > 0) { - remaining_attempts--; - int read_failed = read_from_socket(rti_remote->socket_descriptor_UDP, message_size, buffer); - // If any errors occur, either discard the message or the clock sync round. - if (!read_failed) { - if (buffer[0] == MSG_TYPE_CLOCK_SYNC_T3) { - int32_t fed_id_2 = extract_int32(&(buffer[1])); - // Check that this message came from the correct federate. - if (fed_id_2 != fed->enclave.id) { - // Message is from the wrong federate. Discard the message. - lf_print_warning("Clock sync: Received T3 message from federate %d, " - "but expected one from %d. Discarding message.", - fed_id_2, fed->enclave.id); - continue; - } - LF_PRINT_DEBUG("Clock sync: RTI received T3 message from federate %d.", fed_id_2); - handle_physical_clock_sync_message(GET_FED_INFO(fed_id_2), UDP); - break; - } else { - // The message is not a T3 message. Discard the message and - // continue waiting for the T3 message. This is possibly a message - // from a previous cycle that was discarded. - lf_print_warning( - "Clock sync: Unexpected UDP message %u. Expected %u from federate %d. " - "Discarding message.", - buffer[0], - MSG_TYPE_CLOCK_SYNC_T3, - fed->enclave.id); - continue; - } - } else { - lf_print_warning("Clock sync: Read from UDP socket failed: %s. " - "Skipping clock sync round for federate %d.", - strerror(errno), - fed->enclave.id); - remaining_attempts = -1; - } - } - if (remaining_attempts > 0) { - any_federates_connected = true; +void* clock_synchronization_thread(void* noargs) { + initialize_lf_thread_id(); + // Wait until all federates have been notified of the start time. 
+ // FIXME: Use lf_ version of this when merged with master. + LF_MUTEX_LOCK(&rti_mutex); + while (rti_remote->num_feds_proposed_start < rti_remote->base.number_of_scheduling_nodes) { + lf_cond_wait(&received_start_times); + } + LF_MUTEX_UNLOCK(&rti_mutex); + + // Wait until the start time before starting clock synchronization. + // The above wait ensures that start_time has been set. + interval_t ns_to_wait = start_time - lf_time_physical(); + + if (ns_to_wait > 0LL) { + lf_sleep(ns_to_wait); + } + + // Initiate a clock synchronization every rti->clock_sync_period_ns + // Initiate a clock synchronization every rti->clock_sync_period_ns + struct timespec sleep_time = {(time_t)rti_remote->clock_sync_period_ns / BILLION, + rti_remote->clock_sync_period_ns % BILLION}; + struct timespec remaining_time; + + bool any_federates_connected = true; + while (any_federates_connected) { + // Sleep + lf_sleep(rti_remote->clock_sync_period_ns); // Can be interrupted + any_federates_connected = false; + for (int fed_id = 0; fed_id < rti_remote->base.number_of_scheduling_nodes; fed_id++) { + federate_info_t* fed = GET_FED_INFO(fed_id); + if (fed->enclave.state == NOT_CONNECTED) { + // FIXME: We need better error handling here, but clock sync failure + // should not stop execution. + lf_print_error("Clock sync failed with federate %d. Not connected.", fed_id); + continue; + } else if (!fed->clock_synchronization_enabled) { + continue; + } + // Send the RTI's current physical time to the federate + // Send on UDP. + LF_PRINT_DEBUG("RTI sending T1 message to initiate clock sync round."); + send_physical_clock(MSG_TYPE_CLOCK_SYNC_T1, fed, UDP); + + // Listen for reply message, which should be T3. + size_t message_size = 1 + sizeof(int32_t); + unsigned char buffer[message_size]; + // Maximum number of messages that we discard before giving up on this cycle. 
+ // If the T3 message from this federate does not arrive and we keep receiving + // other messages, then give up on this federate and move to the next federate. + int remaining_attempts = 5; + while (remaining_attempts > 0) { + remaining_attempts--; + int read_failed = read_from_socket(rti_remote->socket_descriptor_UDP, message_size, buffer); + // If any errors occur, either discard the message or the clock sync round. + if (!read_failed) { + if (buffer[0] == MSG_TYPE_CLOCK_SYNC_T3) { + int32_t fed_id_2 = extract_int32(&(buffer[1])); + // Check that this message came from the correct federate. + if (fed_id_2 != fed->enclave.id) { + // Message is from the wrong federate. Discard the message. + lf_print_warning("Clock sync: Received T3 message from federate %d, " + "but expected one from %d. Discarding message.", + fed_id_2, fed->enclave.id); + continue; } + LF_PRINT_DEBUG("Clock sync: RTI received T3 message from federate %d.", fed_id_2); + handle_physical_clock_sync_message(GET_FED_INFO(fed_id_2), UDP); + break; + } else { + // The message is not a T3 message. Discard the message and + // continue waiting for the T3 message. This is possibly a message + // from a previous cycle that was discarded. + lf_print_warning("Clock sync: Unexpected UDP message %u. Expected %u from federate %d. " + "Discarding message.", + buffer[0], MSG_TYPE_CLOCK_SYNC_T3, fed->enclave.id); + continue; + } + } else { + lf_print_warning("Clock sync: Read from UDP socket failed: %s. " + "Skipping clock sync round for federate %d.", + strerror(errno), fed->enclave.id); + remaining_attempts = -1; } + } + if (remaining_attempts > 0) { + any_federates_connected = true; + } } - return NULL; + } + return NULL; } /** @@ -1069,46 +1014,46 @@ void *clock_synchronization_thread(void *noargs) { * * @param my_fed The federate sending a MSG_TYPE_FAILED message. */ -static void handle_federate_failed(federate_info_t *my_fed) { - // Nothing more to do. Close the socket and exit. 
- LF_MUTEX_LOCK(&rti_mutex); +static void handle_federate_failed(federate_info_t* my_fed) { + // Nothing more to do. Close the socket and exit. + LF_MUTEX_LOCK(&rti_mutex); - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_from_federate(rti_remote->base.trace, receive_FAILED, my_fed->enclave.id, NULL); - } + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_from_federate(receive_FAILED, my_fed->enclave.id, NULL); + } - // Set the flag telling the RTI to exit with an error code when it exits. - _lf_federate_reports_error = true; - lf_print_error("RTI: Federate %d reports an error and has exited.", my_fed->enclave.id); + // Set the flag telling the RTI to exit with an error code when it exits. + _lf_federate_reports_error = true; + lf_print_error("RTI: Federate %d reports an error and has exited.", my_fed->enclave.id); - my_fed->enclave.state = NOT_CONNECTED; + my_fed->enclave.state = NOT_CONNECTED; - // Indicate that there will no further events from this federate. - my_fed->enclave.next_event = FOREVER_TAG; + // Indicate that there will be no further events from this federate. + my_fed->enclave.next_event = FOREVER_TAG; - // According to this: https://stackoverflow.com/questions/4160347/close-vs-shutdown-socket, - // the close should happen when receiving a 0 length message from the other end. - // Here, we just signal the other side that no further writes to the socket are - // forthcoming, which should result in the other end getting a zero-length reception. - shutdown(my_fed->socket, SHUT_RDWR); + // According to this: https://stackoverflow.com/questions/4160347/close-vs-shutdown-socket, + // the close should happen when receiving a 0 length message from the other end. + // Here, we just signal the other side that no further writes to the socket are + // forthcoming, which should result in the other end getting a zero-length reception. + shutdown(my_fed->socket, SHUT_RDWR); - // We can now safely close the socket. 
- close(my_fed->socket); // from unistd.h + // We can now safely close the socket. + close(my_fed->socket); // from unistd.h - // Check downstream federates to see whether they should now be granted a TAG. - // To handle cycles, need to create a boolean array to keep - // track of which upstream federates have been visited. - bool *visited = (bool *)calloc(rti_remote->base.number_of_scheduling_nodes, sizeof(bool)); // Initializes to 0. - notify_downstream_advance_grant_if_safe(&(my_fed->enclave), visited); - free(visited); + // Check downstream federates to see whether they should now be granted a TAG. + // To handle cycles, need to create a boolean array to keep + // track of which upstream federates have been visited. + bool* visited = (bool*)calloc(rti_remote->base.number_of_scheduling_nodes, sizeof(bool)); // Initializes to 0. + notify_downstream_advance_grant_if_safe(&(my_fed->enclave), visited); + free(visited); - LF_MUTEX_UNLOCK(&rti_mutex); + LF_MUTEX_UNLOCK(&rti_mutex); } /** * Handle MSG_TYPE_RESIGN sent by a federate. This message is sent at the time of termination * after all shutdown events are processed on the federate. - * + * * This function assumes the caller does not hold the mutex. * * @note At this point, the RTI might have outgoing messages to the federate. This @@ -1117,135 +1062,138 @@ static void handle_federate_failed(federate_info_t *my_fed) { * * @param my_fed The federate sending a MSG_TYPE_RESIGN message. */ -static void handle_federate_resign(federate_info_t *my_fed) { - // Nothing more to do. Close the socket and exit. - LF_MUTEX_LOCK(&rti_mutex); +static void handle_federate_resign(federate_info_t* my_fed) { + // Nothing more to do. Close the socket and exit. 
+ LF_MUTEX_LOCK(&rti_mutex); - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_from_federate(rti_remote->base.trace, receive_RESIGN, my_fed->enclave.id, NULL); - } + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_from_federate(receive_RESIGN, my_fed->enclave.id, NULL); + } - lf_print("RTI: Federate %d has resigned.", my_fed->enclave.id); + lf_print("RTI: Federate %d has resigned.", my_fed->enclave.id); - my_fed->enclave.state = NOT_CONNECTED; + my_fed->enclave.state = NOT_CONNECTED; - // Indicate that there will no further events from this federate. - my_fed->enclave.next_event = FOREVER_TAG; + // Indicate that there will be no further events from this federate. + my_fed->enclave.next_event = FOREVER_TAG; - // According to this: https://stackoverflow.com/questions/4160347/close-vs-shutdown-socket, - // the close should happen when receiving a 0 length message from the other end. - // Here, we just signal the other side that no further writes to the socket are - // forthcoming, which should result in the other end getting a zero-length reception. - shutdown(my_fed->socket, SHUT_WR); + // According to this: https://stackoverflow.com/questions/4160347/close-vs-shutdown-socket, + // the close should happen when receiving a 0 length message from the other end. + // Here, we just signal the other side that no further writes to the socket are + // forthcoming, which should result in the other end getting a zero-length reception. + shutdown(my_fed->socket, SHUT_WR); - // Wait for the federate to send an EOF or a socket error to occur. - // Discard any incoming bytes. 
Normally, this read should return 0 because + // the federate is resigning and should itself invoke shutdown. + unsigned char buffer[10]; + while (read(my_fed->socket, buffer, 10) > 0) + ; - // We can now safely close the socket. - close(my_fed->socket); // from unistd.h + // We can now safely close the socket. + close(my_fed->socket); // from unistd.h - // Check downstream federates to see whether they should now be granted a TAG. - // To handle cycles, need to create a boolean array to keep - // track of which upstream federates have been visited. - bool *visited = (bool *)calloc(rti_remote->base.number_of_scheduling_nodes, sizeof(bool)); // Initializes to 0. - notify_downstream_advance_grant_if_safe(&(my_fed->enclave), visited); - free(visited); + // Check downstream federates to see whether they should now be granted a TAG. + // To handle cycles, need to create a boolean array to keep + // track of which upstream federates have been visited. + bool* visited = (bool*)calloc(rti_remote->base.number_of_scheduling_nodes, sizeof(bool)); // Initializes to 0. + notify_downstream_advance_grant_if_safe(&(my_fed->enclave), visited); + free(visited); - LF_MUTEX_UNLOCK(&rti_mutex); + LF_MUTEX_UNLOCK(&rti_mutex); } -void *federate_info_thread_TCP(void *fed) { - federate_info_t *my_fed = (federate_info_t *)fed; - - // Buffer for incoming messages. - // This does not constrain the message size because messages - // are forwarded piece by piece. - unsigned char buffer[FED_COM_BUFFER_SIZE]; - - // Listen for messages from the federate. - while (my_fed->enclave.state != NOT_CONNECTED) { - // Read no more than one byte to get the message type. - int read_failed = read_from_socket(my_fed->socket, 1, buffer); - if (read_failed) { - // Socket is closed - lf_print_warning("RTI: Socket to federate %d is closed. 
Exiting the thread.", my_fed->enclave.id); - my_fed->enclave.state = NOT_CONNECTED; - my_fed->socket = -1; - // FIXME: We need better error handling here, but do not stop execution here. - break; - } - LF_PRINT_DEBUG("RTI: Received message type %u from federate %d.", buffer[0], my_fed->enclave.id); - switch (buffer[0]) { - case MSG_TYPE_TIMESTAMP: - handle_timestamp(my_fed); - break; - case MSG_TYPE_ADDRESS_QUERY: - handle_address_query(my_fed->enclave.id); - break; - case MSG_TYPE_ADDRESS_ADVERTISEMENT: - handle_address_ad(my_fed->enclave.id); - break; - case MSG_TYPE_TAGGED_MESSAGE: - handle_timed_message(my_fed, buffer); - break; - case MSG_TYPE_RESIGN: - handle_federate_resign(my_fed); - return NULL; - case MSG_TYPE_NEXT_EVENT_TAG: - handle_next_event_tag(my_fed); - break; - case MSG_TYPE_LATEST_TAG_COMPLETE: - handle_latest_tag_complete(my_fed); - break; - case MSG_TYPE_STOP_REQUEST: - handle_stop_request_message(my_fed); // FIXME: Reviewed until here. - // Need to also look at - // notify_advance_grant_if_safe() - // and notify_downstream_advance_grant_if_safe() - break; - case MSG_TYPE_STOP_REQUEST_REPLY: - handle_stop_request_reply(my_fed); - break; - case MSG_TYPE_PORT_ABSENT: - handle_port_absent_message(my_fed, buffer); - break; - case MSG_TYPE_FAILED: - handle_federate_failed(my_fed); - return NULL; - default: - lf_print_error("RTI received from federate %d an unrecognized TCP message type: %u.", my_fed->enclave.id, buffer[0]); - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_from_federate(rti_remote->base.trace, receive_UNIDENTIFIED, my_fed->enclave.id, NULL); - } - } - } - - // Nothing more to do. Close the socket and exit. - // Prevent multiple threads from closing the same socket at the same time. 
- LF_MUTEX_LOCK(&rti_mutex); - close(my_fed->socket); // from unistd.h - LF_MUTEX_UNLOCK(&rti_mutex); - return NULL; +void* federate_info_thread_TCP(void* fed) { + initialize_lf_thread_id(); + federate_info_t* my_fed = (federate_info_t*)fed; + + // Buffer for incoming messages. + // This does not constrain the message size because messages + // are forwarded piece by piece. + unsigned char buffer[FED_COM_BUFFER_SIZE]; + + // Listen for messages from the federate. + while (my_fed->enclave.state != NOT_CONNECTED) { + // Read no more than one byte to get the message type. + int read_failed = read_from_socket(my_fed->socket, 1, buffer); + if (read_failed) { + // Socket is closed + lf_print_warning("RTI: Socket to federate %d is closed. Exiting the thread.", my_fed->enclave.id); + my_fed->enclave.state = NOT_CONNECTED; + my_fed->socket = -1; + // FIXME: We need better error handling here, but do not stop execution here. + break; + } + LF_PRINT_DEBUG("RTI: Received message type %u from federate %d.", buffer[0], my_fed->enclave.id); + switch (buffer[0]) { + case MSG_TYPE_TIMESTAMP: + handle_timestamp(my_fed); + break; + case MSG_TYPE_ADDRESS_QUERY: + handle_address_query(my_fed->enclave.id); + break; + case MSG_TYPE_ADDRESS_ADVERTISEMENT: + handle_address_ad(my_fed->enclave.id); + break; + case MSG_TYPE_TAGGED_MESSAGE: + handle_timed_message(my_fed, buffer); + break; + case MSG_TYPE_RESIGN: + handle_federate_resign(my_fed); + return NULL; + case MSG_TYPE_NEXT_EVENT_TAG: + handle_next_event_tag(my_fed); + break; + case MSG_TYPE_LATEST_TAG_COMPLETE: + handle_latest_tag_complete(my_fed); + break; + case MSG_TYPE_STOP_REQUEST: + handle_stop_request_message(my_fed); // FIXME: Reviewed until here. 
+ // Need to also look at + // notify_advance_grant_if_safe() + // and notify_downstream_advance_grant_if_safe() + break; + case MSG_TYPE_STOP_REQUEST_REPLY: + handle_stop_request_reply(my_fed); + break; + case MSG_TYPE_PORT_ABSENT: + handle_port_absent_message(my_fed, buffer); + break; + case MSG_TYPE_FAILED: + handle_federate_failed(my_fed); + return NULL; + default: + lf_print_error("RTI received from federate %d an unrecognized TCP message type: %u.", my_fed->enclave.id, + buffer[0]); + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_from_federate(receive_UNIDENTIFIED, my_fed->enclave.id, NULL); + } + } + } + + // Nothing more to do. Close the socket and exit. + // Prevent multiple threads from closing the same socket at the same time. + LF_MUTEX_LOCK(&rti_mutex); + close(my_fed->socket); // from unistd.h + LF_MUTEX_UNLOCK(&rti_mutex); + return NULL; } -void send_reject(int *socket_id, unsigned char error_code) { - LF_PRINT_DEBUG("RTI sending MSG_TYPE_REJECT."); - unsigned char response[2]; - response[0] = MSG_TYPE_REJECT; - response[1] = error_code; - LF_MUTEX_LOCK(&rti_mutex); - // NOTE: Ignore errors on this response. - if (write_to_socket(*socket_id, 2, response)) { - lf_print_warning("RTI failed to write MSG_TYPE_REJECT message on the socket."); - } - // Close the socket. - shutdown(*socket_id, SHUT_RDWR); - close(*socket_id); - *socket_id = -1; - LF_MUTEX_UNLOCK(&rti_mutex); +void send_reject(int* socket_id, unsigned char error_code) { + LF_PRINT_DEBUG("RTI sending MSG_TYPE_REJECT."); + unsigned char response[2]; + response[0] = MSG_TYPE_REJECT; + response[1] = error_code; + LF_MUTEX_LOCK(&rti_mutex); + // NOTE: Ignore errors on this response. + if (write_to_socket(*socket_id, 2, response)) { + lf_print_warning("RTI failed to write MSG_TYPE_REJECT message on the socket."); + } + // Close the socket. 
+ shutdown(*socket_id, SHUT_RDWR); + close(*socket_id); + *socket_id = -1; + LF_MUTEX_UNLOCK(&rti_mutex); } /** @@ -1257,136 +1205,134 @@ void send_reject(int *socket_id, unsigned char error_code) { * @param client_fd The socket address. * @return The federate ID for success or -1 for failure. */ -static int32_t receive_and_check_fed_id_message(int *socket_id, struct sockaddr_in *client_fd) { - // Buffer for message ID, federate ID, and federation ID length. - size_t length = 1 + sizeof(uint16_t) + 1; // Message ID, federate ID, length of fedration ID. - unsigned char buffer[length]; - - // Read bytes from the socket. We need 4 bytes. - if (read_from_socket_close_on_error(socket_id, length, buffer)) { - lf_print_error("RTI failed to read from accepted socket."); - return -1; - } +static int32_t receive_and_check_fed_id_message(int* socket_id, struct sockaddr_in* client_fd) { + // Buffer for message ID, federate ID, and federation ID length. + size_t length = 1 + sizeof(uint16_t) + 1; // Message ID, federate ID, length of federation ID. + unsigned char buffer[length]; - uint16_t fed_id = rti_remote->base.number_of_scheduling_nodes; // Initialize to an invalid value. + // Read bytes from the socket. We need 4 bytes. + if (read_from_socket_close_on_error(socket_id, length, buffer)) { + lf_print_error("RTI failed to read from accepted socket."); + return -1; + } - // First byte received is the message type. - if (buffer[0] != MSG_TYPE_FED_IDS) { - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_to_federate(rti_remote->base.trace, send_REJECT, fed_id, NULL); - } - if (buffer[0] == MSG_TYPE_P2P_SENDING_FED_ID || buffer[0] == MSG_TYPE_P2P_TAGGED_MESSAGE) { - // The federate is trying to connect to a peer, not to the RTI. - // It has connected to the RTI instead. - // FIXME: This should not happen, but apparently has been observed. - // It should not happen because the peers get the port and IP address - // of the peer they want to connect to from the RTI. 
- // If the connection is a peer-to-peer connection between two - // federates, reject the connection with the WRONG_SERVER error. - send_reject(socket_id, WRONG_SERVER); - } else { - send_reject(socket_id, UNEXPECTED_MESSAGE); - } - lf_print_error("RTI expected a MSG_TYPE_FED_IDS message. Got %u (see net_common.h).", buffer[0]); - return -1; + uint16_t fed_id = rti_remote->base.number_of_scheduling_nodes; // Initialize to an invalid value. + + // First byte received is the message type. + if (buffer[0] != MSG_TYPE_FED_IDS) { + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_to_federate(send_REJECT, fed_id, NULL); + } + if (buffer[0] == MSG_TYPE_P2P_SENDING_FED_ID || buffer[0] == MSG_TYPE_P2P_TAGGED_MESSAGE) { + // The federate is trying to connect to a peer, not to the RTI. + // It has connected to the RTI instead. + // FIXME: This should not happen, but apparently has been observed. + // It should not happen because the peers get the port and IP address + // of the peer they want to connect to from the RTI. + // If the connection is a peer-to-peer connection between two + // federates, reject the connection with the WRONG_SERVER error. + send_reject(socket_id, WRONG_SERVER); } else { - // Received federate ID. - fed_id = extract_uint16(buffer + 1); - LF_PRINT_DEBUG("RTI received federate ID: %d.", fed_id); - - // Read the federation ID. First read the length, which is one byte. - size_t federation_id_length = (size_t)buffer[sizeof(uint16_t) + 1]; - char federation_id_received[federation_id_length + 1]; // One extra for null terminator. - // Next read the actual federation ID. - if (read_from_socket_close_on_error(socket_id, federation_id_length, - (unsigned char *)federation_id_received)) { - lf_print_error("RTI failed to read federation id from federate %d.", fed_id); - return -1; - } + send_reject(socket_id, UNEXPECTED_MESSAGE); + } + lf_print_error("RTI expected a MSG_TYPE_FED_IDS message. 
Got %u (see net_common.h).", buffer[0]); + return -1; + } else { + // Received federate ID. + fed_id = extract_uint16(buffer + 1); + LF_PRINT_DEBUG("RTI received federate ID: %d.", fed_id); + + // Read the federation ID. First read the length, which is one byte. + size_t federation_id_length = (size_t)buffer[sizeof(uint16_t) + 1]; + char federation_id_received[federation_id_length + 1]; // One extra for null terminator. + // Next read the actual federation ID. + if (read_from_socket_close_on_error(socket_id, federation_id_length, (unsigned char*)federation_id_received)) { + lf_print_error("RTI failed to read federation id from federate %d.", fed_id); + return -1; + } - // Terminate the string with a null. - federation_id_received[federation_id_length] = 0; + // Terminate the string with a null. + federation_id_received[federation_id_length] = 0; - LF_PRINT_DEBUG("RTI received federation ID: %s.", federation_id_received); + LF_PRINT_DEBUG("RTI received federation ID: %s.", federation_id_received); + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_from_federate(receive_FED_ID, fed_id, NULL); + } + // Compare the received federation ID to mine. + if (strncmp(rti_remote->federation_id, federation_id_received, federation_id_length) != 0) { + // Federation IDs do not match. Send back a MSG_TYPE_REJECT message. + lf_print_warning("Federate from another federation %s attempted to connect to RTI in federation %s.", + federation_id_received, rti_remote->federation_id); + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_to_federate(send_REJECT, fed_id, NULL); + } + send_reject(socket_id, FEDERATION_ID_DOES_NOT_MATCH); + return -1; + } else { + if (fed_id >= rti_remote->base.number_of_scheduling_nodes) { + // Federate ID is out of range. 
+ lf_print_error("RTI received federate ID %d, which is out of range.", fed_id); if (rti_remote->base.tracing_enabled) { - tracepoint_rti_from_federate(rti_remote->base.trace, receive_FED_ID, fed_id, NULL); + tracepoint_rti_to_federate(send_REJECT, fed_id, NULL); } - // Compare the received federation ID to mine. - if (strncmp(rti_remote->federation_id, federation_id_received, federation_id_length) != 0) { - // Federation IDs do not match. Send back a MSG_TYPE_REJECT message. - lf_print_warning("Federate from another federation %s attempted to connect to RTI in federation %s.", - federation_id_received, - rti_remote->federation_id); - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_to_federate(rti_remote->base.trace, send_REJECT, fed_id, NULL); - } - send_reject(socket_id, FEDERATION_ID_DOES_NOT_MATCH); - return -1; - } else { - if (fed_id >= rti_remote->base.number_of_scheduling_nodes) { - // Federate ID is out of range. - lf_print_error("RTI received federate ID %d, which is out of range.", fed_id); - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_to_federate(rti_remote->base.trace, send_REJECT, fed_id, NULL); - } - send_reject(socket_id, FEDERATE_ID_OUT_OF_RANGE); - return -1; - } else { - if ((rti_remote->base.scheduling_nodes[fed_id])->state != NOT_CONNECTED) { - lf_print_error("RTI received duplicate federate ID: %d.", fed_id); - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_to_federate(rti_remote->base.trace, send_REJECT, fed_id, NULL); - } - send_reject(socket_id, FEDERATE_ID_IN_USE); - return -1; - } - } + send_reject(socket_id, FEDERATE_ID_OUT_OF_RANGE); + return -1; + } else { + if ((rti_remote->base.scheduling_nodes[fed_id])->state != NOT_CONNECTED) { + lf_print_error("RTI received duplicate federate ID: %d.", fed_id); + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_to_federate(send_REJECT, fed_id, NULL); + } + send_reject(socket_id, FEDERATE_ID_IN_USE); + return -1; } - } - federate_info_t *fed = 
GET_FED_INFO(fed_id); - // The MSG_TYPE_FED_IDS message has the right federation ID. - // Assign the address information for federate. - // The IP address is stored here as an in_addr struct (in .server_ip_addr) that can be useful - // to create sockets and can be efficiently sent over the network. - // First, convert the sockaddr structure into a sockaddr_in that contains an internet address. - struct sockaddr_in *pV4_addr = client_fd; - // Then extract the internet address (which is in IPv4 format) and assign it as the federate's socket server - fed->server_ip_addr = pV4_addr->sin_addr; + } + } + } + federate_info_t* fed = GET_FED_INFO(fed_id); + // The MSG_TYPE_FED_IDS message has the right federation ID. + // Assign the address information for federate. + // The IP address is stored here as an in_addr struct (in .server_ip_addr) that can be useful + // to create sockets and can be efficiently sent over the network. + // First, convert the sockaddr structure into a sockaddr_in that contains an internet address. + struct sockaddr_in* pV4_addr = client_fd; + // Then extract the internet address (which is in IPv4 format) and assign it as the federate's socket server + fed->server_ip_addr = pV4_addr->sin_addr; #if LOG_LEVEL >= LOG_LEVEL_DEBUG - // Create the human readable format and copy that into - // the .server_hostname field of the federate. - char str[INET_ADDRSTRLEN + 1]; - inet_ntop(AF_INET, &fed->server_ip_addr, str, INET_ADDRSTRLEN); - strncpy(fed->server_hostname, str, INET_ADDRSTRLEN); + // Create the human readable format and copy that into + // the .server_hostname field of the federate. 
+ char str[INET_ADDRSTRLEN + 1]; + inet_ntop(AF_INET, &fed->server_ip_addr, str, INET_ADDRSTRLEN); + strncpy(fed->server_hostname, str, INET_ADDRSTRLEN); - LF_PRINT_DEBUG("RTI got address %s from federate %d.", fed->server_hostname, fed_id); + LF_PRINT_DEBUG("RTI got address %s from federate %d.", fed->server_hostname, fed_id); #endif - fed->socket = *socket_id; - - // Set the federate's state as pending - // because it is waiting for the start time to be - // sent by the RTI before beginning its execution. - fed->enclave.state = PENDING; - - LF_PRINT_DEBUG("RTI responding with MSG_TYPE_ACK to federate %d.", fed_id); - // Send an MSG_TYPE_ACK message. - unsigned char ack_message = MSG_TYPE_ACK; - if (rti_remote->base.tracing_enabled) { - tracepoint_rti_to_federate(rti_remote->base.trace, send_ACK, fed_id, NULL); - } - LF_MUTEX_LOCK(&rti_mutex); - if (write_to_socket_close_on_error(&fed->socket, 1, &ack_message)) { - LF_MUTEX_UNLOCK(&rti_mutex); - lf_print_error("RTI failed to write MSG_TYPE_ACK message to federate %d.", fed_id); - return -1; - } + fed->socket = *socket_id; + + // Set the federate's state as pending + // because it is waiting for the start time to be + // sent by the RTI before beginning its execution. + fed->enclave.state = PENDING; + + LF_PRINT_DEBUG("RTI responding with MSG_TYPE_ACK to federate %d.", fed_id); + // Send an MSG_TYPE_ACK message. 
+ unsigned char ack_message = MSG_TYPE_ACK; + if (rti_remote->base.tracing_enabled) { + tracepoint_rti_to_federate(send_ACK, fed_id, NULL); + } + LF_MUTEX_LOCK(&rti_mutex); + if (write_to_socket_close_on_error(&fed->socket, 1, &ack_message)) { LF_MUTEX_UNLOCK(&rti_mutex); + lf_print_error("RTI failed to write MSG_TYPE_ACK message to federate %d.", fed_id); + return -1; + } + LF_MUTEX_UNLOCK(&rti_mutex); - LF_PRINT_DEBUG("RTI sent MSG_TYPE_ACK to federate %d.", fed_id); + LF_PRINT_DEBUG("RTI sent MSG_TYPE_ACK to federate %d.", fed_id); - return (int32_t)fed_id; + return (int32_t)fed_id; } /** @@ -1394,85 +1340,72 @@ static int32_t receive_and_check_fed_id_message(int *socket_id, struct sockaddr_ * out the relevant information in the federate's struct. * @return 1 on success and 0 on failure. */ -static int receive_connection_information(int *socket_id, uint16_t fed_id) { - LF_PRINT_DEBUG("RTI waiting for MSG_TYPE_NEIGHBOR_STRUCTURE from federate %d.", fed_id); - unsigned char connection_info_header[MSG_TYPE_NEIGHBOR_STRUCTURE_HEADER_SIZE]; - read_from_socket_fail_on_error( - socket_id, - MSG_TYPE_NEIGHBOR_STRUCTURE_HEADER_SIZE, - connection_info_header, - NULL, - "RTI failed to read MSG_TYPE_NEIGHBOR_STRUCTURE message header from federate %d.", - fed_id); - - if (connection_info_header[0] != MSG_TYPE_NEIGHBOR_STRUCTURE) { - lf_print_error( - "RTI was expecting a MSG_TYPE_UDP_PORT message from federate %d. Got %u instead. 
" - "Rejecting federate.", - fed_id, connection_info_header[0]); - send_reject(socket_id, UNEXPECTED_MESSAGE); - return 0; +static int receive_connection_information(int* socket_id, uint16_t fed_id) { + LF_PRINT_DEBUG("RTI waiting for MSG_TYPE_NEIGHBOR_STRUCTURE from federate %d.", fed_id); + unsigned char connection_info_header[MSG_TYPE_NEIGHBOR_STRUCTURE_HEADER_SIZE]; + read_from_socket_fail_on_error(socket_id, MSG_TYPE_NEIGHBOR_STRUCTURE_HEADER_SIZE, connection_info_header, NULL, + "RTI failed to read MSG_TYPE_NEIGHBOR_STRUCTURE message header from federate %d.", + fed_id); + + if (connection_info_header[0] != MSG_TYPE_NEIGHBOR_STRUCTURE) { + lf_print_error("RTI was expecting a MSG_TYPE_UDP_PORT message from federate %d. Got %u instead. " + "Rejecting federate.", + fed_id, connection_info_header[0]); + send_reject(socket_id, UNEXPECTED_MESSAGE); + return 0; + } else { + federate_info_t* fed = GET_FED_INFO(fed_id); + // Read the number of upstream and downstream connections + fed->enclave.num_immediate_upstreams = extract_int32(&(connection_info_header[1])); + fed->enclave.num_immediate_downstreams = extract_int32(&(connection_info_header[1 + sizeof(int32_t)])); + LF_PRINT_DEBUG("RTI got %d upstreams and %d downstreams from federate %d.", fed->enclave.num_immediate_upstreams, + fed->enclave.num_immediate_downstreams, fed_id); + + // Allocate memory for the upstream and downstream pointers + if (fed->enclave.num_immediate_upstreams > 0) { + fed->enclave.immediate_upstreams = (uint16_t*)malloc(sizeof(uint16_t) * fed->enclave.num_immediate_upstreams); + // Allocate memory for the upstream delay pointers + fed->enclave.immediate_upstream_delays = + (interval_t*)malloc(sizeof(interval_t) * fed->enclave.num_immediate_upstreams); } else { - federate_info_t *fed = GET_FED_INFO(fed_id); - // Read the number of upstream and downstream connections - fed->enclave.num_immediate_upstreams = extract_int32(&(connection_info_header[1])); - fed->enclave.num_immediate_downstreams = 
extract_int32(&(connection_info_header[1 + sizeof(int32_t)])); - LF_PRINT_DEBUG( - "RTI got %d upstreams and %d downstreams from federate %d.", - fed->enclave.num_immediate_upstreams, - fed->enclave.num_immediate_downstreams, - fed_id); - - // Allocate memory for the upstream and downstream pointers - if (fed->enclave.num_immediate_upstreams > 0) { - fed->enclave.immediate_upstreams = (uint16_t *)malloc(sizeof(uint16_t) * fed->enclave.num_immediate_upstreams); - // Allocate memory for the upstream delay pointers - fed->enclave.immediate_upstream_delays = (interval_t *)malloc( - sizeof(interval_t) * fed->enclave.num_immediate_upstreams); - } else { - fed->enclave.immediate_upstreams = (uint16_t *)NULL; - fed->enclave.immediate_upstream_delays = (interval_t *)NULL; - } - if (fed->enclave.num_immediate_downstreams > 0) { - fed->enclave.immediate_downstreams = (uint16_t *)malloc(sizeof(uint16_t) * fed->enclave.num_immediate_downstreams); - } else { - fed->enclave.immediate_downstreams = (uint16_t *)NULL; - } - - size_t connections_info_body_size = ( - (sizeof(uint16_t) + sizeof(int64_t)) * fed->enclave.num_immediate_upstreams) - + (sizeof(uint16_t) * fed->enclave.num_immediate_downstreams); - unsigned char *connections_info_body = NULL; - if (connections_info_body_size > 0) { - connections_info_body = (unsigned char *)malloc(connections_info_body_size); - read_from_socket_fail_on_error( - socket_id, - connections_info_body_size, - connections_info_body, - NULL, - "RTI failed to read MSG_TYPE_NEIGHBOR_STRUCTURE message body from federate %d.", - fed_id); - // Keep track of where we are in the buffer - size_t message_head = 0; - // First, read the info about upstream federates - for (int i = 0; i < fed->enclave.num_immediate_upstreams; i++) { - fed->enclave.immediate_upstreams[i] = extract_uint16(&(connections_info_body[message_head])); - message_head += sizeof(uint16_t); - fed->enclave.immediate_upstream_delays[i] = extract_int64(&(connections_info_body[message_head])); 
- message_head += sizeof(int64_t); - } - - // Next, read the info about downstream federates - for (int i = 0; i < fed->enclave.num_immediate_downstreams; i++) { - fed->enclave.immediate_downstreams[i] = extract_uint16(&(connections_info_body[message_head])); - message_head += sizeof(uint16_t); - } - - free(connections_info_body); - } + fed->enclave.immediate_upstreams = (uint16_t*)NULL; + fed->enclave.immediate_upstream_delays = (interval_t*)NULL; } - LF_PRINT_DEBUG("RTI received neighbor structure from federate %d.", fed_id); - return 1; + if (fed->enclave.num_immediate_downstreams > 0) { + fed->enclave.immediate_downstreams = (uint16_t*)malloc(sizeof(uint16_t) * fed->enclave.num_immediate_downstreams); + } else { + fed->enclave.immediate_downstreams = (uint16_t*)NULL; + } + + size_t connections_info_body_size = ((sizeof(uint16_t) + sizeof(int64_t)) * fed->enclave.num_immediate_upstreams) + + (sizeof(uint16_t) * fed->enclave.num_immediate_downstreams); + unsigned char* connections_info_body = NULL; + if (connections_info_body_size > 0) { + connections_info_body = (unsigned char*)malloc(connections_info_body_size); + read_from_socket_fail_on_error(socket_id, connections_info_body_size, connections_info_body, NULL, + "RTI failed to read MSG_TYPE_NEIGHBOR_STRUCTURE message body from federate %d.", + fed_id); + // Keep track of where we are in the buffer + size_t message_head = 0; + // First, read the info about upstream federates + for (int i = 0; i < fed->enclave.num_immediate_upstreams; i++) { + fed->enclave.immediate_upstreams[i] = extract_uint16(&(connections_info_body[message_head])); + message_head += sizeof(uint16_t); + fed->enclave.immediate_upstream_delays[i] = extract_int64(&(connections_info_body[message_head])); + message_head += sizeof(int64_t); + } + + // Next, read the info about downstream federates + for (int i = 0; i < fed->enclave.num_immediate_downstreams; i++) { + fed->enclave.immediate_downstreams[i] = 
extract_uint16(&(connections_info_body[message_head])); + message_head += sizeof(uint16_t); + } + + free(connections_info_body); + } + } + LF_PRINT_DEBUG("RTI received neighbor structure from federate %d.", fed_id); + return 1; } /** @@ -1487,371 +1420,365 @@ static int receive_connection_information(int *socket_id, uint16_t fed_id) { * @param fed_id The federate ID. * @return 1 for success, 0 for failure. */ -static int receive_udp_message_and_set_up_clock_sync(int *socket_id, uint16_t fed_id) { - // Read the MSG_TYPE_UDP_PORT message from the federate regardless of the status of - // clock synchronization. This message will tell the RTI whether the federate - // is doing clock synchronization, and if it is, what port to use for UDP. - LF_PRINT_DEBUG("RTI waiting for MSG_TYPE_UDP_PORT from federate %d.", fed_id); - unsigned char response[1 + sizeof(uint16_t)]; - read_from_socket_fail_on_error(socket_id, 1 + sizeof(uint16_t), response, NULL, - "RTI failed to read MSG_TYPE_UDP_PORT message from federate %d.", fed_id); - if (response[0] != MSG_TYPE_UDP_PORT) { - lf_print_error( - "RTI was expecting a MSG_TYPE_UDP_PORT message from federate %d. Got %u instead. " - "Rejecting federate.", - fed_id, response[0]); - send_reject(socket_id, UNEXPECTED_MESSAGE); - return 0; - } else { - federate_info_t *fed = GET_FED_INFO(fed_id); - if (rti_remote->clock_sync_global_status >= clock_sync_init) { - // If no initial clock sync, no need perform initial clock sync. - uint16_t federate_UDP_port_number = extract_uint16(&(response[1])); - - LF_PRINT_DEBUG("RTI got MSG_TYPE_UDP_PORT %u from federate %d.", federate_UDP_port_number, fed_id); - - // A port number of UINT16_MAX means initial clock sync should not be performed. - if (federate_UDP_port_number != UINT16_MAX) { - // Perform the initialization clock synchronization with the federate. 
- // Send the required number of messages for clock synchronization - for (int i = 0; i < rti_remote->clock_sync_exchanges_per_interval; i++) { - // Send the RTI's current physical time T1 to the federate. - send_physical_clock(MSG_TYPE_CLOCK_SYNC_T1, fed, TCP); - - // Listen for reply message, which should be T3. - size_t message_size = 1 + sizeof(int32_t); - unsigned char buffer[message_size]; - read_from_socket_fail_on_error(socket_id, message_size, buffer, NULL, - "Socket to federate %d unexpectedly closed.", fed_id); - if (buffer[0] == MSG_TYPE_CLOCK_SYNC_T3) { - int32_t fed_id = extract_int32(&(buffer[1])); - assert(fed_id > -1); - assert(fed_id < 65536); - LF_PRINT_DEBUG("RTI received T3 clock sync message from federate %d.", fed_id); - handle_physical_clock_sync_message(fed, TCP); - } else { - lf_print_error("Unexpected message %u from federate %d.", buffer[0], fed_id); - send_reject(socket_id, UNEXPECTED_MESSAGE); - return 0; - } - } - LF_PRINT_DEBUG("RTI finished initial clock synchronization with federate %d.", fed_id); - } - if (rti_remote->clock_sync_global_status >= clock_sync_on) { - // If no runtime clock sync, no need to set up the UDP port. - if (federate_UDP_port_number > 0) { - // Initialize the UDP_addr field of the federate struct - fed->UDP_addr.sin_family = AF_INET; - fed->UDP_addr.sin_port = htons(federate_UDP_port_number); - fed->UDP_addr.sin_addr = fed->server_ip_addr; - } - } else { - // Disable clock sync after initial round. - fed->clock_synchronization_enabled = false; - } - } else { - // No clock synchronization at all. - LF_PRINT_DEBUG("RTI: No clock synchronization for federate %d.", fed_id); - // Clock synchronization is universally disabled via the clock-sync command-line parameter - // (-c off was passed to the RTI). - // Note that the federates are still going to send a - // MSG_TYPE_UDP_PORT message but with a payload (port) of -1. 
- fed->clock_synchronization_enabled = false; +static int receive_udp_message_and_set_up_clock_sync(int* socket_id, uint16_t fed_id) { + // Read the MSG_TYPE_UDP_PORT message from the federate regardless of the status of + // clock synchronization. This message will tell the RTI whether the federate + // is doing clock synchronization, and if it is, what port to use for UDP. + LF_PRINT_DEBUG("RTI waiting for MSG_TYPE_UDP_PORT from federate %d.", fed_id); + unsigned char response[1 + sizeof(uint16_t)]; + read_from_socket_fail_on_error(socket_id, 1 + sizeof(uint16_t), response, NULL, + "RTI failed to read MSG_TYPE_UDP_PORT message from federate %d.", fed_id); + if (response[0] != MSG_TYPE_UDP_PORT) { + lf_print_error("RTI was expecting a MSG_TYPE_UDP_PORT message from federate %d. Got %u instead. " + "Rejecting federate.", + fed_id, response[0]); + send_reject(socket_id, UNEXPECTED_MESSAGE); + return 0; + } else { + federate_info_t* fed = GET_FED_INFO(fed_id); + if (rti_remote->clock_sync_global_status >= clock_sync_init) { + // If no initial clock sync, no need perform initial clock sync. + uint16_t federate_UDP_port_number = extract_uint16(&(response[1])); + + LF_PRINT_DEBUG("RTI got MSG_TYPE_UDP_PORT %u from federate %d.", federate_UDP_port_number, fed_id); + + // A port number of UINT16_MAX means initial clock sync should not be performed. + if (federate_UDP_port_number != UINT16_MAX) { + // Perform the initialization clock synchronization with the federate. + // Send the required number of messages for clock synchronization + for (int i = 0; i < rti_remote->clock_sync_exchanges_per_interval; i++) { + // Send the RTI's current physical time T1 to the federate. + send_physical_clock(MSG_TYPE_CLOCK_SYNC_T1, fed, TCP); + + // Listen for reply message, which should be T3. 
+ size_t message_size = 1 + sizeof(int32_t); + unsigned char buffer[message_size]; + read_from_socket_fail_on_error(socket_id, message_size, buffer, NULL, + "Socket to federate %d unexpectedly closed.", fed_id); + if (buffer[0] == MSG_TYPE_CLOCK_SYNC_T3) { + int32_t fed_id = extract_int32(&(buffer[1])); + assert(fed_id > -1); + assert(fed_id < 65536); + LF_PRINT_DEBUG("RTI received T3 clock sync message from federate %d.", fed_id); + handle_physical_clock_sync_message(fed, TCP); + } else { + lf_print_error("Unexpected message %u from federate %d.", buffer[0], fed_id); + send_reject(socket_id, UNEXPECTED_MESSAGE); + return 0; + } } - } - return 1; + LF_PRINT_DEBUG("RTI finished initial clock synchronization with federate %d.", fed_id); + } + if (rti_remote->clock_sync_global_status >= clock_sync_on) { + // If no runtime clock sync, no need to set up the UDP port. + if (federate_UDP_port_number > 0) { + // Initialize the UDP_addr field of the federate struct + fed->UDP_addr.sin_family = AF_INET; + fed->UDP_addr.sin_port = htons(federate_UDP_port_number); + fed->UDP_addr.sin_addr = fed->server_ip_addr; + } + } else { + // Disable clock sync after initial round. + fed->clock_synchronization_enabled = false; + } + } else { + // No clock synchronization at all. + LF_PRINT_DEBUG("RTI: No clock synchronization for federate %d.", fed_id); + // Clock synchronization is universally disabled via the clock-sync command-line parameter + // (-c off was passed to the RTI). + // Note that the federates are still going to send a + // MSG_TYPE_UDP_PORT message but with a payload (port) of -1. + fed->clock_synchronization_enabled = false; + } + } + return 1; } #ifdef __RTI_AUTH__ /** * Authenticate incoming federate by performing HMAC-based authentication. - * + * * @param socket Socket for the incoming federate tryting to authenticate. * @return True if authentication is successful and false otherwise. 
*/ -static bool authenticate_federate(int *socket) { - // Wait for MSG_TYPE_FED_NONCE from federate. - size_t fed_id_length = sizeof(uint16_t); - unsigned char buffer[1 + fed_id_length + NONCE_LENGTH]; - read_from_socket_fail_on_error(socket, 1 + fed_id_length + NONCE_LENGTH, buffer, NULL, - "Failed to read MSG_TYPE_FED_NONCE"); - if (buffer[0] != MSG_TYPE_FED_NONCE) { - lf_print_error_and_exit( - "Received unexpected response %u from the FED (see net_common.h).", - buffer[0]); - } - unsigned int hmac_length = SHA256_HMAC_LENGTH; - size_t federation_id_length = strnlen(rti_remote->federation_id, 255); - // HMAC tag is created with MSG_TYPE, federate ID, received federate nonce. - unsigned char mac_buf[1 + fed_id_length + NONCE_LENGTH]; - mac_buf[0] = MSG_TYPE_RTI_RESPONSE; - memcpy(&mac_buf[1], &buffer[1], fed_id_length); - memcpy(&mac_buf[1 + fed_id_length], &buffer[1 + fed_id_length], NONCE_LENGTH); - unsigned char hmac_tag[hmac_length]; - unsigned char *ret = HMAC(EVP_sha256(), rti_remote->federation_id, - federation_id_length, mac_buf, 1 + fed_id_length + NONCE_LENGTH, - hmac_tag, &hmac_length); - if (ret == NULL) { - lf_print_error_and_exit("HMAC construction failed for MSG_TYPE_RTI_RESPONSE."); - } - // Make buffer for message type, RTI's nonce, and HMAC tag. 
- unsigned char sender[1 + NONCE_LENGTH + hmac_length]; - sender[0] = MSG_TYPE_RTI_RESPONSE; - unsigned char rti_nonce[NONCE_LENGTH]; - RAND_bytes(rti_nonce, NONCE_LENGTH); - memcpy(&sender[1], rti_nonce, NONCE_LENGTH); - memcpy(&sender[1 + NONCE_LENGTH], hmac_tag, hmac_length); - if (write_to_socket(*socket, 1 + NONCE_LENGTH + hmac_length, sender)) { - lf_print_error("Failed to send nonce to federate."); - } - - // Wait for MSG_TYPE_FED_RESPONSE - unsigned char received[1 + hmac_length]; - read_from_socket_fail_on_error(socket, 1 + hmac_length, received, NULL, - "Failed to read federate response."); - if (received[0] != MSG_TYPE_FED_RESPONSE) { - lf_print_error_and_exit( - "Received unexpected response %u from the federate (see net_common.h).", - received[0]); - return false; - } - // HMAC tag is created with MSG_TYPE_FED_RESPONSE and RTI's nonce. - unsigned char mac_buf2[1 + NONCE_LENGTH]; - mac_buf2[0] = MSG_TYPE_FED_RESPONSE; - memcpy(&mac_buf2[1], rti_nonce, NONCE_LENGTH); - unsigned char rti_tag[hmac_length]; - ret = HMAC(EVP_sha256(), rti_remote->federation_id, federation_id_length, - mac_buf2, 1 + NONCE_LENGTH, rti_tag, &hmac_length); - if (ret == NULL) { - lf_print_error_and_exit("HMAC construction failed for MSG_TYPE_FED_RESPONSE."); - } - // Compare received tag and created tag. - if (memcmp(&received[1], rti_tag, hmac_length) != 0) { - // Federation IDs do not match. Send back a HMAC_DOES_NOT_MATCH message. - lf_print_warning("HMAC authentication failed. Rejecting the federate."); - send_reject(socket, HMAC_DOES_NOT_MATCH); - return false; - } else { - LF_PRINT_LOG("Federate's HMAC verified."); - return true; - } +static bool authenticate_federate(int* socket) { + // Wait for MSG_TYPE_FED_NONCE from federate. 
+ size_t fed_id_length = sizeof(uint16_t); + unsigned char buffer[1 + fed_id_length + NONCE_LENGTH]; + read_from_socket_fail_on_error(socket, 1 + fed_id_length + NONCE_LENGTH, buffer, NULL, + "Failed to read MSG_TYPE_FED_NONCE"); + if (buffer[0] != MSG_TYPE_FED_NONCE) { + lf_print_error_and_exit("Received unexpected response %u from the FED (see net_common.h).", buffer[0]); + } + unsigned int hmac_length = SHA256_HMAC_LENGTH; + size_t federation_id_length = strnlen(rti_remote->federation_id, 255); + // HMAC tag is created with MSG_TYPE, federate ID, received federate nonce. + unsigned char mac_buf[1 + fed_id_length + NONCE_LENGTH]; + mac_buf[0] = MSG_TYPE_RTI_RESPONSE; + memcpy(&mac_buf[1], &buffer[1], fed_id_length); + memcpy(&mac_buf[1 + fed_id_length], &buffer[1 + fed_id_length], NONCE_LENGTH); + unsigned char hmac_tag[hmac_length]; + unsigned char* ret = HMAC(EVP_sha256(), rti_remote->federation_id, federation_id_length, mac_buf, + 1 + fed_id_length + NONCE_LENGTH, hmac_tag, &hmac_length); + if (ret == NULL) { + lf_print_error_and_exit("HMAC construction failed for MSG_TYPE_RTI_RESPONSE."); + } + // Make buffer for message type, RTI's nonce, and HMAC tag. 
+ unsigned char sender[1 + NONCE_LENGTH + hmac_length]; + sender[0] = MSG_TYPE_RTI_RESPONSE; + unsigned char rti_nonce[NONCE_LENGTH]; + RAND_bytes(rti_nonce, NONCE_LENGTH); + memcpy(&sender[1], rti_nonce, NONCE_LENGTH); + memcpy(&sender[1 + NONCE_LENGTH], hmac_tag, hmac_length); + if (write_to_socket(*socket, 1 + NONCE_LENGTH + hmac_length, sender)) { + lf_print_error("Failed to send nonce to federate."); + } + + // Wait for MSG_TYPE_FED_RESPONSE + unsigned char received[1 + hmac_length]; + read_from_socket_fail_on_error(socket, 1 + hmac_length, received, NULL, "Failed to read federate response."); + if (received[0] != MSG_TYPE_FED_RESPONSE) { + lf_print_error_and_exit("Received unexpected response %u from the federate (see net_common.h).", received[0]); + return false; + } + // HMAC tag is created with MSG_TYPE_FED_RESPONSE and RTI's nonce. + unsigned char mac_buf2[1 + NONCE_LENGTH]; + mac_buf2[0] = MSG_TYPE_FED_RESPONSE; + memcpy(&mac_buf2[1], rti_nonce, NONCE_LENGTH); + unsigned char rti_tag[hmac_length]; + ret = HMAC(EVP_sha256(), rti_remote->federation_id, federation_id_length, mac_buf2, 1 + NONCE_LENGTH, rti_tag, + &hmac_length); + if (ret == NULL) { + lf_print_error_and_exit("HMAC construction failed for MSG_TYPE_FED_RESPONSE."); + } + // Compare received tag and created tag. + if (memcmp(&received[1], rti_tag, hmac_length) != 0) { + // Federation IDs do not match. Send back a HMAC_DOES_NOT_MATCH message. + lf_print_warning("HMAC authentication failed. Rejecting the federate."); + send_reject(socket, HMAC_DOES_NOT_MATCH); + return false; + } else { + LF_PRINT_LOG("Federate's HMAC verified."); + return true; + } } #endif void lf_connect_to_federates(int socket_descriptor) { - for (int i = 0; i < rti_remote->base.number_of_scheduling_nodes; i++) { - // Wait for an incoming connection request. - struct sockaddr client_fd; - uint32_t client_length = sizeof(client_fd); - // The following blocks until a federate connects. 
- int socket_id = -1; - while (1) { - socket_id = accept(rti_remote->socket_descriptor_TCP, &client_fd, &client_length); - if (socket_id >= 0) { - // Got a socket - break; - } else if (socket_id < 0 && (errno != EAGAIN || errno != EWOULDBLOCK)) { - lf_print_error_system_failure("RTI failed to accept the socket."); - } else { - // Try again - lf_print_warning("RTI failed to accept the socket. %s. Trying again.", strerror(errno)); - continue; - } - } + for (int i = 0; i < rti_remote->base.number_of_scheduling_nodes; i++) { + // Wait for an incoming connection request. + struct sockaddr client_fd; + uint32_t client_length = sizeof(client_fd); + // The following blocks until a federate connects. + int socket_id = -1; + while (1) { + socket_id = accept(rti_remote->socket_descriptor_TCP, &client_fd, &client_length); + if (socket_id >= 0) { + // Got a socket + break; + } else if (socket_id < 0 && (errno != EAGAIN || errno != EWOULDBLOCK)) { + lf_print_error_system_failure("RTI failed to accept the socket."); + } else { + // Try again + lf_print_warning("RTI failed to accept the socket. %s. Trying again.", strerror(errno)); + continue; + } + } // Wait for the first message from the federate when RTI -a option is on. #ifdef __RTI_AUTH__ - if (rti_remote->authentication_enabled) { - if (!authenticate_federate(&socket_id)) { - lf_print_warning("RTI failed to authenticate the incoming federate."); - // Close the socket. - shutdown(socket_id, SHUT_RDWR); - close(socket_id); - socket_id = -1; - // Ignore the federate that failed authentication. - i--; - continue; - } - } + if (rti_remote->authentication_enabled) { + if (!authenticate_federate(&socket_id)) { + lf_print_warning("RTI failed to authenticate the incoming federate."); + // Close the socket. + shutdown(socket_id, SHUT_RDWR); + close(socket_id); + socket_id = -1; + // Ignore the federate that failed authentication. 
+ i--; + continue; + } + } #endif - // The first message from the federate should contain its ID and the federation ID. - int32_t fed_id = receive_and_check_fed_id_message(&socket_id, (struct sockaddr_in *)&client_fd); - if (fed_id >= 0 && socket_id >= 0 - && receive_connection_information(&socket_id, (uint16_t)fed_id) - && receive_udp_message_and_set_up_clock_sync(&socket_id, (uint16_t)fed_id)) { - - // Create a thread to communicate with the federate. - // This has to be done after clock synchronization is finished - // or that thread may end up attempting to handle incoming clock - // synchronization messages. - federate_info_t *fed = GET_FED_INFO(fed_id); - lf_thread_create(&(fed->thread_id), federate_info_thread_TCP, fed); - } else { - // Received message was rejected. Try again. - i--; - } + // The first message from the federate should contain its ID and the federation ID. + int32_t fed_id = receive_and_check_fed_id_message(&socket_id, (struct sockaddr_in*)&client_fd); + if (fed_id >= 0 && socket_id >= 0 && receive_connection_information(&socket_id, (uint16_t)fed_id) && + receive_udp_message_and_set_up_clock_sync(&socket_id, (uint16_t)fed_id)) { + + // Create a thread to communicate with the federate. + // This has to be done after clock synchronization is finished + // or that thread may end up attempting to handle incoming clock + // synchronization messages. + federate_info_t* fed = GET_FED_INFO(fed_id); + lf_thread_create(&(fed->thread_id), federate_info_thread_TCP, fed); + } else { + // Received message was rejected. Try again. + i--; + } + } + // All federates have connected. + LF_PRINT_DEBUG("All federates have connected to RTI."); + + if (rti_remote->clock_sync_global_status >= clock_sync_on) { + // Create the thread that performs periodic PTP clock synchronization sessions + // over the UDP channel, but only if the UDP channel is open and at least one + // federate is performing runtime clock synchronization. 
+ bool clock_sync_enabled = false; + for (int i = 0; i < rti_remote->base.number_of_scheduling_nodes; i++) { + federate_info_t* fed_info = GET_FED_INFO(i); + if (fed_info->clock_synchronization_enabled) { + clock_sync_enabled = true; + break; + } } - // All federates have connected. - LF_PRINT_DEBUG("All federates have connected to RTI."); - - if (rti_remote->clock_sync_global_status >= clock_sync_on) { - // Create the thread that performs periodic PTP clock synchronization sessions - // over the UDP channel, but only if the UDP channel is open and at least one - // federate is performing runtime clock synchronization. - bool clock_sync_enabled = false; - for (int i = 0; i < rti_remote->base.number_of_scheduling_nodes; i++) { - federate_info_t *fed_info = GET_FED_INFO(i); - if (fed_info->clock_synchronization_enabled) { - clock_sync_enabled = true; - break; - } - } - if (rti_remote->final_port_UDP != UINT16_MAX && clock_sync_enabled) { - lf_thread_create(&rti_remote->clock_thread, clock_synchronization_thread, NULL); - } + if (rti_remote->final_port_UDP != UINT16_MAX && clock_sync_enabled) { + lf_thread_create(&rti_remote->clock_thread, clock_synchronization_thread, NULL); } + } } -void *respond_to_erroneous_connections(void *nothing) { - while (true) { - // Wait for an incoming connection request. - struct sockaddr client_fd; - uint32_t client_length = sizeof(client_fd); - // The following will block until either a federate attempts to connect - // or close(rti->socket_descriptor_TCP) is called. - int socket_id = accept(rti_remote->socket_descriptor_TCP, &client_fd, &client_length); - if (socket_id < 0) return NULL; - - if (rti_remote->all_federates_exited) { - return NULL; - } +void* respond_to_erroneous_connections(void* nothing) { + initialize_lf_thread_id(); + while (true) { + // Wait for an incoming connection request. 
+ struct sockaddr client_fd; + uint32_t client_length = sizeof(client_fd); + // The following will block until either a federate attempts to connect + // or close(rti->socket_descriptor_TCP) is called. + int socket_id = accept(rti_remote->socket_descriptor_TCP, &client_fd, &client_length); + if (socket_id < 0) + return NULL; - lf_print_error("RTI received an unexpected connection request. Federation is running."); - unsigned char response[2]; - response[0] = MSG_TYPE_REJECT; - response[1] = FEDERATION_ID_DOES_NOT_MATCH; - // Ignore errors on this response. - if (write_to_socket(socket_id, 2, response)) { - lf_print_warning("RTI failed to write FEDERATION_ID_DOES_NOT_MATCH to erroneous incoming connection."); - } - // Close the socket. - shutdown(socket_id, SHUT_RDWR); - close(socket_id); + if (rti_remote->all_federates_exited) { + return NULL; + } + + lf_print_error("RTI received an unexpected connection request. Federation is running."); + unsigned char response[2]; + response[0] = MSG_TYPE_REJECT; + response[1] = FEDERATION_ID_DOES_NOT_MATCH; + // Ignore errors on this response. + if (write_to_socket(socket_id, 2, response)) { + lf_print_warning("RTI failed to write FEDERATION_ID_DOES_NOT_MATCH to erroneous incoming connection."); } - return NULL; + // Close the socket. + shutdown(socket_id, SHUT_RDWR); + close(socket_id); + } + return NULL; } -void initialize_federate(federate_info_t *fed, uint16_t id) { - initialize_scheduling_node(&(fed->enclave), id); - fed->requested_stop = false; - fed->socket = -1; // No socket. - fed->clock_synchronization_enabled = true; - fed->in_transit_message_tags = pqueue_tag_init(10); - strncpy(fed->server_hostname, "localhost", INET_ADDRSTRLEN); - fed->server_ip_addr.s_addr = 0; - fed->server_port = -1; +void initialize_federate(federate_info_t* fed, uint16_t id) { + initialize_scheduling_node(&(fed->enclave), id); + fed->requested_stop = false; + fed->socket = -1; // No socket. 
+ fed->clock_synchronization_enabled = true; + fed->in_transit_message_tags = pqueue_tag_init(10); + strncpy(fed->server_hostname, "localhost", INET_ADDRSTRLEN); + fed->server_ip_addr.s_addr = 0; + fed->server_port = -1; } int32_t start_rti_server(uint16_t port) { - _lf_initialize_clock(); - // Create the TCP socket server - rti_remote->socket_descriptor_TCP = create_rti_server(port, TCP); - lf_print("RTI: Listening for federates."); - // Create the UDP socket server - // Try to get the rti_remote->final_port_TCP + 1 port - if (rti_remote->clock_sync_global_status >= clock_sync_on) { - rti_remote->socket_descriptor_UDP = create_rti_server(rti_remote->final_port_TCP + 1, UDP); - } - return rti_remote->socket_descriptor_TCP; + _lf_initialize_clock(); + // Create the TCP socket server + rti_remote->socket_descriptor_TCP = create_rti_server(port, TCP); + lf_print("RTI: Listening for federates."); + // Create the UDP socket server + // Try to get the rti_remote->final_port_TCP + 1 port + if (rti_remote->clock_sync_global_status >= clock_sync_on) { + rti_remote->socket_descriptor_UDP = create_rti_server(rti_remote->final_port_TCP + 1, UDP); + } + return rti_remote->socket_descriptor_TCP; } void wait_for_federates(int socket_descriptor) { - // Wait for connections from federates and create a thread for each. - lf_connect_to_federates(socket_descriptor); - - // All federates have connected. - lf_print("RTI: All expected federates have connected. Starting execution."); - - // The socket server will not continue to accept connections after all the federates - // have joined. - // In case some other federation's federates are trying to join the wrong - // federation, need to respond. Start a separate thread to do that. - lf_thread_t responder_thread; - lf_thread_create(&responder_thread, respond_to_erroneous_connections, NULL); - - // Wait for federate threads to exit. 
- void *thread_exit_status; - for (int i = 0; i < rti_remote->base.number_of_scheduling_nodes; i++) { - federate_info_t *fed = GET_FED_INFO(i); - lf_print("RTI: Waiting for thread handling federate %d.", fed->enclave.id); - lf_thread_join(fed->thread_id, &thread_exit_status); - pqueue_tag_free(fed->in_transit_message_tags); - lf_print("RTI: Federate %d thread exited.", fed->enclave.id); - } - - rti_remote->all_federates_exited = true; - - // Shutdown and close the socket that is listening for incoming connections - // so that the accept() call in respond_to_erroneous_connections returns. - // That thread should then check rti->all_federates_exited and it should exit. - if (shutdown(socket_descriptor, SHUT_RDWR)) { - LF_PRINT_LOG("On shut down TCP socket, received reply: %s", strerror(errno)); - } - // NOTE: In all common TCP/IP stacks, there is a time period, - // typically between 30 and 120 seconds, called the TIME_WAIT period, - // before the port is released after this close. This is because - // the OS is preventing another program from accidentally receiving - // duplicated packets intended for this program. - close(socket_descriptor); - - if (rti_remote->socket_descriptor_UDP > 0) { - if (shutdown(rti_remote->socket_descriptor_UDP, SHUT_RDWR)) { - LF_PRINT_LOG("On shut down UDP socket, received reply: %s", strerror(errno)); - } - close(rti_remote->socket_descriptor_UDP); - } + // Wait for connections from federates and create a thread for each. + lf_connect_to_federates(socket_descriptor); + + // All federates have connected. + lf_print("RTI: All expected federates have connected. Starting execution."); + + // The socket server will not continue to accept connections after all the federates + // have joined. + // In case some other federation's federates are trying to join the wrong + // federation, need to respond. Start a separate thread to do that. 
+ lf_thread_t responder_thread; + lf_thread_create(&responder_thread, respond_to_erroneous_connections, NULL); + + // Wait for federate threads to exit. + void* thread_exit_status; + for (int i = 0; i < rti_remote->base.number_of_scheduling_nodes; i++) { + federate_info_t* fed = GET_FED_INFO(i); + lf_print("RTI: Waiting for thread handling federate %d.", fed->enclave.id); + lf_thread_join(fed->thread_id, &thread_exit_status); + pqueue_tag_free(fed->in_transit_message_tags); + lf_print("RTI: Federate %d thread exited.", fed->enclave.id); + } + + rti_remote->all_federates_exited = true; + + // Shutdown and close the socket that is listening for incoming connections + // so that the accept() call in respond_to_erroneous_connections returns. + // That thread should then check rti->all_federates_exited and it should exit. + if (shutdown(socket_descriptor, SHUT_RDWR)) { + LF_PRINT_LOG("On shut down TCP socket, received reply: %s", strerror(errno)); + } + // NOTE: In all common TCP/IP stacks, there is a time period, + // typically between 30 and 120 seconds, called the TIME_WAIT period, + // before the port is released after this close. This is because + // the OS is preventing another program from accidentally receiving + // duplicated packets intended for this program. 
+ close(socket_descriptor); + + if (rti_remote->socket_descriptor_UDP > 0) { + if (shutdown(rti_remote->socket_descriptor_UDP, SHUT_RDWR)) { + LF_PRINT_LOG("On shut down UDP socket, received reply: %s", strerror(errno)); + } + close(rti_remote->socket_descriptor_UDP); + } } -void initialize_RTI(rti_remote_t *rti) { - rti_remote = rti; - - // Initialize thread synchronization primitives - LF_MUTEX_INIT(&rti_mutex); - LF_COND_INIT(&received_start_times, &rti_mutex); - LF_COND_INIT(&sent_start_time, &rti_mutex); - - initialize_rti_common(&rti_remote->base); - rti_remote->base.mutex = &rti_mutex; - - // federation_rti related initializations - rti_remote->max_start_time = 0LL; - rti_remote->num_feds_proposed_start = 0; - rti_remote->all_federates_exited = false; - rti_remote->federation_id = "Unidentified Federation"; - rti_remote->user_specified_port = 0; - rti_remote->final_port_TCP = 0; - rti_remote->socket_descriptor_TCP = -1; - rti_remote->final_port_UDP = UINT16_MAX; - rti_remote->socket_descriptor_UDP = -1; - rti_remote->clock_sync_global_status = clock_sync_init; - rti_remote->clock_sync_period_ns = MSEC(10); - rti_remote->clock_sync_exchanges_per_interval = 10; - rti_remote->authentication_enabled = false; - rti_remote->base.tracing_enabled = false; - rti_remote->stop_in_progress = false; +void initialize_RTI(rti_remote_t* rti) { + rti_remote = rti; + + // Initialize thread synchronization primitives + LF_MUTEX_INIT(&rti_mutex); + LF_COND_INIT(&received_start_times, &rti_mutex); + LF_COND_INIT(&sent_start_time, &rti_mutex); + + initialize_rti_common(&rti_remote->base); + rti_remote->base.mutex = &rti_mutex; + + // federation_rti related initializations + rti_remote->max_start_time = 0LL; + rti_remote->num_feds_proposed_start = 0; + rti_remote->all_federates_exited = false; + rti_remote->federation_id = "Unidentified Federation"; + rti_remote->user_specified_port = 0; + rti_remote->final_port_TCP = 0; + rti_remote->socket_descriptor_TCP = -1; + 
rti_remote->final_port_UDP = UINT16_MAX; + rti_remote->socket_descriptor_UDP = -1; + rti_remote->clock_sync_global_status = clock_sync_init; + rti_remote->clock_sync_period_ns = MSEC(10); + rti_remote->clock_sync_exchanges_per_interval = 10; + rti_remote->authentication_enabled = false; + rti_remote->base.tracing_enabled = false; + rti_remote->stop_in_progress = false; } -void free_scheduling_nodes(scheduling_node_t **scheduling_nodes, uint16_t number_of_scheduling_nodes) { - for (uint16_t i = 0; i < number_of_scheduling_nodes; i++) { - // FIXME: Gives error freeing memory not allocated!!!! - scheduling_node_t *node = scheduling_nodes[i]; - if (node->immediate_upstreams != NULL) - free(node->immediate_upstreams); - if (node->immediate_downstreams != NULL) - free(node->immediate_downstreams); - } - free(scheduling_nodes); +void free_scheduling_nodes(scheduling_node_t** scheduling_nodes, uint16_t number_of_scheduling_nodes) { + for (uint16_t i = 0; i < number_of_scheduling_nodes; i++) { + // FIXME: Gives error freeing memory not allocated!!!! + scheduling_node_t* node = scheduling_nodes[i]; + if (node->immediate_upstreams != NULL) + free(node->immediate_upstreams); + if (node->immediate_downstreams != NULL) + free(node->immediate_downstreams); + } + free(scheduling_nodes); } #endif // STANDALONE_RTI diff --git a/core/federated/RTI/rti_remote.h b/core/federated/RTI/rti_remote.h index 9303da42d..a83179f62 100644 --- a/core/federated/RTI/rti_remote.h +++ b/core/federated/RTI/rti_remote.h @@ -38,10 +38,7 @@ ///////////////////////////////////////////// //// Data structures -typedef enum socket_type_t { - TCP, - UDP -} socket_type_t; +typedef enum socket_type_t { TCP, UDP } socket_type_t; /** * Information about a federate known to the RTI, including its runtime state, @@ -52,37 +49,31 @@ typedef enum socket_type_t { * any scheduling constraints. 
*/ typedef struct federate_info_t { - scheduling_node_t enclave; - bool requested_stop; // Indicates that the federate has requested stop or has replied - // to a request for stop from the RTI. Used to prevent double-counting - // a federate when handling lf_request_stop(). - lf_thread_t thread_id; // The ID of the thread handling communication with this federate. - int socket; // The TCP socket descriptor for communicating with this federate. - struct sockaddr_in UDP_addr; // The UDP address for the federate. - bool clock_synchronization_enabled; // Indicates the status of clock synchronization - // for this federate. Enabled by default. - pqueue_tag_t* in_transit_message_tags; // Record of in-transit messages to this federate that are not - // yet processed. This record is ordered based on the time - // value of each message for a more efficient access. - char server_hostname[INET_ADDRSTRLEN]; // Human-readable IP address and - int32_t server_port; // port number of the socket server of the federate - // if it has any incoming direct connections from other federates. - // The port number will be -1 if there is no server or if the - // RTI has not been informed of the port number. - struct in_addr server_ip_addr; // Information about the IP address of the socket - // server of the federate. + scheduling_node_t enclave; + bool requested_stop; // Indicates that the federate has requested stop or has replied + // to a request for stop from the RTI. Used to prevent double-counting + // a federate when handling lf_request_stop(). + lf_thread_t thread_id; // The ID of the thread handling communication with this federate. + int socket; // The TCP socket descriptor for communicating with this federate. + struct sockaddr_in UDP_addr; // The UDP address for the federate. + bool clock_synchronization_enabled; // Indicates the status of clock synchronization + // for this federate. Enabled by default. 
+ pqueue_tag_t* in_transit_message_tags; // Record of in-transit messages to this federate that are not + // yet processed. This record is ordered based on the time + // value of each message for a more efficient access. + char server_hostname[INET_ADDRSTRLEN]; // Human-readable IP address and + int32_t server_port; // port number of the socket server of the federate + // if it has any incoming direct connections from other federates. + // The port number will be -1 if there is no server or if the + // RTI has not been informed of the port number. + struct in_addr server_ip_addr; // Information about the IP address of the socket + // server of the federate. } federate_info_t; - - /** * The status of clock synchronization. */ -typedef enum clock_sync_stat { - clock_sync_off, - clock_sync_init, - clock_sync_on -} clock_sync_stat; +typedef enum clock_sync_stat { clock_sync_off, clock_sync_init, clock_sync_on } clock_sync_stat; /** * Structure that an RTI instance uses to keep track of its own and its @@ -98,75 +89,75 @@ typedef enum clock_sync_stat { * // ************************************************** */ typedef struct rti_remote_t { - rti_common_t base; - // Maximum start time seen so far from the federates. - int64_t max_start_time; - - // Number of federates that have proposed start times. - int num_feds_proposed_start; - - /** - * Boolean indicating that all federates have exited. - * This gets set to true exactly once before the program exits. - * It is marked volatile because the write is not guarded by a mutex. - * The main thread makes this true, then calls shutdown and close on - * the socket, which will cause accept() to return with an error code - * in respond_to_erroneous_connections(). - */ - volatile bool all_federates_exited; - - /** - * The ID of the federation that this RTI will supervise. - * This should be overridden with a command-line -i option to ensure - * that each federate only joins its assigned federation. 
- */ - const char* federation_id; - - /************* TCP server information *************/ - /** The desired port specified by the user on the command line. */ - uint16_t user_specified_port; - - /** The final port number that the TCP socket server ends up using. */ - uint16_t final_port_TCP; - - /** The TCP socket descriptor for the socket server. */ - int socket_descriptor_TCP; - - /************* UDP server information *************/ - /** The final port number that the UDP socket server ends up using. */ - uint16_t final_port_UDP; - - /** The UDP socket descriptor for the socket server. */ - int socket_descriptor_UDP; - - /************* Clock synchronization information *************/ - /* Thread performing PTP clock sync sessions periodically. */ - lf_thread_t clock_thread; - - /** - * Indicates whether clock sync is globally on for the federation. Federates - * can still selectively disable clock synchronization if they wanted to. - */ - clock_sync_stat clock_sync_global_status; - - /** - * Frequency (period in nanoseconds) between clock sync attempts. - */ - uint64_t clock_sync_period_ns; - - /** - * Number of messages exchanged for each clock sync attempt. - */ - int32_t clock_sync_exchanges_per_interval; - - /** - * Boolean indicating that authentication is enabled. - */ - bool authentication_enabled; - /** - * Boolean indicating that a stop request is already in progress. - */ - bool stop_in_progress; + rti_common_t base; + // Maximum start time seen so far from the federates. + int64_t max_start_time; + + // Number of federates that have proposed start times. + int num_feds_proposed_start; + + /** + * Boolean indicating that all federates have exited. + * This gets set to true exactly once before the program exits. + * It is marked volatile because the write is not guarded by a mutex. 
+ * The main thread makes this true, then calls shutdown and close on + * the socket, which will cause accept() to return with an error code + * in respond_to_erroneous_connections(). + */ + volatile bool all_federates_exited; + + /** + * The ID of the federation that this RTI will supervise. + * This should be overridden with a command-line -i option to ensure + * that each federate only joins its assigned federation. + */ + const char* federation_id; + + /************* TCP server information *************/ + /** The desired port specified by the user on the command line. */ + uint16_t user_specified_port; + + /** The final port number that the TCP socket server ends up using. */ + uint16_t final_port_TCP; + + /** The TCP socket descriptor for the socket server. */ + int socket_descriptor_TCP; + + /************* UDP server information *************/ + /** The final port number that the UDP socket server ends up using. */ + uint16_t final_port_UDP; + + /** The UDP socket descriptor for the socket server. */ + int socket_descriptor_UDP; + + /************* Clock synchronization information *************/ + /* Thread performing PTP clock sync sessions periodically. */ + lf_thread_t clock_thread; + + /** + * Indicates whether clock sync is globally on for the federation. Federates + * can still selectively disable clock synchronization if they wanted to. + */ + clock_sync_stat clock_sync_global_status; + + /** + * Frequency (period in nanoseconds) between clock sync attempts. + */ + uint64_t clock_sync_period_ns; + + /** + * Number of messages exchanged for each clock sync attempt. + */ + int32_t clock_sync_exchanges_per_interval; + + /** + * Boolean indicating that authentication is enabled. + */ + bool authentication_enabled; + /** + * Boolean indicating that a stop request is already in progress. + */ + bool stop_in_progress; } rti_remote_t; /** @@ -300,7 +291,7 @@ void handle_address_ad(uint16_t federate_id); * A function to handle timestamp messages. 
* This function assumes the caller does not hold the mutex. */ -void handle_timestamp(federate_info_t *my_fed); +void handle_timestamp(federate_info_t* my_fed); /** * Take a snapshot of the physical clock time and send @@ -371,7 +362,7 @@ void lf_connect_to_federates(int socket_descriptor); */ void* respond_to_erroneous_connections(void* nothing); -/** +/** * Initialize the federate with the specified ID. * @param id The federate ID. */ @@ -419,7 +410,7 @@ int process_args(int argc, const char* argv[]); /** * Initialize the _RTI instance. */ -void initialize_RTI(rti_remote_t *rti); +void initialize_RTI(rti_remote_t* rti); #endif // RTI_REMOTE_H #endif // STANDALONE_RTI \ No newline at end of file diff --git a/core/federated/clock-sync.c b/core/federated/clock-sync.c index ada5d858e..ddd845d6f 100644 --- a/core/federated/clock-sync.c +++ b/core/federated/clock-sync.c @@ -38,7 +38,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include #include -#include "platform.h" +#include "low_level_platform.h" #include "clock-sync.h" #include "net_common.h" #include "net_util.h" @@ -53,16 +53,14 @@ interval_t _lf_clock_sync_constant_bias = NSEC(0); * Keep a record of connection statistics * and the remote physical clock of the RTI. 
*/ -socket_stat_t _lf_rti_socket_stat = { - .remote_physical_clock_snapshot_T1 = NEVER, - .local_physical_clock_snapshot_T2 = NEVER, - .local_delay = 0LL, - .received_T4_messages_in_current_sync_window = 0, - .history = 0LL, - .network_stat_round_trip_delay_max = 0LL, - .network_stat_sample_index = 0, - .clock_synchronization_error_bound = 0LL -}; +socket_stat_t _lf_rti_socket_stat = {.remote_physical_clock_snapshot_T1 = NEVER, + .local_physical_clock_snapshot_T2 = NEVER, + .local_delay = 0LL, + .received_T4_messages_in_current_sync_window = 0, + .history = 0LL, + .network_stat_round_trip_delay_max = 0LL, + .network_stat_sample_index = 0, + .clock_synchronization_error_bound = 0LL}; /** * Records the physical time at which the clock of this federate was @@ -83,7 +81,7 @@ int _lf_rti_socket_UDP = -1; * This needs to be atomic to be thread safe, particularly on 32-bit platforms. */ static void adjust_lf_clock_sync_offset(interval_t adjustment) { - lf_atomic_fetch_add64(&_lf_clock_sync_offset, adjustment); + lf_atomic_fetch_add64(&_lf_clock_sync_offset, adjustment); } #ifdef _LF_CLOCK_SYNC_COLLECT_STATS @@ -96,21 +94,20 @@ static void adjust_lf_clock_sync_offset(interval_t adjustment) { * @param clock_synchronization_error The newly calculated clock synchronization error relative to * the remote federate/RTI */ -void update_socket_stat(socket_stat_t* socket_stat, - long long network_round_trip_delay, +void update_socket_stat(socket_stat_t* socket_stat, long long network_round_trip_delay, long long clock_synchronization_error) { - // Add the data point - socket_stat->network_stat_samples[socket_stat->network_stat_sample_index] = network_round_trip_delay; - socket_stat->network_stat_sample_index++; - - // Calculate maximums - if (socket_stat->network_stat_round_trip_delay_max < network_round_trip_delay) { - socket_stat->network_stat_round_trip_delay_max = network_round_trip_delay; - } - - if (socket_stat->clock_synchronization_error_bound < clock_synchronization_error) 
{ - socket_stat->clock_synchronization_error_bound = clock_synchronization_error; - } + // Add the data point + socket_stat->network_stat_samples[socket_stat->network_stat_sample_index] = network_round_trip_delay; + socket_stat->network_stat_sample_index++; + + // Calculate maximums + if (socket_stat->network_stat_round_trip_delay_max < network_round_trip_delay) { + socket_stat->network_stat_round_trip_delay_max = network_round_trip_delay; + } + + if (socket_stat->clock_synchronization_error_bound < clock_synchronization_error) { + socket_stat->clock_synchronization_error_bound = clock_synchronization_error; + } } /** @@ -120,23 +117,23 @@ void update_socket_stat(socket_stat_t* socket_stat, * @param socket_stat The socket_stat_t struct that keeps track of stats for a given connection */ lf_stat_ll calculate_socket_stat(struct socket_stat_t* socket_stat) { - // Initialize the stat struct - lf_stat_ll stats = {0, 0, 0, 0}; - // Calculate the average and max - for (int i = 0; i < socket_stat->network_stat_sample_index; i++) { - if (socket_stat->network_stat_samples[i] > stats.max) { - stats.max = socket_stat->network_stat_samples[i]; - } - stats.average += socket_stat->network_stat_samples[i] / socket_stat->network_stat_sample_index; - } - for (int i = 0; i < socket_stat->network_stat_sample_index; i++) { - long long delta = socket_stat->network_stat_samples[i] - stats.average; - stats.variance += powl(delta, 2); + // Initialize the stat struct + lf_stat_ll stats = {0, 0, 0, 0}; + // Calculate the average and max + for (int i = 0; i < socket_stat->network_stat_sample_index; i++) { + if (socket_stat->network_stat_samples[i] > stats.max) { + stats.max = socket_stat->network_stat_samples[i]; } - stats.variance /= socket_stat->network_stat_sample_index; - stats.standard_deviation = sqrtl(stats.variance); - - return stats; + stats.average += socket_stat->network_stat_samples[i] / socket_stat->network_stat_sample_index; + } + for (int i = 0; i < 
socket_stat->network_stat_sample_index; i++) { + long long delta = socket_stat->network_stat_samples[i] - stats.average; + stats.variance += powl(delta, 2); + } + stats.variance /= socket_stat->network_stat_sample_index; + stats.standard_deviation = sqrtl(stats.variance); + + return stats; } #endif @@ -146,9 +143,9 @@ lf_stat_ll calculate_socket_stat(struct socket_stat_t* socket_stat) { * @param socket_stat The socket_stat_t struct that keeps track of stats for a given connection */ void reset_socket_stat(struct socket_stat_t* socket_stat) { - socket_stat->received_T4_messages_in_current_sync_window = 0; - socket_stat->history = 0LL; - socket_stat->network_stat_sample_index = 0; + socket_stat->received_T4_messages_in_current_sync_window = 0; + socket_stat->history = 0LL; + socket_stat->network_stat_sample_index = 0; } /** @@ -161,98 +158,95 @@ void reset_socket_stat(struct socket_stat_t* socket_stat) { * will be sent. */ uint16_t setup_clock_synchronization_with_rti() { - uint16_t port_to_return = UINT16_MAX; + uint16_t port_to_return = UINT16_MAX; #ifdef _LF_CLOCK_SYNC_ON - // Initialize the UDP socket - _lf_rti_socket_UDP = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); - // Initialize the necessary information for the UDP address - struct sockaddr_in federate_UDP_addr; - federate_UDP_addr.sin_family = AF_INET; - federate_UDP_addr.sin_port = htons(0u); // Port 0 indicates to bind that - // it can assign any port to this - // socket. This is okay because - // the port number is then sent - // to the RTI. 
- federate_UDP_addr.sin_addr.s_addr = INADDR_ANY; - if (bind( - _lf_rti_socket_UDP, - (struct sockaddr *) &federate_UDP_addr, - sizeof(federate_UDP_addr)) < 0) { - lf_print_error_system_failure("Failed to bind its UDP socket."); - } - // Retrieve the port number that was assigned by the operating system - socklen_t addr_length = sizeof(federate_UDP_addr); - if (getsockname(_lf_rti_socket_UDP, (struct sockaddr *)&federate_UDP_addr, &addr_length) == -1) { - // FIXME: Send 0 UDP_PORT message instead of exiting. - // That will disable clock synchronization. - lf_print_error_system_failure("Failed to retrieve UDP port."); - } - LF_PRINT_DEBUG("Assigned UDP port number %u to its socket.", ntohs(federate_UDP_addr.sin_port)); - - port_to_return = ntohs(federate_UDP_addr.sin_port); - - // Set the option for this socket to reuse the same address - int option_value = 1; - if (setsockopt(_lf_rti_socket_UDP, SOL_SOCKET, SO_REUSEADDR, &option_value, sizeof(int)) < 0) { - lf_print_error("Failed to set SO_REUSEADDR option on the socket: %s.", strerror(errno)); - } - // Set the timeout on the UDP socket so that read and write operations don't block for too long - struct timeval timeout_time = {.tv_sec = UDP_TIMEOUT_TIME / BILLION, .tv_usec = (UDP_TIMEOUT_TIME % BILLION) / 1000}; - if (setsockopt(_lf_rti_socket_UDP, SOL_SOCKET, SO_RCVTIMEO, (const char*)&timeout_time, sizeof(timeout_time)) < 0) { - lf_print_error("Failed to set SO_RCVTIMEO option on the socket: %s.", strerror(errno)); - } - if (setsockopt(_lf_rti_socket_UDP, SOL_SOCKET, SO_SNDTIMEO, (const char*)&timeout_time, sizeof(timeout_time)) < 0) { - lf_print_error("Failed to set SO_SNDTIMEO option on the socket: %s.", strerror(errno)); - } + // Initialize the UDP socket + _lf_rti_socket_UDP = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); + // Initialize the necessary information for the UDP address + struct sockaddr_in federate_UDP_addr; + federate_UDP_addr.sin_family = AF_INET; + federate_UDP_addr.sin_port = htons(0u); // Port 
0 indicates to bind that + // it can assign any port to this + // socket. This is okay because + // the port number is then sent + // to the RTI. + federate_UDP_addr.sin_addr.s_addr = INADDR_ANY; + if (bind(_lf_rti_socket_UDP, (struct sockaddr*)&federate_UDP_addr, sizeof(federate_UDP_addr)) < 0) { + lf_print_error_system_failure("Failed to bind its UDP socket."); + } + // Retrieve the port number that was assigned by the operating system + socklen_t addr_length = sizeof(federate_UDP_addr); + if (getsockname(_lf_rti_socket_UDP, (struct sockaddr*)&federate_UDP_addr, &addr_length) == -1) { + // FIXME: Send 0 UDP_PORT message instead of exiting. + // That will disable clock synchronization. + lf_print_error_system_failure("Failed to retrieve UDP port."); + } + LF_PRINT_DEBUG("Assigned UDP port number %u to its socket.", ntohs(federate_UDP_addr.sin_port)); + + port_to_return = ntohs(federate_UDP_addr.sin_port); + + // Set the option for this socket to reuse the same address + int option_value = 1; + if (setsockopt(_lf_rti_socket_UDP, SOL_SOCKET, SO_REUSEADDR, &option_value, sizeof(int)) < 0) { + lf_print_error("Failed to set SO_REUSEADDR option on the socket: %s.", strerror(errno)); + } + // Set the timeout on the UDP socket so that read and write operations don't block for too long + struct timeval timeout_time = {.tv_sec = UDP_TIMEOUT_TIME / BILLION, .tv_usec = (UDP_TIMEOUT_TIME % BILLION) / 1000}; + if (setsockopt(_lf_rti_socket_UDP, SOL_SOCKET, SO_RCVTIMEO, (const char*)&timeout_time, sizeof(timeout_time)) < 0) { + lf_print_error("Failed to set SO_RCVTIMEO option on the socket: %s.", strerror(errno)); + } + if (setsockopt(_lf_rti_socket_UDP, SOL_SOCKET, SO_SNDTIMEO, (const char*)&timeout_time, sizeof(timeout_time)) < 0) { + lf_print_error("Failed to set SO_SNDTIMEO option on the socket: %s.", strerror(errno)); + } #else // No runtime clock synchronization. Send port -1 or 0 instead. 
#ifdef _LF_CLOCK_SYNC_INITIAL - port_to_return = 0u; + port_to_return = 0u; #endif #endif // _LF_CLOCK_SYNC_ON - return port_to_return; + return port_to_return; } void synchronize_initial_physical_clock_with_rti(int* rti_socket_TCP) { - LF_PRINT_DEBUG("Waiting for initial clock synchronization messages from the RTI."); + LF_PRINT_DEBUG("Waiting for initial clock synchronization messages from the RTI."); - size_t message_size = 1 + sizeof(instant_t); - unsigned char buffer[message_size]; + size_t message_size = 1 + sizeof(instant_t); + unsigned char buffer[message_size]; - for (int i=0; i < _LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL; i++) { - // The first message expected from the RTI is MSG_TYPE_CLOCK_SYNC_T1 - read_from_socket_fail_on_error(rti_socket_TCP, message_size, buffer, NULL, - "Federate %d did not get the initial clock synchronization message T1 from the RTI.", - _lf_my_fed_id); + for (int i = 0; i < _LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL; i++) { + // The first message expected from the RTI is MSG_TYPE_CLOCK_SYNC_T1 + read_from_socket_fail_on_error(rti_socket_TCP, message_size, buffer, NULL, + "Federate %d did not get the initial clock synchronization message T1 from the RTI.", + _lf_my_fed_id); - // Get local physical time before doing anything else. - instant_t receive_time = lf_time_physical(); + // Get local physical time before doing anything else. + instant_t receive_time = lf_time_physical(); - // Check that this is the T1 message. - if (buffer[0] != MSG_TYPE_CLOCK_SYNC_T1) { - lf_print_error_and_exit("Initial clock sync: Expected T1 message from RTI. Got %x.", buffer[0]); - } - // Handle the message and send a reply T3 message. - // NOTE: No need to acquire the mutex lock during initialization because only - // one thread is running. 
- if (handle_T1_clock_sync_message(buffer, *rti_socket_TCP, receive_time) != 0) { - lf_print_error_and_exit("Initial clock sync: Failed to send T3 reply to RTI."); - } - - // Next message from the RTI is required to be MSG_TYPE_CLOCK_SYNC_T4 - read_from_socket_fail_on_error(rti_socket_TCP, message_size, buffer, NULL, - "Federate %d did not get the clock synchronization message T4 from the RTI.", - _lf_my_fed_id); + // Check that this is the T1 message. + if (buffer[0] != MSG_TYPE_CLOCK_SYNC_T1) { + lf_print_error_and_exit("Initial clock sync: Expected T1 message from RTI. Got %x.", buffer[0]); + } + // Handle the message and send a reply T3 message. + // NOTE: No need to acquire the mutex lock during initialization because only + // one thread is running. + if (handle_T1_clock_sync_message(buffer, *rti_socket_TCP, receive_time) != 0) { + lf_print_error_and_exit("Initial clock sync: Failed to send T3 reply to RTI."); + } - // Check that this is the T4 message. - if (buffer[0] != MSG_TYPE_CLOCK_SYNC_T4) { - lf_print_error_and_exit("Federate %d expected T4 message from RTI. Got %x.", _lf_my_fed_id, buffer[0]); - } + // Next message from the RTI is required to be MSG_TYPE_CLOCK_SYNC_T4 + read_from_socket_fail_on_error(rti_socket_TCP, message_size, buffer, NULL, + "Federate %d did not get the clock synchronization message T4 from the RTI.", + _lf_my_fed_id); - // Handle the message. - handle_T4_clock_sync_message(buffer, *rti_socket_TCP, receive_time); + // Check that this is the T4 message. + if (buffer[0] != MSG_TYPE_CLOCK_SYNC_T4) { + lf_print_error_and_exit("Federate %d expected T4 message from RTI. Got %x.", _lf_my_fed_id, buffer[0]); } - LF_PRINT_LOG("Finished initial clock synchronization with the RTI."); + // Handle the message. 
+ handle_T4_clock_sync_message(buffer, *rti_socket_TCP, receive_time); + } + + LF_PRINT_LOG("Finished initial clock synchronization with the RTI."); } /** @@ -267,35 +261,35 @@ void synchronize_initial_physical_clock_with_rti(int* rti_socket_TCP) { * @return 0 if T3 reply is successfully sent, -1 otherwise. */ int handle_T1_clock_sync_message(unsigned char* buffer, int socket, instant_t t2) { - // Extract the payload - instant_t t1 = extract_int64(&(buffer[1])); - - LF_PRINT_DEBUG("Received T1 message with time payload " PRINTF_TIME " from RTI at local time " PRINTF_TIME ".", - t1, t2); - - // Store snapshots of remote (master) and local physical clock - _lf_rti_socket_stat.remote_physical_clock_snapshot_T1 = t1; - _lf_rti_socket_stat.local_physical_clock_snapshot_T2 = t2; - // Send a message to the RTI and calculate the local delay - // T3-T2 between receiving the T1 message and replying. - - // Reply will have the federate ID as a payload. - unsigned char reply_buffer[1 + sizeof(int)]; - reply_buffer[0] = MSG_TYPE_CLOCK_SYNC_T3; - encode_int32(_lf_my_fed_id, &(reply_buffer[1])); - - // Write the reply to the socket. - LF_PRINT_DEBUG("Sending T3 message to RTI."); - if (write_to_socket(socket, 1 + sizeof(int), reply_buffer)) { - lf_print_error("Clock sync: Failed to send T3 message to RTI."); - return -1; - } - - // Measure the time _after_ the write on the assumption that the read - // from the socket, which occurs before this function is called, takes - // about the same amount of time as the write of the reply. 
- _lf_rti_socket_stat.local_delay = lf_time_physical() - t2; - return 0; + // Extract the payload + instant_t t1 = extract_int64(&(buffer[1])); + + LF_PRINT_DEBUG("Received T1 message with time payload " PRINTF_TIME " from RTI at local time " PRINTF_TIME ".", t1, + t2); + + // Store snapshots of remote (master) and local physical clock + _lf_rti_socket_stat.remote_physical_clock_snapshot_T1 = t1; + _lf_rti_socket_stat.local_physical_clock_snapshot_T2 = t2; + // Send a message to the RTI and calculate the local delay + // T3-T2 between receiving the T1 message and replying. + + // Reply will have the federate ID as a payload. + unsigned char reply_buffer[1 + sizeof(int)]; + reply_buffer[0] = MSG_TYPE_CLOCK_SYNC_T3; + encode_int32(_lf_my_fed_id, &(reply_buffer[1])); + + // Write the reply to the socket. + LF_PRINT_DEBUG("Sending T3 message to RTI."); + if (write_to_socket(socket, 1 + sizeof(int), reply_buffer)) { + lf_print_error("Clock sync: Failed to send T3 message to RTI."); + return -1; + } + + // Measure the time _after_ the write on the assumption that the read + // from the socket, which occurs before this function is called, takes + // about the same amount of time as the write of the reply. + _lf_rti_socket_stat.local_delay = lf_time_physical() - t2; + return 0; } /** @@ -315,254 +309,233 @@ int handle_T1_clock_sync_message(unsigned char* buffer, int socket, instant_t t2 * @param r4 The physical time at which this T4 message was received. */ void handle_T4_clock_sync_message(unsigned char* buffer, int socket, instant_t r4) { - // Increment the number of received T4 messages - _lf_rti_socket_stat.received_T4_messages_in_current_sync_window++; - - // Extract the payload - instant_t t4 = extract_int64(&(buffer[1])); - - LF_PRINT_DEBUG("Clock sync: Received T4 message with time payload " PRINTF_TIME - " from RTI at local time " PRINTF_TIME ". 
" - "(difference " PRINTF_TIME ")", - t4, r4, r4 - t4); - - // Calculate the round trip delay from T1 to T4: - // (T4 - T1) - (T3 - T2) - interval_t network_round_trip_delay = (t4 - - _lf_rti_socket_stat.remote_physical_clock_snapshot_T1) - - _lf_rti_socket_stat.local_delay; - - - // Estimate the clock synchronization error based on the assumption - // that the channel delay is symmetric: - // one_way_channel_delay - (T2 - T1). - // This number is positive if the clock at the federate (T2) is - // behind the clock at the RTI (T1). - interval_t estimated_clock_error = - network_round_trip_delay/2 - - (_lf_rti_socket_stat.local_physical_clock_snapshot_T2 - - _lf_rti_socket_stat.remote_physical_clock_snapshot_T1); - LF_PRINT_DEBUG("Clock sync: Estimated clock error: " PRINTF_TIME ".", - estimated_clock_error); - - // The adjustment to the clock offset (to be calculated) - interval_t adjustment = 0; - // If the socket is _lf_rti_socket_UDP, then - // after sending T4, the RTI sends a "coded probe" message, - // which can be used to filter out noise. - if (socket == _lf_rti_socket_UDP) { - // Read the coded probe message. - // We can reuse the same buffer. - int read_failed = read_from_socket(socket, 1 + sizeof(instant_t), buffer); - - instant_t r5 = lf_time_physical(); - - if (read_failed || buffer[0] != MSG_TYPE_CLOCK_SYNC_CODED_PROBE) { - lf_print_warning("Clock sync: Did not get the expected coded probe message from the RTI. " - "Skipping clock synchronization round."); - return; - } - // Filter out noise. - instant_t t5 = extract_int64(&(buffer[1])); // Time at the RTI of sending the coded probe. - - // Compare the difference in time at the RTI between sending T4 and the coded probe - // against the difference in time at this federate of receiving these two message. 
- interval_t coded_probe_distance = llabs((r5 - r4) - (t5 - t4)); - - LF_PRINT_DEBUG("Clock sync: Received code probe that reveals a time discrepancy between " - "messages of " PRINTF_TIME ".", - coded_probe_distance); - - // Check against the guard band. - if (coded_probe_distance >= CLOCK_SYNC_GUARD_BAND) { - // Discard this clock sync cycle - LF_PRINT_LOG("Clock sync: Skipping the current clock synchronization cycle " - "due to impure coded probes."); - LF_PRINT_LOG("Clock sync: Coded probe packet stats: " - "Distance: " PRINTF_TIME ". r5 - r4 = " PRINTF_TIME ". t5 - t4 = " PRINTF_TIME ".", - coded_probe_distance, - r5 - r4, - t5 - t4); - _lf_rti_socket_stat.received_T4_messages_in_current_sync_window--; - return; - } - // Apply a jitter attenuator to the estimated clock error to prevent - // large jumps in the underlying clock. - // Note that estimated_clock_error is calculated using lf_time_physical() which includes - // the clock sync adjustment. - adjustment = estimated_clock_error / _LF_CLOCK_SYNC_ATTENUATION; - } else { - // Use of TCP socket means we are in the startup phase, so - // rather than adjust the clock offset, we simply set it to the - // estimated error. - adjustment = estimated_clock_error; + // Increment the number of received T4 messages + _lf_rti_socket_stat.received_T4_messages_in_current_sync_window++; + + // Extract the payload + instant_t t4 = extract_int64(&(buffer[1])); + + LF_PRINT_DEBUG("Clock sync: Received T4 message with time payload " PRINTF_TIME " from RTI at local time " PRINTF_TIME + ". " + "(difference " PRINTF_TIME ")", + t4, r4, r4 - t4); + + // Calculate the round trip delay from T1 to T4: + // (T4 - T1) - (T3 - T2) + interval_t network_round_trip_delay = + (t4 - _lf_rti_socket_stat.remote_physical_clock_snapshot_T1) - _lf_rti_socket_stat.local_delay; + + // Estimate the clock synchronization error based on the assumption + // that the channel delay is symmetric: + // one_way_channel_delay - (T2 - T1). 
+ // This number is positive if the clock at the federate (T2) is + // behind the clock at the RTI (T1). + interval_t estimated_clock_error = + network_round_trip_delay / 2 - + (_lf_rti_socket_stat.local_physical_clock_snapshot_T2 - _lf_rti_socket_stat.remote_physical_clock_snapshot_T1); + LF_PRINT_DEBUG("Clock sync: Estimated clock error: " PRINTF_TIME ".", estimated_clock_error); + + // The adjustment to the clock offset (to be calculated) + interval_t adjustment = 0; + // If the socket is _lf_rti_socket_UDP, then + // after sending T4, the RTI sends a "coded probe" message, + // which can be used to filter out noise. + if (socket == _lf_rti_socket_UDP) { + // Read the coded probe message. + // We can reuse the same buffer. + int read_failed = read_from_socket(socket, 1 + sizeof(instant_t), buffer); + + instant_t r5 = lf_time_physical(); + + if (read_failed || buffer[0] != MSG_TYPE_CLOCK_SYNC_CODED_PROBE) { + lf_print_warning("Clock sync: Did not get the expected coded probe message from the RTI. " + "Skipping clock synchronization round."); + return; } + // Filter out noise. + instant_t t5 = extract_int64(&(buffer[1])); // Time at the RTI of sending the coded probe. + + // Compare the difference in time at the RTI between sending T4 and the coded probe + // against the difference in time at this federate of receiving these two message. + interval_t coded_probe_distance = llabs((r5 - r4) - (t5 - t4)); + + LF_PRINT_DEBUG("Clock sync: Received code probe that reveals a time discrepancy between " + "messages of " PRINTF_TIME ".", + coded_probe_distance); + + // Check against the guard band. + if (coded_probe_distance >= CLOCK_SYNC_GUARD_BAND) { + // Discard this clock sync cycle + LF_PRINT_LOG("Clock sync: Skipping the current clock synchronization cycle " + "due to impure coded probes."); + LF_PRINT_LOG("Clock sync: Coded probe packet stats: " + "Distance: " PRINTF_TIME ". r5 - r4 = " PRINTF_TIME ". 
t5 - t4 = " PRINTF_TIME ".", + coded_probe_distance, r5 - r4, t5 - t4); + _lf_rti_socket_stat.received_T4_messages_in_current_sync_window--; + return; + } + // Apply a jitter attenuator to the estimated clock error to prevent + // large jumps in the underlying clock. + // Note that estimated_clock_error is calculated using lf_time_physical() which includes + // the clock sync adjustment. + adjustment = estimated_clock_error / _LF_CLOCK_SYNC_ATTENUATION; + } else { + // Use of TCP socket means we are in the startup phase, so + // rather than adjust the clock offset, we simply set it to the + // estimated error. + adjustment = estimated_clock_error; + } #ifdef _LF_CLOCK_SYNC_COLLECT_STATS // Enabled by default - // Update RTI's socket stats - update_socket_stat(&_lf_rti_socket_stat, network_round_trip_delay, estimated_clock_error); + // Update RTI's socket stats + update_socket_stat(&_lf_rti_socket_stat, network_round_trip_delay, estimated_clock_error); #endif - // FIXME: Enable alternative regression mechanism here. - LF_PRINT_DEBUG("Clock sync: Adjusting clock offset running average by " PRINTF_TIME ".", - adjustment/_LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL); - // Calculate the running average - _lf_rti_socket_stat.history += adjustment/_LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL; + // FIXME: Enable alternative regression mechanism here. 
+ LF_PRINT_DEBUG("Clock sync: Adjusting clock offset running average by " PRINTF_TIME ".", + adjustment / _LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL); + // Calculate the running average + _lf_rti_socket_stat.history += adjustment / _LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL; - if (_lf_rti_socket_stat.received_T4_messages_in_current_sync_window >= - _LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL) { + if (_lf_rti_socket_stat.received_T4_messages_in_current_sync_window >= _LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL) { - lf_stat_ll stats = {0, 0, 0, 0}; + lf_stat_ll stats = {0, 0, 0, 0}; #ifdef _LF_CLOCK_SYNC_COLLECT_STATS // Enabled by default - stats = calculate_socket_stat(&_lf_rti_socket_stat); - // Issue a warning if standard deviation is high in data - if (stats.standard_deviation >= CLOCK_SYNC_GUARD_BAND) { - // Reset the stats - LF_PRINT_LOG("Clock sync: Large standard deviation detected in network delays (" PRINTF_TIME ") for the current period." - " Clock synchronization offset might not be accurate.", - stats.standard_deviation); - reset_socket_stat(&_lf_rti_socket_stat); - return; - } -#endif - // The number of received T4 messages has reached _LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL - // which means we can now adjust the clock offset. - // For the AVG algorithm, history is a running average and can be directly - // applied - adjust_lf_clock_sync_offset(_lf_rti_socket_stat.history); - // @note AVG and SD will be zero if collect-stats is set to false - LF_PRINT_LOG("Clock sync:" - " New offset: " PRINTF_TIME "." - " Round trip delay to RTI (now): " PRINTF_TIME "." - " (AVG): " PRINTF_TIME "." - " (SD): " PRINTF_TIME "." 
- " Local round trip delay: " PRINTF_TIME ".", - _lf_clock_sync_offset, - network_round_trip_delay, - stats.average, - stats.standard_deviation, - _lf_rti_socket_stat.local_delay); - // Reset the stats - reset_socket_stat(&_lf_rti_socket_stat); - // Set the last instant at which the clocks were synchronized - _lf_last_clock_sync_instant = r4; + stats = calculate_socket_stat(&_lf_rti_socket_stat); + // Issue a warning if standard deviation is high in data + if (stats.standard_deviation >= CLOCK_SYNC_GUARD_BAND) { + // Reset the stats + LF_PRINT_LOG("Clock sync: Large standard deviation detected in network delays (" PRINTF_TIME + ") for the current period." + " Clock synchronization offset might not be accurate.", + stats.standard_deviation); + reset_socket_stat(&_lf_rti_socket_stat); + return; } +#endif + // The number of received T4 messages has reached _LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL + // which means we can now adjust the clock offset. + // For the AVG algorithm, history is a running average and can be directly + // applied + adjust_lf_clock_sync_offset(_lf_rti_socket_stat.history); + // @note AVG and SD will be zero if collect-stats is set to false + LF_PRINT_LOG("Clock sync:" + " New offset: " PRINTF_TIME "." + " Round trip delay to RTI (now): " PRINTF_TIME "." + " (AVG): " PRINTF_TIME "." + " (SD): " PRINTF_TIME "." + " Local round trip delay: " PRINTF_TIME ".", + _lf_clock_sync_offset, network_round_trip_delay, stats.average, stats.standard_deviation, + _lf_rti_socket_stat.local_delay); + // Reset the stats + reset_socket_stat(&_lf_rti_socket_stat); + // Set the last instant at which the clocks were synchronized + _lf_last_clock_sync_instant = r4; + } } /** * Thread that listens for UDP inputs from the RTI. */ void* listen_to_rti_UDP_thread(void* args) { - // Listen for UDP messages from the RTI. - // The only expected messages are T1 and T4, which have - // a payload of a time value. 
- size_t message_size = 1 + sizeof(instant_t); - unsigned char buffer[message_size]; - // This thread will be either waiting for T1 or waiting - // for T4. Track the mode with this variable: - bool waiting_for_T1 = true; - // Even though UDP messages are connectionless, we need to call connect() - // at least once to record the address of the RTI's UDP port. The RTI - // uses bind() to reserve that address, so recording it once is sufficient. - bool connected = false; - while (1) { - struct sockaddr_in RTI_UDP_addr; - socklen_t RTI_UDP_addr_length = sizeof(RTI_UDP_addr); - ssize_t bytes_read = 0; - // Read from the UDP socket - do { - ssize_t bytes = recvfrom(_lf_rti_socket_UDP, // The UDP socket - &buffer[bytes_read], // The buffer to read into - message_size - (size_t)bytes_read, // Number of bytes to read - MSG_WAITALL, // Read the entire datagram - (struct sockaddr*)&RTI_UDP_addr, // Record the RTI's address - &RTI_UDP_addr_length); // The RTI's address length - // Try reading again if errno indicates the need to try again and there are more - // bytes to read. - if (bytes > 0) { - bytes_read += bytes; - } - } while ((errno == EAGAIN || errno == EWOULDBLOCK) && bytes_read < message_size); - - // Get local physical time before doing anything else. - instant_t receive_time = lf_time_physical(); - - if (bytes_read < message_size) { - // Either the socket has closed or the RTI has sent EOF. - // Exit the thread to halt clock synchronization. - lf_print_error("Clock sync: UDP socket to RTI is broken: %s. Clock sync is now disabled.", - strerror(errno)); - break; + initialize_lf_thread_id(); + // Listen for UDP messages from the RTI. + // The only expected messages are T1 and T4, which have + // a payload of a time value. + size_t message_size = 1 + sizeof(instant_t); + unsigned char buffer[message_size]; + // This thread will be either waiting for T1 or waiting + // for T4. 
Track the mode with this variable: + bool waiting_for_T1 = true; + // Even though UDP messages are connectionless, we need to call connect() + // at least once to record the address of the RTI's UDP port. The RTI + // uses bind() to reserve that address, so recording it once is sufficient. + bool connected = false; + while (1) { + struct sockaddr_in RTI_UDP_addr; + socklen_t RTI_UDP_addr_length = sizeof(RTI_UDP_addr); + ssize_t bytes_read = 0; + // Read from the UDP socket + do { + ssize_t bytes = recvfrom(_lf_rti_socket_UDP, // The UDP socket + &buffer[bytes_read], // The buffer to read into + message_size - (size_t)bytes_read, // Number of bytes to read + MSG_WAITALL, // Read the entire datagram + (struct sockaddr*)&RTI_UDP_addr, // Record the RTI's address + &RTI_UDP_addr_length); // The RTI's address length + // Try reading again if errno indicates the need to try again and there are more + // bytes to read. + if (bytes > 0) { + bytes_read += bytes; + } + } while ((errno == EAGAIN || errno == EWOULDBLOCK) && bytes_read < message_size); + + // Get local physical time before doing anything else. + instant_t receive_time = lf_time_physical(); + + if (bytes_read < message_size) { + // Either the socket has closed or the RTI has sent EOF. + // Exit the thread to halt clock synchronization. + lf_print_error("Clock sync: UDP socket to RTI is broken: %s. Clock sync is now disabled.", strerror(errno)); + break; + } + LF_PRINT_DEBUG("Clock sync: Received UDP message %u from RTI on port %u.", buffer[0], ntohs(RTI_UDP_addr.sin_port)); + + // Handle the message + if (waiting_for_T1) { + if (buffer[0] == MSG_TYPE_CLOCK_SYNC_T1) { + waiting_for_T1 = false; + // The reply (or return) address is given in RTI_UDP_addr. + // We utilize the connect() function to set the default address + // of the _lf_rti_socket_UDP socket to RTI_UDP_addr. This is convenient + // because subsequent calls to write_to_socket do not need this address. 
+ // Note that this only needs to be done for handle_T1_clock_sync_message() + // because it is the only function that needs to reply to the RTI. + if (!connected && connect(_lf_rti_socket_UDP, (struct sockaddr*)&RTI_UDP_addr, RTI_UDP_addr_length) < 0) { + lf_print_error("Clock sync: Federate %d failed to register RTI's UDP reply address. " + "Clock synchronization has stopped.", + _lf_my_fed_id); + break; } - LF_PRINT_DEBUG("Clock sync: Received UDP message %u from RTI on port %u.", - buffer[0], ntohs(RTI_UDP_addr.sin_port)); - - // Handle the message - if (waiting_for_T1) { - if (buffer[0] == MSG_TYPE_CLOCK_SYNC_T1) { - waiting_for_T1 = false; - // The reply (or return) address is given in RTI_UDP_addr. - // We utilize the connect() function to set the default address - // of the _lf_rti_socket_UDP socket to RTI_UDP_addr. This is convenient - // because subsequent calls to write_to_socket do not need this address. - // Note that this only needs to be done for handle_T1_clock_sync_message() - // because it is the only function that needs to reply to the RTI. - if (!connected - && connect(_lf_rti_socket_UDP, - (struct sockaddr*)&RTI_UDP_addr, - RTI_UDP_addr_length) < 0) { - lf_print_error("Clock sync: Federate %d failed to register RTI's UDP reply address. " - "Clock synchronization has stopped.", - _lf_my_fed_id); - break; - } - connected = true; - if (handle_T1_clock_sync_message(buffer, _lf_rti_socket_UDP, receive_time) != 0) { - // Failed to send T3 reply. Wait for the next T1. - waiting_for_T1 = true; - continue; - } - } else { - // Waiting for a T1 message, but received something else. Discard message. - lf_print_warning("Clock sync: Received %u message from RTI, but waiting for %u (T1). 
" - "Discarding the message.", - buffer[0], - MSG_TYPE_CLOCK_SYNC_T1); - continue; - } - } else if (buffer[0] == MSG_TYPE_CLOCK_SYNC_T4) { - handle_T4_clock_sync_message(buffer, _lf_rti_socket_UDP, receive_time); - waiting_for_T1 = true; - } else { - lf_print_warning("Clock sync: Received from RTI an unexpected UDP message type: %u. " - "Discarding the message and skipping this round.", - buffer[0]); - // Ignore further clock sync messages until we get a T1. - waiting_for_T1 = true; + connected = true; + if (handle_T1_clock_sync_message(buffer, _lf_rti_socket_UDP, receive_time) != 0) { + // Failed to send T3 reply. Wait for the next T1. + waiting_for_T1 = true; + continue; } + } else { + // Waiting for a T1 message, but received something else. Discard message. + lf_print_warning("Clock sync: Received %u message from RTI, but waiting for %u (T1). " + "Discarding the message.", + buffer[0], MSG_TYPE_CLOCK_SYNC_T1); + continue; + } + } else if (buffer[0] == MSG_TYPE_CLOCK_SYNC_T4) { + handle_T4_clock_sync_message(buffer, _lf_rti_socket_UDP, receive_time); + waiting_for_T1 = true; + } else { + lf_print_warning("Clock sync: Received from RTI an unexpected UDP message type: %u. " + "Discarding the message and skipping this round.", + buffer[0]); + // Ignore further clock sync messages until we get a T1. + waiting_for_T1 = true; } - return NULL; + } + return NULL; } - // If clock synchronization is enabled, provide implementations. If not // just empty implementations that should be optimized away. 
#if defined(FEDERATED) && defined(_LF_CLOCK_SYNC_ON) -void clock_sync_apply_offset(instant_t *t) { - *t += (_lf_clock_sync_offset + _lf_clock_sync_constant_bias); -} +void clock_sync_apply_offset(instant_t* t) { *t += (_lf_clock_sync_offset + _lf_clock_sync_constant_bias); } -void clock_sync_remove_offset(instant_t *t) { - *t -= (_lf_clock_sync_offset + _lf_clock_sync_constant_bias); -} +void clock_sync_remove_offset(instant_t* t) { *t -= (_lf_clock_sync_offset + _lf_clock_sync_constant_bias); } -void clock_sync_set_constant_bias(interval_t offset) { - _lf_clock_sync_constant_bias = offset; -} +void clock_sync_set_constant_bias(interval_t offset) { _lf_clock_sync_constant_bias = offset; } #else -void clock_sync_apply_offset(instant_t *t) { } -void clock_sync_remove_offset(instant_t *t) { } -void clock_sync_set_constant_bias(interval_t offset) { } +void clock_sync_apply_offset(instant_t* t) {} +void clock_sync_remove_offset(instant_t* t) {} +void clock_sync_set_constant_bias(interval_t offset) {} #endif /** @@ -575,10 +548,10 @@ void clock_sync_set_constant_bias(interval_t offset) { } */ int create_clock_sync_thread(lf_thread_t* thread_id) { #ifdef _LF_CLOCK_SYNC_ON - // One for UDP messages if clock synchronization is enabled for this federate - return lf_thread_create(thread_id, listen_to_rti_UDP_thread, NULL); + // One for UDP messages if clock synchronization is enabled for this federate + return lf_thread_create(thread_id, listen_to_rti_UDP_thread, NULL); #endif // _LF_CLOCK_SYNC_ON - return 0; + return 0; } #endif diff --git a/core/federated/federate.c b/core/federated/federate.c index fdb24bbf6..6ba3e7cc9 100644 --- a/core/federated/federate.c +++ b/core/federated/federate.c @@ -18,13 +18,13 @@ #include // Defines getaddrinfo(), freeaddrinfo() and struct addrinfo. 
#include // Defines struct sockaddr_in #include -#include // Defines read(), write(), and close() -#include // Defines memset(), strnlen(), strncmp(), strncpy() -#include // Defines strerror() +#include // Defines read(), write(), and close() +#include // Defines memset(), strnlen(), strncmp(), strncpy() +#include // Defines strerror() #include -#include // Defined perror(), errno -#include // Defines bzero(). +#include // Defined perror(), errno +#include // Defines bzero(). #include "clock-sync.h" #include "federate.h" @@ -35,7 +35,7 @@ #include "reactor_threaded.h" #include "api/schedule.h" #include "scheduler.h" -#include "trace.h" +#include "tracepoint.h" #ifdef FEDERATED_AUTHENTICATED #include // For secure random number generation. @@ -76,32 +76,26 @@ int max_level_allowed_to_advance; * The state of this federate instance. Each executable has exactly one federate instance, * and the _fed global variable refers to that instance. */ -federate_instance_t _fed = { - .socket_TCP_RTI = -1, - .number_of_inbound_p2p_connections = 0, - .inbound_socket_listeners = NULL, - .number_of_outbound_p2p_connections = 0, - .inbound_p2p_handling_thread_id = 0, - .server_socket = -1, - .server_port = -1, - .last_TAG = {.time = NEVER, .microstep = 0u}, - .is_last_TAG_provisional = false, - .has_upstream = false, - .has_downstream = false, - .last_skipped_LTC = (tag_t) {.time = NEVER, .microstep = 0u}, - .last_DNET = (tag_t) {.time = NEVER, .microstep = 0u}, - .received_stop_request_from_rti = false, - .last_sent_LTC = (tag_t) {.time = NEVER, .microstep = 0u}, - .last_sent_NET = (tag_t) {.time = NEVER, .microstep = 0u}, - .min_delay_from_physical_action_to_federate_output = NEVER -}; +federate_instance_t _fed = {.socket_TCP_RTI = -1, + .number_of_inbound_p2p_connections = 0, + .inbound_socket_listeners = NULL, + .number_of_outbound_p2p_connections = 0, + .inbound_p2p_handling_thread_id = 0, + .server_socket = -1, + .server_port = -1, + .last_TAG = {.time = NEVER, .microstep = 0u}, 
+ .is_last_TAG_provisional = false, + .has_upstream = false, + .has_downstream = false, + .last_skipped_LTC = (tag_t){.time = NEVER, .microstep = 0u}, + .last_DNET = (tag_t){.time = NEVER, .microstep = 0u}, + .received_stop_request_from_rti = false, + .last_sent_LTC = (tag_t){.time = NEVER, .microstep = 0u}, + .last_sent_NET = (tag_t){.time = NEVER, .microstep = 0u}, + .min_delay_from_physical_action_to_federate_output = NEVER}; federation_metadata_t federation_metadata = { - .federation_id = "Unidentified Federation", - .rti_host = NULL, - .rti_port = -1, - .rti_user = NULL -}; + .federation_id = "Unidentified Federation", .rti_host = NULL, .rti_port = -1, .rti_user = NULL}; ////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////// @@ -113,20 +107,20 @@ federation_metadata_t federation_metadata = { * @param time The time. */ static void send_time(unsigned char type, instant_t time) { - LF_PRINT_DEBUG("Sending time " PRINTF_TIME " to the RTI.", time); - size_t bytes_to_write = 1 + sizeof(instant_t); - unsigned char buffer[bytes_to_write]; - buffer[0] = type; - encode_int64(time, &(buffer[1])); - - // Trace the event when tracing is enabled - tag_t tag = {.time = time, .microstep = 0}; - tracepoint_federate_to_rti(_fed.trace, send_TIMESTAMP, _lf_my_fed_id, &tag); - - LF_MUTEX_LOCK(&lf_outbound_socket_mutex); - write_to_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_write, buffer, &lf_outbound_socket_mutex, - "Failed to send time " PRINTF_TIME " to the RTI.", time - start_time); - LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); + LF_PRINT_DEBUG("Sending time " PRINTF_TIME " to the RTI.", time); + size_t bytes_to_write = 1 + sizeof(instant_t); + unsigned char buffer[bytes_to_write]; + buffer[0] = type; + encode_int64(time, &(buffer[1])); + + // Trace the event when tracing is enabled + tag_t tag = {.time = time, .microstep = 0}; + 
tracepoint_federate_to_rti(send_TIMESTAMP, _lf_my_fed_id, &tag); + + LF_MUTEX_LOCK(&lf_outbound_socket_mutex); + write_to_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_write, buffer, &lf_outbound_socket_mutex, + "Failed to send time " PRINTF_TIME " to the RTI.", time - start_time); + LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); } /** @@ -136,25 +130,24 @@ static void send_time(unsigned char type, instant_t time) { * @param tag The tag. */ static void send_tag(unsigned char type, tag_t tag) { - LF_PRINT_DEBUG("Sending tag " PRINTF_TAG " to the RTI.", tag.time - start_time, tag.microstep); - size_t bytes_to_write = 1 + sizeof(instant_t) + sizeof(microstep_t); - unsigned char buffer[bytes_to_write]; - buffer[0] = type; - encode_tag(&(buffer[1]), tag); - - LF_MUTEX_LOCK(&lf_outbound_socket_mutex); - if (_fed.socket_TCP_RTI < 0) { - lf_print_warning("Socket is no longer connected. Dropping message."); - LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); - return; - } - trace_event_t event_type = (type == MSG_TYPE_NEXT_EVENT_TAG) ? send_NET : send_LTC; - // Trace the event when tracing is enabled - tracepoint_federate_to_rti(_fed.trace, event_type, _lf_my_fed_id, &tag); - write_to_socket_fail_on_error( - &_fed.socket_TCP_RTI, bytes_to_write, buffer, &lf_outbound_socket_mutex, - "Failed to send tag " PRINTF_TAG " to the RTI.", tag.time - start_time, tag.microstep); + LF_PRINT_DEBUG("Sending tag " PRINTF_TAG " to the RTI.", tag.time - start_time, tag.microstep); + size_t bytes_to_write = 1 + sizeof(instant_t) + sizeof(microstep_t); + unsigned char buffer[bytes_to_write]; + buffer[0] = type; + encode_tag(&(buffer[1]), tag); + + LF_MUTEX_LOCK(&lf_outbound_socket_mutex); + if (_fed.socket_TCP_RTI < 0) { + lf_print_warning("Socket is no longer connected. Dropping message."); LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); + return; + } + trace_event_t event_type = (type == MSG_TYPE_NEXT_EVENT_TAG) ? 
send_NET : send_LTC; + // Trace the event when tracing is enabled + tracepoint_federate_to_rti(event_type, _lf_my_fed_id, &tag); + write_to_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_write, buffer, &lf_outbound_socket_mutex, + "Failed to send tag " PRINTF_TAG " to the RTI.", tag.time - start_time, tag.microstep); + LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); } /** @@ -162,10 +155,12 @@ static void send_tag(unsigned char type, tag_t tag) { * alive and the first unread byte on the socket's queue is MSG_TYPE_FAILED. */ static bool rti_failed() { - unsigned char first_byte; - ssize_t bytes = peek_from_socket(_fed.socket_TCP_RTI, &first_byte); - if (bytes < 0 || (bytes == 1 && first_byte == MSG_TYPE_FAILED)) return true; - else return false; + unsigned char first_byte; + ssize_t bytes = peek_from_socket(_fed.socket_TCP_RTI, &first_byte); + if (bytes < 0 || (bytes == 1 && first_byte == MSG_TYPE_FAILED)) + return true; + else + return false; } //////////////////////////////// Port Status Handling /////////////////////////////////////// @@ -191,11 +186,11 @@ extern size_t staa_lst_size; * @return A pointer to an action struct or null if the ID is out of range. */ static lf_action_base_t* action_for_port(int port_id) { - if (port_id >= 0 && port_id < _lf_action_table_size) { - return _lf_action_table[port_id]; - } - lf_print_error_and_exit("Invalid port ID: %d", port_id); - return NULL; + if (port_id >= 0 && port_id < _lf_action_table_size) { + return _lf_action_table[port_id]; + } + lf_print_error_and_exit("Invalid port ID: %d", port_id); + return NULL; } /** @@ -211,54 +206,49 @@ static lf_action_base_t* action_for_port(int port_id) { * ports is known. */ static void update_last_known_status_on_input_ports(tag_t tag) { - LF_PRINT_DEBUG("In update_last_known_status_on_input ports."); - bool notify = false; - for (int i = 0; i < _lf_action_table_size; i++) { - lf_action_base_t* input_port_action = _lf_action_table[i]; - // This is called when a TAG is received. 
- // But it is possible for an input port to have received already - // a message with a larger tag (if there is an after delay on the - // connection), in which case, the last known status tag of the port - // is in the future and should not be rolled back. So in that case, - // we do not update the last known status tag. - if (lf_tag_compare(tag, - input_port_action->trigger->last_known_status_tag) >= 0) { - LF_PRINT_DEBUG( - "Updating the last known status tag of port %d from " PRINTF_TAG " to " PRINTF_TAG ".", - i, - input_port_action->trigger->last_known_status_tag.time - lf_time_start(), - input_port_action->trigger->last_known_status_tag.microstep, - tag.time - lf_time_start(), - tag.microstep - ); - input_port_action->trigger->last_known_status_tag = tag; - notify = true; - } - } - // FIXME: We could put a condition variable into the trigger_t - // struct for each network input port, in which case this won't - // be a broadcast but rather a targetted signal. - if (notify && lf_update_max_level(tag, false)) { - // Notify network input reactions - lf_cond_broadcast(&lf_port_status_changed); - } + LF_PRINT_DEBUG("In update_last_known_status_on_input ports."); + bool notify = false; + for (int i = 0; i < _lf_action_table_size; i++) { + lf_action_base_t* input_port_action = _lf_action_table[i]; + // This is called when a TAG is received. + // But it is possible for an input port to have received already + // a message with a larger tag (if there is an after delay on the + // connection), in which case, the last known status tag of the port + // is in the future and should not be rolled back. So in that case, + // we do not update the last known status tag. 
+ if (lf_tag_compare(tag, input_port_action->trigger->last_known_status_tag) >= 0) { + LF_PRINT_DEBUG("Updating the last known status tag of port %d from " PRINTF_TAG " to " PRINTF_TAG ".", i, + input_port_action->trigger->last_known_status_tag.time - lf_time_start(), + input_port_action->trigger->last_known_status_tag.microstep, tag.time - lf_time_start(), + tag.microstep); + input_port_action->trigger->last_known_status_tag = tag; + notify = true; + } + } + // FIXME: We could put a condition variable into the trigger_t + // struct for each network input port, in which case this won't + // be a broadcast but rather a targetted signal. + if (notify && lf_update_max_level(tag, false)) { + // Notify network input reactions + lf_cond_broadcast(&lf_port_status_changed); + } } /** * @brief Update the last known status tag of a network input port. - * + * * First, if the specified tag is less than the current_tag of the top-level * environment, then ignore the specified tag and use the current_tag. This * situation can arise if a message has arrived late (an STP violation has occurred). - * + * * If the specified tag is greater than the previous last_known_status_tag * of the port, then update the last_known_status_tag to the new tag. - * + * * If the tag is equal to the previous last_known_status_tag, then * increment the microstep of the last_known_status_tag. This situation can * occur if a sequence of late messages (STP violations) are occurring all at * once during an execution of a logical tag. - * + * * This function is called when a message or absent message arrives. For decentralized * coordination, it is also called by the background thread update_ports_from_staa_offsets * which uses physical time to determine when an input port can be assumed to be absent @@ -272,35 +262,33 @@ static void update_last_known_status_on_input_ports(tag_t tag) { * @param portID The port ID. 
*/ static void update_last_known_status_on_input_port(environment_t* env, tag_t tag, int port_id) { - if (lf_tag_compare(tag, env->current_tag) < 0) tag = env->current_tag; - trigger_t* input_port_action = action_for_port(port_id)->trigger; - int comparison = lf_tag_compare(tag, input_port_action->last_known_status_tag); - if (comparison == 0) tag.microstep++; - if (comparison >= 0) { - LF_PRINT_LOG( - "Updating the last known status tag of port %d from " PRINTF_TAG " to " PRINTF_TAG ".", - port_id, - input_port_action->last_known_status_tag.time - lf_time_start(), - input_port_action->last_known_status_tag.microstep, - tag.time - lf_time_start(), - tag.microstep - ); - input_port_action->last_known_status_tag = tag; - - // Check whether this port update implies a change to MLAA, which may unblock reactions. - // For decentralized coordination, the first argument is NEVER, so it has no effect. - // For centralized, the arguments probably also have no effect, but the port update may. - // Note that it would not be correct to pass `tag` as the first argument because - // there is no guarantee that there is either a TAG or a PTAG for this time. - // The message that triggered this to be called could be from an upstream - // federate that is far ahead of other upstream federates in logical time. - lf_update_max_level(_fed.last_TAG, _fed.is_last_TAG_provisional); - lf_cond_broadcast(&lf_port_status_changed); - } else { - // Message arrivals should be monotonic, so this should not occur. 
- lf_print_warning("Attempt to update the last known status tag " - "of network input port %d to an earlier tag was ignored.", port_id); - } + if (lf_tag_compare(tag, env->current_tag) < 0) + tag = env->current_tag; + trigger_t* input_port_action = action_for_port(port_id)->trigger; + int comparison = lf_tag_compare(tag, input_port_action->last_known_status_tag); + if (comparison == 0) + tag.microstep++; + if (comparison >= 0) { + LF_PRINT_LOG("Updating the last known status tag of port %d from " PRINTF_TAG " to " PRINTF_TAG ".", port_id, + input_port_action->last_known_status_tag.time - lf_time_start(), + input_port_action->last_known_status_tag.microstep, tag.time - lf_time_start(), tag.microstep); + input_port_action->last_known_status_tag = tag; + + // Check whether this port update implies a change to MLAA, which may unblock reactions. + // For decentralized coordination, the first argument is NEVER, so it has no effect. + // For centralized, the arguments probably also have no effect, but the port update may. + // Note that it would not be correct to pass `tag` as the first argument because + // there is no guarantee that there is either a TAG or a PTAG for this time. + // The message that triggered this to be called could be from an upstream + // federate that is far ahead of other upstream federates in logical time. + lf_update_max_level(_fed.last_TAG, _fed.is_last_TAG_provisional); + lf_cond_broadcast(&lf_port_status_changed); + } else { + // Message arrivals should be monotonic, so this should not occur. 
+ lf_print_warning("Attempt to update the last known status tag " + "of network input port %d to an earlier tag was ignored.", + port_id); + } } /** @@ -310,8 +298,8 @@ static void update_last_known_status_on_input_port(environment_t* env, tag_t tag * @param status The network port status (port_status_t) */ static void set_network_port_status(int portID, port_status_t status) { - lf_action_base_t* network_input_port_action = action_for_port(portID); - network_input_port_action->trigger->status = status; + lf_action_base_t* network_input_port_action = action_for_port(portID); + network_input_port_action->trigger->status = status; } /** @@ -333,61 +321,56 @@ static void set_network_port_status(int portID, port_status_t status) { * scalar and 0 for no payload. * @return A handle to the event, or 0 if no event was scheduled, or -1 for error. */ -static trigger_handle_t schedule_message_received_from_network_locked( - environment_t* env, - trigger_t* trigger, - tag_t tag, - lf_token_t* token) { - assert(env != GLOBAL_ENVIRONMENT); - - // Return value of the function - trigger_handle_t return_value = 0; - - // Indicates whether or not the intended tag - // of the message (timestamp, microstep) is - // in the future relative to the tag of this - // federate. By default, assume it is not. - bool message_tag_is_in_the_future = lf_tag_compare(tag, env->current_tag) > 0; - // Assign the intended tag temporarily to restore later. - tag_t previous_intended_tag = trigger->intended_tag; - trigger->intended_tag = tag; - - // Calculate the extra_delay required to be passed - // to the schedule function. 
- interval_t extra_delay = tag.time - env->current_tag.time; - if (!message_tag_is_in_the_future && env->execution_started) { +static trigger_handle_t schedule_message_received_from_network_locked(environment_t* env, trigger_t* trigger, tag_t tag, + lf_token_t* token) { + assert(env != GLOBAL_ENVIRONMENT); + + // Return value of the function + trigger_handle_t return_value = 0; + + // Indicates whether or not the intended tag + // of the message (timestamp, microstep) is + // in the future relative to the tag of this + // federate. By default, assume it is not. + bool message_tag_is_in_the_future = lf_tag_compare(tag, env->current_tag) > 0; + // Assign the intended tag temporarily to restore later. + tag_t previous_intended_tag = trigger->intended_tag; + trigger->intended_tag = tag; + + // Calculate the extra_delay required to be passed + // to the schedule function. + interval_t extra_delay = tag.time - env->current_tag.time; + if (!message_tag_is_in_the_future && env->execution_started) { #ifdef FEDERATED_CENTRALIZED - // If the coordination is centralized, receiving a message - // that does not carry a timestamp that is in the future - // would indicate a critical condition, showing that the - // time advance mechanism is not working correctly. - LF_MUTEX_UNLOCK(&env->mutex); - lf_print_error_and_exit( - "Received a message at tag " PRINTF_TAG " that has a tag " PRINTF_TAG - " that has violated the STP offset. " - "Centralized coordination should not have these types of messages.", - env->current_tag.time - start_time, env->current_tag.microstep, - tag.time - start_time, tag.microstep); + // If the coordination is centralized, receiving a message + // that does not carry a timestamp that is in the future + // would indicate a critical condition, showing that the + // time advance mechanism is not working correctly. 
+ LF_MUTEX_UNLOCK(&env->mutex); + lf_print_error_and_exit( + "Received a message at tag " PRINTF_TAG " that has a tag " PRINTF_TAG " that has violated the STP offset. " + "Centralized coordination should not have these types of messages.", + env->current_tag.time - start_time, env->current_tag.microstep, tag.time - start_time, tag.microstep); #else - // Set the delay back to 0 - extra_delay = 0LL; - LF_PRINT_LOG("Calling schedule with 0 delay and intended tag " PRINTF_TAG ".", - trigger->intended_tag.time - start_time, - trigger->intended_tag.microstep); - return_value = lf_schedule_trigger(env, trigger, extra_delay, token); + // Set the delay back to 0 + extra_delay = 0LL; + LF_PRINT_LOG("Calling schedule with 0 delay and intended tag " PRINTF_TAG ".", + trigger->intended_tag.time - start_time, trigger->intended_tag.microstep); + return_value = lf_schedule_trigger(env, trigger, extra_delay, token); #endif - } else { - // In case the message is in the future, call - // _lf_schedule_at_tag() so that the microstep is respected. - LF_PRINT_LOG("Received a message that is (" PRINTF_TIME " nanoseconds, " PRINTF_MICROSTEP " microsteps) " - "in the future.", extra_delay, tag.microstep - env->current_tag.microstep); - return_value = _lf_schedule_at_tag(env, trigger, tag, token); - } - trigger->intended_tag = previous_intended_tag; - // Notify the main thread in case it is waiting for physical time to elapse. - LF_PRINT_DEBUG("Broadcasting notification that event queue changed."); - lf_cond_broadcast(&env->event_q_changed); - return return_value; + } else { + // In case the message is in the future, call + // _lf_schedule_at_tag() so that the microstep is respected. 
+ LF_PRINT_LOG("Received a message that is (" PRINTF_TIME " nanoseconds, " PRINTF_MICROSTEP " microsteps) " + "in the future.", + extra_delay, tag.microstep - env->current_tag.microstep); + return_value = _lf_schedule_at_tag(env, trigger, tag, token); + } + trigger->intended_tag = previous_intended_tag; + // Notify the main thread in case it is waiting for physical time to elapse. + LF_PRINT_DEBUG("Broadcasting notification that event queue changed."); + lf_cond_broadcast(&env->event_q_changed); + return return_value; } /** @@ -402,20 +385,20 @@ static trigger_handle_t schedule_message_received_from_network_locked( * @param flag 0 if an EOF was received, -1 if a socket error occurred, 1 otherwise. */ static void close_inbound_socket(int fed_id, int flag) { - LF_MUTEX_LOCK(&socket_mutex); - if (_fed.sockets_for_inbound_p2p_connections[fed_id] >= 0) { - if (flag >= 0) { - if (flag > 0) { - shutdown(_fed.sockets_for_inbound_p2p_connections[fed_id], SHUT_RDWR); - } else { - // Have received EOF from the other end. Send EOF to the other end. - shutdown(_fed.sockets_for_inbound_p2p_connections[fed_id], SHUT_WR); - } - } - close(_fed.sockets_for_inbound_p2p_connections[fed_id]); - _fed.sockets_for_inbound_p2p_connections[fed_id] = -1; - } - LF_MUTEX_UNLOCK(&socket_mutex); + LF_MUTEX_LOCK(&socket_mutex); + if (_fed.sockets_for_inbound_p2p_connections[fed_id] >= 0) { + if (flag >= 0) { + if (flag > 0) { + shutdown(_fed.sockets_for_inbound_p2p_connections[fed_id], SHUT_RDWR); + } else { + // Have received EOF from the other end. Send EOF to the other end. 
+ shutdown(_fed.sockets_for_inbound_p2p_connections[fed_id], SHUT_WR); + } + } + close(_fed.sockets_for_inbound_p2p_connections[fed_id]); + _fed.sockets_for_inbound_p2p_connections[fed_id] = -1; + } + LF_MUTEX_UNLOCK(&socket_mutex); } /** @@ -447,7 +430,7 @@ static void close_inbound_socket(int fed_id, int flag) { * last_known_status_tag (condition 4) deals with messages arriving with identical intended * tags (which should not happen). This one will be handled late (one microstep later than * the current tag if 1 and 2 are true). - * + * * This function assumes the mutex is held on the environment. * * @param env The environment. @@ -455,12 +438,10 @@ static void close_inbound_socket(int fed_id, int flag) { * @param intended_tag The intended tag. */ static bool handle_message_now(environment_t* env, trigger_t* trigger, tag_t intended_tag) { - return trigger->reactions[0]->index >= max_level_allowed_to_advance - && lf_tag_compare(intended_tag, lf_tag(env)) == 0 - && lf_tag_compare(intended_tag, trigger->last_tag) > 0 - && lf_tag_compare(intended_tag, trigger->last_known_status_tag) > 0 - && env->execution_started - && !trigger->is_physical; + return trigger->reactions[0]->index >= max_level_allowed_to_advance && + lf_tag_compare(intended_tag, lf_tag(env)) == 0 && lf_tag_compare(intended_tag, trigger->last_tag) > 0 && + lf_tag_compare(intended_tag, trigger->last_known_status_tag) > 0 && env->execution_started && + !trigger->is_physical; } /** @@ -472,40 +453,40 @@ static bool handle_message_now(environment_t* env, trigger_t* trigger, tag_t int * @return 0 for success, -1 for failure. */ static int handle_message(int* socket, int fed_id) { - // Read the header. - size_t bytes_to_read = sizeof(uint16_t) + sizeof(uint16_t) + sizeof(int32_t); - unsigned char buffer[bytes_to_read]; - if (read_from_socket_close_on_error(socket, bytes_to_read, buffer)) { - // Read failed, which means the socket has been closed between reading the - // message ID byte and here. 
- return -1; - } - - // Extract the header information. - unsigned short port_id; - unsigned short federate_id; - size_t length; - extract_header(buffer, &port_id, &federate_id, &length); - // Check if the message is intended for this federate - assert(_lf_my_fed_id == federate_id); - LF_PRINT_DEBUG("Receiving message to port %d of length %zu.", port_id, length); - - // Get the triggering action for the corresponding port - lf_action_base_t* action = action_for_port(port_id); - - // Read the payload. - // Allocate memory for the message contents. - unsigned char* message_contents = (unsigned char*)malloc(length); - if (read_from_socket_close_on_error(socket, length, message_contents)) { - return -1; - } - // Trace the event when tracing is enabled - tracepoint_federate_from_federate(_fed.trace, receive_P2P_MSG, _lf_my_fed_id, federate_id, NULL); - LF_PRINT_LOG("Message received by federate: %s. Length: %zu.", message_contents, length); - - LF_PRINT_DEBUG("Calling schedule for message received on a physical connection."); - lf_schedule_value(action, 0, message_contents, length); - return 0; + // Read the header. + size_t bytes_to_read = sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t); + unsigned char buffer[bytes_to_read]; + if (read_from_socket_close_on_error(socket, bytes_to_read, buffer)) { + // Read failed, which means the socket has been closed between reading the + // message ID byte and here. + return -1; + } + + // Extract the header information. + unsigned short port_id; + unsigned short federate_id; + size_t length; + extract_header(buffer, &port_id, &federate_id, &length); + // Check if the message is intended for this federate + assert(_lf_my_fed_id == federate_id); + LF_PRINT_DEBUG("Receiving message to port %d of length %zu.", port_id, length); + + // Get the triggering action for the corresponding port + lf_action_base_t* action = action_for_port(port_id); + + // Read the payload. + // Allocate memory for the message contents. 
+ unsigned char* message_contents = (unsigned char*)malloc(length); + if (read_from_socket_close_on_error(socket, length, message_contents)) { + return -1; + } + // Trace the event when tracing is enabled + tracepoint_federate_from_federate(receive_P2P_MSG, _lf_my_fed_id, federate_id, NULL); + LF_PRINT_LOG("Message received by federate: %s. Length: %zu.", message_contents, length); + + LF_PRINT_DEBUG("Calling schedule for message received on a physical connection."); + lf_schedule_value(action, 0, message_contents, length); + return 0; } /** @@ -525,162 +506,158 @@ static int handle_message(int* socket, int fed_id) { * @return 0 on successfully reading the message, -1 on failure (e.g. due to socket closed). */ static int handle_tagged_message(int* socket, int fed_id) { - // Environment is always the one corresponding to the top-level scheduling enclave. - environment_t *env; - _lf_get_environments(&env); - - // Read the header which contains the timestamp. - size_t bytes_to_read = sizeof(uint16_t) + sizeof(uint16_t) + sizeof(int32_t) - + sizeof(instant_t) + sizeof(microstep_t); - unsigned char buffer[bytes_to_read]; - if (read_from_socket_close_on_error(socket, bytes_to_read, buffer)) { - return -1; // Read failed. - } - - // Extract the header information. 
- unsigned short port_id; - unsigned short federate_id; - size_t length; - tag_t intended_tag; - extract_timed_header(buffer, &port_id, &federate_id, &length, &intended_tag); - // Trace the event when tracing is enabled - if (fed_id == -1) { - tracepoint_federate_from_rti(_fed.trace, receive_TAGGED_MSG, _lf_my_fed_id, &intended_tag); - } else { - tracepoint_federate_from_federate(_fed.trace, receive_P2P_TAGGED_MSG, _lf_my_fed_id, fed_id, &intended_tag); - } - // Check if the message is intended for this federate - assert(_lf_my_fed_id == federate_id); - LF_PRINT_DEBUG("Receiving message to port %d of length %zu.", port_id, length); - - // Get the triggering action for the corresponding port - lf_action_base_t* action = action_for_port(port_id); - - // Record the physical time of arrival of the message - instant_t time_of_arrival = lf_time_physical(); - - if (action->trigger->is_physical) { - // Messages sent on physical connections should be handled via handle_message(). - lf_print_error_and_exit("Received a tagged message on a physical connection."); - } + // Environment is always the one corresponding to the top-level scheduling enclave. + environment_t* env; + _lf_get_environments(&env); + + // Read the header which contains the timestamp. + size_t bytes_to_read = + sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t) + sizeof(instant_t) + sizeof(microstep_t); + unsigned char buffer[bytes_to_read]; + if (read_from_socket_close_on_error(socket, bytes_to_read, buffer)) { + return -1; // Read failed. + } + + // Extract the header information. 
+ unsigned short port_id; + unsigned short federate_id; + size_t length; + tag_t intended_tag; + extract_timed_header(buffer, &port_id, &federate_id, &length, &intended_tag); + // Trace the event when tracing is enabled + if (fed_id == -1) { + tracepoint_federate_from_rti(receive_TAGGED_MSG, _lf_my_fed_id, &intended_tag); + } else { + tracepoint_federate_from_federate(receive_P2P_TAGGED_MSG, _lf_my_fed_id, fed_id, &intended_tag); + } + // Check if the message is intended for this federate + assert(_lf_my_fed_id == federate_id); + LF_PRINT_DEBUG("Receiving message to port %d of length %zu.", port_id, length); + + // Get the triggering action for the corresponding port + lf_action_base_t* action = action_for_port(port_id); + + // Record the physical time of arrival of the message + instant_t time_of_arrival = lf_time_physical(); + + if (action->trigger->is_physical) { + // Messages sent on physical connections should be handled via handle_message(). + lf_print_error_and_exit("Received a tagged message on a physical connection."); + } #ifdef FEDERATED_DECENTRALIZED - // Only applicable for federated programs with decentralized coordination: - // For logical connections in decentralized coordination, - // increment the barrier to prevent advancement of tag beyond - // the received tag if possible. The following function call - // suggests that the tag barrier be raised to the tag provided - // by the message. If this tag is in the past, the function will cause - // the tag to freeze at the current level. - // If something happens, make sure to release the barrier. - _lf_increment_tag_barrier(env, intended_tag); + // Only applicable for federated programs with decentralized coordination: + // For logical connections in decentralized coordination, + // increment the barrier to prevent advancement of tag beyond + // the received tag if possible. The following function call + // suggests that the tag barrier be raised to the tag provided + // by the message. 
If this tag is in the past, the function will cause + // the tag to freeze at the current level. + // If something happens, make sure to release the barrier. + _lf_increment_tag_barrier(env, intended_tag); #endif - LF_PRINT_LOG("Received message on port %d with intended tag: " PRINTF_TAG ", Current tag: " PRINTF_TAG ".", - port_id, intended_tag.time - start_time, intended_tag.microstep, - lf_time_logical_elapsed(env), env->current_tag.microstep); - - // Read the payload. - // Allocate memory for the message contents. - unsigned char* message_contents = (unsigned char*)malloc(length); - if (read_from_socket_close_on_error(socket, length, message_contents)) { -#ifdef FEDERATED_DECENTRALIZED - _lf_decrement_tag_barrier_locked(env); + LF_PRINT_LOG("Received message on port %d with intended tag: " PRINTF_TAG ", Current tag: " PRINTF_TAG ".", port_id, + intended_tag.time - start_time, intended_tag.microstep, lf_time_logical_elapsed(env), + env->current_tag.microstep); + + // Read the payload. + // Allocate memory for the message contents. + unsigned char* message_contents = (unsigned char*)malloc(length); + if (read_from_socket_close_on_error(socket, length, message_contents)) { +#ifdef FEDERATED_DECENTRALIZED + _lf_decrement_tag_barrier_locked(env); #endif - return -1; // Read failed. - } - - // The following is only valid for string messages. - // LF_PRINT_DEBUG("Message received: %s.", message_contents); + return -1; // Read failed. + } - LF_MUTEX_LOCK(&env->mutex); + // The following is only valid for string messages. 
+ // LF_PRINT_DEBUG("Message received: %s.", message_contents); - action->trigger->physical_time_of_arrival = time_of_arrival; + LF_MUTEX_LOCK(&env->mutex); - // Create a token for the message - lf_token_t* message_token = _lf_new_token((token_type_t*)action, message_contents, length); + action->trigger->physical_time_of_arrival = time_of_arrival; - if (handle_message_now(env, action->trigger, intended_tag)) { - // Since the message is intended for the current tag and a port absent reaction - // was waiting for the message, trigger the corresponding reactions for this message. + // Create a token for the message + lf_token_t* message_token = _lf_new_token((token_type_t*)action, message_contents, length); - update_last_known_status_on_input_port(env, intended_tag, port_id); + if (handle_message_now(env, action->trigger, intended_tag)) { + // Since the message is intended for the current tag and a port absent reaction + // was waiting for the message, trigger the corresponding reactions for this message. - LF_PRINT_LOG( - "Inserting reactions directly at tag " PRINTF_TAG ". " - "Intended tag: " PRINTF_TAG ".", - env->current_tag.time - lf_time_start(), - env->current_tag.microstep, - intended_tag.time - lf_time_start(), - intended_tag.microstep - ); - // Only set the intended tag of the trigger if it is being executed now - // because otherwise this may preempt the intended_tag of a previous activation - // of the trigger. - action->trigger->intended_tag = intended_tag; - - // This will mark the STP violation in the reaction if the message is tardy. - _lf_insert_reactions_for_trigger(env, action->trigger, message_token); + update_last_known_status_on_input_port(env, intended_tag, port_id); - // Set the status of the port as present here to inform the network input - // port absent reactions know that they no longer need to block. The reason for - // that is because the network receiver reaction is now in the reaction queue - // keeping the precedence order intact. 
- set_network_port_status(port_id, present); - } else { - // If no port absent reaction is waiting for this message, or if the intended - // tag is in the future, or the message is tardy, use schedule functions to process the message. - - tag_t actual_tag = intended_tag; -#ifdef FEDERATED_DECENTRALIZED - // For tardy messages in decentralized coordination, we need to figure out what the actual tag will be. - // (Centralized coordination errors out with tardy messages). - if (lf_tag_compare(intended_tag, env->current_tag) <= 0) { - // Message is tardy. - actual_tag = env->current_tag; - actual_tag.microstep++; - // Check that this is greater than any previously scheduled event for this port. - trigger_t* input_port_action = action_for_port(port_id)->trigger; - if (lf_tag_compare(actual_tag, input_port_action->last_known_status_tag) <= 0) { - actual_tag = input_port_action->last_known_status_tag; - actual_tag.microstep++; - } - } + LF_PRINT_LOG("Inserting reactions directly at tag " PRINTF_TAG ". " + "Intended tag: " PRINTF_TAG ".", + env->current_tag.time - lf_time_start(), env->current_tag.microstep, + intended_tag.time - lf_time_start(), intended_tag.microstep); + // Only set the intended tag of the trigger if it is being executed now + // because otherwise this may preempt the intended_tag of a previous activation + // of the trigger. + action->trigger->intended_tag = intended_tag; + + // This will mark the STP violation in the reaction if the message is tardy. + _lf_insert_reactions_for_trigger(env, action->trigger, message_token); + + // Set the status of the port as present here to inform the network input + // port absent reactions know that they no longer need to block. The reason for + // that is because the network receiver reaction is now in the reaction queue + // keeping the precedence order intact. 
+ set_network_port_status(port_id, present); + } else { + // If no port absent reaction is waiting for this message, or if the intended + // tag is in the future, or the message is tardy, use schedule functions to process the message. + + tag_t actual_tag = intended_tag; +#ifdef FEDERATED_DECENTRALIZED + // For tardy messages in decentralized coordination, we need to figure out what the actual tag will be. + // (Centralized coordination errors out with tardy messages). + if (lf_tag_compare(intended_tag, env->current_tag) <= 0) { + // Message is tardy. + actual_tag = env->current_tag; + actual_tag.microstep++; + // Check that this is greater than any previously scheduled event for this port. + trigger_t* input_port_action = action_for_port(port_id)->trigger; + if (lf_tag_compare(actual_tag, input_port_action->last_known_status_tag) <= 0) { + actual_tag = input_port_action->last_known_status_tag; + actual_tag.microstep++; + } + } #endif // FEDERATED_DECENTRALIZED - // The following will update the input_port_action->last_known_status_tag. - // For decentralized coordination, this is needed for the thread implementing STAA. - update_last_known_status_on_input_port(env, actual_tag, port_id); - - // If the current time >= stop time, discard the message. - // But only if the stop time is not equal to the start time! - if (lf_tag_compare(env->current_tag, env->stop_tag) >= 0 && env->execution_started) { - lf_print_error("Received message too late. Already at stop tag.\n" - " Current tag is " PRINTF_TAG " and intended tag is " PRINTF_TAG ".\n" - " Discarding message and closing the socket.", - env->current_tag.time - start_time, env->current_tag.microstep, - intended_tag.time - start_time, intended_tag.microstep); - // Close socket, reading any incoming data and discarding it. - close_inbound_socket(fed_id, 1); - } else { - // Need to use intended_tag here, not actual_tag, so that STP violations are detected. 
- // It will become actual_tag (that is when the reactions will be invoked). - schedule_message_received_from_network_locked(env, action->trigger, intended_tag, message_token); - } + // The following will update the input_port_action->last_known_status_tag. + // For decentralized coordination, this is needed for the thread implementing STAA. + update_last_known_status_on_input_port(env, actual_tag, port_id); + + // If the current time >= stop time, discard the message. + // But only if the stop time is not equal to the start time! + if (lf_tag_compare(env->current_tag, env->stop_tag) >= 0 && env->execution_started) { + lf_print_error("Received message too late. Already at stop tag.\n" + " Current tag is " PRINTF_TAG " and intended tag is " PRINTF_TAG ".\n" + " Discarding message and closing the socket.", + env->current_tag.time - start_time, env->current_tag.microstep, intended_tag.time - start_time, + intended_tag.microstep); + // Close socket, reading any incoming data and discarding it. + close_inbound_socket(fed_id, 1); + } else { + // Need to use intended_tag here, not actual_tag, so that STP violations are detected. + // It will become actual_tag (that is when the reactions will be invoked). + schedule_message_received_from_network_locked(env, action->trigger, intended_tag, message_token); } + } -#ifdef FEDERATED_DECENTRALIZED - // Only applicable for federated programs with decentralized coordination - // Finally, decrement the barrier to allow the execution to continue - // past the raised barrier - _lf_decrement_tag_barrier_locked(env); +#ifdef FEDERATED_DECENTRALIZED + // Only applicable for federated programs with decentralized coordination + // Finally, decrement the barrier to allow the execution to continue + // past the raised barrier + _lf_decrement_tag_barrier_locked(env); #endif - // The mutex is unlocked here after the barrier on - // logical time has been removed to avoid - // the need for unecessary lock and unlock - // operations. 
- LF_MUTEX_UNLOCK(&env->mutex); + // The mutex is unlocked here after the barrier on + // logical time has been removed to avoid + // the need for unecessary lock and unlock + // operations. + LF_MUTEX_UNLOCK(&env->mutex); - return 0; + return 0; } /** @@ -693,40 +670,36 @@ static int handle_tagged_message(int* socket, int fed_id) { * @return 0 for success, -1 for failure to complete the read. */ static int handle_port_absent_message(int* socket, int fed_id) { - size_t bytes_to_read = sizeof(uint16_t) + sizeof(uint16_t) + sizeof(instant_t) + sizeof(microstep_t); - unsigned char buffer[bytes_to_read]; - if (read_from_socket_close_on_error(socket, bytes_to_read, buffer)) { - return -1; - } - - // Extract the header information. - unsigned short port_id = extract_uint16(buffer); - // The next part of the message is the federate_id, but we don't need it. - // unsigned short federate_id = extract_uint16(&(buffer[sizeof(uint16_t)])); - tag_t intended_tag = extract_tag(&(buffer[sizeof(uint16_t)+sizeof(uint16_t)])); - - // Trace the event when tracing is enabled - if (fed_id == -1) { - tracepoint_federate_from_rti(_fed.trace, receive_PORT_ABS, _lf_my_fed_id, &intended_tag); - } else { - tracepoint_federate_from_federate(_fed.trace, receive_PORT_ABS, _lf_my_fed_id, fed_id, &intended_tag); - } - LF_PRINT_LOG("Handling port absent for tag " PRINTF_TAG " for port %hu of fed %d.", - intended_tag.time - lf_time_start(), - intended_tag.microstep, - port_id, - fed_id - ); - - // Environment is always the one corresponding to the top-level scheduling enclave. 
- environment_t *env; - _lf_get_environments(&env); - - LF_MUTEX_LOCK(&env->mutex); - update_last_known_status_on_input_port(env, intended_tag, port_id); - LF_MUTEX_UNLOCK(&env->mutex); - - return 0; + size_t bytes_to_read = sizeof(uint16_t) + sizeof(uint16_t) + sizeof(instant_t) + sizeof(microstep_t); + unsigned char buffer[bytes_to_read]; + if (read_from_socket_close_on_error(socket, bytes_to_read, buffer)) { + return -1; + } + + // Extract the header information. + unsigned short port_id = extract_uint16(buffer); + // The next part of the message is the federate_id, but we don't need it. + // unsigned short federate_id = extract_uint16(&(buffer[sizeof(uint16_t)])); + tag_t intended_tag = extract_tag(&(buffer[sizeof(uint16_t) + sizeof(uint16_t)])); + + // Trace the event when tracing is enabled + if (fed_id == -1) { + tracepoint_federate_from_rti(receive_PORT_ABS, _lf_my_fed_id, &intended_tag); + } else { + tracepoint_federate_from_federate(receive_PORT_ABS, _lf_my_fed_id, fed_id, &intended_tag); + } + LF_PRINT_LOG("Handling port absent for tag " PRINTF_TAG " for port %hu of fed %d.", + intended_tag.time - lf_time_start(), intended_tag.microstep, port_id, fed_id); + + // Environment is always the one corresponding to the top-level scheduling enclave. + environment_t* env; + _lf_get_environments(&env); + + LF_MUTEX_LOCK(&env->mutex); + update_last_known_status_on_input_port(env, intended_tag, port_id); + LF_MUTEX_UNLOCK(&env->mutex); + + return 0; } /** @@ -743,82 +716,82 @@ static int handle_port_absent_message(int* socket, int fed_id) { * This procedure frees the memory pointed to before returning. */ static void* listen_to_federates(void* _args) { - uint16_t fed_id = (uint16_t)(uintptr_t)_args; - - LF_PRINT_LOG("Listening to federate %d.", fed_id); - - int* socket_id = &_fed.sockets_for_inbound_p2p_connections[fed_id]; - - // Buffer for incoming messages. - // This does not constrain the message size - // because the message will be put into malloc'd memory. 
- unsigned char buffer[FED_COM_BUFFER_SIZE]; - - // Listen for messages from the federate. - while (1) { - bool socket_closed = false; - // Read one byte to get the message type. - LF_PRINT_DEBUG("Waiting for a P2P message on socket %d.", *socket_id); - if (read_from_socket_close_on_error(socket_id, 1, buffer)) { - // Socket has been closed. - lf_print("Socket from federate %d is closed.", fed_id); - // Stop listening to this federate. - socket_closed = true; - break; - } - LF_PRINT_DEBUG("Received a P2P message on socket %d of type %d.", - *socket_id, buffer[0]); - bool bad_message = false; - switch (buffer[0]) { - case MSG_TYPE_P2P_MESSAGE: - LF_PRINT_LOG("Received untimed message from federate %d.", fed_id); - if (handle_message(socket_id, fed_id)) { - // Failed to complete the reading of a message on a physical connection. - lf_print_warning("Failed to complete reading of message on physical connection."); - socket_closed = true; - } - break; - case MSG_TYPE_P2P_TAGGED_MESSAGE: - LF_PRINT_LOG("Received tagged message from federate %d.", fed_id); - if (handle_tagged_message(socket_id, fed_id)) { - // P2P tagged messages are only used in decentralized coordination, and - // it is not a fatal error if the socket is closed before the whole message is read. - // But this thread should exit. - lf_print_warning("Failed to complete reading of tagged message."); - socket_closed = true; - } - break; - case MSG_TYPE_PORT_ABSENT: - LF_PRINT_LOG("Received port absent message from federate %d.", fed_id); - if (handle_port_absent_message(socket_id, fed_id)) { - // P2P tagged messages are only used in decentralized coordination, and - // it is not a fatal error if the socket is closed before the whole message is read. - // But this thread should exit. - lf_print_warning("Failed to complete reading of tagged message."); - socket_closed = true; - } - break; - default: - bad_message = true; - } - if (bad_message) { - lf_print_error("Received erroneous message type: %d. 
Closing the socket.", buffer[0]); - // Trace the event when tracing is enabled - tracepoint_federate_from_federate(_fed.trace, receive_UNIDENTIFIED, _lf_my_fed_id, fed_id, NULL); - break; // while loop - } - if (socket_closed) { - // NOTE: For decentralized execution, once this socket is closed, we could - // update last known tags of all ports connected to the specified federate to FOREVER_TAG, - // which would eliminate the need to wait for STAA to assume an input is absent. - // However, at this time, we don't know which ports correspond to which upstream federates. - // The code generator would have to encode this information. Once that is done, - // we could call update_last_known_status_on_input_port with FOREVER_TAG. - - break; // while loop - } - } - return NULL; + initialize_lf_thread_id(); + uint16_t fed_id = (uint16_t)(uintptr_t)_args; + + LF_PRINT_LOG("Listening to federate %d.", fed_id); + + int* socket_id = &_fed.sockets_for_inbound_p2p_connections[fed_id]; + + // Buffer for incoming messages. + // This does not constrain the message size + // because the message will be put into malloc'd memory. + unsigned char buffer[FED_COM_BUFFER_SIZE]; + + // Listen for messages from the federate. + while (1) { + bool socket_closed = false; + // Read one byte to get the message type. + LF_PRINT_DEBUG("Waiting for a P2P message on socket %d.", *socket_id); + if (read_from_socket_close_on_error(socket_id, 1, buffer)) { + // Socket has been closed. + lf_print("Socket from federate %d is closed.", fed_id); + // Stop listening to this federate. + socket_closed = true; + break; + } + LF_PRINT_DEBUG("Received a P2P message on socket %d of type %d.", *socket_id, buffer[0]); + bool bad_message = false; + switch (buffer[0]) { + case MSG_TYPE_P2P_MESSAGE: + LF_PRINT_LOG("Received untimed message from federate %d.", fed_id); + if (handle_message(socket_id, fed_id)) { + // Failed to complete the reading of a message on a physical connection. 
+ lf_print_warning("Failed to complete reading of message on physical connection."); + socket_closed = true; + } + break; + case MSG_TYPE_P2P_TAGGED_MESSAGE: + LF_PRINT_LOG("Received tagged message from federate %d.", fed_id); + if (handle_tagged_message(socket_id, fed_id)) { + // P2P tagged messages are only used in decentralized coordination, and + // it is not a fatal error if the socket is closed before the whole message is read. + // But this thread should exit. + lf_print_warning("Failed to complete reading of tagged message."); + socket_closed = true; + } + break; + case MSG_TYPE_PORT_ABSENT: + LF_PRINT_LOG("Received port absent message from federate %d.", fed_id); + if (handle_port_absent_message(socket_id, fed_id)) { + // P2P tagged messages are only used in decentralized coordination, and + // it is not a fatal error if the socket is closed before the whole message is read. + // But this thread should exit. + lf_print_warning("Failed to complete reading of tagged message."); + socket_closed = true; + } + break; + default: + bad_message = true; + } + if (bad_message) { + lf_print_error("Received erroneous message type: %d. Closing the socket.", buffer[0]); + // Trace the event when tracing is enabled + tracepoint_federate_from_federate(receive_UNIDENTIFIED, _lf_my_fed_id, fed_id, NULL); + break; // while loop + } + if (socket_closed) { + // NOTE: For decentralized execution, once this socket is closed, we could + // update last known tags of all ports connected to the specified federate to FOREVER_TAG, + // which would eliminate the need to wait for STAA to assume an input is absent. + // However, at this time, we don't know which ports correspond to which upstream federates. + // The code generator would have to encode this information. Once that is done, + // we could call update_last_known_status_on_input_port with FOREVER_TAG. 
+ + break; // while loop + } + } + return NULL; } /** @@ -830,30 +803,31 @@ static void* listen_to_federates(void* _args) { * @param flag 0 if the socket has received EOF, 1 if not, -1 if abnormal termination. */ static void close_outbound_socket(int fed_id, int flag) { - assert (fed_id >= 0 && fed_id < NUMBER_OF_FEDERATES); - if (_lf_normal_termination) { - LF_MUTEX_LOCK(&lf_outbound_socket_mutex); - } - if (_fed.sockets_for_outbound_p2p_connections[fed_id] >= 0) { - // Close the socket by sending a FIN packet indicating that no further writes - // are expected. Then read until we get an EOF indication. - if (flag >= 0) { - // SHUT_WR indicates no further outgoing messages. - shutdown(_fed.sockets_for_outbound_p2p_connections[fed_id], SHUT_WR); - if (flag > 0) { - // Have not received EOF yet. read until we get an EOF or error indication. - // This compensates for delayed ACKs and disabling of Nagles algorithm - // by delaying exiting until the shutdown is complete. - unsigned char message[32]; - while (read(_fed.sockets_for_outbound_p2p_connections[fed_id], &message, 32) > 0); - } - } - close(_fed.sockets_for_outbound_p2p_connections[fed_id]); - _fed.sockets_for_outbound_p2p_connections[fed_id] = -1; - } - if (_lf_normal_termination) { - LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); - } + assert(fed_id >= 0 && fed_id < NUMBER_OF_FEDERATES); + if (_lf_normal_termination) { + LF_MUTEX_LOCK(&lf_outbound_socket_mutex); + } + if (_fed.sockets_for_outbound_p2p_connections[fed_id] >= 0) { + // Close the socket by sending a FIN packet indicating that no further writes + // are expected. Then read until we get an EOF indication. + if (flag >= 0) { + // SHUT_WR indicates no further outgoing messages. + shutdown(_fed.sockets_for_outbound_p2p_connections[fed_id], SHUT_WR); + if (flag > 0) { + // Have not received EOF yet. read until we get an EOF or error indication. 
+ // This compensates for delayed ACKs and disabling of Nagles algorithm + // by delaying exiting until the shutdown is complete. + unsigned char message[32]; + while (read(_fed.sockets_for_outbound_p2p_connections[fed_id], &message, 32) > 0) + ; + } + } + close(_fed.sockets_for_outbound_p2p_connections[fed_id]); + _fed.sockets_for_outbound_p2p_connections[fed_id] = -1; + } + if (_lf_normal_termination) { + LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); + } } #ifdef FEDERATED_AUTHENTICATED @@ -864,84 +838,78 @@ static void close_outbound_socket(int fed_id, int flag) { */ static int perform_hmac_authentication() { - // Send buffer including message type, federate ID, federate's nonce. - size_t fed_id_length = sizeof(uint16_t); - size_t message_length = 1 + fed_id_length + NONCE_LENGTH; - unsigned char fed_hello_buf[message_length]; - fed_hello_buf[0] = MSG_TYPE_FED_NONCE; - encode_uint16((uint16_t)_lf_my_fed_id, &fed_hello_buf[1]); - unsigned char fed_nonce[NONCE_LENGTH]; - RAND_bytes(fed_nonce, NONCE_LENGTH); - memcpy(&fed_hello_buf[1 + fed_id_length], fed_nonce, NONCE_LENGTH); - - write_to_socket_fail_on_error( - &_fed.socket_TCP_RTI, message_length, fed_hello_buf, NULL, - "Failed to write nonce."); - - // Check HMAC of received FED_RESPONSE message. - unsigned int hmac_length = SHA256_HMAC_LENGTH; - size_t federation_id_length = strnlen(federation_metadata.federation_id, 255); - - unsigned char received[1 + NONCE_LENGTH + hmac_length]; - if (read_from_socket_close_on_error(&_fed.socket_TCP_RTI, 1 + NONCE_LENGTH + hmac_length, received)) { - lf_print_warning("Failed to read RTI response."); - return -1; - } - if (received[0] != MSG_TYPE_RTI_RESPONSE) { - if (received[0] == MSG_TYPE_FAILED) { - lf_print_error("RTI has failed."); - return -1; - } else { - lf_print_error( - "Received unexpected response %u from the RTI (see net_common.h).", - received[0]); - return -1; - } - } - // Create tag to compare to received tag. 
- unsigned char buf_to_check[1 + fed_id_length + NONCE_LENGTH]; - buf_to_check[0] = MSG_TYPE_RTI_RESPONSE; - encode_uint16((uint16_t)_lf_my_fed_id, &buf_to_check[1]); - memcpy(&buf_to_check[1 + fed_id_length], fed_nonce, NONCE_LENGTH); - unsigned char fed_tag[hmac_length]; - HMAC(EVP_sha256(), federation_metadata.federation_id, federation_id_length, buf_to_check, 1 + fed_id_length + NONCE_LENGTH, - fed_tag, &hmac_length); - - // Compare received tag and created tag. - if (memcmp(&received[1 + NONCE_LENGTH], fed_tag, hmac_length) != 0) { - // HMAC does not match. Send back a MSG_TYPE_REJECT message. - lf_print_error("HMAC authentication failed."); - unsigned char response[2]; - response[0] = MSG_TYPE_REJECT; - response[1] = HMAC_DOES_NOT_MATCH; - - // Ignore errors on writing back. - write_to_socket(_fed.socket_TCP_RTI, 2, response); - return -1; + // Send buffer including message type, federate ID, federate's nonce. + size_t fed_id_length = sizeof(uint16_t); + size_t message_length = 1 + fed_id_length + NONCE_LENGTH; + unsigned char fed_hello_buf[message_length]; + fed_hello_buf[0] = MSG_TYPE_FED_NONCE; + encode_uint16((uint16_t)_lf_my_fed_id, &fed_hello_buf[1]); + unsigned char fed_nonce[NONCE_LENGTH]; + RAND_bytes(fed_nonce, NONCE_LENGTH); + memcpy(&fed_hello_buf[1 + fed_id_length], fed_nonce, NONCE_LENGTH); + + write_to_socket_fail_on_error(&_fed.socket_TCP_RTI, message_length, fed_hello_buf, NULL, "Failed to write nonce."); + + // Check HMAC of received FED_RESPONSE message. 
+ unsigned int hmac_length = SHA256_HMAC_LENGTH; + size_t federation_id_length = strnlen(federation_metadata.federation_id, 255); + + unsigned char received[1 + NONCE_LENGTH + hmac_length]; + if (read_from_socket_close_on_error(&_fed.socket_TCP_RTI, 1 + NONCE_LENGTH + hmac_length, received)) { + lf_print_warning("Failed to read RTI response."); + return -1; + } + if (received[0] != MSG_TYPE_RTI_RESPONSE) { + if (received[0] == MSG_TYPE_FAILED) { + lf_print_error("RTI has failed."); + return -1; } else { - LF_PRINT_LOG("HMAC verified."); - // HMAC tag is created with MSG_TYPE_FED_RESPONSE and received federate nonce. - unsigned char mac_buf[1 + NONCE_LENGTH]; - mac_buf[0] = MSG_TYPE_FED_RESPONSE; - memcpy(&mac_buf[1], &received[1], NONCE_LENGTH); - // Buffer for message type and HMAC tag. - unsigned char sender[1 + hmac_length]; - sender[0] = MSG_TYPE_FED_RESPONSE; - HMAC(EVP_sha256(), federation_metadata.federation_id, federation_id_length, mac_buf, 1 + NONCE_LENGTH, - &sender[1], &hmac_length); - - write_to_socket_fail_on_error( - &_fed.socket_TCP_RTI, 1 + hmac_length, sender, NULL, - "Failed to write fed response."); - } - return 0; + lf_print_error("Received unexpected response %u from the RTI (see net_common.h).", received[0]); + return -1; + } + } + // Create tag to compare to received tag. + unsigned char buf_to_check[1 + fed_id_length + NONCE_LENGTH]; + buf_to_check[0] = MSG_TYPE_RTI_RESPONSE; + encode_uint16((uint16_t)_lf_my_fed_id, &buf_to_check[1]); + memcpy(&buf_to_check[1 + fed_id_length], fed_nonce, NONCE_LENGTH); + unsigned char fed_tag[hmac_length]; + HMAC(EVP_sha256(), federation_metadata.federation_id, federation_id_length, buf_to_check, + 1 + fed_id_length + NONCE_LENGTH, fed_tag, &hmac_length); + + // Compare received tag and created tag. + if (memcmp(&received[1 + NONCE_LENGTH], fed_tag, hmac_length) != 0) { + // HMAC does not match. Send back a MSG_TYPE_REJECT message. 
+ lf_print_error("HMAC authentication failed."); + unsigned char response[2]; + response[0] = MSG_TYPE_REJECT; + response[1] = HMAC_DOES_NOT_MATCH; + + // Ignore errors on writing back. + write_to_socket(_fed.socket_TCP_RTI, 2, response); + return -1; + } else { + LF_PRINT_LOG("HMAC verified."); + // HMAC tag is created with MSG_TYPE_FED_RESPONSE and received federate nonce. + unsigned char mac_buf[1 + NONCE_LENGTH]; + mac_buf[0] = MSG_TYPE_FED_RESPONSE; + memcpy(&mac_buf[1], &received[1], NONCE_LENGTH); + // Buffer for message type and HMAC tag. + unsigned char sender[1 + hmac_length]; + sender[0] = MSG_TYPE_FED_RESPONSE; + HMAC(EVP_sha256(), federation_metadata.federation_id, federation_id_length, mac_buf, 1 + NONCE_LENGTH, &sender[1], + &hmac_length); + + write_to_socket_fail_on_error(&_fed.socket_TCP_RTI, 1 + hmac_length, sender, NULL, "Failed to write fed response."); + } + return 0; } #endif static void close_rti_socket() { - shutdown(_fed.socket_TCP_RTI, SHUT_RDWR); - close(_fed.socket_TCP_RTI); - _fed.socket_TCP_RTI = -1; + shutdown(_fed.socket_TCP_RTI, SHUT_RDWR); + close(_fed.socket_TCP_RTI); + _fed.socket_TCP_RTI = -1; } /** @@ -952,26 +920,26 @@ static void close_rti_socket() { * @param result The struct into which to write. */ static void rti_address(const char* hostname, uint16_t port, struct addrinfo** result) { - struct addrinfo hints; - - memset(&hints, 0, sizeof(hints)); - hints.ai_family = AF_INET; /* Allow IPv4 */ - hints.ai_socktype = SOCK_STREAM; /* Stream socket */ - hints.ai_protocol = IPPROTO_TCP; /* TCP protocol */ - hints.ai_addr = NULL; - hints.ai_next = NULL; - hints.ai_flags = AI_NUMERICSERV; /* Allow only numeric port numbers */ - - // Convert port number to string. - char str[6]; - sprintf(str, "%u", port); - - // Get address structure matching hostname and hints criteria, and - // set port to the port number provided in str. There should only - // ever be one matching address structure, and we connect to that. 
- if (getaddrinfo(hostname, (const char*)&str, &hints, result)) { - lf_print_error_and_exit("No host for RTI matching given hostname: %s", hostname); - } + struct addrinfo hints; + + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_INET; /* Allow IPv4 */ + hints.ai_socktype = SOCK_STREAM; /* Stream socket */ + hints.ai_protocol = IPPROTO_TCP; /* TCP protocol */ + hints.ai_addr = NULL; + hints.ai_next = NULL; + hints.ai_flags = AI_NUMERICSERV; /* Allow only numeric port numbers */ + + // Convert port number to string. + char str[6]; + sprintf(str, "%u", port); + + // Get address structure matching hostname and hints criteria, and + // set port to the port number provided in str. There should only + // ever be one matching address structure, and we connect to that. + if (getaddrinfo(hostname, (const char*)&str, &hints, result)) { + lf_print_error_and_exit("No host for RTI matching given hostname: %s", hostname); + } } /** @@ -984,37 +952,36 @@ static void rti_address(const char* hostname, uint16_t port, struct addrinfo** r * @return The designated start time for the federate. */ static instant_t get_start_time_from_rti(instant_t my_physical_time) { - // Send the timestamp marker first. - send_time(MSG_TYPE_TIMESTAMP, my_physical_time); - - // Read bytes from the socket. We need 9 bytes. - // Buffer for message ID plus timestamp. - size_t buffer_length = 1 + sizeof(instant_t); - unsigned char buffer[buffer_length]; - - read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, buffer_length, buffer, NULL, - "Failed to read MSG_TYPE_TIMESTAMP message from RTI."); - LF_PRINT_DEBUG("Read 9 bytes."); - - // First byte received is the message ID. - if (buffer[0] != MSG_TYPE_TIMESTAMP) { - if (buffer[0] == MSG_TYPE_FAILED) { - lf_print_error_and_exit("RTI has failed."); - } - lf_print_error_and_exit( - "Expected a MSG_TYPE_TIMESTAMP message from the RTI. Got %u (see net_common.h).", - buffer[0]); + // Send the timestamp marker first. 
+ send_time(MSG_TYPE_TIMESTAMP, my_physical_time); + + // Read bytes from the socket. We need 9 bytes. + // Buffer for message ID plus timestamp. + size_t buffer_length = 1 + sizeof(instant_t); + unsigned char buffer[buffer_length]; + + read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, buffer_length, buffer, NULL, + "Failed to read MSG_TYPE_TIMESTAMP message from RTI."); + LF_PRINT_DEBUG("Read 9 bytes."); + + // First byte received is the message ID. + if (buffer[0] != MSG_TYPE_TIMESTAMP) { + if (buffer[0] == MSG_TYPE_FAILED) { + lf_print_error_and_exit("RTI has failed."); } + lf_print_error_and_exit("Expected a MSG_TYPE_TIMESTAMP message from the RTI. Got %u (see net_common.h).", + buffer[0]); + } - instant_t timestamp = extract_int64(&(buffer[1])); + instant_t timestamp = extract_int64(&(buffer[1])); - tag_t tag = {.time = timestamp, .microstep = 0}; - // Trace the event when tracing is enabled - tracepoint_federate_from_rti(_fed.trace, receive_TIMESTAMP, _lf_my_fed_id, &tag); - lf_print("Starting timestamp is: " PRINTF_TIME ".", timestamp); - LF_PRINT_LOG("Current physical time is: " PRINTF_TIME ".", lf_time_physical()); + tag_t tag = {.time = timestamp, .microstep = 0}; + // Trace the event when tracing is enabled + tracepoint_federate_from_rti(receive_TIMESTAMP, _lf_my_fed_id, &tag); + lf_print("Starting timestamp is: " PRINTF_TIME ".", timestamp); + LF_PRINT_LOG("Current physical time is: " PRINTF_TIME ".", lf_time_physical()); - return timestamp; + return timestamp; } /** @@ -1031,47 +998,46 @@ static instant_t get_start_time_from_rti(instant_t my_physical_time) { * it sets last_TAG_was_provisional to false. */ static void handle_tag_advance_grant(void) { - // Environment is always the one corresponding to the top-level scheduling enclave. 
- environment_t *env; - _lf_get_environments(&env); - - size_t bytes_to_read = sizeof(instant_t) + sizeof(microstep_t); - unsigned char buffer[bytes_to_read]; - read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_read, buffer, NULL, - "Failed to read tag advance grant from RTI."); - tag_t TAG = extract_tag(buffer); - - // Trace the event when tracing is enabled - tracepoint_federate_from_rti(_fed.trace, receive_TAG, _lf_my_fed_id, &TAG); - - LF_MUTEX_LOCK(&env->mutex); - - // Update the last known status tag of all network input ports - // to the TAG received from the RTI. Here we assume that the RTI - // knows the status of network ports up to and including the granted tag, - // so by extension, we assume that the federate can safely rely - // on the RTI to handle port statuses up until the granted tag. - update_last_known_status_on_input_ports(TAG); - - // It is possible for this federate to have received a PTAG - // earlier with the same tag as this TAG. - if (lf_tag_compare(TAG, _fed.last_TAG) >= 0) { - _fed.last_TAG = TAG; - _fed.is_last_TAG_provisional = false; - LF_PRINT_LOG("Received Time Advance Grant (TAG): " PRINTF_TAG ".", - _fed.last_TAG.time - start_time, _fed.last_TAG.microstep); - } else { - LF_MUTEX_UNLOCK(&env->mutex); - lf_print_error("Received a TAG " PRINTF_TAG " that wasn't larger " - "than the previous TAG or PTAG " PRINTF_TAG ". Ignoring the TAG.", - TAG.time - start_time, TAG.microstep, - _fed.last_TAG.time - start_time, _fed.last_TAG.microstep); - return; - } - // Notify everything that is blocked. - lf_cond_broadcast(&env->event_q_changed); - + // Environment is always the one corresponding to the top-level scheduling enclave. 
+ environment_t* env; + _lf_get_environments(&env); + + size_t bytes_to_read = sizeof(instant_t) + sizeof(microstep_t); + unsigned char buffer[bytes_to_read]; + read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_read, buffer, NULL, + "Failed to read tag advance grant from RTI."); + tag_t TAG = extract_tag(buffer); + + // Trace the event when tracing is enabled + tracepoint_federate_from_rti(receive_TAG, _lf_my_fed_id, &TAG); + + LF_MUTEX_LOCK(&env->mutex); + + // Update the last known status tag of all network input ports + // to the TAG received from the RTI. Here we assume that the RTI + // knows the status of network ports up to and including the granted tag, + // so by extension, we assume that the federate can safely rely + // on the RTI to handle port statuses up until the granted tag. + update_last_known_status_on_input_ports(TAG); + + // It is possible for this federate to have received a PTAG + // earlier with the same tag as this TAG. + if (lf_tag_compare(TAG, _fed.last_TAG) >= 0) { + _fed.last_TAG = TAG; + _fed.is_last_TAG_provisional = false; + LF_PRINT_LOG("Received Time Advance Grant (TAG): " PRINTF_TAG ".", _fed.last_TAG.time - start_time, + _fed.last_TAG.microstep); + } else { LF_MUTEX_UNLOCK(&env->mutex); + lf_print_error("Received a TAG " PRINTF_TAG " that wasn't larger " + "than the previous TAG or PTAG " PRINTF_TAG ". Ignoring the TAG.", + TAG.time - start_time, TAG.microstep, _fed.last_TAG.time - start_time, _fed.last_TAG.microstep); + return; + } + // Notify everything that is blocked. + lf_cond_broadcast(&env->event_q_changed); + + LF_MUTEX_UNLOCK(&env->mutex); } #ifdef FEDERATED_DECENTRALIZED @@ -1081,14 +1047,14 @@ static void handle_tag_advance_grant(void) { * @param staa_elem A record of all input port actions. 
*/ static bool a_port_is_unknown(staa_t* staa_elem) { - bool do_wait = false; - for (int j = 0; j < staa_elem->num_actions; ++j) { - if (staa_elem->actions[j]->trigger->status == unknown) { - do_wait = true; - break; - } - } - return do_wait; + bool do_wait = false; + for (int j = 0; j < staa_elem->num_actions; ++j) { + if (staa_elem->actions[j]->trigger->status == unknown) { + do_wait = true; + break; + } + } + return do_wait; } #endif @@ -1097,10 +1063,11 @@ static bool a_port_is_unknown(staa_t* staa_elem) { * @return The port ID or -1 if there is no match. */ static int id_of_action(lf_action_base_t* input_port_action) { - for (int i = 0; i < _lf_action_table_size; i++) { - if (_lf_action_table[i] == input_port_action) return i; - } - return -1; + for (int i = 0; i < _lf_action_table_size; i++) { + if (_lf_action_table[i] == input_port_action) + return i; + } + return -1; } /** @@ -1112,110 +1079,117 @@ static int id_of_action(lf_action_base_t* input_port_action) { */ #ifdef FEDERATED_DECENTRALIZED static void* update_ports_from_staa_offsets(void* args) { - if (staa_lst_size == 0) return NULL; // Nothing to do. - // NOTE: Using only the top-level environment, which is the one that deals with network - // input ports. - environment_t *env; - int num_envs = _lf_get_environments(&env); - LF_MUTEX_LOCK(&env->mutex); - while (1) { - LF_PRINT_DEBUG("**** (update thread) starting"); - tag_t tag_when_started_waiting = lf_tag(env); - for (int i = 0; i < staa_lst_size; ++i) { - staa_t* staa_elem = staa_lst[i]; - // The staa_elem is adjusted in the code generator to have subtracted the delay on the connection. - // The list is sorted in increasing order of adjusted STAA offsets. - // The wait_until function automatically adds the lf_fed_STA_offset to the wait time. 
- interval_t wait_until_time = env->current_tag.time + staa_elem->STAA; - LF_PRINT_DEBUG("**** (update thread) original wait_until_time: " PRINTF_TIME, wait_until_time - lf_time_start()); - - // The wait_until call will release the env->mutex while it is waiting. - // However, it will not release the env->mutex if the wait time is too small. - // At the cost of a small additional delay in deciding a port is absent, - // we require a minimum wait time here. Otherwise, if both the STAA and STA are - // zero, this thread will fail to ever release the environment mutex. - // This causes chaos. The MIN_SLEEP_DURATION is the smallest amount of time - // that wait_until will actually wait. Note that this strategy does not - // block progress of any execution that is actually processing events. - // It only slightly delays the decision that an event is absent, and only - // if the STAA and STA are extremely small. - if (lf_fed_STA_offset + staa_elem->STAA < 5 * MIN_SLEEP_DURATION) { - wait_until_time += 5 * MIN_SLEEP_DURATION; - } - while (a_port_is_unknown(staa_elem)) { - LF_PRINT_DEBUG("**** (update thread) waiting until: " PRINTF_TIME, wait_until_time - lf_time_start()); - if (wait_until(env, wait_until_time, &lf_port_status_changed)) { - if (lf_tag_compare(lf_tag(env), tag_when_started_waiting) != 0) { - break; - } - /* Possibly useful for debugging: - tag_t current_tag = lf_tag(env); - LF_PRINT_DEBUG("**** (update thread) Assuming absent! 
" PRINTF_TAG, current_tag.time - lf_time_start(), current_tag.microstep); - LF_PRINT_DEBUG("**** (update thread) Lag is " PRINTF_TIME, current_tag.time - lf_time_physical()); - LF_PRINT_DEBUG("**** (update thread) Wait until time is " PRINTF_TIME, wait_until_time - lf_time_start()); - */ - - for (int j = 0; j < staa_elem->num_actions; ++j) { - lf_action_base_t* input_port_action = staa_elem->actions[j]; - if (input_port_action->trigger->status == unknown) { - input_port_action->trigger->status = absent; - LF_PRINT_DEBUG("**** (update thread) Assuming port absent at time " PRINTF_TIME, lf_tag(env).time - start_time); - update_last_known_status_on_input_port(env, lf_tag(env), id_of_action(input_port_action)); - lf_cond_broadcast(&lf_port_status_changed); - } - } - } - // If the tag has advanced, start over. - if (lf_tag_compare(lf_tag(env), tag_when_started_waiting) != 0) break; + initialize_lf_thread_id(); + if (staa_lst_size == 0) + return NULL; // Nothing to do. + // NOTE: Using only the top-level environment, which is the one that deals with network + // input ports. + environment_t* env; + int num_envs = _lf_get_environments(&env); + LF_MUTEX_LOCK(&env->mutex); + while (1) { + LF_PRINT_DEBUG("**** (update thread) starting"); + tag_t tag_when_started_waiting = lf_tag(env); + for (int i = 0; i < staa_lst_size; ++i) { + staa_t* staa_elem = staa_lst[i]; + // The staa_elem is adjusted in the code generator to have subtracted the delay on the connection. + // The list is sorted in increasing order of adjusted STAA offsets. + // The wait_until function automatically adds the lf_fed_STA_offset to the wait time. + interval_t wait_until_time = env->current_tag.time + staa_elem->STAA; + LF_PRINT_DEBUG("**** (update thread) original wait_until_time: " PRINTF_TIME, wait_until_time - lf_time_start()); + + // The wait_until call will release the env->mutex while it is waiting. + // However, it will not release the env->mutex if the wait time is too small. 
+ // At the cost of a small additional delay in deciding a port is absent, + // we require a minimum wait time here. Otherwise, if both the STAA and STA are + // zero, this thread will fail to ever release the environment mutex. + // This causes chaos. The MIN_SLEEP_DURATION is the smallest amount of time + // that wait_until will actually wait. Note that this strategy does not + // block progress of any execution that is actually processing events. + // It only slightly delays the decision that an event is absent, and only + // if the STAA and STA are extremely small. + if (lf_fed_STA_offset + staa_elem->STAA < 5 * MIN_SLEEP_DURATION) { + wait_until_time += 5 * MIN_SLEEP_DURATION; + } + while (a_port_is_unknown(staa_elem)) { + LF_PRINT_DEBUG("**** (update thread) waiting until: " PRINTF_TIME, wait_until_time - lf_time_start()); + if (wait_until(env, wait_until_time, &lf_port_status_changed)) { + if (lf_tag_compare(lf_tag(env), tag_when_started_waiting) != 0) { + break; + } + /* Possibly useful for debugging: + tag_t current_tag = lf_tag(env); + LF_PRINT_DEBUG("**** (update thread) Assuming absent! " PRINTF_TAG, current_tag.time - lf_time_start(), + current_tag.microstep); LF_PRINT_DEBUG("**** (update thread) Lag is " PRINTF_TIME, current_tag.time - + lf_time_physical()); LF_PRINT_DEBUG("**** (update thread) Wait until time is " PRINTF_TIME, wait_until_time - + lf_time_start()); + */ + + for (int j = 0; j < staa_elem->num_actions; ++j) { + lf_action_base_t* input_port_action = staa_elem->actions[j]; + if (input_port_action->trigger->status == unknown) { + input_port_action->trigger->status = absent; + LF_PRINT_DEBUG("**** (update thread) Assuming port absent at time " PRINTF_TIME, + lf_tag(env).time - start_time); + update_last_known_status_on_input_port(env, lf_tag(env), id_of_action(input_port_action)); + lf_cond_broadcast(&lf_port_status_changed); } - // If the tag has advanced, start over. 
- if (lf_tag_compare(lf_tag(env), tag_when_started_waiting) != 0) break; + } } // If the tag has advanced, start over. - if (lf_tag_compare(lf_tag(env), tag_when_started_waiting) != 0) continue; - - // At this point, the current tag is the same as when we started waiting - // and all ports should be known, and hence max_level_allowed_to_advance - // should be INT_MAX. Check this to prevent an infinite wait. - if (max_level_allowed_to_advance != INT_MAX) { - // If this occurs, then the current tag advanced during a wait. - // Some ports may have been reset to uknown during that wait, in which case, - // it would be huge mistake to enter the wait for a new tag below because the - // program will freeze. First, check whether any ports are unknown: - bool port_unkonwn = false; - for (int i = 0; i < staa_lst_size; ++i) { - staa_t* staa_elem = staa_lst[i]; - if (a_port_is_unknown(staa_elem)) { - port_unkonwn = true; - break; - } - } - if (!port_unkonwn) { - // If this occurs, then there is a race condition that can lead to deadlocks. - lf_print_error_and_exit("**** (update thread) Inconsistency: All ports are known, but MLAA is blocking."); - } - - // Since max_level_allowed_to_advance will block advancement of time, we cannot follow - // through to the next step without deadlocking. Wait some time, then continue. - // The wait is necessary to prevent a busy wait. - lf_sleep(2 * MIN_SLEEP_DURATION); - continue; + if (lf_tag_compare(lf_tag(env), tag_when_started_waiting) != 0) + break; + } + // If the tag has advanced, start over. + if (lf_tag_compare(lf_tag(env), tag_when_started_waiting) != 0) + break; + } + // If the tag has advanced, start over. + if (lf_tag_compare(lf_tag(env), tag_when_started_waiting) != 0) + continue; + + // At this point, the current tag is the same as when we started waiting + // and all ports should be known, and hence max_level_allowed_to_advance + // should be INT_MAX. Check this to prevent an infinite wait. 
+ if (max_level_allowed_to_advance != INT_MAX) { + // If this occurs, then the current tag advanced during a wait. + // Some ports may have been reset to unknown during that wait, in which case, + // it would be a huge mistake to enter the wait for a new tag below because the + // program will freeze. First, check whether any ports are unknown: + bool port_unknown = false; + for (int i = 0; i < staa_lst_size; ++i) { + staa_t* staa_elem = staa_lst[i]; + if (a_port_is_unknown(staa_elem)) { + port_unknown = true; + break; } - - // Wait until we progress to a new tag. - while (lf_tag_compare(lf_tag(env), tag_when_started_waiting) == 0) { - // The following will release the env->mutex while waiting. - LF_PRINT_DEBUG("**** (update thread) Waiting for tags to not match: " PRINTF_TAG ", " PRINTF_TAG, - lf_tag(env).time - lf_time_start(), lf_tag(env).microstep, - tag_when_started_waiting.time -lf_time_start(), tag_when_started_waiting.microstep); - // Ports are reset to unknown at the start of new tag, so that will wake this up. - lf_cond_wait(&lf_port_status_changed); - } - LF_PRINT_DEBUG("**** (update thread) Tags after wait: " PRINTF_TAG ", " PRINTF_TAG, - lf_tag(env).time - lf_time_start(), lf_tag(env).microstep, - tag_when_started_waiting.time -lf_time_start(), tag_when_started_waiting.microstep); - } - LF_MUTEX_UNLOCK(&env->mutex); + } + if (!port_unknown) { + // If this occurs, then there is a race condition that can lead to deadlocks. + lf_print_error_and_exit("**** (update thread) Inconsistency: All ports are known, but MLAA is blocking."); + } + + // Since max_level_allowed_to_advance will block advancement of time, we cannot follow + // through to the next step without deadlocking. Wait some time, then continue. + // The wait is necessary to prevent a busy wait. + lf_sleep(2 * MIN_SLEEP_DURATION); + continue; + } + + // Wait until we progress to a new tag. 
+ while (lf_tag_compare(lf_tag(env), tag_when_started_waiting) == 0) { + // The following will release the env->mutex while waiting. + LF_PRINT_DEBUG("**** (update thread) Waiting for tags to not match: " PRINTF_TAG ", " PRINTF_TAG, + lf_tag(env).time - lf_time_start(), lf_tag(env).microstep, + tag_when_started_waiting.time - lf_time_start(), tag_when_started_waiting.microstep); + // Ports are reset to unknown at the start of new tag, so that will wake this up. + lf_cond_wait(&lf_port_status_changed); + } + LF_PRINT_DEBUG("**** (update thread) Tags after wait: " PRINTF_TAG ", " PRINTF_TAG, + lf_tag(env).time - lf_time_start(), lf_tag(env).microstep, + tag_when_started_waiting.time - lf_time_start(), tag_when_started_waiting.microstep); + } + LF_MUTEX_UNLOCK(&env->mutex); } #endif // FEDERATED_DECENTRALIZED @@ -1235,98 +1209,96 @@ static void* update_ports_from_staa_offsets(void* args) { * last known tag for input ports. */ static void handle_provisional_tag_advance_grant() { - // Environment is always the one corresponding to the top-level scheduling enclave. - environment_t *env; - _lf_get_environments(&env); - - size_t bytes_to_read = sizeof(instant_t) + sizeof(microstep_t); - unsigned char buffer[bytes_to_read]; - read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_read, buffer, NULL, - "Failed to read provisional tag advance grant from RTI."); - tag_t PTAG = extract_tag(buffer); - - // Trace the event when tracing is enabled - tracepoint_federate_from_rti(_fed.trace, receive_PTAG, _lf_my_fed_id, &PTAG); - - // Note: it is important that last_known_status_tag of ports does not - // get updated to a PTAG value because a PTAG does not indicate that - // the RTI knows about the status of all ports up to and _including_ - // the value of PTAG. Only a TAG message indicates that. 
- LF_MUTEX_LOCK(&env->mutex); - - // Sanity check - if (lf_tag_compare(PTAG, _fed.last_TAG) < 0 - || (lf_tag_compare(PTAG, _fed.last_TAG) == 0 && !_fed.is_last_TAG_provisional)) { - LF_MUTEX_UNLOCK(&env->mutex); - lf_print_error_and_exit("Received a PTAG " PRINTF_TAG " that is equal or earlier " - "than an already received TAG " PRINTF_TAG ".", - PTAG.time, PTAG.microstep, - _fed.last_TAG.time, _fed.last_TAG.microstep); - } - - _fed.last_TAG = PTAG; - _fed.is_last_TAG_provisional = true; - LF_PRINT_LOG("At tag " PRINTF_TAG ", received Provisional Tag Advance Grant (PTAG): " PRINTF_TAG ".", - env->current_tag.time - start_time, env->current_tag.microstep, - _fed.last_TAG.time - start_time, _fed.last_TAG.microstep); - - // Even if we don't modify the event queue, we need to broadcast a change - // because we do not need to continue to wait for a TAG. - lf_cond_broadcast(&env->event_q_changed); - // Notify level advance thread which is blocked. - lf_update_max_level(_fed.last_TAG, _fed.is_last_TAG_provisional); - lf_cond_broadcast(&lf_port_status_changed); - - // Possibly insert a dummy event into the event queue if current time is behind - // (which it should be). Do not do this if the federate has not fully - // started yet. - - instant_t dummy_event_time = PTAG.time; - microstep_t dummy_event_relative_microstep = PTAG.microstep; - - if (lf_tag_compare(env->current_tag, PTAG) == 0) { - // The current tag can equal the PTAG if we are at the start time - // or if this federate has been able to advance time to the current - // tag (e.g., it has no upstream federates). In either case, either - // it is already treating the current tag as PTAG cycle (e.g. at the - // start time) or it will be completing the current cycle and sending - // a LTC message shortly. In either case, there is nothing more to do. - LF_MUTEX_UNLOCK(&env->mutex); - return; - } else if (lf_tag_compare(env->current_tag, PTAG) > 0) { - // Current tag is greater than the PTAG. 
- // It could be that we have sent an LTC that crossed with the incoming - // PTAG or that we have advanced to a tag greater than the PTAG. - // In the former case, there is nothing more to do. - // In the latter case, we may be blocked processing a PTAG cycle at - // a greater tag or we may be in the middle of processing a regular - // TAG. In either case, we know that at the PTAG tag, all outputs - // have either been sent or are absent, so we can send an LTC. - // Send an LTC to indicate absent outputs. - lf_latest_tag_complete(PTAG); - // Nothing more to do. - LF_MUTEX_UNLOCK(&env->mutex); - return; - } else if (PTAG.time == env->current_tag.time) { - // We now know env->current_tag < PTAG, but the times are equal. - // Adjust the microstep for scheduling the dummy event. - dummy_event_relative_microstep -= env->current_tag.microstep; - } - // We now know env->current_tag < PTAG. - - if (dummy_event_time != FOREVER) { - // Schedule a dummy event at the specified time and (relative) microstep. - LF_PRINT_DEBUG("At tag " PRINTF_TAG ", inserting into the event queue a dummy event " - "with time " PRINTF_TIME " and (relative) microstep " PRINTF_MICROSTEP ".", - env->current_tag.time - start_time, env->current_tag.microstep, - dummy_event_time - start_time, dummy_event_relative_microstep); - // Dummy event points to a NULL trigger and NULL real event. - event_t* dummy = _lf_create_dummy_events(env, - NULL, dummy_event_time, NULL, dummy_event_relative_microstep); - pqueue_insert(env->event_q, dummy); - } - + // Environment is always the one corresponding to the top-level scheduling enclave. 
+ environment_t* env; + _lf_get_environments(&env); + + size_t bytes_to_read = sizeof(instant_t) + sizeof(microstep_t); + unsigned char buffer[bytes_to_read]; + read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_read, buffer, NULL, + "Failed to read provisional tag advance grant from RTI."); + tag_t PTAG = extract_tag(buffer); + + // Trace the event when tracing is enabled + tracepoint_federate_from_rti(receive_PTAG, _lf_my_fed_id, &PTAG); + + // Note: it is important that last_known_status_tag of ports does not + // get updated to a PTAG value because a PTAG does not indicate that + // the RTI knows about the status of all ports up to and _including_ + // the value of PTAG. Only a TAG message indicates that. + LF_MUTEX_LOCK(&env->mutex); + + // Sanity check + if (lf_tag_compare(PTAG, _fed.last_TAG) < 0 || + (lf_tag_compare(PTAG, _fed.last_TAG) == 0 && !_fed.is_last_TAG_provisional)) { + LF_MUTEX_UNLOCK(&env->mutex); + lf_print_error_and_exit("Received a PTAG " PRINTF_TAG " that is equal or earlier " + "than an already received TAG " PRINTF_TAG ".", + PTAG.time, PTAG.microstep, _fed.last_TAG.time, _fed.last_TAG.microstep); + } + + _fed.last_TAG = PTAG; + _fed.is_last_TAG_provisional = true; + LF_PRINT_LOG("At tag " PRINTF_TAG ", received Provisional Tag Advance Grant (PTAG): " PRINTF_TAG ".", + env->current_tag.time - start_time, env->current_tag.microstep, _fed.last_TAG.time - start_time, + _fed.last_TAG.microstep); + + // Even if we don't modify the event queue, we need to broadcast a change + // because we do not need to continue to wait for a TAG. + lf_cond_broadcast(&env->event_q_changed); + // Notify level advance thread which is blocked. + lf_update_max_level(_fed.last_TAG, _fed.is_last_TAG_provisional); + lf_cond_broadcast(&lf_port_status_changed); + + // Possibly insert a dummy event into the event queue if current time is behind + // (which it should be). Do not do this if the federate has not fully + // started yet. 
+ + instant_t dummy_event_time = PTAG.time; + microstep_t dummy_event_relative_microstep = PTAG.microstep; + + if (lf_tag_compare(env->current_tag, PTAG) == 0) { + // The current tag can equal the PTAG if we are at the start time + // or if this federate has been able to advance time to the current + // tag (e.g., it has no upstream federates). In either case, either + // it is already treating the current tag as PTAG cycle (e.g. at the + // start time) or it will be completing the current cycle and sending + // a LTC message shortly. In either case, there is nothing more to do. + LF_MUTEX_UNLOCK(&env->mutex); + return; + } else if (lf_tag_compare(env->current_tag, PTAG) > 0) { + // Current tag is greater than the PTAG. + // It could be that we have sent an LTC that crossed with the incoming + // PTAG or that we have advanced to a tag greater than the PTAG. + // In the former case, there is nothing more to do. + // In the latter case, we may be blocked processing a PTAG cycle at + // a greater tag or we may be in the middle of processing a regular + // TAG. In either case, we know that at the PTAG tag, all outputs + // have either been sent or are absent, so we can send an LTC. + // Send an LTC to indicate absent outputs. + lf_latest_tag_complete(PTAG); + LF_MUTEX_UNLOCK(&env->mutex); // Nothing more to do. + return; + } else if (PTAG.time == env->current_tag.time) { + // We now know env->current_tag < PTAG, but the times are equal. + // Adjust the microstep for scheduling the dummy event. + dummy_event_relative_microstep -= env->current_tag.microstep; + } + // We now know env->current_tag < PTAG. + + if (dummy_event_time != FOREVER) { + // Schedule a dummy event at the specified time and (relative) microstep. 
+ LF_PRINT_DEBUG("At tag " PRINTF_TAG ", inserting into the event queue a dummy event " + "with time " PRINTF_TIME " and (relative) microstep " PRINTF_MICROSTEP ".", + env->current_tag.time - start_time, env->current_tag.microstep, dummy_event_time - start_time, + dummy_event_relative_microstep); + // Dummy event points to a NULL trigger and NULL real event. + event_t* dummy = _lf_create_dummy_events(env, NULL, dummy_event_time, NULL, dummy_event_relative_microstep); + pqueue_insert(env->event_q, dummy); + } + + LF_MUTEX_UNLOCK(&env->mutex); } /** @@ -1338,208 +1310,217 @@ static void handle_provisional_tag_advance_grant() { */ static void handle_stop_granted_message() { - size_t bytes_to_read = MSG_TYPE_STOP_GRANTED_LENGTH - 1; - unsigned char buffer[bytes_to_read]; - read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_read, buffer, NULL, - "Failed to read stop granted from RTI."); + size_t bytes_to_read = MSG_TYPE_STOP_GRANTED_LENGTH - 1; + unsigned char buffer[bytes_to_read]; + read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_read, buffer, NULL, + "Failed to read stop granted from RTI."); - tag_t received_stop_tag = extract_tag(buffer); + tag_t received_stop_tag = extract_tag(buffer); - // Trace the event when tracing is enabled - tracepoint_federate_from_rti(_fed.trace, receive_STOP_GRN, _lf_my_fed_id, &received_stop_tag); + // Trace the event when tracing is enabled + tracepoint_federate_from_rti(receive_STOP_GRN, _lf_my_fed_id, &received_stop_tag); - LF_PRINT_LOG("Received from RTI a MSG_TYPE_STOP_GRANTED message with elapsed tag " PRINTF_TAG ".", - received_stop_tag.time - start_time, received_stop_tag.microstep); + LF_PRINT_LOG("Received from RTI a MSG_TYPE_STOP_GRANTED message with elapsed tag " PRINTF_TAG ".", + received_stop_tag.time - start_time, received_stop_tag.microstep); - environment_t *env; - int num_environments = _lf_get_environments(&env); + environment_t* env; + int num_environments = _lf_get_environments(&env); - for 
(int i = 0; i < num_environments; i++) { - LF_MUTEX_LOCK(&env[i].mutex); + for (int i = 0; i < num_environments; i++) { + LF_MUTEX_LOCK(&env[i].mutex); - // Sanity check. - if (lf_tag_compare(received_stop_tag, env[i].current_tag) <= 0) { - lf_print_error("RTI granted a MSG_TYPE_STOP_GRANTED tag that is equal to or less than this federate's current tag " PRINTF_TAG ". " - "Stopping at the next microstep instead.", - env[i].current_tag.time - start_time, env[i].current_tag.microstep); - received_stop_tag = env[i].current_tag; - received_stop_tag.microstep++; - } + // Sanity check. + if (lf_tag_compare(received_stop_tag, env[i].current_tag) <= 0) { + lf_print_error("RTI granted a MSG_TYPE_STOP_GRANTED tag that is equal to or less than this federate's current " + "tag " PRINTF_TAG ". " + "Stopping at the next microstep instead.", + env[i].current_tag.time - start_time, env[i].current_tag.microstep); + received_stop_tag = env[i].current_tag; + received_stop_tag.microstep++; + } - lf_set_stop_tag(&env[i], received_stop_tag); - LF_PRINT_DEBUG("Setting the stop tag to " PRINTF_TAG ".", - env[i].stop_tag.time - start_time, - env[i].stop_tag.microstep); + lf_set_stop_tag(&env[i], received_stop_tag); + LF_PRINT_DEBUG("Setting the stop tag to " PRINTF_TAG ".", env[i].stop_tag.time - start_time, + env[i].stop_tag.microstep); - if (env[i].barrier.requestors) _lf_decrement_tag_barrier_locked(&env[i]); - lf_cond_broadcast(&env[i].event_q_changed); - LF_MUTEX_UNLOCK(&env[i].mutex); - } + if (env[i].barrier.requestors) + _lf_decrement_tag_barrier_locked(&env[i]); + lf_cond_broadcast(&env[i].event_q_changed); + LF_MUTEX_UNLOCK(&env[i].mutex); + } } /** * Handle a MSG_TYPE_STOP_REQUEST message from the RTI. 
*/ static void handle_stop_request_message() { - size_t bytes_to_read = MSG_TYPE_STOP_REQUEST_LENGTH - 1; - unsigned char buffer[bytes_to_read]; - read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_read, buffer, NULL, - "Failed to read stop request from RTI."); - tag_t tag_to_stop = extract_tag(buffer); - - // Trace the event when tracing is enabled - tracepoint_federate_from_rti(_fed.trace, receive_STOP_REQ, _lf_my_fed_id, &tag_to_stop); - LF_PRINT_LOG("Received from RTI a MSG_TYPE_STOP_REQUEST signal with tag " PRINTF_TAG ".", - tag_to_stop.time - start_time, - tag_to_stop.microstep); - - extern lf_mutex_t global_mutex; - extern bool lf_stop_requested; - bool already_blocked = false; - - LF_MUTEX_LOCK(&global_mutex); - if (lf_stop_requested) { - LF_PRINT_LOG("Ignoring MSG_TYPE_STOP_REQUEST from RTI because lf_request_stop has been called locally."); - already_blocked = true; - } - // Treat the stop request from the RTI as if a local stop request had been received. - lf_stop_requested = true; - LF_MUTEX_UNLOCK(&global_mutex); - - // If we have previously received from the RTI a stop request, - // or we have previously sent a stop request to the RTI, - // then we have already blocked tag advance in enclaves. - // Do not do this twice. The record of whether the first has occurred - // is guarded by the outbound socket mutex. - // The second is guarded by the global mutex. - // Note that the RTI should not send stop requests more than once to federates. - LF_MUTEX_LOCK(&lf_outbound_socket_mutex); - if (_fed.received_stop_request_from_rti) { - LF_PRINT_LOG("Redundant MSG_TYPE_STOP_REQUEST from RTI. Ignoring it."); - already_blocked = true; - } else if (!already_blocked) { - // Do this only if lf_request_stop has not been called because it will - // prevent lf_request_stop from sending. 
- _fed.received_stop_request_from_rti = true; - } - LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); - - if (already_blocked) { - // Either we have sent a stop request to the RTI ourselves, - // or we have previously received a stop request from the RTI. - // Nothing more to do. Tag advance is already blocked on enclaves. - return; - } + size_t bytes_to_read = MSG_TYPE_STOP_REQUEST_LENGTH - 1; + unsigned char buffer[bytes_to_read]; + read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_read, buffer, NULL, + "Failed to read stop request from RTI."); + tag_t tag_to_stop = extract_tag(buffer); + + // Trace the event when tracing is enabled + tracepoint_federate_from_rti(receive_STOP_REQ, _lf_my_fed_id, &tag_to_stop); + LF_PRINT_LOG("Received from RTI a MSG_TYPE_STOP_REQUEST signal with tag " PRINTF_TAG ".", + tag_to_stop.time - start_time, tag_to_stop.microstep); + + extern lf_mutex_t global_mutex; + extern bool lf_stop_requested; + bool already_blocked = false; + + LF_MUTEX_LOCK(&global_mutex); + if (lf_stop_requested) { + LF_PRINT_LOG("Ignoring MSG_TYPE_STOP_REQUEST from RTI because lf_request_stop has been called locally."); + already_blocked = true; + } + // Treat the stop request from the RTI as if a local stop request had been received. + lf_stop_requested = true; + LF_MUTEX_UNLOCK(&global_mutex); + + // If we have previously received from the RTI a stop request, + // or we have previously sent a stop request to the RTI, + // then we have already blocked tag advance in enclaves. + // Do not do this twice. The record of whether the first has occurred + // is guarded by the outbound socket mutex. + // The second is guarded by the global mutex. + // Note that the RTI should not send stop requests more than once to federates. + LF_MUTEX_LOCK(&lf_outbound_socket_mutex); + if (_fed.received_stop_request_from_rti) { + LF_PRINT_LOG("Redundant MSG_TYPE_STOP_REQUEST from RTI. 
Ignoring it."); + already_blocked = true; + } else if (!already_blocked) { + // Do this only if lf_request_stop has not been called because it will + // prevent lf_request_stop from sending. + _fed.received_stop_request_from_rti = true; + } + LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); + + if (already_blocked) { + // Either we have sent a stop request to the RTI ourselves, + // or we have previously received a stop request from the RTI. + // Nothing more to do. Tag advance is already blocked on enclaves. + return; + } + + // Iterate over the scheduling enclaves to find the maximum current tag + // and adjust the tag_to_stop if any of those is greater than tag_to_stop. + // If not done previously, block tag advance in the enclave. + environment_t* env; + int num_environments = _lf_get_environments(&env); + for (int i = 0; i < num_environments; i++) { + LF_MUTEX_LOCK(&env[i].mutex); + if (lf_tag_compare(tag_to_stop, env[i].current_tag) <= 0) { + // Can't stop at the requested tag. Make a counteroffer. + tag_to_stop = env->current_tag; + tag_to_stop.microstep++; + } + // Set a barrier to prevent the enclave from advancing past the so-far tag to stop. + _lf_increment_tag_barrier_locked(&env[i], tag_to_stop); + + LF_MUTEX_UNLOCK(&env[i].mutex); + } + // Send the reply, which is the least tag at which we can stop. + unsigned char outgoing_buffer[MSG_TYPE_STOP_REQUEST_REPLY_LENGTH]; + ENCODE_STOP_REQUEST_REPLY(outgoing_buffer, tag_to_stop.time, tag_to_stop.microstep); + + // Trace the event when tracing is enabled + tracepoint_federate_to_rti(send_STOP_REQ_REP, _lf_my_fed_id, &tag_to_stop); + + // Send the current logical time to the RTI. 
+ LF_MUTEX_LOCK(&lf_outbound_socket_mutex); + write_to_socket_fail_on_error(&_fed.socket_TCP_RTI, MSG_TYPE_STOP_REQUEST_REPLY_LENGTH, outgoing_buffer, + &lf_outbound_socket_mutex, + "Failed to send the answer to MSG_TYPE_STOP_REQUEST to RTI."); + LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); + + LF_PRINT_DEBUG("Sent MSG_TYPE_STOP_REQUEST_REPLY to RTI with tag " PRINTF_TAG, tag_to_stop.time, + tag_to_stop.microstep); +} - // Iterate over the scheduling enclaves to find the maximum current tag - // and adjust the tag_to_stop if any of those is greater than tag_to_stop. - // If not done previously, block tag advance in the enclave. - environment_t *env; - int num_environments = _lf_get_environments(&env); - for (int i = 0; i < num_environments; i++) { - LF_MUTEX_LOCK(&env[i].mutex); - if (lf_tag_compare(tag_to_stop, env[i].current_tag) <= 0) { - // Can't stop at the requested tag. Make a counteroffer. - tag_to_stop = env->current_tag; - tag_to_stop.microstep++; - } - // Set a barrier to prevent the enclave from advancing past the so-far tag to stop. - _lf_increment_tag_barrier_locked(&env[i], tag_to_stop); +/** + * Handle a downstream next event tag (DNET) message from the RTI. + * FIXME: Use this tag to eliminate unnecessary LTC or NET messages. + */ +static void handle_downstream_next_event_tag() { + size_t bytes_to_read = sizeof(instant_t) + sizeof(microstep_t); + unsigned char buffer[bytes_to_read]; + read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_read, buffer, NULL, + "Failed to read downstream next event tag from RTI."); + tag_t DNET = extract_tag(buffer); 
- unsigned char outgoing_buffer[MSG_TYPE_STOP_REQUEST_REPLY_LENGTH]; - ENCODE_STOP_REQUEST_REPLY(outgoing_buffer, tag_to_stop.time, tag_to_stop.microstep); + // Trace the event when tracing is enabled + tracepoint_federate_from_rti(receive_DNET, _lf_my_fed_id, &DNET); - // Trace the event when tracing is enabled - tracepoint_federate_to_rti(_fed.trace, send_STOP_REQ_REP, _lf_my_fed_id, &tag_to_stop); + LF_PRINT_LOG("Received Downstream Next Event Tag (DNET): " PRINTF_TAG ".", DNET.time - start_time, DNET.microstep); - // Send the current logical time to the RTI. - LF_MUTEX_LOCK(&lf_outbound_socket_mutex); - write_to_socket_fail_on_error( - &_fed.socket_TCP_RTI, MSG_TYPE_STOP_REQUEST_REPLY_LENGTH, outgoing_buffer, &lf_outbound_socket_mutex, - "Failed to send the answer to MSG_TYPE_STOP_REQUEST to RTI."); - LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); + _fed.last_DNET = DNET; - LF_PRINT_DEBUG("Sent MSG_TYPE_STOP_REQUEST_REPLY to RTI with tag " PRINTF_TAG, - tag_to_stop.time, tag_to_stop.microstep); + if (lf_tag_compare(_fed.last_skipped_LTC, NEVER_TAG) != 0 && + lf_tag_compare(_fed.last_skipped_LTC, _fed.last_DNET) >= 0) { + send_tag(MSG_TYPE_LATEST_TAG_COMPLETE, _fed.last_skipped_LTC); + _fed.last_skipped_LTC = NEVER_TAG; + } } /** * Handle a downstream next event tag (DNET) message from the RTI. * FIXME: Use this tag to eliminate unncessary LTC or NET messages. 
-*/ + */ static void handle_downstream_next_event_tag() { - size_t bytes_to_read = sizeof(instant_t) + sizeof(microstep_t); - unsigned char buffer[bytes_to_read]; - read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_read, buffer, NULL, - "Failed to read downstream next event tag from RTI."); - tag_t DNET = extract_tag(buffer); + size_t bytes_to_read = sizeof(instant_t) + sizeof(microstep_t); + unsigned char buffer[bytes_to_read]; + read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_read, buffer, NULL, + "Failed to read downstream next event tag from RTI."); + tag_t DNET = extract_tag(buffer); - // Trace the event when tracing is enabled - tracepoint_federate_from_rti(_fed.trace, receive_DNET, _lf_my_fed_id, &DNET); + // Trace the event when tracing is enabled + tracepoint_federate_from_rti(receive_DNET, _lf_my_fed_id, &DNET); - LF_PRINT_LOG("Received Downstream Next Event Tag (DNET): " PRINTF_TAG ".", - DNET.time - start_time, DNET.microstep); + LF_PRINT_LOG("Received Downstream Next Event Tag (DNET): " PRINTF_TAG ".", DNET.time - start_time, DNET.microstep); - _fed.last_DNET = DNET; + _fed.last_DNET = DNET; - if (lf_tag_compare(_fed.last_skipped_LTC, NEVER_TAG) != 0 - && lf_tag_compare(_fed.last_skipped_LTC, _fed.last_DNET) >= 0) { - send_tag(MSG_TYPE_LATEST_TAG_COMPLETE, _fed.last_skipped_LTC); - _fed.last_skipped_LTC = NEVER_TAG; - } + if (lf_tag_compare(_fed.last_skipped_LTC, NEVER_TAG) != 0 && + lf_tag_compare(_fed.last_skipped_LTC, _fed.last_DNET) >= 0) { + send_tag(MSG_TYPE_LATEST_TAG_COMPLETE, _fed.last_skipped_LTC); + _fed.last_skipped_LTC = NEVER_TAG; + } } /** * Send a resign signal to the RTI. 
*/ static void send_resign_signal(environment_t* env) { - size_t bytes_to_write = 1; - unsigned char buffer[bytes_to_write]; - buffer[0] = MSG_TYPE_RESIGN; - LF_MUTEX_LOCK(&lf_outbound_socket_mutex); - write_to_socket_fail_on_error( - &_fed.socket_TCP_RTI, bytes_to_write, &(buffer[0]), &lf_outbound_socket_mutex, - "Failed to send MSG_TYPE_RESIGN."); - LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); - LF_PRINT_LOG("Resigned."); + size_t bytes_to_write = 1; + unsigned char buffer[bytes_to_write]; + buffer[0] = MSG_TYPE_RESIGN; + LF_MUTEX_LOCK(&lf_outbound_socket_mutex); + write_to_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_write, &(buffer[0]), &lf_outbound_socket_mutex, + "Failed to send MSG_TYPE_RESIGN."); + LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); + LF_PRINT_LOG("Resigned."); } /** * Send a failed signal to the RTI. */ static void send_failed_signal(environment_t* env) { - size_t bytes_to_write = 1; - unsigned char buffer[bytes_to_write]; - buffer[0] = MSG_TYPE_FAILED; - write_to_socket_fail_on_error( - &_fed.socket_TCP_RTI, bytes_to_write, &(buffer[0]), NULL, - "Failed to send MSG_TYPE_FAILED."); - LF_PRINT_LOG("Failed."); -} - -/** - * @brief Stop the traces associated with all environments in the program. - */ -static void stop_all_traces() { - environment_t *env; - int num_envs = _lf_get_environments(&env); - for (int i = 0; i < num_envs; i++) { - stop_trace(env[i].trace); - } + size_t bytes_to_write = 1; + unsigned char buffer[bytes_to_write]; + buffer[0] = MSG_TYPE_FAILED; + write_to_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_write, &(buffer[0]), NULL, + "Failed to send MSG_TYPE_FAILED."); + LF_PRINT_LOG("Failed."); } /** * Handle a failed signal from the RTI. The RTI will only fail * if it is forced to exit, e.g. by a SIG_INT. Hence, this federate * will exit immediately with an error condition, counting on the - * termination functions to handle any cleanup needed. + * termination functions to handle any cleanup needed. 
*/ -static void handle_rti_failed_message(void) { - exit(1); -} +static void handle_rti_failed_message(void) { exit(1); } /** * Thread that listens for TCP inputs from the RTI. @@ -1547,88 +1528,86 @@ static void handle_rti_failed_message(void) { * @param args Ignored */ static void* listen_to_rti_TCP(void* args) { - // Buffer for incoming messages. - // This does not constrain the message size - // because the message will be put into malloc'd memory. - unsigned char buffer[FED_COM_BUFFER_SIZE]; - - // Listen for messages from the federate. - while (1) { - // Check whether the RTI socket is still valid - if (_fed.socket_TCP_RTI < 0) { - lf_print_warning("Socket to the RTI unexpectedly closed."); - return NULL; - } - // Read one byte to get the message type. - // This will exit if the read fails. - int read_failed = read_from_socket(_fed.socket_TCP_RTI, 1, buffer); - if (read_failed < 0) { - if (errno == ECONNRESET) { - lf_print_error("Socket connection to the RTI was closed by the RTI without" - " properly sending an EOF first. Considering this a soft error."); - // FIXME: If this happens, possibly a new RTI must be elected. - _fed.socket_TCP_RTI = -1; - return NULL; - } else { - lf_print_error("Socket connection to the RTI has been broken with error %d: %s." - " The RTI should close connections with an EOF first." - " Considering this a soft error.", - errno, - strerror(errno)); - // FIXME: If this happens, possibly a new RTI must be elected. - _fed.socket_TCP_RTI = -1; - return NULL; - } - } else if (read_failed > 0) { - // EOF received. - lf_print("Connection to the RTI closed with an EOF."); - _fed.socket_TCP_RTI = -1; - stop_all_traces(); - return NULL; - } - switch (buffer[0]) { - case MSG_TYPE_TAGGED_MESSAGE: - if (handle_tagged_message(&_fed.socket_TCP_RTI, -1)) { - // Failures to complete the read of messages from the RTI are fatal. 
- lf_print_error_and_exit("Failed to complete the reading of a message from the RTI."); - } - break; - case MSG_TYPE_TAG_ADVANCE_GRANT: - handle_tag_advance_grant(); - break; - case MSG_TYPE_PROVISIONAL_TAG_ADVANCE_GRANT: - handle_provisional_tag_advance_grant(); - break; - case MSG_TYPE_STOP_REQUEST: - handle_stop_request_message(); - break; - case MSG_TYPE_STOP_GRANTED: - handle_stop_granted_message(); - break; - case MSG_TYPE_PORT_ABSENT: - if (handle_port_absent_message(&_fed.socket_TCP_RTI, -1)) { - // Failures to complete the read of absent messages from the RTI are fatal. - lf_print_error_and_exit("Failed to complete the reading of an absent message from the RTI."); - } - break; - case MSG_TYPE_DOWNSTREAM_NEXT_EVENT_TAG: - handle_downstream_next_event_tag(); - break; - case MSG_TYPE_FAILED: - handle_rti_failed_message(); - break; - case MSG_TYPE_CLOCK_SYNC_T1: - case MSG_TYPE_CLOCK_SYNC_T4: - lf_print_error("Federate %d received unexpected clock sync message from RTI on TCP socket.", - _lf_my_fed_id); - break; - default: - lf_print_error_and_exit("Received from RTI an unrecognized TCP message type: %hhx.", buffer[0]); - // Trace the event when tracing is enabled - tracepoint_federate_from_rti(_fed.trace, receive_UNIDENTIFIED, _lf_my_fed_id, NULL); - } - } - return NULL; + initialize_lf_thread_id(); + // Buffer for incoming messages. + // This does not constrain the message size + // because the message will be put into malloc'd memory. + unsigned char buffer[FED_COM_BUFFER_SIZE]; + + // Listen for messages from the federate. + while (1) { + // Check whether the RTI socket is still valid + if (_fed.socket_TCP_RTI < 0) { + lf_print_warning("Socket to the RTI unexpectedly closed."); + return NULL; + } + // Read one byte to get the message type. + // This will exit if the read fails. 
+ int read_failed = read_from_socket(_fed.socket_TCP_RTI, 1, buffer); + if (read_failed < 0) { + if (errno == ECONNRESET) { + lf_print_error("Socket connection to the RTI was closed by the RTI without" + " properly sending an EOF first. Considering this a soft error."); + // FIXME: If this happens, possibly a new RTI must be elected. + _fed.socket_TCP_RTI = -1; + return NULL; + } else { + lf_print_error("Socket connection to the RTI has been broken with error %d: %s." + " The RTI should close connections with an EOF first." + " Considering this a soft error.", + errno, strerror(errno)); + // FIXME: If this happens, possibly a new RTI must be elected. + _fed.socket_TCP_RTI = -1; + return NULL; + } + } else if (read_failed > 0) { + // EOF received. + lf_print("Connection to the RTI closed with an EOF."); + _fed.socket_TCP_RTI = -1; + return NULL; + } + switch (buffer[0]) { + case MSG_TYPE_TAGGED_MESSAGE: + if (handle_tagged_message(&_fed.socket_TCP_RTI, -1)) { + // Failures to complete the read of messages from the RTI are fatal. + lf_print_error_and_exit("Failed to complete the reading of a message from the RTI."); + } + break; + case MSG_TYPE_TAG_ADVANCE_GRANT: + handle_tag_advance_grant(); + break; + case MSG_TYPE_PROVISIONAL_TAG_ADVANCE_GRANT: + handle_provisional_tag_advance_grant(); + break; + case MSG_TYPE_STOP_REQUEST: + handle_stop_request_message(); + break; + case MSG_TYPE_STOP_GRANTED: + handle_stop_granted_message(); + break; + case MSG_TYPE_PORT_ABSENT: + if (handle_port_absent_message(&_fed.socket_TCP_RTI, -1)) { + // Failures to complete the read of absent messages from the RTI are fatal. 
+ lf_print_error_and_exit("Failed to complete the reading of an absent message from the RTI."); + } + break; + case MSG_TYPE_DOWNSTREAM_NEXT_EVENT_TAG: + handle_downstream_next_event_tag(); + break; + case MSG_TYPE_FAILED: + handle_rti_failed_message(); + break; + case MSG_TYPE_CLOCK_SYNC_T1: + case MSG_TYPE_CLOCK_SYNC_T4: + lf_print_error("Federate %d received unexpected clock sync message from RTI on TCP socket.", _lf_my_fed_id); + break; + default: + lf_print_error_and_exit("Received from RTI an unrecognized TCP message type: %hhx.", buffer[0]); + // Trace the event when tracing is enabled + tracepoint_federate_from_rti(receive_UNIDENTIFIED, _lf_my_fed_id, NULL); + } + } + return NULL; } /** @@ -1645,41 +1624,38 @@ static void* listen_to_rti_TCP(void* args) { * modified. */ static bool bounded_NET(tag_t* tag) { - // The tag sent by this function is a promise that, absent - // inputs from another federate, this federate will not produce events - // earlier than t. But if there are downstream federates and there is - // a physical action (not counting receivers from upstream federates), - // then we can only promise up to current physical time (plus the minimum - // of all minimum delays on the physical actions). - // In this case, we send a NET message with the current physical time - // to permit downstream federates to advance. To avoid - // overwhelming the network, this NET message should be sent periodically - // at specified intervals controlled by the target parameter - // coordination-options: {advance-message-interval: time units}. - // The larger the interval, the more downstream federates will lag - // behind real time, but the less network traffic. If this option is - // missing, we issue a warning message suggesting that a redesign - // might be in order so that outputs don't depend on physical actions. - LF_PRINT_DEBUG("Checking NET to see whether it should be bounded by physical time." 
- " Min delay from physical action: " PRINTF_TIME ".", - _fed.min_delay_from_physical_action_to_federate_output); - if (_fed.min_delay_from_physical_action_to_federate_output >= 0LL - && _fed.has_downstream - ) { - // There is a physical action upstream of some output from this - // federate, and there is at least one downstream federate. - // Compare the tag to the current physical time. - instant_t physical_time = lf_time_physical(); - if (physical_time + _fed.min_delay_from_physical_action_to_federate_output < tag->time) { - // Can only promise up and not including this new time: - tag->time = physical_time + _fed.min_delay_from_physical_action_to_federate_output - 1L; - tag->microstep = 0; - LF_PRINT_LOG("Has physical actions that bound NET to " PRINTF_TAG ".", - tag->time - start_time, tag->microstep); - return true; - } - } - return false; + // The tag sent by this function is a promise that, absent + // inputs from another federate, this federate will not produce events + // earlier than t. But if there are downstream federates and there is + // a physical action (not counting receivers from upstream federates), + // then we can only promise up to current physical time (plus the minimum + // of all minimum delays on the physical actions). + // In this case, we send a NET message with the current physical time + // to permit downstream federates to advance. To avoid + // overwhelming the network, this NET message should be sent periodically + // at specified intervals controlled by the target parameter + // coordination-options: {advance-message-interval: time units}. + // The larger the interval, the more downstream federates will lag + // behind real time, but the less network traffic. If this option is + // missing, we issue a warning message suggesting that a redesign + // might be in order so that outputs don't depend on physical actions. + LF_PRINT_DEBUG("Checking NET to see whether it should be bounded by physical time." 
+ " Min delay from physical action: " PRINTF_TIME ".", + _fed.min_delay_from_physical_action_to_federate_output); + if (_fed.min_delay_from_physical_action_to_federate_output >= 0LL && _fed.has_downstream) { + // There is a physical action upstream of some output from this + // federate, and there is at least one downstream federate. + // Compare the tag to the current physical time. + instant_t physical_time = lf_time_physical(); + if (physical_time + _fed.min_delay_from_physical_action_to_federate_output < tag->time) { + // Can only promise up and not including this new time: + tag->time = physical_time + _fed.min_delay_from_physical_action_to_federate_output - 1L; + tag->microstep = 0; + LF_PRINT_LOG("Has physical actions that bound NET to " PRINTF_TAG ".", tag->time - start_time, tag->microstep); + return true; + } + } + return false; } ////////////////////////////////////////////////////////////////////////////////// @@ -1695,1185 +1671,1107 @@ static bool bounded_NET(tag_t* tag) { * @param env The environment of the federate */ void lf_terminate_execution(environment_t* env) { - assert(env != GLOBAL_ENVIRONMENT); - - // For an abnormal termination (e.g. a SIGINT), we need to send a - // MSG_TYPE_FAILED message to the RTI, but we should not acquire a mutex. - if (_fed.socket_TCP_RTI >= 0) { - if (_lf_normal_termination) { - tracepoint_federate_to_rti(_fed.trace, send_RESIGN, _lf_my_fed_id, &env->current_tag); - send_resign_signal(env); - } else { - tracepoint_federate_to_rti(_fed.trace, send_FAILED, _lf_my_fed_id, &env->current_tag); - send_failed_signal(env); - } - } + assert(env != GLOBAL_ENVIRONMENT); - LF_PRINT_DEBUG("Closing incoming P2P sockets."); - // Close any incoming P2P sockets that are still open. - for (int i=0; i < NUMBER_OF_FEDERATES; i++) { - close_inbound_socket(i, 1); - // Ignore errors. Mark the socket closed. 
- _fed.sockets_for_inbound_p2p_connections[i] = -1; - } - - // Check for all outgoing physical connections in - // _fed.sockets_for_outbound_p2p_connections and - // if the socket ID is not -1, the connection is still open. - // Send an EOF by closing the socket here. - for (int i=0; i < NUMBER_OF_FEDERATES; i++) { - - // Close outbound connections, in case they have not closed themselves. - // This will result in EOF being sent to the remote federate, except for - // abnormal termination, in which case it will just close the socket. - int flag = _lf_normal_termination? 1 : -1; - close_outbound_socket(i, flag); - } - - LF_PRINT_DEBUG("Waiting for inbound p2p socket listener threads."); - // Wait for each inbound socket listener thread to close. - if (_fed.number_of_inbound_p2p_connections > 0 && _fed.inbound_socket_listeners != NULL) { - LF_PRINT_LOG("Waiting for %zu threads listening for incoming messages to exit.", - _fed.number_of_inbound_p2p_connections); - for (int i=0; i < _fed.number_of_inbound_p2p_connections; i++) { - // Ignoring errors here. - lf_thread_join(_fed.inbound_socket_listeners[i], NULL); - } - } - - LF_PRINT_DEBUG("Waiting for RTI's socket listener threads."); - // Wait for the thread listening for messages from the RTI to close. - lf_thread_join(_fed.RTI_socket_listener, NULL); - - // For abnormal termination, there is no need to free memory. + // For an abnormal termination (e.g. a SIGINT), we need to send a + // MSG_TYPE_FAILED message to the RTI, but we should not acquire a mutex. 
+ if (_fed.socket_TCP_RTI >= 0) { if (_lf_normal_termination) { - LF_PRINT_DEBUG("Freeing memory occupied by the federate."); - free(_fed.inbound_socket_listeners); - free(federation_metadata.rti_host); - free(federation_metadata.rti_user); - } + tracepoint_federate_to_rti(send_RESIGN, _lf_my_fed_id, &env->current_tag); + send_resign_signal(env); + } else { + tracepoint_federate_to_rti(send_FAILED, _lf_my_fed_id, &env->current_tag); + send_failed_signal(env); + } + } + + LF_PRINT_DEBUG("Closing incoming P2P sockets."); + // Close any incoming P2P sockets that are still open. + for (int i = 0; i < NUMBER_OF_FEDERATES; i++) { + close_inbound_socket(i, 1); + // Ignore errors. Mark the socket closed. + _fed.sockets_for_inbound_p2p_connections[i] = -1; + } + + // Check for all outgoing physical connections in + // _fed.sockets_for_outbound_p2p_connections and + // if the socket ID is not -1, the connection is still open. + // Send an EOF by closing the socket here. + for (int i = 0; i < NUMBER_OF_FEDERATES; i++) { + + // Close outbound connections, in case they have not closed themselves. + // This will result in EOF being sent to the remote federate, except for + // abnormal termination, in which case it will just close the socket. + int flag = _lf_normal_termination ? 1 : -1; + close_outbound_socket(i, flag); + } + + LF_PRINT_DEBUG("Waiting for inbound p2p socket listener threads."); + // Wait for each inbound socket listener thread to close. + if (_fed.number_of_inbound_p2p_connections > 0 && _fed.inbound_socket_listeners != NULL) { + LF_PRINT_LOG("Waiting for %zu threads listening for incoming messages to exit.", + _fed.number_of_inbound_p2p_connections); + for (int i = 0; i < _fed.number_of_inbound_p2p_connections; i++) { + // Ignoring errors here. + lf_thread_join(_fed.inbound_socket_listeners[i], NULL); + } + } + + LF_PRINT_DEBUG("Waiting for RTI's socket listener threads."); + // Wait for the thread listening for messages from the RTI to close. 
+ lf_thread_join(_fed.RTI_socket_listener, NULL); + + // For abnormal termination, there is no need to free memory. + if (_lf_normal_termination) { + LF_PRINT_DEBUG("Freeing memory occupied by the federate."); + free(_fed.inbound_socket_listeners); + free(federation_metadata.rti_host); + free(federation_metadata.rti_user); + } } - ////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////// // Public functions (declared in federate.h, in alphabetical order) void lf_connect_to_federate(uint16_t remote_federate_id) { - int result = -1; - int count_retries = 0; - - // Ask the RTI for port number of the remote federate. - // The buffer is used for both sending and receiving replies. - // The size is what is needed for receiving replies. - unsigned char buffer[sizeof(int32_t) + INET_ADDRSTRLEN + 1]; - int port = -1; - struct in_addr host_ip_addr; - int count_tries = 0; - while (port == -1 && !_lf_termination_executed) { - buffer[0] = MSG_TYPE_ADDRESS_QUERY; - // NOTE: Sending messages in little endian. - encode_uint16(remote_federate_id, &(buffer[1])); - - LF_PRINT_DEBUG("Sending address query for federate %d.", remote_federate_id); - // Trace the event when tracing is enabled - tracepoint_federate_to_rti(_fed.trace, send_ADR_QR, _lf_my_fed_id, NULL); - - LF_MUTEX_LOCK(&lf_outbound_socket_mutex); - write_to_socket_fail_on_error( - &_fed.socket_TCP_RTI, sizeof(uint16_t) + 1, buffer, &lf_outbound_socket_mutex, - "Failed to send address query for federate %d to RTI.", - remote_federate_id); - LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); - - // Read RTI's response. - read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, sizeof(int32_t) + 1, buffer, NULL, - "Failed to read the requested port number for federate %d from RTI.", - remote_federate_id); - - if (buffer[0] != MSG_TYPE_ADDRESS_QUERY) { - // Unexpected reply. Could be that RTI has failed and sent a resignation. 
- if (buffer[0] == MSG_TYPE_FAILED) { - lf_print_error_and_exit("RTI has failed."); - } else { - lf_print_error_and_exit("Unexpected reply of type %hhu from RTI (see net_common.h).", buffer[0]); - } - } - port = extract_int32(&buffer[1]); - - read_from_socket_fail_on_error( - &_fed.socket_TCP_RTI, sizeof(host_ip_addr), (unsigned char*)&host_ip_addr, NULL, - "Failed to read the IP address for federate %d from RTI.", - remote_federate_id); - - // A reply of -1 for the port means that the RTI does not know - // the port number of the remote federate, presumably because the - // remote federate has not yet sent an MSG_TYPE_ADDRESS_ADVERTISEMENT message to the RTI. - // Sleep for some time before retrying. - if (port == -1) { - if (count_tries++ >= CONNECT_MAX_RETRIES) { - lf_print_error_and_exit("TIMEOUT obtaining IP/port for federate %d from the RTI.", - remote_federate_id); - } - // Wait ADDRESS_QUERY_RETRY_INTERVAL nanoseconds. - lf_sleep(ADDRESS_QUERY_RETRY_INTERVAL); - } - } - assert(port < 65536); - assert(port > 0); - uint16_t uport = (uint16_t)port; + int result = -1; + int count_retries = 0; + + // Ask the RTI for port number of the remote federate. + // The buffer is used for both sending and receiving replies. + // The size is what is needed for receiving replies. + unsigned char buffer[sizeof(int32_t) + INET_ADDRSTRLEN + 1]; + int port = -1; + struct in_addr host_ip_addr; + int count_tries = 0; + while (port == -1 && !_lf_termination_executed) { + buffer[0] = MSG_TYPE_ADDRESS_QUERY; + // NOTE: Sending messages in little endian. 
+ encode_uint16(remote_federate_id, &(buffer[1])); + + LF_PRINT_DEBUG("Sending address query for federate %d.", remote_federate_id); + // Trace the event when tracing is enabled + tracepoint_federate_to_rti(send_ADR_QR, _lf_my_fed_id, NULL); + + LF_MUTEX_LOCK(&lf_outbound_socket_mutex); + write_to_socket_fail_on_error(&_fed.socket_TCP_RTI, sizeof(uint16_t) + 1, buffer, &lf_outbound_socket_mutex, + "Failed to send address query for federate %d to RTI.", remote_federate_id); + LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); + + // Read RTI's response. + read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, sizeof(int32_t) + 1, buffer, NULL, + "Failed to read the requested port number for federate %d from RTI.", + remote_federate_id); + + if (buffer[0] != MSG_TYPE_ADDRESS_QUERY_REPLY) { + // Unexpected reply. Could be that RTI has failed and sent a resignation. + if (buffer[0] == MSG_TYPE_FAILED) { + lf_print_error_and_exit("RTI has failed."); + } else { + lf_print_error_and_exit("Unexpected reply of type %hhu from RTI (see net_common.h).", buffer[0]); + } + } + port = extract_int32(&buffer[1]); + + read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, sizeof(host_ip_addr), (unsigned char*)&host_ip_addr, NULL, + "Failed to read the IP address for federate %d from RTI.", remote_federate_id); + + // A reply of -1 for the port means that the RTI does not know + // the port number of the remote federate, presumably because the + // remote federate has not yet sent an MSG_TYPE_ADDRESS_ADVERTISEMENT message to the RTI. + // Sleep for some time before retrying. + if (port == -1) { + if (count_tries++ >= CONNECT_MAX_RETRIES) { + lf_print_error_and_exit("TIMEOUT obtaining IP/port for federate %d from the RTI.", remote_federate_id); + } + // Wait ADDRESS_QUERY_RETRY_INTERVAL nanoseconds. 
+ lf_sleep(ADDRESS_QUERY_RETRY_INTERVAL); + } + } + assert(port < 65536); + assert(port > 0); + uint16_t uport = (uint16_t)port; #if LOG_LEVEL > 3 - // Print the received IP address in a human readable format - // Create the human readable format of the received address. - // This is avoided unless LOG_LEVEL is high enough to - // subdue the overhead caused by inet_ntop(). - char hostname[INET_ADDRSTRLEN]; - inet_ntop(AF_INET, &host_ip_addr, hostname, INET_ADDRSTRLEN); - LF_PRINT_LOG("Received address %s port %d for federate %d from RTI.", - hostname, uport, remote_federate_id); + // Print the received IP address in a human readable format + // Create the human readable format of the received address. + // This is avoided unless LOG_LEVEL is high enough to + // subdue the overhead caused by inet_ntop(). + char hostname[INET_ADDRSTRLEN]; + inet_ntop(AF_INET, &host_ip_addr, hostname, INET_ADDRSTRLEN); + LF_PRINT_LOG("Received address %s port %d for federate %d from RTI.", hostname, uport, remote_federate_id); #endif - // Iterate until we either successfully connect or exceed the number of - // attempts given by CONNECT_MAX_RETRIES. - int socket_id = -1; - while (result < 0 && !_lf_termination_executed) { - // Create an IPv4 socket for TCP (not UDP) communication over IP (0). - socket_id = create_real_time_tcp_socket_errexit(); - - // Server file descriptor. - struct sockaddr_in server_fd; - // Zero out the server_fd struct. - bzero((char*)&server_fd, sizeof(server_fd)); - - // Set up the server_fd fields. - server_fd.sin_family = AF_INET; // IPv4 - server_fd.sin_addr = host_ip_addr; // Received from the RTI - - // Convert the port number from host byte order to network byte order. 
- server_fd.sin_port = htons(uport); - result = connect( - socket_id, - (struct sockaddr *)&server_fd, - sizeof(server_fd)); - - if (result != 0) { - lf_print_error("Failed to connect to federate %d on port %d.", remote_federate_id, uport); - - // Try again after some time if the connection failed. - // Note that this should not really happen since the remote federate should be - // accepting socket connections. But possibly it will be busy (in process of accepting - // another socket connection?). Hence, we retry. - count_retries++; - if (count_retries > CONNECT_MAX_RETRIES) { - // If the remote federate is not accepting the connection after CONNECT_MAX_RETRIES - // treat it as a soft error condition and return. - lf_print_error("Failed to connect to federate %d after %d retries. Giving up.", - remote_federate_id, CONNECT_MAX_RETRIES); - return; - } - lf_print_warning("Could not connect to federate %d. Will try again every" PRINTF_TIME "nanoseconds.\n", - remote_federate_id, ADDRESS_QUERY_RETRY_INTERVAL); - - // Check whether the RTI is still there. - if (rti_failed()) break; - - // Wait ADDRESS_QUERY_RETRY_INTERVAL nanoseconds. - lf_sleep(ADDRESS_QUERY_RETRY_INTERVAL); - } else { - // Connect was successful. - size_t buffer_length = 1 + sizeof(uint16_t) + 1; - unsigned char buffer[buffer_length]; - buffer[0] = MSG_TYPE_P2P_SENDING_FED_ID; - if (_lf_my_fed_id > UINT16_MAX) { - // This error is very unlikely to occur. - lf_print_error_and_exit("Too many federates! More than %d.", UINT16_MAX); - } - encode_uint16((uint16_t)_lf_my_fed_id, (unsigned char*)&(buffer[1])); - unsigned char federation_id_length = (unsigned char)strnlen(federation_metadata.federation_id, 255); - buffer[sizeof(uint16_t) + 1] = federation_id_length; - // Trace the event when tracing is enabled - tracepoint_federate_to_federate(_fed.trace, send_FED_ID, _lf_my_fed_id, remote_federate_id, NULL); - - // No need for a mutex because we have the only handle on the socket. 
- write_to_socket_fail_on_error(&socket_id, - buffer_length, buffer, NULL, - "Failed to send fed_id to federate %d.", remote_federate_id); - write_to_socket_fail_on_error(&socket_id, - federation_id_length, (unsigned char*)federation_metadata.federation_id, NULL, - "Failed to send federation id to federate %d.", - remote_federate_id); - - read_from_socket_fail_on_error(&socket_id, 1, (unsigned char*)buffer, NULL, - "Failed to read MSG_TYPE_ACK from federate %d in response to sending fed_id.", - remote_federate_id); - if (buffer[0] != MSG_TYPE_ACK) { - // Get the error code. - read_from_socket_fail_on_error(&socket_id, 1, (unsigned char*)buffer, NULL, - "Failed to read error code from federate %d in response to sending fed_id.", remote_federate_id); - lf_print_error("Received MSG_TYPE_REJECT message from remote federate (%d).", buffer[0]); - result = -1; - continue; - } else { - lf_print("Connected to federate %d, port %d.", remote_federate_id, port); - // Trace the event when tracing is enabled - tracepoint_federate_to_federate(_fed.trace, receive_ACK, _lf_my_fed_id, remote_federate_id, NULL); - } - } - } - // Once we set this variable, then all future calls to close() on this - // socket ID should reset it to -1 within a critical section. - _fed.sockets_for_outbound_p2p_connections[remote_federate_id] = socket_id; -} - -void lf_connect_to_rti(const char* hostname, int port) { - LF_PRINT_LOG("Connecting to the RTI."); - - // Override passed hostname and port if passed as runtime arguments. - hostname = federation_metadata.rti_host ? federation_metadata.rti_host : hostname; - port = federation_metadata.rti_port >= 0 ? federation_metadata.rti_port : port; - - // Adjust the port. - uint16_t uport = 0; - if (port < 0 || port > INT16_MAX) { - lf_print_error( - "lf_connect_to_rti(): Specified port (%d) is out of range," - " using the default port %d instead.", - port, DEFAULT_PORT - ); - uport = DEFAULT_PORT; - port = 0; // Mark so that increments occur between tries. 
- } else { - uport = (uint16_t)port; - } - if (uport == 0) { - uport = DEFAULT_PORT; - } + // Iterate until we either successfully connect or exceed the number of + // attempts given by CONNECT_MAX_RETRIES. + int socket_id = -1; + while (result < 0 && !_lf_termination_executed) { + // Create an IPv4 socket for TCP (not UDP) communication over IP (0). + socket_id = create_real_time_tcp_socket_errexit(); - // Create a socket - _fed.socket_TCP_RTI = create_real_time_tcp_socket_errexit(); + // Server file descriptor. + struct sockaddr_in server_fd; + // Zero out the server_fd struct. + bzero((char*)&server_fd, sizeof(server_fd)); - int result = -1; - int count_retries = 0; - struct addrinfo* res = NULL; + // Set up the server_fd fields. + server_fd.sin_family = AF_INET; // IPv4 + server_fd.sin_addr = host_ip_addr; // Received from the RTI - while (count_retries++ < CONNECT_MAX_RETRIES && !_lf_termination_executed) { - if (res != NULL) { - // This is a repeated attempt. - if (_fed.socket_TCP_RTI >= 0) close_rti_socket(); + // Convert the port number from host byte order to network byte order. + server_fd.sin_port = htons(uport); + result = connect(socket_id, (struct sockaddr*)&server_fd, sizeof(server_fd)); - lf_sleep(CONNECT_RETRY_INTERVAL); + if (result != 0) { + lf_print_error("Failed to connect to federate %d on port %d.", remote_federate_id, uport); + + // Try again after some time if the connection failed. + // Note that this should not really happen since the remote federate should be + // accepting socket connections. But possibly it will be busy (in process of accepting + // another socket connection?). Hence, we retry. + count_retries++; + if (count_retries > CONNECT_MAX_RETRIES) { + // If the remote federate is not accepting the connection after CONNECT_MAX_RETRIES + // treat it as a soft error condition and return. + lf_print_error("Failed to connect to federate %d after %d retries. 
Giving up.", remote_federate_id, + CONNECT_MAX_RETRIES); + return; + } + lf_print_warning("Could not connect to federate %d. Will try again every" PRINTF_TIME "nanoseconds.\n", + remote_federate_id, ADDRESS_QUERY_RETRY_INTERVAL); - // Create a new socket. - _fed.socket_TCP_RTI = create_real_time_tcp_socket_errexit(); + // Check whether the RTI is still there. + if (rti_failed()) + break; - if (port == 0) { - // Free previously allocated address info. - freeaddrinfo(res); - // Increment the port number. - uport++; - if (uport >= DEFAULT_PORT + MAX_NUM_PORT_ADDRESSES) uport = DEFAULT_PORT; + // Wait ADDRESS_QUERY_RETRY_INTERVAL nanoseconds. + lf_sleep(ADDRESS_QUERY_RETRY_INTERVAL); + } else { + // Connect was successful. + size_t buffer_length = 1 + sizeof(uint16_t) + 1; + unsigned char buffer[buffer_length]; + buffer[0] = MSG_TYPE_P2P_SENDING_FED_ID; + if (_lf_my_fed_id > UINT16_MAX) { + // This error is very unlikely to occur. + lf_print_error_and_exit("Too many federates! More than %d.", UINT16_MAX); + } + encode_uint16((uint16_t)_lf_my_fed_id, (unsigned char*)&(buffer[1])); + unsigned char federation_id_length = (unsigned char)strnlen(federation_metadata.federation_id, 255); + buffer[sizeof(uint16_t) + 1] = federation_id_length; + // Trace the event when tracing is enabled + tracepoint_federate_to_federate(send_FED_ID, _lf_my_fed_id, remote_federate_id, NULL); + + // No need for a mutex because we have the only handle on the socket. 
+ write_to_socket_fail_on_error(&socket_id, buffer_length, buffer, NULL, "Failed to send fed_id to federate %d.", + remote_federate_id); + write_to_socket_fail_on_error(&socket_id, federation_id_length, (unsigned char*)federation_metadata.federation_id, + NULL, "Failed to send federation id to federate %d.", remote_federate_id); + + read_from_socket_fail_on_error(&socket_id, 1, (unsigned char*)buffer, NULL, + "Failed to read MSG_TYPE_ACK from federate %d in response to sending fed_id.", + remote_federate_id); + if (buffer[0] != MSG_TYPE_ACK) { + // Get the error code. + read_from_socket_fail_on_error(&socket_id, 1, (unsigned char*)buffer, NULL, + "Failed to read error code from federate %d in response to sending fed_id.", + remote_federate_id); + lf_print_error("Received MSG_TYPE_REJECT message from remote federate (%d).", buffer[0]); + result = -1; + continue; + } else { + lf_print("Connected to federate %d, port %d.", remote_federate_id, port); + // Trace the event when tracing is enabled + tracepoint_federate_to_federate(receive_ACK, _lf_my_fed_id, remote_federate_id, NULL); + } + } + } + // Once we set this variable, then all future calls to close() on this + // socket ID should reset it to -1 within a critical section. + _fed.sockets_for_outbound_p2p_connections[remote_federate_id] = socket_id; +} - // Reconstruct the address info. - rti_address(hostname, uport, &res); - } - lf_print("Trying RTI again on port %d (attempt %d).", uport, count_retries); - } else { - // This is the first attempt. - rti_address(hostname, uport, &res); - } +void lf_connect_to_rti(const char* hostname, int port) { + LF_PRINT_LOG("Connecting to the RTI."); + + // Override passed hostname and port if passed as runtime arguments. + hostname = federation_metadata.rti_host ? federation_metadata.rti_host : hostname; + port = federation_metadata.rti_port >= 0 ? federation_metadata.rti_port : port; + + // Adjust the port. 
+ uint16_t uport = 0; + if (port < 0 || port > INT16_MAX) { + lf_print_error("lf_connect_to_rti(): Specified port (%d) is out of range," + " using the default port %d instead.", + port, DEFAULT_PORT); + uport = DEFAULT_PORT; + port = 0; // Mark so that increments occur between tries. + } else { + uport = (uint16_t)port; + } + if (uport == 0) { + uport = DEFAULT_PORT; + } + + // Create a socket + _fed.socket_TCP_RTI = create_real_time_tcp_socket_errexit(); + + int result = -1; + int count_retries = 0; + struct addrinfo* res = NULL; + + while (count_retries++ < CONNECT_MAX_RETRIES && !_lf_termination_executed) { + if (res != NULL) { + // This is a repeated attempt. + if (_fed.socket_TCP_RTI >= 0) + close_rti_socket(); + + lf_sleep(CONNECT_RETRY_INTERVAL); + + // Create a new socket. + _fed.socket_TCP_RTI = create_real_time_tcp_socket_errexit(); + + if (port == 0) { + // Free previously allocated address info. + freeaddrinfo(res); + // Increment the port number. + uport++; + if (uport >= DEFAULT_PORT + MAX_NUM_PORT_ADDRESSES) + uport = DEFAULT_PORT; + + // Reconstruct the address info. + rti_address(hostname, uport, &res); + } + lf_print("Trying RTI again on port %d (attempt %d).", uport, count_retries); + } else { + // This is the first attempt. + rti_address(hostname, uport, &res); + } - result = connect(_fed.socket_TCP_RTI, res->ai_addr, res->ai_addrlen); - if (result < 0) continue; // Connect failed. + result = connect(_fed.socket_TCP_RTI, res->ai_addr, res->ai_addrlen); + if (result < 0) + continue; // Connect failed. - // Have connected to an RTI, but not sure it's the right RTI. - // Send a MSG_TYPE_FED_IDS message and wait for a reply. - // Notify the RTI of the ID of this federate and its federation. + // Have connected to an RTI, but not sure it's the right RTI. + // Send a MSG_TYPE_FED_IDS message and wait for a reply. + // Notify the RTI of the ID of this federate and its federation. #ifdef FEDERATED_AUTHENTICATED - LF_PRINT_LOG("Connected to an RTI. 
Performing HMAC-based authentication using federation ID."); - if (perform_hmac_authentication()) { - if (port == 0) { - continue; // Try again with a new port. - } else { - // No point in trying again because it will be the same port. - close_rti_socket(); - lf_print_error_and_exit("Authentication failed."); - } - } + LF_PRINT_LOG("Connected to an RTI. Performing HMAC-based authentication using federation ID."); + if (perform_hmac_authentication()) { + if (port == 0) { + continue; // Try again with a new port. + } else { + // No point in trying again because it will be the same port. + close_rti_socket(); + lf_print_error_and_exit("Authentication failed."); + } + } #else - LF_PRINT_LOG("Connected to an RTI. Sending federation ID for authentication."); + LF_PRINT_LOG("Connected to an RTI. Sending federation ID for authentication."); #endif - // Send the message type first. - unsigned char buffer[4]; - buffer[0] = MSG_TYPE_FED_IDS; - // Next send the federate ID. - if (_lf_my_fed_id > UINT16_MAX) { - lf_print_error_and_exit("Too many federates! More than %d.", UINT16_MAX); - } - encode_uint16((uint16_t)_lf_my_fed_id, &buffer[1]); - // Next send the federation ID length. - // The federation ID is limited to 255 bytes. - size_t federation_id_length = strnlen(federation_metadata.federation_id, 255); - buffer[1 + sizeof(uint16_t)] = (unsigned char)(federation_id_length & 0xff); - - // Trace the event when tracing is enabled - tracepoint_federate_to_rti(_fed.trace, send_FED_ID, _lf_my_fed_id, NULL); - - // No need for a mutex here because no other threads are writing to this socket. - if (write_to_socket(_fed.socket_TCP_RTI, 2 + sizeof(uint16_t), buffer)) { - continue; // Try again, possibly on a new port. - } - - // Next send the federation ID itself. - if (write_to_socket( - _fed.socket_TCP_RTI, - federation_id_length, - (unsigned char*)federation_metadata.federation_id)) { - continue; // Try again. - } - - // Wait for a response. 
- // The response will be MSG_TYPE_REJECT if the federation ID doesn't match. - // Otherwise, it will be either MSG_TYPE_ACK or MSG_TYPE_UDP_PORT, where the latter - // is used if clock synchronization will be performed. - unsigned char response; - - LF_PRINT_DEBUG("Waiting for response to federation ID from the RTI."); - - if (read_from_socket(_fed.socket_TCP_RTI, 1, &response)) { - continue; // Try again. - } - if (response == MSG_TYPE_REJECT) { - // Trace the event when tracing is enabled - tracepoint_federate_from_rti(_fed.trace, receive_REJECT, _lf_my_fed_id, NULL); - // Read one more byte to determine the cause of rejection. - unsigned char cause; - read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, 1, &cause, NULL, - "Failed to read the cause of rejection by the RTI."); - if (cause == FEDERATION_ID_DOES_NOT_MATCH || cause == WRONG_SERVER) { - lf_print_warning("Connected to the wrong RTI on port %d. Will try again", uport); - continue; - } - } else if (response == MSG_TYPE_ACK) { - // Trace the event when tracing is enabled - tracepoint_federate_from_rti(_fed.trace, receive_ACK, _lf_my_fed_id, NULL); - LF_PRINT_LOG("Received acknowledgment from the RTI."); - break; - } else if (response == MSG_TYPE_RESIGN) { - lf_print_warning("RTI on port %d resigned. Will try again", uport); - continue; - } else { - lf_print_warning("RTI on port %d gave unexpect response %u. Will try again", uport, response); - continue; - } - } - if (result < 0) { - lf_print_error_and_exit("Failed to connect to RTI after %d tries.", CONNECT_MAX_RETRIES); - } - - freeaddrinfo(res); /* No longer needed */ - - // Call a generated (external) function that sends information - // about connections between this federate and other federates - // where messages are routed through the RTI. 
- // @see MSG_TYPE_NEIGHBOR_STRUCTURE in net_common.h - lf_send_neighbor_structure_to_RTI(_fed.socket_TCP_RTI); - - uint16_t udp_port = setup_clock_synchronization_with_rti(); - - // Write the returned port number to the RTI - unsigned char UDP_port_number[1 + sizeof(uint16_t)]; - UDP_port_number[0] = MSG_TYPE_UDP_PORT; - encode_uint16(udp_port, &(UDP_port_number[1])); - write_to_socket_fail_on_error(&_fed.socket_TCP_RTI, 1 + sizeof(uint16_t), UDP_port_number, NULL, - "Failed to send the UDP port number to the RTI."); - - lf_print("Connected to RTI at %s:%d.", hostname, uport); -} - -void lf_create_server(int specified_port) { - assert(specified_port <= UINT16_MAX && specified_port >= 0); - uint16_t port = (uint16_t)specified_port; - LF_PRINT_LOG("Creating a socket server on port %d.", port); - // Create an IPv4 socket for TCP (not UDP) communication over IP (0). - int socket_descriptor = create_real_time_tcp_socket_errexit(); - - // Server file descriptor. - struct sockaddr_in server_fd; - // Zero out the server address structure. - bzero((char*)&server_fd, sizeof(server_fd)); - - server_fd.sin_family = AF_INET; // IPv4 - server_fd.sin_addr.s_addr = INADDR_ANY; // All interfaces, 0.0.0.0. - // Convert the port number from host byte order to network byte order. - server_fd.sin_port = htons(port); - - int result = bind( - socket_descriptor, - (struct sockaddr *) &server_fd, - sizeof(server_fd)); - int count = 0; - while (result < 0 && count++ < PORT_BIND_RETRY_LIMIT) { - lf_sleep(PORT_BIND_RETRY_INTERVAL); - result = bind( - socket_descriptor, - (struct sockaddr *) &server_fd, - sizeof(server_fd)); - } - if (result < 0) { - lf_print_error_and_exit("Failed to bind socket on port %d.", port); + // Send the message type first. + unsigned char buffer[4]; + buffer[0] = MSG_TYPE_FED_IDS; + // Next send the federate ID. + if (_lf_my_fed_id > UINT16_MAX) { + lf_print_error_and_exit("Too many federates! 
More than %d.", UINT16_MAX); } + encode_uint16((uint16_t)_lf_my_fed_id, &buffer[1]); + // Next send the federation ID length. + // The federation ID is limited to 255 bytes. + size_t federation_id_length = strnlen(federation_metadata.federation_id, 255); + buffer[1 + sizeof(uint16_t)] = (unsigned char)(federation_id_length & 0xff); - // Set the global server port. - if (specified_port == 0) { - // Need to retrieve the port number assigned by the OS. - struct sockaddr_in assigned; - socklen_t addr_len = sizeof(assigned); - if (getsockname(socket_descriptor, (struct sockaddr *) &assigned, &addr_len) < 0) { - lf_print_error_and_exit("Failed to retrieve assigned port number."); - } - _fed.server_port = ntohs(assigned.sin_port); + // Trace the event when tracing is enabled + tracepoint_federate_to_rti(send_FED_ID, _lf_my_fed_id, NULL); + + // No need for a mutex here because no other threads are writing to this socket. + if (write_to_socket(_fed.socket_TCP_RTI, 2 + sizeof(uint16_t), buffer)) { + continue; // Try again, possibly on a new port. + } + + // Next send the federation ID itself. + if (write_to_socket(_fed.socket_TCP_RTI, federation_id_length, (unsigned char*)federation_metadata.federation_id)) { + continue; // Try again. + } + + // Wait for a response. + // The response will be MSG_TYPE_REJECT if the federation ID doesn't match. + // Otherwise, it will be either MSG_TYPE_ACK or MSG_TYPE_UDP_PORT, where the latter + // is used if clock synchronization will be performed. + unsigned char response; + + LF_PRINT_DEBUG("Waiting for response to federation ID from the RTI."); + + if (read_from_socket(_fed.socket_TCP_RTI, 1, &response)) { + continue; // Try again. + } + if (response == MSG_TYPE_REJECT) { + // Trace the event when tracing is enabled + tracepoint_federate_from_rti(receive_REJECT, _lf_my_fed_id, NULL); + // Read one more byte to determine the cause of rejection. 
+ unsigned char cause; + read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, 1, &cause, NULL, + "Failed to read the cause of rejection by the RTI."); + if (cause == FEDERATION_ID_DOES_NOT_MATCH || cause == WRONG_SERVER) { + lf_print_warning("Connected to the wrong RTI on port %d. Will try again", uport); + continue; + } + } else if (response == MSG_TYPE_ACK) { + // Trace the event when tracing is enabled + tracepoint_federate_from_rti(receive_ACK, _lf_my_fed_id, NULL); + LF_PRINT_LOG("Received acknowledgment from the RTI."); + break; + } else if (response == MSG_TYPE_RESIGN) { + lf_print_warning("RTI on port %d resigned. Will try again", uport); + continue; } else { - _fed.server_port = port; + lf_print_warning("RTI on port %d gave unexpect response %u. Will try again", uport, response); + continue; } + } + if (result < 0) { + lf_print_error_and_exit("Failed to connect to RTI after %d tries.", CONNECT_MAX_RETRIES); + } - // Enable listening for socket connections. - // The second argument is the maximum number of queued socket requests, - // which according to the Mac man page is limited to 128. - listen(socket_descriptor, 128); + freeaddrinfo(res); /* No longer needed */ - LF_PRINT_LOG("Server for communicating with other federates started using port %d.", _fed.server_port); + // Call a generated (external) function that sends information + // about connections between this federate and other federates + // where messages are routed through the RTI. + // @see MSG_TYPE_NEIGHBOR_STRUCTURE in net_common.h + lf_send_neighbor_structure_to_RTI(_fed.socket_TCP_RTI); - // Send the server port number to the RTI - // on an MSG_TYPE_ADDRESS_ADVERTISEMENT message (@see net_common.h). 
- unsigned char buffer[sizeof(int32_t) + 1]; - buffer[0] = MSG_TYPE_ADDRESS_ADVERTISEMENT; - encode_int32(_fed.server_port, &(buffer[1])); + uint16_t udp_port = setup_clock_synchronization_with_rti(); - // Trace the event when tracing is enabled - tracepoint_federate_to_rti(_fed.trace, send_ADR_AD, _lf_my_fed_id, NULL); - - // No need for a mutex because we have the only handle on this socket. - write_to_socket_fail_on_error(&_fed.socket_TCP_RTI, sizeof(int32_t) + 1, (unsigned char*)buffer, NULL, - "Failed to send address advertisement."); + // Write the returned port number to the RTI + unsigned char UDP_port_number[1 + sizeof(uint16_t)]; + UDP_port_number[0] = MSG_TYPE_UDP_PORT; + encode_uint16(udp_port, &(UDP_port_number[1])); + write_to_socket_fail_on_error(&_fed.socket_TCP_RTI, 1 + sizeof(uint16_t), UDP_port_number, NULL, + "Failed to send the UDP port number to the RTI."); - LF_PRINT_DEBUG("Sent port %d to the RTI.", _fed.server_port); + lf_print("Connected to RTI at %s:%d.", hostname, uport); +} - // Set the global server socket - _fed.server_socket = socket_descriptor; +void lf_create_server(int specified_port) { + assert(specified_port <= UINT16_MAX && specified_port >= 0); + uint16_t port = (uint16_t)specified_port; + LF_PRINT_LOG("Creating a socket server on port %d.", port); + // Create an IPv4 socket for TCP (not UDP) communication over IP (0). + int socket_descriptor = create_real_time_tcp_socket_errexit(); + + // Server file descriptor. + struct sockaddr_in server_fd; + // Zero out the server address structure. + bzero((char*)&server_fd, sizeof(server_fd)); + + server_fd.sin_family = AF_INET; // IPv4 + server_fd.sin_addr.s_addr = INADDR_ANY; // All interfaces, 0.0.0.0. + // Convert the port number from host byte order to network byte order. 
+ server_fd.sin_port = htons(port); + + int result = bind(socket_descriptor, (struct sockaddr*)&server_fd, sizeof(server_fd)); + int count = 0; + while (result < 0 && count++ < PORT_BIND_RETRY_LIMIT) { + lf_sleep(PORT_BIND_RETRY_INTERVAL); + result = bind(socket_descriptor, (struct sockaddr*)&server_fd, sizeof(server_fd)); + } + if (result < 0) { + lf_print_error_and_exit("Failed to bind socket on port %d.", port); + } + + // Set the global server port. + if (specified_port == 0) { + // Need to retrieve the port number assigned by the OS. + struct sockaddr_in assigned; + socklen_t addr_len = sizeof(assigned); + if (getsockname(socket_descriptor, (struct sockaddr*)&assigned, &addr_len) < 0) { + lf_print_error_and_exit("Failed to retrieve assigned port number."); + } + _fed.server_port = ntohs(assigned.sin_port); + } else { + _fed.server_port = port; + } + + // Enable listening for socket connections. + // The second argument is the maximum number of queued socket requests, + // which according to the Mac man page is limited to 128. + listen(socket_descriptor, 128); + + LF_PRINT_LOG("Server for communicating with other federates started using port %d.", _fed.server_port); + + // Send the server port number to the RTI + // on an MSG_TYPE_ADDRESS_ADVERTISEMENT message (@see net_common.h). + unsigned char buffer[sizeof(int32_t) + 1]; + buffer[0] = MSG_TYPE_ADDRESS_ADVERTISEMENT; + encode_int32(_fed.server_port, &(buffer[1])); + + // Trace the event when tracing is enabled + tracepoint_federate_to_rti(send_ADR_AD, _lf_my_fed_id, NULL); + + // No need for a mutex because we have the only handle on this socket. 
+ write_to_socket_fail_on_error(&_fed.socket_TCP_RTI, sizeof(int32_t) + 1, (unsigned char*)buffer, NULL, + "Failed to send address advertisement."); + + LF_PRINT_DEBUG("Sent port %d to the RTI.", _fed.server_port); + + // Set the global server socket + _fed.server_socket = socket_descriptor; } -void lf_enqueue_port_absent_reactions(environment_t* env){ - assert(env != GLOBAL_ENVIRONMENT); +void lf_enqueue_port_absent_reactions(environment_t* env) { + assert(env != GLOBAL_ENVIRONMENT); #ifdef FEDERATED_CENTRALIZED - if (!_fed.has_downstream) { - // This federate is not connected to any downstream federates via a - // logical connection. No need to trigger port absent - // reactions. - return; - } + if (!_fed.has_downstream) { + // This federate is not connected to any downstream federates via a + // logical connection. No need to trigger port absent + // reactions. + return; + } #endif - LF_PRINT_DEBUG("Enqueueing port absent reactions at time " PRINTF_TIME ".", (env->current_tag.time - start_time)); - if (num_port_absent_reactions == 0) { - LF_PRINT_DEBUG("No port absent reactions."); - return; - } - for (int i = 0; i < num_port_absent_reactions; i++) { - reaction_t* reaction = port_absent_reaction[i]; - if (reaction && reaction->status == inactive) { - LF_PRINT_DEBUG("Inserting port absent reaction on reaction queue."); - lf_scheduler_trigger_reaction(env->scheduler, reaction, -1); - } - } + LF_PRINT_DEBUG("Enqueueing port absent reactions at time " PRINTF_TIME ".", (env->current_tag.time - start_time)); + if (num_port_absent_reactions == 0) { + LF_PRINT_DEBUG("No port absent reactions."); + return; + } + for (int i = 0; i < num_port_absent_reactions; i++) { + reaction_t* reaction = port_absent_reaction[i]; + if (reaction && reaction->status == inactive) { + LF_PRINT_DEBUG("Inserting port absent reaction on reaction queue."); + lf_scheduler_trigger_reaction(env->scheduler, reaction, -1); + } + } } void* lf_handle_p2p_connections_from_federates(void* env_arg) { - 
assert(env_arg); - environment_t* env = (environment_t *) env_arg; - int received_federates = 0; - // Allocate memory to store thread IDs. - _fed.inbound_socket_listeners = (lf_thread_t*)calloc(_fed.number_of_inbound_p2p_connections, sizeof(lf_thread_t)); - while (received_federates < _fed.number_of_inbound_p2p_connections && !_lf_termination_executed) { - // Wait for an incoming connection request. - struct sockaddr client_fd; - uint32_t client_length = sizeof(client_fd); - int socket_id = accept(_fed.server_socket, &client_fd, &client_length); - - if (socket_id < 0) { - if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) { - if (rti_failed()) break; - else continue; // Try again. - } else if (errno == EPERM) { - lf_print_error_system_failure("Firewall permissions prohibit connection."); - } else { - lf_print_error_system_failure("A fatal error occurred while accepting a new socket."); - } - } - LF_PRINT_LOG("Accepted new connection from remote federate."); - - size_t header_length = 1 + sizeof(uint16_t) + 1; - unsigned char buffer[header_length]; - int read_failed = read_from_socket(socket_id, header_length, (unsigned char*)&buffer); - if (read_failed || buffer[0] != MSG_TYPE_P2P_SENDING_FED_ID) { - lf_print_warning("Federate received invalid first message on P2P socket. Closing socket."); - if (read_failed == 0) { - // Wrong message received. - unsigned char response[2]; - response[0] = MSG_TYPE_REJECT; - response[1] = WRONG_SERVER; - // Trace the event when tracing is enabled - tracepoint_federate_to_federate(_fed.trace, send_REJECT, _lf_my_fed_id, -3, NULL); - // Ignore errors on this response. - write_to_socket(socket_id, 2, response); - } - close(socket_id); - continue; - } - - // Get the federation ID and check it. 
- unsigned char federation_id_length = buffer[header_length - 1]; - char remote_federation_id[federation_id_length]; - read_failed = read_from_socket(socket_id, federation_id_length, (unsigned char*)remote_federation_id); - if (read_failed || (strncmp(federation_metadata.federation_id, remote_federation_id, strnlen(federation_metadata.federation_id, 255)) != 0)) { - lf_print_warning("Received invalid federation ID. Closing socket."); - if (read_failed == 0) { - unsigned char response[2]; - response[0] = MSG_TYPE_REJECT; - response[1] = FEDERATION_ID_DOES_NOT_MATCH; - // Trace the event when tracing is enabled - tracepoint_federate_to_federate(_fed.trace, send_REJECT, _lf_my_fed_id, -3, NULL); - // Ignore errors on this response. - write_to_socket(socket_id, 2, response); - } - close(socket_id); - continue; - } + assert(env_arg); + environment_t* env = (environment_t*)env_arg; + int received_federates = 0; + // Allocate memory to store thread IDs. + _fed.inbound_socket_listeners = (lf_thread_t*)calloc(_fed.number_of_inbound_p2p_connections, sizeof(lf_thread_t)); + while (received_federates < _fed.number_of_inbound_p2p_connections && !_lf_termination_executed) { + // Wait for an incoming connection request. + struct sockaddr client_fd; + uint32_t client_length = sizeof(client_fd); + int socket_id = accept(_fed.server_socket, &client_fd, &client_length); + + if (socket_id < 0) { + if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) { + if (rti_failed()) + break; + else + continue; // Try again. 
+ } else if (errno == EPERM) { + lf_print_error_system_failure("Firewall permissions prohibit connection."); + } else { + lf_print_error_system_failure("A fatal error occurred while accepting a new socket."); + } + } + LF_PRINT_LOG("Accepted new connection from remote federate."); + + size_t header_length = 1 + sizeof(uint16_t) + 1; + unsigned char buffer[header_length]; + int read_failed = read_from_socket(socket_id, header_length, (unsigned char*)&buffer); + if (read_failed || buffer[0] != MSG_TYPE_P2P_SENDING_FED_ID) { + lf_print_warning("Federate received invalid first message on P2P socket. Closing socket."); + if (read_failed == 0) { + // Wrong message received. + unsigned char response[2]; + response[0] = MSG_TYPE_REJECT; + response[1] = WRONG_SERVER; + // Trace the event when tracing is enabled + tracepoint_federate_to_federate(send_REJECT, _lf_my_fed_id, -3, NULL); + // Ignore errors on this response. + write_to_socket(socket_id, 2, response); + } + close(socket_id); + continue; + } + + // Get the federation ID and check it. + unsigned char federation_id_length = buffer[header_length - 1]; + char remote_federation_id[federation_id_length]; + read_failed = read_from_socket(socket_id, federation_id_length, (unsigned char*)remote_federation_id); + if (read_failed || (strncmp(federation_metadata.federation_id, remote_federation_id, + strnlen(federation_metadata.federation_id, 255)) != 0)) { + lf_print_warning("Received invalid federation ID. Closing socket."); + if (read_failed == 0) { + unsigned char response[2]; + response[0] = MSG_TYPE_REJECT; + response[1] = FEDERATION_ID_DOES_NOT_MATCH; + // Trace the event when tracing is enabled + tracepoint_federate_to_federate(send_REJECT, _lf_my_fed_id, -3, NULL); + // Ignore errors on this response. + write_to_socket(socket_id, 2, response); + } + close(socket_id); + continue; + } - // Extract the ID of the sending federate. 
- uint16_t remote_fed_id = extract_uint16((unsigned char*)&(buffer[1])); - LF_PRINT_DEBUG("Received sending federate ID %d.", remote_fed_id); + // Extract the ID of the sending federate. + uint16_t remote_fed_id = extract_uint16((unsigned char*)&(buffer[1])); + LF_PRINT_DEBUG("Received sending federate ID %d.", remote_fed_id); - // Trace the event when tracing is enabled - tracepoint_federate_to_federate(_fed.trace, receive_FED_ID, _lf_my_fed_id, remote_fed_id, NULL); + // Trace the event when tracing is enabled + tracepoint_federate_to_federate(receive_FED_ID, _lf_my_fed_id, remote_fed_id, NULL); - // Once we record the socket_id here, all future calls to close() on - // the socket should be done while holding the socket_mutex, and this array - // element should be reset to -1 during that critical section. - // Otherwise, there can be race condition where, during termination, - // two threads attempt to simultaneously close the socket. - _fed.sockets_for_inbound_p2p_connections[remote_fed_id] = socket_id; + // Once we record the socket_id here, all future calls to close() on + // the socket should be done while holding the socket_mutex, and this array + // element should be reset to -1 during that critical section. + // Otherwise, there can be race condition where, during termination, + // two threads attempt to simultaneously close the socket. + _fed.sockets_for_inbound_p2p_connections[remote_fed_id] = socket_id; - // Send an MSG_TYPE_ACK message. - unsigned char response = MSG_TYPE_ACK; + // Send an MSG_TYPE_ACK message. 
+ unsigned char response = MSG_TYPE_ACK; - // Trace the event when tracing is enabled - tracepoint_federate_to_federate(_fed.trace, send_ACK, _lf_my_fed_id, remote_fed_id, NULL); - - LF_MUTEX_LOCK(&lf_outbound_socket_mutex); - write_to_socket_fail_on_error( - &_fed.sockets_for_inbound_p2p_connections[remote_fed_id], - 1, (unsigned char*)&response, - &lf_outbound_socket_mutex, - "Failed to write MSG_TYPE_ACK in response to federate %d.", - remote_fed_id); - LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); - - // Start a thread to listen for incoming messages from other federates. - // The fed_id is a uint16_t, which we assume can be safely cast to and from void*. - void* fed_id_arg = (void*)(uintptr_t)remote_fed_id; - int result = lf_thread_create( - &_fed.inbound_socket_listeners[received_federates], - listen_to_federates, - fed_id_arg); - if (result != 0) { - // Failed to create a listening thread. - LF_MUTEX_LOCK(&socket_mutex); - if (_fed.sockets_for_inbound_p2p_connections[remote_fed_id] != -1) { - close(socket_id); - _fed.sockets_for_inbound_p2p_connections[remote_fed_id] = -1; - } - LF_MUTEX_UNLOCK(&socket_mutex); - lf_print_error_and_exit( - "Failed to create a thread to listen for incoming physical connection. Error code: %d.", - result - ); - } + // Trace the event when tracing is enabled + tracepoint_federate_to_federate(send_ACK, _lf_my_fed_id, remote_fed_id, NULL); - received_federates++; - } + LF_MUTEX_LOCK(&lf_outbound_socket_mutex); + write_to_socket_fail_on_error(&_fed.sockets_for_inbound_p2p_connections[remote_fed_id], 1, + (unsigned char*)&response, &lf_outbound_socket_mutex, + "Failed to write MSG_TYPE_ACK in response to federate %d.", remote_fed_id); + LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); - LF_PRINT_LOG("All %zu remote federates are connected.", _fed.number_of_inbound_p2p_connections); - return NULL; + // Start a thread to listen for incoming messages from other federates. 
+ // The fed_id is a uint16_t, which we assume can be safely cast to and from void*. + void* fed_id_arg = (void*)(uintptr_t)remote_fed_id; + int result = lf_thread_create(&_fed.inbound_socket_listeners[received_federates], listen_to_federates, fed_id_arg); + if (result != 0) { + // Failed to create a listening thread. + LF_MUTEX_LOCK(&socket_mutex); + if (_fed.sockets_for_inbound_p2p_connections[remote_fed_id] != -1) { + close(socket_id); + _fed.sockets_for_inbound_p2p_connections[remote_fed_id] = -1; + } + LF_MUTEX_UNLOCK(&socket_mutex); + lf_print_error_and_exit("Failed to create a thread to listen for incoming physical connection. Error code: %d.", + result); + } + + received_federates++; + } + + LF_PRINT_LOG("All %zu remote federates are connected.", _fed.number_of_inbound_p2p_connections); + return NULL; } void lf_latest_tag_complete(tag_t tag_to_send) { - int compare_with_last_LTC = lf_tag_compare(_fed.last_sent_LTC, tag_to_send); - int compare_with_last_DNET = lf_tag_compare(_fed.last_DNET, tag_to_send); - if (compare_with_last_LTC >= 0) { - return; - } - if (compare_with_last_DNET > 0) { - LF_PRINT_LOG("Skipping Latest Tag Complete (LTC) " PRINTF_TAG " .", - tag_to_send.time - start_time, - tag_to_send.microstep); - _fed.last_skipped_LTC = tag_to_send; - return; - } - LF_PRINT_LOG("Sending Latest Tag Complete (LTC) " PRINTF_TAG " to the RTI.", - tag_to_send.time - start_time, - tag_to_send.microstep); - send_tag(MSG_TYPE_LATEST_TAG_COMPLETE, tag_to_send); - _fed.last_sent_LTC = tag_to_send; - _fed.last_skipped_LTC = NEVER_TAG; + int compare_with_last_LTC = lf_tag_compare(_fed.last_sent_LTC, tag_to_send); + int compare_with_last_DNET = lf_tag_compare(_fed.last_DNET, tag_to_send); + if (compare_with_last_LTC >= 0) { + return; + } + if (compare_with_last_DNET > 0) { + LF_PRINT_LOG("Skipping Latest Tag Complete (LTC) " PRINTF_TAG " .", tag_to_send.time - start_time, + tag_to_send.microstep); + _fed.last_skipped_LTC = tag_to_send; + return; + } + 
LF_PRINT_LOG("Sending Latest Tag Complete (LTC) " PRINTF_TAG " to the RTI.", tag_to_send.time - start_time, + tag_to_send.microstep); + send_tag(MSG_TYPE_LATEST_TAG_COMPLETE, tag_to_send); + _fed.last_sent_LTC = tag_to_send; + _fed.last_skipped_LTC = NEVER_TAG; } parse_rti_code_t lf_parse_rti_addr(const char* rti_addr) { - bool has_host = false, has_port = false, has_user = false; - rti_addr_info_t rti_addr_info = {0}; - extract_rti_addr_info(rti_addr, &rti_addr_info); - if (!rti_addr_info.has_host && !rti_addr_info.has_port && !rti_addr_info.has_user) { - return FAILED_TO_PARSE; - } - if (rti_addr_info.has_host) { - if (validate_host(rti_addr_info.rti_host_str)) { - char* rti_host = (char*) calloc(256, sizeof(char)); - strncpy(rti_host, rti_addr_info.rti_host_str, 255); - federation_metadata.rti_host = rti_host; - } else { - return INVALID_HOST; - } - } - if (rti_addr_info.has_port) { - if (validate_port(rti_addr_info.rti_port_str)) { - federation_metadata.rti_port = atoi(rti_addr_info.rti_port_str); - } else { - return INVALID_PORT; - } + bool has_host = false, has_port = false, has_user = false; + rti_addr_info_t rti_addr_info = {0}; + extract_rti_addr_info(rti_addr, &rti_addr_info); + if (!rti_addr_info.has_host && !rti_addr_info.has_port && !rti_addr_info.has_user) { + return FAILED_TO_PARSE; + } + if (rti_addr_info.has_host) { + if (validate_host(rti_addr_info.rti_host_str)) { + char* rti_host = (char*)calloc(256, sizeof(char)); + strncpy(rti_host, rti_addr_info.rti_host_str, 255); + federation_metadata.rti_host = rti_host; + } else { + return INVALID_HOST; } - if (rti_addr_info.has_user) { - if (validate_user(rti_addr_info.rti_user_str)) { - char* rti_user = (char*) calloc(256, sizeof(char)); - strncpy(rti_user, rti_addr_info.rti_user_str, 255); - federation_metadata.rti_user = rti_user; - } else { - return INVALID_USER; - } + } + if (rti_addr_info.has_port) { + if (validate_port(rti_addr_info.rti_port_str)) { + federation_metadata.rti_port = 
atoi(rti_addr_info.rti_port_str); + } else { + return INVALID_PORT; + } + } + if (rti_addr_info.has_user) { + if (validate_user(rti_addr_info.rti_user_str)) { + char* rti_user = (char*)calloc(256, sizeof(char)); + strncpy(rti_user, rti_addr_info.rti_user_str, 255); + federation_metadata.rti_user = rti_user; + } else { + return INVALID_USER; } - return SUCCESS; + } + return SUCCESS; } void lf_reset_status_fields_on_input_port_triggers() { - environment_t *env; - _lf_get_environments(&env); - tag_t now = lf_tag(env); - for (int i = 0; i < _lf_action_table_size; i++) { - if (lf_tag_compare(_lf_action_table[i]->trigger->last_known_status_tag, now) >= 0) { - set_network_port_status(i, absent); // Default may be overriden to become present. - } else { - set_network_port_status(i, unknown); - } + environment_t* env; + _lf_get_environments(&env); + tag_t now = lf_tag(env); + for (int i = 0; i < _lf_action_table_size; i++) { + if (lf_tag_compare(_lf_action_table[i]->trigger->last_known_status_tag, now) >= 0) { + set_network_port_status(i, absent); // Default may be overriden to become present. + } else { + set_network_port_status(i, unknown); } - LF_PRINT_DEBUG("Resetting port status fields."); - lf_update_max_level(_fed.last_TAG, _fed.is_last_TAG_provisional); - lf_cond_broadcast(&lf_port_status_changed); + } + LF_PRINT_DEBUG("Resetting port status fields."); + lf_update_max_level(_fed.last_TAG, _fed.is_last_TAG_provisional); + lf_cond_broadcast(&lf_port_status_changed); } -int lf_send_message(int message_type, - unsigned short port, - unsigned short federate, - const char* next_destination_str, - size_t length, - unsigned char* message) { - unsigned char header_buffer[1 + sizeof(uint16_t) + sizeof(uint16_t) + sizeof(int32_t)]; - // First byte identifies this as a timed message. 
- if (message_type != MSG_TYPE_P2P_MESSAGE ) { - lf_print_error("lf_send_message: Unsupported message type (%d).", message_type); - return -1; - } - header_buffer[0] = (unsigned char)message_type; - // Next two bytes identify the destination port. - // NOTE: Send messages little endian (network order), not big endian. - encode_uint16(port, &(header_buffer[1])); - - // Next two bytes identify the destination federate. - encode_uint16(federate, &(header_buffer[1 + sizeof(uint16_t)])); - - // The next four bytes are the message length. - encode_int32((int32_t)length, &(header_buffer[1 + sizeof(uint16_t) + sizeof(uint16_t)])); - - LF_PRINT_LOG("Sending untagged message to %s.", next_destination_str); - - // Header: message_type + port_id + federate_id + length of message + timestamp + microstep - const int header_length = 1 + sizeof(uint16_t) + sizeof(uint16_t) + sizeof(int32_t); - - // Use a mutex lock to prevent multiple threads from simultaneously sending. - LF_MUTEX_LOCK(&lf_outbound_socket_mutex); - - int* socket = &_fed.sockets_for_outbound_p2p_connections[federate]; - - // Trace the event when tracing is enabled - tracepoint_federate_to_federate(_fed.trace, send_P2P_MSG, _lf_my_fed_id, federate, NULL); - - int result = write_to_socket_close_on_error(socket, header_length, header_buffer); - if (result == 0) { - // Header sent successfully. Send the body. - result = write_to_socket_close_on_error(socket, length, message); - } - if (result != 0) { - // Message did not send. Since this is used for physical connections, this is not critical. - lf_print_warning("Failed to send message to %s. 
Dropping the message.", next_destination_str); - } - LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); - return result; +int lf_send_message(int message_type, unsigned short port, unsigned short federate, const char* next_destination_str, + size_t length, unsigned char* message) { + unsigned char header_buffer[1 + sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t)]; + // First byte identifies this as a timed message. + if (message_type != MSG_TYPE_P2P_MESSAGE) { + lf_print_error("lf_send_message: Unsupported message type (%d).", message_type); + return -1; + } + header_buffer[0] = (unsigned char)message_type; + // Next two bytes identify the destination port. + // NOTE: Send messages little endian (network order), not big endian. + encode_uint16(port, &(header_buffer[1])); + + // Next two bytes identify the destination federate. + encode_uint16(federate, &(header_buffer[1 + sizeof(uint16_t)])); + + // The next four bytes are the message length. + encode_uint32((uint32_t)length, &(header_buffer[1 + sizeof(uint16_t) + sizeof(uint16_t)])); + + LF_PRINT_LOG("Sending untagged message to %s.", next_destination_str); + + // Header: message_type + port_id + federate_id + length of message + timestamp + microstep + const int header_length = 1 + sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t); + + // Use a mutex lock to prevent multiple threads from simultaneously sending. + LF_MUTEX_LOCK(&lf_outbound_socket_mutex); + + int* socket = &_fed.sockets_for_outbound_p2p_connections[federate]; + + // Trace the event when tracing is enabled + tracepoint_federate_to_federate(send_P2P_MSG, _lf_my_fed_id, federate, NULL); + + int result = write_to_socket_close_on_error(socket, header_length, header_buffer); + if (result == 0) { + // Header sent successfully. Send the body. + result = write_to_socket_close_on_error(socket, length, message); + } + if (result != 0) { + // Message did not send. Since this is used for physical connections, this is not critical. 
+ lf_print_warning("Failed to send message to %s. Dropping the message.", next_destination_str); + } + LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); + return result; } tag_t lf_send_next_event_tag(environment_t* env, tag_t tag, bool wait_for_reply) { - assert(env != GLOBAL_ENVIRONMENT); - while (true) { - if (!_fed.has_downstream && !_fed.has_upstream) { - // This federate is not connected (except possibly by physical links) - // so there is no need for the RTI to get involved. - - // NOTE: If the event queue is empty, then the time argument is either - // the timeout_time or FOREVER. If -fast is also set, then - // it matters whether there are upstream federates connected by physical - // connections, which do not affect _fed.has_upstream. Perhaps we - // should not return immediately because - // then the execution will hit its timeout_time and fail to receive any - // messages sent by upstream federates. - // However, -fast is really incompatible with federated execution with - // physical connections, so I don't think we need to worry about this. - LF_PRINT_DEBUG("Granted tag " PRINTF_TAG " because the federate has neither " - "upstream nor downstream federates.", - tag.time - start_time, tag.microstep); - return tag; + assert(env != GLOBAL_ENVIRONMENT); + while (true) { + if (!_fed.has_downstream && !_fed.has_upstream) { + // This federate is not connected (except possibly by physical links) + // so there is no need for the RTI to get involved. + + // NOTE: If the event queue is empty, then the time argument is either + // the timeout_time or FOREVER. If -fast is also set, then + // it matters whether there are upstream federates connected by physical + // connections, which do not affect _fed.has_upstream. Perhaps we + // should not return immediately because + // then the execution will hit its timeout_time and fail to receive any + // messages sent by upstream federates. 
+ // However, -fast is really incompatible with federated execution with + // physical connections, so I don't think we need to worry about this. + LF_PRINT_DEBUG("Granted tag " PRINTF_TAG " because the federate has neither " + "upstream nor downstream federates.", + tag.time - start_time, tag.microstep); + return tag; + } + + // If time advance (TAG or PTAG) has already been granted for this tag + // or a larger tag, then return immediately. + if (lf_tag_compare(_fed.last_TAG, tag) >= 0) { + LF_PRINT_DEBUG("Granted tag " PRINTF_TAG " because TAG or PTAG has been received.", + _fed.last_TAG.time - start_time, _fed.last_TAG.microstep); + return _fed.last_TAG; + } + + // Copy the tag because bounded_NET() may modify it. + tag_t original_tag = tag; + + // A NET sent by this function is a promise that, absent + // inputs from another federate, this federate will not produce events + // earlier than t. But if there are downstream federates and there is + // a physical action (not counting receivers from upstream federates), + // then we can only promise up to current physical time (plus the minimum + // of all minimum delays on the physical actions). + // If wait_for_reply is false, leave the tag alone. + bool tag_bounded_by_physical_time = wait_for_reply ? bounded_NET(&tag) : false; + + // What we do next depends on whether the NET has been bounded by + // physical time or by an event on the event queue. + if (!tag_bounded_by_physical_time) { + // This if statement does not fall through but rather returns. + // NET is not bounded by physical time or has no downstream federates. + // Normal case. 
+ if (lf_tag_compare(_fed.last_DNET, tag) <= 0 || lf_tag_compare(_fed.last_TAG, tag) < 0) { + send_tag(MSG_TYPE_NEXT_EVENT_TAG, tag); + _fed.last_sent_NET = tag; + LF_PRINT_LOG("Sent next event tag (NET) " PRINTF_TAG " to RTI.", tag.time - start_time, tag.microstep); + } else { + LF_PRINT_LOG("Skip next event tag (NET) " PRINTF_TAG " to RTI.", tag.time - start_time, tag.microstep); + } + + if (!wait_for_reply) { + LF_PRINT_LOG("Not waiting for reply to NET."); + return tag; + } + + // If there are no upstream federates, return immediately, without + // waiting for a reply. This federate does not need to wait for + // any other federate. + // NOTE: If fast execution is being used, it may be necessary to + // throttle upstream federates. + if (!_fed.has_upstream) { + LF_PRINT_DEBUG("Not waiting for reply to NET " PRINTF_TAG " because I " + "have no upstream federates.", + tag.time - start_time, tag.microstep); + return tag; + } + + // Wait until a TAG is received from the RTI. + while (true) { + // Wait until either something changes on the event queue or + // the RTI has responded with a TAG. + LF_PRINT_DEBUG("Waiting for a TAG from the RTI with _fed.last_TAG= " PRINTF_TAG " and net=" PRINTF_TAG, + _fed.last_TAG.time - start_time, _fed.last_TAG.microstep, tag.time - start_time, tag.microstep); + if (lf_cond_wait(&env->event_q_changed) != 0) { + lf_print_error("Wait error."); } - - // If time advance (TAG or PTAG) has already been granted for this tag - // or a larger tag, then return immediately. - if (lf_tag_compare(_fed.last_TAG, tag) >= 0) { - LF_PRINT_DEBUG("Granted tag " PRINTF_TAG " because TAG or PTAG has been received.", - _fed.last_TAG.time - start_time, _fed.last_TAG.microstep); - return _fed.last_TAG; + // Check whether the new event on the event queue requires sending a new NET. 
+ tag_t next_tag = get_next_event_tag(env); + if (lf_tag_compare(_fed.last_TAG, next_tag) >= 0 || lf_tag_compare(_fed.last_TAG, tag) >= 0) { + return _fed.last_TAG; } + if (lf_tag_compare(next_tag, tag) != 0) { + send_tag(MSG_TYPE_NEXT_EVENT_TAG, next_tag); + _fed.last_sent_NET = next_tag; + LF_PRINT_LOG("Sent next event tag (NET) " PRINTF_TAG " to RTI from loop.", next_tag.time - lf_time_start(), + next_tag.microstep); + } + } + } - // Copy the tag because bounded_NET() may modify it. - tag_t original_tag = tag; - - // A NET sent by this function is a promise that, absent - // inputs from another federate, this federate will not produce events - // earlier than t. But if there are downstream federates and there is - // a physical action (not counting receivers from upstream federates), - // then we can only promise up to current physical time (plus the minimum - // of all minimum delays on the physical actions). - // If wait_for_reply is false, leave the tag alone. - bool tag_bounded_by_physical_time = wait_for_reply ? - bounded_NET(&tag) - : false; - - // What we do next depends on whether the NET has been bounded by - // physical time or by an event on the event queue. - if (!tag_bounded_by_physical_time) { - // This if statement does not fall through but rather returns. - // NET is not bounded by physical time or has no downstream federates. - // Normal case. - if (lf_tag_compare(_fed.last_DNET, tag) <= 0 - || lf_tag_compare(_fed.last_TAG, tag) < 0) { - send_tag(MSG_TYPE_NEXT_EVENT_TAG, tag); - _fed.last_sent_NET = tag; - LF_PRINT_LOG("Sent next event tag (NET) " PRINTF_TAG " to RTI.", - tag.time - start_time, tag.microstep); - } else { - LF_PRINT_LOG("Skip next event tag (NET) " PRINTF_TAG " to RTI.", - tag.time - start_time, tag.microstep); - } + if (tag.time != FOREVER) { + // Create a dummy event that will force this federate to advance time and subsequently + // enable progress for downstream federates. 
Increment the time by ADVANCE_MESSAGE_INTERVAL + // to prevent too frequent dummy events. + event_t* dummy = _lf_create_dummy_events(env, NULL, tag.time + ADVANCE_MESSAGE_INTERVAL, NULL, 0); + pqueue_insert(env->event_q, dummy); + } - if (!wait_for_reply) { - LF_PRINT_LOG("Not waiting for reply to NET."); - return tag; - } + LF_PRINT_DEBUG("Inserted a dummy event for logical time " PRINTF_TIME ".", tag.time - lf_time_start()); - // If there are no upstream federates, return immediately, without - // waiting for a reply. This federate does not need to wait for - // any other federate. - // NOTE: If fast execution is being used, it may be necessary to - // throttle upstream federates. - if (!_fed.has_upstream) { - LF_PRINT_DEBUG("Not waiting for reply to NET " PRINTF_TAG " because I " - "have no upstream federates.", - tag.time - start_time, tag.microstep); - return tag; - } + if (!wait_for_reply) { + LF_PRINT_LOG("Not waiting for physical time to advance further."); + return tag; + } - // Wait until a TAG is received from the RTI. - while (true) { - // Wait until either something changes on the event queue or - // the RTI has responded with a TAG. - LF_PRINT_DEBUG("Waiting for a TAG from the RTI with _fed.last_TAG= " PRINTF_TAG " and net=" PRINTF_TAG, - _fed.last_TAG.time - start_time, _fed.last_TAG.microstep, - tag.time - start_time, tag.microstep); - if (lf_cond_wait(&env->event_q_changed) != 0) { - lf_print_error("Wait error."); - } - // Check whether the new event on the event queue requires sending a new NET. 
- tag_t next_tag = get_next_event_tag(env); - if ( - lf_tag_compare(_fed.last_TAG, next_tag) >= 0 - || lf_tag_compare(_fed.last_TAG, tag) >= 0 - ) { - return _fed.last_TAG; - } - if (lf_tag_compare(next_tag, tag) != 0) { - send_tag(MSG_TYPE_NEXT_EVENT_TAG, next_tag); - _fed.last_sent_NET = next_tag; - LF_PRINT_LOG("Sent next event tag (NET) " PRINTF_TAG " to RTI from loop.", - next_tag.time - lf_time_start(), next_tag.microstep); - } - } - } + // This federate should repeatedly advance its tag to ensure downstream federates can make progress. + // Before advancing to the next tag, we need to wait some time so that we don't overwhelm the network and the + // RTI. That amount of time will be no greater than ADVANCE_MESSAGE_INTERVAL in the future. + LF_PRINT_DEBUG("Waiting for physical time to elapse or an event on the event queue."); - if (tag.time != FOREVER) { - // Create a dummy event that will force this federate to advance time and subsequently - // enable progress for downstream federates. Increment the time by ADVANCE_MESSAGE_INTERVAL - // to prevent too frequent dummy events. - event_t* dummy = _lf_create_dummy_events(env, NULL, tag.time + ADVANCE_MESSAGE_INTERVAL, NULL, 0); - pqueue_insert(env->event_q, dummy); - } + instant_t wait_until_time_ns = lf_time_physical() + ADVANCE_MESSAGE_INTERVAL; - LF_PRINT_DEBUG("Inserted a dummy event for logical time " PRINTF_TIME ".", - tag.time - lf_time_start()); + // Regardless of the ADVANCE_MESSAGE_INTERVAL, do not let this + // wait exceed the time of the next tag. + if (wait_until_time_ns > original_tag.time) { + wait_until_time_ns = original_tag.time; + } - if (!wait_for_reply) { - LF_PRINT_LOG("Not waiting for physical time to advance further."); - return tag; - } + lf_clock_cond_timedwait(&env->event_q_changed, wait_until_time_ns); - // This federate should repeatedly advance its tag to ensure downstream federates can make progress. 
- // Before advancing to the next tag, we need to wait some time so that we don't overwhelm the network and the - // RTI. That amount of time will be no greater than ADVANCE_MESSAGE_INTERVAL in the future. - LF_PRINT_DEBUG("Waiting for physical time to elapse or an event on the event queue."); + LF_PRINT_DEBUG("Wait finished or interrupted."); - instant_t wait_until_time_ns = lf_time_physical() + ADVANCE_MESSAGE_INTERVAL; + // Either the timeout expired or the wait was interrupted by an event being + // put onto the event queue. In either case, we can just loop around. + // The next iteration will determine whether another + // NET should be sent or not. + tag = get_next_event_tag(env); + } +} - // Regardless of the ADVANCE_MESSAGE_INTERVAL, do not let this - // wait exceed the time of the next tag. - if (wait_until_time_ns > original_tag.time) { - wait_until_time_ns = original_tag.time; - } +void lf_send_port_absent_to_federate(environment_t* env, interval_t additional_delay, unsigned short port_ID, + unsigned short fed_ID) { + assert(env != GLOBAL_ENVIRONMENT); - lf_clock_cond_timedwait(&env->event_q_changed, wait_until_time_ns); + // Construct the message + size_t message_length = 1 + sizeof(port_ID) + sizeof(fed_ID) + sizeof(instant_t) + sizeof(microstep_t); + unsigned char buffer[message_length]; - LF_PRINT_DEBUG("Wait finished or interrupted."); + // Apply the additional delay to the current tag and use that as the intended + // tag of the outgoing message. Note that if there is delay on the connection, + // then we cannot promise no message with tag = current_tag + delay because a + // subsequent reaction might produce such a message. But we can promise no + // message with a tag strictly less than current_tag + delay. + tag_t current_message_intended_tag = lf_delay_strict(env->current_tag, additional_delay); - // Either the timeout expired or the wait was interrupted by an event being - // put onto the event queue. In either case, we can just loop around. 
- // The next iteration will determine whether another - // NET should be sent or not. - tag = get_next_event_tag(env); - } -} + LF_PRINT_LOG("Sending port " + "absent for tag " PRINTF_TAG " for port %d to federate %d.", + current_message_intended_tag.time - start_time, current_message_intended_tag.microstep, port_ID, fed_ID); -void lf_send_port_absent_to_federate( - environment_t* env, - interval_t additional_delay, - unsigned short port_ID, - unsigned short fed_ID) { - assert(env != GLOBAL_ENVIRONMENT); - - // Construct the message - size_t message_length = 1 + sizeof(port_ID) + sizeof(fed_ID) + sizeof(instant_t) + sizeof(microstep_t); - unsigned char buffer[message_length]; - - // Apply the additional delay to the current tag and use that as the intended - // tag of the outgoing message. Note that if there is delay on the connection, - // then we cannot promise no message with tag = current_tag + delay because a - // subsequent reaction might produce such a message. But we can promise no - // message with a tag strictly less than current_tag + delay. 
- tag_t current_message_intended_tag = lf_delay_strict(env->current_tag, additional_delay); - - LF_PRINT_LOG("Sending port " - "absent for tag " PRINTF_TAG " for port %d to federate %d.", - current_message_intended_tag.time - start_time, - current_message_intended_tag.microstep, - port_ID, fed_ID); - - buffer[0] = MSG_TYPE_PORT_ABSENT; - encode_uint16(port_ID, &(buffer[1])); - encode_uint16(fed_ID, &(buffer[1+sizeof(port_ID)])); - encode_tag(&(buffer[1+sizeof(port_ID)+sizeof(fed_ID)]), current_message_intended_tag); + buffer[0] = MSG_TYPE_PORT_ABSENT; + encode_uint16(port_ID, &(buffer[1])); + encode_uint16(fed_ID, &(buffer[1 + sizeof(port_ID)])); + encode_tag(&(buffer[1 + sizeof(port_ID) + sizeof(fed_ID)]), current_message_intended_tag); #ifdef FEDERATED_CENTRALIZED - // Send the absent message through the RTI - int* socket = &_fed.socket_TCP_RTI; + // Send the absent message through the RTI + int* socket = &_fed.socket_TCP_RTI; #else - // Send the absent message directly to the federate - int* socket = &_fed.sockets_for_outbound_p2p_connections[fed_ID]; + // Send the absent message directly to the federate + int* socket = &_fed.sockets_for_outbound_p2p_connections[fed_ID]; #endif - if (socket == &_fed.socket_TCP_RTI) { - tracepoint_federate_to_rti( - _fed.trace, send_PORT_ABS, _lf_my_fed_id, ¤t_message_intended_tag); - } else { - tracepoint_federate_to_federate( - _fed.trace, send_PORT_ABS, _lf_my_fed_id, fed_ID, ¤t_message_intended_tag); - } + if (socket == &_fed.socket_TCP_RTI) { + tracepoint_federate_to_rti(send_PORT_ABS, _lf_my_fed_id, ¤t_message_intended_tag); + } else { + tracepoint_federate_to_federate(send_PORT_ABS, _lf_my_fed_id, fed_ID, ¤t_message_intended_tag); + } - LF_MUTEX_LOCK(&lf_outbound_socket_mutex); - int result = write_to_socket_close_on_error(socket, message_length, buffer); - LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); + LF_MUTEX_LOCK(&lf_outbound_socket_mutex); + int result = write_to_socket_close_on_error(socket, message_length, buffer); + 
LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); - if (result != 0) { - // Write failed. Response depends on whether coordination is centralized. - if (socket == &_fed.socket_TCP_RTI) { - // Centralized coordination. This is a critical error. - lf_print_error_system_failure("Failed to send port absent message for port %hu to federate %hu.", - port_ID, fed_ID); - } else { - // Decentralized coordination. This is not a critical error. - lf_print_warning("Failed to send port absent message for port %hu to federate %hu.", - port_ID, fed_ID); - } + if (result != 0) { + // Write failed. Response depends on whether coordination is centralized. + if (socket == &_fed.socket_TCP_RTI) { + // Centralized coordination. This is a critical error. + lf_print_error_system_failure("Failed to send port absent message for port %hu to federate %hu.", port_ID, + fed_ID); + } else { + // Decentralized coordination. This is not a critical error. + lf_print_warning("Failed to send port absent message for port %hu to federate %hu.", port_ID, fed_ID); } + } } int lf_send_stop_request_to_rti(tag_t stop_tag) { - // Send a stop request with the specified tag to the RTI - unsigned char buffer[MSG_TYPE_STOP_REQUEST_LENGTH]; - // Stop at the next microstep - stop_tag.microstep++; - ENCODE_STOP_REQUEST(buffer, stop_tag.time, stop_tag.microstep); + // Send a stop request with the specified tag to the RTI + unsigned char buffer[MSG_TYPE_STOP_REQUEST_LENGTH]; + // Stop at the next microstep + stop_tag.microstep++; + ENCODE_STOP_REQUEST(buffer, stop_tag.time, stop_tag.microstep); - LF_MUTEX_LOCK(&lf_outbound_socket_mutex); - // Do not send a stop request if a stop request has been previously received from the RTI. - if (!_fed.received_stop_request_from_rti) { - LF_PRINT_LOG("Sending to RTI a MSG_TYPE_STOP_REQUEST message with tag " PRINTF_TAG ".", - stop_tag.time - start_time, - stop_tag.microstep); - - if (_fed.socket_TCP_RTI < 0) { - lf_print_warning("Socket is no longer connected. 
Dropping message."); - LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); - return -1; - } - // Trace the event when tracing is enabled - tracepoint_federate_to_rti(_fed.trace, send_STOP_REQ, _lf_my_fed_id, &stop_tag); - - write_to_socket_fail_on_error(&_fed.socket_TCP_RTI, MSG_TYPE_STOP_REQUEST_LENGTH, - buffer, &lf_outbound_socket_mutex, - "Failed to send stop time " PRINTF_TIME " to the RTI.", stop_tag.time - start_time); + LF_MUTEX_LOCK(&lf_outbound_socket_mutex); + // Do not send a stop request if a stop request has been previously received from the RTI. + if (!_fed.received_stop_request_from_rti) { + LF_PRINT_LOG("Sending to RTI a MSG_TYPE_STOP_REQUEST message with tag " PRINTF_TAG ".", stop_tag.time - start_time, + stop_tag.microstep); - // Treat this sending as equivalent to having received a stop request from the RTI. - _fed.received_stop_request_from_rti = true; - LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); - return 0; - } else { - LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); - return 1; + if (_fed.socket_TCP_RTI < 0) { + lf_print_warning("Socket is no longer connected. 
Dropping message."); + LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); + return -1; } -} + // Trace the event when tracing is enabled + tracepoint_federate_to_rti(send_STOP_REQ, _lf_my_fed_id, &stop_tag); -int lf_send_tagged_message(environment_t* env, - interval_t additional_delay, - int message_type, - unsigned short port, - unsigned short federate, - const char* next_destination_str, - size_t length, - unsigned char* message) { - assert(env != GLOBAL_ENVIRONMENT); - - size_t header_length = 1 + sizeof(uint16_t) + sizeof(uint16_t) - + sizeof(int32_t) + sizeof(instant_t) + sizeof(microstep_t); - unsigned char header_buffer[header_length]; - - if (message_type != MSG_TYPE_TAGGED_MESSAGE && message_type != MSG_TYPE_P2P_TAGGED_MESSAGE) { - lf_print_error("lf_send_message: Unsupported message type (%d).", message_type); - return -1; - } + write_to_socket_fail_on_error(&_fed.socket_TCP_RTI, MSG_TYPE_STOP_REQUEST_LENGTH, buffer, &lf_outbound_socket_mutex, + "Failed to send stop time " PRINTF_TIME " to the RTI.", stop_tag.time - start_time); - size_t buffer_head = 0; - // First byte is the message type. - header_buffer[buffer_head] = (unsigned char)message_type; - buffer_head += sizeof(unsigned char); - // Next two bytes identify the destination port. - // NOTE: Send messages little endian, not big endian. - encode_uint16(port, &(header_buffer[buffer_head])); - buffer_head += sizeof(uint16_t); - - // Next two bytes identify the destination federate. - encode_uint16(federate, &(header_buffer[buffer_head])); - buffer_head += sizeof(uint16_t); - - // The next four bytes are the message length. - encode_int32((int32_t)length, &(header_buffer[buffer_head])); - buffer_head += sizeof(int32_t); - - // Apply the additional delay to the current tag and use that as the intended - // tag of the outgoing message. 
- tag_t current_message_intended_tag = lf_delay_tag(env->current_tag, additional_delay); - - if (lf_is_tag_after_stop_tag(env, current_message_intended_tag)) { - // Message tag is past the timeout time (the stop time) so it should not be sent. - LF_PRINT_LOG("Dropping message because it will be after the timeout time."); - return -1; - } + // Treat this sending as equivalent to having received a stop request from the RTI. + _fed.received_stop_request_from_rti = true; + LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); + return 0; + } else { + LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); + return 1; + } +} - // Next 8 + 4 will be the tag (timestamp, microstep) - encode_tag( - &(header_buffer[buffer_head]), - current_message_intended_tag - ); +int lf_send_tagged_message(environment_t* env, interval_t additional_delay, int message_type, unsigned short port, + unsigned short federate, const char* next_destination_str, size_t length, + unsigned char* message) { + assert(env != GLOBAL_ENVIRONMENT); - LF_PRINT_LOG("Sending message with tag " PRINTF_TAG " to %s.", - current_message_intended_tag.time - start_time, - current_message_intended_tag.microstep, - next_destination_str); + size_t header_length = + 1 + sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t) + sizeof(instant_t) + sizeof(microstep_t); + unsigned char header_buffer[header_length]; - // Use a mutex lock to prevent multiple threads from simultaneously sending. - LF_MUTEX_LOCK(&lf_outbound_socket_mutex); - - int* socket; + if (message_type != MSG_TYPE_TAGGED_MESSAGE && message_type != MSG_TYPE_P2P_TAGGED_MESSAGE) { + lf_print_error("lf_send_message: Unsupported message type (%d).", message_type); + return -1; + } + + size_t buffer_head = 0; + // First byte is the message type. + header_buffer[buffer_head] = (unsigned char)message_type; + buffer_head += sizeof(unsigned char); + // Next two bytes identify the destination port. + // NOTE: Send messages little endian, not big endian. 
+ encode_uint16(port, &(header_buffer[buffer_head])); + buffer_head += sizeof(uint16_t); + + // Next two bytes identify the destination federate. + encode_uint16(federate, &(header_buffer[buffer_head])); + buffer_head += sizeof(uint16_t); + + // The next four bytes are the message length. + encode_uint32((uint32_t)length, &(header_buffer[buffer_head])); + buffer_head += sizeof(uint32_t); + + // Apply the additional delay to the current tag and use that as the intended + // tag of the outgoing message. + tag_t current_message_intended_tag = lf_delay_tag(env->current_tag, additional_delay); + + if (lf_is_tag_after_stop_tag(env, current_message_intended_tag)) { + // Message tag is past the timeout time (the stop time) so it should not be sent. + LF_PRINT_LOG("Dropping message because it will be after the timeout time."); + return -1; + } + + // Next 8 + 4 will be the tag (timestamp, microstep) + encode_tag(&(header_buffer[buffer_head]), current_message_intended_tag); + + LF_PRINT_LOG("Sending message with tag " PRINTF_TAG " to %s.", current_message_intended_tag.time - start_time, + current_message_intended_tag.microstep, next_destination_str); + + // Use a mutex lock to prevent multiple threads from simultaneously sending. + LF_MUTEX_LOCK(&lf_outbound_socket_mutex); + + int* socket; + if (message_type == MSG_TYPE_P2P_TAGGED_MESSAGE) { + socket = &_fed.sockets_for_outbound_p2p_connections[federate]; + tracepoint_federate_to_federate(_fed.trace, send_P2P_TAGGED_MSG, _lf_my_fed_id, federate, + ¤t_message_intended_tag); + } else { + socket = &_fed.socket_TCP_RTI; + tracepoint_federate_to_rti(_fed.trace, send_TAGGED_MSG, _lf_my_fed_id, ¤t_message_intended_tag); + } + + if (lf_tag_compare(_fed.last_DNET, current_message_intended_tag) > 0) { + _fed.last_DNET = current_message_intended_tag; + } + + int result = write_to_socket_close_on_error(socket, header_length, header_buffer); + if (result == 0) { + // Header sent successfully. Send the body. 
+ result = write_to_socket_close_on_error(socket, length, message); + } + if (result != 0) { + // Message did not send. Handling depends on message type. if (message_type == MSG_TYPE_P2P_TAGGED_MESSAGE) { - socket = &_fed.sockets_for_outbound_p2p_connections[federate]; - tracepoint_federate_to_federate(_fed.trace, send_P2P_TAGGED_MSG, _lf_my_fed_id, federate, ¤t_message_intended_tag); + lf_print_warning("Failed to send message to %s. Dropping the message.", next_destination_str); } else { - socket = &_fed.socket_TCP_RTI; - tracepoint_federate_to_rti(_fed.trace, send_TAGGED_MSG, _lf_my_fed_id, ¤t_message_intended_tag); - } - - if (lf_tag_compare(_fed.last_DNET, current_message_intended_tag) > 0) { - _fed.last_DNET = current_message_intended_tag; - } - - int result = write_to_socket_close_on_error(socket, header_length, header_buffer); - if (result == 0) { - // Header sent successfully. Send the body. - result = write_to_socket_close_on_error(socket, length, message); + lf_print_error_system_failure("Failed to send message to %s with error code %d (%s). Connection lost to the RTI.", + next_destination_str, errno, strerror(errno)); } - if (result != 0) { - // Message did not send. Handling depends on message type. - if (message_type == MSG_TYPE_P2P_TAGGED_MESSAGE) { - lf_print_warning("Failed to send message to %s. Dropping the message.", next_destination_str); - } else { - lf_print_error_system_failure("Failed to send message to %s. 
Connection lost to the RTI.", - next_destination_str); - } - } - LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); - return result; -} - -void lf_set_federation_id(const char* fid) { - federation_metadata.federation_id = fid; + } + LF_MUTEX_UNLOCK(&lf_outbound_socket_mutex); + return result; } -void lf_set_federation_trace_object(trace_t * trace) { - _fed.trace = trace; -} +void lf_set_federation_id(const char* fid) { federation_metadata.federation_id = fid; } #ifdef FEDERATED_DECENTRALIZED -void lf_spawn_staa_thread(){ - lf_thread_create(&_fed.staaSetter, update_ports_from_staa_offsets, NULL); -} +void lf_spawn_staa_thread() { lf_thread_create(&_fed.staaSetter, update_ports_from_staa_offsets, NULL); } #endif // FEDERATED_DECENTRALIZED void lf_stall_advance_level_federation(environment_t* env, size_t level) { - LF_PRINT_DEBUG("Acquiring the environment mutex."); - LF_MUTEX_LOCK(&env->mutex); - LF_PRINT_DEBUG("Waiting on MLAA with next_reaction_level %zu and MLAA %d.", level, max_level_allowed_to_advance); - while (((int) level) >= max_level_allowed_to_advance) { - lf_cond_wait(&lf_port_status_changed); - }; - LF_PRINT_DEBUG("Exiting wait with MLAA %d and next_reaction_level %zu.", max_level_allowed_to_advance, level); - LF_MUTEX_UNLOCK(&env->mutex); + LF_PRINT_DEBUG("Acquiring the environment mutex."); + LF_MUTEX_LOCK(&env->mutex); + LF_PRINT_DEBUG("Waiting on MLAA with next_reaction_level %zu and MLAA %d.", level, max_level_allowed_to_advance); + while (((int)level) >= max_level_allowed_to_advance) { + lf_cond_wait(&lf_port_status_changed); + }; + LF_PRINT_DEBUG("Exiting wait with MLAA %d and next_reaction_level %zu.", max_level_allowed_to_advance, level); + LF_MUTEX_UNLOCK(&env->mutex); } void lf_synchronize_with_other_federates(void) { - LF_PRINT_DEBUG("Synchronizing with other federates."); - - // Reset the start time to the coordinated start time for all federates. - // Note that this does not grant execution to this federate. 
- start_time = get_start_time_from_rti(lf_time_physical()); - - // Start a thread to listen for incoming TCP messages from the RTI. - // @note Up until this point, the federate has been listening for messages - // from the RTI in a sequential manner in the main thread. From now on, a - // separate thread is created to allow for asynchronous communication. - lf_thread_create(&_fed.RTI_socket_listener, listen_to_rti_TCP, NULL); - lf_thread_t thread_id; - if (create_clock_sync_thread(&thread_id)) { - lf_print_warning("Failed to create thread to handle clock synchronization."); - } + LF_PRINT_DEBUG("Synchronizing with other federates."); + + // Reset the start time to the coordinated start time for all federates. + // Note that this does not grant execution to this federate. + start_time = get_start_time_from_rti(lf_time_physical()); + lf_tracing_set_start_time(start_time); + + // Start a thread to listen for incoming TCP messages from the RTI. + // @note Up until this point, the federate has been listening for messages + // from the RTI in a sequential manner in the main thread. From now on, a + // separate thread is created to allow for asynchronous communication. + lf_thread_create(&_fed.RTI_socket_listener, listen_to_rti_TCP, NULL); + lf_thread_t thread_id; + if (create_clock_sync_thread(&thread_id)) { + lf_print_warning("Failed to create thread to handle clock synchronization."); + } } bool lf_update_max_level(tag_t tag, bool is_provisional) { - // This always needs the top-level environment, which will be env[0]. - environment_t *env; - _lf_get_environments(&env); - int prev_max_level_allowed_to_advance = max_level_allowed_to_advance; - max_level_allowed_to_advance = INT_MAX; + // This always needs the top-level environment, which will be env[0]. 
+ environment_t* env; + _lf_get_environments(&env); + int prev_max_level_allowed_to_advance = max_level_allowed_to_advance; + max_level_allowed_to_advance = INT_MAX; #ifdef FEDERATED_DECENTRALIZED - size_t action_table_size = _lf_action_table_size; - lf_action_base_t** action_table = _lf_action_table; + size_t action_table_size = _lf_action_table_size; + lf_action_base_t** action_table = _lf_action_table; #else - // Note that the following test is never true for decentralized coordination, - // where tag always is NEVER_TAG. - if ((lf_tag_compare(env->current_tag, tag) < 0) || ( - lf_tag_compare(env->current_tag, tag) == 0 && !is_provisional - )) { - LF_PRINT_DEBUG("Updated MLAA to %d at time " PRINTF_TIME ".", - max_level_allowed_to_advance, - lf_time_logical_elapsed(env) - ); - // Safe to complete the current tag - return (prev_max_level_allowed_to_advance != max_level_allowed_to_advance); - } + // Note that the following test is never true for decentralized coordination, + // where tag always is NEVER_TAG. 
+ if ((lf_tag_compare(env->current_tag, tag) < 0) || (lf_tag_compare(env->current_tag, tag) == 0 && !is_provisional)) { + LF_PRINT_DEBUG("Updated MLAA to %d at time " PRINTF_TIME ".", max_level_allowed_to_advance, + lf_time_logical_elapsed(env)); + // Safe to complete the current tag + return (prev_max_level_allowed_to_advance != max_level_allowed_to_advance); + } - size_t action_table_size = _lf_zero_delay_cycle_action_table_size; - lf_action_base_t** action_table = _lf_zero_delay_cycle_action_table; + size_t action_table_size = _lf_zero_delay_cycle_action_table_size; + lf_action_base_t** action_table = _lf_zero_delay_cycle_action_table; #endif // FEDERATED_DECENTRALIZED - for (int i = 0; i < action_table_size; i++) { - lf_action_base_t* input_port_action = action_table[i]; + for (int i = 0; i < action_table_size; i++) { + lf_action_base_t* input_port_action = action_table[i]; #ifdef FEDERATED_DECENTRALIZED - // In decentralized execution, if the current_tag is close enough to the - // start tag and there is a large enough delay on an incoming - // connection, then there is no need to block progress waiting for this - // port status. This is irrelevant for centralized because blocking only - // occurs on zero-delay cycles. - if ( - (_lf_action_delay_table[i] == 0 && env->current_tag.time == start_time && env->current_tag.microstep == 0) - || (_lf_action_delay_table[i] > 0 && lf_tag_compare( - env->current_tag, - lf_delay_strict((tag_t) {.time=start_time, .microstep=0}, _lf_action_delay_table[i]) - ) <= 0) - ) { - continue; - } -#endif // FEDERATED_DECENTRALIZED - // If the current tag is greater than the last known status tag of the input port, - // and the input port is not physical, then block on that port by ensuring - // the MLAA is no greater than the level of that port. - // For centralized coordination, this is applied only to input ports coming from - // federates that are in a ZDC. For decentralized coordination, this is applied - // to all input ports. 
- if (lf_tag_compare(env->current_tag, - input_port_action->trigger->last_known_status_tag) > 0 - && !input_port_action->trigger->is_physical) { - max_level_allowed_to_advance = LF_MIN( - max_level_allowed_to_advance, - ((int) LF_LEVEL(input_port_action->trigger->reactions[0]->index)) - ); - } + // In decentralized execution, if the current_tag is close enough to the + // start tag and there is a large enough delay on an incoming + // connection, then there is no need to block progress waiting for this + // port status. This is irrelevant for centralized because blocking only + // occurs on zero-delay cycles. + if ((_lf_action_delay_table[i] == 0 && env->current_tag.time == start_time && env->current_tag.microstep == 0) || + (_lf_action_delay_table[i] > 0 && + lf_tag_compare(env->current_tag, lf_delay_strict((tag_t){.time = start_time, .microstep = 0}, + _lf_action_delay_table[i])) <= 0)) { + continue; } - LF_PRINT_DEBUG("Updated MLAA to %d at time " PRINTF_TIME ".", - max_level_allowed_to_advance, - lf_time_logical_elapsed(env) - ); - return (prev_max_level_allowed_to_advance != max_level_allowed_to_advance); +#endif // FEDERATED_DECENTRALIZED + // If the current tag is greater than the last known status tag of the input port, + // and the input port is not physical, then block on that port by ensuring + // the MLAA is no greater than the level of that port. + // For centralized coordination, this is applied only to input ports coming from + // federates that are in a ZDC. For decentralized coordination, this is applied + // to all input ports. 
+ if (lf_tag_compare(env->current_tag, input_port_action->trigger->last_known_status_tag) > 0 && + !input_port_action->trigger->is_physical) { + max_level_allowed_to_advance = + LF_MIN(max_level_allowed_to_advance, ((int)LF_LEVEL(input_port_action->trigger->reactions[0]->index))); + } + } + LF_PRINT_DEBUG("Updated MLAA to %d at time " PRINTF_TIME ".", max_level_allowed_to_advance, + lf_time_logical_elapsed(env)); + return (prev_max_level_allowed_to_advance != max_level_allowed_to_advance); } #endif diff --git a/core/federated/network/net_util.c b/core/federated/network/net_util.c index 813e5cd07..c6b3b57a6 100644 --- a/core/federated/network/net_util.c +++ b/core/federated/network/net_util.c @@ -33,21 +33,21 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include #include #include -#include // For sqrtl() and powl -#include // Defines va_list +#include // For sqrtl() and powl +#include // Defines va_list #include #include -#include // Defines memcpy() -#include // Defines nanosleep() -#include // IPPROTO_TCP, IPPROTO_UDP -#include // TCP_NODELAY +#include // Defines memcpy() +#include // Defines nanosleep() +#include // IPPROTO_TCP, IPPROTO_UDP +#include // TCP_NODELAY #include "net_util.h" #include "util.h" // Define socket functions only for federated execution. #ifdef FEDERATED -#include // Defines read(), write(), and close() +#include // Defines read(), write(), and close() #ifndef NUMBER_OF_FEDERATES #define NUMBER_OF_FEDERATES 1 @@ -61,167 +61,160 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. lf_mutex_t socket_mutex; int create_real_time_tcp_socket_errexit() { - int sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); - if (sock < 0) { - lf_print_error_system_failure("Could not open TCP socket."); - } - // Disable Nagle's algorithm which bundles together small TCP messages to - // reduce network traffic. 
-    // TODO: Re-consider if we should do this, and whether disabling delayed ACKs
-    // is enough.
-    int flag = 1;
-    int result = setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, &flag, sizeof(int));
-
-    if (result < 0) {
-        lf_print_error_system_failure("Failed to disable Nagle algorithm on socket server.");
-    }
-
+  int sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+  if (sock < 0) {
+    lf_print_error_system_failure("Could not open TCP socket.");
+  }
+  // Disable Nagle's algorithm which bundles together small TCP messages to
+  // reduce network traffic.
+  // TODO: Re-consider if we should do this, and whether disabling delayed ACKs
+  // is enough.
+  int flag = 1;
+  int result = setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, &flag, sizeof(int));
+
+  if (result < 0) {
+    lf_print_error_system_failure("Failed to disable Nagle algorithm on socket server.");
+  }
+
 #if defined(PLATFORM_Linux)
-    // Disable delayed ACKs. Only possible on Linux
-    result = setsockopt(sock, IPPROTO_TCP, TCP_QUICKACK, &flag, sizeof(int));
-
-    if (result < 0) {
-        lf_print_error_system_failure("Failed to disable Nagle algorithm on socket server.");
-    }
+  // Disable delayed ACKs. Only possible on Linux
+  result = setsockopt(sock, IPPROTO_TCP, TCP_QUICKACK, &flag, sizeof(int));
+
+  if (result < 0) {
+    lf_print_error_system_failure("Failed to disable delayed ACKs on socket server.");
+  }
 #endif // Linux
-
-    return sock;
+
+  return sock;
 }
 
 int read_from_socket(int socket, size_t num_bytes, unsigned char* buffer) {
-    if (socket < 0) {
-        // Socket is not open.
-        errno = EBADF;
-        return -1;
-    }
-    ssize_t bytes_read = 0;
-    int retry_count = 0;
-    while (bytes_read < (ssize_t)num_bytes) {
-        ssize_t more = read(socket, buffer + bytes_read, num_bytes - (size_t)bytes_read);
-        if(more < 0 && retry_count++ < NUM_SOCKET_RETRIES
-                && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)) {
-            // Those error codes set by the socket indicates
-            // that we should try again (@see man errno).
- lf_print_warning("Reading from socket failed. Will try again."); - lf_sleep(DELAY_BETWEEN_SOCKET_RETRIES); - continue; - } else if (more < 0) { - // A more serious error occurred. - return -1; - } else if (more == 0) { - // EOF received. - return 1; - } - bytes_read += more; + if (socket < 0) { + // Socket is not open. + errno = EBADF; + return -1; + } + ssize_t bytes_read = 0; + int retry_count = 0; + while (bytes_read < (ssize_t)num_bytes) { + ssize_t more = read(socket, buffer + bytes_read, num_bytes - (size_t)bytes_read); + if (more < 0 && retry_count++ < NUM_SOCKET_RETRIES && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)) { + // Those error codes set by the socket indicates + // that we should try again (@see man errno). + lf_print_warning("Reading from socket failed. Will try again."); + lf_sleep(DELAY_BETWEEN_SOCKET_RETRIES); + continue; + } else if (more < 0) { + // A more serious error occurred. + return -1; + } else if (more == 0) { + // EOF received. + return 1; } - return 0; + bytes_read += more; + } + return 0; } int read_from_socket_close_on_error(int* socket, size_t num_bytes, unsigned char* buffer) { - assert(socket); - int read_failed = read_from_socket(*socket, num_bytes, buffer); - if (read_failed) { - // Read failed. - // Socket has probably been closed from the other side. - // Shut down and close the socket from this side. - shutdown(*socket, SHUT_RDWR); - close(*socket); - // Mark the socket closed. - *socket = -1; - return -1; + assert(socket); + int read_failed = read_from_socket(*socket, num_bytes, buffer); + if (read_failed) { + // Read failed. + // Socket has probably been closed from the other side. + // Shut down and close the socket from this side. + shutdown(*socket, SHUT_RDWR); + close(*socket); + // Mark the socket closed. + *socket = -1; + return -1; + } + return 0; +} + +void read_from_socket_fail_on_error(int* socket, size_t num_bytes, unsigned char* buffer, lf_mutex_t* mutex, + char* format, ...) 
{ + va_list args; + assert(socket); + int read_failed = read_from_socket_close_on_error(socket, num_bytes, buffer); + if (read_failed) { + // Read failed. + if (mutex != NULL) { + LF_MUTEX_UNLOCK(mutex); } - return 0; -} - -void read_from_socket_fail_on_error( - int* socket, - size_t num_bytes, - unsigned char* buffer, - lf_mutex_t* mutex, - char* format, ...) { - va_list args; - assert(socket); - int read_failed = read_from_socket_close_on_error(socket, num_bytes, buffer); - if (read_failed) { - // Read failed. - if (mutex != NULL) { - LF_MUTEX_UNLOCK(mutex); - } - if (format != NULL) { - lf_print_error_system_failure(format, args); - } else { - lf_print_error_system_failure("Failed to read from socket."); - } + if (format != NULL) { + lf_print_error_system_failure(format, args); + } else { + lf_print_error_system_failure("Failed to read from socket."); } + } } ssize_t peek_from_socket(int socket, unsigned char* result) { - ssize_t bytes_read = recv(socket, result, 1, MSG_DONTWAIT | MSG_PEEK); - if (bytes_read < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) return 0; - else return bytes_read; + ssize_t bytes_read = recv(socket, result, 1, MSG_DONTWAIT | MSG_PEEK); + if (bytes_read < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) + return 0; + else + return bytes_read; } int write_to_socket(int socket, size_t num_bytes, unsigned char* buffer) { - if (socket < 0) { - // Socket is not open. - errno = EBADF; - return -1; - } - ssize_t bytes_written = 0; - va_list args; - while (bytes_written < (ssize_t)num_bytes) { - ssize_t more = write(socket, buffer + bytes_written, num_bytes - (size_t)bytes_written); - if (more <= 0 && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)) { - // The error codes EAGAIN or EWOULDBLOCK indicate - // that we should try again (@see man errno). - // The error code EINTR means the system call was interrupted before completing. - LF_PRINT_DEBUG("Writing to socket was blocked. 
Will try again."); - lf_sleep(DELAY_BETWEEN_SOCKET_RETRIES); - continue; - } else if (more < 0) { - // A more serious error occurred. - return -1; - } - bytes_written += more; + if (socket < 0) { + // Socket is not open. + errno = EBADF; + return -1; + } + ssize_t bytes_written = 0; + va_list args; + while (bytes_written < (ssize_t)num_bytes) { + ssize_t more = write(socket, buffer + bytes_written, num_bytes - (size_t)bytes_written); + if (more <= 0 && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)) { + // The error codes EAGAIN or EWOULDBLOCK indicate + // that we should try again (@see man errno). + // The error code EINTR means the system call was interrupted before completing. + LF_PRINT_DEBUG("Writing to socket was blocked. Will try again."); + lf_sleep(DELAY_BETWEEN_SOCKET_RETRIES); + continue; + } else if (more < 0) { + // A more serious error occurred. + return -1; } - return 0; + bytes_written += more; + } + return 0; } int write_to_socket_close_on_error(int* socket, size_t num_bytes, unsigned char* buffer) { - assert(socket); - int result = write_to_socket(*socket, num_bytes, buffer); - if (result) { - // Write failed. - // Socket has probably been closed from the other side. - // Shut down and close the socket from this side. - shutdown(*socket, SHUT_RDWR); - close(*socket); - // Mark the socket closed. - *socket = -1; + assert(socket); + int result = write_to_socket(*socket, num_bytes, buffer); + if (result) { + // Write failed. + // Socket has probably been closed from the other side. + // Shut down and close the socket from this side. + shutdown(*socket, SHUT_RDWR); + close(*socket); + // Mark the socket closed. + *socket = -1; + } + return result; +} + +void write_to_socket_fail_on_error(int* socket, size_t num_bytes, unsigned char* buffer, lf_mutex_t* mutex, + char* format, ...) { + va_list args; + assert(socket); + int result = write_to_socket_close_on_error(socket, num_bytes, buffer); + if (result) { + // Write failed. 
+ if (mutex != NULL) { + LF_MUTEX_UNLOCK(mutex); } - return result; -} - -void write_to_socket_fail_on_error( - int* socket, - size_t num_bytes, - unsigned char* buffer, - lf_mutex_t* mutex, - char* format, ...) { - va_list args; - assert(socket); - int result = write_to_socket_close_on_error(socket, num_bytes, buffer); - if (result) { - // Write failed. - if (mutex != NULL) { - LF_MUTEX_UNLOCK(mutex); - } - if (format != NULL) { - lf_print_error_system_failure(format, args); - } else { - lf_print_error("Failed to write to socket. Closing it."); - } + if (format != NULL) { + lf_print_error_system_failure(format, args); + } else { + lf_print_error("Failed to write to socket. Closing it."); } + } } #endif // FEDERATED @@ -229,335 +222,338 @@ void write_to_socket_fail_on_error( // Below are more generally useful functions. void encode_int64(int64_t data, unsigned char* buffer) { - // This strategy is fairly brute force, but it avoids potential - // alignment problems. - int shift = 0; - for(size_t i = 0; i < sizeof(int64_t); i++) { - buffer[i] = (unsigned char)((data & (0xffLL << shift)) >> shift); - shift += 8; - } + // This strategy is fairly brute force, but it avoids potential + // alignment problems. + int shift = 0; + for (size_t i = 0; i < sizeof(int64_t); i++) { + buffer[i] = (unsigned char)((data & (0xffLL << shift)) >> shift); + shift += 8; + } } void encode_int32(int32_t data, unsigned char* buffer) { - // This strategy is fairly brute force, but it avoids potential - // alignment problems. Note that this assumes an int32_t is four bytes. - buffer[0] = (unsigned char)(data & 0xff); - buffer[1] = (unsigned char)((data & 0xff00) >> 8); - buffer[2] = (unsigned char)((data & 0xff0000) >> 16); - buffer[3] = (unsigned char)((data & (int32_t)0xff000000) >> 24); + // This strategy is fairly brute force, but it avoids potential + // alignment problems. Note that this assumes an int32_t is four bytes. 
+ buffer[0] = (unsigned char)(data & 0xff); + buffer[1] = (unsigned char)((data & 0xff00) >> 8); + buffer[2] = (unsigned char)((data & 0xff0000) >> 16); + buffer[3] = (unsigned char)((data & (int32_t)0xff000000) >> 24); } void encode_uint32(uint32_t data, unsigned char* buffer) { - // This strategy is fairly brute force, but it avoids potential - // alignment problems. Note that this assumes a uint32_t is four bytes. - buffer[0] = (unsigned char)(data & 0xff); - buffer[1] = (unsigned char)((data & 0xff00) >> 8); - buffer[2] = (unsigned char)((data & 0xff0000) >> 16); - buffer[3] = (unsigned char)((data & (uint32_t)0xff000000) >> 24); + // This strategy is fairly brute force, but it avoids potential + // alignment problems. Note that this assumes a uint32_t is four bytes. + buffer[0] = (unsigned char)(data & 0xff); + buffer[1] = (unsigned char)((data & 0xff00) >> 8); + buffer[2] = (unsigned char)((data & 0xff0000) >> 16); + buffer[3] = (unsigned char)((data & (uint32_t)0xff000000) >> 24); } void encode_uint16(uint16_t data, unsigned char* buffer) { - // This strategy is fairly brute force, but it avoids potential - // alignment problems. Note that this assumes a short is two bytes. - buffer[0] = (unsigned char)(data & 0xff); - buffer[1] = (unsigned char)((data & 0xff00) >> 8); + // This strategy is fairly brute force, but it avoids potential + // alignment problems. Note that this assumes a short is two bytes. + buffer[0] = (unsigned char)(data & 0xff); + buffer[1] = (unsigned char)((data & 0xff00) >> 8); } int host_is_big_endian() { - static int host = 0; - union { - uint32_t uint; - unsigned char c[sizeof(uint32_t)]; - } x; - if (host == 0) { - // Determine the endianness of the host by setting the low-order bit. - x.uint = 0x01; - host = (x.c[3] == 0x01) ? 
HOST_BIG_ENDIAN : HOST_LITTLE_ENDIAN; - } - return (host == HOST_BIG_ENDIAN); + static int host = 0; + union { + uint32_t uint; + unsigned char c[sizeof(uint32_t)]; + } x; + if (host == 0) { + // Determine the endianness of the host by setting the low-order bit. + x.uint = 0x01; + host = (x.c[3] == 0x01) ? HOST_BIG_ENDIAN : HOST_LITTLE_ENDIAN; + } + return (host == HOST_BIG_ENDIAN); } int32_t swap_bytes_if_big_endian_int32(int32_t src) { - union { - int32_t uint; - unsigned char c[sizeof(int32_t)]; - } x; - if (!host_is_big_endian()) return src; - // printf("DEBUG: Host is little endian.\n"); - x.uint = src; - // printf("DEBUG: Before swapping bytes: %lld.\n", x.ull); - unsigned char c; - // Swap bytes. - c = x.c[0]; x.c[0] = x.c[3]; x.c[3] = c; - c = x.c[1]; x.c[1] = x.c[2]; x.c[2] = c; - // printf("DEBUG: After swapping bytes: %lld.\n", x.ull); - return x.uint; + union { + int32_t uint; + unsigned char c[sizeof(int32_t)]; + } x; + if (!host_is_big_endian()) + return src; + // printf("DEBUG: Host is little endian.\n"); + x.uint = src; + // printf("DEBUG: Before swapping bytes: %lld.\n", x.ull); + unsigned char c; + // Swap bytes. + c = x.c[0]; + x.c[0] = x.c[3]; + x.c[3] = c; + c = x.c[1]; + x.c[1] = x.c[2]; + x.c[2] = c; + // printf("DEBUG: After swapping bytes: %lld.\n", x.ull); + return x.uint; } uint32_t swap_bytes_if_big_endian_uint32(uint32_t src) { - union { - uint32_t uint; - unsigned char c[sizeof(uint32_t)]; - } x; - if (!host_is_big_endian()) return src; - // printf("DEBUG: Host is little endian.\n"); - x.uint = src; - // printf("DEBUG: Before swapping bytes: %lld.\n", x.ull); - unsigned char c; - // Swap bytes. 
- c = x.c[0]; x.c[0] = x.c[3]; x.c[3] = c; - c = x.c[1]; x.c[1] = x.c[2]; x.c[2] = c; - // printf("DEBUG: After swapping bytes: %lld.\n", x.ull); - return x.uint; + union { + uint32_t uint; + unsigned char c[sizeof(uint32_t)]; + } x; + if (!host_is_big_endian()) + return src; + // printf("DEBUG: Host is little endian.\n"); + x.uint = src; + // printf("DEBUG: Before swapping bytes: %lld.\n", x.ull); + unsigned char c; + // Swap bytes. + c = x.c[0]; + x.c[0] = x.c[3]; + x.c[3] = c; + c = x.c[1]; + x.c[1] = x.c[2]; + x.c[2] = c; + // printf("DEBUG: After swapping bytes: %lld.\n", x.ull); + return x.uint; } int64_t swap_bytes_if_big_endian_int64(int64_t src) { - union { - int64_t ull; - unsigned char c[sizeof(int64_t)]; - } x; - if (!host_is_big_endian()) return src; - // printf("DEBUG: Host is little endian.\n"); - x.ull = src; - // printf("DEBUG: Before swapping bytes: %lld.\n", x.ull); - unsigned char c; - // Swap bytes. - c = x.c[0]; x.c[0] = x.c[7]; x.c[7] = c; - c = x.c[1]; x.c[1] = x.c[6]; x.c[6] = c; - c = x.c[2]; x.c[2] = x.c[5]; x.c[5] = c; - c = x.c[3]; x.c[3] = x.c[4]; x.c[4] = c; - // printf("DEBUG: After swapping bytes: %lld.\n", x.ull); - return x.ull; + union { + int64_t ull; + unsigned char c[sizeof(int64_t)]; + } x; + if (!host_is_big_endian()) + return src; + // printf("DEBUG: Host is little endian.\n"); + x.ull = src; + // printf("DEBUG: Before swapping bytes: %lld.\n", x.ull); + unsigned char c; + // Swap bytes. 
+ c = x.c[0]; + x.c[0] = x.c[7]; + x.c[7] = c; + c = x.c[1]; + x.c[1] = x.c[6]; + x.c[6] = c; + c = x.c[2]; + x.c[2] = x.c[5]; + x.c[5] = c; + c = x.c[3]; + x.c[3] = x.c[4]; + x.c[4] = c; + // printf("DEBUG: After swapping bytes: %lld.\n", x.ull); + return x.ull; } uint16_t swap_bytes_if_big_endian_uint16(uint16_t src) { - union { - uint16_t uint; - unsigned char c[sizeof(uint16_t)]; - } x; - if (!host_is_big_endian()) return src; - // printf("DEBUG: Host is little endian.\n"); - x.uint = src; - // printf("DEBUG: Before swapping bytes: %lld.\n", x.ull); - unsigned char c; - // Swap bytes. - c = x.c[0]; x.c[0] = x.c[1]; x.c[1] = c; - // printf("DEBUG: After swapping bytes: %lld.\n", x.ull); - return x.uint; + union { + uint16_t uint; + unsigned char c[sizeof(uint16_t)]; + } x; + if (!host_is_big_endian()) + return src; + // printf("DEBUG: Host is little endian.\n"); + x.uint = src; + // printf("DEBUG: Before swapping bytes: %lld.\n", x.ull); + unsigned char c; + // Swap bytes. + c = x.c[0]; + x.c[0] = x.c[1]; + x.c[1] = c; + // printf("DEBUG: After swapping bytes: %lld.\n", x.ull); + return x.uint; } int32_t extract_int32(unsigned char* bytes) { - // Use memcpy to prevent possible alignment problems on some processors. - union { - int32_t uint; - unsigned char c[sizeof(int32_t)]; - } result; - memcpy(&result.c, bytes, sizeof(int32_t)); - return swap_bytes_if_big_endian_int32(result.uint); + // Use memcpy to prevent possible alignment problems on some processors. + union { + int32_t uint; + unsigned char c[sizeof(int32_t)]; + } result; + memcpy(&result.c, bytes, sizeof(int32_t)); + return swap_bytes_if_big_endian_int32(result.uint); } uint32_t extract_uint32(unsigned char* bytes) { - // Use memcpy to prevent possible alignment problems on some processors. 
- union { - uint32_t uint; - unsigned char c[sizeof(uint32_t)]; - } result; - memcpy(&result.c, bytes, sizeof(uint32_t)); - return swap_bytes_if_big_endian_uint32(result.uint); + // Use memcpy to prevent possible alignment problems on some processors. + union { + uint32_t uint; + unsigned char c[sizeof(uint32_t)]; + } result; + memcpy(&result.c, bytes, sizeof(uint32_t)); + return swap_bytes_if_big_endian_uint32(result.uint); } int64_t extract_int64(unsigned char* bytes) { - // Use memcpy to prevent possible alignment problems on some processors. - union { - int64_t ull; - unsigned char c[sizeof(int64_t)]; - } result; - memcpy(&result.c, bytes, sizeof(int64_t)); - return swap_bytes_if_big_endian_int64(result.ull); + // Use memcpy to prevent possible alignment problems on some processors. + union { + int64_t ull; + unsigned char c[sizeof(int64_t)]; + } result; + memcpy(&result.c, bytes, sizeof(int64_t)); + return swap_bytes_if_big_endian_int64(result.ull); } uint16_t extract_uint16(unsigned char* bytes) { - // Use memcpy to prevent possible alignment problems on some processors. - union { - uint16_t ushort; - unsigned char c[sizeof(uint16_t)]; - } result; - memcpy(&result.c, bytes, sizeof(uint16_t)); - return swap_bytes_if_big_endian_uint16(result.ushort); + // Use memcpy to prevent possible alignment problems on some processors. + union { + uint16_t ushort; + unsigned char c[sizeof(uint16_t)]; + } result; + memcpy(&result.c, bytes, sizeof(uint16_t)); + return swap_bytes_if_big_endian_uint16(result.ushort); } #ifdef FEDERATED -void extract_header( - unsigned char* buffer, - uint16_t* port_id, - uint16_t* federate_id, - size_t* length -) { - // The first two bytes are the ID of the destination reactor. - *port_id = extract_uint16(buffer); - - // The next two bytes are the ID of the destination federate. 
-    *federate_id = extract_uint16(&(buffer[sizeof(uint16_t)]));
-
-    // printf("DEBUG: Message for port %d of federate %d.\n", *port_id, *federate_id);
-
-    // The next four bytes are the message length.
-    int32_t local_length_signed = extract_int32(&(buffer[sizeof(uint16_t) + sizeof(uint16_t)]));
-    if (local_length_signed < 0) {
-        lf_print_error_and_exit(
-            "Received an invalid message length (%d) from federate %d.",
-            local_length_signed,
-            *federate_id
-        );
-    }
-    *length = (size_t)local_length_signed;
+void extract_header(unsigned char* buffer, uint16_t* port_id, uint16_t* federate_id, size_t* length) {
+  // The first two bytes are the ID of the destination reactor.
+  *port_id = extract_uint16(buffer);
+
+  // The next two bytes are the ID of the destination federate.
+  *federate_id = extract_uint16(&(buffer[sizeof(uint16_t)]));
+
+  // printf("DEBUG: Message for port %d of federate %d.\n", *port_id, *federate_id);
+
+  // The next four bytes are the message length.
+  int32_t local_length_signed = extract_int32(&(buffer[sizeof(uint16_t) + sizeof(uint16_t)]));
+  if (local_length_signed < 0) {
+    lf_print_error_and_exit("Received an invalid message length (%d) from federate %d.", local_length_signed,
+                            *federate_id);
+  }
+  *length = (size_t)local_length_signed;
 
-    // printf("DEBUG: Federate receiving message to port %d to federate %d of length %d.\n", port_id, federate_id, length);
+  // printf("DEBUG: Federate receiving message to port %d to federate %d of length %d.\n", port_id, federate_id,
+  // length);
 }
 
-void extract_timed_header(
-    unsigned char* buffer,
-    uint16_t* port_id,
-    uint16_t* federate_id,
-    size_t* length,
-    tag_t* tag
-) {
-    extract_header(buffer, port_id, federate_id, length);
+void extract_timed_header(unsigned char* buffer, uint16_t* port_id, uint16_t* federate_id, size_t* length, tag_t* tag) {
+  extract_header(buffer, port_id, federate_id, length);
 
-    tag_t temporary_tag = extract_tag(
-        &(buffer[sizeof(uint16_t) + sizeof(uint16_t) + 
sizeof(int32_t)]) - ); - tag->time = temporary_tag.time; - tag->microstep = temporary_tag.microstep; + tag_t temporary_tag = extract_tag(&(buffer[sizeof(uint16_t) + sizeof(uint16_t) + sizeof(int32_t)])); + tag->time = temporary_tag.time; + tag->microstep = temporary_tag.microstep; } -tag_t extract_tag( - unsigned char* buffer -) { - tag_t tag; - tag.time = extract_int64(buffer); - tag.microstep = extract_uint32(&(buffer[sizeof(int64_t)])); +tag_t extract_tag(unsigned char* buffer) { + tag_t tag; + tag.time = extract_int64(buffer); + tag.microstep = extract_uint32(&(buffer[sizeof(int64_t)])); - return tag; + return tag; } -void encode_tag( - unsigned char* buffer, - tag_t tag -){ - encode_int64(tag.time, buffer); - encode_uint32(tag.microstep, &(buffer[sizeof(int64_t)])); +void encode_tag(unsigned char* buffer, tag_t tag) { + encode_int64(tag.time, buffer); + encode_uint32(tag.microstep, &(buffer[sizeof(int64_t)])); } bool match_regex(const char* str, char* regex) { - regex_t regex_compiled; - regmatch_t group; - bool valid = false; + regex_t regex_compiled; + regmatch_t group; + bool valid = false; - if (regcomp(®ex_compiled, regex, REG_EXTENDED)) { - lf_print_error("Could not compile regex to parse RTI address"); - return valid; - } - - // regexec returns 0 when a match is found. - if (regexec(®ex_compiled, str, 1, &group, 0) == 0) { - valid = true; - } - regfree(®ex_compiled); + if (regcomp(®ex_compiled, regex, REG_EXTENDED)) { + lf_print_error("Could not compile regex to parse RTI address"); return valid; + } + + // regexec returns 0 when a match is found. 
+ if (regexec(®ex_compiled, str, 1, &group, 0) == 0) { + valid = true; + } + regfree(®ex_compiled); + return valid; } bool validate_port(char* port) { - // magic number 6 since port range is [0, 65535] - int port_len = strnlen(port, 6); - if (port_len < 1 || port_len > 5) { - return false; - } - - for (int i = 0; i < port_len; i++) { - if (!isdigit(port[i])) { - return false; - } + // magic number 6 since port range is [0, 65535] + int port_len = strnlen(port, 6); + if (port_len < 1 || port_len > 5) { + return false; + } + + for (int i = 0; i < port_len; i++) { + if (!isdigit(port[i])) { + return false; } - int port_number = atoi(port); - return port_number >= 0 && port_number <= 65535; + } + int port_number = atoi(port); + return port_number >= 0 && port_number <= 65535; } bool validate_host(const char* host) { - // regex taken from LFValidator.xtend - char* ipv4_regex = "((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])"; - char* host_or_FQN_regex = "^([a-z0-9]+(-[a-z0-9]+)*)|(([a-z0-9]+(-[a-z0-9]+)*\\.)+[a-z]{2,})$"; - return match_regex(host, ipv4_regex) || match_regex(host, host_or_FQN_regex); + // regex taken from LFValidator.xtend + char* ipv4_regex = "((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])"; + char* host_or_FQN_regex = "^([a-z0-9]+(-[a-z0-9]+)*)|(([a-z0-9]+(-[a-z0-9]+)*\\.)+[a-z]{2,})$"; + return match_regex(host, ipv4_regex) || match_regex(host, host_or_FQN_regex); } bool validate_user(const char* user) { - // regex taken from LFValidator.xtend - char* username_regex = "^[a-z_]([a-z0-9_-]{0,31}|[a-z0-9_-]{0,30}\\$)$"; - return match_regex(user, username_regex); + // regex taken from LFValidator.xtend + char* username_regex = "^[a-z_]([a-z0-9_-]{0,31}|[a-z0-9_-]{0,30}\\$)$"; + return match_regex(user, username_regex); } -bool extract_match_group(const char* rti_addr, char* dest, regmatch_t group, - int max_len, int min_len, const char* err_msg) { - size_t size = 
group.rm_eo - group.rm_so; - if (size > max_len || size < min_len) { - lf_print_error("%s", err_msg); - return false; - } - strncpy(dest, &rti_addr[group.rm_so], size); - dest[size] = '\0'; - return true; +bool extract_match_group(const char* rti_addr, char* dest, regmatch_t group, int max_len, int min_len, + const char* err_msg) { + size_t size = group.rm_eo - group.rm_so; + if (size > max_len || size < min_len) { + lf_print_error("%s", err_msg); + return false; + } + strncpy(dest, &rti_addr[group.rm_so], size); + dest[size] = '\0'; + return true; } bool extract_match_groups(const char* rti_addr, char** rti_addr_strs, bool** rti_addr_flags, regmatch_t* group_array, - int* gids, int* max_lens, int* min_lens, const char** err_msgs) { - for (int i = 0; i < 3; i++) { - if (group_array[gids[i]].rm_so != -1) { - if (!extract_match_group(rti_addr, rti_addr_strs[i], group_array[gids[i]], max_lens[i], min_lens[i], err_msgs[i])) { - return false; - } else { - *rti_addr_flags[i] = true; - } - } + int* gids, int* max_lens, int* min_lens, const char** err_msgs) { + for (int i = 0; i < 3; i++) { + if (group_array[gids[i]].rm_so != -1) { + if (!extract_match_group(rti_addr, rti_addr_strs[i], group_array[gids[i]], max_lens[i], min_lens[i], + err_msgs[i])) { + return false; + } else { + *rti_addr_flags[i] = true; + } } - return true; + } + return true; } void extract_rti_addr_info(const char* rti_addr, rti_addr_info_t* rti_addr_info) { - const char* regex_str = "(([a-zA-Z0-9_-]{1,254})@)?([a-zA-Z0-9.]{1,255})(:([0-9]{1,5}))?"; - size_t max_groups = 6; - // The group indices of each field of interest in the regex. 
- int user_gid = 2, host_gid = 3, port_gid = 5; - int gids[3] = {user_gid, host_gid, port_gid}; - char* rti_addr_strs[3] = {rti_addr_info->rti_user_str, rti_addr_info->rti_host_str, rti_addr_info->rti_port_str}; - bool* rti_addr_flags[3] = {&rti_addr_info->has_user, &rti_addr_info->has_host, &rti_addr_info->has_port}; - int max_lens[3] = {255, 255, 5}; - int min_lens[3] = {1, 1, 1}; - const char* err_msgs[3] = {"User name must be between 1 to 255 characters long.", - "Host must be between 1 to 255 characters long.", - "Port must be between 1 to 5 characters long."}; - - regex_t regex_compiled; - regmatch_t group_array[max_groups]; - - if (regcomp(®ex_compiled, regex_str, REG_EXTENDED)) { - lf_print_error("Could not compile regex to parse RTI address"); - return; + const char* regex_str = "(([a-zA-Z0-9_-]{1,254})@)?([a-zA-Z0-9.]{1,255})(:([0-9]{1,5}))?"; + size_t max_groups = 6; + // The group indices of each field of interest in the regex. + int user_gid = 2, host_gid = 3, port_gid = 5; + int gids[3] = {user_gid, host_gid, port_gid}; + char* rti_addr_strs[3] = {rti_addr_info->rti_user_str, rti_addr_info->rti_host_str, rti_addr_info->rti_port_str}; + bool* rti_addr_flags[3] = {&rti_addr_info->has_user, &rti_addr_info->has_host, &rti_addr_info->has_port}; + int max_lens[3] = {255, 255, 5}; + int min_lens[3] = {1, 1, 1}; + const char* err_msgs[3] = {"User name must be between 1 to 255 characters long.", + "Host must be between 1 to 255 characters long.", + "Port must be between 1 to 5 characters long."}; + + regex_t regex_compiled; + regmatch_t group_array[max_groups]; + + if (regcomp(®ex_compiled, regex_str, REG_EXTENDED)) { + lf_print_error("Could not compile regex to parse RTI address"); + return; + } + + if (regexec(®ex_compiled, rti_addr, max_groups, group_array, 0) == 0) { + // Check for matched username. group_array[0] is the entire matched string. 
+ for (int i = 1; i < max_groups; i++) { + // Annoyingly, the rm_so and rm_eo fields are long long on some platforms and int on others. + // To suppress warnings, cast to long long + LF_PRINT_DEBUG("runtime rti_addr regex: so: %lld eo: %lld\n", (long long)group_array[i].rm_so, + (long long)group_array[i].rm_eo); } - - if (regexec(®ex_compiled, rti_addr, max_groups, group_array, 0) == 0) { - // Check for matched username. group_array[0] is the entire matched string. - for (int i = 1; i < max_groups; i++) { - // Annoyingly, the rm_so and rm_eo fields are long long on some platforms and int on others. - // To suppress warnings, cast to long long - LF_PRINT_DEBUG("runtime rti_addr regex: so: %lld eo: %lld\n", - (long long)group_array[i].rm_so, - (long long)group_array[i].rm_eo); - } - if (!extract_match_groups(rti_addr, rti_addr_strs, rti_addr_flags, group_array, gids, max_lens, min_lens, err_msgs)) { - memset(rti_addr_info, 0, sizeof(rti_addr_info_t)); - } + if (!extract_match_groups(rti_addr, rti_addr_strs, rti_addr_flags, group_array, gids, max_lens, min_lens, + err_msgs)) { + memset(rti_addr_info, 0, sizeof(rti_addr_info_t)); } - regfree(®ex_compiled); + } + regfree(®ex_compiled); } #endif diff --git a/core/lf_token.c b/core/lf_token.c index 8da5d9c01..4e13b9e6a 100644 --- a/core/lf_token.c +++ b/core/lf_token.c @@ -17,7 +17,7 @@ int _lf_count_token_allocations; #include #include -#include // Defines memcpy +#include // Defines memcpy #include "lf_token.h" #include "environment.h" #include "lf_types.h" @@ -46,7 +46,7 @@ static hashset_t _lf_token_recycling_bin = NULL; /** * Set of token templates (trigger_t or port_base_t objects) that * have been initialized. This is used to free their tokens at - * the end of program execution. + * the end of program execution. */ static hashset_t _lf_token_templates = NULL; @@ -57,300 +57,301 @@ static lf_token_t* _lf_writable_copy_locked(lf_port_base_t* port); //// Functions that users may call. 
lf_token_t* lf_new_token(void* port_or_action, void* val, size_t len) { - return _lf_new_token((token_type_t*)port_or_action, val, len); + return _lf_new_token((token_type_t*)port_or_action, val, len); } lf_token_t* lf_writable_copy(lf_port_base_t* port) { - LF_CRITICAL_SECTION_ENTER(port->source_reactor->environment); - lf_token_t* token = _lf_writable_copy_locked(port); - LF_CRITICAL_SECTION_EXIT(port->source_reactor->environment); - return token; + LF_CRITICAL_SECTION_ENTER(port->source_reactor->environment); + lf_token_t* token = _lf_writable_copy_locked(port); + LF_CRITICAL_SECTION_EXIT(port->source_reactor->environment); + return token; } //////////////////////////////////////////////////////////////////// //// Internal functions. static lf_token_t* _lf_writable_copy_locked(lf_port_base_t* port) { - assert(port != NULL); - - lf_token_t* token = port->tmplt.token; - if (token == NULL) return NULL; - LF_PRINT_DEBUG("lf_writable_copy: Requesting writable copy of token %p with reference count %zu.", - token, token->ref_count); - if (port->num_destinations == 1 && token->ref_count == 1) { - LF_PRINT_DEBUG("lf_writable_copy: Avoided copy because there " - "is only one reader and the reference count is %zu.", token->ref_count); - return token; + assert(port != NULL); + + lf_token_t* token = port->tmplt.token; + if (token == NULL) + return NULL; + LF_PRINT_DEBUG("lf_writable_copy: Requesting writable copy of token %p with reference count %zu.", token, + token->ref_count); + if (port->num_destinations == 1 && token->ref_count == 1) { + LF_PRINT_DEBUG("lf_writable_copy: Avoided copy because there " + "is only one reader and the reference count is %zu.", + token->ref_count); + return token; + } + LF_PRINT_DEBUG("lf_writable_copy: Copying value. Reference count is %zu.", token->ref_count); + // Copy the payload. + void* copy; + if (port->tmplt.type.copy_constructor == NULL) { + LF_PRINT_DEBUG("lf_writable_copy: Copy constructor is NULL. 
Using default strategy."); + size_t size = port->tmplt.type.element_size * token->length; + if (size == 0) { + return token; } - LF_PRINT_DEBUG("lf_writable_copy: Copying value. Reference count is %zu.", - token->ref_count); - // Copy the payload. - void* copy; - if (port->tmplt.type.copy_constructor == NULL) { - LF_PRINT_DEBUG("lf_writable_copy: Copy constructor is NULL. Using default strategy."); - size_t size = port->tmplt.type.element_size * token->length; - if (size == 0) { - return token; - } - copy = malloc(size); - LF_PRINT_DEBUG("Allocating memory for writable copy %p.", copy); - memcpy(copy, token->value, size); - } else { - LF_PRINT_DEBUG("lf_writable_copy: Copy constructor is not NULL. Using copy constructor."); - if (port->tmplt.type.destructor == NULL) { - lf_print_warning("lf_writable_copy: Using non-default copy constructor " - "without setting destructor. Potential memory leak."); - } - copy = port->tmplt.type.copy_constructor(token->value); + copy = malloc(size); + LF_PRINT_DEBUG("Allocating memory for writable copy %p.", copy); + memcpy(copy, token->value, size); + } else { + LF_PRINT_DEBUG("lf_writable_copy: Copy constructor is not NULL. Using copy constructor."); + if (port->tmplt.type.destructor == NULL) { + lf_print_warning("lf_writable_copy: Using non-default copy constructor " + "without setting destructor. Potential memory leak."); } - LF_PRINT_DEBUG("lf_writable_copy: Allocated memory for payload (token value): %p", copy); + copy = port->tmplt.type.copy_constructor(token->value); + } + LF_PRINT_DEBUG("lf_writable_copy: Allocated memory for payload (token value): %p", copy); - // Count allocations to issue a warning if this is never freed. - #if !defined NDEBUG - LF_CRITICAL_SECTION_ENTER(GLOBAL_ENVIRONMENT); - _lf_count_payload_allocations++; - LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); - #endif +// Count allocations to issue a warning if this is never freed. 
+#if !defined NDEBUG + LF_CRITICAL_SECTION_ENTER(GLOBAL_ENVIRONMENT); + _lf_count_payload_allocations++; + LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); +#endif - // Create a new, dynamically allocated token. - lf_token_t* result = _lf_new_token((token_type_t*)port, copy, token->length); - result->ref_count = 1; - // Arrange for the token to be released (and possibly freed) at - // the start of the next time step. - result->next = _lf_tokens_allocated_in_reactions; - _lf_tokens_allocated_in_reactions = result; + // Create a new, dynamically allocated token. + lf_token_t* result = _lf_new_token((token_type_t*)port, copy, token->length); + result->ref_count = 1; + // Arrange for the token to be released (and possibly freed) at + // the start of the next time step. + result->next = _lf_tokens_allocated_in_reactions; + _lf_tokens_allocated_in_reactions = result; - return result; + return result; } static void _lf_free_token_value(lf_token_t* token) { - if (token->value != NULL) { - // Count frees to issue a warning if this is never freed. - #if !defined NDEBUG - LF_CRITICAL_SECTION_ENTER(GLOBAL_ENVIRONMENT); - _lf_count_payload_allocations--; - LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); - #endif - // Free the value field (the payload). - LF_PRINT_DEBUG("_lf_free_token_value: Freeing allocated memory for payload (token value): %p", - token->value); - // First check the token's destructor field and invoke it if it is not NULL. - if (token->type->destructor != NULL) { - token->type->destructor(token->value); - } - // If Python Target is not enabled and destructor is NULL - // Token values should be freed - else { + if (token->value != NULL) { +// Count frees to issue a warning if this is never freed. +#if !defined NDEBUG + LF_CRITICAL_SECTION_ENTER(GLOBAL_ENVIRONMENT); + _lf_count_payload_allocations--; + LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); +#endif + // Free the value field (the payload). 
+ LF_PRINT_DEBUG("_lf_free_token_value: Freeing allocated memory for payload (token value): %p", token->value); + // First check the token's destructor field and invoke it if it is not NULL. + if (token->type->destructor != NULL) { + token->type->destructor(token->value); + } + // If Python Target is not enabled and destructor is NULL + // Token values should be freed + else { #ifndef _PYTHON_TARGET_ENABLED - free(token->value); + free(token->value); #endif - } - token->value = NULL; } + token->value = NULL; + } } token_freed _lf_free_token(lf_token_t* token) { - token_freed result = NOT_FREED; - if (token == NULL) return result; - if (token->ref_count > 0) return result; - _lf_free_token_value(token); - - // Tokens that are created at the start of execution and associated with - // output ports or actions persist until they are overwritten. - // Need to acquire a mutex to access the recycle bin. - LF_CRITICAL_SECTION_ENTER(GLOBAL_ENVIRONMENT); + token_freed result = NOT_FREED; + if (token == NULL) + return result; + if (token->ref_count > 0) + return result; + _lf_free_token_value(token); + + // Tokens that are created at the start of execution and associated with + // output ports or actions persist until they are overwritten. + // Need to acquire a mutex to access the recycle bin. + LF_CRITICAL_SECTION_ENTER(GLOBAL_ENVIRONMENT); + if (_lf_token_recycling_bin == NULL) { + _lf_token_recycling_bin = hashset_create(4); // Initial size is 16. if (_lf_token_recycling_bin == NULL) { - _lf_token_recycling_bin = hashset_create(4); // Initial size is 16. 
- if (_lf_token_recycling_bin == NULL) { - LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); - lf_print_error_and_exit("Out of memory: failed to setup _lf_token_recycling_bin"); - } + LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); + lf_print_error_and_exit("Out of memory: failed to setup _lf_token_recycling_bin"); } - if (hashset_num_items(_lf_token_recycling_bin) < _LF_TOKEN_RECYCLING_BIN_SIZE_LIMIT) { - // Recycle instead of freeing. - LF_PRINT_DEBUG("_lf_free_token: Putting token on the recycling bin: %p", token); - if (!hashset_add(_lf_token_recycling_bin, token)) { - lf_print_warning("Putting token %p on the recycling bin, but it is already there!", token); - } - } else { - // Recycling bin is full. - LF_PRINT_DEBUG("_lf_free_token: Freeing allocated memory for token: %p", token); - free(token); + } + if (hashset_num_items(_lf_token_recycling_bin) < _LF_TOKEN_RECYCLING_BIN_SIZE_LIMIT) { + // Recycle instead of freeing. + LF_PRINT_DEBUG("_lf_free_token: Putting token on the recycling bin: %p", token); + if (!hashset_add(_lf_token_recycling_bin, token)) { + lf_print_warning("Putting token %p on the recycling bin, but it is already there!", token); } - #if !defined NDEBUG - _lf_count_token_allocations--; - #endif - LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); - result &= TOKEN_FREED; + } else { + // Recycling bin is full. + LF_PRINT_DEBUG("_lf_free_token: Freeing allocated memory for token: %p", token); + free(token); + } +#if !defined NDEBUG + _lf_count_token_allocations--; +#endif + LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); + result &= TOKEN_FREED; - return result; + return result; } lf_token_t* _lf_new_token(token_type_t* type, void* value, size_t length) { - lf_token_t* result = NULL; - // Check the recycling bin. 
- LF_CRITICAL_SECTION_ENTER(GLOBAL_ENVIRONMENT); - if (_lf_token_recycling_bin != NULL) { - hashset_itr_t iterator = hashset_iterator(_lf_token_recycling_bin); - if (hashset_iterator_next(iterator) >= 0) { - result = hashset_iterator_value(iterator); - hashset_remove(_lf_token_recycling_bin, result); - LF_PRINT_DEBUG("_lf_new_token: Retrieved token from the recycling bin: %p", result); - } - free(iterator); + lf_token_t* result = NULL; + // Check the recycling bin. + LF_CRITICAL_SECTION_ENTER(GLOBAL_ENVIRONMENT); + if (_lf_token_recycling_bin != NULL) { + hashset_itr_t iterator = hashset_iterator(_lf_token_recycling_bin); + if (hashset_iterator_next(iterator) >= 0) { + result = hashset_iterator_value(iterator); + hashset_remove(_lf_token_recycling_bin, result); + LF_PRINT_DEBUG("_lf_new_token: Retrieved token from the recycling bin: %p", result); } + free(iterator); + } - // Count the token allocation to catch memory leaks. - #if !defined NDEBUG - _lf_count_token_allocations++; - #endif - - LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); +// Count the token allocation to catch memory leaks. +#if !defined NDEBUG + _lf_count_token_allocations++; +#endif - if (result == NULL) { - // Nothing found on the recycle bin. - result = (lf_token_t*)calloc(1, sizeof(lf_token_t)); - LF_PRINT_DEBUG("_lf_new_token: Allocated memory for token: %p", result); - } - result->type = type; - result->length = length; - result->value = value; - result->ref_count = 0; - return result; + LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); + + if (result == NULL) { + // Nothing found on the recycle bin. 
+ result = (lf_token_t*)calloc(1, sizeof(lf_token_t)); + LF_PRINT_DEBUG("_lf_new_token: Allocated memory for token: %p", result); + } + result->type = type; + result->length = length; + result->value = value; + result->ref_count = 0; + return result; } lf_token_t* _lf_get_token(token_template_t* tmplt) { - if (tmplt->token != NULL) { - if (tmplt->token->ref_count == 1) { - LF_PRINT_DEBUG("_lf_get_token: Reusing template token: %p with ref_count %zu", - tmplt->token, tmplt->token->ref_count); - // Free any previous value in the token. - _lf_free_token_value(tmplt->token); - return tmplt->token; - } else { - // Liberate the token. - _lf_done_using(tmplt->token); - } + if (tmplt->token != NULL) { + if (tmplt->token->ref_count == 1) { + LF_PRINT_DEBUG("_lf_get_token: Reusing template token: %p with ref_count %zu", tmplt->token, + tmplt->token->ref_count); + // Free any previous value in the token. + _lf_free_token_value(tmplt->token); + return tmplt->token; + } else { + // Liberate the token. + _lf_done_using(tmplt->token); } - // If we get here, we need a new token. - tmplt->token = _lf_new_token((token_type_t*)tmplt, NULL, 0); - tmplt->token->ref_count = 1; - return tmplt->token; + } + // If we get here, we need a new token. + tmplt->token = _lf_new_token((token_type_t*)tmplt, NULL, 0); + tmplt->token->ref_count = 1; + return tmplt->token; } void _lf_initialize_template(token_template_t* tmplt, size_t element_size) { - assert(tmplt != NULL); - LF_CRITICAL_SECTION_ENTER(GLOBAL_ENVIRONMENT); - if (_lf_token_templates == NULL) { - _lf_token_templates = hashset_create(4); // Initial size is 16. - } - hashset_add(_lf_token_templates, tmplt); - LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); - if (tmplt->token != NULL) { - if (tmplt->token->ref_count == 1 && tmplt->token->type->element_size == element_size) { - // Template token is already set. - // If it has a value, free it. - _lf_free_token_value(tmplt->token); - // Make sure its reference count is 1 (it should not be 0). 
- tmplt->token->ref_count = 1; - return; - } - // Replace the token. - _lf_done_using(tmplt->token); - tmplt->token = NULL; + assert(tmplt != NULL); + LF_CRITICAL_SECTION_ENTER(GLOBAL_ENVIRONMENT); + if (_lf_token_templates == NULL) { + _lf_token_templates = hashset_create(4); // Initial size is 16. + } + hashset_add(_lf_token_templates, tmplt); + LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); + if (tmplt->token != NULL) { + if (tmplt->token->ref_count == 1 && tmplt->token->type->element_size == element_size) { + // Template token is already set. + // If it has a value, free it. + _lf_free_token_value(tmplt->token); + // Make sure its reference count is 1 (it should not be 0). + tmplt->token->ref_count = 1; + return; } - tmplt->type.element_size = element_size; - tmplt->token = _lf_new_token((token_type_t*)tmplt, NULL, 0); - tmplt->token->ref_count = 1; + // Replace the token. + _lf_done_using(tmplt->token); + tmplt->token = NULL; + } + tmplt->type.element_size = element_size; + tmplt->token = _lf_new_token((token_type_t*)tmplt, NULL, 0); + tmplt->token->ref_count = 1; } lf_token_t* _lf_initialize_token_with_value(token_template_t* tmplt, void* value, size_t length) { - assert(tmplt != NULL); - LF_PRINT_DEBUG("_lf_initialize_token_with_value: template %p, value %p", tmplt, value); - lf_token_t* result = _lf_get_token(tmplt); - result->value = value; - // Count allocations to issue a warning if this is never freed. - #if !defined NDEBUG - LF_CRITICAL_SECTION_ENTER(GLOBAL_ENVIRONMENT); - _lf_count_payload_allocations++; - LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); - #endif - result->length = length; - return result; + assert(tmplt != NULL); + LF_PRINT_DEBUG("_lf_initialize_token_with_value: template %p, value %p", tmplt, value); + lf_token_t* result = _lf_get_token(tmplt); + result->value = value; +// Count allocations to issue a warning if this is never freed. 
+#if !defined NDEBUG + LF_CRITICAL_SECTION_ENTER(GLOBAL_ENVIRONMENT); + _lf_count_payload_allocations++; + LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); +#endif + result->length = length; + return result; } lf_token_t* _lf_initialize_token(token_template_t* tmplt, size_t length) { - assert(tmplt != NULL); - // Allocate memory for storing the array. - void* value = calloc(length, tmplt->type.element_size); - lf_token_t* result = _lf_initialize_token_with_value(tmplt, value, length); - return result; + assert(tmplt != NULL); + // Allocate memory for storing the array. + void* value = calloc(length, tmplt->type.element_size); + lf_token_t* result = _lf_initialize_token_with_value(tmplt, value, length); + return result; } void _lf_free_all_tokens() { - // Free template tokens. - LF_CRITICAL_SECTION_ENTER(GLOBAL_ENVIRONMENT); - // It is possible for a token to be a template token for more than one port - // or action because the same token may be sent to multiple output ports. - if (_lf_token_templates != NULL) { - hashset_itr_t iterator = hashset_iterator(_lf_token_templates); - while (hashset_iterator_next(iterator) >= 0) { - token_template_t* tmplt = (token_template_t*)hashset_iterator_value(iterator); - _lf_done_using(tmplt->token); - tmplt->token = NULL; - } - free(iterator); - hashset_destroy(_lf_token_templates); - _lf_token_templates = NULL; + // Free template tokens. + LF_CRITICAL_SECTION_ENTER(GLOBAL_ENVIRONMENT); + // It is possible for a token to be a template token for more than one port + // or action because the same token may be sent to multiple output ports. 
+ if (_lf_token_templates != NULL) { + hashset_itr_t iterator = hashset_iterator(_lf_token_templates); + while (hashset_iterator_next(iterator) >= 0) { + token_template_t* tmplt = (token_template_t*)hashset_iterator_value(iterator); + _lf_done_using(tmplt->token); + tmplt->token = NULL; } - if (_lf_token_recycling_bin != NULL) { - hashset_itr_t iterator = hashset_iterator(_lf_token_recycling_bin); - while (hashset_iterator_next(iterator) >= 0) { - void* token = hashset_iterator_value(iterator); - LF_PRINT_DEBUG("Freeing token from _lf_token_recycling_bin: %p", token); - // Payload should already be freed, so we just free the token: - free(token); - } - free(iterator); - hashset_destroy(_lf_token_recycling_bin); - _lf_token_recycling_bin = NULL; + free(iterator); + hashset_destroy(_lf_token_templates); + _lf_token_templates = NULL; + } + if (_lf_token_recycling_bin != NULL) { + hashset_itr_t iterator = hashset_iterator(_lf_token_recycling_bin); + while (hashset_iterator_next(iterator) >= 0) { + void* token = hashset_iterator_value(iterator); + LF_PRINT_DEBUG("Freeing token from _lf_token_recycling_bin: %p", token); + // Payload should already be freed, so we just free the token: + free(token); } - LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); + free(iterator); + hashset_destroy(_lf_token_recycling_bin); + _lf_token_recycling_bin = NULL; + } + LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); } void _lf_replace_template_token(token_template_t* tmplt, lf_token_t* newtoken) { - assert(tmplt != NULL); - LF_PRINT_DEBUG("_lf_replace_template_token: template: %p newtoken: %p.", tmplt, newtoken); - if (tmplt->token != newtoken) { - if (tmplt->token != NULL) { - _lf_done_using(tmplt->token); - } - if (newtoken != NULL) { - newtoken->ref_count++; - LF_PRINT_DEBUG("_lf_replace_template_token: Incremented ref_count of %p to %zu.", - newtoken, newtoken->ref_count); - } - tmplt->token = newtoken; + assert(tmplt != NULL); + LF_PRINT_DEBUG("_lf_replace_template_token: template: %p 
newtoken: %p.", tmplt, newtoken); + if (tmplt->token != newtoken) { + if (tmplt->token != NULL) { + _lf_done_using(tmplt->token); + } + if (newtoken != NULL) { + newtoken->ref_count++; + LF_PRINT_DEBUG("_lf_replace_template_token: Incremented ref_count of %p to %zu.", newtoken, newtoken->ref_count); } + tmplt->token = newtoken; + } } token_freed _lf_done_using(lf_token_t* token) { - if (token == NULL) { - return NOT_FREED; - } - LF_PRINT_DEBUG("_lf_done_using: token = %p, ref_count = %zu.", token, token->ref_count); - if (token->ref_count == 0) { - lf_print_warning("Token being freed that has already been freed: %p", token); - return NOT_FREED; - } - token->ref_count--; - return _lf_free_token(token); + if (token == NULL) { + return NOT_FREED; + } + LF_PRINT_DEBUG("_lf_done_using: token = %p, ref_count = %zu.", token, token->ref_count); + if (token->ref_count == 0) { + lf_print_warning("Token being freed that has already been freed: %p", token); + return NOT_FREED; + } + token->ref_count--; + return _lf_free_token(token); } void _lf_free_token_copies(struct environment_t* env) { - while (_lf_tokens_allocated_in_reactions != NULL) { - lf_token_t* next = _lf_tokens_allocated_in_reactions->next; - _lf_done_using(_lf_tokens_allocated_in_reactions); - _lf_tokens_allocated_in_reactions = next; - } + while (_lf_tokens_allocated_in_reactions != NULL) { + lf_token_t* next = _lf_tokens_allocated_in_reactions->next; + _lf_done_using(_lf_tokens_allocated_in_reactions); + _lf_tokens_allocated_in_reactions = next; + } } diff --git a/core/mixed_radix.c b/core/mixed_radix.c index de21e7e5a..e59b61bd2 100644 --- a/core/mixed_radix.c +++ b/core/mixed_radix.c @@ -31,7 +31,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include -#include // defines NULL +#include // defines NULL #include "mixed_radix.h" @@ -40,22 +40,22 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* @param mixed A pointer to the mixed-radix number. */ void mixed_radix_incr(mixed_radix_int_t* mixed) { - int i = 0; - assert(mixed != NULL); - assert(mixed->size > 0); - while (i < mixed->size) { - int digit_to_increment = mixed->permutation[i]; - assert(digit_to_increment >= 0); - mixed->digits[digit_to_increment]++; - if (mixed->digits[digit_to_increment] >= mixed->radixes[digit_to_increment]) { - mixed->digits[digit_to_increment] = 0; - i++; - } else { - return; // All done. - } - } - // If we get here, the number has overflowed. Wrap to zero. - mixed->digits[i - 1] = 0; + int i = 0; + assert(mixed != NULL); + assert(mixed->size > 0); + while (i < mixed->size) { + int digit_to_increment = mixed->permutation[i]; + assert(digit_to_increment >= 0); + mixed->digits[digit_to_increment]++; + if (mixed->digits[digit_to_increment] >= mixed->radixes[digit_to_increment]) { + mixed->digits[digit_to_increment] = 0; + i++; + } else { + return; // All done. + } + } + // If we get here, the number has overflowed. Wrap to zero. + mixed->digits[i - 1] = 0; } /** @@ -67,23 +67,20 @@ void mixed_radix_incr(mixed_radix_int_t* mixed) { * be greater than or equal to 0. */ int mixed_radix_parent(mixed_radix_int_t* mixed, int n) { - assert(mixed != NULL); - assert(mixed->size > 0); - assert(n >= 0); - int result = 0; - int factor = 1; - for (int i = n; i < mixed->size; i++) { - result += factor * mixed->digits[i]; - factor *= mixed->radixes[i]; - } - return result; + assert(mixed != NULL); + assert(mixed->size > 0); + assert(n >= 0); + int result = 0; + int factor = 1; + for (int i = n; i < mixed->size; i++) { + result += factor * mixed->digits[i]; + factor *= mixed->radixes[i]; + } + return result; } /** * Return the int value of a mixed-radix number. * @param mixed A pointer to the mixed-radix number. 
*/ -int mixed_radix_to_int(mixed_radix_int_t* mixed) { - return mixed_radix_parent(mixed, 0); -} - +int mixed_radix_to_int(mixed_radix_int_t* mixed) { return mixed_radix_parent(mixed, 0); } diff --git a/core/modal_models/modes.c b/core/modal_models/modes.c index 7e1c2493b..e6e6f5d95 100644 --- a/core/modal_models/modes.c +++ b/core/modal_models/modes.c @@ -39,7 +39,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include -#include "platform.h" +#include "low_level_platform.h" #include "lf_types.h" #include "modes.h" #include "reactor.h" @@ -47,45 +47,48 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "api/schedule.h" // Bit masks for the internally used flags on modes -#define _LF_MODE_FLAG_MASK_ACTIVE (1 << 0) +#define _LF_MODE_FLAG_MASK_ACTIVE (1 << 0) #define _LF_MODE_FLAG_MASK_NEEDS_STARTUP (1 << 1) -#define _LF_MODE_FLAG_MASK_HAD_STARTUP (1 << 2) -#define _LF_MODE_FLAG_MASK_NEEDS_RESET (1 << 3) +#define _LF_MODE_FLAG_MASK_HAD_STARTUP (1 << 2) +#define _LF_MODE_FLAG_MASK_NEEDS_RESET (1 << 3) // ---------------------------------------------------------------------------- // Forward declaration of functions and variables supplied by reactor_common.c void _lf_trigger_reaction(environment_t* env, reaction_t* reaction, int worker_number); -event_t* _lf_create_dummy_events(environment_t* env, trigger_t* trigger, instant_t time, event_t* next, microstep_t offset); +event_t* _lf_create_dummy_events(environment_t* env, trigger_t* trigger, instant_t time, event_t* next, + microstep_t offset); // ---------------------------------------------------------------------------- // Linked list element for suspended events in inactive modes typedef struct _lf_suspended_event { - struct _lf_suspended_event* next; - event_t* event; + struct _lf_suspended_event* next; + event_t* event; } _lf_suspended_event_t; -_lf_suspended_event_t* _lf_suspended_events_head = NULL; // Start of linked collection of suspended 
events (managed automatically!) +_lf_suspended_event_t* _lf_suspended_events_head = + NULL; // Start of linked collection of suspended events (managed automatically!) int _lf_suspended_events_num = 0; // Number of suspended events (managed automatically!) -_lf_suspended_event_t* _lf_unsused_suspended_events_head = NULL; // Internal collection of reusable list elements (managed automatically!) +_lf_suspended_event_t* _lf_unsused_suspended_events_head = + NULL; // Internal collection of reusable list elements (managed automatically!) /** * Save the given event as suspended. */ void _lf_add_suspended_event(event_t* event) { - _lf_suspended_event_t* new_suspended_event; - if (_lf_unsused_suspended_events_head != NULL) { - new_suspended_event = _lf_unsused_suspended_events_head; - _lf_unsused_suspended_events_head = _lf_unsused_suspended_events_head->next; - } else { - new_suspended_event = (_lf_suspended_event_t*) malloc(sizeof(_lf_suspended_event_t)); - } - - new_suspended_event->event = event; - new_suspended_event->next = _lf_suspended_events_head; // prepend - _lf_suspended_events_num++; - - _lf_suspended_events_head = new_suspended_event; + _lf_suspended_event_t* new_suspended_event; + if (_lf_unsused_suspended_events_head != NULL) { + new_suspended_event = _lf_unsused_suspended_events_head; + _lf_unsused_suspended_events_head = _lf_unsused_suspended_events_head->next; + } else { + new_suspended_event = (_lf_suspended_event_t*)malloc(sizeof(_lf_suspended_event_t)); + } + + new_suspended_event->event = event; + new_suspended_event->next = _lf_suspended_events_head; // prepend + _lf_suspended_events_num++; + + _lf_suspended_events_head = new_suspended_event; } /** @@ -93,34 +96,34 @@ void _lf_add_suspended_event(event_t* event) { * Returns the next element in the list. 
*/ _lf_suspended_event_t* _lf_remove_suspended_event(_lf_suspended_event_t* event) { - _lf_suspended_event_t* next = event->next; - - // Clear content - event->event = NULL; - event->next = NULL; - _lf_suspended_events_num--; - - // Store for recycling - if (_lf_unsused_suspended_events_head == NULL) { - _lf_unsused_suspended_events_head = event; - } else { - event->next = _lf_unsused_suspended_events_head; - _lf_unsused_suspended_events_head = event; + _lf_suspended_event_t* next = event->next; + + // Clear content + event->event = NULL; + event->next = NULL; + _lf_suspended_events_num--; + + // Store for recycling + if (_lf_unsused_suspended_events_head == NULL) { + _lf_unsused_suspended_events_head = event; + } else { + event->next = _lf_unsused_suspended_events_head; + _lf_unsused_suspended_events_head = event; + } + + if (_lf_suspended_events_head == event) { + _lf_suspended_events_head = next; // Adjust head + } else { + _lf_suspended_event_t* predecessor = _lf_suspended_events_head; + while (predecessor->next != event && predecessor != NULL) { + predecessor = predecessor->next; } - - if (_lf_suspended_events_head == event) { - _lf_suspended_events_head = next; // Adjust head - } else { - _lf_suspended_event_t* predecessor = _lf_suspended_events_head; - while(predecessor->next != event && predecessor != NULL) { - predecessor = predecessor->next; - } - if (predecessor != NULL) { - predecessor->next = next; // Remove from linked list - } + if (predecessor != NULL) { + predecessor->next = next; // Remove from linked list } + } - return next; + return next; } // ---------------------------------------------------------------------------- @@ -133,11 +136,11 @@ _lf_suspended_event_t* _lf_remove_suspended_event(_lf_suspended_event_t* event) * @param mode The mode instance to check. 
*/ bool _lf_mode_is_active(reactor_mode_t* mode) { - if (mode != NULL) { - // Use cached value (redundant data structure) - return mode->flags & _LF_MODE_FLAG_MASK_ACTIVE; - } - return true; + if (mode != NULL) { + // Use cached value (redundant data structure) + return mode->flags & _LF_MODE_FLAG_MASK_ACTIVE; + } + return true; } /** @@ -148,25 +151,25 @@ bool _lf_mode_is_active(reactor_mode_t* mode) { * @param mode The mode instance to check. */ bool _lf_mode_is_active_fallback(reactor_mode_t* mode) { - if (mode != NULL) { - LF_PRINT_DEBUG("Checking mode state of %s", mode->name); - reactor_mode_state_t* state = mode->state; - while (state != NULL) { - // If this or any parent mode is inactive, return inactive - if (state->current_mode != mode) { - LF_PRINT_DEBUG(" => Mode is inactive"); - return false; - } - mode = state->parent_mode; - if (mode != NULL) { - state = mode->state; - } else { - state = NULL; - } - } - LF_PRINT_DEBUG(" => Mode is active"); + if (mode != NULL) { + LF_PRINT_DEBUG("Checking mode state of %s", mode->name); + reactor_mode_state_t* state = mode->state; + while (state != NULL) { + // If this or any parent mode is inactive, return inactive + if (state->current_mode != mode) { + LF_PRINT_DEBUG(" => Mode is inactive"); + return false; + } + mode = state->parent_mode; + if (mode != NULL) { + state = mode->state; + } else { + state = NULL; + } } - return true; + LF_PRINT_DEBUG(" => Mode is active"); + } + return true; } /** @@ -178,19 +181,19 @@ bool _lf_mode_is_active_fallback(reactor_mode_t* mode) { * @param states_size */ void _lf_initialize_mode_states(environment_t* env, reactor_mode_state_t* states[], int states_size) { - LF_PRINT_DEBUG("Modes: Initialization"); - // Initialize all modes (top down for correct active flags) - for (int i = 0; i < states_size; i++) { - reactor_mode_state_t* state = states[i]; - if (state != NULL && _lf_mode_is_active(state->parent_mode)) { - // If there is no enclosing mode or the parent is marked active, - 
// then activate the active mode (same as initial at this point) - // and request startup. - state->current_mode->flags |= _LF_MODE_FLAG_MASK_ACTIVE | _LF_MODE_FLAG_MASK_NEEDS_STARTUP; - } + LF_PRINT_DEBUG("Modes: Initialization"); + // Initialize all modes (top down for correct active flags) + for (int i = 0; i < states_size; i++) { + reactor_mode_state_t* state = states[i]; + if (state != NULL && _lf_mode_is_active(state->parent_mode)) { + // If there is no enclosing mode or the parent is marked active, + // then activate the active mode (same as initial at this point) + // and request startup. + state->current_mode->flags |= _LF_MODE_FLAG_MASK_ACTIVE | _LF_MODE_FLAG_MASK_NEEDS_STARTUP; } - // Register execution of special triggers - env->modes->triggered_reactions_request |= _LF_MODE_FLAG_MASK_NEEDS_STARTUP; + } + // Register execution of special triggers + env->modes->triggered_reactions_request |= _LF_MODE_FLAG_MASK_NEEDS_STARTUP; } /** @@ -218,72 +221,63 @@ void _lf_initialize_mode_states(environment_t* env, reactor_mode_state_t* states * @param states_size * */ -void _lf_handle_mode_startup_reset_reactions( - environment_t* env, - reaction_t** startup_reactions, - int startup_reactions_size, - reaction_t** reset_reactions, - int reset_reactions_size, - reactor_mode_state_t* states[], - int states_size -) { - // Handle startup reactions - if (env->modes->triggered_reactions_request & _LF_MODE_FLAG_MASK_NEEDS_STARTUP) { - if (startup_reactions != NULL) { - for (int i = 0; i < startup_reactions_size; i++) { - reaction_t* reaction = startup_reactions[i]; - if (reaction->mode != NULL) { - if(reaction->status == inactive - && _lf_mode_is_active(reaction->mode) - && reaction->mode->flags & _LF_MODE_FLAG_MASK_NEEDS_STARTUP - ) { - // Trigger reaction if not already triggered, is active, - // and requires startup - _lf_trigger_reaction(env, reaction, -1); - } - } - } +void _lf_handle_mode_startup_reset_reactions(environment_t* env, reaction_t** startup_reactions, + 
int startup_reactions_size, reaction_t** reset_reactions, + int reset_reactions_size, reactor_mode_state_t* states[], + int states_size) { + // Handle startup reactions + if (env->modes->triggered_reactions_request & _LF_MODE_FLAG_MASK_NEEDS_STARTUP) { + if (startup_reactions != NULL) { + for (int i = 0; i < startup_reactions_size; i++) { + reaction_t* reaction = startup_reactions[i]; + if (reaction->mode != NULL) { + if (reaction->status == inactive && _lf_mode_is_active(reaction->mode) && + reaction->mode->flags & _LF_MODE_FLAG_MASK_NEEDS_STARTUP) { + // Trigger reaction if not already triggered, is active, + // and requires startup + _lf_trigger_reaction(env, reaction, -1); + } } + } } - - // Handle reset reactions - if (env->modes->triggered_reactions_request & _LF_MODE_FLAG_MASK_NEEDS_RESET) { - if (reset_reactions != NULL) { - for (int i = 0; i < reset_reactions_size; i++) { - reaction_t* reaction = reset_reactions[i]; - if (reaction->mode != NULL) { - if(reaction->status == inactive - && _lf_mode_is_active(reaction->mode) - && reaction->mode->flags & _LF_MODE_FLAG_MASK_NEEDS_RESET - ) { - // Trigger reaction if not already triggered, is active, - // and requires reset - _lf_trigger_reaction(env, reaction, -1); - } - } - } + } + + // Handle reset reactions + if (env->modes->triggered_reactions_request & _LF_MODE_FLAG_MASK_NEEDS_RESET) { + if (reset_reactions != NULL) { + for (int i = 0; i < reset_reactions_size; i++) { + reaction_t* reaction = reset_reactions[i]; + if (reaction->mode != NULL) { + if (reaction->status == inactive && _lf_mode_is_active(reaction->mode) && + reaction->mode->flags & _LF_MODE_FLAG_MASK_NEEDS_RESET) { + // Trigger reaction if not already triggered, is active, + // and requires reset + _lf_trigger_reaction(env, reaction, -1); + } } + } } - - // Reset the flags in all active modes. - // Hence, register that a mode had a startup even if there are no startup - // reactions to make sure that shutdown is executed properly. 
- for (int i = 0; i < states_size; i++) { - reactor_mode_state_t* state = states[i]; - if (state != NULL && _lf_mode_is_active(state->current_mode)) { - // Clear and save execution of startup for shutdown - if (state->current_mode->flags & _LF_MODE_FLAG_MASK_NEEDS_STARTUP) { - state->current_mode->flags |= _LF_MODE_FLAG_MASK_HAD_STARTUP; - state->current_mode->flags &= ~_LF_MODE_FLAG_MASK_NEEDS_STARTUP; - } - - // Clear reset flag - state->current_mode->flags &= ~_LF_MODE_FLAG_MASK_NEEDS_RESET; - } + } + + // Reset the flags in all active modes. + // Hence, register that a mode had a startup even if there are no startup + // reactions to make sure that shutdown is executed properly. + for (int i = 0; i < states_size; i++) { + reactor_mode_state_t* state = states[i]; + if (state != NULL && _lf_mode_is_active(state->current_mode)) { + // Clear and save execution of startup for shutdown + if (state->current_mode->flags & _LF_MODE_FLAG_MASK_NEEDS_STARTUP) { + state->current_mode->flags |= _LF_MODE_FLAG_MASK_HAD_STARTUP; + state->current_mode->flags &= ~_LF_MODE_FLAG_MASK_NEEDS_STARTUP; + } + + // Clear reset flag + state->current_mode->flags &= ~_LF_MODE_FLAG_MASK_NEEDS_RESET; } + } - // Clear request - env->modes->triggered_reactions_request = 0; + // Clear request + env->modes->triggered_reactions_request = 0; } /** @@ -301,30 +295,27 @@ void _lf_handle_mode_startup_reset_reactions( * @param shutdown_reactions_size * */ -void _lf_handle_mode_shutdown_reactions( - environment_t* env, - reaction_t** shutdown_reactions, - int shutdown_reactions_size -) { - if (shutdown_reactions != NULL) { - for (int i = 0; i < shutdown_reactions_size; i++) { - reaction_t* reaction = shutdown_reactions[i]; - if (reaction->mode != NULL) { - if (reaction->mode->flags & _LF_MODE_FLAG_MASK_HAD_STARTUP) { // if mode had startup - // Release the reaction from its association with the mode. - // This will effectively bypass the mode activity check. 
- // This assumes that the reaction will never be trigger/used after shutdown. - // If that is not the case, a temporary bypassing mechanism should be implemented. - reaction->mode = NULL; - - if(reaction->status == inactive) { - // Trigger reaction if not already triggered - _lf_trigger_reaction(env, reaction, -1); - } - } - } +void _lf_handle_mode_shutdown_reactions(environment_t* env, reaction_t** shutdown_reactions, + int shutdown_reactions_size) { + if (shutdown_reactions != NULL) { + for (int i = 0; i < shutdown_reactions_size; i++) { + reaction_t* reaction = shutdown_reactions[i]; + if (reaction->mode != NULL) { + if (reaction->mode->flags & _LF_MODE_FLAG_MASK_HAD_STARTUP) { // if mode had startup + // Release the reaction from its association with the mode. + // This will effectively bypass the mode activity check. + // This assumes that the reaction will never be trigger/used after shutdown. + // If that is not the case, a temporary bypassing mechanism should be implemented. + reaction->mode = NULL; + + if (reaction->status == inactive) { + // Trigger reaction if not already triggered + _lf_trigger_reaction(env, reaction, -1); + } } + } } + } } /** @@ -338,202 +329,199 @@ void _lf_handle_mode_shutdown_reactions( * @param timer_triggers Array of pointers to timer triggers. 
* @param timer_triggers_size */ -void _lf_process_mode_changes( - environment_t* env, - reactor_mode_state_t* states[], - int states_size, - mode_state_variable_reset_data_t reset_data[], - int reset_data_size, - trigger_t* timer_triggers[], - int timer_triggers_size -) { - bool transition = false; // any mode change in this step - - // Detect mode changes (top down for hierarchical reset) - for (int i = 0; i < states_size; i++) { - reactor_mode_state_t* state = states[i]; - if (state != NULL) { - // Hierarchical reset: if this mode has parent that is entered in - // this step with a reset this reactor has to enter its initial mode - if (state->parent_mode != NULL - && state->parent_mode->state != NULL - && state->parent_mode->state->next_mode == state->parent_mode - && state->parent_mode->state->mode_change == reset_transition - ){ - // Reset to initial state. - state->next_mode = state->initial_mode; - // Enter with reset, to cascade it further down. - state->mode_change = reset_transition; - LF_PRINT_DEBUG("Modes: Hierarchical mode reset to %s when entering %s.", - state->initial_mode->name, state->parent_mode->name); +void _lf_process_mode_changes(environment_t* env, reactor_mode_state_t* states[], int states_size, + mode_state_variable_reset_data_t reset_data[], int reset_data_size, + trigger_t* timer_triggers[], int timer_triggers_size) { + bool transition = false; // any mode change in this step + + // Detect mode changes (top down for hierarchical reset) + for (int i = 0; i < states_size; i++) { + reactor_mode_state_t* state = states[i]; + if (state != NULL) { + // Hierarchical reset: if this mode has parent that is entered in + // this step with a reset this reactor has to enter its initial mode + if (state->parent_mode != NULL && state->parent_mode->state != NULL && + state->parent_mode->state->next_mode == state->parent_mode && + state->parent_mode->state->mode_change == reset_transition) { + // Reset to initial state. 
+ state->next_mode = state->initial_mode; + // Enter with reset, to cascade it further down. + state->mode_change = reset_transition; + LF_PRINT_DEBUG("Modes: Hierarchical mode reset to %s when entering %s.", state->initial_mode->name, + state->parent_mode->name); + } + + // Handle effect of entering next mode + if (state->next_mode != NULL) { + LF_PRINT_DEBUG("Modes: Transition to %s.", state->next_mode->name); + transition = true; + + if (state->mode_change == reset_transition) { + // Reset state variables (if explicitly requested for automatic reset). + // The generated code will not register all state variables by default. + // Usually the reset trigger is used. + for (int j = 0; j < reset_data_size; j++) { + mode_state_variable_reset_data_t data = reset_data[j]; + if (data.mode == state->next_mode) { + LF_PRINT_DEBUG("Modes: Reseting state variable."); + memcpy(data.target, data.source, data.size); + } + } + + // Handle timers that have a period of 0. These timers will only trigger + // once and will not be on the event_q after their initial triggering. + // Therefore, the logic above cannot handle these timers. We need + // to trigger these timers manually if there is a reset transition. + for (int j = 0; j < timer_triggers_size; j++) { + trigger_t* timer = timer_triggers[j]; + if (timer->period == 0 && timer->mode == state->next_mode) { + lf_schedule_trigger(env, timer, timer->offset, NULL); } + } + } - // Handle effect of entering next mode - if (state->next_mode != NULL) { - LF_PRINT_DEBUG("Modes: Transition to %s.", state->next_mode->name); - transition = true; - - if (state->mode_change == reset_transition) { - // Reset state variables (if explicitly requested for automatic reset). - // The generated code will not register all state variables by default. - // Usually the reset trigger is used. 
- for (int j = 0; j < reset_data_size; j++) { - mode_state_variable_reset_data_t data = reset_data[j]; - if (data.mode == state->next_mode) { - LF_PRINT_DEBUG("Modes: Reseting state variable."); - memcpy(data.target, data.source, data.size); - } - } - - // Handle timers that have a period of 0. These timers will only trigger - // once and will not be on the event_q after their initial triggering. - // Therefore, the logic above cannot handle these timers. We need - // to trigger these timers manually if there is a reset transition. - for (int j = 0; j < timer_triggers_size; j++) { - trigger_t* timer = timer_triggers[j]; - if (timer->period == 0 && timer->mode == state->next_mode) { - lf_schedule_trigger(env, timer, timer->offset, NULL); - } - } - } - - // Reset/Reactivate previously suspended events of next state - _lf_suspended_event_t* suspended_event = _lf_suspended_events_head; - while(suspended_event != NULL) { - event_t* event = suspended_event->event; - if (event != NULL && event->trigger != NULL && event->trigger->mode == state->next_mode) { - if (state->mode_change == reset_transition) { // Reset transition - if (event->trigger->is_timer) { // Only reset timers - trigger_t* timer = event->trigger; - - LF_PRINT_DEBUG("Modes: Re-enqueuing reset timer."); - // Reschedule the timer with no additional delay. - // This will take care of super dense time when offset is 0. - lf_schedule_trigger(env, timer, event->trigger->offset, NULL); - } - // No further processing; drops all events upon reset (timer event was recreated by schedule and original can be removed here) - } else if (state->next_mode != state->current_mode && event->trigger != NULL) { // History transition to a different mode - // Remaining time that the event would have been waiting before mode was left - instant_t local_remaining_delay = event->time - (state->next_mode->deactivation_time != 0 ? 
state->next_mode->deactivation_time : lf_time_start()); - tag_t current_logical_tag = env->current_tag; - - // Reschedule event with original local delay - LF_PRINT_DEBUG("Modes: Re-enqueuing event with a suspended delay of " PRINTF_TIME - " (previous TTH: " PRINTF_TIME ", Mode suspended at: " PRINTF_TIME ").", - local_remaining_delay, event->time, state->next_mode->deactivation_time); - tag_t schedule_tag = {.time = current_logical_tag.time + local_remaining_delay, .microstep = (local_remaining_delay == 0 ? current_logical_tag.microstep + 1 : 0)}; - _lf_schedule_at_tag(env, event->trigger, schedule_tag, event->token); - - // Also schedule events stacked up in super dense time. - event_t* e = event; - while (e->next != NULL) { - schedule_tag.microstep++; - _lf_schedule_at_tag(env, e->next->trigger, schedule_tag, e->next->token); - event_t* tmp = e->next; - e = tmp->next; - // A fresh event was created by schedule, hence, recycle old one - lf_recycle_event(env, tmp); - } - } - // A fresh event was created by schedule, hence, recycle old one - lf_recycle_event(env, event); - - // Remove suspended event and continue - suspended_event = _lf_remove_suspended_event(suspended_event); - } else { - suspended_event = suspended_event->next; - } - } + // Reset/Reactivate previously suspended events of next state + _lf_suspended_event_t* suspended_event = _lf_suspended_events_head; + while (suspended_event != NULL) { + event_t* event = suspended_event->event; + if (event != NULL && event->trigger != NULL && event->trigger->mode == state->next_mode) { + if (state->mode_change == reset_transition) { // Reset transition + if (event->trigger->is_timer) { // Only reset timers + trigger_t* timer = event->trigger; + + LF_PRINT_DEBUG("Modes: Re-enqueuing reset timer."); + // Reschedule the timer with no additional delay. + // This will take care of super dense time when offset is 0. 
+ lf_schedule_trigger(env, timer, event->trigger->offset, NULL); + } + // No further processing; drops all events upon reset (timer event was recreated by schedule and original + // can be removed here) + } else if (state->next_mode != state->current_mode && + event->trigger != NULL) { // History transition to a different mode + // Remaining time that the event would have been waiting before mode was left + instant_t local_remaining_delay = + event->time - + (state->next_mode->deactivation_time != 0 ? state->next_mode->deactivation_time : lf_time_start()); + tag_t current_logical_tag = env->current_tag; + + // Reschedule event with original local delay + LF_PRINT_DEBUG("Modes: Re-enqueuing event with a suspended delay of " PRINTF_TIME + " (previous TTH: " PRINTF_TIME ", Mode suspended at: " PRINTF_TIME ").", + local_remaining_delay, event->time, state->next_mode->deactivation_time); + tag_t schedule_tag = {.time = current_logical_tag.time + local_remaining_delay, + .microstep = (local_remaining_delay == 0 ? current_logical_tag.microstep + 1 : 0)}; + _lf_schedule_at_tag(env, event->trigger, schedule_tag, event->token); + + // Also schedule events stacked up in super dense time. 
+ event_t* e = event; + while (e->next != NULL) { + schedule_tag.microstep++; + _lf_schedule_at_tag(env, e->next->trigger, schedule_tag, e->next->token); + event_t* tmp = e->next; + e = tmp->next; + // A fresh event was created by schedule, hence, recycle old one + lf_recycle_event(env, tmp); + } } + // A fresh event was created by schedule, hence, recycle old one + lf_recycle_event(env, event); + + // Remove suspended event and continue + suspended_event = _lf_remove_suspended_event(suspended_event); + } else { + suspended_event = suspended_event->next; + } } + } } + } - // Handle leaving active mode in all states - if (transition) { - // Set new active mode and clear mode change flags - // (top down for correct active flags) - for (int i = 0; i < states_size; i++) { - reactor_mode_state_t* state = states[i]; - if (state != NULL) { - // Clear cached active flag on active state, because - // parent activity might have changed or active state may change. - state->current_mode->flags &= ~_LF_MODE_FLAG_MASK_ACTIVE; - - // Apply transition effect - if (state->next_mode != NULL) { - // Save time when mode was left to handle suspended events in the future - state->current_mode->deactivation_time = lf_time_logical(env); - - // Apply transition - state->current_mode = state->next_mode; - - // Trigger startup reactions if entered first time - if (!(state->current_mode->flags & _LF_MODE_FLAG_MASK_HAD_STARTUP)) { - state->current_mode->flags |= _LF_MODE_FLAG_MASK_NEEDS_STARTUP; - } - - // Trigger reset reactions - if (state->mode_change == reset_transition) { - state->current_mode->flags |= _LF_MODE_FLAG_MASK_NEEDS_RESET; - } else { - // Needs to be cleared because flag could be there from previous - // entry (with subsequent inactivity) which is now obsolete - state->current_mode->flags &= ~_LF_MODE_FLAG_MASK_NEEDS_RESET; - } - - state->next_mode = NULL; - state->mode_change = no_transition; - } - - // Compute new cached activity flag - if 
(_lf_mode_is_active(state->parent_mode)) { - // If there is no enclosing or the parent is marked active, - // then set active flag on active mode - state->current_mode->flags |= _LF_MODE_FLAG_MASK_ACTIVE; - - // Register execution of special triggers - // This is not done when setting the flag because actual triggering - // might be delayed by parent mode inactivity. - env->modes->triggered_reactions_request |= state->current_mode->flags & - (_LF_MODE_FLAG_MASK_NEEDS_STARTUP | _LF_MODE_FLAG_MASK_NEEDS_RESET); - } - } + // Handle leaving active mode in all states + if (transition) { + // Set new active mode and clear mode change flags + // (top down for correct active flags) + for (int i = 0; i < states_size; i++) { + reactor_mode_state_t* state = states[i]; + if (state != NULL) { + // Clear cached active flag on active state, because + // parent activity might have changed or active state may change. + state->current_mode->flags &= ~_LF_MODE_FLAG_MASK_ACTIVE; + + // Apply transition effect + if (state->next_mode != NULL) { + // Save time when mode was left to handle suspended events in the future + state->current_mode->deactivation_time = lf_time_logical(env); + + // Apply transition + state->current_mode = state->next_mode; + + // Trigger startup reactions if entered first time + if (!(state->current_mode->flags & _LF_MODE_FLAG_MASK_HAD_STARTUP)) { + state->current_mode->flags |= _LF_MODE_FLAG_MASK_NEEDS_STARTUP; + } + + // Trigger reset reactions + if (state->mode_change == reset_transition) { + state->current_mode->flags |= _LF_MODE_FLAG_MASK_NEEDS_RESET; + } else { + // Needs to be cleared because flag could be there from previous + // entry (with subsequent inactivity) which is now obsolete + state->current_mode->flags &= ~_LF_MODE_FLAG_MASK_NEEDS_RESET; + } + + state->next_mode = NULL; + state->mode_change = no_transition; } - // Retract all events from the event queue that are associated with now inactive modes - if (env->event_q != NULL) { - size_t q_size = 
pqueue_size(env->event_q); - if (q_size > 0) { - event_t** delayed_removal = (event_t**) calloc(q_size, sizeof(event_t*)); - size_t delayed_removal_count = 0; - - // Find events - for (size_t i = 0; i < q_size; i++) { - event_t* event = (event_t*)env->event_q->d[i + 1]; // internal queue data structure omits index 0 - if (event != NULL && event->trigger != NULL && !_lf_mode_is_active(event->trigger->mode)) { - delayed_removal[delayed_removal_count++] = event; - // This will store the event including possibly those chained up in super dense time - _lf_add_suspended_event(event); - } - } - - // Events are removed delayed in order to allow linear iteration over the queue - LF_PRINT_DEBUG("Modes: Pulling %zu events from the event queue to suspend them. %d events are now suspended.", - delayed_removal_count, _lf_suspended_events_num); - for (size_t i = 0; i < delayed_removal_count; i++) { - pqueue_remove(env->event_q, delayed_removal[i]); - } - - free(delayed_removal); - } + // Compute new cached activity flag + if (_lf_mode_is_active(state->parent_mode)) { + // If there is no enclosing or the parent is marked active, + // then set active flag on active mode + state->current_mode->flags |= _LF_MODE_FLAG_MASK_ACTIVE; + + // Register execution of special triggers + // This is not done when setting the flag because actual triggering + // might be delayed by parent mode inactivity. 
+ env->modes->triggered_reactions_request |= + state->current_mode->flags & (_LF_MODE_FLAG_MASK_NEEDS_STARTUP | _LF_MODE_FLAG_MASK_NEEDS_RESET); + } + } + } + + // Retract all events from the event queue that are associated with now inactive modes + if (env->event_q != NULL) { + size_t q_size = pqueue_size(env->event_q); + if (q_size > 0) { + event_t** delayed_removal = (event_t**)calloc(q_size, sizeof(event_t*)); + size_t delayed_removal_count = 0; + + // Find events + for (size_t i = 0; i < q_size; i++) { + event_t* event = (event_t*)env->event_q->d[i + 1]; // internal queue data structure omits index 0 + if (event != NULL && event->trigger != NULL && !_lf_mode_is_active(event->trigger->mode)) { + delayed_removal[delayed_removal_count++] = event; + // This will store the event including possibly those chained up in super dense time + _lf_add_suspended_event(event); + } } - if (env->modes->triggered_reactions_request) { - // Insert a dummy event in the event queue for the next microstep to make - // sure startup/reset reactions (if any) are triggered as soon as possible. - pqueue_insert(env->event_q, _lf_create_dummy_events(env, NULL, env->current_tag.time, NULL, 1)); + // Events are removed delayed in order to allow linear iteration over the queue + LF_PRINT_DEBUG("Modes: Pulling %zu events from the event queue to suspend them. %d events are now suspended.", + delayed_removal_count, _lf_suspended_events_num); + for (size_t i = 0; i < delayed_removal_count; i++) { + pqueue_remove(env->event_q, delayed_removal[i]); } + + free(delayed_removal); + } + } + + if (env->modes->triggered_reactions_request) { + // Insert a dummy event in the event queue for the next microstep to make + // sure startup/reset reactions (if any) are triggered as soon as possible. + pqueue_insert(env->event_q, _lf_create_dummy_events(env, NULL, env->current_tag.time, NULL, 1)); } + } } /** @@ -541,57 +529,47 @@ void _lf_process_mode_changes( * - Frees all suspended events. 
*/ void _lf_terminate_modal_reactors(environment_t* env) { - _lf_suspended_event_t* suspended_event = _lf_suspended_events_head; - while(suspended_event != NULL) { - lf_recycle_event(env, suspended_event->event); - _lf_suspended_event_t* next = suspended_event->next; - free(suspended_event); - suspended_event = next; - } - _lf_suspended_events_head = NULL; - _lf_suspended_events_num = 0; - - // Also free suspended_event elements stored for recycling - suspended_event = _lf_unsused_suspended_events_head; - while(suspended_event != NULL) { - _lf_suspended_event_t* next = suspended_event->next; - free(suspended_event); - suspended_event = next; - } - _lf_unsused_suspended_events_head = NULL; + _lf_suspended_event_t* suspended_event = _lf_suspended_events_head; + while (suspended_event != NULL) { + lf_recycle_event(env, suspended_event->event); + _lf_suspended_event_t* next = suspended_event->next; + free(suspended_event); + suspended_event = next; + } + _lf_suspended_events_head = NULL; + _lf_suspended_events_num = 0; + + // Also free suspended_event elements stored for recycling + suspended_event = _lf_unsused_suspended_events_head; + while (suspended_event != NULL) { + _lf_suspended_event_t* next = suspended_event->next; + free(suspended_event); + suspended_event = next; + } + _lf_unsused_suspended_events_head = NULL; } void _lf_initialize_modes(environment_t* env) { - assert(env != GLOBAL_ENVIRONMENT); - if (env->modes) { - _lf_initialize_mode_states( - env, - env->modes->modal_reactor_states, - env->modes->modal_reactor_states_size); - } + assert(env != GLOBAL_ENVIRONMENT); + if (env->modes) { + _lf_initialize_mode_states(env, env->modes->modal_reactor_states, env->modes->modal_reactor_states_size); + } } void _lf_handle_mode_changes(environment_t* env) { - assert(env != GLOBAL_ENVIRONMENT); - if (env->modes) { - _lf_process_mode_changes( - env, - env->modes->modal_reactor_states, - env->modes->modal_reactor_states_size, - env->modes->state_resets, - 
env->modes->state_resets_size, - env->timer_triggers, - env->timer_triggers_size - ); - } + assert(env != GLOBAL_ENVIRONMENT); + if (env->modes) { + _lf_process_mode_changes(env, env->modes->modal_reactor_states, env->modes->modal_reactor_states_size, + env->modes->state_resets, env->modes->state_resets_size, env->timer_triggers, + env->timer_triggers_size); + } } void _lf_handle_mode_triggered_reactions(environment_t* env) { - assert(env != GLOBAL_ENVIRONMENT); - if (env->modes) { - _lf_handle_mode_startup_reset_reactions( - env, env->startup_reactions, env->startup_reactions_size, - env->reset_reactions, env->reset_reactions_size, - env->modes->modal_reactor_states, env->modes->modal_reactor_states_size); - } + assert(env != GLOBAL_ENVIRONMENT); + if (env->modes) { + _lf_handle_mode_startup_reset_reactions(env, env->startup_reactions, env->startup_reactions_size, + env->reset_reactions, env->reset_reactions_size, + env->modes->modal_reactor_states, env->modes->modal_reactor_states_size); + } } #endif diff --git a/core/platform/CMakeLists.txt b/core/platform/CMakeLists.txt deleted file mode 100644 index eec94db65..000000000 --- a/core/platform/CMakeLists.txt +++ /dev/null @@ -1,33 +0,0 @@ -# Check which system we are running on to select the correct platform support -# file and assign the file's path to LF_PLATFORM_FILE - -set(LF_PLATFORM_FILES - lf_unix_clock_support.c - lf_unix_syscall_support.c - lf_linux_support.c - lf_macos_support.c - lf_windows_support.c - lf_nrf52_support.c - lf_zephyr_support.c - lf_zephyr_clock_counter.c - lf_zephyr_clock_kernel.c - lf_rp2040_support.c - lf_atomic_windows.c - lf_atomic_gcc_clang.c - lf_atomic_irq.c -) - -if(${CMAKE_SYSTEM_NAME} STREQUAL "Nrf52") - list(APPEND REACTORC_COMPILE_DEFS PLATFORM_NRF52) -elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Zephyr") - list(APPEND REACTORC_COMPILE_DEFS PLATFORM_ZEPHYR) - set(PLATFORM_ZEPHYR true) -elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Rp2040") - list(APPEND REACTORC_COMPILE_DEFS 
PLATFORM_RP2040) -endif() - -# Prepend all sources with platform -list(TRANSFORM LF_PLATFORM_FILES PREPEND platform/) - -# Add sources to the list for debug info -list(APPEND REACTORC_SOURCES ${LF_PLATFORM_FILES}) diff --git a/core/platform/lf_C11_threads_support.c b/core/platform/lf_C11_threads_support.c deleted file mode 100644 index 98dccb58d..000000000 --- a/core/platform/lf_C11_threads_support.c +++ /dev/null @@ -1,69 +0,0 @@ -#if !defined(LF_SINGLE_THREADED) && !defined(PLATFORM_ARDUINO) -#include "platform.h" -#include "lf_C11_threads_support.h" -#include -#include -#include // For fixed-width integral types - -int lf_thread_create(lf_thread_t* thread, void *(*lf_thread) (void *), void* arguments) { - return thrd_create((thrd_t*)thread, (thrd_start_t)lf_thread, arguments); -} - -int lf_thread_join(lf_thread_t thread, void** thread_return) { - // thrd_join wants the second argument to be an int* rather than a void** - return thrd_join((thrd_t)thread, (int*)thread_return); -} - -int lf_mutex_init(lf_mutex_t* mutex) { - // Set up a timed and recursive mutex (default behavior) - return mtx_init((mtx_t*)mutex, mtx_timed | mtx_recursive); -} - -int lf_mutex_lock(lf_mutex_t* mutex) { - return mtx_lock((mtx_t*) mutex); -} - -int lf_mutex_unlock(lf_mutex_t* mutex) { - return mtx_unlock((mtx_t*) mutex); -} - -int lf_cond_init(lf_cond_t* cond, lf_mutex_t* mutex) { - cond->mutex = mutex; - return cnd_init((cnd_t*)&cond->condition); -} - -int lf_cond_broadcast(lf_cond_t* cond) { - return cnd_broadcast((cnd_t*)&cond->condition); -} - -int lf_cond_signal(lf_cond_t* cond) { - return cnd_signal((cnd_t*)&cond->condition); -} - -int lf_cond_wait(lf_cond_t* cond) { - return cnd_wait((cnd_t*)&cond->condition, (mtx_t*)cond->mutex); -} - -int _lf_cond_timedwait(lf_cond_t* cond, instant_t wakeup_time) { - struct timespec timespec_absolute_time = { - .tv_sec = wakeup_time / BILLION, - .tv_nsec = wakeup_time % BILLION - }; - - int return_value = cnd_timedwait( - 
(cnd_t*)&cond->condition, - (mtx_t*)cond->mutex, - ×pec_absolute_time - ); - - switch (return_value) { - case thrd_timedout: - return_value = LF_TIMEOUT; - break; - - default: - break; - } - return return_value; -} -#endif diff --git a/core/platform/lf_POSIX_threads_support.c b/core/platform/lf_POSIX_threads_support.c deleted file mode 100644 index 7a99eb391..000000000 --- a/core/platform/lf_POSIX_threads_support.c +++ /dev/null @@ -1,83 +0,0 @@ -#if !defined(LF_SINGLE_THREADED) && !defined(PLATFORM_ARDUINO) -#include "platform.h" -#include "lf_POSIX_threads_support.h" -#include "lf_unix_clock_support.h" - -#include -#include -#include // For fixed-width integral types - -int lf_thread_create(lf_thread_t* thread, void *(*lf_thread) (void *), void* arguments) { - return pthread_create((pthread_t*)thread, NULL, lf_thread, arguments); -} - -int lf_thread_join(lf_thread_t thread, void** thread_return) { - return pthread_join((pthread_t)thread, thread_return); -} - -int lf_mutex_init(lf_mutex_t* mutex) { - // Set up a recursive mutex - pthread_mutexattr_t attr; - pthread_mutexattr_init(&attr); - // Initialize the mutex to be recursive, meaning that it is OK - // for the same thread to lock and unlock the mutex even if it already holds - // the lock. - // FIXME: This is dangerous. The docs say this: "It is advised that an - // application should not use a PTHREAD_MUTEX_RECURSIVE mutex with - // condition variables because the implicit unlock performed for a - // pthread_cond_wait() or pthread_cond_timedwait() may not actually - // release the mutex (if it had been locked multiple times). - // If this happens, no other thread can satisfy the condition - // of the predicate.” This seems like a bug in the implementation of - // pthreads. Maybe it has been fixed? 
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); - return pthread_mutex_init((pthread_mutex_t*)mutex, &attr); -} - -int lf_mutex_lock(lf_mutex_t* mutex) { - return pthread_mutex_lock((pthread_mutex_t*)mutex); -} - -int lf_mutex_unlock(lf_mutex_t* mutex) { - return pthread_mutex_unlock((pthread_mutex_t*)mutex); -} - -int lf_cond_init(lf_cond_t* cond, lf_mutex_t* mutex) { - cond->mutex = mutex; - pthread_condattr_t cond_attr; - pthread_condattr_init(&cond_attr); - // Limit the scope of the condition variable to this process (default) - pthread_condattr_setpshared(&cond_attr, PTHREAD_PROCESS_PRIVATE); - return pthread_cond_init(&cond->condition, &cond_attr); -} - -int lf_cond_broadcast(lf_cond_t* cond) { - return pthread_cond_broadcast((pthread_cond_t*)&cond->condition); -} - -int lf_cond_signal(lf_cond_t* cond) { - return pthread_cond_signal((pthread_cond_t*)&cond->condition); -} - -int lf_cond_wait(lf_cond_t* cond) { - return pthread_cond_wait((pthread_cond_t*)&cond->condition, (pthread_mutex_t*)cond->mutex); -} - -int _lf_cond_timedwait(lf_cond_t* cond, instant_t wakeup_time) { - struct timespec timespec_absolute_time = convert_ns_to_timespec(wakeup_time); - int return_value = pthread_cond_timedwait( - (pthread_cond_t*)&cond->condition, - (pthread_mutex_t*)cond->mutex, - ×pec_absolute_time - ); - switch (return_value) { - case ETIMEDOUT: - return_value = LF_TIMEOUT; - break; - - default: - break; - } - return return_value; -} -#endif diff --git a/core/platform/lf_atomic_gcc_clang.c b/core/platform/lf_atomic_gcc_clang.c deleted file mode 100644 index 2a9cbbbc3..000000000 --- a/core/platform/lf_atomic_gcc_clang.c +++ /dev/null @@ -1,40 +0,0 @@ -#if defined(PLATFORM_Linux) || defined(PLATFORM_Darwin) -#if defined(__GNUC__) || defined(__clang__) -/** - * @author Soroush Bateni - * @author Erling Rennemo Jellum - * @copyright (c) 2023 - * License: BSD 2-clause - * @brief Implements the atomics API using GCC/Clang APIs. 
- */ - -#include "lf_atomic.h" -#include "platform.h" - -int32_t lf_atomic_fetch_add32(int32_t *ptr, int32_t value) { - return __sync_fetch_and_add(ptr, value); -} -int64_t lf_atomic_fetch_add64(int64_t *ptr, int64_t value) { - return __sync_fetch_and_add(ptr, value); -} -int32_t lf_atomic_add_fetch32(int32_t *ptr, int32_t value) { - return __sync_add_and_fetch(ptr, value); -} -int64_t lf_atomic_add_fetch64(int64_t *ptr, int64_t value) { - return __sync_add_and_fetch(ptr, value); -} -bool lf_atomic_bool_compare_and_swap32(int32_t *ptr, int32_t oldval, int32_t newval) { - return __sync_bool_compare_and_swap(ptr, oldval, newval); -} -bool lf_atomic_bool_compare_and_swap64(int64_t *ptr, int64_t oldval, int64_t newval) { - return __sync_bool_compare_and_swap(ptr, oldval, newval); -} -int32_t lf_atomic_val_compare_and_swap32(int32_t *ptr, int32_t oldval, int32_t newval) { - return __sync_val_compare_and_swap(ptr, oldval, newval); -} -int64_t lf_atomic_val_compare_and_swap64(int64_t *ptr, int64_t oldval, int64_t newval) { - return __sync_val_compare_and_swap(ptr, oldval, newval); -} - -#endif -#endif diff --git a/core/platform/lf_atomic_irq.c b/core/platform/lf_atomic_irq.c deleted file mode 100644 index fd19d340d..000000000 --- a/core/platform/lf_atomic_irq.c +++ /dev/null @@ -1,93 +0,0 @@ -#if defined(PLATFORM_ARDUINO) || defined(PLATFORM_NRF52) || defined(PLATFORM_ZEPHYR) || defined(PLATFORM_RP2040) -/** - * @author Erling Rennemo Jellum - * @copyright (c) 2023 - * License: BSD 2-clause - * @brief Implements the atomics API by disabling interrupts. Typically used for platforms that - * do not support atomic operations. The platforms need to implement `lf_enable_interrupts_nested` - * and `lf_disable_interrupts_nested`. - */ - -#include "lf_atomic.h" -#include "platform.h" - -// Forward declare the functions for enabling/disabling interrupts. Must be -// implemented in the platform support file of the target. 
-int lf_disable_interrupts_nested(); -int lf_enable_interrupts_nested(); - -int32_t lf_atomic_fetch_add32(int32_t *ptr, int32_t value) { - lf_disable_interrupts_nested(); - int32_t res = *ptr; - *ptr += value; - lf_enable_interrupts_nested(); - return res; -} - -int64_t lf_atomic_fetch_add64(int64_t *ptr, int64_t value) { - lf_disable_interrupts_nested(); - int64_t res = *ptr; - *ptr += value; - lf_enable_interrupts_nested(); - return res; -} - -int32_t lf_atomic_add_fetch32(int32_t *ptr, int32_t value) { - lf_disable_interrupts_nested(); - int res = *ptr + value; - *ptr = res; - lf_enable_interrupts_nested(); - return res; -} - -int64_t lf_atomic_add_fetch64(int64_t *ptr, int64_t value) { - lf_disable_interrupts_nested(); - int64_t res = *ptr + value; - *ptr = res; - lf_enable_interrupts_nested(); - return res; -} - -bool lf_atomic_bool_compare_and_swap32(int32_t *ptr, int32_t oldval, int32_t newval) { - lf_disable_interrupts_nested(); - bool res = false; - if ((*ptr) == oldval) { - *ptr = newval; - res = true; - } - lf_enable_interrupts_nested(); - return res; -} - -bool lf_atomic_bool_compare_and_swap64(int64_t *ptr, int64_t oldval, int64_t newval) { - lf_disable_interrupts_nested(); - bool res = false; - if ((*ptr) == oldval) { - *ptr = newval; - res = true; - } - lf_enable_interrupts_nested(); - return res; -} - -int32_t lf_atomic_val_compare_and_swap32(int32_t *ptr, int32_t oldval, int32_t newval) { - lf_disable_interrupts_nested(); - int res = *ptr; - if ((*ptr) == oldval) { - *ptr = newval; - } - lf_enable_interrupts_nested(); - return res; -} - -int64_t lf_atomic_val_compare_and_swap64(int64_t *ptr, int64_t oldval, int64_t newval) { - lf_disable_interrupts_nested(); - int64_t res = *ptr; - if ((*ptr) == oldval) { - *ptr = newval; - } - lf_enable_interrupts_nested(); - return res; -} - -#endif diff --git a/core/platform/lf_atomic_windows.c b/core/platform/lf_atomic_windows.c deleted file mode 100644 index 2739330b9..000000000 --- 
a/core/platform/lf_atomic_windows.c +++ /dev/null @@ -1,37 +0,0 @@ -#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__NT__) -/** - * @author Soroush Bateni - * @author Erling Rennemo Jellum - * @copyright (c) 2023 - * License: BSD 2-clause - * @brief Implements the atomic API for Windows machines. - */ - -#include "lf_atomic.h" -#include - -int32_t lf_atomic_fetch_add32(int32_t *ptr, int32_t value) { - return InterlockedExchangeAdd(ptr, value); -} -int64_t lf_atomic_fetch_add64(int64_t *ptr, int64_t value) { - return InterlockedExchangeAdd64(ptr, value); -} -int32_t lf_atomic_add_fetch32(int32_t *ptr, int32_t value) { - return InterlockedAdd(ptr, value); -} -int64_t lf_atomic_add_fetch64(int64_t *ptr, int64_t value) { - return InterlockedAdd64(ptr, value); -} -bool lf_atomic_bool_compare_and_swap32(int32_t *ptr, int32_t oldval, int32_t newval) { - return (InterlockedCompareExchange(ptr, newval, oldval) == oldval); -} -bool lf_atomic_bool_compare_and_swap64(int64_t *ptr, int64_t oldval, int64_t newval) { - return (InterlockedCompareExchange64(ptr, newval, oldval) == oldval); -} -int32_t lf_atomic_val_compare_and_swap32(int32_t *ptr, int32_t oldval, int32_t newval) { - return InterlockedCompareExchange(ptr, newval, oldval); -} -int64_t lf_atomic_val_compare_and_swap64(int64_t *ptr, int64_t oldval, int64_t newval) { - return InterlockedCompareExchange64(ptr, newval, oldval); -} -#endif diff --git a/core/platform/lf_unix_clock_support.c b/core/platform/lf_unix_clock_support.c deleted file mode 100644 index 92309e9e8..000000000 --- a/core/platform/lf_unix_clock_support.c +++ /dev/null @@ -1,45 +0,0 @@ -#if defined(PLATFORM_Linux) || defined(PLATFORM_Darwin) -#include -#include - -#include "platform.h" -#include "util.h" -#include "lf_unix_clock_support.h" - - -instant_t convert_timespec_to_ns(struct timespec tp) { - return ((instant_t) tp.tv_sec) * BILLION + tp.tv_nsec; -} - -struct timespec convert_ns_to_timespec(instant_t t) { - struct timespec 
tp; - tp.tv_sec = t / BILLION; - tp.tv_nsec = (t % BILLION); - return tp; -} - -void _lf_initialize_clock() { - struct timespec res; - int return_value = clock_getres(CLOCK_REALTIME, (struct timespec*) &res); - if (return_value < 0) { - lf_print_error_and_exit("Could not obtain resolution for CLOCK_REALTIME"); - } - - lf_print("---- System clock resolution: %ld nsec", res.tv_nsec); -} - -/** - * Fetch the value of CLOCK_REALTIME and store it in t. - * @return 0 for success, or -1 for failure. - */ -int _lf_clock_gettime(instant_t* t) { - if (t == NULL) return -1; - struct timespec tp; - if (clock_gettime(CLOCK_REALTIME, (struct timespec*) &tp) != 0) { - return -1; - } - *t = convert_timespec_to_ns(tp); - return 0; -} - -#endif diff --git a/core/platform/lf_windows_support.c b/core/platform/lf_windows_support.c deleted file mode 100644 index 8c829bfb7..000000000 --- a/core/platform/lf_windows_support.c +++ /dev/null @@ -1,301 +0,0 @@ -#ifdef PLATFORM_Windows -/* Windows API support for the C target of Lingua Franca. */ - -/************* -Copyright (c) 2021, The University of California at Berkeley. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL -THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF -THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -***************/ - -/** Windows API support for the C target of Lingua Franca. - * - * @author{Soroush Bateni } - * - * All functions return 0 on success. - * - * @see https://gist.github.com/Soroosh129/127d1893fa4c1da6d3e1db33381bb273 - */ - -#include // Order in which windows.h is included does matter! -#include -#include -#include -#include - -#include "lf_windows_support.h" -#include "platform.h" -#include "tag.h" -#include "util.h" - -/** - * Indicate whether or not the underlying hardware - * supports Windows' high-resolution counter. It should - * always be supported for Windows Xp and later. - */ -int _lf_use_performance_counter = 0; - -/** - * The denominator to convert the performance counter - * to nanoseconds. - */ -double _lf_frequency_to_ns = 1.0; - -void _lf_initialize_clock() { - // Check if the performance counter is available - LARGE_INTEGER performance_frequency; - _lf_use_performance_counter = QueryPerformanceFrequency(&performance_frequency); - if (_lf_use_performance_counter) { - _lf_frequency_to_ns = (double)performance_frequency.QuadPart / BILLION; - } else { - lf_print_error( - "High resolution performance counter is not supported on this machine."); - _lf_frequency_to_ns = 0.01; - } -} - -/** - * Fetch the value of the physical clock (see lf_windows_support.h) and store it in t. 
- * The timestamp value in 't' will be based on QueryPerformanceCounter, adjusted to - * reflect time passed in nanoseconds, on most modern Windows systems. - * - * @return 0 for success, or -1 for failure. In case of failure, errno will be - * set to EINVAL or EFAULT. - */ -int _lf_clock_gettime(instant_t* t) { - // Adapted from gclib/GResUsage.cpp - // (https://github.com/gpertea/gclib/blob/8aee376774ccb2f3bd3f8e3bf1c9df1528ac7c5b/GResUsage.cpp) - // License: https://github.com/gpertea/gclib/blob/master/LICENSE.txt - int result = -1; - if (t == NULL) { - // The t argument address references invalid memory - errno = EFAULT; - return result; - } - LARGE_INTEGER windows_time; - if (_lf_use_performance_counter) { - int result = QueryPerformanceCounter(&windows_time); - if ( result == 0) { - lf_print_error("_lf_clock_gettime(): Failed to read the value of the physical clock."); - return result; - } - } else { - FILETIME f; - GetSystemTimeAsFileTime(&f); - windows_time.QuadPart = f.dwHighDateTime; - windows_time.QuadPart <<= 32; - windows_time.QuadPart |= f.dwLowDateTime; - } - *t = (instant_t)((double)windows_time.QuadPart / _lf_frequency_to_ns); - return (0); -} - -/** - * Pause execution for a number of nanoseconds. - * - * @return 0 for success, or -1 for failure. In case of failure, errno will be - * set to - * - EINTR: The sleep was interrupted by a signal handler - * - EINVAL: All other errors - */ -int lf_sleep(interval_t sleep_duration) { - /* Declarations */ - HANDLE timer; /* Timer handle */ - LARGE_INTEGER li; /* Time defintion */ - /* Create timer */ - if(!(timer = CreateWaitableTimer(NULL, TRUE, NULL))) { - return FALSE; - } - /** - * Set timer properties. - * A negative number indicates relative time to wait. - * The requested sleep duration must be in number of 100 nanoseconds. 
- */ - li.QuadPart = -1 * (sleep_duration / 100); - if(!SetWaitableTimer(timer, &li, 0, NULL, NULL, FALSE)){ - CloseHandle(timer); - return FALSE; - } - /* Start & wait for timer */ - WaitForSingleObject(timer, INFINITE); - /* Clean resources */ - CloseHandle(timer); - /* Slept without problems */ - return TRUE; -} - -int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_time) { - interval_t sleep_duration = wakeup_time - lf_time_physical(); - - if (sleep_duration <= 0) { - return 0; - } else { - return lf_sleep(sleep_duration); - } -} - -int lf_nanosleep(interval_t sleep_duration) { - return lf_sleep(sleep_duration); -} - -#if defined(LF_SINGLE_THREADED) -#include "lf_os_single_threaded_support.c" -#endif - - -#if !defined(LF_SINGLE_THREADED) -int lf_available_cores() { - SYSTEM_INFO sysinfo; - GetSystemInfo(&sysinfo); - return sysinfo.dwNumberOfProcessors; -} - -int lf_thread_create(lf_thread_t* thread, void *(*lf_thread) (void *), void* arguments) { - uintptr_t handle = _beginthreadex(NULL, 0, lf_thread, arguments, 0, NULL); - *thread = (HANDLE)handle; - if(handle == 0){ - return errno; - }else{ - return 0; - } -} - -/** - * Make calling thread wait for termination of the thread. The - * exit status of the thread is stored in thread_return, if thread_return - * is not NULL. - * - * @return 0 on success, EINVAL otherwise. - */ -int lf_thread_join(lf_thread_t thread, void** thread_return) { - DWORD retvalue = WaitForSingleObject(thread, INFINITE); - if(retvalue == WAIT_FAILED){ - return EINVAL; - } - return 0; -} - -int lf_mutex_init(_lf_critical_section_t* critical_section) { - // Set up a recursive mutex - InitializeCriticalSection((PCRITICAL_SECTION)critical_section); - if(critical_section != NULL){ - return 0; - }else{ - return 1; - } -} - -/** - * Lock a critical section. 
- * - * From https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-entercriticalsection: - * "This function can raise EXCEPTION_POSSIBLE_DEADLOCK if a wait operation on the critical section times out. - * The timeout interval is specified by the following registry value: - * HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\CriticalSectionTimeout. - * Do not handle a possible deadlock exception; instead, debug the application." - * - * @return 0 - */ -int lf_mutex_lock(_lf_critical_section_t* critical_section) { - // The following Windows API does not return a value. It can - // raise a EXCEPTION_POSSIBLE_DEADLOCK. See synchapi.h. - EnterCriticalSection((PCRITICAL_SECTION)critical_section); - return 0; -} - -int lf_mutex_unlock(_lf_critical_section_t* critical_section) { - // The following Windows API does not return a value. - LeaveCriticalSection((PCRITICAL_SECTION)critical_section); - return 0; -} - -int lf_cond_init(lf_cond_t* cond, _lf_critical_section_t* critical_section) { - // The following Windows API does not return a value. - cond->critical_section = critical_section; - InitializeConditionVariable((PCONDITION_VARIABLE)&cond->condition); - return 0; -} - -int lf_cond_broadcast(lf_cond_t* cond) { - // The following Windows API does not return a value. - WakeAllConditionVariable((PCONDITION_VARIABLE)&cond->condition); - return 0; -} - -int lf_cond_signal(lf_cond_t* cond) { - // The following Windows API does not return a value. - WakeConditionVariable((PCONDITION_VARIABLE)&cond->condition); - return 0; -} - -int lf_cond_wait(lf_cond_t* cond) { - // According to synchapi.h, the following Windows API returns 0 on failure, - // and non-zero on success. 
- int return_value = - (int)SleepConditionVariableCS( - (PCONDITION_VARIABLE)&cond->condition, - (PCRITICAL_SECTION)cond->critical_section, - INFINITE - ); - switch (return_value) { - case 0: - // Error - return 1; - break; - - default: - // Success - return 0; - break; - } -} - -int _lf_cond_timedwait(lf_cond_t* cond, instant_t wakeup_time) { - // Convert the absolute time to a relative time. - interval_t wait_duration = wakeup_time - lf_time_physical(); - if (wait_duration<= 0) { - // physical time has already caught up sufficiently and we do not need to wait anymore - return 0; - } - - // convert ns to ms and round up to closest full integer - DWORD wait_duration_ms = (wait_duration + 999999LL) / 1000000LL; - - int return_value = - (int)SleepConditionVariableCS( - (PCONDITION_VARIABLE)&cond->condition, - (PCRITICAL_SECTION)cond->critical_section, - wait_duration_ms - ); - if (return_value == 0) { - // Error - if (GetLastError() == ERROR_TIMEOUT) { - return LF_TIMEOUT; - } - return -1; - } - - // Success - return 0; -} -#endif - - -#endif diff --git a/core/platform/lf_zephyr_clock_counter.c b/core/platform/lf_zephyr_clock_counter.c deleted file mode 100644 index 6e99feb06..000000000 --- a/core/platform/lf_zephyr_clock_counter.c +++ /dev/null @@ -1,220 +0,0 @@ -#if defined(PLATFORM_ZEPHYR) -#include "lf_zephyr_board_support.h" -#if defined(LF_ZEPHYR_CLOCK_COUNTER) -/************* -Copyright (c) 2023, Norwegian University of Science and Technology. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL -THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF -THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -***************/ - -/** - * @brief This implements the timing-related platform API ontop of the Zephyr - * Counter API. The Counter API is a generic interface to a timer peripheral. It - * gives the best timing performance and allows actual sleeping rather than - * busy-waiting which is performed with the Kernel API. - * - * @author{Erling Jellum } - * @author{Marten Lohstroh } - */ -#include -#include - -#include "lf_zephyr_support.h" -#include "platform.h" -#include "util.h" - -static int64_t epoch_duration_nsec; -static int64_t epoch_duration_usec; -static uint32_t counter_max_ticks; -static volatile int64_t last_epoch_nsec = 0; -static uint32_t counter_freq; -static volatile bool async_event = false; - -K_SEM_DEFINE(semaphore,0,1) - -static struct counter_alarm_cfg alarm_cfg; -const struct device *const counter_dev = DEVICE_DT_GET(LF_TIMER); -static volatile bool alarm_fired; - -/** - * This callback is invoked when the underlying Timer peripheral overflows. - * Handled by incrementing the epoch variable. 
- */ -static void overflow_callback(const struct device *dev, void *user_data) { - last_epoch_nsec += epoch_duration_nsec; -} - -/** - * This callback is invoked when the alarm configured for sleeping expires. - * The sleeping thread is released by giving it the semaphore. - */ -static void alarm_callback(const struct device *counter_dev, - uint8_t chan_id, uint32_t ticks, - void *user_data) { - alarm_fired=true; - k_sem_give(&semaphore); -} - -/** - * Initialize the Counter device. Check its frequency and compute epoch - * durations. - */ -void _lf_initialize_clock() { - struct counter_top_cfg counter_top_cfg; - uint32_t counter_max_ticks=0; - int res; - - // Verify that we have the device - if (!device_is_ready(counter_dev)) { - lf_print_error_and_exit("ERROR: counter device not ready.\n"); - } - - // Verify that it is working as we think - if(!counter_is_counting_up(counter_dev)) { - lf_print_error_and_exit("ERROR: Counter is counting down \n"); - } - - // Get the frequency of the timer - counter_freq = counter_get_frequency(counter_dev); - - // Calculate the duration of an epoch. 
Compute both - // nsec and usec now at boot to avoid these computations later - counter_max_ticks = counter_get_max_top_value(counter_dev); - epoch_duration_usec = counter_ticks_to_us(counter_dev, counter_max_ticks); - epoch_duration_nsec = epoch_duration_usec * 1000LL; - - // Set the max_top value to be the maximum - counter_top_cfg.ticks = counter_max_ticks; - counter_top_cfg.callback = overflow_callback; - res = counter_set_top_value(counter_dev, &counter_top_cfg); - if (res != 0) { - lf_print_error_and_exit("ERROR: Timer couldnt set top value\n"); - } - - LF_PRINT_LOG("--- Using LF Zephyr Counter Clock with a frequency of %u Hz and wraps every %u sec\n", - counter_freq, counter_max_ticks/counter_freq); - - // Prepare the alarm config - alarm_cfg.flags = 0; - alarm_cfg.ticks = 0; - alarm_cfg.callback = alarm_callback; - alarm_cfg.user_data = &alarm_cfg; - - // Start counter - counter_start(counter_dev); -} - -/** - * The Counter device tracks current physical time. Overflows are handled in an - * ISR. - */ -int _lf_clock_gettime(instant_t* t) { - static uint64_t last_nsec = 0; - uint32_t now_cycles; - int res; - uint64_t now_nsec; - - res = counter_get_value(counter_dev, &now_cycles); - now_nsec = counter_ticks_to_us(counter_dev, now_cycles)*1000ULL + last_epoch_nsec; - - // Make sure that the clock is monotonic. We might have had a wrap but the - // epoch has not been updated because interrupts are disabled. - if (now_nsec < last_nsec) { - now_nsec = last_nsec + 1; - } - - *t = now_nsec; - last_nsec = now_nsec; - return 0; -} - -/** - * Handle interruptable sleep by configuring a future alarm callback and waiting - * on a semaphore. Make sure we can handle sleeps that exceed an entire epoch - * of the Counter. 
- */ -int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup) { - // Reset flags - alarm_fired = false; - async_event = false; - k_sem_reset(&semaphore); - - // Calculate the sleep duration - uint32_t now_cycles, sleep_duration_ticks; - counter_get_value(counter_dev, &now_cycles); - instant_t now; - _lf_clock_gettime(&now); - interval_t sleep_for_us = (wakeup - now)/1000; - - while ( !async_event && - sleep_for_us > (LF_WAKEUP_OVERHEAD_US + LF_MIN_SLEEP_US) - ) { - if (sleep_for_us < epoch_duration_usec) { - sleep_duration_ticks = counter_us_to_ticks(counter_dev, ((uint64_t) sleep_for_us) - LF_WAKEUP_OVERHEAD_US); - } else { - sleep_duration_ticks = UINT32_MAX; - } - - alarm_cfg.ticks = sleep_duration_ticks; - int err = counter_set_channel_alarm(counter_dev, LF_TIMER_ALARM_CHANNEL, &alarm_cfg); - - if (err != 0) { - lf_print_error_and_exit("Could not setup alarm for sleeping. Errno %i", err); - } - - LF_CRITICAL_SECTION_EXIT(env); - k_sem_take(&semaphore, K_FOREVER); - LF_CRITICAL_SECTION_ENTER(env); - - // Then calculating remaining sleep, unless we got woken up by an event - if (!async_event) { - _lf_clock_gettime(&now); - sleep_for_us = (wakeup - now)/1000; - } - } - - // Do remaining sleep in busy_wait - if (!async_event && - sleep_for_us > LF_RUNTIME_OVERHEAD_US) { - k_busy_wait((uint32_t) (sleep_for_us - LF_RUNTIME_OVERHEAD_US)); - } - - if (async_event) { - // Cancel the outstanding alarm - counter_cancel_channel_alarm(counter_dev, LF_TIMER_ALARM_CHANNEL); - async_event = false; - return -1; - } else { - return 0; - } -} - -/** - * We notify of async events by setting the flag and giving the semaphore. - */ -int _lf_single_threaded_notify_of_event() { - async_event = true; - k_sem_give(&semaphore); - return 0; -} - -#endif -#endif diff --git a/core/port.c b/core/port.c index 3d0dd0348..08235ef05 100644 --- a/core/port.c +++ b/core/port.c @@ -42,13 +42,13 @@ * @param b Pointer to the second integer. 
*/ int compare_sizes(const void* a, const void* b) { - if (*(size_t*)a < *(size_t*)b) { - return -1; - } else if (*(size_t*)a > *(size_t*)b) { - return 1; - } else { - return 0; - } + if (*(size_t*)a < *(size_t*)b) { + return -1; + } else if (*(size_t*)a > *(size_t*)b) { + return 1; + } else { + return 0; + } } /** @@ -59,42 +59,38 @@ int compare_sizes(const void* a, const void* b) { * a multiport). */ lf_multiport_iterator_t _lf_multiport_iterator_impl(lf_port_base_t** port, int width) { - // NOTE: Synchronization is not required because all writers must have - // completed by the time this is invoked. - struct lf_multiport_iterator_t result = (lf_multiport_iterator_t) { - .next = -1, - .idx = -1, // Indicate that lf_multiport_next() has not been called. - .port = port, - .width = width - }; - if (width <= 0) return result; - if (port[0]->sparse_record && port[0]->sparse_record->size >= 0) { - // Sparse record is enabled and ready to use. - if (port[0]->sparse_record->size > 0) { - // Need to sort it first (if the length is greater than 1). - if (port[0]->sparse_record->size > 1) { - qsort( - &port[0]->sparse_record->present_channels[0], - (size_t)port[0]->sparse_record->size, - sizeof(size_t), - &compare_sizes - ); - } - // NOTE: Following cast is unsafe if there more than 2^31 channels. - result.next = (int)port[0]->sparse_record->present_channels[0]; - } - return result; - } - // Fallback is to iterate over all port structs representing channels. - int start = 0; - while(start < width) { - if (port[start]->is_present) { - result.next = start; - return result; - } - start++; - } - return result; + // NOTE: Synchronization is not required because all writers must have + // completed by the time this is invoked. + struct lf_multiport_iterator_t result = + (lf_multiport_iterator_t){.next = -1, + .idx = -1, // Indicate that lf_multiport_next() has not been called. 
+ .port = port, + .width = width}; + if (width <= 0) + return result; + if (port[0]->sparse_record && port[0]->sparse_record->size >= 0) { + // Sparse record is enabled and ready to use. + if (port[0]->sparse_record->size > 0) { + // Need to sort it first (if the length is greater than 1). + if (port[0]->sparse_record->size > 1) { + qsort(&port[0]->sparse_record->present_channels[0], (size_t)port[0]->sparse_record->size, sizeof(size_t), + &compare_sizes); + } + // NOTE: Following cast is unsafe if there more than 2^31 channels. + result.next = (int)port[0]->sparse_record->present_channels[0]; + } + return result; + } + // Fallback is to iterate over all port structs representing channels. + int start = 0; + while (start < width) { + if (port[start]->is_present) { + result.next = start; + return result; + } + start++; + } + return result; } /** @@ -103,40 +99,39 @@ lf_multiport_iterator_t _lf_multiport_iterator_impl(lf_port_base_t** port, int w * @param iterator The iterator. */ int lf_multiport_next(lf_multiport_iterator_t* iterator) { - // If the iterator has not been used, return next. - if (iterator->idx < 0) { - iterator->idx = 0; - return iterator->next; - } - // If the iterator is already exhausted, return. - if (iterator->next < 0 || iterator->width <= 0) { - return -1; - } - struct lf_sparse_io_record_t* sparse_record - = iterator->port[iterator->idx]->sparse_record; - if (sparse_record && sparse_record->size >= 0) { - // Sparse record is enabled and ready to use. - iterator->idx++; - if (iterator->idx >= sparse_record->size) { - // No more present channels. - iterator->next = -1; - } else { - // NOTE: Following cast is unsafe if there more than 2^31 channels. - iterator->next = (int)sparse_record->present_channels[iterator->idx]; - } - return iterator->next; - } else { - // Fall back to iterate over all port structs representing channels. 
- int start = iterator->next + 1; - while(start < iterator->width) { - if (iterator->port[start]->is_present) { - iterator->next = start; - return iterator->next; - } - start++; - } - // No more present channels found. - iterator->next = -1; - return iterator->next; - } + // If the iterator has not been used, return next. + if (iterator->idx < 0) { + iterator->idx = 0; + return iterator->next; + } + // If the iterator is already exhausted, return. + if (iterator->next < 0 || iterator->width <= 0) { + return -1; + } + struct lf_sparse_io_record_t* sparse_record = iterator->port[iterator->idx]->sparse_record; + if (sparse_record && sparse_record->size >= 0) { + // Sparse record is enabled and ready to use. + iterator->idx++; + if (iterator->idx >= sparse_record->size) { + // No more present channels. + iterator->next = -1; + } else { + // NOTE: Following cast is unsafe if there more than 2^31 channels. + iterator->next = (int)sparse_record->present_channels[iterator->idx]; + } + return iterator->next; + } else { + // Fall back to iterate over all port structs representing channels. + int start = iterator->next + 1; + while (start < iterator->width) { + if (iterator->port[start]->is_present) { + iterator->next = start; + return iterator->next; + } + start++; + } + // No more present channels found. + iterator->next = -1; + return iterator->next; + } } diff --git a/core/reactor.c b/core/reactor.c index 9e2092dcd..8bf3b9460 100644 --- a/core/reactor.c +++ b/core/reactor.c @@ -1,6 +1,6 @@ /** * @brief Runtime implementation for the single-threaded version of the C target of Lingua Franca. - * + * * @author{Edward A. 
Lee } * @author{Marten Lohstroh } * @author{Soroush Bateni } @@ -14,7 +14,7 @@ #include "reactor.h" #include "lf_types.h" -#include "platform.h" +#include "low_level_platform.h" #include "reactor_common.h" #include "environment.h" @@ -26,164 +26,166 @@ // Global variable defined in tag.c: extern instant_t start_time; +int lf_thread_id() { return 0; } +int lf_mutex_unlock(lf_mutex_t* mutex) { return 0; } +int lf_mutex_init(lf_mutex_t* mutex) { return 0; } +int lf_mutex_lock(lf_mutex_t* mutex) { return 0; } + // Defined in reactor_common.c: extern bool fast; extern bool keepalive_specified; void lf_set_present(lf_port_base_t* port) { - if (!port->source_reactor) return; - environment_t *env = port->source_reactor->environment; - bool* is_present_field = &port->is_present; - if (env->is_present_fields_abbreviated_size < env->is_present_fields_size) { - env->is_present_fields_abbreviated[env->is_present_fields_abbreviated_size] - = is_present_field; - } - env->is_present_fields_abbreviated_size++; - *is_present_field = true; - - // Support for sparse destination multiports. - if(port->sparse_record - && port->destination_channel >= 0 - && port->sparse_record->size >= 0) { - size_t next = port->sparse_record->size++; - if (next >= port->sparse_record->capacity) { - // Buffer is full. Have to revert to the classic iteration. - port->sparse_record->size = -1; - } else { - port->sparse_record->present_channels[next] - = port->destination_channel; - } + if (!port->source_reactor) + return; + environment_t* env = port->source_reactor->environment; + bool* is_present_field = &port->is_present; + if (env->is_present_fields_abbreviated_size < env->is_present_fields_size) { + env->is_present_fields_abbreviated[env->is_present_fields_abbreviated_size] = is_present_field; + } + env->is_present_fields_abbreviated_size++; + *is_present_field = true; + + // Support for sparse destination multiports. 
+ if (port->sparse_record && port->destination_channel >= 0 && port->sparse_record->size >= 0) { + size_t next = port->sparse_record->size++; + if (next >= port->sparse_record->capacity) { + // Buffer is full. Have to revert to the classic iteration. + port->sparse_record->size = -1; + } else { + port->sparse_record->present_channels[next] = port->destination_channel; } + } } /** - * Wait until physical time matches the given logical time or the time of a - * concurrently scheduled physical action, which might be earlier than the + * Wait until physical time matches the given logical time or the time of a + * concurrently scheduled physical action, which might be earlier than the * requested logical time. * @param env Environment in which we are executing * @return 0 if the wait was completed, -1 if it was skipped or interrupted. - */ + */ int wait_until(environment_t* env, instant_t wakeup_time) { - if (!fast) { - LF_PRINT_LOG("Waiting for elapsed logical time " PRINTF_TIME ".", wakeup_time - start_time); - return lf_clock_interruptable_sleep_until_locked(env, wakeup_time); - } - return 0; + if (!fast) { + LF_PRINT_LOG("Waiting for elapsed logical time " PRINTF_TIME ".", wakeup_time - start_time); + return lf_clock_interruptable_sleep_until_locked(env, wakeup_time); + } + return 0; } #ifndef NDEBUG void lf_print_snapshot(environment_t* env) { - if(LOG_LEVEL > LOG_LEVEL_LOG) { - LF_PRINT_DEBUG(">>> START Snapshot"); - pqueue_dump(env->reaction_q, env->reaction_q->prt); - LF_PRINT_DEBUG(">>> END Snapshot"); - } + if (LOG_LEVEL > LOG_LEVEL_LOG) { + LF_PRINT_DEBUG(">>> START Snapshot"); + pqueue_dump(env->reaction_q, env->reaction_q->prt); + LF_PRINT_DEBUG(">>> END Snapshot"); + } } -#else // NDEBUG +#else // NDEBUG void lf_print_snapshot(environment_t* env) { - // Do nothing. + // Do nothing. 
} #endif // NDEBUG void _lf_trigger_reaction(environment_t* env, reaction_t* reaction, int worker_number) { - assert(env != GLOBAL_ENVIRONMENT); + assert(env != GLOBAL_ENVIRONMENT); #ifdef MODAL_REACTORS - // Check if reaction is disabled by mode inactivity - if (!_lf_mode_is_active(reaction->mode)) { - LF_PRINT_DEBUG("Suppressing downstream reaction %s due inactivity of mode %s.", reaction->name, reaction->mode->name); - return; // Suppress reaction by preventing entering reaction queue - } + // Check if reaction is disabled by mode inactivity + if (!_lf_mode_is_active(reaction->mode)) { + LF_PRINT_DEBUG("Suppressing downstream reaction %s due inactivity of mode %s.", reaction->name, + reaction->mode->name); + return; // Suppress reaction by preventing entering reaction queue + } #endif - // Do not enqueue this reaction twice. - if (reaction->status == inactive) { - LF_PRINT_DEBUG("Enqueueing downstream reaction %s, which has level %lld.", - reaction->name, reaction->index & 0xffffLL); - reaction->status = queued; - if (pqueue_insert(env->reaction_q, reaction) != 0) { - lf_print_error_and_exit("Could not insert reaction into reaction_q"); - } + // Do not enqueue this reaction twice. + if (reaction->status == inactive) { + LF_PRINT_DEBUG("Enqueueing downstream reaction %s, which has level %lld.", reaction->name, + reaction->index & 0xffffLL); + reaction->status = queued; + if (pqueue_insert(env->reaction_q, reaction) != 0) { + lf_print_error_and_exit("Could not insert reaction into reaction_q"); } + } } /** * Execute all the reactions in the reaction queue at the current tag. - * + * * @param env Environment in which we are executing * @return Returns 1 if the execution should continue and 0 if the execution * should stop. */ int _lf_do_step(environment_t* env) { - assert(env != GLOBAL_ENVIRONMENT); - - // Invoke reactions. 
- while(pqueue_size(env->reaction_q) > 0) { - // lf_print_snapshot(); - reaction_t* reaction = (reaction_t*)pqueue_pop(env->reaction_q); - reaction->status = running; - - LF_PRINT_LOG("Invoking reaction %s at elapsed logical tag " PRINTF_TAG ".", - reaction->name, - env->current_tag.time - start_time, env->current_tag.microstep); - - bool violation = false; - - // FIXME: These comments look outdated. We may need to update them. - // If the reaction has a deadline, compare to current physical time - // and invoke the deadline violation reaction instead of the reaction function - // if a violation has occurred. Note that the violation reaction will be invoked - // at most once per logical time value. If the violation reaction triggers the - // same reaction at the current time value, even if at a future superdense time, - // then the reaction will be invoked and the violation reaction will not be invoked again. - if (reaction->deadline >= 0LL) { - // Get the current physical time. - instant_t physical_time = lf_time_physical(); - // FIXME: These comments look outdated. We may need to update them. - // Check for deadline violation. - // There are currently two distinct deadline mechanisms: - // local deadlines are defined with the reaction; - // container deadlines are defined in the container. - // They can have different deadlines, so we have to check both. - // Handle the local deadline first. - if (reaction->deadline == 0 || physical_time > env->current_tag.time + reaction->deadline) { - LF_PRINT_LOG("Deadline violation. Invoking deadline handler."); - tracepoint_reaction_deadline_missed(env->trace, reaction, 0); - // Deadline violation has occurred. - violation = true; - // Invoke the local handler, if there is one. - reaction_function_t handler = reaction->deadline_violation_handler; - if (handler != NULL) { - (*handler)(reaction->self); - // If the reaction produced outputs, put the resulting - // triggered reactions into the queue. 
- schedule_output_reactions(env, reaction, 0); - } - } + assert(env != GLOBAL_ENVIRONMENT); + + // Invoke reactions. + while (pqueue_size(env->reaction_q) > 0) { + // lf_print_snapshot(); + reaction_t* reaction = (reaction_t*)pqueue_pop(env->reaction_q); + reaction->status = running; + + LF_PRINT_LOG("Invoking reaction %s at elapsed logical tag " PRINTF_TAG ".", reaction->name, + env->current_tag.time - start_time, env->current_tag.microstep); + + bool violation = false; + + // FIXME: These comments look outdated. We may need to update them. + // If the reaction has a deadline, compare to current physical time + // and invoke the deadline violation reaction instead of the reaction function + // if a violation has occurred. Note that the violation reaction will be invoked + // at most once per logical time value. If the violation reaction triggers the + // same reaction at the current time value, even if at a future superdense time, + // then the reaction will be invoked and the violation reaction will not be invoked again. + if (reaction->deadline >= 0LL) { + // Get the current physical time. + instant_t physical_time = lf_time_physical(); + // FIXME: These comments look outdated. We may need to update them. + // Check for deadline violation. + // There are currently two distinct deadline mechanisms: + // local deadlines are defined with the reaction; + // container deadlines are defined in the container. + // They can have different deadlines, so we have to check both. + // Handle the local deadline first. + if (reaction->deadline == 0 || physical_time > env->current_tag.time + reaction->deadline) { + LF_PRINT_LOG("Deadline violation. Invoking deadline handler."); + tracepoint_reaction_deadline_missed(env, reaction, 0); + // Deadline violation has occurred. + violation = true; + // Invoke the local handler, if there is one. 
+ reaction_function_t handler = reaction->deadline_violation_handler; + if (handler != NULL) { + (*handler)(reaction->self); + // If the reaction produced outputs, put the resulting + // triggered reactions into the queue. + schedule_output_reactions(env, reaction, 0); } + } + } - if (!violation) { - // Invoke the reaction function. - _lf_invoke_reaction(env, reaction, 0); // 0 indicates single-threaded. + if (!violation) { + // Invoke the reaction function. + _lf_invoke_reaction(env, reaction, 0); // 0 indicates single-threaded. - // If the reaction produced outputs, put the resulting triggered - // reactions into the queue. - schedule_output_reactions(env, reaction, 0); - } - // There cannot be any subsequent events that trigger this reaction at the - // current tag, so it is safe to conclude that it is now inactive. - reaction->status = inactive; + // If the reaction produced outputs, put the resulting triggered + // reactions into the queue. + schedule_output_reactions(env, reaction, 0); } + // There cannot be any subsequent events that trigger this reaction at the + // current tag, so it is safe to conclude that it is now inactive. + reaction->status = inactive; + } #ifdef MODAL_REACTORS - // At the end of the step, perform mode transitions - _lf_handle_mode_changes(env); + // At the end of the step, perform mode transitions + _lf_handle_mode_changes(env); #endif - if (lf_tag_compare(env->current_tag, env->stop_tag) >= 0) { - return 0; - } + if (lf_tag_compare(env->current_tag, env->stop_tag) >= 0) { + return 0; + } - return 1; + return 1; } // Wait until physical time matches or exceeds the time of the least tag @@ -202,93 +204,89 @@ int _lf_do_step(environment_t* env) { // the keepalive command-line option has not been given. // Otherwise, return 1. int next(environment_t* env) { - assert(env != GLOBAL_ENVIRONMENT); - - // Enter the critical section and do not leave until we have - // determined which tag to commit to and start invoking reactions for. 
- LF_CRITICAL_SECTION_ENTER(env); - event_t* event = (event_t*)pqueue_peek(env->event_q); - //pqueue_dump(event_q, event_q->prt); - // If there is no next event and -keepalive has been specified - // on the command line, then we will wait the maximum time possible. - tag_t next_tag = FOREVER_TAG_INITIALIZER; - if (event == NULL) { - // No event in the queue. - if (!keepalive_specified) { - lf_set_stop_tag( env, - (tag_t){.time=env->current_tag.time, .microstep=env->current_tag.microstep+1} - ); - } - } else { - next_tag.time = event->time; - // Deduce the microstep - if (next_tag.time == env->current_tag.time) { - next_tag.microstep = env->current_tag.microstep + 1; - } else { - next_tag.microstep = 0; - } - } - - if (lf_is_tag_after_stop_tag(env, next_tag)) { - // Cannot process events after the stop tag. - next_tag = env->stop_tag; - } - - LF_PRINT_LOG("Next event (elapsed) time is " PRINTF_TIME ".", next_tag.time - start_time); - // Wait until physical time >= event.time. - int finished_sleep = wait_until(env, next_tag.time); - LF_PRINT_LOG("Next event (elapsed) time is " PRINTF_TIME ".", next_tag.time - start_time); - if (finished_sleep != 0) { - LF_PRINT_DEBUG("***** wait_until was interrupted."); - // Sleep was interrupted. This could happen when a physical action - // gets scheduled from an interrupt service routine. - // In this case, check the event queue again to make sure to - // advance time to the correct tag. - LF_CRITICAL_SECTION_EXIT(env); - return 1; + assert(env != GLOBAL_ENVIRONMENT); + + // Enter the critical section and do not leave until we have + // determined which tag to commit to and start invoking reactions for. + LF_CRITICAL_SECTION_ENTER(env); + event_t* event = (event_t*)pqueue_peek(env->event_q); + // pqueue_dump(event_q, event_q->prt); + // If there is no next event and -keepalive has been specified + // on the command line, then we will wait the maximum time possible. 
+ tag_t next_tag = FOREVER_TAG_INITIALIZER; + if (event == NULL) { + // No event in the queue. + if (!keepalive_specified) { + lf_set_stop_tag(env, (tag_t){.time = env->current_tag.time, .microstep = env->current_tag.microstep + 1}); } - // Advance current time to match that of the first event on the queue. - // We can now leave the critical section. Any events that will be added - // to the queue asynchronously will have a later tag than the current one. - _lf_advance_logical_time(env, next_tag.time); - - // Trigger shutdown reactions if appropriate. - if (lf_tag_compare(env->current_tag, env->stop_tag) >= 0) { - _lf_trigger_shutdown_reactions(env); + } else { + next_tag.time = event->time; + // Deduce the microstep + if (next_tag.time == env->current_tag.time) { + next_tag.microstep = env->current_tag.microstep + 1; + } else { + next_tag.microstep = 0; } - - // Invoke code that must execute before starting a new logical time round, - // such as initializing outputs to be absent. - _lf_start_time_step(env); - - // Pop all events from event_q with timestamp equal to env->current_tag.time, - // extract all the reactions triggered by these events, and - // stick them into the reaction queue. - _lf_pop_events(env); + } + + if (lf_is_tag_after_stop_tag(env, next_tag)) { + // Cannot process events after the stop tag. + next_tag = env->stop_tag; + } + + LF_PRINT_LOG("Next event (elapsed) time is " PRINTF_TIME ".", next_tag.time - start_time); + // Wait until physical time >= event.time. + int finished_sleep = wait_until(env, next_tag.time); + LF_PRINT_LOG("Next event (elapsed) time is " PRINTF_TIME ".", next_tag.time - start_time); + if (finished_sleep != 0) { + LF_PRINT_DEBUG("***** wait_until was interrupted."); + // Sleep was interrupted. This could happen when a physical action + // gets scheduled from an interrupt service routine. + // In this case, check the event queue again to make sure to + // advance time to the correct tag. 
LF_CRITICAL_SECTION_EXIT(env); - - return _lf_do_step(env); + return 1; + } + // Advance current time to match that of the first event on the queue. + // We can now leave the critical section. Any events that will be added + // to the queue asynchronously will have a later tag than the current one. + _lf_advance_logical_time(env, next_tag.time); + + // Trigger shutdown reactions if appropriate. + if (lf_tag_compare(env->current_tag, env->stop_tag) >= 0) { + _lf_trigger_shutdown_reactions(env); + } + + // Invoke code that must execute before starting a new logical time round, + // such as initializing outputs to be absent. + _lf_start_time_step(env); + + // Pop all events from event_q with timestamp equal to env->current_tag.time, + // extract all the reactions triggered by these events, and + // stick them into the reaction queue. + _lf_pop_events(env); + LF_CRITICAL_SECTION_EXIT(env); + + return _lf_do_step(env); } void lf_request_stop(void) { - // There is only one enclave, so get its environment. - environment_t *env; - int num_environments = _lf_get_environments(&env); - assert(num_environments == 1); - - tag_t new_stop_tag; - new_stop_tag.time = env->current_tag.time; - new_stop_tag.microstep = env->current_tag.microstep + 1; - lf_set_stop_tag(env, new_stop_tag); + // There is only one enclave, so get its environment. + environment_t* env; + int num_environments = _lf_get_environments(&env); + assert(num_environments == 1); + + tag_t new_stop_tag; + new_stop_tag.time = env->current_tag.time; + new_stop_tag.microstep = env->current_tag.microstep + 1; + lf_set_stop_tag(env, new_stop_tag); } /** * Return false. * @param reaction The reaction. */ -bool _lf_is_blocked_by_executing_reaction(void) { - return false; -} +bool _lf_is_blocked_by_executing_reaction(void) { return false; } /** * The main loop of the LF program. @@ -303,82 +301,76 @@ bool _lf_is_blocked_by_executing_reaction(void) { * at compile time. 
*/ int lf_reactor_c_main(int argc, const char* argv[]) { - // Invoke the function that optionally provides default command-line options. - lf_set_default_command_line_options(); - _lf_initialize_clock(); - - LF_PRINT_DEBUG("Processing command line arguments."); - if (process_args(default_argc, default_argv) - && process_args(argc, argv)) { - LF_PRINT_DEBUG("Processed command line arguments."); - LF_PRINT_DEBUG("Registering the termination function."); - if (atexit(termination) != 0) { - lf_print_warning("Failed to register termination function!"); - } - // The above handles only "normal" termination (via a call to exit). - // As a consequence, we need to also trap Ctrl-C, which issues a SIGINT, - // and cause it to call exit. - // Embedded platforms with NO_TTY have no concept of a signal; for those, we exclude this call. + // Invoke the function that optionally provides default command-line options. + lf_set_default_command_line_options(); + _lf_initialize_clock(); + + LF_PRINT_DEBUG("Processing command line arguments."); + if (process_args(default_argc, default_argv) && process_args(argc, argv)) { + LF_PRINT_DEBUG("Processed command line arguments."); + LF_PRINT_DEBUG("Registering the termination function."); + if (atexit(termination) != 0) { + lf_print_warning("Failed to register termination function!"); + } + // The above handles only "normal" termination (via a call to exit). + // As a consequence, we need to also trap Ctrl-C, which issues a SIGINT, + // and cause it to call exit. + // Embedded platforms with NO_TTY have no concept of a signal; for those, we exclude this call. #ifndef NO_TTY - signal(SIGINT, exit); + signal(SIGINT, exit); +#endif + // Create and initialize the environment + lf_create_environments(); // code-generated function + environment_t* env; + int num_environments = _lf_get_environments(&env); + LF_ASSERT(num_environments == 1, "Found %d environments. 
Only 1 can be used with the single-threaded runtime", + num_environments); + + LF_PRINT_DEBUG("Initializing."); + initialize_global(); + // Set start time + start_time = lf_time_physical(); +#ifndef FEDERATED + lf_tracing_set_start_time(start_time); #endif - // Create and initialize the environment - lf_create_environments(); // code-generated function - environment_t *env; - int num_environments = _lf_get_environments(&env); - LF_ASSERT(num_environments == 1, - "Found %d environments. Only 1 can be used with the single-threaded runtime", num_environments); - - LF_PRINT_DEBUG("Initializing."); - initialize_global(); - // Set start time - start_time = lf_time_physical(); - - LF_PRINT_DEBUG("NOTE: FOREVER is displayed as " PRINTF_TAG " and NEVER as " PRINTF_TAG, - FOREVER_TAG.time - start_time, FOREVER_TAG.microstep, - NEVER_TAG.time - start_time, 0); - - environment_init_tags(env, start_time, duration); - // Start tracing if enabled. - start_trace(env->trace); + + LF_PRINT_DEBUG("NOTE: FOREVER is displayed as " PRINTF_TAG " and NEVER as " PRINTF_TAG, + FOREVER_TAG.time - start_time, FOREVER_TAG.microstep, NEVER_TAG.time - start_time, 0); + + environment_init_tags(env, start_time, duration); #ifdef MODAL_REACTORS - // Set up modal infrastructure - _lf_initialize_modes(env); + // Set up modal infrastructure + _lf_initialize_modes(env); #endif - _lf_trigger_startup_reactions(env); - _lf_initialize_timers(env); - // If the stop_tag is (0,0), also insert the shutdown - // reactions. This can only happen if the timeout time - // was set to 0. - if (lf_tag_compare(env->current_tag, env->stop_tag) >= 0) { - _lf_trigger_shutdown_reactions(env); - } - LF_PRINT_DEBUG("Running the program's main loop."); - // Handle reactions triggered at time (T,m). 
- env->execution_started = true; - if (_lf_do_step(env)) { - while (next(env) != 0); - } - _lf_normal_termination = true; - return 0; - } else { - return -1; + _lf_trigger_startup_reactions(env); + _lf_initialize_timers(env); + // If the stop_tag is (0,0), also insert the shutdown + // reactions. This can only happen if the timeout time + // was set to 0. + if (lf_tag_compare(env->current_tag, env->stop_tag) >= 0) { + _lf_trigger_shutdown_reactions(env); } + LF_PRINT_DEBUG("Running the program's main loop."); + // Handle reactions triggered at time (T,m). + env->execution_started = true; + if (_lf_do_step(env)) { + while (next(env) != 0) + ; + } + _lf_normal_termination = true; + return 0; + } else { + return -1; + } } /** * @brief Notify of new event by calling the single-threaded platform API * @param env Environment in which we are executing. */ -int lf_notify_of_event(environment_t* env) { - return _lf_single_threaded_notify_of_event(); -} +int lf_notify_of_event(environment_t* env) { return _lf_single_threaded_notify_of_event(); } -int lf_critical_section_enter(environment_t* env) { - return lf_disable_interrupts_nested(); -} +int lf_critical_section_enter(environment_t* env) { return lf_disable_interrupts_nested(); } -int lf_critical_section_exit(environment_t* env) { - return lf_enable_interrupts_nested(); -} +int lf_critical_section_exit(environment_t* env) { return lf_enable_interrupts_nested(); } #endif diff --git a/core/reactor_common.c b/core/reactor_common.c index 866e64b79..6fc3d3824 100644 --- a/core/reactor_common.c +++ b/core/reactor_common.c @@ -15,7 +15,7 @@ #include #include -#include "platform.h" +#include "low_level_platform.h" #include "api/schedule.h" #ifdef MODAL_REACTORS #include "modes.h" @@ -26,9 +26,10 @@ #include "port.h" #include "pqueue.h" #include "reactor.h" -#include "trace.h" +#include "tracepoint.h" #include "util.h" #include "vector.h" +#include "lf_core_version.h" #include "hashset/hashset.h" #include "hashset/hashset_itr.h" 
#include "environment.h" @@ -51,7 +52,7 @@ extern int _lf_count_payload_allocations; /** * @brief Global STA (safe to advance) offset uniformly applied to advancement of each * time step in federated execution. - * + * * This can be retrieved in user code by calling lf_get_stp_offset() and adjusted by * calling lf_set_stp_offset(interval_t offset). */ @@ -87,18 +88,19 @@ instant_t duration = -1LL; bool keepalive_specified = false; void* lf_allocate(size_t count, size_t size, struct allocation_record_t** head) { - void *mem = calloc(count, size); - if (mem == NULL) lf_print_error_and_exit("Out of memory!"); - if (head != NULL) { - struct allocation_record_t* record - = (allocation_record_t*)calloc(1, sizeof(allocation_record_t)); - if (record == NULL) lf_print_error_and_exit("Out of memory!"); - record->allocated = mem; - allocation_record_t* tmp = *head; // Previous head of the list or NULL. - *head = record; // New head of the list. - record->next = tmp; - } - return mem; + void* mem = calloc(count, size); + if (mem == NULL) + lf_print_error_and_exit("Out of memory!"); + if (head != NULL) { + struct allocation_record_t* record = (allocation_record_t*)calloc(1, sizeof(allocation_record_t)); + if (record == NULL) + lf_print_error_and_exit("Out of memory!"); + record->allocated = mem; + allocation_record_t* tmp = *head; // Previous head of the list or NULL. + *head = record; // New head of the list. 
+ record->next = tmp; + } + return mem; } /** @@ -107,690 +109,685 @@ void* lf_allocate(size_t count, size_t size, struct allocation_record_t** head) */ struct allocation_record_t* _lf_reactors_to_free = NULL; -self_base_t* lf_new_reactor(size_t size) { - return (self_base_t*)lf_allocate(1, size, &_lf_reactors_to_free); -} +self_base_t* lf_new_reactor(size_t size) { return (self_base_t*)lf_allocate(1, size, &_lf_reactors_to_free); } void lf_free(struct allocation_record_t** head) { - if (head == NULL) return; - struct allocation_record_t* record = *head; - while (record != NULL) { - LF_PRINT_DEBUG("Freeing memory at %p", record->allocated); - free(record->allocated); - struct allocation_record_t* tmp = record->next; - LF_PRINT_DEBUG("Freeing allocation record at %p", record); - free(record); - record = tmp; - } - *head = NULL; + if (head == NULL) + return; + struct allocation_record_t* record = *head; + while (record != NULL) { + LF_PRINT_DEBUG("Freeing memory at %p", record->allocated); + free(record->allocated); + struct allocation_record_t* tmp = record->next; + LF_PRINT_DEBUG("Freeing allocation record at %p", record); + free(record); + record = tmp; + } + *head = NULL; } -void lf_free_reactor(self_base_t *self) { - lf_free(&self->allocations); - free(self); +void lf_free_reactor(self_base_t* self) { + lf_free(&self->allocations); + free(self); } void lf_free_all_reactors(void) { - struct allocation_record_t* head = _lf_reactors_to_free; - while (head != NULL) { - lf_free_reactor((self_base_t*)head->allocated); - struct allocation_record_t* tmp = head->next; - free(head); - head = tmp; - } - _lf_reactors_to_free = NULL; + struct allocation_record_t* head = _lf_reactors_to_free; + while (head != NULL) { + lf_free_reactor((self_base_t*)head->allocated); + struct allocation_record_t* tmp = head->next; + free(head); + head = tmp; + } + _lf_reactors_to_free = NULL; } void lf_set_stop_tag(environment_t* env, tag_t tag) { - assert(env != GLOBAL_ENVIRONMENT); - if 
(lf_tag_compare(tag, env->stop_tag) < 0) { - env->stop_tag = tag; - } + assert(env != GLOBAL_ENVIRONMENT); + if (lf_tag_compare(tag, env->stop_tag) < 0) { + env->stop_tag = tag; + } } #ifdef FEDERATED_DECENTRALIZED -interval_t lf_get_stp_offset() { - return lf_fed_STA_offset; -} +interval_t lf_get_stp_offset() { return lf_fed_STA_offset; } void lf_set_stp_offset(interval_t offset) { - if (offset > 0LL) { - lf_fed_STA_offset = offset; - } + if (offset > 0LL) { + lf_fed_STA_offset = offset; + } } #endif // FEDERATED_DECENTRALIZED -void _lf_start_time_step(environment_t *env) { - assert(env != GLOBAL_ENVIRONMENT); - if (!env->execution_started) { - // Execution hasn't started, so this is probably being invoked in termination - // due to an error. - return; - } - LF_PRINT_LOG("--------- Start time step at tag " PRINTF_TAG ".", env->current_tag.time - start_time, env->current_tag.microstep); - // Handle dynamically created tokens for mutable inputs. - _lf_free_token_copies(env); - - bool** is_present_fields = env->is_present_fields_abbreviated; - int size = env->is_present_fields_abbreviated_size; - if (env->is_present_fields_abbreviated_size > env->is_present_fields_size) { - size = env->is_present_fields_size; - is_present_fields = env->is_present_fields; +void _lf_start_time_step(environment_t* env) { + assert(env != GLOBAL_ENVIRONMENT); + if (!env->execution_started) { + // Execution hasn't started, so this is probably being invoked in termination + // due to an error. + return; + } + LF_PRINT_LOG("--------- Start time step at tag " PRINTF_TAG ".", env->current_tag.time - start_time, + env->current_tag.microstep); + // Handle dynamically created tokens for mutable inputs. 
+ _lf_free_token_copies(env); + + bool** is_present_fields = env->is_present_fields_abbreviated; + int size = env->is_present_fields_abbreviated_size; + if (env->is_present_fields_abbreviated_size > env->is_present_fields_size) { + size = env->is_present_fields_size; + is_present_fields = env->is_present_fields; + } + for (int i = 0; i < size; i++) { + *is_present_fields[i] = false; + } + // Reset sparse IO record sizes to 0, if any. + if (env->sparse_io_record_sizes.start != NULL) { + for (size_t i = 0; i < vector_size(&env->sparse_io_record_sizes); i++) { + // NOTE: vector_at does not return the element at + // the index, but rather returns a pointer to that element, which is + // itself a pointer. + int** sizep = (int**)vector_at(&env->sparse_io_record_sizes, i); + if (sizep != NULL && *sizep != NULL) { + **sizep = 0; + } } - for(int i = 0; i < size; i++) { - *is_present_fields[i] = false; - } - // Reset sparse IO record sizes to 0, if any. - if (env->sparse_io_record_sizes.start != NULL) { - for (size_t i = 0; i < vector_size(&env->sparse_io_record_sizes); i++) { - // NOTE: vector_at does not return the element at - // the index, but rather returns a pointer to that element, which is - // itself a pointer. - int** sizep = (int**)vector_at(&env->sparse_io_record_sizes, i); - if (sizep != NULL && *sizep != NULL) { - **sizep = 0; - } - } - } - env->is_present_fields_abbreviated_size = 0; + } + env->is_present_fields_abbreviated_size = 0; #ifdef FEDERATED - // If the environment is the top-level one, we have some work to do. - environment_t *envs; - int num_envs = _lf_get_environments(&envs); - if (env == envs) { - // This is the top-level environment. + // If the environment is the top-level one, we have some work to do. + environment_t* envs; + int num_envs = _lf_get_environments(&envs); + if (env == envs) { + // This is the top-level environment. 
#ifdef FEDERATED_DECENTRALIZED - for (int i = 0; i < env->is_present_fields_size; i++) { - // An intended tag of NEVER_TAG indicates that it has never been set. - *env->_lf_intended_tag_fields[i] = NEVER_TAG; - } + for (int i = 0; i < env->is_present_fields_size; i++) { + // An intended tag of NEVER_TAG indicates that it has never been set. + *env->_lf_intended_tag_fields[i] = NEVER_TAG; + } #endif // FEDERATED_DECENTRALIZED - // Reset absent fields on network ports because - // their status is unknown - lf_reset_status_fields_on_input_port_triggers(); - // Signal the helper thread to reset its progress since the logical time has changed. - lf_cond_signal(&lf_current_tag_changed); - } + // Reset absent fields on network ports because + // their status is unknown + lf_reset_status_fields_on_input_port_triggers(); + // Signal the helper thread to reset its progress since the logical time has changed. + lf_cond_signal(&lf_current_tag_changed); + } #endif // FEDERATED } bool lf_is_tag_after_stop_tag(environment_t* env, tag_t tag) { - assert(env != GLOBAL_ENVIRONMENT); - return (lf_tag_compare(tag, env->stop_tag) > 0); + assert(env != GLOBAL_ENVIRONMENT); + return (lf_tag_compare(tag, env->stop_tag) > 0); } -void _lf_pop_events(environment_t *env) { - assert(env != GLOBAL_ENVIRONMENT); +void _lf_pop_events(environment_t* env) { + assert(env != GLOBAL_ENVIRONMENT); #ifdef MODAL_REACTORS - _lf_handle_mode_triggered_reactions(env); + _lf_handle_mode_triggered_reactions(env); #endif - event_t* event = (event_t*)pqueue_peek(env->event_q); - while(event != NULL && event->time == env->current_tag.time) { - event = (event_t*)pqueue_pop(env->event_q); - - if (event->is_dummy) { - LF_PRINT_DEBUG("Popped dummy event from the event queue."); - if (event->next != NULL) { - LF_PRINT_DEBUG("Putting event from the event queue for the next microstep."); - pqueue_insert(env->next_q, event->next); - } - lf_recycle_event(env, event); - // Peek at the next event in the event queue. 
- event = (event_t*)pqueue_peek(env->event_q); - continue; - } + event_t* event = (event_t*)pqueue_peek(env->event_q); + while (event != NULL && event->time == env->current_tag.time) { + event = (event_t*)pqueue_pop(env->event_q); + + if (event->is_dummy) { + LF_PRINT_DEBUG("Popped dummy event from the event queue."); + if (event->next != NULL) { + LF_PRINT_DEBUG("Putting event from the event queue for the next microstep."); + pqueue_insert(env->next_q, event->next); + } + lf_recycle_event(env, event); + // Peek at the next event in the event queue. + event = (event_t*)pqueue_peek(env->event_q); + continue; + } #ifdef MODAL_REACTORS - // If this event is associated with an inactive mode it should haven been suspended and no longer on the event queue. - // NOTE: This should not be possible - if (!_lf_mode_is_active(event->trigger->mode)) { - lf_print_warning("Assumption violated. There is an event on the event queue that is associated to an inactive mode."); - } + // If this event is associated with an inactive mode it should haven been suspended and no longer on the event + // queue. NOTE: This should not be possible + if (!_lf_mode_is_active(event->trigger->mode)) { + lf_print_warning( + "Assumption violated. There is an event on the event queue that is associated to an inactive mode."); + } #endif - lf_token_t *token = event->token; + lf_token_t* token = event->token; - // Put the corresponding reactions onto the reaction queue. - for (int i = 0; i < event->trigger->number_of_reactions; i++) { - reaction_t *reaction = event->trigger->reactions[i]; - // Do not enqueue this reaction twice. - if (reaction->status == inactive) { + // Put the corresponding reactions onto the reaction queue. + for (int i = 0; i < event->trigger->number_of_reactions; i++) { + reaction_t* reaction = event->trigger->reactions[i]; + // Do not enqueue this reaction twice. 
+ if (reaction->status == inactive) { #ifdef FEDERATED_DECENTRALIZED - // In federated execution, an intended tag that is not (NEVER, 0) - // indicates that this particular event is triggered by a network message. - // The intended tag is set in handle_tagged_message in federate.c whenever - // a tagged message arrives from another federate. - if (event->intended_tag.time != NEVER) { - // If the intended tag of the event is actually set, - // transfer the intended tag to the trigger so that - // the reaction can access the value. - event->trigger->intended_tag = event->intended_tag; - // And check if it is in the past compared to the current tag. - if (lf_tag_compare(event->intended_tag, env->current_tag) < 0) { - // Mark the triggered reaction with a STP violation - reaction->is_STP_violated = true; - LF_PRINT_LOG("Trigger %p has violated the reaction's STP offset. Intended tag: " PRINTF_TAG ". Current tag: " PRINTF_TAG, - event->trigger, - event->intended_tag.time - start_time, event->intended_tag.microstep, - env->current_tag.time - start_time, env->current_tag.microstep); - // Need to update the last_known_status_tag of the port because otherwise, - // the MLAA could get stuck, causing the program to lock up. - // This should not call update_last_known_status_on_input_port because we - // are starting a new tag step execution, so there are no reactions blocked on this input. - if (lf_tag_compare(env->current_tag, event->trigger->last_known_status_tag) > 0) { - event->trigger->last_known_status_tag = env->current_tag; - } - } - } + // In federated execution, an intended tag that is not (NEVER, 0) + // indicates that this particular event is triggered by a network message. + // The intended tag is set in handle_tagged_message in federate.c whenever + // a tagged message arrives from another federate. 
+ if (event->intended_tag.time != NEVER) { + // If the intended tag of the event is actually set, + // transfer the intended tag to the trigger so that + // the reaction can access the value. + event->trigger->intended_tag = event->intended_tag; + // And check if it is in the past compared to the current tag. + if (lf_tag_compare(event->intended_tag, env->current_tag) < 0) { + // Mark the triggered reaction with a STP violation + reaction->is_STP_violated = true; + LF_PRINT_LOG("Trigger %p has violated the reaction's STP offset. Intended tag: " PRINTF_TAG + ". Current tag: " PRINTF_TAG, + event->trigger, event->intended_tag.time - start_time, event->intended_tag.microstep, + env->current_tag.time - start_time, env->current_tag.microstep); + // Need to update the last_known_status_tag of the port because otherwise, + // the MLAA could get stuck, causing the program to lock up. + // This should not call update_last_known_status_on_input_port because we + // are starting a new tag step execution, so there are no reactions blocked on this input. 
+ if (lf_tag_compare(env->current_tag, event->trigger->last_known_status_tag) > 0) { + event->trigger->last_known_status_tag = env->current_tag; + } + } + } #endif #ifdef MODAL_REACTORS - // Check if reaction is disabled by mode inactivity - if (!_lf_mode_is_active(reaction->mode)) { - LF_PRINT_DEBUG("Suppressing reaction %s due inactive mode.", reaction->name); - continue; // Suppress reaction by preventing entering reaction queue - } -#endif - LF_PRINT_DEBUG("Triggering reaction %s.", reaction->name); - _lf_trigger_reaction(env, reaction, -1); - } else { - LF_PRINT_DEBUG("Reaction is already triggered: %s", reaction->name); - } + // Check if reaction is disabled by mode inactivity + if (!_lf_mode_is_active(reaction->mode)) { + LF_PRINT_DEBUG("Suppressing reaction %s due inactive mode.", reaction->name); + continue; // Suppress reaction by preventing entering reaction queue } +#endif + LF_PRINT_DEBUG("Triggering reaction %s.", reaction->name); + _lf_trigger_reaction(env, reaction, -1); + } else { + LF_PRINT_DEBUG("Reaction is already triggered: %s", reaction->name); + } + } - // Mark the trigger present. - event->trigger->status = present; + // Mark the trigger present. + event->trigger->status = present; - // If the trigger is a periodic timer, create a new event for its next execution. - if (event->trigger->is_timer && event->trigger->period > 0LL) { - // Reschedule the trigger. - lf_schedule_trigger(env, event->trigger, event->trigger->period, NULL); - } + // If the trigger is a periodic timer, create a new event for its next execution. + if (event->trigger->is_timer && event->trigger->period > 0LL) { + // Reschedule the trigger. + lf_schedule_trigger(env, event->trigger, event->trigger->period, NULL); + } - // Copy the token pointer into the trigger struct so that the - // reactions can access it. This overwrites the previous template token, - // for which we decrement the reference count. 
- _lf_replace_template_token((token_template_t*)event->trigger, token); + // Copy the token pointer into the trigger struct so that the + // reactions can access it. This overwrites the previous template token, + // for which we decrement the reference count. + _lf_replace_template_token((token_template_t*)event->trigger, token); - // Decrement the reference count because the event queue no longer needs this token. - // This has to be done after the above call to _lf_replace_template_token because - // that call will increment the reference count and we need to not let the token be - // freed prematurely. - _lf_done_using(token); + // Decrement the reference count because the event queue no longer needs this token. + // This has to be done after the above call to _lf_replace_template_token because + // that call will increment the reference count and we need to not let the token be + // freed prematurely. + _lf_done_using(token); - // Mark the trigger present. - event->trigger->status = present; + // Mark the trigger present. + event->trigger->status = present; - // If this event points to a next event, insert it into the next queue. - if (event->next != NULL) { - // Insert the next event into the next queue. - pqueue_insert(env->next_q, event->next); - } + // If this event points to a next event, insert it into the next queue. + if (event->next != NULL) { + // Insert the next event into the next queue. + pqueue_insert(env->next_q, event->next); + } - lf_recycle_event(env, event); + lf_recycle_event(env, event); - // Peek at the next event in the event queue. - event = (event_t*)pqueue_peek(env->event_q); - }; + // Peek at the next event in the event queue. 
+ event = (event_t*)pqueue_peek(env->event_q); + }; - LF_PRINT_DEBUG("There are %zu events deferred to the next microstep.", pqueue_size(env->next_q)); + LF_PRINT_DEBUG("There are %zu events deferred to the next microstep.", pqueue_size(env->next_q)); - // After populating the reaction queue, see if there are things on the - // next queue to put back into the event queue. - while(pqueue_peek(env->next_q) != NULL) { - pqueue_insert(env->event_q, pqueue_pop(env->next_q)); - } + // After populating the reaction queue, see if there are things on the + // next queue to put back into the event queue. + while (pqueue_peek(env->next_q) != NULL) { + pqueue_insert(env->event_q, pqueue_pop(env->next_q)); + } } event_t* lf_get_new_event(environment_t* env) { - assert(env != GLOBAL_ENVIRONMENT); - // Recycle event_t structs, if possible. - event_t* e = (event_t*)pqueue_pop(env->recycle_q); - if (e == NULL) { - e = (event_t*)calloc(1, sizeof(struct event_t)); - if (e == NULL) lf_print_error_and_exit("Out of memory!"); + assert(env != GLOBAL_ENVIRONMENT); + // Recycle event_t structs, if possible. 
+ event_t* e = (event_t*)pqueue_pop(env->recycle_q); + if (e == NULL) { + e = (event_t*)calloc(1, sizeof(struct event_t)); + if (e == NULL) + lf_print_error_and_exit("Out of memory!"); #ifdef FEDERATED_DECENTRALIZED - e->intended_tag = (tag_t) { .time = NEVER, .microstep = 0u}; + e->intended_tag = (tag_t){.time = NEVER, .microstep = 0u}; #endif - } - return e; + } + return e; } void _lf_initialize_timer(environment_t* env, trigger_t* timer) { - assert(env != GLOBAL_ENVIRONMENT); - interval_t delay = 0; + assert(env != GLOBAL_ENVIRONMENT); + interval_t delay = 0; #ifdef MODAL_REACTORS - // Suspend all timer events that start in inactive mode - if (!_lf_mode_is_active(timer->mode)) { - // FIXME: The following check might not be working as - // intended - // && (timer->offset != 0 || timer->period != 0)) { - event_t* e = lf_get_new_event(env); - e->trigger = timer; - e->time = lf_time_logical(env) + timer->offset; - _lf_add_suspended_event(e); - return; - } + // Suspend all timer events that start in inactive mode + if (!_lf_mode_is_active(timer->mode)) { + // FIXME: The following check might not be working as + // intended + // && (timer->offset != 0 || timer->period != 0)) { + event_t* e = lf_get_new_event(env); + e->trigger = timer; + e->time = lf_time_logical(env) + timer->offset; + _lf_add_suspended_event(e); + return; + } #endif - if (timer->offset == 0) { - for (int i = 0; i < timer->number_of_reactions; i++) { - _lf_trigger_reaction(env, timer->reactions[i], -1); - tracepoint_schedule(env->trace, timer, 0LL); // Trace even though schedule is not called. - } - if (timer->period == 0) { - return; - } else { - // Schedule at t + period. - delay = timer->period; - } + if (timer->offset == 0) { + for (int i = 0; i < timer->number_of_reactions; i++) { + _lf_trigger_reaction(env, timer->reactions[i], -1); + tracepoint_schedule(env, timer, 0LL); // Trace even though schedule is not called. + } + if (timer->period == 0) { + return; } else { - // Schedule at t + offset. 
- delay = timer->offset; + // Schedule at t + period. + delay = timer->period; } - - // Get an event_t struct to put on the event queue. - // Recycle event_t structs, if possible. - event_t* e = lf_get_new_event(env); - e->trigger = timer; - e->time = lf_time_logical(env) + delay; - // NOTE: No lock is being held. Assuming this only happens at startup. - pqueue_insert(env->event_q, e); - tracepoint_schedule(env->trace, timer, delay); // Trace even though schedule is not called. + } else { + // Schedule at t + offset. + delay = timer->offset; + } + + // Get an event_t struct to put on the event queue. + // Recycle event_t structs, if possible. + event_t* e = lf_get_new_event(env); + e->trigger = timer; + e->time = lf_time_logical(env) + delay; + // NOTE: No lock is being held. Assuming this only happens at startup. + pqueue_insert(env->event_q, e); + tracepoint_schedule(env, timer, delay); // Trace even though schedule is not called. } void _lf_initialize_timers(environment_t* env) { - assert(env != GLOBAL_ENVIRONMENT); - for (int i = 0; i < env->timer_triggers_size; i++) { - if (env->timer_triggers[i] != NULL) { - _lf_initialize_timer(env, env->timer_triggers[i]); - } - } - - // To avoid runtime memory allocations for timer-driven programs - // the recycle queue is initialized with a single event. - if (env->timer_triggers_size > 0) { - event_t *e = lf_get_new_event(env); - lf_recycle_event(env, e); + assert(env != GLOBAL_ENVIRONMENT); + for (int i = 0; i < env->timer_triggers_size; i++) { + if (env->timer_triggers[i] != NULL) { + _lf_initialize_timer(env, env->timer_triggers[i]); } + } + + // To avoid runtime memory allocations for timer-driven programs + // the recycle queue is initialized with a single event. 
+ if (env->timer_triggers_size > 0) { + event_t* e = lf_get_new_event(env); + lf_recycle_event(env, e); + } } void _lf_trigger_startup_reactions(environment_t* env) { - assert(env != GLOBAL_ENVIRONMENT); - for (int i = 0; i < env->startup_reactions_size; i++) { - if (env->startup_reactions[i] != NULL) { - if (env->startup_reactions[i]->mode != NULL) { - // Skip reactions in modes - continue; - } - _lf_trigger_reaction(env, env->startup_reactions[i], -1); - } - } - #ifdef MODAL_REACTORS - if (env->modes) { - _lf_handle_mode_startup_reset_reactions( - env, - env->startup_reactions, env->startup_reactions_size, - NULL, 0, - env->modes->modal_reactor_states, env->modes->modal_reactor_states_size - ); + assert(env != GLOBAL_ENVIRONMENT); + for (int i = 0; i < env->startup_reactions_size; i++) { + if (env->startup_reactions[i] != NULL) { + if (env->startup_reactions[i]->mode != NULL) { + // Skip reactions in modes + continue; + } + _lf_trigger_reaction(env, env->startup_reactions[i], -1); } - #endif + } +#ifdef MODAL_REACTORS + if (env->modes) { + _lf_handle_mode_startup_reset_reactions(env, env->startup_reactions, env->startup_reactions_size, NULL, 0, + env->modes->modal_reactor_states, env->modes->modal_reactor_states_size); + } +#endif } -void _lf_trigger_shutdown_reactions(environment_t *env) { - assert(env != GLOBAL_ENVIRONMENT); - for (int i = 0; i < env->shutdown_reactions_size; i++) { - if (env->shutdown_reactions[i] != NULL) { - if (env->shutdown_reactions[i]->mode != NULL) { - // Skip reactions in modes - continue; - } - _lf_trigger_reaction(env, env->shutdown_reactions[i], -1); - } +void _lf_trigger_shutdown_reactions(environment_t* env) { + assert(env != GLOBAL_ENVIRONMENT); + for (int i = 0; i < env->shutdown_reactions_size; i++) { + if (env->shutdown_reactions[i] != NULL) { + if (env->shutdown_reactions[i]->mode != NULL) { + // Skip reactions in modes + continue; + } + _lf_trigger_reaction(env, env->shutdown_reactions[i], -1); } + } #ifdef MODAL_REACTORS - 
if (env->modes) { - _lf_handle_mode_shutdown_reactions(env, env->shutdown_reactions, env->shutdown_reactions_size); - } + if (env->modes) { + _lf_handle_mode_shutdown_reactions(env, env->shutdown_reactions, env->shutdown_reactions_size); + } #endif } void lf_recycle_event(environment_t* env, event_t* e) { - assert(env != GLOBAL_ENVIRONMENT); - e->time = 0LL; - e->trigger = NULL; - e->pos = 0; - e->token = NULL; - e->is_dummy = false; + assert(env != GLOBAL_ENVIRONMENT); + e->time = 0LL; + e->trigger = NULL; + e->pos = 0; + e->token = NULL; + e->is_dummy = false; #ifdef FEDERATED_DECENTRALIZED - e->intended_tag = (tag_t) { .time = NEVER, .microstep = 0u}; + e->intended_tag = (tag_t){.time = NEVER, .microstep = 0u}; #endif - e->next = NULL; - pqueue_insert(env->recycle_q, e); + e->next = NULL; + pqueue_insert(env->recycle_q, e); } -event_t* _lf_create_dummy_events(environment_t* env, trigger_t* trigger, instant_t time, event_t* next, microstep_t offset) { - event_t* first_dummy = lf_get_new_event(env); - event_t* dummy = first_dummy; +event_t* _lf_create_dummy_events(environment_t* env, trigger_t* trigger, instant_t time, event_t* next, + microstep_t offset) { + event_t* first_dummy = lf_get_new_event(env); + event_t* dummy = first_dummy; + dummy->time = time; + dummy->is_dummy = true; + dummy->trigger = trigger; + while (offset > 0) { + if (offset == 1) { + dummy->next = next; + break; + } + dummy->next = lf_get_new_event(env); + dummy = dummy->next; dummy->time = time; dummy->is_dummy = true; dummy->trigger = trigger; - while (offset > 0) { - if (offset == 1) { - dummy->next = next; - break; - } - dummy->next = lf_get_new_event(env); - dummy = dummy->next; - dummy->time = time; - dummy->is_dummy = true; - dummy->trigger = trigger; - offset--; - } - return first_dummy; + offset--; + } + return first_dummy; } void lf_replace_token(event_t* event, lf_token_t* token) { - if (event->token != token) { - // Free the existing token, if any. 
- _lf_done_using(event->token); - } - // Replace the token with ours. - event->token = token; + if (event->token != token) { + // Free the existing token, if any. + _lf_done_using(event->token); + } + // Replace the token with ours. + event->token = token; } trigger_handle_t _lf_schedule_at_tag(environment_t* env, trigger_t* trigger, tag_t tag, lf_token_t* token) { - assert(env != GLOBAL_ENVIRONMENT); - tag_t current_logical_tag = env->current_tag; - - LF_PRINT_DEBUG("_lf_schedule_at_tag() called with tag " PRINTF_TAG " at tag " PRINTF_TAG ".", - tag.time - start_time, tag.microstep, - current_logical_tag.time - start_time, current_logical_tag.microstep); - if (lf_tag_compare(tag, current_logical_tag) <= 0 && env->execution_started) { - lf_print_warning("_lf_schedule_at_tag(): requested to schedule an event at the current or past tag."); - return -1; - } - - // Increment the reference count of the token. - if (token != NULL) { - token->ref_count++; - LF_PRINT_DEBUG("_lf_schedule_at_tag: Incremented ref_count of %p to %zu.", - token, token->ref_count); - } - - // Do not schedule events if the tag is after the stop tag - if (lf_is_tag_after_stop_tag(env, tag)) { - lf_print_warning("_lf_schedule_at_tag: event time is past the timeout. Discarding event."); - _lf_done_using(token); - return -1; - } - - event_t* e = lf_get_new_event(env); - // Set the event time - e->time = tag.time; - - tracepoint_schedule(env->trace, trigger, tag.time - current_logical_tag.time); - - // Make sure the event points to this trigger so when it is - // dequeued, it will trigger this trigger. - e->trigger = trigger; - - // Set the payload. 
- e->token = token; + assert(env != GLOBAL_ENVIRONMENT); + tag_t current_logical_tag = env->current_tag; + + LF_PRINT_DEBUG("_lf_schedule_at_tag() called with tag " PRINTF_TAG " at tag " PRINTF_TAG ".", tag.time - start_time, + tag.microstep, current_logical_tag.time - start_time, current_logical_tag.microstep); + if (lf_tag_compare(tag, current_logical_tag) <= 0 && env->execution_started) { + lf_print_warning("_lf_schedule_at_tag(): requested to schedule an event at the current or past tag."); + return -1; + } + + // Increment the reference count of the token. + if (token != NULL) { + token->ref_count++; + LF_PRINT_DEBUG("_lf_schedule_at_tag: Incremented ref_count of %p to %zu.", token, token->ref_count); + } + + // Do not schedule events if the tag is after the stop tag + if (lf_is_tag_after_stop_tag(env, tag)) { + lf_print_warning("_lf_schedule_at_tag: event time is past the timeout. Discarding event."); + _lf_done_using(token); + return -1; + } + + event_t* e = lf_get_new_event(env); + // Set the event time + e->time = tag.time; + + tracepoint_schedule(env, trigger, tag.time - current_logical_tag.time); + + // Make sure the event points to this trigger so when it is + // dequeued, it will trigger this trigger. + e->trigger = trigger; + + // Set the payload. + e->token = token; #ifdef FEDERATED_DECENTRALIZED - // Set the intended tag - e->intended_tag = trigger->intended_tag; + // Set the intended tag + e->intended_tag = trigger->intended_tag; #endif - event_t* found = (event_t *)pqueue_find_equal_same_priority(env->event_q, e); - if (found != NULL) { - if (tag.microstep == 0u) { - // The microstep is 0, which means that the event is being scheduled - // at a future time and at the beginning of the skip list of events - // at that time. - // In case the event is a dummy event - // convert it to a real event. 
- found->is_dummy = false; - switch (trigger->policy) { - case drop: - if (found->token != token) { - _lf_done_using(token); - } - lf_recycle_event(env, e); - return(0); - break; - case replace: - // Replace the payload of the event at the head with our - // current payload. - lf_replace_token(found, token); - lf_recycle_event(env, e); - return 0; - break; - default: - // Adding a microstep to the original - // intended tag. - if (lf_is_tag_after_stop_tag(env, (tag_t) {.time=found->time,.microstep=1})) { - // Scheduling e will incur a microstep after the stop tag, - // which is illegal. - lf_recycle_event(env, e); - return 0; - } - if (found->next != NULL) { - lf_print_error("_lf_schedule_at_tag: in-order contract violated."); - return -1; - } - found->next = e; - } - } else { - // We are requesting a microstep greater than 0 - // where there is already an event for this trigger on the event queue. - // That event may itself be a dummy event for a real event that is - // also at a microstep greater than 0. - // We have to insert our event into the chain or append it - // to the end of the chain, depending on which microstep is lesser. - microstep_t microstep_of_found = 0; - if (tag.time == current_logical_tag.time) { - // This is a situation where the head of the queue - // is an event with microstep == current_microstep + 1 - // which should be reflected in our steps calculation. - microstep_of_found += current_logical_tag.microstep + 1; // Indicating that - // the found event - // is at this microstep. - } - // Follow the chain of events until the right point - // to insert the new event. - while (microstep_of_found < tag.microstep - 1) { - if (found->next == NULL) { - // The chain stops short of where we want to be. - // If it exactly one microstep short of where we want to be, - // then we don't need a dummy. Otherwise, we do. 
- microstep_t undershot_by = (tag.microstep - 1) - microstep_of_found; - if (undershot_by > 0) { - found->next = _lf_create_dummy_events(env, trigger, tag.time, e, undershot_by); - } else { - found->next = e; - } - return 1; - } - found = found->next; - microstep_of_found++; - } - // At this point, microstep_of_found == tag.microstep - 1. - if (found->next == NULL) { - found->next = e; - } else { - switch (trigger->policy) { - case drop: - if (found->next->token != token) { - _lf_done_using(token); - } - lf_recycle_event(env, e); - return 0; - break; - case replace: - // Replace the payload of the event at the head with our - // current payload. - lf_replace_token(found->next, token); - lf_recycle_event(env, e); - return 0; - break; - default: - // Adding a microstep to the original - // intended tag. - if (lf_is_tag_after_stop_tag(env, (tag_t){.time=found->time,.microstep=microstep_of_found+1})) { - // Scheduling e will incur a microstep at timeout, - // which is illegal. - lf_recycle_event(env, e); - return 0; - } - if (found->next->next != NULL) { - lf_print_error("_lf_schedule_at_tag: in-order contract violated."); - return -1; - } - found->next->next = e; - } - } + event_t* found = (event_t*)pqueue_find_equal_same_priority(env->event_q, e); + if (found != NULL) { + if (tag.microstep == 0u) { + // The microstep is 0, which means that the event is being scheduled + // at a future time and at the beginning of the skip list of events + // at that time. + // In case the event is a dummy event + // convert it to a real event. + found->is_dummy = false; + switch (trigger->policy) { + case drop: + if (found->token != token) { + _lf_done_using(token); } + lf_recycle_event(env, e); + return (0); + break; + case replace: + // Replace the payload of the event at the head with our + // current payload. + lf_replace_token(found, token); + lf_recycle_event(env, e); + return 0; + break; + default: + // Adding a microstep to the original + // intended tag. 
+ if (lf_is_tag_after_stop_tag(env, (tag_t){.time = found->time, .microstep = 1})) { + // Scheduling e will incur a microstep after the stop tag, + // which is illegal. + lf_recycle_event(env, e); + return 0; + } + if (found->next != NULL) { + lf_print_error("_lf_schedule_at_tag: in-order contract violated."); + return -1; + } + found->next = e; + } } else { - // No existing event queued. - microstep_t relative_microstep = tag.microstep; - if (tag.time == current_logical_tag.time) { - relative_microstep -= current_logical_tag.microstep; + // We are requesting a microstep greater than 0 + // where there is already an event for this trigger on the event queue. + // That event may itself be a dummy event for a real event that is + // also at a microstep greater than 0. + // We have to insert our event into the chain or append it + // to the end of the chain, depending on which microstep is lesser. + microstep_t microstep_of_found = 0; + if (tag.time == current_logical_tag.time) { + // This is a situation where the head of the queue + // is an event with microstep == current_microstep + 1 + // which should be reflected in our steps calculation. + microstep_of_found += current_logical_tag.microstep + 1; // Indicating that + // the found event + // is at this microstep. + } + // Follow the chain of events until the right point + // to insert the new event. + while (microstep_of_found < tag.microstep - 1) { + if (found->next == NULL) { + // The chain stops short of where we want to be. + // If it exactly one microstep short of where we want to be, + // then we don't need a dummy. Otherwise, we do. 
+ microstep_t undershot_by = (tag.microstep - 1) - microstep_of_found; + if (undershot_by > 0) { + found->next = _lf_create_dummy_events(env, trigger, tag.time, e, undershot_by); + } else { + found->next = e; + } + return 1; } - if ((tag.time == current_logical_tag.time && relative_microstep == 1 && env->execution_started) || - tag.microstep == 0) { - // Do not need a dummy event if we are scheduling at 1 microstep - // in the future at current time or at microstep 0 in a future time. - // Note that if execution hasn't started, then we have to insert dummy events. - pqueue_insert(env->event_q, e); - } else { - // Create a dummy event. Insert it into the queue, and let its next - // pointer point to the actual event. - pqueue_insert(env->event_q, _lf_create_dummy_events(env, trigger, tag.time, e, relative_microstep)); + found = found->next; + microstep_of_found++; + } + // At this point, microstep_of_found == tag.microstep - 1. + if (found->next == NULL) { + found->next = e; + } else { + switch (trigger->policy) { + case drop: + if (found->next->token != token) { + _lf_done_using(token); + } + lf_recycle_event(env, e); + return 0; + break; + case replace: + // Replace the payload of the event at the head with our + // current payload. + lf_replace_token(found->next, token); + lf_recycle_event(env, e); + return 0; + break; + default: + // Adding a microstep to the original + // intended tag. + if (lf_is_tag_after_stop_tag(env, (tag_t){.time = found->time, .microstep = microstep_of_found + 1})) { + // Scheduling e will incur a microstep at timeout, + // which is illegal. + lf_recycle_event(env, e); + return 0; + } + if (found->next->next != NULL) { + lf_print_error("_lf_schedule_at_tag: in-order contract violated."); + return -1; + } + found->next->next = e; } + } + } + } else { + // No existing event queued. 
+ microstep_t relative_microstep = tag.microstep; + if (tag.time == current_logical_tag.time) { + relative_microstep -= current_logical_tag.microstep; } - trigger_handle_t return_value = env->_lf_handle++; - if (env->_lf_handle < 0) { - env->_lf_handle = 1; + if ((tag.time == current_logical_tag.time && relative_microstep == 1 && env->execution_started) || + tag.microstep == 0) { + // Do not need a dummy event if we are scheduling at 1 microstep + // in the future at current time or at microstep 0 in a future time. + // Note that if execution hasn't started, then we have to insert dummy events. + pqueue_insert(env->event_q, e); + } else { + // Create a dummy event. Insert it into the queue, and let its next + // pointer point to the actual event. + pqueue_insert(env->event_q, _lf_create_dummy_events(env, trigger, tag.time, e, relative_microstep)); } - return return_value; + } + trigger_handle_t return_value = env->_lf_handle++; + if (env->_lf_handle < 0) { + env->_lf_handle = 1; + } + return return_value; } trigger_handle_t _lf_insert_reactions_for_trigger(environment_t* env, trigger_t* trigger, lf_token_t* token) { - assert(env != GLOBAL_ENVIRONMENT); - // The trigger argument could be null, meaning that nothing is triggered. - // Doing this after incrementing the reference count ensures that the - // payload will be freed, if there is one. - if (trigger == NULL) { - lf_print_warning("_lf_schedule_init_reactions() called with a NULL trigger"); - _lf_done_using(token); - return 0; - } - - // Check to see if the trigger is not a timer - // and not a physical action - if (trigger->is_timer || trigger->is_physical) { - lf_print_warning("_lf_schedule_init_reactions() called on a timer or physical action."); - return 0; - } + assert(env != GLOBAL_ENVIRONMENT); + // The trigger argument could be null, meaning that nothing is triggered. + // Doing this after incrementing the reference count ensures that the + // payload will be freed, if there is one. 
+ if (trigger == NULL) { + lf_print_warning("_lf_schedule_init_reactions() called with a NULL trigger"); + _lf_done_using(token); + return 0; + } + + // Check to see if the trigger is not a timer + // and not a physical action + if (trigger->is_timer || trigger->is_physical) { + lf_print_warning("_lf_schedule_init_reactions() called on a timer or physical action."); + return 0; + } #ifdef MODAL_REACTORS - // If this trigger is associated with an inactive mode, it should not trigger any reaction. - if (!_lf_mode_is_active(trigger->mode)) { - LF_PRINT_DEBUG("Suppressing reactions of trigger due inactivity of mode %s.", trigger->mode->name); - return 1; - } + // If this trigger is associated with an inactive mode, it should not trigger any reaction. + if (!_lf_mode_is_active(trigger->mode)) { + LF_PRINT_DEBUG("Suppressing reactions of trigger due inactivity of mode %s.", trigger->mode->name); + return 1; + } #endif - // Check if the trigger has violated the STP offset - bool is_STP_violated = false; + // Check if the trigger has violated the STP offset + bool is_STP_violated = false; #ifdef FEDERATED - if (lf_tag_compare(trigger->intended_tag, env->current_tag) < 0) { - is_STP_violated = true; - } + if (lf_tag_compare(trigger->intended_tag, env->current_tag) < 0) { + is_STP_violated = true; + } #ifdef FEDERATED_CENTRALIZED - // Check for STP violation in the centralized coordination, which is a - // critical error. - if (is_STP_violated) { - lf_print_error_and_exit("Attempted to insert reactions for a trigger that had an intended tag that was in the past. " - "This should not happen under centralized coordination. Intended tag: " PRINTF_TAG ". Current tag: " PRINTF_TAG ").", - trigger->intended_tag.time - lf_time_start(), - trigger->intended_tag.microstep, - lf_time_logical_elapsed(env), - env->current_tag.microstep); - } + // Check for STP violation in the centralized coordination, which is a + // critical error. 
+ if (is_STP_violated) { + lf_print_error_and_exit( + "Attempted to insert reactions for a trigger that had an intended tag that was in the past. " + "This should not happen under centralized coordination. Intended tag: " PRINTF_TAG ". Current tag: " PRINTF_TAG + ").", + trigger->intended_tag.time - lf_time_start(), trigger->intended_tag.microstep, lf_time_logical_elapsed(env), + env->current_tag.microstep); + } #endif #endif - // Copy the token pointer into the trigger struct so that the - // reactions can access it. This overwrites the previous template token, - // for which we decrement the reference count. - _lf_replace_template_token((token_template_t*)trigger, token); + // Copy the token pointer into the trigger struct so that the + // reactions can access it. This overwrites the previous template token, + // for which we decrement the reference count. + _lf_replace_template_token((token_template_t*)trigger, token); - // Mark the trigger present. - trigger->status = present; + // Mark the trigger present. + trigger->status = present; - // Push the corresponding reactions for this trigger - // onto the reaction queue. - for (int i = 0; i < trigger->number_of_reactions; i++) { - reaction_t* reaction = trigger->reactions[i]; + // Push the corresponding reactions for this trigger + // onto the reaction queue. 
+ for (int i = 0; i < trigger->number_of_reactions; i++) { + reaction_t* reaction = trigger->reactions[i]; #ifdef MODAL_REACTORS - // Check if reaction is disabled by mode inactivity - if (!_lf_mode_is_active(reaction->mode)) { - LF_PRINT_DEBUG("Suppressing reaction %s due inactivity of mode %s.", reaction->name, reaction->mode->name); - continue; // Suppress reaction by preventing entering reaction queue - } + // Check if reaction is disabled by mode inactivity + if (!_lf_mode_is_active(reaction->mode)) { + LF_PRINT_DEBUG("Suppressing reaction %s due inactivity of mode %s.", reaction->name, reaction->mode->name); + continue; // Suppress reaction by preventing entering reaction queue + } #endif - // Do not enqueue this reaction twice. - if (reaction->status == inactive) { - reaction->is_STP_violated = is_STP_violated; - _lf_trigger_reaction(env, reaction, -1); - LF_PRINT_LOG("Enqueued reaction %s at time " PRINTF_TIME ".", reaction->name, lf_time_logical(env)); - } + // Do not enqueue this reaction twice. + if (reaction->status == inactive) { + reaction->is_STP_violated = is_STP_violated; + _lf_trigger_reaction(env, reaction, -1); + LF_PRINT_LOG("Enqueued reaction %s at time " PRINTF_TIME ".", reaction->name, lf_time_logical(env)); } + } - return 1; + return 1; } -void _lf_advance_logical_time(environment_t *env, instant_t next_time) { - assert(env != GLOBAL_ENVIRONMENT); - - // FIXME: The following checks that _lf_advance_logical_time() - // is being called correctly. Namely, check if logical time - // is being pushed past the head of the event queue. This should - // never happen if _lf_advance_logical_time() is called correctly. - // This is commented out because it will add considerable overhead - // to the ordinary execution of LF programs. Instead, there might - // be a need for a target property that enables these kinds of logic - // assertions for development purposes only. 
- #ifndef NDEBUG - event_t* next_event = (event_t*)pqueue_peek(env->event_q); - if (next_event != NULL) { - if (next_time > next_event->time) { - lf_print_error_and_exit("_lf_advance_logical_time(): Attempted to move time to " PRINTF_TIME ", which is " - "past the head of the event queue, " PRINTF_TIME ".", - next_time - start_time, next_event->time - start_time); - } - } - #endif - if (env->current_tag.time < next_time) { - env->current_tag.time = next_time; - env->current_tag.microstep = 0; - } else if (env->current_tag.time == next_time) { - env->current_tag.microstep++; - } else { - lf_print_error_and_exit("_lf_advance_logical_time(): Attempted to move tag back in time."); +void _lf_advance_logical_time(environment_t* env, instant_t next_time) { + assert(env != GLOBAL_ENVIRONMENT); + +// FIXME: The following checks that _lf_advance_logical_time() +// is being called correctly. Namely, check if logical time +// is being pushed past the head of the event queue. This should +// never happen if _lf_advance_logical_time() is called correctly. +// This is commented out because it will add considerable overhead +// to the ordinary execution of LF programs. Instead, there might +// be a need for a target property that enables these kinds of logic +// assertions for development purposes only. 
+#ifndef NDEBUG + event_t* next_event = (event_t*)pqueue_peek(env->event_q); + if (next_event != NULL) { + if (next_time > next_event->time) { + lf_print_error_and_exit("_lf_advance_logical_time(): Attempted to move time to " PRINTF_TIME ", which is " + "past the head of the event queue, " PRINTF_TIME ".", + next_time - start_time, next_event->time - start_time); } - LF_PRINT_LOG("Advanced (elapsed) tag to " PRINTF_TAG " at physical time " PRINTF_TIME, - next_time - start_time, env->current_tag.microstep, lf_time_physical_elapsed()); + } +#endif + if (env->current_tag.time < next_time) { + env->current_tag.time = next_time; + env->current_tag.microstep = 0; + } else if (env->current_tag.time == next_time) { + env->current_tag.microstep++; + } else { + lf_print_error_and_exit("_lf_advance_logical_time(): Attempted to move tag back in time."); + } + LF_PRINT_LOG("Advanced (elapsed) tag to " PRINTF_TAG " at physical time " PRINTF_TIME, next_time - start_time, + env->current_tag.microstep, lf_time_physical_elapsed()); } /** @@ -802,25 +799,24 @@ void _lf_advance_logical_time(environment_t *env, instant_t next_time) { * @param worker The thread number of the worker thread or 0 for single-threaded execution (for tracing). 
*/ void _lf_invoke_reaction(environment_t* env, reaction_t* reaction, int worker) { - assert(env != GLOBAL_ENVIRONMENT); + assert(env != GLOBAL_ENVIRONMENT); #if !defined(LF_SINGLE_THREADED) - if (((self_base_t*) reaction->self)->reactor_mutex != NULL) { - LF_MUTEX_LOCK((lf_mutex_t*)((self_base_t*)reaction->self)->reactor_mutex); - } + if (((self_base_t*)reaction->self)->reactor_mutex != NULL) { + LF_MUTEX_LOCK((lf_mutex_t*)((self_base_t*)reaction->self)->reactor_mutex); + } #endif - tracepoint_reaction_starts(env->trace, reaction, worker); - ((self_base_t*) reaction->self)->executing_reaction = reaction; - reaction->function(reaction->self); - ((self_base_t*) reaction->self)->executing_reaction = NULL; - tracepoint_reaction_ends(env->trace, reaction, worker); - + tracepoint_reaction_starts(env, reaction, worker); + ((self_base_t*)reaction->self)->executing_reaction = reaction; + reaction->function(reaction->self); + ((self_base_t*)reaction->self)->executing_reaction = NULL; + tracepoint_reaction_ends(env, reaction, worker); #if !defined(LF_SINGLE_THREADED) - if (((self_base_t*) reaction->self)->reactor_mutex != NULL) { - LF_MUTEX_UNLOCK((lf_mutex_t*)((self_base_t*)reaction->self)->reactor_mutex); - } + if (((self_base_t*)reaction->self)->reactor_mutex != NULL) { + LF_MUTEX_UNLOCK((lf_mutex_t*)((self_base_t*)reaction->self)->reactor_mutex); + } #endif } @@ -833,155 +829,155 @@ void _lf_invoke_reaction(environment_t* env, reaction_t* reaction, int worker) { * @param reaction The reaction that has just executed. * @param worker The thread number of the worker thread or 0 for single-threaded execution (for tracing). */ -void schedule_output_reactions(environment_t *env, reaction_t* reaction, int worker) { - assert(env != GLOBAL_ENVIRONMENT); - - // If the reaction produced outputs, put the resulting triggered - // reactions into the reaction queue. 
As an optimization, if exactly one - // downstream reaction is enabled by this reaction, then it may be - // executed immediately in this same thread - // without going through the reaction queue. - reaction_t* downstream_to_execute_now = NULL; - int num_downstream_reactions = 0; -#ifdef FEDERATED_DECENTRALIZED // Only pass down STP violation for federated programs that use decentralized coordination. - // Extract the inherited STP violation - bool inherited_STP_violation = reaction->is_STP_violated; - LF_PRINT_DEBUG("Reaction %s has STP violation status: %d.", reaction->name, reaction->is_STP_violated); +void schedule_output_reactions(environment_t* env, reaction_t* reaction, int worker) { + assert(env != GLOBAL_ENVIRONMENT); + + // If the reaction produced outputs, put the resulting triggered + // reactions into the reaction queue. As an optimization, if exactly one + // downstream reaction is enabled by this reaction, then it may be + // executed immediately in this same thread + // without going through the reaction queue. + reaction_t* downstream_to_execute_now = NULL; + int num_downstream_reactions = 0; +#ifdef FEDERATED_DECENTRALIZED // Only pass down STP violation for federated programs that use decentralized + // coordination. 
+ // Extract the inherited STP violation + bool inherited_STP_violation = reaction->is_STP_violated; + LF_PRINT_DEBUG("Reaction %s has STP violation status: %d.", reaction->name, reaction->is_STP_violated); #endif - LF_PRINT_DEBUG("There are %zu outputs from reaction %s.", reaction->num_outputs, reaction->name); - for (size_t i=0; i < reaction->num_outputs; i++) { - if (reaction->output_produced[i] != NULL && *(reaction->output_produced[i])) { - LF_PRINT_DEBUG("Output %zu has been produced.", i); - trigger_t** triggerArray = (reaction->triggers)[i]; - LF_PRINT_DEBUG("There are %d trigger arrays associated with output %zu.", - reaction->triggered_sizes[i], i); - for (int j=0; j < reaction->triggered_sizes[i]; j++) { - trigger_t* trigger = triggerArray[j]; - if (trigger != NULL) { - LF_PRINT_DEBUG("Trigger %p lists %d reactions.", trigger, trigger->number_of_reactions); - for (int k=0; k < trigger->number_of_reactions; k++) { - reaction_t* downstream_reaction = trigger->reactions[k]; + LF_PRINT_DEBUG("There are %zu outputs from reaction %s.", reaction->num_outputs, reaction->name); + for (size_t i = 0; i < reaction->num_outputs; i++) { + if (reaction->output_produced[i] != NULL && *(reaction->output_produced[i])) { + LF_PRINT_DEBUG("Output %zu has been produced.", i); + trigger_t** triggerArray = (reaction->triggers)[i]; + LF_PRINT_DEBUG("There are %d trigger arrays associated with output %zu.", reaction->triggered_sizes[i], i); + for (int j = 0; j < reaction->triggered_sizes[i]; j++) { + trigger_t* trigger = triggerArray[j]; + if (trigger != NULL) { + LF_PRINT_DEBUG("Trigger %p lists %d reactions.", trigger, trigger->number_of_reactions); + for (int k = 0; k < trigger->number_of_reactions; k++) { + reaction_t* downstream_reaction = trigger->reactions[k]; #ifdef FEDERATED_DECENTRALIZED // Only pass down tardiness for federated LF programs - // Set the is_STP_violated for the downstream reaction - if (downstream_reaction != NULL) { - 
downstream_reaction->is_STP_violated = inherited_STP_violation; - LF_PRINT_DEBUG("Passing is_STP_violated of %d to the downstream reaction: %s", - downstream_reaction->is_STP_violated, downstream_reaction->name); - } + // Set the is_STP_violated for the downstream reaction + if (downstream_reaction != NULL) { + downstream_reaction->is_STP_violated = inherited_STP_violation; + LF_PRINT_DEBUG("Passing is_STP_violated of %d to the downstream reaction: %s", + downstream_reaction->is_STP_violated, downstream_reaction->name); + } #endif - if (downstream_reaction != NULL && downstream_reaction != downstream_to_execute_now) { - num_downstream_reactions++; - // If there is exactly one downstream reaction that is enabled by this - // reaction, then we can execute that reaction immediately without - // going through the reaction queue. In multithreaded execution, this - // avoids acquiring a mutex lock. - // FIXME: Check the earliest deadline on the reaction queue. - // This optimization could violate EDF scheduling otherwise. - if (num_downstream_reactions == 1 && downstream_reaction->last_enabling_reaction == reaction) { - // So far, this downstream reaction is a candidate to execute now. - downstream_to_execute_now = downstream_reaction; - } else { - // If there is a previous candidate reaction to execute now, - // it is no longer a candidate. - if (downstream_to_execute_now != NULL) { - // More than one downstream reaction is enabled. - // In this case, if we were to execute the downstream reaction - // immediately without changing any queues, then the second - // downstream reaction would be blocked because this reaction - // remains on the executing queue. Hence, the optimization - // is not valid. Put the candidate reaction on the queue. - _lf_trigger_reaction(env, downstream_to_execute_now, worker); - downstream_to_execute_now = NULL; - } - // Queue the reaction. 
- _lf_trigger_reaction(env, downstream_reaction, worker); - } - } - } + if (downstream_reaction != NULL && downstream_reaction != downstream_to_execute_now) { + num_downstream_reactions++; + // If there is exactly one downstream reaction that is enabled by this + // reaction, then we can execute that reaction immediately without + // going through the reaction queue. In multithreaded execution, this + // avoids acquiring a mutex lock. + // FIXME: Check the earliest deadline on the reaction queue. + // This optimization could violate EDF scheduling otherwise. + if (num_downstream_reactions == 1 && downstream_reaction->last_enabling_reaction == reaction) { + // So far, this downstream reaction is a candidate to execute now. + downstream_to_execute_now = downstream_reaction; + } else { + // If there is a previous candidate reaction to execute now, + // it is no longer a candidate. + if (downstream_to_execute_now != NULL) { + // More than one downstream reaction is enabled. + // In this case, if we were to execute the downstream reaction + // immediately without changing any queues, then the second + // downstream reaction would be blocked because this reaction + // remains on the executing queue. Hence, the optimization + // is not valid. Put the candidate reaction on the queue. + _lf_trigger_reaction(env, downstream_to_execute_now, worker); + downstream_to_execute_now = NULL; } + // Queue the reaction. 
+ _lf_trigger_reaction(env, downstream_reaction, worker); + } } + } } + } } - if (downstream_to_execute_now != NULL) { - LF_PRINT_LOG("Worker %d: Optimizing and executing downstream reaction now: %s", worker, downstream_to_execute_now->name); - bool violation = false; + } + if (downstream_to_execute_now != NULL) { + LF_PRINT_LOG("Worker %d: Optimizing and executing downstream reaction now: %s", worker, + downstream_to_execute_now->name); + bool violation = false; #ifdef FEDERATED_DECENTRALIZED // Only use the STP handler for federated programs that use decentralized coordination - // If the is_STP_violated for the reaction is true, - // an input trigger to this reaction has been triggered at a later - // logical time than originally anticipated. In this case, a special - // STP handler will be invoked. - // FIXME: Note that the STP handler will be invoked - // at most once per logical time value. If the STP handler triggers the - // same reaction at the current time value, even if at a future superdense time, - // then the reaction will be invoked and the STP handler will not be invoked again. - // However, input ports to a federate reactor are network port types so this possibly should - // be disallowed. - // @note The STP handler and the deadline handler are not mutually exclusive. - // In other words, both can be invoked for a reaction if it is triggered late - // in logical time (STP offset is violated) and also misses the constraint on - // physical time (deadline). - // @note In absence of a STP handler, the is_STP_violated will be passed down the reaction - // chain until it is dealt with in a downstream STP handler. - if (downstream_to_execute_now->is_STP_violated == true) { - // Tardiness has occurred - LF_PRINT_LOG("Event has STP violation."); - reaction_function_t handler = downstream_to_execute_now->STP_handler; - // Invoke the STP handler if there is one. 
- if (handler != NULL) { - // There is a violation and it is being handled here - // If there is no STP handler, pass the is_STP_violated - // to downstream reactions. - violation = true; - LF_PRINT_LOG("Invoke tardiness handler."); - (*handler)(downstream_to_execute_now->self); - - // If the reaction produced outputs, put the resulting - // triggered reactions into the queue or execute them directly if possible. - schedule_output_reactions(env, downstream_to_execute_now, worker); - - // Reset the tardiness because it has been dealt with in the - // STP handler - downstream_to_execute_now->is_STP_violated = false; - LF_PRINT_DEBUG("Reset reaction's is_STP_violated field to false: %s", - downstream_to_execute_now->name); - } - } + // If the is_STP_violated for the reaction is true, + // an input trigger to this reaction has been triggered at a later + // logical time than originally anticipated. In this case, a special + // STP handler will be invoked. + // FIXME: Note that the STP handler will be invoked + // at most once per logical time value. If the STP handler triggers the + // same reaction at the current time value, even if at a future superdense time, + // then the reaction will be invoked and the STP handler will not be invoked again. + // However, input ports to a federate reactor are network port types so this possibly should + // be disallowed. + // @note The STP handler and the deadline handler are not mutually exclusive. + // In other words, both can be invoked for a reaction if it is triggered late + // in logical time (STP offset is violated) and also misses the constraint on + // physical time (deadline). + // @note In absence of a STP handler, the is_STP_violated will be passed down the reaction + // chain until it is dealt with in a downstream STP handler. 
+ if (downstream_to_execute_now->is_STP_violated == true) { + // Tardiness has occurred + LF_PRINT_LOG("Event has STP violation."); + reaction_function_t handler = downstream_to_execute_now->STP_handler; + // Invoke the STP handler if there is one. + if (handler != NULL) { + // There is a violation and it is being handled here + // If there is no STP handler, pass the is_STP_violated + // to downstream reactions. + violation = true; + LF_PRINT_LOG("Invoke tardiness handler."); + (*handler)(downstream_to_execute_now->self); + + // If the reaction produced outputs, put the resulting + // triggered reactions into the queue or execute them directly if possible. + schedule_output_reactions(env, downstream_to_execute_now, worker); + + // Reset the tardiness because it has been dealt with in the + // STP handler + downstream_to_execute_now->is_STP_violated = false; + LF_PRINT_DEBUG("Reset reaction's is_STP_violated field to false: %s", downstream_to_execute_now->name); + } + } #endif - if (downstream_to_execute_now->deadline >= 0LL) { - // Get the current physical time. - instant_t physical_time = lf_time_physical(); - // Check for deadline violation. - if (downstream_to_execute_now->deadline == 0 || physical_time > env->current_tag.time + downstream_to_execute_now->deadline) { - // Deadline violation has occurred. - tracepoint_reaction_deadline_missed(env->trace, downstream_to_execute_now, worker); - violation = true; - // Invoke the local handler, if there is one. - reaction_function_t handler = downstream_to_execute_now->deadline_violation_handler; - if (handler != NULL) { - // Assume the mutex is still not held. - (*handler)(downstream_to_execute_now->self); - - // If the reaction produced outputs, put the resulting - // triggered reactions into the queue or execute them directly if possible. - schedule_output_reactions(env, downstream_to_execute_now, worker); - } - } - } - if (!violation) { - // Invoke the downstream_reaction function. 
- _lf_invoke_reaction(env, downstream_to_execute_now, worker); - - // If the downstream_reaction produced outputs, put the resulting triggered - // reactions into the queue (or execute them directly, if possible). - schedule_output_reactions(env, downstream_to_execute_now, worker); + if (downstream_to_execute_now->deadline >= 0LL) { + // Get the current physical time. + instant_t physical_time = lf_time_physical(); + // Check for deadline violation. + if (downstream_to_execute_now->deadline == 0 || + physical_time > env->current_tag.time + downstream_to_execute_now->deadline) { + // Deadline violation has occurred. + tracepoint_reaction_deadline_missed(env, downstream_to_execute_now, worker); + violation = true; + // Invoke the local handler, if there is one. + reaction_function_t handler = downstream_to_execute_now->deadline_violation_handler; + if (handler != NULL) { + // Assume the mutex is still not held. + (*handler)(downstream_to_execute_now->self); + + // If the reaction produced outputs, put the resulting + // triggered reactions into the queue or execute them directly if possible. + schedule_output_reactions(env, downstream_to_execute_now, worker); } + } + } + if (!violation) { + // Invoke the downstream_reaction function. + _lf_invoke_reaction(env, downstream_to_execute_now, worker); - // Reset the is_STP_violated because it has been passed - // down the chain - downstream_to_execute_now->is_STP_violated = false; - LF_PRINT_DEBUG("Finally, reset reaction's is_STP_violated field to false: %s", - downstream_to_execute_now->name); + // If the downstream_reaction produced outputs, put the resulting triggered + // reactions into the queue (or execute them directly, if possible). 
+ schedule_output_reactions(env, downstream_to_execute_now, worker); } + + // Reset the is_STP_violated because it has been passed + // down the chain + downstream_to_execute_now->is_STP_violated = false; + LF_PRINT_DEBUG("Finally, reset reaction's is_STP_violated field to false: %s", downstream_to_execute_now->name); + } } /** @@ -989,30 +985,30 @@ void schedule_output_reactions(environment_t *env, reaction_t* reaction, int wor * TODO: This is not necessary for NO_TTY */ void usage(int argc, const char* argv[]) { - printf("\nCommand-line arguments: \n\n"); - printf(" -f, --fast [true | false]\n"); - printf(" Whether to wait for physical time to match logical time.\n\n"); - printf(" -o, --timeout \n"); - printf(" Stop after the specified amount of logical time, where units are one of\n"); - printf(" nsec, usec, msec, sec, minute, hour, day, week, or the plurals of those.\n\n"); - printf(" -k, --keepalive\n"); - printf(" Whether continue execution even when there are no events to process.\n\n"); - printf(" -w, --workers \n"); - printf(" Executed in threads if possible (optional feature).\n\n"); - printf(" -i, --id \n"); - printf(" The ID of the federation that this reactor will join.\n\n"); - #ifdef FEDERATED - printf(" -r, --rti \n"); - printf(" The address of the RTI, which can be in the form of user@host:port or ip:port.\n\n"); - printf(" -l\n"); - printf(" Send stdout to individual log files for each federate.\n\n"); - #endif - - printf("Command given:\n"); - for (int i = 0; i < argc; i++) { - printf("%s ", argv[i]); - } - printf("\n\n"); + printf("\nCommand-line arguments: \n\n"); + printf(" -f, --fast [true | false]\n"); + printf(" Whether to wait for physical time to match logical time.\n\n"); + printf(" -o, --timeout \n"); + printf(" Stop after the specified amount of logical time, where units are one of\n"); + printf(" nsec, usec, msec, sec, minute, hour, day, week, or the plurals of those.\n\n"); + printf(" -k, --keepalive\n"); + printf(" Whether continue 
execution even when there are no events to process.\n\n"); + printf(" -w, --workers \n"); + printf(" Executed in threads if possible (optional feature).\n\n"); + printf(" -i, --id \n"); + printf(" The ID of the federation that this reactor will join.\n\n"); +#ifdef FEDERATED + printf(" -r, --rti \n"); + printf(" The address of the RTI, which can be in the form of user@host:port or ip:port.\n\n"); + printf(" -l\n"); + printf(" Send stdout to individual log files for each federate.\n\n"); +#endif + + printf("Command given:\n"); + for (int i = 0; i < argc; i++) { + printf("%s ", argv[i]); + } + printf("\n\n"); } // Some options given in the target directive are provided here as @@ -1020,7 +1016,6 @@ void usage(int argc, const char* argv[]) { int default_argc = 0; const char** default_argv = NULL; - /** * Process the command-line arguments. If the command line arguments are not * understood, then print a usage message and return 0. Otherwise, return 1. @@ -1028,172 +1023,207 @@ const char** default_argv = NULL; * TODO: Not necessary for NO_TTY */ int process_args(int argc, const char* argv[]) { - int i = 1; - while (i < argc) { - const char* arg = argv[i++]; - if (strcmp(arg, "-f") == 0 || strcmp(arg, "--fast") == 0) { - if (argc < i + 1) { - lf_print_error("--fast needs a boolean."); - usage(argc, argv); - return 0; - } - const char* fast_spec = argv[i++]; - if (strcmp(fast_spec, "true") == 0) { - fast = true; - } else if (strcmp(fast_spec, "false") == 0) { - fast = false; - } else { - lf_print_error("Invalid value for --fast: %s", fast_spec); - } - } else if (strcmp(arg, "-o") == 0 - || strcmp(arg, "--timeout") == 0 - || strcmp(arg, "-timeout") == 0) { - // Tolerate -timeout for legacy uses. 
- if (argc < i + 2) { - lf_print_error("--timeout needs time and units."); - usage(argc, argv); - return 0; - } - const char* time_spec = argv[i++]; - const char* units = argv[i++]; - - - #if defined(PLATFORM_ARDUINO) - duration = atol(time_spec); - #else - duration = atoll(time_spec); - #endif - - // A parse error returns 0LL, so check to see whether that is what is meant. - if (duration == 0LL && strncmp(time_spec, "0", 1) != 0) { - // Parse error. - lf_print_error("Invalid time value: %s", time_spec); - usage(argc, argv); - return 0; - } - if (strncmp(units, "sec", 3) == 0) { - duration = SEC(duration); - } else if (strncmp(units, "msec", 4) == 0) { - duration = MSEC(duration); - } else if (strncmp(units, "usec", 4) == 0) { - duration = USEC(duration); - } else if (strncmp(units, "nsec", 4) == 0) { - duration = NSEC(duration); - } else if (strncmp(units, "min", 3) == 0) { - duration = MINUTE(duration); - } else if (strncmp(units, "hour", 4) == 0) { - duration = HOUR(duration); - } else if (strncmp(units, "day", 3) == 0) { - duration = DAY(duration); - } else if (strncmp(units, "week", 4) == 0) { - duration = WEEK(duration); - } else { - // Invalid units. 
- lf_print_error("Invalid time units: %s", units); - usage(argc, argv); - return 0; - } - } else if (strcmp(arg, "-k") == 0 || strcmp(arg, "--keepalive") == 0) { - if (argc < i + 1) { - lf_print_error("--keepalive needs a boolean."); - usage(argc, argv); - return 0; - } - const char* keep_spec = argv[i++]; - if (strcmp(keep_spec, "true") == 0) { - keepalive_specified = true; - } else if (strcmp(keep_spec, "false") == 0) { - keepalive_specified = false; - } else { - lf_print_error("Invalid value for --keepalive: %s", keep_spec); - } - } else if (strcmp(arg, "-w") == 0 || strcmp(arg, "--workers") == 0) { - if (argc < i + 1) { - lf_print_error("--workers needs an integer argument.s"); - usage(argc, argv); - return 0; - } - const char* threads_spec = argv[i++]; - int num_workers = atoi(threads_spec); - if (num_workers <= 0) { - lf_print_error("Invalid value for --workers: %s. Using 1.", threads_spec); - num_workers = 1; - } - _lf_number_of_workers = (unsigned int)num_workers; - } - #ifdef FEDERATED - else if (strcmp(arg, "-i") == 0 || strcmp(arg, "--id") == 0) { - if (argc < i + 1) { - lf_print_error("--id needs a string argument."); - usage(argc, argv); - return 0; - } - const char* fid = argv[i++]; - lf_set_federation_id(fid); - lf_print("Federation ID for executable %s: %s", argv[0], fid); - } else if (strcmp(arg, "-r") == 0 || strcmp(arg, "--rti") == 0) { - if (argc < i + 1) { - lf_print_error("--rti needs a string argument in the form of [user]@[host]:[port]."); - usage(argc, argv); - return 0; - } - parse_rti_code_t code = lf_parse_rti_addr(argv[i++]); - if (code != SUCCESS) { - switch (code) { - case INVALID_HOST: - lf_print_error("--rti needs a valid host"); - break; - case INVALID_PORT: - lf_print_error("--rti needs a valid port"); - break; - case INVALID_USER: - lf_print_error("--rti needs a valid user"); - break; - case FAILED_TO_PARSE: - lf_print_error("Failed to parse address of RTI"); - break; - default: - break; - } - usage(argc, argv); - return 0; - } - 
} - #endif - else if (strcmp(arg, "--ros-args") == 0) { - // FIXME: Ignore ROS arguments for now - } else { - lf_print_error("Unrecognized command-line argument: %s", arg); - usage(argc, argv); - return 0; + int i = 1; + while (i < argc) { + const char* arg = argv[i++]; + if (strcmp(arg, "-f") == 0 || strcmp(arg, "--fast") == 0) { + if (argc < i + 1) { + lf_print_error("--fast needs a boolean."); + usage(argc, argv); + return 0; + } + const char* fast_spec = argv[i++]; + if (strcmp(fast_spec, "true") == 0) { + fast = true; + } else if (strcmp(fast_spec, "false") == 0) { + fast = false; + } else { + lf_print_error("Invalid value for --fast: %s", fast_spec); + } + } else if (strcmp(arg, "-o") == 0 || strcmp(arg, "--timeout") == 0 || strcmp(arg, "-timeout") == 0) { + // Tolerate -timeout for legacy uses. + if (argc < i + 2) { + lf_print_error("--timeout needs time and units."); + usage(argc, argv); + return 0; + } + const char* time_spec = argv[i++]; + const char* units = argv[i++]; + +#if defined(PLATFORM_ARDUINO) + duration = atol(time_spec); +#else + duration = atoll(time_spec); +#endif + + // A parse error returns 0LL, so check to see whether that is what is meant. + if (duration == 0LL && strncmp(time_spec, "0", 1) != 0) { + // Parse error. + lf_print_error("Invalid time value: %s", time_spec); + usage(argc, argv); + return 0; + } + if (strncmp(units, "sec", 3) == 0) { + duration = SEC(duration); + } else if (strncmp(units, "msec", 4) == 0) { + duration = MSEC(duration); + } else if (strncmp(units, "usec", 4) == 0) { + duration = USEC(duration); + } else if (strncmp(units, "nsec", 4) == 0) { + duration = NSEC(duration); + } else if (strncmp(units, "min", 3) == 0) { + duration = MINUTE(duration); + } else if (strncmp(units, "hour", 4) == 0) { + duration = HOUR(duration); + } else if (strncmp(units, "day", 3) == 0) { + duration = DAY(duration); + } else if (strncmp(units, "week", 4) == 0) { + duration = WEEK(duration); + } else { + // Invalid units. 
+ lf_print_error("Invalid time units: %s", units); + usage(argc, argv); + return 0; + } + } else if (strcmp(arg, "-k") == 0 || strcmp(arg, "--keepalive") == 0) { + if (argc < i + 1) { + lf_print_error("--keepalive needs a boolean."); + usage(argc, argv); + return 0; + } + const char* keep_spec = argv[i++]; + if (strcmp(keep_spec, "true") == 0) { + keepalive_specified = true; + } else if (strcmp(keep_spec, "false") == 0) { + keepalive_specified = false; + } else { + lf_print_error("Invalid value for --keepalive: %s", keep_spec); + } + } else if (strcmp(arg, "-w") == 0 || strcmp(arg, "--workers") == 0) { + if (argc < i + 1) { + lf_print_error("--workers needs an integer argument.s"); + usage(argc, argv); + return 0; + } + const char* threads_spec = argv[i++]; + int num_workers = atoi(threads_spec); + if (num_workers <= 0) { + lf_print_error("Invalid value for --workers: %s. Using 1.", threads_spec); + num_workers = 1; + } + _lf_number_of_workers = (unsigned int)num_workers; + } +#ifdef FEDERATED + else if (strcmp(arg, "-i") == 0 || strcmp(arg, "--id") == 0) { + if (argc < i + 1) { + lf_print_error("--id needs a string argument."); + usage(argc, argv); + return 0; + } + const char* fid = argv[i++]; + lf_set_federation_id(fid); + lf_print("Federation ID for executable %s: %s", argv[0], fid); + } else if (strcmp(arg, "-r") == 0 || strcmp(arg, "--rti") == 0) { + if (argc < i + 1) { + lf_print_error("--rti needs a string argument in the form of [user]@[host]:[port]."); + usage(argc, argv); + return 0; + } + parse_rti_code_t code = lf_parse_rti_addr(argv[i++]); + if (code != SUCCESS) { + switch (code) { + case INVALID_HOST: + lf_print_error("--rti needs a valid host"); + break; + case INVALID_PORT: + lf_print_error("--rti needs a valid port"); + break; + case INVALID_USER: + lf_print_error("--rti needs a valid user"); + break; + case FAILED_TO_PARSE: + lf_print_error("Failed to parse address of RTI"); + break; + default: + break; } + usage(argc, argv); + return 0; + } } - 
return 1; +#endif + else if (strcmp(arg, "--ros-args") == 0) { + // FIXME: Ignore ROS arguments for now + } else { + lf_print_error("Unrecognized command-line argument: %s", arg); + usage(argc, argv); + return 0; + } + } + return 1; +} + +/** + * @brief Check that the provided version information is consistent with the + * core runtime. + */ +#ifdef LF_TRACE +static void check_version(version_t version) { +#ifdef LF_SINGLE_THREADED + LF_ASSERT(version.build_config.single_threaded == TRIBOOL_TRUE || + version.build_config.single_threaded == TRIBOOL_DOES_NOT_MATTER, + "expected single-threaded version"); +#else + LF_ASSERT(version.build_config.single_threaded == TRIBOOL_FALSE || + version.build_config.single_threaded == TRIBOOL_DOES_NOT_MATTER, + "expected multi-threaded version"); +#endif +#ifdef NDEBUG + LF_ASSERT(version.build_config.build_type_is_debug == TRIBOOL_FALSE || + version.build_config.build_type_is_debug == TRIBOOL_DOES_NOT_MATTER, + "expected release version"); +#else + LF_ASSERT(version.build_config.build_type_is_debug == TRIBOOL_TRUE || + version.build_config.build_type_is_debug == TRIBOOL_DOES_NOT_MATTER, + "expected debug version"); +#endif + LF_ASSERT(version.build_config.log_level == LOG_LEVEL || version.build_config.log_level == INT_MAX, + "expected log level %d", LOG_LEVEL); + // assert(!version.core_version_name || strcmp(version.core_version_name, CORE_SHA) == 0); // TODO: provide CORE_SHA } +#endif // LF_TRACE void initialize_global(void) { - #if !defined NDEBUG - _lf_count_payload_allocations = 0; - _lf_count_token_allocations = 0; - #endif - - environment_t *envs; - int num_envs = _lf_get_environments(&envs); - for (int i = 0; itrace); - #endif - // Call the code-generated function to initialize all actions, timers, and ports - // This is done for all environments/enclaves at the same time. 
- _lf_initialize_trigger_objects() ; + environment_t* envs; + int num_envs = _lf_get_environments(&envs); +#if defined(LF_SINGLE_THREADED) + int max_threads_tracing = 1; +#else + int max_threads_tracing = envs[0].num_workers * num_envs + 1; // add 1 for the main thread +#endif +#if defined(FEDERATED) + // NUMBER_OF_FEDERATES is an upper bound on the number of upstream federates + // -- threads are spawned to listen to upstream federates. Add 1 for the + // clock sync thread and add 1 for the staa thread + max_threads_tracing += NUMBER_OF_FEDERATES + 2; + lf_tracing_global_init("federate__", FEDERATE_ID, max_threads_tracing); +#else + lf_tracing_global_init("trace_", 0, max_threads_tracing); +#endif + // Call the code-generated function to initialize all actions, timers, and ports + // This is done for all environments/enclaves at the same time. + _lf_initialize_trigger_objects(); } -/** +/** * Flag to prevent termination function from executing twice and to signal to background * threads to terminate. */ @@ -1207,95 +1237,100 @@ bool _lf_normal_termination = false; * memory allocated for tokens has not been freed. */ void termination(void) { - if (_lf_termination_executed) return; - _lf_termination_executed = true; + if (_lf_termination_executed) + return; + _lf_termination_executed = true; + + environment_t* env; + int num_envs = _lf_get_environments(&env); + // Invoke the code generated termination function. It terminates the federated related services. + // It should only be called for the top-level environment, which, by convention, is the first environment. + lf_terminate_execution(env); + + // In order to free tokens, we perform the same actions we would have for a new time step. 
+ for (int i = 0; i < num_envs; i++) { + if (!env[i].initialized) { + lf_print_warning("---- Environment %u was never initialized", env[i].id); + continue; + } + LF_PRINT_LOG("---- Terminating environment %u, normal termination: %d", env[i].id, _lf_normal_termination); + +#if !defined(LF_SINGLE_THREADED) + // Make sure all watchdog threads have stopped + _lf_watchdog_terminate_all(&env[i]); +#endif - environment_t *env; - int num_envs = _lf_get_environments(&env); - // Invoke the code generated termination function. It terminates the federated related services. - // It should only be called for the top-level environment, which, by convention, is the first environment. - lf_terminate_execution(env); + // Skip most cleanup on abnormal termination. + if (_lf_normal_termination) { + _lf_start_time_step(&env[i]); - // In order to free tokens, we perform the same actions we would have for a new time step. - for (int i = 0; i < num_envs; i++) { - if (!env[i].initialized) { - lf_print_warning("---- Environment %u was never initialized", env[i].id); - continue; - } - LF_PRINT_LOG("---- Terminating environment %u, normal termination: %d", env[i].id, _lf_normal_termination); - // Stop any tracing, if it is running. - // No need to acquire a mutex because if this is normal termination, all - // other threads have stopped, and if it's not, then acquiring a mutex could - // lead to a deadlock. - stop_trace_locked(env[i].trace); - - #if !defined(LF_SINGLE_THREADED) - // Make sure all watchdog threads have stopped - _lf_watchdog_terminate_all(&env[i]); - #endif - - // Skip most cleanup on abnormal termination. - if (_lf_normal_termination) { - _lf_start_time_step(&env[i]); - - #ifdef MODAL_REACTORS - // Free events and tokens suspended by modal reactors. - _lf_terminate_modal_reactors(&env[i]); - #endif - // If the event queue still has events on it, report that. 
- if (env[i].event_q != NULL && pqueue_size(env[i].event_q) > 0) { - lf_print_warning("---- There are %zu unprocessed future events on the event queue.", pqueue_size(env[i].event_q)); - event_t* event = (event_t*)pqueue_peek(env[i].event_q); - interval_t event_time = event->time - start_time; - lf_print_warning("---- The first future event has timestamp " PRINTF_TIME " after start time.", event_time); - } - // Print elapsed times. - // If these are negative, then the program failed to start up. - interval_t elapsed_time = lf_time_logical_elapsed(&env[i]); - if (elapsed_time >= 0LL) { - char time_buffer[29]; // 28 bytes is enough for the largest 64 bit number: 9,223,372,036,854,775,807 - lf_comma_separated_time(time_buffer, elapsed_time); - printf("---- Elapsed logical time (in nsec): %s\n", time_buffer); - - // If start_time is 0, then execution didn't get far enough along - // to initialize this. - if (start_time > 0LL) { - lf_comma_separated_time(time_buffer, lf_time_physical_elapsed()); - printf("---- Elapsed physical time (in nsec): %s\n", time_buffer); - } - } +#ifdef MODAL_REACTORS + // Free events and tokens suspended by modal reactors. + _lf_terminate_modal_reactors(&env[i]); +#endif + // If the event queue still has events on it, report that. + if (env[i].event_q != NULL && pqueue_size(env[i].event_q) > 0) { + lf_print_warning("---- There are %zu unprocessed future events on the event queue.", + pqueue_size(env[i].event_q)); + event_t* event = (event_t*)pqueue_peek(env[i].event_q); + interval_t event_time = event->time - start_time; + lf_print_warning("---- The first future event has timestamp " PRINTF_TIME " after start time.", event_time); + } + // Print elapsed times. + // If these are negative, then the program failed to start up. 
+ interval_t elapsed_time = lf_time_logical_elapsed(&env[i]); + if (elapsed_time >= 0LL) { + char time_buffer[29]; // 28 bytes is enough for the largest 64 bit number: 9,223,372,036,854,775,807 + lf_comma_separated_time(time_buffer, elapsed_time); + printf("---- Elapsed logical time (in nsec): %s\n", time_buffer); + + // If start_time is 0, then execution didn't get far enough along + // to initialize this. + if (start_time > 0LL) { + lf_comma_separated_time(time_buffer, lf_time_physical_elapsed()); + printf("---- Elapsed physical time (in nsec): %s\n", time_buffer); } + } } - // Skip most cleanup on abnormal termination. - if (_lf_normal_termination) { - _lf_free_all_tokens(); // Must be done before freeing reactors. + } + // Skip most cleanup on abnormal termination. + if (_lf_normal_termination) { + _lf_free_all_tokens(); // Must be done before freeing reactors. #if !defined NDEBUG - // Issue a warning if a memory leak has been detected. - if (_lf_count_payload_allocations > 0) { - lf_print_warning("Memory allocated for messages has not been freed."); - lf_print_warning("Number of unfreed messages: %d.", _lf_count_payload_allocations); - } - if (_lf_count_token_allocations > 0) { - lf_print_warning("Memory allocated for tokens has not been freed!"); - lf_print_warning("Number of unfreed tokens: %d.", _lf_count_token_allocations); - } + // Issue a warning if a memory leak has been detected. 
+ if (_lf_count_payload_allocations > 0) { + lf_print_warning("Memory allocated for messages has not been freed."); + lf_print_warning("Number of unfreed messages: %d.", _lf_count_payload_allocations); + } + if (_lf_count_token_allocations > 0) { + lf_print_warning("Memory allocated for tokens has not been freed!"); + lf_print_warning("Number of unfreed tokens: %d.", _lf_count_token_allocations); + } #endif #if !defined(LF_SINGLE_THREADED) - for (int i = 0; i < env->watchdogs_size; i++) { - if (env->watchdogs[i]->base->reactor_mutex != NULL) { - free(env->watchdogs[i]->base->reactor_mutex); - } - } + for (int i = 0; i < env->watchdogs_size; i++) { + if (env->watchdogs[i]->base->reactor_mutex != NULL) { + free(env->watchdogs[i]->base->reactor_mutex); + } + } #endif - lf_free_all_reactors(); + lf_free_all_reactors(); - // Free up memory associated with environment. - // Do this last so that printed warnings don't access freed memory. - for (int i = 0; i < num_envs; i++) { - environment_free(&env[i]); - } + // Free up memory associated with environment. + // Do this last so that printed warnings don't access freed memory. 
+ for (int i = 0; i < num_envs; i++) { + environment_free(&env[i]); + } #if defined LF_ENCLAVES - free_local_rti(); + free_local_rti(); #endif - } + } + lf_tracing_global_shutdown(); +} + +index_t lf_combine_deadline_and_level(interval_t deadline, int level) { + if (deadline > ULLONG_MAX >> 16) + return ((ULLONG_MAX >> 16) << 16) | level; + else + return (deadline << 16) | level; } diff --git a/core/tag.c b/core/tag.c index e236dd766..b45b67acc 100644 --- a/core/tag.c +++ b/core/tag.c @@ -15,7 +15,7 @@ #include "tag.h" #include "util.h" -#include "platform.h" +#include "low_level_platform.h" #include "environment.h" #include "reactor.h" #include "util.h" @@ -25,13 +25,7 @@ /** * An enum for specifying the desired tag when calling "lf_time" */ -typedef enum _lf_time_type { - LF_LOGICAL, - LF_PHYSICAL, - LF_ELAPSED_LOGICAL, - LF_ELAPSED_PHYSICAL, - LF_START -} _lf_time_type; +typedef enum _lf_time_type { LF_LOGICAL, LF_PHYSICAL, LF_ELAPSED_LOGICAL, LF_ELAPSED_PHYSICAL, LF_START } _lf_time_type; //////////////// Global variables declared in tag.h: @@ -40,207 +34,210 @@ instant_t start_time = NEVER; //////////////// Functions declared in tag.h -tag_t lf_tag(void *env) { - assert(env != GLOBAL_ENVIRONMENT); - return ((environment_t *)env)->current_tag; +tag_t lf_tag(void* env) { + assert(env != GLOBAL_ENVIRONMENT); + return ((environment_t*)env)->current_tag; } tag_t lf_tag_add(tag_t a, tag_t b) { - if (a.time == NEVER || b.time == NEVER) return NEVER_TAG; - if (a.time == FOREVER || b.time == FOREVER) return FOREVER_TAG; - if (b.time > 0) a.microstep = 0; // Ignore microstep of first arg if time of second is > 0. 
- tag_t result = {.time = a.time + b.time, .microstep = a.microstep + b.microstep}; - if (result.microstep < a.microstep) return FOREVER_TAG; - if (result.time < a.time && b.time > 0) return FOREVER_TAG; - if (result.time > a.time && b.time < 0) return NEVER_TAG; - return result; + if (a.time == NEVER || b.time == NEVER) + return NEVER_TAG; + if (a.time == FOREVER || b.time == FOREVER) + return FOREVER_TAG; + if (b.time > 0) + a.microstep = 0; // Ignore microstep of first arg if time of second is > 0. + tag_t result = {.time = a.time + b.time, .microstep = a.microstep + b.microstep}; + if (result.microstep < a.microstep) + return FOREVER_TAG; + if (result.time < a.time && b.time > 0) + return FOREVER_TAG; + if (result.time > a.time && b.time < 0) + return NEVER_TAG; + return result; } int lf_tag_compare(tag_t tag1, tag_t tag2) { - if (tag1.time < tag2.time) { - return -1; - } else if (tag1.time > tag2.time) { - return 1; - } else if (tag1.microstep < tag2.microstep) { - return -1; - } else if (tag1.microstep > tag2.microstep) { - return 1; - } else { - return 0; - } + if (tag1.time < tag2.time) { + return -1; + } else if (tag1.time > tag2.time) { + return 1; + } else if (tag1.microstep < tag2.microstep) { + return -1; + } else if (tag1.microstep > tag2.microstep) { + return 1; + } else { + return 0; + } } tag_t lf_delay_tag(tag_t tag, interval_t interval) { - if (tag.time == NEVER || interval < 0LL) return tag; - // Note that overflow in C is undefined for signed variables. - if (tag.time >= FOREVER - interval) return FOREVER_TAG; // Overflow. - tag_t result = tag; - if (interval == 0LL) { - // Note that unsigned variables will wrap on overflow. - // This is probably the only reasonable thing to do with overflowing - // microsteps. - result.microstep++; - } else { - result.time += interval; - result.microstep = 0; - } - return result; + if (tag.time == NEVER || interval < 0LL) + return tag; + // Note that overflow in C is undefined for signed variables. 
+ if (tag.time >= FOREVER - interval) + return FOREVER_TAG; // Overflow. + tag_t result = tag; + if (interval == 0LL) { + // Note that unsigned variables will wrap on overflow. + // This is probably the only reasonable thing to do with overflowing + // microsteps. + result.microstep++; + } else { + result.time += interval; + result.microstep = 0; + } + return result; } tag_t lf_delay_strict(tag_t tag, interval_t interval) { - tag_t result = lf_delay_tag(tag, interval); - if (interval != 0 && interval != NEVER && interval != FOREVER && result.time != NEVER && result.time != FOREVER) { - result.time -= 1; - result.microstep = UINT_MAX; - } - return result; + tag_t result = lf_delay_tag(tag, interval); + if (interval != 0 && interval != NEVER && interval != FOREVER && result.time != NEVER && result.time != FOREVER) { + result.time -= 1; + result.microstep = UINT_MAX; + } + return result; } -instant_t lf_time_logical(void *env) { - assert(env != GLOBAL_ENVIRONMENT); - return ((environment_t *) env)->current_tag.time; +instant_t lf_time_logical(void* env) { + assert(env != GLOBAL_ENVIRONMENT); + return ((environment_t*)env)->current_tag.time; } -interval_t lf_time_logical_elapsed(void *env) { - return lf_time_logical(env) - start_time; -} +interval_t lf_time_logical_elapsed(void* env) { return lf_time_logical(env) - start_time; } instant_t lf_time_physical(void) { - instant_t now, last_read_local; - // Get the current clock value - LF_ASSERTN(lf_clock_gettime(&now), "Failed to read physical clock."); - return now; - + instant_t now, last_read_local; + // Get the current clock value + LF_ASSERTN(lf_clock_gettime(&now), "Failed to read physical clock."); + return now; } -instant_t lf_time_physical_elapsed(void) { - return lf_time_physical() - start_time; -} +instant_t lf_time_physical_elapsed(void) { return lf_time_physical() - start_time; } -instant_t lf_time_start(void) { - return start_time; -} +instant_t lf_time_start(void) { return start_time; } size_t 
lf_readable_time(char* buffer, instant_t time) { - char* original_buffer = buffer; - bool lead = false; // Set to true when first clause has been printed. - if (time > WEEKS(1)) { - lead = true; - size_t printed = lf_comma_separated_time(buffer, time / WEEKS(1)); - time = time % WEEKS(1); - buffer += printed; - snprintf(buffer, 7, " weeks"); - buffer += 6; + if (time <= (instant_t)0) { + snprintf(buffer, 2, "0"); + return 1; + } + char* original_buffer = buffer; + bool lead = false; // Set to true when first clause has been printed. + if (time > WEEKS(1)) { + lead = true; + size_t printed = lf_comma_separated_time(buffer, time / WEEKS(1)); + time = time % WEEKS(1); + buffer += printed; + snprintf(buffer, 7, " weeks"); + buffer += 6; + } + if (time > DAYS(1)) { + if (lead == true) { + snprintf(buffer, 3, ", "); + buffer += 2; } - if (time > DAYS(1)) { - if (lead == true) { - snprintf(buffer, 3, ", "); - buffer += 2; - } - lead = true; - size_t printed = lf_comma_separated_time(buffer, time / DAYS(1)); - time = time % DAYS(1); - buffer += printed; - snprintf(buffer, 6, " days"); - buffer += 5; + lead = true; + size_t printed = lf_comma_separated_time(buffer, time / DAYS(1)); + time = time % DAYS(1); + buffer += printed; + snprintf(buffer, 3, " d"); + buffer += 2; + } + if (time > HOURS(1)) { + if (lead == true) { + snprintf(buffer, 3, ", "); + buffer += 2; } - if (time > HOURS(1)) { - if (lead == true) { - snprintf(buffer, 3, ", "); - buffer += 2; - } - lead = true; - size_t printed = lf_comma_separated_time(buffer, time / HOURS(1)); - time = time % HOURS(1); - buffer += printed; - snprintf(buffer, 7, " hours"); - buffer += 6; + lead = true; + size_t printed = lf_comma_separated_time(buffer, time / HOURS(1)); + time = time % HOURS(1); + buffer += printed; + snprintf(buffer, 3, " h"); + buffer += 2; + } + if (time > MINUTES(1)) { + if (lead == true) { + snprintf(buffer, 3, ", "); + buffer += 2; } - if (time > MINUTES(1)) { - if (lead == true) { - snprintf(buffer, 3, 
", "); - buffer += 2; - } - lead = true; - size_t printed = lf_comma_separated_time(buffer, time / MINUTES(1)); - time = time % MINUTES(1); - buffer += printed; - snprintf(buffer, 9, " minutes"); - buffer += 8; + lead = true; + size_t printed = lf_comma_separated_time(buffer, time / MINUTES(1)); + time = time % MINUTES(1); + buffer += printed; + snprintf(buffer, 5, " min"); + buffer += 4; + } + if (time > SECONDS(1)) { + if (lead == true) { + snprintf(buffer, 3, ", "); + buffer += 2; } - if (time > SECONDS(1)) { - if (lead == true) { - snprintf(buffer, 3, ", "); - buffer += 2; - } - lead = true; - size_t printed = lf_comma_separated_time(buffer, time / SECONDS(1)); - time = time % SECONDS(1); - buffer += printed; - snprintf(buffer, 9, " seconds"); - buffer += 8; + lead = true; + size_t printed = lf_comma_separated_time(buffer, time / SECONDS(1)); + time = time % SECONDS(1); + buffer += printed; + snprintf(buffer, 3, " s"); + buffer += 2; + } + if (time > (instant_t)0) { + if (lead == true) { + snprintf(buffer, 3, ", "); + buffer += 2; } - if (time > (instant_t)0) { - if (lead == true) { - snprintf(buffer, 3, ", "); - buffer += 2; - } - const char* units = "nanoseconds"; - if (time % MSEC(1) == (instant_t) 0) { - units = "milliseconds"; - time = time / MSEC(1); - } else if (time % USEC(1) == (instant_t) 0) { - units = "microseconds"; - time = time / USEC(1); - } - size_t printed = lf_comma_separated_time(buffer, time); - buffer += printed; - snprintf(buffer, 14, " %s", units); - buffer += strlen(units) + 1; - } else { - snprintf(buffer, 2, "0"); + const char* units = "ns"; + if (time % MSEC(1) == (instant_t)0) { + units = "ms"; + time = time / MSEC(1); + } else if (time % USEC(1) == (instant_t)0) { + units = "us"; + time = time / USEC(1); } - return (buffer - original_buffer); + size_t printed = lf_comma_separated_time(buffer, time); + buffer += printed; + snprintf(buffer, 3, " %s", units); + buffer += strlen(units) + 1; + } + return (buffer - original_buffer); } 
size_t lf_comma_separated_time(char* buffer, instant_t time) { - size_t result = 0; // The number of characters printed. - // If the number is zero, print it and return. - if (time == (instant_t)0) { - snprintf(buffer, 2, "0"); - return 1; - } - // If the number is negative, print a minus sign. - if (time < (instant_t)0) { - snprintf(buffer, 2, "-"); - buffer++; - result++; - } - int count = 0; - // Assume the time value is no larger than 64 bits. - instant_t clauses[7]; - while (time > (instant_t)0) { - clauses[count++] = time; - time = time/1000; - } - // Highest order clause should not be filled with zeros. - instant_t to_print = clauses[--count] % 1000; - snprintf(buffer, 5, "%lld", (long long)to_print); - if (to_print >= 100LL) { - buffer += 3; - result += 3; - } else if (to_print >= 10LL) { - buffer += 2; - result += 2; - } else { - buffer += 1; - result += 1; - } - while (count-- > 0) { - to_print = clauses[count] % 1000LL; - snprintf(buffer, 8, ",%03lld", (long long)to_print); - buffer += 4; - result += 4; - } - return result; + size_t result = 0; // The number of characters printed. + // If the number is zero, print it and return. + if (time == (instant_t)0) { + snprintf(buffer, 2, "0"); + return 1; + } + // If the number is negative, print a minus sign. + if (time < (instant_t)0) { + snprintf(buffer, 2, "-"); + buffer++; + result++; + } + int count = 0; + // Assume the time value is no larger than 64 bits. + instant_t clauses[7]; + while (time > (instant_t)0) { + clauses[count++] = time; + time = time / 1000; + } + // Highest order clause should not be filled with zeros. 
+ instant_t to_print = clauses[--count] % 1000; + snprintf(buffer, 5, "%lld", (long long)to_print); + if (to_print >= 100LL) { + buffer += 3; + result += 3; + } else if (to_print >= 10LL) { + buffer += 2; + result += 2; + } else { + buffer += 1; + result += 1; + } + while (count-- > 0) { + to_print = clauses[count] % 1000LL; + snprintf(buffer, 8, ",%03lld", (long long)to_print); + buffer += 4; + result += 4; + } + return result; } diff --git a/core/threaded/reactor_threaded.c b/core/threaded/reactor_threaded.c index 5d6e89c2a..3419c3483 100644 --- a/core/threaded/reactor_threaded.c +++ b/core/threaded/reactor_threaded.c @@ -5,7 +5,7 @@ * @author{Soroush Bateni } * @copyright (c) 2020-2024, The University of California at Berkeley. * License: BSD 2-clause - * @brief Runtime infrastructure for the threaded version of the C target of Lingua Franca. + * @brief Runtime infrastructure for the threaded version of the C target of Lingua Franca. */ #if !defined LF_SINGLE_THREADED #ifndef NUMBER_OF_WORKERS @@ -18,7 +18,7 @@ #include #include "lf_types.h" -#include "platform.h" +#include "low_level_platform.h" #include "reactor_threaded.h" #include "reactor.h" #include "scheduler.h" @@ -44,78 +44,74 @@ extern instant_t start_time; /** * Global mutex, used for synchronizing across environments. Mainly used for token-management and tracing -*/ + */ lf_mutex_t global_mutex; -void _lf_increment_tag_barrier_locked(environment_t *env, tag_t future_tag) { - assert(env != GLOBAL_ENVIRONMENT); - - // Check if future_tag is after stop tag. - // This will only occur when a federate receives a timed message with - // a tag that is after the stop tag - if (lf_is_tag_after_stop_tag(env, future_tag)) { - lf_print_warning("Attempting to raise a barrier after the stop tag."); - future_tag = env->stop_tag; - } - - // Check to see if future_tag is actually in the future. - if (lf_tag_compare(future_tag, env->current_tag) > 0) { - // Future tag is actually in the future. 
- // See whether it is smaller than any pre-existing barrier. - if (lf_tag_compare(future_tag, env->barrier.horizon) < 0) { - // The future tag is smaller than the current horizon of the barrier. - // Therefore, we should prevent logical time from reaching the - // future tag. - env->barrier.horizon = future_tag; - LF_PRINT_DEBUG("Raised barrier at elapsed tag " PRINTF_TAG ".", - env->barrier.horizon.time - start_time, - env->barrier.horizon.microstep); - } - } else { - // The future_tag is not in the future. - - // One possibility is that the incoming message has violated the STP offset. - // Another possibility is that the message is coming from a zero-delay loop, - // and port absent reactions are waiting. - - // Prevent logical time from advancing further so that the measure of - // STP violation properly reflects the amount of time (logical or physical) - // that has elapsed after the incoming message would have violated the STP offset. - env->barrier.horizon = env->current_tag; - env->barrier.horizon.microstep++; - LF_PRINT_DEBUG("Raised barrier at elapsed tag " PRINTF_TAG ".", - env->barrier.horizon.time - start_time, - env->barrier.horizon.microstep); +void _lf_increment_tag_barrier_locked(environment_t* env, tag_t future_tag) { + assert(env != GLOBAL_ENVIRONMENT); + + // Check if future_tag is after stop tag. + // This will only occur when a federate receives a timed message with + // a tag that is after the stop tag + if (lf_is_tag_after_stop_tag(env, future_tag)) { + lf_print_warning("Attempting to raise a barrier after the stop tag."); + future_tag = env->stop_tag; + } + + // Check to see if future_tag is actually in the future. + if (lf_tag_compare(future_tag, env->current_tag) > 0) { + // Future tag is actually in the future. + // See whether it is smaller than any pre-existing barrier. + if (lf_tag_compare(future_tag, env->barrier.horizon) < 0) { + // The future tag is smaller than the current horizon of the barrier. 
+ // Therefore, we should prevent logical time from reaching the + // future tag. + env->barrier.horizon = future_tag; + LF_PRINT_DEBUG("Raised barrier at elapsed tag " PRINTF_TAG ".", env->barrier.horizon.time - start_time, + env->barrier.horizon.microstep); } - // Increment the number of requestors - env->barrier.requestors++; + } else { + // The future_tag is not in the future. + + // One possibility is that the incoming message has violated the STP offset. + // Another possibility is that the message is coming from a zero-delay loop, + // and port absent reactions are waiting. + + // Prevent logical time from advancing further so that the measure of + // STP violation properly reflects the amount of time (logical or physical) + // that has elapsed after the incoming message would have violated the STP offset. + env->barrier.horizon = env->current_tag; + env->barrier.horizon.microstep++; + LF_PRINT_DEBUG("Raised barrier at elapsed tag " PRINTF_TAG ".", env->barrier.horizon.time - start_time, + env->barrier.horizon.microstep); + } + // Increment the number of requestors + env->barrier.requestors++; } -void _lf_increment_tag_barrier(environment_t *env, tag_t future_tag) { - assert(env != GLOBAL_ENVIRONMENT); - LF_MUTEX_LOCK(&env->mutex); - _lf_increment_tag_barrier_locked(env, future_tag); - LF_MUTEX_UNLOCK(&env->mutex); +void _lf_increment_tag_barrier(environment_t* env, tag_t future_tag) { + assert(env != GLOBAL_ENVIRONMENT); + LF_MUTEX_LOCK(&env->mutex); + _lf_increment_tag_barrier_locked(env, future_tag); + LF_MUTEX_UNLOCK(&env->mutex); } void _lf_decrement_tag_barrier_locked(environment_t* env) { - assert(env != GLOBAL_ENVIRONMENT); - // Decrement the number of requestors for the tag barrier. - env->barrier.requestors--; - // Check to see if the semaphore is negative, which indicates that - // a mismatched call was placed for this function. 
- if (env->barrier.requestors < 0) { - lf_print_error_and_exit("Mismatched use of _lf_increment_tag_barrier()" - " and _lf_decrement_tag_barrier_locked()."); - } else if (env->barrier.requestors == 0) { - // When the semaphore reaches zero, reset the horizon to forever. - env->barrier.horizon = FOREVER_TAG; - // Notify waiting threads that the semaphore has reached zero. - lf_cond_broadcast(&env->global_tag_barrier_requestors_reached_zero); - } - LF_PRINT_DEBUG("Barrier is at tag " PRINTF_TAG ".", - env->barrier.horizon.time, - env->barrier.horizon.microstep); + assert(env != GLOBAL_ENVIRONMENT); + // Decrement the number of requestors for the tag barrier. + env->barrier.requestors--; + // Check to see if the semaphore is negative, which indicates that + // a mismatched call was placed for this function. + if (env->barrier.requestors < 0) { + lf_print_error_and_exit("Mismatched use of _lf_increment_tag_barrier()" + " and _lf_decrement_tag_barrier_locked()."); + } else if (env->barrier.requestors == 0) { + // When the semaphore reaches zero, reset the horizon to forever. + env->barrier.horizon = FOREVER_TAG; + // Notify waiting threads that the semaphore has reached zero. + lf_cond_broadcast(&env->global_tag_barrier_requestors_reached_zero); + } + LF_PRINT_DEBUG("Barrier is at tag " PRINTF_TAG ".", env->barrier.horizon.time, env->barrier.horizon.microstep); } /** @@ -141,63 +137,61 @@ void _lf_decrement_tag_barrier_locked(environment_t* env) { * @return 0 if no wait was needed and 1 if a wait actually occurred. */ int _lf_wait_on_tag_barrier(environment_t* env, tag_t proposed_tag) { - assert(env != GLOBAL_ENVIRONMENT); + assert(env != GLOBAL_ENVIRONMENT); - // Check the most common case first. - if (env->barrier.requestors == 0) return 0; + // Check the most common case first. 
+ if (env->barrier.requestors == 0) + return 0; - // Do not wait for tags after the stop tag + // Do not wait for tags after the stop tag + if (lf_is_tag_after_stop_tag(env, proposed_tag)) { + proposed_tag = env->stop_tag; + } + // Do not wait forever + if (proposed_tag.time == FOREVER) { + lf_print_warning("Global tag barrier should not handle FOREVER proposed tags."); + return 0; + } + int result = 0; + // Wait until the global barrier semaphore on logical time is zero + // and the proposed_time is larger than or equal to the horizon. + while (env->barrier.requestors > 0 && lf_tag_compare(proposed_tag, env->barrier.horizon) >= 0) { + result = 1; + LF_PRINT_LOG("Waiting on barrier for tag " PRINTF_TAG ".", proposed_tag.time - start_time, proposed_tag.microstep); + // Wait until no requestor remains for the barrier on logical time + lf_cond_wait(&env->global_tag_barrier_requestors_reached_zero); + + // The stop tag may have changed during the wait. if (lf_is_tag_after_stop_tag(env, proposed_tag)) { - proposed_tag = env->stop_tag; - } - // Do not wait forever - if (proposed_tag.time == FOREVER) { - lf_print_warning("Global tag barrier should not handle FOREVER proposed tags."); - return 0; + proposed_tag = env->stop_tag; } - int result = 0; - // Wait until the global barrier semaphore on logical time is zero - // and the proposed_time is larger than or equal to the horizon. - while (env->barrier.requestors > 0 - && lf_tag_compare(proposed_tag, env->barrier.horizon) >= 0 - ) { - result = 1; - LF_PRINT_LOG("Waiting on barrier for tag " PRINTF_TAG ".", proposed_tag.time - start_time, proposed_tag.microstep); - // Wait until no requestor remains for the barrier on logical time - lf_cond_wait(&env->global_tag_barrier_requestors_reached_zero); - - // The stop tag may have changed during the wait. 
- if (lf_is_tag_after_stop_tag(env, proposed_tag)) { - proposed_tag = env->stop_tag; - } - } - LF_PRINT_LOG("Finished waiting on barrier for tag " PRINTF_TAG ".", proposed_tag.time - start_time, proposed_tag.microstep); - return result; + } + LF_PRINT_LOG("Finished waiting on barrier for tag " PRINTF_TAG ".", proposed_tag.time - start_time, + proposed_tag.microstep); + return result; } void lf_set_present(lf_port_base_t* port) { - if (!port->source_reactor) return; - environment_t *env = port->source_reactor->environment; - bool* is_present_field = &port->is_present; - int ipfas = lf_atomic_fetch_add32(&env->is_present_fields_abbreviated_size, 1); - if (ipfas < env->is_present_fields_size) { - env->is_present_fields_abbreviated[ipfas] = is_present_field; - } - *is_present_field = true; - - // Support for sparse destination multiports. - if(port->sparse_record - && port->destination_channel >= 0 - && port->sparse_record->size >= 0) { - int next = lf_atomic_fetch_add32(&port->sparse_record->size, 1); - if (next >= port->sparse_record->capacity) { - // Buffer is full. Have to revert to the classic iteration. - port->sparse_record->size = -1; - } else { - port->sparse_record->present_channels[next] - = port->destination_channel; - } + if (!port->source_reactor) + return; + environment_t* env = port->source_reactor->environment; + bool* is_present_field = &port->is_present; + int ipfas = lf_atomic_fetch_add32(&env->is_present_fields_abbreviated_size, 1); + if (ipfas < env->is_present_fields_size) { + env->is_present_fields_abbreviated[ipfas] = is_present_field; + } + *is_present_field = true; + + // Support for sparse destination multiports. + if (port->sparse_record && port->destination_channel >= 0 && port->sparse_record->size >= 0) { + int next = lf_atomic_fetch_add32(&port->sparse_record->size, 1); + if (next >= port->sparse_record->capacity) { + // Buffer is full. Have to revert to the classic iteration. 
+ port->sparse_record->size = -1; + } else { + port->sparse_record->present_channels[next] = port->destination_channel; } + } } /** @@ -207,7 +201,7 @@ void lf_set_present(lf_port_base_t* port) { * * If an event is put on the event queue during the wait, then the wait is * interrupted and this function returns false. It also returns false if the - * timeout time is reached before the wait has completed. Note this this could + * timeout time is reached before the wait has completed. Note this this could * return true even if the a new event was placed on the queue if that event * time matches or exceeds the specified time. * @@ -227,50 +221,49 @@ void lf_set_present(lf_port_base_t* port) { * was reached. */ bool wait_until(environment_t* env, instant_t logical_time, lf_cond_t* condition) { - LF_PRINT_DEBUG("-------- Waiting until physical time matches logical time " PRINTF_TIME, logical_time); - interval_t wait_until_time = logical_time; + LF_PRINT_DEBUG("-------- Waiting until physical time matches logical time " PRINTF_TIME, logical_time); + interval_t wait_until_time = logical_time; #ifdef FEDERATED_DECENTRALIZED // Only apply the STA if coordination is decentralized - // Apply the STA to the logical time - // Prevent an overflow - if (start_time != logical_time && wait_until_time < FOREVER - lf_fed_STA_offset) { - // If wait_time is not forever - LF_PRINT_DEBUG("Adding STA " PRINTF_TIME " to wait until time " PRINTF_TIME ".", - lf_fed_STA_offset, - wait_until_time - start_time); - wait_until_time += lf_fed_STA_offset; - } + // Apply the STA to the logical time + // Prevent an overflow + if (start_time != logical_time && wait_until_time < FOREVER - lf_fed_STA_offset) { + // If wait_time is not forever + LF_PRINT_DEBUG("Adding STA " PRINTF_TIME " to wait until time " PRINTF_TIME ".", lf_fed_STA_offset, + wait_until_time - start_time); + wait_until_time += lf_fed_STA_offset; + } #endif - if (!fast) { - // Check whether we actually need to wait, or if we have 
already passed the timepoint. - interval_t wait_duration = wait_until_time - lf_time_physical(); - if (wait_duration < MIN_SLEEP_DURATION) { - LF_PRINT_DEBUG("Wait time " PRINTF_TIME " is less than MIN_SLEEP_DURATION " PRINTF_TIME ". Skipping wait.", - wait_duration, MIN_SLEEP_DURATION); - return true; - } - - // We do the sleep on the cond var so we can be awakened by the - // asynchronous scheduling of a physical action. lf_clock_cond_timedwait - // returns 0 if it is awakened before the timeout. Hence, we want to run - // it repeatedly until either it returns non-zero or the current - // physical time matches or exceeds the logical time. - if (lf_clock_cond_timedwait(condition, wait_until_time) != LF_TIMEOUT) { - LF_PRINT_DEBUG("-------- wait_until interrupted before timeout."); - - // Wait did not time out, which means that there - // may have been an asynchronous call to lf_schedule(). - // Continue waiting. - // Do not adjust logical tag here. If there was an asynchronous - // call to lf_schedule(), it will have put an event on the event queue, - // and logical tag will be set to that time when that event is pulled. - return false; - } else { - // Reached timeout. - LF_PRINT_DEBUG("-------- Returned from wait, having waited " PRINTF_TIME " ns.", wait_duration); - return true; - } + if (!fast) { + // Check whether we actually need to wait, or if we have already passed the timepoint. + interval_t wait_duration = wait_until_time - lf_time_physical(); + if (wait_duration < MIN_SLEEP_DURATION) { + LF_PRINT_DEBUG("Wait time " PRINTF_TIME " is less than MIN_SLEEP_DURATION " PRINTF_TIME ". Skipping wait.", + wait_duration, MIN_SLEEP_DURATION); + return true; + } + + // We do the sleep on the cond var so we can be awakened by the + // asynchronous scheduling of a physical action. lf_clock_cond_timedwait + // returns 0 if it is awakened before the timeout. 
Hence, we want to run + // it repeatedly until either it returns non-zero or the current + // physical time matches or exceeds the logical time. + if (lf_clock_cond_timedwait(condition, wait_until_time) != LF_TIMEOUT) { + LF_PRINT_DEBUG("-------- wait_until interrupted before timeout."); + + // Wait did not time out, which means that there + // may have been an asynchronous call to lf_schedule(). + // Continue waiting. + // Do not adjust logical tag here. If there was an asynchronous + // call to lf_schedule(), it will have put an event on the event queue, + // and logical tag will be set to that time when that event is pulled. + return false; + } else { + // Reached timeout. + LF_PRINT_DEBUG("-------- Returned from wait, having waited " PRINTF_TIME " ns.", wait_duration); + return true; } - return true; + } + return true; } /** @@ -279,39 +272,38 @@ bool wait_until(environment_t* env, instant_t logical_time, lf_cond_t* condition * or, is a stop_time (timeout time) has been set, the stop time. * @param env Environment within which we are executing. */ -tag_t get_next_event_tag(environment_t *env) { - assert(env != GLOBAL_ENVIRONMENT); - - // Peek at the earliest event in the event queue. - event_t* event = (event_t*)pqueue_peek(env->event_q); - tag_t next_tag = FOREVER_TAG; - if (event != NULL) { - // There is an event in the event queue. - if (event->time < env->current_tag.time) { - lf_print_error_and_exit("get_next_event_tag(): Earliest event on the event queue (" PRINTF_TIME ") is " - "earlier than the current time (" PRINTF_TIME ").", - event->time - start_time, - env->current_tag.time - start_time); - } - - next_tag.time = event->time; - if (next_tag.time == env->current_tag.time) { - LF_PRINT_DEBUG("Earliest event matches current time. Incrementing microstep. 
Event is dummy: %d.", - event->is_dummy); - next_tag.microstep = env->current_tag.microstep + 1; - } else { - next_tag.microstep = 0; - } +tag_t get_next_event_tag(environment_t* env) { + assert(env != GLOBAL_ENVIRONMENT); + + // Peek at the earliest event in the event queue. + event_t* event = (event_t*)pqueue_peek(env->event_q); + tag_t next_tag = FOREVER_TAG; + if (event != NULL) { + // There is an event in the event queue. + if (event->time < env->current_tag.time) { + lf_print_error_and_exit("get_next_event_tag(): Earliest event on the event queue (" PRINTF_TIME ") is " + "earlier than the current time (" PRINTF_TIME ").", + event->time - start_time, env->current_tag.time - start_time); } - // If a timeout tag was given, adjust the next_tag from the - // event tag to that timeout tag. - if (lf_is_tag_after_stop_tag(env, next_tag)) { - next_tag = env->stop_tag; + next_tag.time = event->time; + if (next_tag.time == env->current_tag.time) { + LF_PRINT_DEBUG("Earliest event matches current time. Incrementing microstep. Event is dummy: %d.", + event->is_dummy); + next_tag.microstep = env->current_tag.microstep + 1; + } else { + next_tag.microstep = 0; } - LF_PRINT_LOG("Earliest event on the event queue (or stop time if empty) is " PRINTF_TAG ". Event queue has size %zu.", - next_tag.time - start_time, next_tag.microstep, pqueue_size(env->event_q)); - return next_tag; + } + + // If a timeout tag was given, adjust the next_tag from the + // event tag to that timeout tag. + if (lf_is_tag_after_stop_tag(env, next_tag)) { + next_tag = env->stop_tag; + } + LF_PRINT_LOG("Earliest event on the event queue (or stop time if empty) is " PRINTF_TAG ". 
Event queue has size %zu.", + next_tag.time - start_time, next_tag.microstep, pqueue_size(env->event_q)); + return next_tag; } /** @@ -330,11 +322,11 @@ tag_t get_next_event_tag(environment_t *env) { */ tag_t send_next_event_tag(environment_t* env, tag_t tag, bool wait_for_reply) { #if defined(FEDERATED_CENTRALIZED) - return lf_send_next_event_tag(env, tag, wait_for_reply); + return lf_send_next_event_tag(env, tag, wait_for_reply); #elif defined(LF_ENCLAVES) - return rti_next_event_tag_locked(env->enclave_info, tag); + return rti_next_event_tag_locked(env->enclave_info, tag); #else - return tag; + return tag; #endif } @@ -358,149 +350,151 @@ tag_t send_next_event_tag(environment_t* env, tag_t tag, bool wait_for_reply) { * This does not acquire the mutex lock. It assumes the lock is already held. * @param env Environment within which we are executing. */ -void _lf_next_locked(environment_t *env) { - assert(env != GLOBAL_ENVIRONMENT); +void _lf_next_locked(environment_t* env) { + assert(env != GLOBAL_ENVIRONMENT); #ifdef MODAL_REACTORS - // Perform mode transitions - _lf_handle_mode_changes(env); + // Perform mode transitions + _lf_handle_mode_changes(env); #endif - // Get the tag of the next event on the event queue. - tag_t next_tag = get_next_event_tag(env); + // Get the tag of the next event on the event queue. + tag_t next_tag = get_next_event_tag(env); #if defined LF_ENCLAVES - // Request permission to advance time. This call might block. - tag_t grant_tag = rti_next_event_tag_locked(env->enclave_info, next_tag); - - // If we received are granted a tag which is less than the requested tag - // then we return and re-do the next function. We might have gotten a new - // event on the event queue. - if (lf_tag_compare(grant_tag, next_tag) < 0) return; - - // Next event might have changed while waiting for the TAG + // Request permission to advance time. This call might block. 
+ tag_t grant_tag = rti_next_event_tag_locked(env->enclave_info, next_tag); + + // If we received are granted a tag which is less than the requested tag + // then we return and re-do the next function. We might have gotten a new + // event on the event queue. + if (lf_tag_compare(grant_tag, next_tag) < 0) + return; + + // Next event might have changed while waiting for the TAG + next_tag = get_next_event_tag(env); + + // Check for starvation. If our next tag is FOREVER_TAG now. This means that + // we have no events on our event queue and that the RTI has granted us a TAG + // to advance to FOREVER. I.e. all upstream enclaves have terminated and sent + // an LTC for FOREVER. We can, in this case, terminate the current enclave. + if (!keepalive_specified && lf_tag_compare(next_tag, FOREVER_TAG) == 0) { + lf_set_stop_tag(env, (tag_t){.time = env->current_tag.time, .microstep = env->current_tag.microstep + 1}); next_tag = get_next_event_tag(env); - - // Check for starvation. If our next tag is FOREVER_TAG now. This means that - // we have no events on our event queue and that the RTI has granted us a TAG - // to advance to FOREVER. I.e. all upstream enclaves have terminated and sent - // an LTC for FOREVER. We can, in this case, terminate the current enclave. - if(!keepalive_specified && lf_tag_compare(next_tag, FOREVER_TAG) == 0) { - lf_set_stop_tag(env, (tag_t){.time=env->current_tag.time,.microstep=env->current_tag.microstep+1}); - next_tag = get_next_event_tag(env); - } + } #elif defined FEDERATED_CENTRALIZED - // In case this is in a federation with centralized coordination, notify - // the RTI of the next earliest tag at which this federate might produce - // an event. This function may block until it is safe to advance the current - // tag to the next tag. Specifically, it blocks if there are upstream - // federates. If an action triggers during that wait, it will unblock - // and return with a time (typically) less than the next_time. 
- tag_t grant_tag = send_next_event_tag(env, next_tag, true); // true means this blocks. - while (lf_tag_compare(grant_tag, next_tag) < 0) { - next_tag = get_next_event_tag(env); - grant_tag = send_next_event_tag(env, next_tag, true); - } - // Granted tag is greater than or equal to next event tag that we sent to the RTI. - // Since send_next_event_tag releases the mutex lock internally, we need to check - // again for what the next tag is (e.g., the stop time could have changed). + // In case this is in a federation with centralized coordination, notify + // the RTI of the next earliest tag at which this federate might produce + // an event. This function may block until it is safe to advance the current + // tag to the next tag. Specifically, it blocks if there are upstream + // federates. If an action triggers during that wait, it will unblock + // and return with a time (typically) less than the next_time. + tag_t grant_tag = send_next_event_tag(env, next_tag, true); // true means this blocks. + while (lf_tag_compare(grant_tag, next_tag) < 0) { next_tag = get_next_event_tag(env); - - // FIXME: Do starvation analysis for centralized coordination. - // Specifically, if the event queue is empty on *all* federates, this - // can become known to the RTI which can then stop execution. - // Hence, it will no longer be necessary to force keepalive to be true - // for all federated execution. With centralized coordination, we could - // allow keepalive to be either true or false and could get the same - // behavior with centralized coordination as with unfederated execution. - -#else // not FEDERATED_CENTRALIZED nor LF_ENCLAVES - if (pqueue_peek(env->event_q) == NULL && !keepalive_specified) { - // There is no event on the event queue and keepalive is false. - // No event in the queue - // keepalive is not set so we should stop. 
- // Note that federated programs with decentralized coordination always have - // keepalive = true - lf_set_stop_tag(env, (tag_t){.time=env->current_tag.time,.microstep=env->current_tag.microstep+1}); - - // Stop tag has changed. Need to check next_tag again. - next_tag = get_next_event_tag(env); - } + grant_tag = send_next_event_tag(env, next_tag, true); + } + // Granted tag is greater than or equal to next event tag that we sent to the RTI. + // Since send_next_event_tag releases the mutex lock internally, we need to check + // again for what the next tag is (e.g., the stop time could have changed). + next_tag = get_next_event_tag(env); + + // FIXME: Do starvation analysis for centralized coordination. + // Specifically, if the event queue is empty on *all* federates, this + // can become known to the RTI which can then stop execution. + // Hence, it will no longer be necessary to force keepalive to be true + // for all federated execution. With centralized coordination, we could + // allow keepalive to be either true or false and could get the same + // behavior with centralized coordination as with unfederated execution. + +#else // not FEDERATED_CENTRALIZED nor LF_ENCLAVES + if (pqueue_peek(env->event_q) == NULL && !keepalive_specified) { + // There is no event on the event queue and keepalive is false. + // No event in the queue + // keepalive is not set so we should stop. + // Note that federated programs with decentralized coordination always have + // keepalive = true + lf_set_stop_tag(env, (tag_t){.time = env->current_tag.time, .microstep = env->current_tag.microstep + 1}); + + // Stop tag has changed. Need to check next_tag again. + next_tag = get_next_event_tag(env); + } #endif - // Wait for physical time to advance to the next event time (or stop time). - // This can be interrupted if a physical action triggers (e.g., a message - // arrives from an upstream federate or a local physical action triggers). 
- LF_PRINT_LOG("Waiting until elapsed time " PRINTF_TIME ".", (next_tag.time - start_time)); - while (!wait_until(env, next_tag.time, &env->event_q_changed)) { - LF_PRINT_DEBUG("_lf_next_locked(): Wait until time interrupted."); - // Sleep was interrupted. Check for a new next_event. - // The interruption could also have been due to a call to lf_request_stop(). - next_tag = get_next_event_tag(env); - - // If this (possibly new) next tag is past the stop time, return. - if (lf_is_tag_after_stop_tag(env, next_tag)) { - return; - } - } - // A wait occurs even if wait_until() returns true, which means that the - // tag on the head of the event queue may have changed. + // Wait for physical time to advance to the next event time (or stop time). + // This can be interrupted if a physical action triggers (e.g., a message + // arrives from an upstream federate or a local physical action triggers). + LF_PRINT_LOG("Waiting until elapsed time " PRINTF_TIME ".", (next_tag.time - start_time)); + while (!wait_until(env, next_tag.time, &env->event_q_changed)) { + LF_PRINT_DEBUG("_lf_next_locked(): Wait until time interrupted."); + // Sleep was interrupted. Check for a new next_event. + // The interruption could also have been due to a call to lf_request_stop(). next_tag = get_next_event_tag(env); // If this (possibly new) next tag is past the stop time, return. - if (lf_is_tag_after_stop_tag(env, next_tag)) { // lf_tag_compare(tag, stop_tag) > 0 - return; + if (lf_is_tag_after_stop_tag(env, next_tag)) { + return; } + } + // A wait occurs even if wait_until() returns true, which means that the + // tag on the head of the event queue may have changed. + next_tag = get_next_event_tag(env); + + // If this (possibly new) next tag is past the stop time, return. + if (lf_is_tag_after_stop_tag(env, next_tag)) { // lf_tag_compare(tag, stop_tag) > 0 + return; + } - LF_PRINT_DEBUG("Physical time is ahead of next tag time by " PRINTF_TIME ". 
This should be small unless -fast is used.", - lf_time_physical() - next_tag.time); + LF_PRINT_DEBUG("Physical time is ahead of next tag time by " PRINTF_TIME + ". This should be small unless -fast is used.", + lf_time_physical() - next_tag.time); #ifdef FEDERATED - // In federated execution (at least under decentralized coordination), - // it is possible that an incoming message has been partially read, - // enough to see its tag. To prevent it from becoming tardy, the thread - // that is reading the message has set a barrier to prevent logical time - // from exceeding the timestamp of the message. It will remove that barrier - // once the complete message has been read. Also, if a federate requests - // to stop exeuction barriers will used while reaching a consensus. - // Here, we wait for that barrier to be removed, if appropriate. - if(_lf_wait_on_tag_barrier(env, next_tag)) { - // A wait actually occurred, so the next_tag may have changed again. - next_tag = get_next_event_tag(env); - } + // In federated execution (at least under decentralized coordination), + // it is possible that an incoming message has been partially read, + // enough to see its tag. To prevent it from becoming tardy, the thread + // that is reading the message has set a barrier to prevent logical time + // from exceeding the timestamp of the message. It will remove that barrier + // once the complete message has been read. Also, if a federate requests + // to stop exeuction barriers will used while reaching a consensus. + // Here, we wait for that barrier to be removed, if appropriate. + if (_lf_wait_on_tag_barrier(env, next_tag)) { + // A wait actually occurred, so the next_tag may have changed again. 
+ next_tag = get_next_event_tag(env); + } #endif // FEDERATED - // If the first event in the event queue has a tag greater than or equal to the - // stop time, and the current tag matches the stop tag (meaning that we have already - // executed microstep 0 at the timeout time), then we are done. The above code prevents the next_tag - // from exceeding the stop_tag, so we have to do further checks if - // they are equal. - if (lf_tag_compare(next_tag, env->stop_tag) >= 0 && lf_tag_compare(env->current_tag, env->stop_tag) >= 0) { - // If we pop anything further off the event queue with this same time or larger, - // then it will be assigned a tag larger than the stop tag. - return; - } - - // At this point, finally, we have an event to process. - _lf_advance_logical_time(env, next_tag.time); - - _lf_start_time_step(env); - - if (lf_tag_compare(env->current_tag, env->stop_tag) >= 0) { - // Pop shutdown events - LF_PRINT_DEBUG("Scheduling shutdown reactions."); - _lf_trigger_shutdown_reactions(env); - } - - // Pop all events from event_q with timestamp equal to env->current_tag.time, - // extract all the reactions triggered by these events, and - // stick them into the reaction queue. - _lf_pop_events(env); + // If the first event in the event queue has a tag greater than or equal to the + // stop time, and the current tag matches the stop tag (meaning that we have already + // executed microstep 0 at the timeout time), then we are done. The above code prevents the next_tag + // from exceeding the stop_tag, so we have to do further checks if + // they are equal. + if (lf_tag_compare(next_tag, env->stop_tag) >= 0 && lf_tag_compare(env->current_tag, env->stop_tag) >= 0) { + // If we pop anything further off the event queue with this same time or larger, + // then it will be assigned a tag larger than the stop tag. + return; + } + + // At this point, finally, we have an event to process. 
+ _lf_advance_logical_time(env, next_tag.time); + + _lf_start_time_step(env); + + if (lf_tag_compare(env->current_tag, env->stop_tag) >= 0) { + // Pop shutdown events + LF_PRINT_DEBUG("Scheduling shutdown reactions."); + _lf_trigger_shutdown_reactions(env); + } + + // Pop all events from event_q with timestamp equal to env->current_tag.time, + // extract all the reactions triggered by these events, and + // stick them into the reaction queue. + _lf_pop_events(env); #ifdef FEDERATED - lf_enqueue_port_absent_reactions(env); - // _lf_pop_events may have set some triggers present. - extern federate_instance_t _fed; - lf_update_max_level(_fed.last_TAG, _fed.is_last_TAG_provisional); + lf_enqueue_port_absent_reactions(env); + // _lf_pop_events may have set some triggers present. + extern federate_instance_t _fed; + lf_update_max_level(_fed.last_TAG, _fed.is_last_TAG_provisional); #endif } @@ -511,76 +505,76 @@ bool lf_stop_requested = false; // See reactor.h for docs. void lf_request_stop(void) { - // If a requested stop is pending, return without doing anything. - LF_PRINT_LOG("lf_request_stop() has been called."); - LF_MUTEX_LOCK(&global_mutex); - if (lf_stop_requested) { - LF_MUTEX_UNLOCK(&global_mutex); - LF_PRINT_LOG("Ignoring redundant lf_request_stop() call."); - return; - } - lf_stop_requested = true; + // If a requested stop is pending, return without doing anything. + LF_PRINT_LOG("lf_request_stop() has been called."); + LF_MUTEX_LOCK(&global_mutex); + if (lf_stop_requested) { LF_MUTEX_UNLOCK(&global_mutex); - - // Iterate over scheduling enclaves to find their maximum current tag - // and set a barrier for tag advancement for each enclave. 
- tag_t max_current_tag = NEVER_TAG; - environment_t* env; - int num_environments = _lf_get_environments(&env); - for (int i = 0; i < num_environments; i++) { - LF_MUTEX_LOCK(&env[i].mutex); - if (lf_tag_compare(env[i].current_tag, max_current_tag) > 0) { - max_current_tag = env[i].current_tag; - } - // Set a barrier to prevent the enclave from advancing past the so-far maximum current tag. - _lf_increment_tag_barrier_locked(&env[i], max_current_tag); - LF_MUTEX_UNLOCK(&env[i].mutex); + LF_PRINT_LOG("Ignoring redundant lf_request_stop() call."); + return; + } + lf_stop_requested = true; + LF_MUTEX_UNLOCK(&global_mutex); + + // Iterate over scheduling enclaves to find their maximum current tag + // and set a barrier for tag advancement for each enclave. + tag_t max_current_tag = NEVER_TAG; + environment_t* env; + int num_environments = _lf_get_environments(&env); + for (int i = 0; i < num_environments; i++) { + LF_MUTEX_LOCK(&env[i].mutex); + if (lf_tag_compare(env[i].current_tag, max_current_tag) > 0) { + max_current_tag = env[i].current_tag; } + // Set a barrier to prevent the enclave from advancing past the so-far maximum current tag. + _lf_increment_tag_barrier_locked(&env[i], max_current_tag); + LF_MUTEX_UNLOCK(&env[i].mutex); + } #ifdef FEDERATED - // In the federated case, the RTI might grant a - // later stop tag than the current tag. The above code has raised - // a barrier no greater than max_current_tag. - if (lf_send_stop_request_to_rti(max_current_tag) != 0) { - // Message was not sent to the RTI. - // Decrement the barriers to reverse our previous increment. - for (int i = 0; i < num_environments; i++) { - LF_MUTEX_LOCK(&env[i].mutex); - _lf_decrement_tag_barrier_locked(&env[i]); - LF_MUTEX_UNLOCK(&env[i].mutex); - } - } -#else - // In a non-federated program, the stop_tag will be the next microstep after max_current_tag. - // Iterate over environments to set their stop tag and release their barrier. 
+ // In the federated case, the RTI might grant a + // later stop tag than the current tag. The above code has raised + // a barrier no greater than max_current_tag. + if (lf_send_stop_request_to_rti(max_current_tag) != 0) { + // Message was not sent to the RTI. + // Decrement the barriers to reverse our previous increment. for (int i = 0; i < num_environments; i++) { - LF_MUTEX_LOCK(&env[i].mutex); - lf_set_stop_tag(&env[i], (tag_t) {.time = max_current_tag.time, .microstep = max_current_tag.microstep+1}); - // Release the barrier on tag advancement. - _lf_decrement_tag_barrier_locked(&env[i]); - - // We signal instead of broadcast under the assumption that only - // one worker thread can call wait_until at a given time because - // the call to wait_until is protected by a mutex lock - lf_cond_signal(&env->event_q_changed); - LF_MUTEX_UNLOCK(&env[i].mutex); + LF_MUTEX_LOCK(&env[i].mutex); + _lf_decrement_tag_barrier_locked(&env[i]); + LF_MUTEX_UNLOCK(&env[i].mutex); } + } +#else + // In a non-federated program, the stop_tag will be the next microstep after max_current_tag. + // Iterate over environments to set their stop tag and release their barrier. + for (int i = 0; i < num_environments; i++) { + LF_MUTEX_LOCK(&env[i].mutex); + lf_set_stop_tag(&env[i], (tag_t){.time = max_current_tag.time, .microstep = max_current_tag.microstep + 1}); + // Release the barrier on tag advancement. 
+ _lf_decrement_tag_barrier_locked(&env[i]); + + // We signal instead of broadcast under the assumption that only + // one worker thread can call wait_until at a given time because + // the call to wait_until is protected by a mutex lock + lf_cond_signal(&env->event_q_changed); + LF_MUTEX_UNLOCK(&env[i].mutex); + } #endif } void _lf_trigger_reaction(environment_t* env, reaction_t* reaction, int worker_number) { - assert(env != GLOBAL_ENVIRONMENT); + assert(env != GLOBAL_ENVIRONMENT); #ifdef MODAL_REACTORS - // Check if reaction is disabled by mode inactivity - if (_lf_mode_is_active(reaction->mode)) { + // Check if reaction is disabled by mode inactivity + if (_lf_mode_is_active(reaction->mode)) { #endif lf_scheduler_trigger_reaction(env->scheduler, reaction, worker_number); #ifdef MODAL_REACTORS - } else { // Suppress reaction by preventing entering reaction queue - LF_PRINT_DEBUG("Suppressing downstream reaction %s due inactivity of mode %s.", - reaction->name, reaction->mode->name); - } + } else { // Suppress reaction by preventing entering reaction queue + LF_PRINT_DEBUG("Suppressing downstream reaction %s due inactivity of mode %s.", reaction->name, + reaction->mode->name); + } #endif } @@ -593,117 +587,117 @@ void _lf_trigger_reaction(environment_t* env, reaction_t* reaction, int worker_n * This assumes the mutex lock is held by the caller. * @param env Environment within which we are executing. */ -void _lf_initialize_start_tag(environment_t *env) { - assert(env != GLOBAL_ENVIRONMENT); +void _lf_initialize_start_tag(environment_t* env) { + assert(env != GLOBAL_ENVIRONMENT); - // Add reactions invoked at tag (0,0) (including startup reactions) to the reaction queue - _lf_trigger_startup_reactions(env); + // Add reactions invoked at tag (0,0) (including startup reactions) to the reaction queue + _lf_trigger_startup_reactions(env); #if defined FEDERATED - // If env is the environment for the top-level enclave, then initialize the federate. 
- environment_t *top_level_env; - _lf_get_environments(&top_level_env); - if (env == top_level_env) { - // Reset status fields before talking to the RTI to set network port - // statuses to unknown - lf_reset_status_fields_on_input_port_triggers(); - - // Get a start_time from the RTI - lf_synchronize_with_other_federates(); // Resets start_time in federated execution according to the RTI. - } + // If env is the environment for the top-level enclave, then initialize the federate. + environment_t* top_level_env; + _lf_get_environments(&top_level_env); + if (env == top_level_env) { + // Reset status fields before talking to the RTI to set network port + // statuses to unknown + lf_reset_status_fields_on_input_port_triggers(); - // The start time will likely have changed. Adjust the current tag and stop tag. - env->current_tag = (tag_t){.time = start_time, .microstep = 0u}; - if (duration >= 0LL) { - // A duration has been specified. Recalculate the stop time. - env->stop_tag = ((tag_t) {.time = start_time + duration, .microstep = 0}); - } + // Get a start_time from the RTI + lf_synchronize_with_other_federates(); // Resets start_time in federated execution according to the RTI. + } + + // The start time will likely have changed. Adjust the current tag and stop tag. + env->current_tag = (tag_t){.time = start_time, .microstep = 0u}; + if (duration >= 0LL) { + // A duration has been specified. Recalculate the stop time. + env->stop_tag = ((tag_t){.time = start_time + duration, .microstep = 0}); + } - _lf_initialize_timers(env); + _lf_initialize_timers(env); - env->current_tag = (tag_t){.time = start_time, .microstep = 0u}; + env->current_tag = (tag_t){.time = start_time, .microstep = 0u}; #if defined FEDERATED_DECENTRALIZED - // If we have a non-zero STA offset, then we need to allow messages to arrive - // prior to the start time. To avoid spurious STP violations, we temporarily - // set the current time back by the STA offset. 
- env->current_tag.time -= lf_fed_STA_offset; - LF_PRINT_LOG("Waiting for start time " PRINTF_TIME " plus STA " PRINTF_TIME ".", - start_time, lf_fed_STA_offset); + // If we have a non-zero STA offset, then we need to allow messages to arrive + // prior to the start time. To avoid spurious STP violations, we temporarily + // set the current time back by the STA offset. + env->current_tag.time -= lf_fed_STA_offset; + LF_PRINT_LOG("Waiting for start time " PRINTF_TIME " plus STA " PRINTF_TIME ".", start_time, lf_fed_STA_offset); #else - instant_t lf_fed_STA_offset = 0; - LF_PRINT_LOG("Waiting for start time " PRINTF_TIME ".", - start_time); + // For other than federated decentralized execution, there is no lf_fed_STA_offset variable defined. + // To use uniform code below, we define it here as a local variable. + instant_t lf_fed_STA_offset = 0; + LF_PRINT_LOG("Waiting for start time " PRINTF_TIME ".", start_time); #endif - // Call wait_until if federated. This is required because the startup procedure - // in lf_synchronize_with_other_federates() can decide on a new start_time that is - // larger than the current physical time. - // Therefore, if --fast was not specified, wait until physical time matches - // or exceeds the start time. Microstep is ignored. - // This wait_until() is deliberately called after most precursor operations - // for tag (0,0) are performed (e.g., injecting startup reactions, etc.). - // This has two benefits: First, the startup overheads will reduce - // the required waiting time. Second, this call releases the mutex lock and allows - // other threads (specifically, federate threads that handle incoming p2p messages - // from other federates) to hold the lock and possibly raise a tag barrier. This is - // especially useful if an STA is set properly because the federate will get - // a chance to process incoming messages while utilizing the STA. - - // Here we wait until the start time and also release the environment mutex. 
- // this means that the other worker threads will be allowed to start. We need - // this to avoid potential deadlock in federated startup. - while(!wait_until(env, start_time + lf_fed_STA_offset, &env->event_q_changed)) {}; - LF_PRINT_DEBUG("Done waiting for start time + STA offset " PRINTF_TIME ".", start_time + lf_fed_STA_offset); - LF_PRINT_DEBUG("Physical time is ahead of current time by " PRINTF_TIME - ". This should be close to the STA offset.", - lf_time_physical() - start_time); - - // Restore the current tag to match the start time. - env->current_tag = (tag_t){.time = start_time, .microstep = 0u}; - - // If the stop_tag is (0,0), also insert the shutdown - // reactions. This can only happen if the timeout time - // was set to 0. - if (lf_tag_compare(env->current_tag, env->stop_tag) >= 0) { - _lf_trigger_shutdown_reactions(env); - } + // Call wait_until if federated. This is required because the startup procedure + // in lf_synchronize_with_other_federates() can decide on a new start_time that is + // larger than the current physical time. + // Therefore, if --fast was not specified, wait until physical time matches + // or exceeds the start time. Microstep is ignored. + // This wait_until() is deliberately called after most precursor operations + // for tag (0,0) are performed (e.g., injecting startup reactions, etc.). + // This has two benefits: First, the startup overheads will reduce + // the required waiting time. Second, this call releases the mutex lock and allows + // other threads (specifically, federate threads that handle incoming p2p messages + // from other federates) to hold the lock and possibly raise a tag barrier. This is + // especially useful if an STA is set properly because the federate will get + // a chance to process incoming messages while utilizing the STA. + + // Here we wait until the start time and also release the environment mutex. + // this means that the other worker threads will be allowed to start. 
We need + // this to avoid potential deadlock in federated startup. + while (!wait_until(env, start_time + lf_fed_STA_offset, &env->event_q_changed)) { + }; + LF_PRINT_DEBUG("Done waiting for start time + STA offset " PRINTF_TIME ".", start_time + lf_fed_STA_offset); + LF_PRINT_DEBUG("Physical time is ahead of current time by " PRINTF_TIME ". This should be close to the STA offset.", + lf_time_physical() - start_time); + + // Restore the current tag to match the start time. + env->current_tag = (tag_t){.time = start_time, .microstep = 0u}; + + // If the stop_tag is (0,0), also insert the shutdown + // reactions. This can only happen if the timeout time + // was set to 0. + if (lf_tag_compare(env->current_tag, env->stop_tag) >= 0) { + _lf_trigger_shutdown_reactions(env); + } #ifdef FEDERATED_DECENTRALIZED - // In federated execution (at least under decentralized coordination), - // it is possible that an incoming message has been partially read at (0,0), - // enough to see its tag. To prevent it from becoming tardy, the thread - // that is reading the message has set a barrier to prevent logical time - // from exceeding the timestamp of the message. It will remove that barrier - // once the complete message has been read. Here, we wait for that barrier - // to be removed, if appropriate before proceeding to executing tag (0,0). - _lf_wait_on_tag_barrier(env, (tag_t){.time=start_time,.microstep=0}); - lf_spawn_staa_thread(); - -#else // NOT FEDERATED_DECENTRALIZED - // Each federate executes the start tag (which is the current - // tag). Inform the RTI of this if needed. - send_next_event_tag(env, env->current_tag, true); + // In federated execution (at least under decentralized coordination), + // it is possible that an incoming message has been partially read at (0,0), + // enough to see its tag. To prevent it from becoming tardy, the thread + // that is reading the message has set a barrier to prevent logical time + // from exceeding the timestamp of the message. 
It will remove that barrier + // once the complete message has been read. Here, we wait for that barrier + // to be removed, if appropriate before proceeding to executing tag (0,0). + _lf_wait_on_tag_barrier(env, (tag_t){.time = start_time, .microstep = 0}); + lf_spawn_staa_thread(); + +#else // NOT FEDERATED_DECENTRALIZED + // Each federate executes the start tag (which is the current + // tag). Inform the RTI of this if needed. + send_next_event_tag(env, env->current_tag, true); #endif // NOT FEDERATED_DECENTRALIZED - // For messages that may have arrived while we were waiting, put - // reactions on the reaction queue. - _lf_pop_events(env); - -#else // NOT FEDERATED - _lf_initialize_timers(env); - - // If the stop_tag is (0,0), also insert the shutdown - // reactions. This can only happen if the timeout time - // was set to 0. - if (lf_tag_compare(env->current_tag, env->stop_tag) >= 0) { - _lf_trigger_shutdown_reactions(env); - } + // For messages that may have arrived while we were waiting, put + // reactions on the reaction queue. + _lf_pop_events(env); + +#else // NOT FEDERATED + _lf_initialize_timers(env); + + // If the stop_tag is (0,0), also insert the shutdown + // reactions. This can only happen if the timeout time + // was set to 0. + if (lf_tag_compare(env->current_tag, env->stop_tag) >= 0) { + _lf_trigger_shutdown_reactions(env); + } #endif // NOT FEDERATED - // Set the following boolean so that other thread(s), including federated threads, - // know that the execution has started - env->execution_started = true; + // Set the following boolean so that other thread(s), including federated threads, + // know that the execution has started + env->execution_started = true; } /** For logging and debugging, each worker thread is numbered. */ @@ -720,39 +714,38 @@ int worker_thread_count = 0; * * @return true if a deadline violation occurred. false otherwise. 
*/ -bool _lf_worker_handle_deadline_violation_for_reaction(environment_t *env, int worker_number, reaction_t* reaction) { - assert(env != GLOBAL_ENVIRONMENT); - - bool violation_occurred = false; - // If the reaction has a deadline, compare to current physical time - // and invoke the deadline violation reaction instead of the reaction function - // if a violation has occurred. Note that the violation reaction will be invoked - // at most once per logical time value. If the violation reaction triggers the - // same reaction at the current time value, even if at a future superdense time, - // then the reaction will be invoked and the violation reaction will not be invoked again. - if (reaction->deadline >= 0LL) { - // Get the current physical time. - instant_t physical_time = lf_time_physical(); - // Check for deadline violation. - if (reaction->deadline == 0 || physical_time > env->current_tag.time + reaction->deadline) { - // Deadline violation has occurred. - tracepoint_reaction_deadline_missed(env->trace, reaction, worker_number); - violation_occurred = true; - // Invoke the local handler, if there is one. - reaction_function_t handler = reaction->deadline_violation_handler; - if (handler != NULL) { - LF_PRINT_LOG("Worker %d: Deadline violation. Invoking deadline handler.", - worker_number); - (*handler)(reaction->self); - - // If the reaction produced outputs, put the resulting - // triggered reactions into the queue or execute them directly if possible. - schedule_output_reactions(env, reaction, worker_number); - // Remove the reaction from the executing queue. - } - } +bool _lf_worker_handle_deadline_violation_for_reaction(environment_t* env, int worker_number, reaction_t* reaction) { + assert(env != GLOBAL_ENVIRONMENT); + + bool violation_occurred = false; + // If the reaction has a deadline, compare to current physical time + // and invoke the deadline violation reaction instead of the reaction function + // if a violation has occurred. 
Note that the violation reaction will be invoked + // at most once per logical time value. If the violation reaction triggers the + // same reaction at the current time value, even if at a future superdense time, + // then the reaction will be invoked and the violation reaction will not be invoked again. + if (reaction->deadline >= 0LL) { + // Get the current physical time. + instant_t physical_time = lf_time_physical(); + // Check for deadline violation. + if (reaction->deadline == 0 || physical_time > env->current_tag.time + reaction->deadline) { + // Deadline violation has occurred. + tracepoint_reaction_deadline_missed(env, reaction, worker_number); + violation_occurred = true; + // Invoke the local handler, if there is one. + reaction_function_t handler = reaction->deadline_violation_handler; + if (handler != NULL) { + LF_PRINT_LOG("Worker %d: Deadline violation. Invoking deadline handler.", worker_number); + (*handler)(reaction->self); + + // If the reaction produced outputs, put the resulting + // triggered reactions into the queue or execute them directly if possible. + schedule_output_reactions(env, reaction, worker_number); + // Remove the reaction from the executing queue. + } } - return violation_occurred; + } + return violation_occurred; } /** @@ -767,55 +760,55 @@ bool _lf_worker_handle_deadline_violation_for_reaction(environment_t *env, int w * @return true if an STP violation occurred and was handled. false otherwise. */ bool _lf_worker_handle_STP_violation_for_reaction(environment_t* env, int worker_number, reaction_t* reaction) { - bool violation_occurred = false; - // If the reaction violates the STP offset, - // an input trigger to this reaction has been triggered at a later - // logical time than originally anticipated. In this case, a special - // STP handler will be invoked. - // FIXME: Note that the STP handler will be invoked - // at most once per logical time value. 
If the STP handler triggers the - // same reaction at the current time value, even if at a future superdense time, - // then the reaction will be invoked and the STP handler will not be invoked again. - // However, inputs ports to a federate reactor are network port types so this possibly should - // be disallowed. - // @note The STP handler and the deadline handler are not mutually exclusive. - // In other words, both can be invoked for a reaction if it is triggered late - // in logical time (STP offset is violated) and also misses the constraint on - // physical time (deadline). - // @note In absence of an STP handler, the is_STP_violated will be passed down the reaction - // chain until it is dealt with in a downstream STP handler. - if (reaction->is_STP_violated == true && !reaction->is_an_input_reaction) { - reaction_function_t handler = reaction->STP_handler; - LF_PRINT_LOG("STP violation detected."); - - // Invoke the STP handler if there is one. - if (handler != NULL) { - LF_PRINT_LOG("Worker %d: Invoking STP violation handler.", worker_number); - // There is a violation - violation_occurred = true; - (*handler)(reaction->self); - - // Reset the STP violation flag because it has been dealt with. - // Downstream handlers should not be invoked. - reaction->is_STP_violated = false; - - // If the reaction produced outputs, put the resulting - // triggered reactions into the queue or execute them directly if possible. - schedule_output_reactions(env, reaction, worker_number); - - // Reset the is_STP_violated because it has been dealt with - reaction->is_STP_violated = false; - } else { - // The intended tag cannot be respected and there is no handler. - // Print an error message and return true. - // NOTE: STP violations are ignored for network input reactions, which need to - // execute anyway. 
- lf_print_error("STP violation occurred in a trigger to reaction %d, " - "and there is no handler.\n**** Invoking reaction at the wrong tag!", - reaction->number + 1); // +1 to align with diagram numbering. - } + bool violation_occurred = false; + // If the reaction violates the STP offset, + // an input trigger to this reaction has been triggered at a later + // logical time than originally anticipated. In this case, a special + // STP handler will be invoked. + // FIXME: Note that the STP handler will be invoked + // at most once per logical time value. If the STP handler triggers the + // same reaction at the current time value, even if at a future superdense time, + // then the reaction will be invoked and the STP handler will not be invoked again. + // However, inputs ports to a federate reactor are network port types so this possibly should + // be disallowed. + // @note The STP handler and the deadline handler are not mutually exclusive. + // In other words, both can be invoked for a reaction if it is triggered late + // in logical time (STP offset is violated) and also misses the constraint on + // physical time (deadline). + // @note In absence of an STP handler, the is_STP_violated will be passed down the reaction + // chain until it is dealt with in a downstream STP handler. + if (reaction->is_STP_violated == true && !reaction->is_an_input_reaction) { + reaction_function_t handler = reaction->STP_handler; + LF_PRINT_LOG("STP violation detected."); + + // Invoke the STP handler if there is one. + if (handler != NULL) { + LF_PRINT_LOG("Worker %d: Invoking STP violation handler.", worker_number); + // There is a violation + violation_occurred = true; + (*handler)(reaction->self); + + // Reset the STP violation flag because it has been dealt with. + // Downstream handlers should not be invoked. 
+ reaction->is_STP_violated = false; + + // If the reaction produced outputs, put the resulting + // triggered reactions into the queue or execute them directly if possible. + schedule_output_reactions(env, reaction, worker_number); + + // Reset the is_STP_violated because it has been dealt with + reaction->is_STP_violated = false; + } else { + // The intended tag cannot be respected and there is no handler. + // Print an error message and return true. + // NOTE: STP violations are ignored for network input reactions, which need to + // execute anyway. + lf_print_error("STP violation occurred in a trigger to reaction %d, " + "and there is no handler.\n**** Invoking reaction at the wrong tag!", + reaction->number + 1); // +1 to align with diagram numbering. } - return violation_occurred; + } + return violation_occurred; } /** @@ -831,12 +824,12 @@ bool _lf_worker_handle_STP_violation_for_reaction(environment_t* env, int worker * * @return true if a violation occurred and was handled. false otherwise. */ -bool _lf_worker_handle_violations(environment_t *env, int worker_number, reaction_t* reaction) { - bool violation = false; +bool _lf_worker_handle_violations(environment_t* env, int worker_number, reaction_t* reaction) { + bool violation = false; - violation = _lf_worker_handle_deadline_violation_for_reaction(env, worker_number, reaction) || - _lf_worker_handle_STP_violation_for_reaction(env, worker_number, reaction); - return violation; + violation = _lf_worker_handle_deadline_violation_for_reaction(env, worker_number, reaction) || + _lf_worker_handle_STP_violation_for_reaction(env, worker_number, reaction); + return violation; } /** @@ -849,26 +842,24 @@ bool _lf_worker_handle_violations(environment_t *env, int worker_number, reactio * @param worker_number The ID of the worker. * @param reaction The reaction to invoke. 
*/ -void _lf_worker_invoke_reaction(environment_t *env, int worker_number, reaction_t* reaction) { - LF_PRINT_LOG("Worker %d: Invoking reaction %s at elapsed tag " PRINTF_TAG ".", - worker_number, - reaction->name, - env->current_tag.time - start_time, - env->current_tag.microstep); - _lf_invoke_reaction(env, reaction, worker_number); - - // If the reaction produced outputs, put the resulting triggered - // reactions into the queue or execute them immediately. - schedule_output_reactions(env, reaction, worker_number); - - reaction->is_STP_violated = false; +void _lf_worker_invoke_reaction(environment_t* env, int worker_number, reaction_t* reaction) { + LF_PRINT_LOG("Worker %d: Invoking reaction %s at elapsed tag " PRINTF_TAG ".", worker_number, reaction->name, + env->current_tag.time - start_time, env->current_tag.microstep); + _lf_invoke_reaction(env, reaction, worker_number); + + // If the reaction produced outputs, put the resulting triggered + // reactions into the queue or execute them immediately. + schedule_output_reactions(env, reaction, worker_number); + + reaction->is_STP_violated = false; } void try_advance_level(environment_t* env, volatile size_t* next_reaction_level) { - #ifdef FEDERATED - lf_stall_advance_level_federation(env, *next_reaction_level); - #endif - if (*next_reaction_level < SIZE_MAX) *next_reaction_level += 1; +#ifdef FEDERATED + lf_stall_advance_level_federation(env, *next_reaction_level); +#endif + if (*next_reaction_level < SIZE_MAX) + *next_reaction_level += 1; } /** @@ -878,166 +869,154 @@ void try_advance_level(environment_t* env, volatile size_t* next_reaction_level) * @param env Environment within which we are executing. * @param worker_number The number assigned to this worker thread */ -void _lf_worker_do_work(environment_t *env, int worker_number) { - assert(env != GLOBAL_ENVIRONMENT); - - // Keep track of whether we have decremented the idle thread count. 
- // Obtain a reaction from the scheduler that is ready to execute - // (i.e., it is not blocked by concurrently executing reactions - // that it depends on). - // lf_print_snapshot(); // This is quite verbose (but very useful in debugging reaction deadlocks). - reaction_t* current_reaction_to_execute = NULL; +void _lf_worker_do_work(environment_t* env, int worker_number) { + assert(env != GLOBAL_ENVIRONMENT); + + // Keep track of whether we have decremented the idle thread count. + // Obtain a reaction from the scheduler that is ready to execute + // (i.e., it is not blocked by concurrently executing reactions + // that it depends on). + // lf_print_snapshot(); // This is quite verbose (but very useful in debugging reaction deadlocks). + reaction_t* current_reaction_to_execute = NULL; #ifdef FEDERATED - lf_stall_advance_level_federation(env, 0); + lf_stall_advance_level_federation(env, 0); #endif - while ((current_reaction_to_execute = - lf_sched_get_ready_reaction(env->scheduler, worker_number)) - != NULL) { - // Got a reaction that is ready to run. - LF_PRINT_DEBUG("Worker %d: Got from scheduler reaction %s: " - "level: %lld, is input reaction: %d, chain ID: %llu, and deadline " PRINTF_TIME ".", - worker_number, - current_reaction_to_execute->name, - LF_LEVEL(current_reaction_to_execute->index), - current_reaction_to_execute->is_an_input_reaction, - current_reaction_to_execute->chain_id, - current_reaction_to_execute->deadline); - - bool violation = _lf_worker_handle_violations( - env, - worker_number, - current_reaction_to_execute - ); - - if (!violation) { - // Invoke the reaction function. 
- _lf_worker_invoke_reaction(env, worker_number, current_reaction_to_execute); - } - - LF_PRINT_DEBUG("Worker %d: Done with reaction %s.", - worker_number, current_reaction_to_execute->name); - - lf_sched_done_with_reaction(worker_number, current_reaction_to_execute); + while ((current_reaction_to_execute = lf_sched_get_ready_reaction(env->scheduler, worker_number)) != NULL) { + // Got a reaction that is ready to run. + LF_PRINT_DEBUG("Worker %d: Got from scheduler reaction %s: " + "level: %lld, is input reaction: %d, chain ID: %llu, and deadline " PRINTF_TIME ".", + worker_number, current_reaction_to_execute->name, LF_LEVEL(current_reaction_to_execute->index), + current_reaction_to_execute->is_an_input_reaction, current_reaction_to_execute->chain_id, + current_reaction_to_execute->deadline); + + bool violation = _lf_worker_handle_violations(env, worker_number, current_reaction_to_execute); + + if (!violation) { + // Invoke the reaction function. + _lf_worker_invoke_reaction(env, worker_number, current_reaction_to_execute); } + + LF_PRINT_DEBUG("Worker %d: Done with reaction %s.", worker_number, current_reaction_to_execute->name); + + lf_sched_done_with_reaction(worker_number, current_reaction_to_execute); + } } /** * Worker thread for the thread pool. Its argument is the environment within which is working - * The very first worker per environment/enclave is in charge of synchronizing with + * The very first worker per environment/enclave is in charge of synchronizing with * the other enclaves by getting a TAG to (0,0) this might block until upstream enclaves * have finished tag (0,0). This is unlike federated scheduling where each federate will * get a PTAG to (0,0) and use network control reactions to handle upstream dependencies * @param arg Environment within which the worker should execute. 
*/ void* worker(void* arg) { - environment_t *env = (environment_t* ) arg; - LF_MUTEX_LOCK(&env->mutex); + initialize_lf_thread_id(); + environment_t* env = (environment_t*)arg; + LF_MUTEX_LOCK(&env->mutex); - int worker_number = env->worker_thread_count++; - LF_PRINT_LOG("Environment %u: Worker thread %d started.",env->id, worker_number); - - // If we have scheduling enclaves. The first worker will block here until - // it receives a TAG for tag (0,0) from the local RTI. In federated scheduling - // we use PTAGs to get things started on tag (0,0) but those are not used - // with enclaves. - #if defined LF_ENCLAVES - if (worker_number == 0) { - // If we have scheduling enclaves. We must get a TAG to the start tag. - LF_PRINT_LOG("Environment %u: Worker thread %d waits for TAG to (0,0).",env->id, worker_number); - - tag_t tag_granted = rti_next_event_tag_locked(env->enclave_info, env->current_tag); - LF_ASSERT( lf_tag_compare(tag_granted, env->current_tag) == 0, - "We did not receive a TAG to the start tag."); - } - #endif + int worker_number = env->worker_thread_count++; + LF_PRINT_LOG("Environment %u: Worker thread %d started.", env->id, worker_number); - // Release mutex and start working. - LF_MUTEX_UNLOCK(&env->mutex); - _lf_worker_do_work(env, worker_number); - LF_MUTEX_LOCK(&env->mutex); +// If we have scheduling enclaves. The first worker will block here until +// it receives a TAG for tag (0,0) from the local RTI. In federated scheduling +// we use PTAGs to get things started on tag (0,0) but those are not used +// with enclaves. +#if defined LF_ENCLAVES + if (worker_number == 0) { + // If we have scheduling enclaves. We must get a TAG to the start tag. 
+ LF_PRINT_LOG("Environment %u: Worker thread %d waits for TAG to (0,0).", env->id, worker_number); + + tag_t tag_granted = rti_next_event_tag_locked(env->enclave_info, env->current_tag); + LF_ASSERT(lf_tag_compare(tag_granted, env->current_tag) == 0, "We did not receive a TAG to the start tag."); + } +#endif - // This thread is exiting, so don't count it anymore. - env->worker_thread_count--; + // Release mutex and start working. + LF_MUTEX_UNLOCK(&env->mutex); + _lf_worker_do_work(env, worker_number); + LF_MUTEX_LOCK(&env->mutex); - if (env->worker_thread_count == 0) { - // The last worker thread to exit will inform the RTI if needed. -#if defined LF_ENCLAVES - // If we have scheduling enclaves. Then we must send a LTC of FOREVER. - // to grant other enclaves a TAG to FOREVER. - // TODO: Can we unify this? Preferraby also have federates send NETs - rti_logical_tag_complete_locked(env->enclave_info, FOREVER_TAG); + // This thread is exiting, so don't count it anymore. + env->worker_thread_count--; + + if (env->worker_thread_count == 0) { + // The last worker thread to exit will inform the RTI if needed. +#if defined LF_ENCLAVES + // If we have scheduling enclaves. Then we must send a LTC of FOREVER. + // to grant other enclaves a TAG to FOREVER. + // TODO: Can we unify this? Preferraby also have federates send NETs + rti_logical_tag_complete_locked(env->enclave_info, FOREVER_TAG); #else - // In federated execution we send a NET to the RTI. This will result in - // giving the other federates a PTAG to FOREVER. - send_next_event_tag(env, FOREVER_TAG, false); + // In federated execution we send a NET to the RTI. This will result in + // giving the other federates a PTAG to FOREVER. + send_next_event_tag(env, FOREVER_TAG, false); #endif + } - } + lf_cond_signal(&env->event_q_changed); - lf_cond_signal(&env->event_q_changed); - - LF_PRINT_DEBUG("Worker %d: Stop requested. Exiting.", worker_number); - LF_MUTEX_UNLOCK(&env->mutex); - // timeout has been requested. 
- return NULL; + LF_PRINT_DEBUG("Worker %d: Stop requested. Exiting.", worker_number); + LF_MUTEX_UNLOCK(&env->mutex); + // timeout has been requested. + return NULL; } #ifndef NDEBUG void lf_print_snapshot(environment_t* env) { - assert(env != GLOBAL_ENVIRONMENT); - - if(LOG_LEVEL > LOG_LEVEL_LOG) { - LF_PRINT_DEBUG(">>> START Snapshot"); - LF_PRINT_DEBUG("Pending:"); - // pqueue_dump(reaction_q, print_reaction); FIXME: reaction_q is not - // accessible here - LF_PRINT_DEBUG("Event queue size: %zu. Contents:", - pqueue_size(env->event_q)); - pqueue_dump(env->event_q, print_reaction); - LF_PRINT_DEBUG(">>> END Snapshot"); - } + assert(env != GLOBAL_ENVIRONMENT); + + if (LOG_LEVEL > LOG_LEVEL_LOG) { + LF_PRINT_DEBUG(">>> START Snapshot"); + LF_PRINT_DEBUG("Pending:"); + // pqueue_dump(reaction_q, print_reaction); FIXME: reaction_q is not + // accessible here + LF_PRINT_DEBUG("Event queue size: %zu. Contents:", pqueue_size(env->event_q)); + pqueue_dump(env->event_q, print_reaction); + LF_PRINT_DEBUG(">>> END Snapshot"); + } } -#else // NDEBUG +#else // NDEBUG void lf_print_snapshot(environment_t* env) { - // Do nothing. + // Do nothing. } #endif // NDEBUG // Start threads in the thread pool. void start_threads(environment_t* env) { - assert(env != GLOBAL_ENVIRONMENT); + assert(env != GLOBAL_ENVIRONMENT); - LF_PRINT_LOG("Starting %u worker threads in environment", env->num_workers); - for (unsigned int i = 0; i < env->num_workers; i++) { - if (lf_thread_create(&env->thread_ids[i], worker, env) != 0) { - lf_print_error_and_exit("Could not start thread-%u", i); - } + LF_PRINT_LOG("Starting %u worker threads in environment", env->num_workers); + for (unsigned int i = 0; i < env->num_workers; i++) { + if (lf_thread_create(&env->thread_ids[i], worker, env) != 0) { + lf_print_error_and_exit("Could not start thread-%u", i); } + } } /** * @brief Determine the number of workers. 
*/ void determine_number_of_workers(void) { - // If _lf_number_of_workers is 0, it means that it was not provided on - // the command-line using the --workers argument. - if (_lf_number_of_workers == 0u) { - #if !defined(NUMBER_OF_WORKERS) || NUMBER_OF_WORKERS == 0 - // Use the number of cores on the host machine. - _lf_number_of_workers = lf_available_cores(); - - // If reaction graph breadth is available. Cap number of workers - #if defined(LF_REACTION_GRAPH_BREADTH) - if (LF_REACTION_GRAPH_BREADTH < _lf_number_of_workers) { - _lf_number_of_workers = LF_REACTION_GRAPH_BREADTH; - } - #endif - - #else - // Use the provided number of workers by the user - _lf_number_of_workers = NUMBER_OF_WORKERS; - #endif + // If _lf_number_of_workers is 0, it means that it was not provided on + // the command-line using the --workers argument. + if (_lf_number_of_workers == 0u) { +#if !defined(NUMBER_OF_WORKERS) || NUMBER_OF_WORKERS == 0 + // Use the number of cores on the host machine. + _lf_number_of_workers = lf_available_cores(); + +// If reaction graph breadth is available. Cap number of workers +#if defined(LF_REACTION_GRAPH_BREADTH) + if (LF_REACTION_GRAPH_BREADTH < _lf_number_of_workers) { + _lf_number_of_workers = LF_REACTION_GRAPH_BREADTH; } +#endif + +#else + // Use the provided number of workers by the user + _lf_number_of_workers = NUMBER_OF_WORKERS; +#endif + } } /** @@ -1053,127 +1032,129 @@ void determine_number_of_workers(void) { * at compile time. */ int lf_reactor_c_main(int argc, const char* argv[]) { - // Invoke the function that optionally provides default command-line options. - lf_set_default_command_line_options(); - - // Parse command line arguments. Sets global variables like duration, fast, number_of_workers. 
- if (!(process_args(default_argc, default_argv) - && process_args(argc, argv))) { - return -1; - } - - // Register the termination function - if (atexit(termination) != 0) { - lf_print_warning("Failed to register termination function!"); - } - // The above handles only "normal" termination (via a call to exit). - // As a consequence, we need to also trap ctrl-C, which issues a SIGINT, - // and cause it to call exit. - signal(SIGINT, exit); + initialize_lf_thread_id(); + // Invoke the function that optionally provides default command-line options. + lf_set_default_command_line_options(); + + // Parse command line arguments. Sets global variables like duration, fast, number_of_workers. + if (!(process_args(default_argc, default_argv) && process_args(argc, argv))) { + return -1; + } + + // Register the termination function + if (atexit(termination) != 0) { + lf_print_warning("Failed to register termination function!"); + } + // The above handles only "normal" termination (via a call to exit). + // As a consequence, we need to also trap ctrl-C, which issues a SIGINT, + // and cause it to call exit. + signal(SIGINT, exit); #ifdef SIGPIPE - // Ignore SIGPIPE errors, which terminate the entire application if - // socket write() fails because the reader has closed the socket. - // Instead, cause an EPIPE error to be set when write() fails. - // NOTE: The reason for a broken socket causing a SIGPIPE signal - // instead of just having write() return an error is to robutly - // a foo | bar pipeline where bar crashes. The default behavior - // is for foo to also exit. - signal(SIGPIPE, SIG_IGN); + // Ignore SIGPIPE errors, which terminate the entire application if + // socket write() fails because the reader has closed the socket. + // Instead, cause an EPIPE error to be set when write() fails. + // NOTE: The reason for a broken socket causing a SIGPIPE signal + // instead of just having write() return an error is to robutly + // a foo | bar pipeline where bar crashes. 
The default behavior + // is for foo to also exit. + signal(SIGPIPE, SIG_IGN); #endif // SIGPIPE - // Determine global number of workers based on user request and available parallelism - determine_number_of_workers(); - - // Initialize the clock through the platform API. No reading of physical time before this. - _lf_initialize_clock(); - start_time = lf_time_physical(); - - LF_PRINT_DEBUG("Start time: " PRINTF_TIME "ns", start_time); - struct timespec physical_time_timespec = {start_time / BILLION, start_time % BILLION}; - lf_print("---- Start execution at time %s---- plus %ld nanoseconds", - ctime(&physical_time_timespec.tv_sec), physical_time_timespec.tv_nsec); - - // Create and initialize the environments for each enclave - lf_create_environments(); - - // Initialize the one global mutex - LF_MUTEX_INIT(&global_mutex); - - // Initialize the global payload and token allocation counts and the trigger table - // as well as starting tracing subsystem - initialize_global(); - - - environment_t *envs; - int num_envs = _lf_get_environments(&envs); + // Determine global number of workers based on user request and available parallelism + determine_number_of_workers(); + + // Initialize the clock through the platform API. No reading of physical time before this. 
+ _lf_initialize_clock(); + start_time = lf_time_physical(); +#ifndef FEDERATED + lf_tracing_set_start_time(start_time); +#endif + + LF_PRINT_DEBUG("Start time: " PRINTF_TIME "ns", start_time); + struct timespec physical_time_timespec = {start_time / BILLION, start_time % BILLION}; + lf_print("---- Start execution at time %s---- plus %ld nanoseconds", ctime(&physical_time_timespec.tv_sec), + physical_time_timespec.tv_nsec); + + // Create and initialize the environments for each enclave + lf_create_environments(); + + // Initialize the one global mutex + LF_MUTEX_INIT(&global_mutex); + + // Initialize the global payload and token allocation counts and the trigger table + // as well as starting tracing subsystem + initialize_global(); + + environment_t* envs; + int num_envs = _lf_get_environments(&envs); #if defined LF_ENCLAVES - initialize_local_rti(envs, num_envs); + initialize_local_rti(envs, num_envs); +#endif + + // Do environment-specific setup + for (int i = 0; i < num_envs; i++) { + environment_t* env = &envs[i]; + + // Initialize the watchdogs on this environment. + _lf_initialize_watchdogs(env); + + // Initialize the start and stop tags of the environment + environment_init_tags(env, start_time, duration); +#ifdef MODAL_REACTORS + // Set up modal infrastructure + _lf_initialize_modes(env); #endif - - // Do environment-specific setup - for (int i = 0; inum_workers, NULL); - - // Lock mutex and spawn threads. This must be done before `_lf_initialize_start_tag` since it is using - // a cond var - LF_MUTEX_LOCK(&env->mutex); - - // Initialize start tag - lf_print("Environment %u: ---- Intializing start tag", env->id); - _lf_initialize_start_tag(env); - - lf_print("Environment %u: ---- Spawning %d workers.",env->id, env->num_workers); - start_threads(env); - // Unlock mutex and allow threads proceed - LF_MUTEX_UNLOCK(&env->mutex); + + // Initialize the scheduler + // FIXME: Why is this called here and in `_lf_initialize_trigger objects`? 
+ lf_sched_init(env, (size_t)env->num_workers, NULL); + + // Lock mutex and spawn threads. This must be done before `_lf_initialize_start_tag` since it is using + // a cond var + LF_MUTEX_LOCK(&env->mutex); + + // Initialize start tag + lf_print("Environment %u: ---- Intializing start tag", env->id); + _lf_initialize_start_tag(env); + + lf_print("Environment %u: ---- Spawning %d workers.", env->id, env->num_workers); + start_threads(env); + // Unlock mutex and allow threads proceed + LF_MUTEX_UNLOCK(&env->mutex); + } + + for (int i = 0; i < num_envs; i++) { + // Wait for the worker threads to exit. + environment_t* env = &envs[i]; + void* worker_thread_exit_status = NULL; + int ret = 0; + for (int i = 0; i < env->num_workers; i++) { + int failure = lf_thread_join(env->thread_ids[i], &worker_thread_exit_status); + if (failure) { + lf_print_error("Failed to join thread listening for incoming messages: %s", strerror(failure)); + } + if (worker_thread_exit_status != NULL) { + lf_print_error("---- Worker %d reports error code %p", i, worker_thread_exit_status); + ret = 1; + } } - - for (int i = 0; inum_workers; i++) { - int failure = lf_thread_join(env->thread_ids[i], &worker_thread_exit_status); - if (failure) { - lf_print_error("Failed to join thread listening for incoming messages: %s", strerror(failure)); - } - if (worker_thread_exit_status != NULL) { - lf_print_error("---- Worker %d reports error code %p", i, worker_thread_exit_status); - ret = 1; - } - } - - if (ret == 0) { - LF_PRINT_LOG("---- All worker threads exited successfully."); - } + + if (ret == 0) { + LF_PRINT_LOG("---- All worker threads exited successfully."); } - _lf_normal_termination = true; - return 0; -} + } + _lf_normal_termination = true; + return 0; +} /** - * @brief Notify of new event by broadcasting on a condition variable. + * @brief Notify of new event by broadcasting on a condition variable. * @param env Environment within which we are executing. 
*/ int lf_notify_of_event(environment_t* env) { - assert(env != GLOBAL_ENVIRONMENT); - return lf_cond_broadcast(&env->event_q_changed); + assert(env != GLOBAL_ENVIRONMENT); + return lf_cond_broadcast(&env->event_q_changed); } /** @@ -1181,11 +1162,11 @@ int lf_notify_of_event(environment_t* env) { * @param env Environment within which we are executing or GLOBAL_ENVIRONMENT. */ int lf_critical_section_enter(environment_t* env) { - if (env == GLOBAL_ENVIRONMENT) { - return lf_mutex_lock(&global_mutex); - } else { - return lf_mutex_lock(&env->mutex); - } + if (env == GLOBAL_ENVIRONMENT) { + return lf_mutex_lock(&global_mutex); + } else { + return lf_mutex_lock(&env->mutex); + } } /** @@ -1193,10 +1174,10 @@ int lf_critical_section_enter(environment_t* env) { * @param env Environment within which we are executing or GLOBAL_ENVIRONMENT. */ int lf_critical_section_exit(environment_t* env) { - if (env == GLOBAL_ENVIRONMENT) { - return lf_mutex_unlock(&global_mutex); - } else { - return lf_mutex_unlock(&env->mutex); - } + if (env == GLOBAL_ENVIRONMENT) { + return lf_mutex_unlock(&global_mutex); + } else { + return lf_mutex_unlock(&env->mutex); + } } #endif diff --git a/core/threaded/scheduler_GEDF_NP.c b/core/threaded/scheduler_GEDF_NP.c index b66d88478..0f3d971d5 100644 --- a/core/threaded/scheduler_GEDF_NP.c +++ b/core/threaded/scheduler_GEDF_NP.c @@ -14,11 +14,11 @@ #ifndef NUMBER_OF_WORKERS #define NUMBER_OF_WORKERS 1 -#endif // NUMBER_OF_WORKERS +#endif // NUMBER_OF_WORKERS #include -#include "platform.h" +#include "low_level_platform.h" #include "environment.h" #include "pqueue.h" #include "reactor_threaded.h" @@ -26,7 +26,7 @@ #include "scheduler_sync_tag_advance.h" #include "scheduler.h" #include "lf_semaphore.h" -#include "trace.h" +#include "tracepoint.h" #include "util.h" /////////////////// Scheduler Private API ///////////////////////// @@ -37,15 +37,12 @@ * @param reaction The reaction to insert. 
*/ static inline void _lf_sched_insert_reaction(lf_scheduler_t* scheduler, reaction_t* reaction) { - size_t reaction_level = LF_LEVEL(reaction->index); - LF_PRINT_DEBUG("Scheduler: Trying to lock the mutex for level %zu.", - reaction_level); - LF_MUTEX_LOCK(&scheduler->array_of_mutexes[reaction_level]); - LF_PRINT_DEBUG("Scheduler: Locked the mutex for level %zu.", reaction_level); - pqueue_insert(((pqueue_t**)scheduler - ->triggered_reactions)[reaction_level], - (void*)reaction); - LF_MUTEX_UNLOCK(&scheduler->array_of_mutexes[reaction_level]); + size_t reaction_level = LF_LEVEL(reaction->index); + LF_PRINT_DEBUG("Scheduler: Trying to lock the mutex for level %zu.", reaction_level); + LF_MUTEX_LOCK(&scheduler->array_of_mutexes[reaction_level]); + LF_PRINT_DEBUG("Scheduler: Locked the mutex for level %zu.", reaction_level); + pqueue_insert(((pqueue_t**)scheduler->triggered_reactions)[reaction_level], (void*)reaction); + LF_MUTEX_UNLOCK(&scheduler->array_of_mutexes[reaction_level]); } /** @@ -56,26 +53,25 @@ static inline void _lf_sched_insert_reaction(lf_scheduler_t* scheduler, reaction * threads. */ int _lf_sched_distribute_ready_reactions(lf_scheduler_t* scheduler) { - pqueue_t* tmp_queue = NULL; - // Note: All the threads are idle, which means that they are done inserting - // reactions. Therefore, the reaction queues can be accessed without locking - // a mutex. + pqueue_t* tmp_queue = NULL; + // Note: All the threads are idle, which means that they are done inserting + // reactions. Therefore, the reaction queues can be accessed without locking + // a mutex. 
- while (scheduler->next_reaction_level <= scheduler->max_reaction_level) { - LF_PRINT_DEBUG("Waiting with curr_reaction_level %zu.", scheduler->next_reaction_level); - try_advance_level(scheduler->env, &scheduler->next_reaction_level); + while (scheduler->next_reaction_level <= scheduler->max_reaction_level) { + LF_PRINT_DEBUG("Waiting with curr_reaction_level %zu.", scheduler->next_reaction_level); + try_advance_level(scheduler->env, &scheduler->next_reaction_level); - tmp_queue = ((pqueue_t**)scheduler->triggered_reactions) - [scheduler->next_reaction_level-1]; - size_t reactions_to_execute = pqueue_size(tmp_queue); + tmp_queue = ((pqueue_t**)scheduler->triggered_reactions)[scheduler->next_reaction_level - 1]; + size_t reactions_to_execute = pqueue_size(tmp_queue); - if (reactions_to_execute) { - scheduler->executing_reactions = tmp_queue; - return reactions_to_execute; - } + if (reactions_to_execute) { + scheduler->executing_reactions = tmp_queue; + return reactions_to_execute; } + } - return 0; + return 0; } /** @@ -84,22 +80,19 @@ int _lf_sched_distribute_ready_reactions(lf_scheduler_t* scheduler) { * This assumes that the caller is not holding any thread mutexes. */ void _lf_sched_notify_workers(lf_scheduler_t* scheduler) { - // Note: All threads are idle. Therefore, there is no need to lock the mutex - // while accessing the executing queue (which is pointing to one of the - // reaction queues). - size_t workers_to_awaken = - LF_MIN(scheduler->number_of_idle_workers, - pqueue_size((pqueue_t*)scheduler->executing_reactions)); - LF_PRINT_DEBUG("Scheduler: Notifying %zu workers.", workers_to_awaken); - scheduler->number_of_idle_workers -= workers_to_awaken; - LF_PRINT_DEBUG("Scheduler: New number of idle workers: %zu.", - scheduler->number_of_idle_workers); - if (workers_to_awaken > 1) { - // Notify all the workers except the worker thread that has called this - // function. 
- lf_semaphore_release(scheduler->semaphore, - (workers_to_awaken - 1)); - } + // Note: All threads are idle. Therefore, there is no need to lock the mutex + // while accessing the executing queue (which is pointing to one of the + // reaction queues). + size_t workers_to_awaken = + LF_MIN(scheduler->number_of_idle_workers, pqueue_size((pqueue_t*)scheduler->executing_reactions)); + LF_PRINT_DEBUG("Scheduler: Notifying %zu workers.", workers_to_awaken); + scheduler->number_of_idle_workers -= workers_to_awaken; + LF_PRINT_DEBUG("Scheduler: New number of idle workers: %zu.", scheduler->number_of_idle_workers); + if (workers_to_awaken > 1) { + // Notify all the workers except the worker thread that has called this + // function. + lf_semaphore_release(scheduler->semaphore, (workers_to_awaken - 1)); + } } /** @@ -107,9 +100,8 @@ void _lf_sched_notify_workers(lf_scheduler_t* scheduler) { * */ void _lf_sched_signal_stop(lf_scheduler_t* scheduler) { - scheduler->should_stop = true; - lf_semaphore_release(scheduler->semaphore, - (scheduler->number_of_workers - 1)); + scheduler->should_stop = true; + lf_semaphore_release(scheduler->semaphore, (scheduler->number_of_workers - 1)); } /** @@ -121,33 +113,33 @@ void _lf_sched_signal_stop(lf_scheduler_t* scheduler) { * This function assumes the caller does not hold the 'mutex' lock. */ void _lf_scheduler_try_advance_tag_and_distribute(lf_scheduler_t* scheduler) { - environment_t* env = scheduler->env; + environment_t* env = scheduler->env; - // Executing queue must be empty when this is called. - assert(pqueue_size((pqueue_t*)scheduler->executing_reactions) == 0); + // Executing queue must be empty when this is called. 
+ assert(pqueue_size((pqueue_t*)scheduler->executing_reactions) == 0); - // Loop until it's time to stop or work has been distributed - while (true) { - if (scheduler->next_reaction_level == (scheduler->max_reaction_level + 1)) { - scheduler->next_reaction_level = 0; - LF_MUTEX_LOCK(&env->mutex); - // Nothing more happening at this tag. - LF_PRINT_DEBUG("Scheduler: Advancing tag."); - // This worker thread will take charge of advancing tag. - if (_lf_sched_advance_tag_locked(scheduler)) { - LF_PRINT_DEBUG("Scheduler: Reached stop tag."); - _lf_sched_signal_stop(scheduler); - LF_MUTEX_UNLOCK(&env->mutex); - break; - } - LF_MUTEX_UNLOCK(&env->mutex); - } + // Loop until it's time to stop or work has been distributed + while (true) { + if (scheduler->next_reaction_level == (scheduler->max_reaction_level + 1)) { + scheduler->next_reaction_level = 0; + LF_MUTEX_LOCK(&env->mutex); + // Nothing more happening at this tag. + LF_PRINT_DEBUG("Scheduler: Advancing tag."); + // This worker thread will take charge of advancing tag. + if (_lf_sched_advance_tag_locked(scheduler)) { + LF_PRINT_DEBUG("Scheduler: Reached stop tag."); + _lf_sched_signal_stop(scheduler); + LF_MUTEX_UNLOCK(&env->mutex); + break; + } + LF_MUTEX_UNLOCK(&env->mutex); + } - if (_lf_sched_distribute_ready_reactions(scheduler) > 0) { - _lf_sched_notify_workers(scheduler); - break; - } + if (_lf_sched_distribute_ready_reactions(scheduler) > 0) { + _lf_sched_notify_workers(scheduler); + break; } + } } /** @@ -161,27 +153,22 @@ void _lf_scheduler_try_advance_tag_and_distribute(lf_scheduler_t* scheduler) { * to be assigned to it. */ void _lf_sched_wait_for_work(lf_scheduler_t* scheduler, size_t worker_number) { - // Increment the number of idle workers by 1 and check if this is the last - // worker thread to become idle. 
- if (lf_atomic_add_fetch32((int32_t *) &scheduler->number_of_idle_workers, - 1) == - scheduler->number_of_workers) { - // Last thread to go idle - LF_PRINT_DEBUG("Scheduler: Worker %zu is the last idle thread.", - worker_number); - // Call on the scheduler to distribute work or advance tag. - _lf_scheduler_try_advance_tag_and_distribute(scheduler); - } else { - // Not the last thread to become idle. - // Wait for work to be released. - LF_PRINT_DEBUG( - "Scheduler: Worker %zu is trying to acquire the scheduling " - "semaphore.", - worker_number); - lf_semaphore_acquire(scheduler->semaphore); - LF_PRINT_DEBUG("Scheduler: Worker %zu acquired the scheduling semaphore.", - worker_number); - } + // Increment the number of idle workers by 1 and check if this is the last + // worker thread to become idle. + if (lf_atomic_add_fetch32((int32_t*)&scheduler->number_of_idle_workers, 1) == scheduler->number_of_workers) { + // Last thread to go idle + LF_PRINT_DEBUG("Scheduler: Worker %zu is the last idle thread.", worker_number); + // Call on the scheduler to distribute work or advance tag. + _lf_scheduler_try_advance_tag_and_distribute(scheduler); + } else { + // Not the last thread to become idle. + // Wait for work to be released. + LF_PRINT_DEBUG("Scheduler: Worker %zu is trying to acquire the scheduling " + "semaphore.", + worker_number); + lf_semaphore_acquire(scheduler->semaphore); + LF_PRINT_DEBUG("Scheduler: Worker %zu acquired the scheduling semaphore.", worker_number); + } } ///////////////////// Scheduler Init and Destroy API ///////////////////////// @@ -197,45 +184,36 @@ void _lf_sched_wait_for_work(lf_scheduler_t* scheduler, size_t worker_number) { * @param option Pointer to a `sched_params_t` struct containing additional * scheduler parameters. 
*/ -void lf_sched_init( - environment_t* env, - size_t number_of_workers, - sched_params_t* params -) { - assert(env != GLOBAL_ENVIRONMENT); +void lf_sched_init(environment_t* env, size_t number_of_workers, sched_params_t* params) { + assert(env != GLOBAL_ENVIRONMENT); - LF_PRINT_DEBUG("Scheduler: Initializing with %zu workers", number_of_workers); - if(!init_sched_instance(env, &env->scheduler, number_of_workers, params)) { - // Already initialized - return; - } - lf_scheduler_t* scheduler = env->scheduler; + LF_PRINT_DEBUG("Scheduler: Initializing with %zu workers", number_of_workers); + if (!init_sched_instance(env, &env->scheduler, number_of_workers, params)) { + // Already initialized + return; + } + lf_scheduler_t* scheduler = env->scheduler; - scheduler->triggered_reactions = calloc( - (scheduler->max_reaction_level + 1), - sizeof(pqueue_t*)); + scheduler->triggered_reactions = calloc((scheduler->max_reaction_level + 1), sizeof(pqueue_t*)); - scheduler->array_of_mutexes = (lf_mutex_t*)calloc( - (scheduler->max_reaction_level + 1), sizeof(lf_mutex_t)); + scheduler->array_of_mutexes = (lf_mutex_t*)calloc((scheduler->max_reaction_level + 1), sizeof(lf_mutex_t)); - size_t queue_size = INITIAL_REACT_QUEUE_SIZE; - for (size_t i = 0; i <= scheduler->max_reaction_level; i++) { - if (params != NULL) { - if (params->num_reactions_per_level != NULL) { - queue_size = params->num_reactions_per_level[i]; - } - } - // Initialize the reaction queues - ((pqueue_t**)scheduler->triggered_reactions)[i] = - pqueue_init(queue_size, in_reverse_order, get_reaction_index, - get_reaction_position, set_reaction_position, - reaction_matches, print_reaction); - // Initialize the mutexes for the reaction queues - LF_MUTEX_INIT(&scheduler->array_of_mutexes[i]); + size_t queue_size = INITIAL_REACT_QUEUE_SIZE; + for (size_t i = 0; i <= scheduler->max_reaction_level; i++) { + if (params != NULL) { + if (params->num_reactions_per_level != NULL) { + queue_size = 
params->num_reactions_per_level[i]; + } } + // Initialize the reaction queues + ((pqueue_t**)scheduler->triggered_reactions)[i] = + pqueue_init(queue_size, in_reverse_order, get_reaction_index, get_reaction_position, set_reaction_position, + reaction_matches, print_reaction); + // Initialize the mutexes for the reaction queues + LF_MUTEX_INIT(&scheduler->array_of_mutexes[i]); + } - scheduler->executing_reactions = - ((pqueue_t**)scheduler->triggered_reactions)[0]; + scheduler->executing_reactions = ((pqueue_t**)scheduler->triggered_reactions)[0]; } /** @@ -244,12 +222,12 @@ void lf_sched_init( * This must be called when the scheduler is no longer needed. */ void lf_sched_free(lf_scheduler_t* scheduler) { - // for (size_t j = 0; j <= scheduler->max_reaction_level; j++) { - // pqueue_free(scheduler->triggered_reactions[j]); - // FIXME: This is causing weird memory errors. - // } - pqueue_free((pqueue_t*)scheduler->executing_reactions); - lf_semaphore_destroy(scheduler->semaphore); + // for (size_t j = 0; j <= scheduler->max_reaction_level; j++) { + // pqueue_free(scheduler->triggered_reactions[j]); + // FIXME: This is causing weird memory errors. + // } + pqueue_free((pqueue_t*)scheduler->executing_reactions); + lf_semaphore_destroy(scheduler->semaphore); } ///////////////////// Scheduler Worker API (public) ///////////////////////// @@ -265,36 +243,31 @@ void lf_sched_free(lf_scheduler_t* scheduler) { * worker thread should exit. 
*/ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_number) { - // Iterate until the stop_tag is reached or reaction queue is empty - while (!scheduler->should_stop) { - // Need to lock the mutex for the current level - size_t current_level = - scheduler->next_reaction_level - 1; - LF_PRINT_DEBUG( - "Scheduler: Worker %d trying to lock the mutex for level %zu.", - worker_number, current_level); - LF_MUTEX_LOCK(&scheduler->array_of_mutexes[current_level]); - LF_PRINT_DEBUG("Scheduler: Worker %d locked the mutex for level %zu.", - worker_number, current_level); - reaction_t* reaction_to_return = (reaction_t*)pqueue_pop( - (pqueue_t*)scheduler->executing_reactions); - LF_MUTEX_UNLOCK(&scheduler->array_of_mutexes[current_level]); + // Iterate until the stop_tag is reached or reaction queue is empty + while (!scheduler->should_stop) { + // Need to lock the mutex for the current level + size_t current_level = scheduler->next_reaction_level - 1; + LF_PRINT_DEBUG("Scheduler: Worker %d trying to lock the mutex for level %zu.", worker_number, current_level); + LF_MUTEX_LOCK(&scheduler->array_of_mutexes[current_level]); + LF_PRINT_DEBUG("Scheduler: Worker %d locked the mutex for level %zu.", worker_number, current_level); + reaction_t* reaction_to_return = (reaction_t*)pqueue_pop((pqueue_t*)scheduler->executing_reactions); + LF_MUTEX_UNLOCK(&scheduler->array_of_mutexes[current_level]); - if (reaction_to_return != NULL) { - // Got a reaction - return reaction_to_return; - } + if (reaction_to_return != NULL) { + // Got a reaction + return reaction_to_return; + } - LF_PRINT_DEBUG("Worker %d is out of ready reactions.", worker_number); + LF_PRINT_DEBUG("Worker %d is out of ready reactions.", worker_number); - // Ask the scheduler for more work and wait - tracepoint_worker_wait_starts(scheduler->env->trace, worker_number); - _lf_sched_wait_for_work(scheduler, worker_number); - tracepoint_worker_wait_ends(scheduler->env->trace, worker_number); - } + // 
Ask the scheduler for more work and wait + tracepoint_worker_wait_starts(scheduler->env, worker_number); + _lf_sched_wait_for_work(scheduler, worker_number); + tracepoint_worker_wait_ends(scheduler->env, worker_number); + } - // It's time for the worker thread to stop and exit. - return NULL; + // It's time for the worker thread to stop and exit. + return NULL; } /** @@ -305,12 +278,10 @@ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_nu * finished executing 'done_reaction'. * @param done_reaction The reaction that is done. */ -void lf_sched_done_with_reaction(size_t worker_number, - reaction_t* done_reaction) { - if (!lf_atomic_bool_compare_and_swap32((int32_t *) &done_reaction->status, queued, inactive)) { - lf_print_error_and_exit("Unexpected reaction status: %d. Expected %d.", - done_reaction->status, queued); - } +void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction) { + if (!lf_atomic_bool_compare_and_swap32((int32_t*)&done_reaction->status, queued, inactive)) { + lf_print_error_and_exit("Unexpected reaction status: %d. Expected %d.", done_reaction->status, queued); + } } /** @@ -330,11 +301,10 @@ void lf_sched_done_with_reaction(size_t worker_number, * worker number does not make sense (e.g., the caller is not a worker thread). 
*/ void lf_scheduler_trigger_reaction(lf_scheduler_t* scheduler, reaction_t* reaction, int worker_number) { - if (reaction == NULL || !lf_atomic_bool_compare_and_swap32((int32_t *) &reaction->status, inactive, queued)) { - return; - } - LF_PRINT_DEBUG("Scheduler: Enqueueing reaction %s, which has level %lld.", - reaction->name, LF_LEVEL(reaction->index)); - _lf_sched_insert_reaction(scheduler, reaction); + if (reaction == NULL || !lf_atomic_bool_compare_and_swap32((int32_t*)&reaction->status, inactive, queued)) { + return; + } + LF_PRINT_DEBUG("Scheduler: Enqueueing reaction %s, which has level %lld.", reaction->name, LF_LEVEL(reaction->index)); + _lf_sched_insert_reaction(scheduler, reaction); } #endif // SCHEDULER == SCHED_GEDF_NP diff --git a/core/threaded/scheduler_NP.c b/core/threaded/scheduler_NP.c index fee5775f4..630464dd6 100644 --- a/core/threaded/scheduler_NP.c +++ b/core/threaded/scheduler_NP.c @@ -13,17 +13,17 @@ #ifndef NUMBER_OF_WORKERS #define NUMBER_OF_WORKERS 1 -#endif // NUMBER_OF_WORKERS +#endif // NUMBER_OF_WORKERS #include -#include "platform.h" +#include "low_level_platform.h" #include "environment.h" #include "scheduler_instance.h" #include "scheduler_sync_tag_advance.h" #include "scheduler.h" #include "lf_semaphore.h" -#include "trace.h" +#include "tracepoint.h" #include "util.h" #include "reactor_threaded.h" @@ -34,49 +34,43 @@ * * @param reaction The reaction to insert. */ -static inline void _lf_sched_insert_reaction(lf_scheduler_t * scheduler, reaction_t* reaction) { - size_t reaction_level = LF_LEVEL(reaction->index); +static inline void _lf_sched_insert_reaction(lf_scheduler_t* scheduler, reaction_t* reaction) { + size_t reaction_level = LF_LEVEL(reaction->index); #ifdef FEDERATED - // Lock the mutex if federated because a federate can insert reactions with - // a level equal to the current level. 
- size_t current_level = scheduler->next_reaction_level - 1; - // There is a race condition here where - // `scheduler->next_reaction_level` can change after it is - // cached here. In that case, if the cached value is equal to - // `reaction_level`, the cost will be an additional unnecessary mutex lock, - // but no logic error. If the cached value is not equal to `reaction_level`, - // it can never become `reaction_level` because the scheduler will only - // change the `scheduler->next_reaction_level` if it can - // ensure that all worker threads are idle, and thus, none are triggering - // reactions (and therefore calling this function). - if (reaction_level == current_level) { - LF_PRINT_DEBUG("Scheduler: Trying to lock the mutex for level %zu.", - reaction_level); - LF_MUTEX_LOCK(&scheduler->array_of_mutexes[reaction_level]); - LF_PRINT_DEBUG("Scheduler: Locked the mutex for level %zu.", reaction_level); - } - // The level index for the current level can sometimes become negative. Set - // it back to zero before adding a reaction (otherwise worker threads will - // not be able to see the added reaction). - if (scheduler->indexes[reaction_level] < 0) { - scheduler->indexes[reaction_level] = 0; - } + // Lock the mutex if federated because a federate can insert reactions with + // a level equal to the current level. + size_t current_level = scheduler->next_reaction_level - 1; + // There is a race condition here where + // `scheduler->next_reaction_level` can change after it is + // cached here. In that case, if the cached value is equal to + // `reaction_level`, the cost will be an additional unnecessary mutex lock, + // but no logic error. If the cached value is not equal to `reaction_level`, + // it can never become `reaction_level` because the scheduler will only + // change the `scheduler->next_reaction_level` if it can + // ensure that all worker threads are idle, and thus, none are triggering + // reactions (and therefore calling this function). 
+ if (reaction_level == current_level) { + LF_PRINT_DEBUG("Scheduler: Trying to lock the mutex for level %zu.", reaction_level); + LF_MUTEX_LOCK(&scheduler->array_of_mutexes[reaction_level]); + LF_PRINT_DEBUG("Scheduler: Locked the mutex for level %zu.", reaction_level); + } + // The level index for the current level can sometimes become negative. Set + // it back to zero before adding a reaction (otherwise worker threads will + // not be able to see the added reaction). + if (scheduler->indexes[reaction_level] < 0) { + scheduler->indexes[reaction_level] = 0; + } #endif - int reaction_q_level_index = - lf_atomic_fetch_add32((int32_t *) &scheduler->indexes[reaction_level], 1); - assert(reaction_q_level_index >= 0); - LF_PRINT_DEBUG( - "Scheduler: Accessing triggered reactions at the level %zu with index %d.", - reaction_level, - reaction_q_level_index - ); - ((reaction_t***)scheduler->triggered_reactions)[reaction_level][reaction_q_level_index] = reaction; - LF_PRINT_DEBUG("Scheduler: Index for level %zu is at %d.", reaction_level, - reaction_q_level_index); + int reaction_q_level_index = lf_atomic_fetch_add32((int32_t*)&scheduler->indexes[reaction_level], 1); + assert(reaction_q_level_index >= 0); + LF_PRINT_DEBUG("Scheduler: Accessing triggered reactions at the level %zu with index %d.", reaction_level, + reaction_q_level_index); + ((reaction_t***)scheduler->triggered_reactions)[reaction_level][reaction_q_level_index] = reaction; + LF_PRINT_DEBUG("Scheduler: Index for level %zu is at %d.", reaction_level, reaction_q_level_index); #ifdef FEDERATED - if (reaction_level == current_level) { - LF_MUTEX_UNLOCK(&scheduler->array_of_mutexes[reaction_level]); - } + if (reaction_level == current_level) { + LF_MUTEX_UNLOCK(&scheduler->array_of_mutexes[reaction_level]); + } #endif } @@ -87,26 +81,25 @@ static inline void _lf_sched_insert_reaction(lf_scheduler_t * scheduler, reactio * @return 1 if any reaction is ready. 0 otherwise. 
*/ int _lf_sched_distribute_ready_reactions(lf_scheduler_t* scheduler) { - // Note: All the threads are idle, which means that they are done inserting - // reactions. Therefore, the reaction vectors can be accessed without - // locking a mutex. - while (scheduler->next_reaction_level <= scheduler->max_reaction_level) { - LF_PRINT_DEBUG("Waiting with curr_reaction_level %zu.", scheduler->next_reaction_level); - try_advance_level(scheduler->env, &scheduler->next_reaction_level); - - scheduler->executing_reactions = - (void*)((reaction_t***)scheduler->triggered_reactions)[ - scheduler->next_reaction_level - 1 - ]; - - LF_PRINT_DEBUG("Start of rxn queue at %zu is %p", scheduler->next_reaction_level - 1, ((reaction_t**)scheduler->executing_reactions)[0]); - if (((reaction_t**)scheduler->executing_reactions)[0] != NULL) { - // There is at least one reaction to execute - return 1; - } + // Note: All the threads are idle, which means that they are done inserting + // reactions. Therefore, the reaction vectors can be accessed without + // locking a mutex. + while (scheduler->next_reaction_level <= scheduler->max_reaction_level) { + LF_PRINT_DEBUG("Waiting with curr_reaction_level %zu.", scheduler->next_reaction_level); + try_advance_level(scheduler->env, &scheduler->next_reaction_level); + + scheduler->executing_reactions = + (void*)((reaction_t***)scheduler->triggered_reactions)[scheduler->next_reaction_level - 1]; + + LF_PRINT_DEBUG("Start of rxn queue at %zu is %p", scheduler->next_reaction_level - 1, + ((reaction_t**)scheduler->executing_reactions)[0]); + if (((reaction_t**)scheduler->executing_reactions)[0] != NULL) { + // There is at least one reaction to execute + return 1; } + } - return 0; + return 0; } /** @@ -115,29 +108,25 @@ int _lf_sched_distribute_ready_reactions(lf_scheduler_t* scheduler) { * This assumes that the caller is not holding any thread mutexes. 
*/ void _lf_sched_notify_workers(lf_scheduler_t* scheduler) { - // Calculate the number of workers that we need to wake up, which is the - // Note: All threads are idle. Therefore, there is no need to lock the mutex - // while accessing the index for the current level. - size_t workers_to_awaken = - LF_MIN(scheduler->number_of_idle_workers, - scheduler->indexes[ - scheduler->next_reaction_level - 1 // Current - // reaction - // level - // to execute. - ]); - LF_PRINT_DEBUG("Scheduler: Notifying %zu workers.", workers_to_awaken); - - scheduler->number_of_idle_workers -= workers_to_awaken; - LF_PRINT_DEBUG("Scheduler: New number of idle workers: %zu.", - scheduler->number_of_idle_workers); - - if (workers_to_awaken > 1) { - // Notify all the workers except the worker thread that has called this - // function. - lf_semaphore_release(scheduler->semaphore, - (workers_to_awaken - 1)); - } + // Calculate the number of workers that we need to wake up, which is the + // Note: All threads are idle. Therefore, there is no need to lock the mutex + // while accessing the index for the current level. + size_t workers_to_awaken = LF_MIN(scheduler->number_of_idle_workers, + scheduler->indexes[scheduler->next_reaction_level - 1 // Current + // reaction + // level + // to execute. + ]); + LF_PRINT_DEBUG("Scheduler: Notifying %zu workers.", workers_to_awaken); + + scheduler->number_of_idle_workers -= workers_to_awaken; + LF_PRINT_DEBUG("Scheduler: New number of idle workers: %zu.", scheduler->number_of_idle_workers); + + if (workers_to_awaken > 1) { + // Notify all the workers except the worker thread that has called this + // function. 
+ lf_semaphore_release(scheduler->semaphore, (workers_to_awaken - 1)); + } } /** @@ -145,9 +134,8 @@ void _lf_sched_notify_workers(lf_scheduler_t* scheduler) { * */ void _lf_sched_signal_stop(lf_scheduler_t* scheduler) { - scheduler->should_stop = true; - lf_semaphore_release(scheduler->semaphore, - (scheduler->number_of_workers - 1)); + scheduler->should_stop = true; + lf_semaphore_release(scheduler->semaphore, (scheduler->number_of_workers - 1)); } /** @@ -159,35 +147,32 @@ void _lf_sched_signal_stop(lf_scheduler_t* scheduler) { * This function assumes the caller does not hold the 'mutex' lock. */ void _lf_scheduler_try_advance_tag_and_distribute(lf_scheduler_t* scheduler) { - // Reset the index - environment_t *env = scheduler->env; - scheduler - ->indexes[scheduler->next_reaction_level - - 1] = 0; - - // Loop until it's time to stop or work has been distributed - while (true) { - if (scheduler->next_reaction_level == - (scheduler->max_reaction_level + 1)) { - scheduler->next_reaction_level = 0; - LF_MUTEX_LOCK(&env->mutex); - // Nothing more happening at this tag. - LF_PRINT_DEBUG("Scheduler: Advancing tag."); - // This worker thread will take charge of advancing tag. - if (_lf_sched_advance_tag_locked(scheduler)) { - LF_PRINT_DEBUG("Scheduler: Reached stop tag."); - _lf_sched_signal_stop(scheduler); - LF_MUTEX_UNLOCK(&env->mutex); - break; - } - LF_MUTEX_UNLOCK(&env->mutex); - } - - if (_lf_sched_distribute_ready_reactions(scheduler) > 0) { - _lf_sched_notify_workers(scheduler); - break; - } + // Reset the index + environment_t* env = scheduler->env; + scheduler->indexes[scheduler->next_reaction_level - 1] = 0; + + // Loop until it's time to stop or work has been distributed + while (true) { + if (scheduler->next_reaction_level == (scheduler->max_reaction_level + 1)) { + scheduler->next_reaction_level = 0; + LF_MUTEX_LOCK(&env->mutex); + // Nothing more happening at this tag. 
+ LF_PRINT_DEBUG("Scheduler: Advancing tag."); + // This worker thread will take charge of advancing tag. + if (_lf_sched_advance_tag_locked(scheduler)) { + LF_PRINT_DEBUG("Scheduler: Reached stop tag."); + _lf_sched_signal_stop(scheduler); + LF_MUTEX_UNLOCK(&env->mutex); + break; + } + LF_MUTEX_UNLOCK(&env->mutex); } + + if (_lf_sched_distribute_ready_reactions(scheduler) > 0) { + _lf_sched_notify_workers(scheduler); + break; + } + } } /** @@ -201,26 +186,21 @@ void _lf_scheduler_try_advance_tag_and_distribute(lf_scheduler_t* scheduler) { * to be assigned to it. */ void _lf_sched_wait_for_work(lf_scheduler_t* scheduler, size_t worker_number) { - // Increment the number of idle workers by 1 and check if this is the last - // worker thread to become idle. - if (lf_atomic_add_fetch32((int32_t *) &scheduler->number_of_idle_workers, - 1) == - scheduler->number_of_workers) { - // Last thread to go idle - LF_PRINT_DEBUG("Scheduler: Worker %zu is the last idle thread.", - worker_number); - // Call on the scheduler to distribute work or advance tag. - _lf_scheduler_try_advance_tag_and_distribute(scheduler); - } else { - // Not the last thread to become idle. Wait for work to be released. - LF_PRINT_DEBUG( - "Scheduler: Worker %zu is trying to acquire the scheduling " - "semaphore.", - worker_number); - lf_semaphore_acquire(scheduler->semaphore); - LF_PRINT_DEBUG("Scheduler: Worker %zu acquired the scheduling semaphore.", - worker_number); - } + // Increment the number of idle workers by 1 and check if this is the last + // worker thread to become idle. + if (lf_atomic_add_fetch32((int32_t*)&scheduler->number_of_idle_workers, 1) == scheduler->number_of_workers) { + // Last thread to go idle + LF_PRINT_DEBUG("Scheduler: Worker %zu is the last idle thread.", worker_number); + // Call on the scheduler to distribute work or advance tag. + _lf_scheduler_try_advance_tag_and_distribute(scheduler); + } else { + // Not the last thread to become idle. Wait for work to be released. 
+ LF_PRINT_DEBUG("Scheduler: Worker %zu is trying to acquire the scheduling " + "semaphore.", + worker_number); + lf_semaphore_acquire(scheduler->semaphore); + LF_PRINT_DEBUG("Scheduler: Worker %zu acquired the scheduling semaphore.", worker_number); + } } ///////////////////// Scheduler Init and Destroy API ///////////////////////// @@ -236,64 +216,49 @@ void _lf_sched_wait_for_work(lf_scheduler_t* scheduler, size_t worker_number) { * @param option Pointer to a `sched_params_t` struct containing additional * scheduler parameters. */ -void lf_sched_init( - environment_t *env, - size_t number_of_workers, - sched_params_t* params -) { - assert(env != GLOBAL_ENVIRONMENT); - - LF_PRINT_DEBUG("Scheduler: Initializing with %zu workers", number_of_workers); - - // This scheduler is unique in that it requires `num_reactions_per_level` to - // work correctly. - if (init_sched_instance(env, &env->scheduler, number_of_workers, params)) { - // Scheduler has not been initialized before. - if (params == NULL || params->num_reactions_per_level == NULL) { - lf_print_warning("Scheduler initialized with no reactions"); - return; - } - } else { - // Already initialized - return; +void lf_sched_init(environment_t* env, size_t number_of_workers, sched_params_t* params) { + assert(env != GLOBAL_ENVIRONMENT); + + LF_PRINT_DEBUG("Scheduler: Initializing with %zu workers", number_of_workers); + + // This scheduler is unique in that it requires `num_reactions_per_level` to + // work correctly. + if (init_sched_instance(env, &env->scheduler, number_of_workers, params)) { + // Scheduler has not been initialized before. 
+ if (params == NULL || params->num_reactions_per_level == NULL) { + lf_print_warning("Scheduler initialized with no reactions"); + return; } + } else { + // Already initialized + return; + } - LF_PRINT_DEBUG("Scheduler: Max reaction level: %zu", env->scheduler->max_reaction_level); + LF_PRINT_DEBUG("Scheduler: Max reaction level: %zu", env->scheduler->max_reaction_level); - env->scheduler->triggered_reactions = - calloc((env->scheduler->max_reaction_level + 1), sizeof(reaction_t**)); + env->scheduler->triggered_reactions = calloc((env->scheduler->max_reaction_level + 1), sizeof(reaction_t**)); - env->scheduler->array_of_mutexes = (lf_mutex_t*)calloc( - (env->scheduler->max_reaction_level + 1), sizeof(lf_mutex_t)); + env->scheduler->array_of_mutexes = (lf_mutex_t*)calloc((env->scheduler->max_reaction_level + 1), sizeof(lf_mutex_t)); - env->scheduler->indexes = (volatile int*)calloc( - (env->scheduler->max_reaction_level + 1), sizeof(volatile int)); + env->scheduler->indexes = (volatile int*)calloc((env->scheduler->max_reaction_level + 1), sizeof(volatile int)); - size_t queue_size = INITIAL_REACT_QUEUE_SIZE; - for (size_t i = 0; i <= env->scheduler->max_reaction_level; i++) { - if (params != NULL) { - if (params->num_reactions_per_level != NULL) { - queue_size = params->num_reactions_per_level[i]; - } - } - // Initialize the reaction vectors - ((reaction_t***)env->scheduler->triggered_reactions)[i] = - (reaction_t**)calloc(queue_size, sizeof(reaction_t*)); + size_t queue_size = INITIAL_REACT_QUEUE_SIZE; + for (size_t i = 0; i <= env->scheduler->max_reaction_level; i++) { + if (params != NULL) { + if (params->num_reactions_per_level != NULL) { + queue_size = params->num_reactions_per_level[i]; + } + } + // Initialize the reaction vectors + ((reaction_t***)env->scheduler->triggered_reactions)[i] = (reaction_t**)calloc(queue_size, sizeof(reaction_t*)); - LF_PRINT_DEBUG( - "Scheduler: Initialized vector of reactions for level %zu with size %zu", - i, - queue_size - ); 
+ LF_PRINT_DEBUG("Scheduler: Initialized vector of reactions for level %zu with size %zu", i, queue_size); - // Initialize the mutexes for the reaction vectors - LF_MUTEX_INIT(&env->scheduler->array_of_mutexes[i]); + // Initialize the mutexes for the reaction vectors + LF_MUTEX_INIT(&env->scheduler->array_of_mutexes[i]); + } - } - - env->scheduler->executing_reactions = - (void*)((reaction_t***)env->scheduler-> - triggered_reactions)[0]; + env->scheduler->executing_reactions = (void*)((reaction_t***)env->scheduler->triggered_reactions)[0]; } /** @@ -302,14 +267,14 @@ void lf_sched_init( * This must be called when the scheduler is no longer needed. */ void lf_sched_free(lf_scheduler_t* scheduler) { - if (scheduler->triggered_reactions) { - for (size_t j = 0; j <= scheduler->max_reaction_level; j++) { - free(((reaction_t***)scheduler->triggered_reactions)[j]); - } - free(scheduler->triggered_reactions); + if (scheduler->triggered_reactions) { + for (size_t j = 0; j <= scheduler->max_reaction_level; j++) { + free(((reaction_t***)scheduler->triggered_reactions)[j]); } + free(scheduler->triggered_reactions); + } - lf_semaphore_destroy(scheduler->semaphore); + lf_semaphore_destroy(scheduler->semaphore); } ///////////////////// Scheduler Worker API (public) ///////////////////////// @@ -325,51 +290,43 @@ void lf_sched_free(lf_scheduler_t* scheduler) { * worker thread should exit. 
*/ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_number) { - // Iterate until the stop tag is reached or reaction vectors are empty - while (!scheduler->should_stop) { - // Calculate the current level of reactions to execute - size_t current_level = - scheduler->next_reaction_level - 1; - reaction_t* reaction_to_return = NULL; + // Iterate until the stop tag is reached or reaction vectors are empty + while (!scheduler->should_stop) { + // Calculate the current level of reactions to execute + size_t current_level = scheduler->next_reaction_level - 1; + reaction_t* reaction_to_return = NULL; #ifdef FEDERATED - // Need to lock the mutex because federate.c could trigger reactions at - // the current level (if there is a causality loop) - LF_MUTEX_LOCK(&scheduler->array_of_mutexes[current_level]); + // Need to lock the mutex because federate.c could trigger reactions at + // the current level (if there is a causality loop) + LF_MUTEX_LOCK(&scheduler->array_of_mutexes[current_level]); #endif - int current_level_q_index = lf_atomic_add_fetch32( - (int32_t *) &scheduler->indexes[current_level], -1); - if (current_level_q_index >= 0) { - LF_PRINT_DEBUG( - "Scheduler: Worker %d popping reaction with level %zu, index " - "for level: %d.", - worker_number, current_level, current_level_q_index - ); - reaction_to_return = - ((reaction_t**)scheduler-> - executing_reactions)[current_level_q_index]; - ((reaction_t**)scheduler-> - executing_reactions)[current_level_q_index] = NULL; - } + int current_level_q_index = lf_atomic_add_fetch32((int32_t*)&scheduler->indexes[current_level], -1); + if (current_level_q_index >= 0) { + LF_PRINT_DEBUG("Scheduler: Worker %d popping reaction with level %zu, index " + "for level: %d.", + worker_number, current_level, current_level_q_index); + reaction_to_return = ((reaction_t**)scheduler->executing_reactions)[current_level_q_index]; + ((reaction_t**)scheduler->executing_reactions)[current_level_q_index] = NULL; + } 
#ifdef FEDERATED - lf_mutex_unlock( - &scheduler->array_of_mutexes[current_level]); + lf_mutex_unlock(&scheduler->array_of_mutexes[current_level]); #endif - if (reaction_to_return != NULL) { - // Got a reaction - return reaction_to_return; - } + if (reaction_to_return != NULL) { + // Got a reaction + return reaction_to_return; + } - LF_PRINT_DEBUG("Worker %d is out of ready reactions.", worker_number); + LF_PRINT_DEBUG("Worker %d is out of ready reactions.", worker_number); - // Ask the scheduler for more work and wait - tracepoint_worker_wait_starts(scheduler->env->trace, worker_number); - _lf_sched_wait_for_work(scheduler, worker_number); - tracepoint_worker_wait_ends(scheduler->env->trace, worker_number); - } + // Ask the scheduler for more work and wait + tracepoint_worker_wait_starts(scheduler->env, worker_number); + _lf_sched_wait_for_work(scheduler, worker_number); + tracepoint_worker_wait_ends(scheduler->env, worker_number); + } - // It's time for the worker thread to stop and exit. - return NULL; + // It's time for the worker thread to stop and exit. + return NULL; } /** @@ -380,12 +337,10 @@ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_nu * finished executing 'done_reaction'. * @param done_reaction The reaction that is done. */ -void lf_sched_done_with_reaction(size_t worker_number, - reaction_t* done_reaction) { - if (!lf_atomic_bool_compare_and_swap32((int32_t *) &done_reaction->status, queued, inactive)) { - lf_print_error_and_exit("Unexpected reaction status: %d. Expected %d.", - done_reaction->status, queued); - } +void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction) { + if (!lf_atomic_bool_compare_and_swap32((int32_t*)&done_reaction->status, queued, inactive)) { + lf_print_error_and_exit("Unexpected reaction status: %d. 
Expected %d.", done_reaction->status, queued); + } } /** @@ -408,11 +363,10 @@ void lf_sched_done_with_reaction(size_t worker_number, * */ void lf_scheduler_trigger_reaction(lf_scheduler_t* scheduler, reaction_t* reaction, int worker_number) { - if (reaction == NULL || !lf_atomic_bool_compare_and_swap32((int32_t *) &reaction->status, inactive, queued)) { - return; - } - LF_PRINT_DEBUG("Scheduler: Enqueueing reaction %s, which has level %lld.", - reaction->name, LF_LEVEL(reaction->index)); - _lf_sched_insert_reaction(scheduler, reaction); + if (reaction == NULL || !lf_atomic_bool_compare_and_swap32((int32_t*)&reaction->status, inactive, queued)) { + return; + } + LF_PRINT_DEBUG("Scheduler: Enqueueing reaction %s, which has level %lld.", reaction->name, LF_LEVEL(reaction->index)); + _lf_sched_insert_reaction(scheduler, reaction); } #endif // SCHEDULER == SCHED_NP || !defined(SCHEDULER) diff --git a/core/threaded/scheduler_adaptive.c b/core/threaded/scheduler_adaptive.c index 35921d642..99f50c043 100644 --- a/core/threaded/scheduler_adaptive.c +++ b/core/threaded/scheduler_adaptive.c @@ -35,11 +35,8 @@ static void data_collection_init(lf_scheduler_t* scheduler, sched_params_t* para static void data_collection_free(lf_scheduler_t* scheduler); static void data_collection_start_level(lf_scheduler_t* scheduler, size_t level); static void data_collection_end_level(lf_scheduler_t* scheduler, size_t level, size_t num_workers); -static void data_collection_end_tag( - lf_scheduler_t* scheduler, - size_t* num_workers_by_level, - size_t* max_num_workers_by_level -); +static void data_collection_end_tag(lf_scheduler_t* scheduler, size_t* num_workers_by_level, + size_t* max_num_workers_by_level); /** * The level counter is a number that changes whenever the current level changes. * @@ -48,63 +45,61 @@ static void data_collection_end_tag( * have returned to the same value must be negligible. 
*/ - /////////////////// Scheduler Variables and Structs ///////////////////////// typedef struct { - /** An array of condition variables, each corresponding to a group of workers. */ - lf_cond_t* worker_conds; - /** The cumsum of the sizes of the groups of workers corresponding to each successive cond. */ - size_t* cumsum_of_worker_group_sizes; - /** The number of non-waiting threads. */ - volatile size_t num_loose_threads; - /** The number of threads that were awakened for the purpose of executing the current level. */ - volatile size_t num_awakened; - /** Whether the mutex is held by each worker via this module's API. */ - bool* mutex_held; + /** An array of condition variables, each corresponding to a group of workers. */ + lf_cond_t* worker_conds; + /** The cumsum of the sizes of the groups of workers corresponding to each successive cond. */ + size_t* cumsum_of_worker_group_sizes; + /** The number of non-waiting threads. */ + volatile size_t num_loose_threads; + /** The number of threads that were awakened for the purpose of executing the current level. */ + volatile size_t num_awakened; + /** Whether the mutex is held by each worker via this module's API. */ + bool* mutex_held; } worker_states_t; - typedef struct { - /** The queued reactions. */ - reaction_t**** reactions_by_worker_by_level; - /** The number of queued reactions currently assigned to each worker at each level. */ - size_t** num_reactions_by_worker_by_level; - /** The maximum number of workers that could possibly be kept simultaneously busy at each level. */ - size_t* max_num_workers_by_level; - /** The number of workers that will be used to execute each level. */ - size_t* num_workers_by_level; - /** The number of levels. */ - size_t num_levels; - /** The maximum number of workers that can be used to execute any level. */ - size_t max_num_workers; - /** The following values apply to the current level. 
*/ - size_t current_level; - /** The number of reactions each worker still has to execute, indexed by worker. */ - size_t* num_reactions_by_worker; - /** The reactions to be executed, indexed by assigned worker. */ - reaction_t*** reactions_by_worker; - /** The total number of workers active, including those who have finished their work. */ - size_t num_workers; + /** The queued reactions. */ + reaction_t**** reactions_by_worker_by_level; + /** The number of queued reactions currently assigned to each worker at each level. */ + size_t** num_reactions_by_worker_by_level; + /** The maximum number of workers that could possibly be kept simultaneously busy at each level. */ + size_t* max_num_workers_by_level; + /** The number of workers that will be used to execute each level. */ + size_t* num_workers_by_level; + /** The number of levels. */ + size_t num_levels; + /** The maximum number of workers that can be used to execute any level. */ + size_t max_num_workers; + /** The following values apply to the current level. */ + size_t current_level; + /** The number of reactions each worker still has to execute, indexed by worker. */ + size_t* num_reactions_by_worker; + /** The reactions to be executed, indexed by assigned worker. */ + reaction_t*** reactions_by_worker; + /** The total number of workers active, including those who have finished their work. 
*/ + size_t num_workers; } worker_assignments_t; typedef struct { - interval_t* start_times_by_level; - interval_t** execution_times_by_num_workers_by_level; - interval_t* execution_times_mins; - size_t* execution_times_argmins; - size_t data_collection_counter; - bool collecting_data; - size_t* possible_nums_workers; - size_t num_levels; + interval_t* start_times_by_level; + interval_t** execution_times_by_num_workers_by_level; + interval_t* execution_times_mins; + size_t* execution_times_argmins; + size_t data_collection_counter; + bool collecting_data; + size_t* possible_nums_workers; + size_t num_levels; } data_collection_t; typedef struct custom_scheduler_data_t { - worker_states_t* worker_states; - worker_assignments_t* worker_assignments; - data_collection_t* data_collection; - bool init_called; - bool should_stop; - size_t level_counter; + worker_states_t* worker_states; + worker_assignments_t* worker_assignments; + data_collection_t* data_collection; + bool init_called; + bool should_stop; + size_t level_counter; } custom_scheduler_data_t; ///////////////////////// Scheduler Private Functions /////////////////////////// @@ -118,13 +113,13 @@ typedef struct custom_scheduler_data_t { * @return size_t The index of the condition variable used by worker. */ static size_t cond_of(size_t worker) { - // Note: __builtin_clz with GCC might be preferred, or fls (?). - int ret = 0; - while (worker) { - ret++; - worker >>= 1; - } - return ret; + // Note: __builtin_clz with GCC might be preferred, or fls (?). + int ret = 0; + while (worker) { + ret++; + worker >>= 1; + } + return ret; } ///////////////////////// Private Worker Assignments Functions /////////////////////////// @@ -135,69 +130,71 @@ static size_t cond_of(size_t worker) { * @param level The new current level. 
*/ static void set_level(lf_scheduler_t* scheduler, size_t level) { - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; - assert(level < worker_assignments->num_levels); - assert(0 <= level); - data_collection_end_level(scheduler, worker_assignments->current_level, worker_assignments->num_workers); - worker_assignments->current_level = level; - worker_assignments->num_reactions_by_worker = worker_assignments->num_reactions_by_worker_by_level[level]; - worker_assignments->reactions_by_worker = worker_assignments->reactions_by_worker_by_level[level]; - worker_assignments->num_workers = worker_assignments->num_workers_by_level[level]; - // TODO: Experiment with not recording that the level is starting in the case that there is - // nothing to execute. We need not optimize for the case when there is nothing to execute - // because that case is not merely optimized, but is optimized out (we do not bother with - // executing nothing). - data_collection_start_level(scheduler, worker_assignments->current_level); + worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; + assert(level < worker_assignments->num_levels); + assert(0 <= level); + data_collection_end_level(scheduler, worker_assignments->current_level, worker_assignments->num_workers); + worker_assignments->current_level = level; + worker_assignments->num_reactions_by_worker = worker_assignments->num_reactions_by_worker_by_level[level]; + worker_assignments->reactions_by_worker = worker_assignments->reactions_by_worker_by_level[level]; + worker_assignments->num_workers = worker_assignments->num_workers_by_level[level]; + // TODO: Experiment with not recording that the level is starting in the case that there is + // nothing to execute. We need not optimize for the case when there is nothing to execute + // because that case is not merely optimized, but is optimized out (we do not bother with + // executing nothing). 
+ data_collection_start_level(scheduler, worker_assignments->current_level); } /** @brief Return the total number of reactions enqueued on the current level. */ static size_t get_num_reactions(lf_scheduler_t* scheduler) { - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; - size_t total_num_reactions = 0; - for (size_t i = 0; i < worker_assignments->num_workers; i++) { - total_num_reactions += worker_assignments->num_reactions_by_worker[i]; - } - // TODO: if num_workers was > total_num_reactions, report this to data_collection? - return total_num_reactions; + worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; + size_t total_num_reactions = 0; + for (size_t i = 0; i < worker_assignments->num_workers; i++) { + total_num_reactions += worker_assignments->num_reactions_by_worker[i]; + } + // TODO: if num_workers was > total_num_reactions, report this to data_collection? + return total_num_reactions; } static void worker_assignments_init(lf_scheduler_t* scheduler, size_t number_of_workers, sched_params_t* params) { - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; - worker_assignments->num_levels = params->num_reactions_per_level_size; - worker_assignments->max_num_workers = number_of_workers; - worker_assignments->reactions_by_worker_by_level = (reaction_t****) malloc(sizeof(reaction_t***) * worker_assignments->num_levels); - worker_assignments->num_reactions_by_worker_by_level = (size_t**) malloc(sizeof(size_t*) * worker_assignments->num_levels); - worker_assignments->num_workers_by_level = (size_t*) malloc(sizeof(size_t) * worker_assignments->num_levels); - worker_assignments->max_num_workers_by_level = (size_t*) malloc(sizeof(size_t) * worker_assignments->num_levels); - for (size_t level = 0; level < worker_assignments->num_levels; level++) { - size_t num_reactions = params->num_reactions_per_level[level]; - size_t num_workers = num_reactions < 
worker_assignments->max_num_workers ? num_reactions : worker_assignments->max_num_workers; - worker_assignments->max_num_workers_by_level[level] = num_workers; - worker_assignments->num_workers_by_level[level] = worker_assignments->max_num_workers_by_level[level]; - worker_assignments->reactions_by_worker_by_level[level] = (reaction_t***) malloc( - sizeof(reaction_t**) * worker_assignments->max_num_workers - ); - worker_assignments->num_reactions_by_worker_by_level[level] = (size_t*) calloc(worker_assignments->max_num_workers, sizeof(size_t)); - for (size_t worker = 0; worker < worker_assignments->max_num_workers_by_level[level]; worker++) { - worker_assignments->reactions_by_worker_by_level[level][worker] = (reaction_t**) malloc( - sizeof(reaction_t*) * num_reactions - ); // Warning: This wastes space. - } + worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; + worker_assignments->num_levels = params->num_reactions_per_level_size; + worker_assignments->max_num_workers = number_of_workers; + worker_assignments->reactions_by_worker_by_level = + (reaction_t****)malloc(sizeof(reaction_t***) * worker_assignments->num_levels); + worker_assignments->num_reactions_by_worker_by_level = + (size_t**)malloc(sizeof(size_t*) * worker_assignments->num_levels); + worker_assignments->num_workers_by_level = (size_t*)malloc(sizeof(size_t) * worker_assignments->num_levels); + worker_assignments->max_num_workers_by_level = (size_t*)malloc(sizeof(size_t) * worker_assignments->num_levels); + for (size_t level = 0; level < worker_assignments->num_levels; level++) { + size_t num_reactions = params->num_reactions_per_level[level]; + size_t num_workers = + num_reactions < worker_assignments->max_num_workers ? 
num_reactions : worker_assignments->max_num_workers; + worker_assignments->max_num_workers_by_level[level] = num_workers; + worker_assignments->num_workers_by_level[level] = worker_assignments->max_num_workers_by_level[level]; + worker_assignments->reactions_by_worker_by_level[level] = + (reaction_t***)malloc(sizeof(reaction_t**) * worker_assignments->max_num_workers); + worker_assignments->num_reactions_by_worker_by_level[level] = + (size_t*)calloc(worker_assignments->max_num_workers, sizeof(size_t)); + for (size_t worker = 0; worker < worker_assignments->max_num_workers_by_level[level]; worker++) { + worker_assignments->reactions_by_worker_by_level[level][worker] = + (reaction_t**)malloc(sizeof(reaction_t*) * num_reactions); // Warning: This wastes space. } - set_level(scheduler, 0); + } + set_level(scheduler, 0); } static void worker_assignments_free(lf_scheduler_t* scheduler) { - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; - for (size_t level = 0; level < worker_assignments->num_levels; level++) { - for (size_t worker = 0; worker < worker_assignments->max_num_workers_by_level[level]; worker++) { - free(worker_assignments->reactions_by_worker_by_level[level][worker]); - } - free(worker_assignments->reactions_by_worker_by_level[level]); - free(worker_assignments->num_reactions_by_worker_by_level[level]); + worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; + for (size_t level = 0; level < worker_assignments->num_levels; level++) { + for (size_t worker = 0; worker < worker_assignments->max_num_workers_by_level[level]; worker++) { + free(worker_assignments->reactions_by_worker_by_level[level][worker]); } - free(worker_assignments->max_num_workers_by_level); - free(worker_assignments->num_workers_by_level); + free(worker_assignments->reactions_by_worker_by_level[level]); + free(worker_assignments->num_reactions_by_worker_by_level[level]); + } + 
free(worker_assignments->max_num_workers_by_level); + free(worker_assignments->num_workers_by_level); } /** @@ -206,31 +203,28 @@ static void worker_assignments_free(lf_scheduler_t* scheduler) { * @param worker The number of a worker needing work. */ static reaction_t* get_reaction(lf_scheduler_t* scheduler, size_t worker) { - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; + worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; #ifndef FEDERATED - int index = lf_atomic_add_fetch32((int32_t *) (worker_assignments->num_reactions_by_worker + worker), -1); - if (index >= 0) { - return worker_assignments->reactions_by_worker[worker][index]; - } - worker_assignments->num_reactions_by_worker[worker] = 0; - return NULL; -#else - // This is necessary for federated programs because reactions may be inserted into the current - // level. - int old_num_reactions; - int current_num_reactions = worker_assignments->num_reactions_by_worker[worker]; - int index; - do { - old_num_reactions = current_num_reactions; - if (old_num_reactions <= 0) return NULL; - } while ( - (current_num_reactions = lf_atomic_val_compare_and_swap32( - ((int32_t *) worker_assignments->num_reactions_by_worker + worker), - old_num_reactions, - (index = old_num_reactions - 1) - )) != old_num_reactions - ); + int index = lf_atomic_add_fetch32((int32_t*)(worker_assignments->num_reactions_by_worker + worker), -1); + if (index >= 0) { return worker_assignments->reactions_by_worker[worker][index]; + } + worker_assignments->num_reactions_by_worker[worker] = 0; + return NULL; +#else + // This is necessary for federated programs because reactions may be inserted into the current + // level. 
+ int old_num_reactions; + int current_num_reactions = worker_assignments->num_reactions_by_worker[worker]; + int index; + do { + old_num_reactions = current_num_reactions; + if (old_num_reactions <= 0) + return NULL; + } while ((current_num_reactions = lf_atomic_val_compare_and_swap32( + ((int32_t*)worker_assignments->num_reactions_by_worker + worker), old_num_reactions, + (index = old_num_reactions - 1))) != old_num_reactions); + return worker_assignments->reactions_by_worker[worker][index]; #endif } @@ -241,28 +235,27 @@ static reaction_t* get_reaction(lf_scheduler_t* scheduler, size_t worker) { * @return reaction_t* A reaction to execute, or NULL if no such reaction exists. */ static reaction_t* worker_assignments_get_or_lock(lf_scheduler_t* scheduler, size_t worker) { - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; - assert(worker >= 0); - // assert(worker < num_workers); // There are edge cases where this doesn't hold. - assert(worker_assignments->num_reactions_by_worker[worker] >= 0); - reaction_t* ret; - while (true) { - if ((ret = get_reaction(scheduler, worker))) return ret; - if (worker < worker_assignments->num_workers) { - for ( - size_t victim = (worker + 1) % worker_assignments->num_workers; - victim != worker; - victim = (victim + 1) % worker_assignments->num_workers - ) { - if ((ret = get_reaction(scheduler, victim))) return ret; - } - } - worker_states_lock(scheduler, worker); - if (!worker_assignments->num_reactions_by_worker[worker]) { - return NULL; - } - worker_states_unlock(scheduler, worker); + worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; + assert(worker >= 0); + // assert(worker < num_workers); // There are edge cases where this doesn't hold. 
+ assert(worker_assignments->num_reactions_by_worker[worker] >= 0); + reaction_t* ret; + while (true) { + if ((ret = get_reaction(scheduler, worker))) + return ret; + if (worker < worker_assignments->num_workers) { + for (size_t victim = (worker + 1) % worker_assignments->num_workers; victim != worker; + victim = (victim + 1) % worker_assignments->num_workers) { + if ((ret = get_reaction(scheduler, victim))) + return ret; + } + } + worker_states_lock(scheduler, worker); + if (!worker_assignments->num_reactions_by_worker[worker]) { + return NULL; } + worker_states_unlock(scheduler, worker); + } } /** @@ -270,56 +263,53 @@ static reaction_t* worker_assignments_get_or_lock(lf_scheduler_t* scheduler, siz * @param reaction A reaction to be executed in the current tag. */ static void worker_assignments_put(lf_scheduler_t* scheduler, reaction_t* reaction) { - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; - size_t level = LF_LEVEL(reaction->index); - assert(reaction != NULL); + worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; + size_t level = LF_LEVEL(reaction->index); + assert(reaction != NULL); #ifndef FEDERATED - assert(level > worker_assignments->current_level || worker_assignments->current_level == 0); + assert(level > worker_assignments->current_level || worker_assignments->current_level == 0); #endif - assert(level < worker_assignments->num_levels); - // Source: https://xorshift.di.unimi.it/splitmix64.c - // TODO: This is probably not the most efficient way to get the randomness that we need because - // it is designed to give an entire word of randomness, whereas we only need - // ~log2(num_workers_by_level[level]) bits of randomness. 
- uint64_t hash = (uint64_t) reaction; - hash = (hash ^ (hash >> 30)) * 0xbf58476d1ce4e5b9; - hash = (hash ^ (hash >> 27)) * 0x94d049bb133111eb; - hash = hash ^ (hash >> 31); - size_t worker = hash % worker_assignments->num_workers_by_level[level]; - size_t num_preceding_reactions = lf_atomic_fetch_add32( - (int32_t *) &worker_assignments->num_reactions_by_worker_by_level[level][worker], - 1 - ); - worker_assignments->reactions_by_worker_by_level[level][worker][num_preceding_reactions] = reaction; + assert(level < worker_assignments->num_levels); + // Source: https://xorshift.di.unimi.it/splitmix64.c + // TODO: This is probably not the most efficient way to get the randomness that we need because + // it is designed to give an entire word of randomness, whereas we only need + // ~log2(num_workers_by_level[level]) bits of randomness. + uint64_t hash = (uint64_t)reaction; + hash = (hash ^ (hash >> 30)) * 0xbf58476d1ce4e5b9; + hash = (hash ^ (hash >> 27)) * 0x94d049bb133111eb; + hash = hash ^ (hash >> 31); + size_t worker = hash % worker_assignments->num_workers_by_level[level]; + size_t num_preceding_reactions = + lf_atomic_fetch_add32((int32_t*)&worker_assignments->num_reactions_by_worker_by_level[level][worker], 1); + worker_assignments->reactions_by_worker_by_level[level][worker][num_preceding_reactions] = reaction; } - ///////////////////////// Private Worker States Functions /////////////////////////// static void worker_states_init(lf_scheduler_t* scheduler, size_t number_of_workers) { - worker_states_t* worker_states =scheduler->custom_data->worker_states; - size_t greatest_worker_number = number_of_workers - 1; - size_t num_conds = cond_of(greatest_worker_number) + 1; - worker_states->worker_conds = (lf_cond_t*) malloc(sizeof(lf_cond_t) * num_conds); - worker_states->cumsum_of_worker_group_sizes = (size_t*) calloc(num_conds, sizeof(size_t)); - worker_states->mutex_held = (bool*) calloc(number_of_workers, sizeof(bool)); - for (int i = 0; i < number_of_workers; 
i++) { - worker_states->cumsum_of_worker_group_sizes[cond_of(i)]++; - } - for (int i = 1; i < num_conds; i++) { - worker_states->cumsum_of_worker_group_sizes[i] += worker_states->cumsum_of_worker_group_sizes[i - 1]; - } - for (int i = 0; i < num_conds; i++) { - LF_COND_INIT(worker_states->worker_conds + i, &scheduler->env->mutex); - } - worker_states->num_loose_threads = scheduler->number_of_workers; + worker_states_t* worker_states = scheduler->custom_data->worker_states; + size_t greatest_worker_number = number_of_workers - 1; + size_t num_conds = cond_of(greatest_worker_number) + 1; + worker_states->worker_conds = (lf_cond_t*)malloc(sizeof(lf_cond_t) * num_conds); + worker_states->cumsum_of_worker_group_sizes = (size_t*)calloc(num_conds, sizeof(size_t)); + worker_states->mutex_held = (bool*)calloc(number_of_workers, sizeof(bool)); + for (int i = 0; i < number_of_workers; i++) { + worker_states->cumsum_of_worker_group_sizes[cond_of(i)]++; + } + for (int i = 1; i < num_conds; i++) { + worker_states->cumsum_of_worker_group_sizes[i] += worker_states->cumsum_of_worker_group_sizes[i - 1]; + } + for (int i = 0; i < num_conds; i++) { + LF_COND_INIT(worker_states->worker_conds + i, &scheduler->env->mutex); + } + worker_states->num_loose_threads = scheduler->number_of_workers; } static void worker_states_free(lf_scheduler_t* scheduler) { - // FIXME: Why do the condition variables and mutexes not need to be freed? - worker_states_t* worker_states =scheduler->custom_data->worker_states; - free(worker_states->worker_conds); - free(worker_states->mutex_held); + // FIXME: Why do the condition variables and mutexes not need to be freed? + worker_states_t* worker_states = scheduler->custom_data->worker_states; + free(worker_states->worker_conds); + free(worker_states->mutex_held); } /** @@ -330,50 +320,51 @@ static void worker_states_free(lf_scheduler_t* scheduler) { * @return A snapshot of the level counter after awakening the workers. 
*/ static void worker_states_awaken_locked(lf_scheduler_t* scheduler, size_t worker, size_t num_to_awaken) { - worker_states_t* worker_states = scheduler->custom_data->worker_states; - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; - assert(num_to_awaken <= worker_assignments->max_num_workers); - if ((worker == 0) && (num_to_awaken <= 1)) { - worker_states->num_loose_threads = 1; - return; - } - size_t greatest_worker_number_to_awaken = num_to_awaken - 1; - size_t max_cond = cond_of(greatest_worker_number_to_awaken); - if (!worker_states->mutex_held[worker]) { - worker_states->mutex_held[worker] = true; - LF_MUTEX_LOCK(&scheduler->env->mutex); - } - // The predicate of the condition variable depends on num_awakened and level_counter, so - // this is a critical section. - worker_states->num_loose_threads = worker_states->cumsum_of_worker_group_sizes[max_cond]; - worker_states->num_loose_threads += worker >= worker_states->num_loose_threads; - worker_states->num_awakened = worker_states->num_loose_threads; - scheduler->custom_data->level_counter++; - for (int cond = 0; cond <= max_cond; cond++) { - lf_cond_broadcast(worker_states->worker_conds + cond); - } + worker_states_t* worker_states = scheduler->custom_data->worker_states; + worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; + assert(num_to_awaken <= worker_assignments->max_num_workers); + if ((worker == 0) && (num_to_awaken <= 1)) { + worker_states->num_loose_threads = 1; + return; + } + size_t greatest_worker_number_to_awaken = num_to_awaken - 1; + size_t max_cond = cond_of(greatest_worker_number_to_awaken); + if (!worker_states->mutex_held[worker]) { + worker_states->mutex_held[worker] = true; + LF_MUTEX_LOCK(&scheduler->env->mutex); + } + // The predicate of the condition variable depends on num_awakened and level_counter, so + // this is a critical section. 
+ worker_states->num_loose_threads = worker_states->cumsum_of_worker_group_sizes[max_cond]; + worker_states->num_loose_threads += worker >= worker_states->num_loose_threads; + worker_states->num_awakened = worker_states->num_loose_threads; + scheduler->custom_data->level_counter++; + for (int cond = 0; cond <= max_cond; cond++) { + lf_cond_broadcast(worker_states->worker_conds + cond); + } } /** Lock the global mutex if needed. */ static void worker_states_lock(lf_scheduler_t* scheduler, size_t worker) { - worker_states_t* worker_states = scheduler->custom_data->worker_states; - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; - assert(worker_states->num_loose_threads > 0); - assert(worker_states->num_loose_threads <= worker_assignments->max_num_workers); - size_t lt = worker_states->num_loose_threads; - if (lt > 1 || !fast) { // FIXME: Lock should be partially optimized out even when !fast - LF_MUTEX_LOCK(&scheduler->env->mutex); - assert(!worker_states->mutex_held[worker]); - worker_states->mutex_held[worker] = true; - } + worker_states_t* worker_states = scheduler->custom_data->worker_states; + worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; + assert(worker_states->num_loose_threads > 0); + assert(worker_states->num_loose_threads <= worker_assignments->max_num_workers); + size_t lt = worker_states->num_loose_threads; + if (lt > 1 || !fast) { // FIXME: Lock should be partially optimized out even when !fast + LF_MUTEX_LOCK(&scheduler->env->mutex); + assert(!worker_states->mutex_held[worker]); + worker_states->mutex_held[worker] = true; + } } /** Unlock the global mutex if needed. 
*/ static void worker_states_unlock(lf_scheduler_t* scheduler, size_t worker) { - worker_states_t* worker_states = scheduler->custom_data->worker_states; - if (!worker_states->mutex_held[worker]) return; - worker_states->mutex_held[worker] = false; - LF_MUTEX_UNLOCK(&scheduler->env->mutex); + worker_states_t* worker_states = scheduler->custom_data->worker_states; + if (!worker_states->mutex_held[worker]) + return; + worker_states->mutex_held[worker] = false; + LF_MUTEX_UNLOCK(&scheduler->env->mutex); } /** @@ -384,17 +375,17 @@ static void worker_states_unlock(lf_scheduler_t* scheduler, size_t worker) { * @return false If at least one other worker is still working on the current level. */ static bool worker_states_finished_with_level_locked(lf_scheduler_t* scheduler, size_t worker) { - worker_states_t* worker_states = scheduler->custom_data->worker_states; - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; - assert(worker >= 0); - assert(worker_states->num_loose_threads > 0); - assert(worker_assignments->num_reactions_by_worker[worker] != 1); - assert(((int64_t) worker_assignments->num_reactions_by_worker[worker]) <= 0); - // Why use an atomic operation when we are supposed to be "as good as locked"? Because I took a - // shortcut, and the shortcut was imperfect. - size_t ret = lf_atomic_add_fetch32((int32_t *) &worker_states->num_loose_threads, -1); - assert(ret <= worker_assignments->max_num_workers); // Check for underflow - return !ret; + worker_states_t* worker_states = scheduler->custom_data->worker_states; + worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; + assert(worker >= 0); + assert(worker_states->num_loose_threads > 0); + assert(worker_assignments->num_reactions_by_worker[worker] != 1); + assert(((int64_t)worker_assignments->num_reactions_by_worker[worker]) <= 0); + // Why use an atomic operation when we are supposed to be "as good as locked"? 
Because I took a + // shortcut, and the shortcut was imperfect. + size_t ret = lf_atomic_add_fetch32((int32_t*)&worker_states->num_loose_threads, -1); + assert(ret <= worker_assignments->max_num_workers); // Check for underflow + return !ret; } /** @@ -408,24 +399,22 @@ static bool worker_states_finished_with_level_locked(lf_scheduler_t* scheduler, * sleep. */ static void worker_states_sleep_and_unlock(lf_scheduler_t* scheduler, size_t worker, size_t level_counter_snapshot) { - worker_states_t* worker_states = scheduler->custom_data->worker_states; - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; - assert(worker < worker_assignments->max_num_workers); - assert(worker_states->num_loose_threads <= worker_assignments->max_num_workers); - if (!worker_states->mutex_held[worker]) { - LF_MUTEX_LOCK(&scheduler->env->mutex); - } - worker_states->mutex_held[worker] = false; // This will be true soon, upon call to lf_cond_wait. - size_t cond = cond_of(worker); - if ( - ((level_counter_snapshot == scheduler->custom_data->level_counter) || worker >= worker_states->num_awakened) - ) { - do { - lf_cond_wait(worker_states->worker_conds + cond); - } while (level_counter_snapshot == scheduler->custom_data->level_counter || worker >= worker_states->num_awakened); - } - assert(!worker_states->mutex_held[worker]); // This thread holds the mutex, but it did not report that. - LF_MUTEX_UNLOCK(&scheduler->env->mutex); + worker_states_t* worker_states = scheduler->custom_data->worker_states; + worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; + assert(worker < worker_assignments->max_num_workers); + assert(worker_states->num_loose_threads <= worker_assignments->max_num_workers); + if (!worker_states->mutex_held[worker]) { + LF_MUTEX_LOCK(&scheduler->env->mutex); + } + worker_states->mutex_held[worker] = false; // This will be true soon, upon call to lf_cond_wait. 
+ size_t cond = cond_of(worker); + if (((level_counter_snapshot == scheduler->custom_data->level_counter) || worker >= worker_states->num_awakened)) { + do { + lf_cond_wait(worker_states->worker_conds + cond); + } while (level_counter_snapshot == scheduler->custom_data->level_counter || worker >= worker_states->num_awakened); + } + assert(!worker_states->mutex_held[worker]); // This thread holds the mutex, but it did not report that. + LF_MUTEX_UNLOCK(&scheduler->env->mutex); } /** @@ -433,40 +422,37 @@ static void worker_states_sleep_and_unlock(lf_scheduler_t* scheduler, size_t wor * @param worker The number of the calling worker. */ static void advance_level_and_unlock(lf_scheduler_t* scheduler, size_t worker) { - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; - worker_states_t* worker_states = scheduler->custom_data->worker_states; - size_t max_level = worker_assignments->num_levels - 1; - while (true) { - if (worker_assignments->current_level == max_level) { - data_collection_end_tag( - scheduler, - worker_assignments->num_workers_by_level, - worker_assignments->max_num_workers_by_level); - set_level(scheduler, 0); - if (_lf_sched_advance_tag_locked(scheduler)) { - scheduler->custom_data->should_stop = true; - worker_states_awaken_locked(scheduler, worker, worker_assignments->max_num_workers); - worker_states_unlock(scheduler, worker); - return; - } - } else { - try_advance_level(scheduler->env, &worker_assignments->current_level); - set_level(scheduler, worker_assignments->current_level); - } - size_t total_num_reactions = get_num_reactions(scheduler); - if (total_num_reactions) { - size_t num_workers_to_awaken = LF_MIN(total_num_reactions, worker_assignments->num_workers); - assert(num_workers_to_awaken > 0); - worker_states_awaken_locked(scheduler, worker, num_workers_to_awaken); - worker_states_unlock(scheduler, worker); - return; - } + worker_assignments_t* worker_assignments = 
scheduler->custom_data->worker_assignments; + worker_states_t* worker_states = scheduler->custom_data->worker_states; + size_t max_level = worker_assignments->num_levels - 1; + while (true) { + if (worker_assignments->current_level == max_level) { + data_collection_end_tag(scheduler, worker_assignments->num_workers_by_level, + worker_assignments->max_num_workers_by_level); + set_level(scheduler, 0); + if (_lf_sched_advance_tag_locked(scheduler)) { + scheduler->custom_data->should_stop = true; + worker_states_awaken_locked(scheduler, worker, worker_assignments->max_num_workers); + worker_states_unlock(scheduler, worker); + return; + } + } else { + try_advance_level(scheduler->env, &worker_assignments->current_level); + set_level(scheduler, worker_assignments->current_level); } + size_t total_num_reactions = get_num_reactions(scheduler); + if (total_num_reactions) { + size_t num_workers_to_awaken = LF_MIN(total_num_reactions, worker_assignments->num_workers); + assert(num_workers_to_awaken > 0); + worker_states_awaken_locked(scheduler, worker, num_workers_to_awaken); + worker_states_unlock(scheduler, worker); + return; + } + } } ///////////////////////// Private Data Collection Functions /////////////////////////// - /** * A monotonically increasing sequence of numbers of workers, the first and last elements of which * are too large or small to represent valid states of the system (i.e., state transitions to them @@ -479,21 +465,22 @@ static void advance_level_and_unlock(lf_scheduler_t* scheduler, size_t worker) { /** @brief Initialize the possible_nums_workers array. */ static void possible_nums_workers_init(lf_scheduler_t* scheduler) { - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; - data_collection_t* data_collection = scheduler->custom_data->data_collection; - // Start with 0 and end with two numbers strictly greater than max_num_workers. 
This must start - // at 4 because the first two and last two entries are not counted. - size_t pnw_length = 4; - size_t temp = worker_assignments->max_num_workers; - while ((temp >>= 1)) pnw_length++; - data_collection->possible_nums_workers = (size_t*) malloc(pnw_length * sizeof(size_t)); - temp = 1; - data_collection->possible_nums_workers[0] = 0; - for (int i = 1; i < pnw_length; i++) { - data_collection->possible_nums_workers[i] = temp; - temp *= 2; - } - assert(temp > worker_assignments->max_num_workers); + worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; + data_collection_t* data_collection = scheduler->custom_data->data_collection; + // Start with 0 and end with two numbers strictly greater than max_num_workers. This must start + // at 4 because the first two and last two entries are not counted. + size_t pnw_length = 4; + size_t temp = worker_assignments->max_num_workers; + while ((temp >>= 1)) + pnw_length++; + data_collection->possible_nums_workers = (size_t*)malloc(pnw_length * sizeof(size_t)); + temp = 1; + data_collection->possible_nums_workers[0] = 0; + for (int i = 1; i < pnw_length; i++) { + data_collection->possible_nums_workers[i] = temp; + temp *= 2; + } + assert(temp > worker_assignments->max_num_workers); } /** @@ -505,89 +492,97 @@ static void possible_nums_workers_init(lf_scheduler_t* scheduler) { * would like to optimize. */ static int get_jitter(size_t current_state, interval_t execution_time) { - static const size_t parallelism_cost_max = 114688; - // The following handles the case where the current level really is just fluff: - // No parallelism needed, no work to be done. 
- if (execution_time < 16384 && current_state == 1) return 0; - int left_score = 16384; // Want: For execution time = 65536, p(try left) = p(try right) - int middle_score = 65536; - int right_score = 65536; - if (execution_time < parallelism_cost_max) left_score += parallelism_cost_max - execution_time; - int result = rand() % (left_score + middle_score + right_score); - if (result < left_score) return -1; - if (result < left_score + middle_score) return 0; - return 1; + static const size_t parallelism_cost_max = 114688; + // The following handles the case where the current level really is just fluff: + // No parallelism needed, no work to be done. + if (execution_time < 16384 && current_state == 1) + return 0; + int left_score = 16384; // Want: For execution time = 65536, p(try left) = p(try right) + int middle_score = 65536; + int right_score = 65536; + if (execution_time < parallelism_cost_max) + left_score += parallelism_cost_max - execution_time; + int result = rand() % (left_score + middle_score + right_score); + if (result < left_score) + return -1; + if (result < left_score + middle_score) + return 0; + return 1; } /** @brief Get the number of workers resulting from a random state transition. */ -static size_t get_nums_workers_neighboring_state(lf_scheduler_t* scheduler, size_t current_state, interval_t execution_time) { - data_collection_t* data_collection = scheduler->custom_data->data_collection; - size_t jitter = get_jitter(current_state, execution_time); - if (!jitter) return current_state; - size_t i = 1; - // TODO: There are more efficient ways to do this. 
- while (data_collection->possible_nums_workers[i] < current_state) i++; - return data_collection->possible_nums_workers[i + jitter]; +static size_t get_nums_workers_neighboring_state(lf_scheduler_t* scheduler, size_t current_state, + interval_t execution_time) { + data_collection_t* data_collection = scheduler->custom_data->data_collection; + size_t jitter = get_jitter(current_state, execution_time); + if (!jitter) + return current_state; + size_t i = 1; + // TODO: There are more efficient ways to do this. + while (data_collection->possible_nums_workers[i] < current_state) + i++; + return data_collection->possible_nums_workers[i + jitter]; } static void data_collection_init(lf_scheduler_t* scheduler, sched_params_t* params) { - data_collection_t* data_collection = scheduler->custom_data->data_collection; - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; - data_collection->num_levels = params->num_reactions_per_level_size; - data_collection->start_times_by_level = (interval_t*) calloc(data_collection->num_levels, sizeof(interval_t)); - data_collection->execution_times_by_num_workers_by_level = (interval_t**) calloc( - data_collection->num_levels, sizeof(interval_t*) - ); - data_collection->execution_times_mins = (interval_t*) calloc(data_collection->num_levels, sizeof(interval_t)); - data_collection->execution_times_argmins = (size_t*) calloc(data_collection->num_levels, sizeof(size_t)); - for (size_t i = 0; i < data_collection->num_levels; i++) { - data_collection->execution_times_argmins[i] = worker_assignments->max_num_workers; - data_collection->execution_times_by_num_workers_by_level[i] = (interval_t*) calloc( - worker_assignments->max_num_workers + 1, // Add 1 for 1-based indexing - sizeof(interval_t) - ); - } - possible_nums_workers_init(scheduler); + data_collection_t* data_collection = scheduler->custom_data->data_collection; + worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; + 
data_collection->num_levels = params->num_reactions_per_level_size; + data_collection->start_times_by_level = (interval_t*)calloc(data_collection->num_levels, sizeof(interval_t)); + data_collection->execution_times_by_num_workers_by_level = + (interval_t**)calloc(data_collection->num_levels, sizeof(interval_t*)); + data_collection->execution_times_mins = (interval_t*)calloc(data_collection->num_levels, sizeof(interval_t)); + data_collection->execution_times_argmins = (size_t*)calloc(data_collection->num_levels, sizeof(size_t)); + for (size_t i = 0; i < data_collection->num_levels; i++) { + data_collection->execution_times_argmins[i] = worker_assignments->max_num_workers; + data_collection->execution_times_by_num_workers_by_level[i] = + (interval_t*)calloc(worker_assignments->max_num_workers + 1, // Add 1 for 1-based indexing + sizeof(interval_t)); + } + possible_nums_workers_init(scheduler); } // FIXME: This dependes on worker_assignments not being freed yet static void data_collection_free(lf_scheduler_t* scheduler) { - data_collection_t* data_collection = scheduler->custom_data->data_collection; - free(data_collection->start_times_by_level); - for (size_t i = 0; i < data_collection->num_levels; i++) { - free(data_collection->execution_times_by_num_workers_by_level[i]); - } - free(data_collection->execution_times_by_num_workers_by_level); - free(data_collection->possible_nums_workers); + data_collection_t* data_collection = scheduler->custom_data->data_collection; + free(data_collection->start_times_by_level); + for (size_t i = 0; i < data_collection->num_levels; i++) { + free(data_collection->execution_times_by_num_workers_by_level[i]); + } + free(data_collection->execution_times_by_num_workers_by_level); + free(data_collection->possible_nums_workers); } /** @brief Record that the execution of the given level is beginning. 
*/ static void data_collection_start_level(lf_scheduler_t* scheduler, size_t level) { - data_collection_t* data_collection = scheduler->custom_data->data_collection; - if (data_collection->collecting_data) data_collection->start_times_by_level[level] = lf_time_physical(); + data_collection_t* data_collection = scheduler->custom_data->data_collection; + if (data_collection->collecting_data) + data_collection->start_times_by_level[level] = lf_time_physical(); } /** @brief Record that the execution of the given level has completed. */ static void data_collection_end_level(lf_scheduler_t* scheduler, size_t level, size_t num_workers) { - data_collection_t* data_collection = scheduler->custom_data->data_collection; - if (data_collection->collecting_data && data_collection->start_times_by_level[level]) { - interval_t dt = lf_time_physical() - data_collection->start_times_by_level[level]; - if (!data_collection->execution_times_by_num_workers_by_level[level][num_workers]) { - data_collection->execution_times_by_num_workers_by_level[level][num_workers] = LF_MAX( - dt, - 2 * data_collection->execution_times_by_num_workers_by_level[level][data_collection->execution_times_argmins[level]] - ); - } - interval_t* prior_et = &data_collection->execution_times_by_num_workers_by_level[level][num_workers]; - *prior_et = (*prior_et * EXECUTION_TIME_MEMORY + dt) / (EXECUTION_TIME_MEMORY + 1); + data_collection_t* data_collection = scheduler->custom_data->data_collection; + if (data_collection->collecting_data && data_collection->start_times_by_level[level]) { + interval_t dt = lf_time_physical() - data_collection->start_times_by_level[level]; + if (!data_collection->execution_times_by_num_workers_by_level[level][num_workers]) { + data_collection->execution_times_by_num_workers_by_level[level][num_workers] = LF_MAX( + dt, + 2 * data_collection + ->execution_times_by_num_workers_by_level[level][data_collection->execution_times_argmins[level]]); } + interval_t* prior_et = 
&data_collection->execution_times_by_num_workers_by_level[level][num_workers]; + *prior_et = (*prior_et * EXECUTION_TIME_MEMORY + dt) / (EXECUTION_TIME_MEMORY + 1); + } } static size_t restrict_to_range(size_t start_inclusive, size_t end_inclusive, size_t value) { - assert(start_inclusive <= end_inclusive); - if (value < start_inclusive) return start_inclusive; - if (value > end_inclusive) return end_inclusive; - return value; + assert(start_inclusive <= end_inclusive); + if (value < start_inclusive) + return start_inclusive; + if (value > end_inclusive) + return end_inclusive; + return value; } /** @@ -598,32 +593,25 @@ static size_t restrict_to_range(size_t start_inclusive, size_t end_inclusive, si * @param jitter Whether the possibility of state transitions to numbers of workers that are not * (yet) empirically optimal is desired. */ -static void compute_number_of_workers( - lf_scheduler_t* scheduler, - size_t* num_workers_by_level, - size_t* max_num_workers_by_level, - bool jitter -) { - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; - data_collection_t* data_collection = scheduler->custom_data->data_collection; - for (size_t level = 0; level < data_collection->num_levels; level++) { - interval_t this_execution_time = data_collection->execution_times_by_num_workers_by_level[level][ - num_workers_by_level[level] - ]; - size_t ideal_number_of_workers; - size_t max_reasonable_num_workers = max_num_workers_by_level[level]; - ideal_number_of_workers = data_collection->execution_times_argmins[level]; - int range = 1; - if (jitter) { - ideal_number_of_workers = get_nums_workers_neighboring_state( - scheduler, ideal_number_of_workers, this_execution_time - ); - } - int minimum_workers = 1; - num_workers_by_level[level] = restrict_to_range( - minimum_workers, max_reasonable_num_workers, ideal_number_of_workers - ); +static void compute_number_of_workers(lf_scheduler_t* scheduler, size_t* num_workers_by_level, + size_t* 
max_num_workers_by_level, bool jitter) { + worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; + data_collection_t* data_collection = scheduler->custom_data->data_collection; + for (size_t level = 0; level < data_collection->num_levels; level++) { + interval_t this_execution_time = + data_collection->execution_times_by_num_workers_by_level[level][num_workers_by_level[level]]; + size_t ideal_number_of_workers; + size_t max_reasonable_num_workers = max_num_workers_by_level[level]; + ideal_number_of_workers = data_collection->execution_times_argmins[level]; + int range = 1; + if (jitter) { + ideal_number_of_workers = + get_nums_workers_neighboring_state(scheduler, ideal_number_of_workers, this_execution_time); } + int minimum_workers = 1; + num_workers_by_level[level] = + restrict_to_range(minimum_workers, max_reasonable_num_workers, ideal_number_of_workers); + } } /** @@ -632,21 +620,16 @@ static void compute_number_of_workers( * @param num_workers_by_level The number of workers most recently used to execute each level. 
*/ static void compute_costs(lf_scheduler_t* scheduler, size_t* num_workers_by_level) { - data_collection_t* data_collection = scheduler->custom_data->data_collection; - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; - for (size_t level = 0; level < data_collection->num_levels; level++) { - interval_t score = data_collection->execution_times_by_num_workers_by_level[level][ - num_workers_by_level[level] - ]; - if ( - !data_collection->execution_times_mins[level] - | (score < data_collection->execution_times_mins[level]) - | (num_workers_by_level[level] == data_collection->execution_times_argmins[level]) - ) { - data_collection->execution_times_mins[level] = score; - data_collection->execution_times_argmins[level] = num_workers_by_level[level]; - } + data_collection_t* data_collection = scheduler->custom_data->data_collection; + worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; + for (size_t level = 0; level < data_collection->num_levels; level++) { + interval_t score = data_collection->execution_times_by_num_workers_by_level[level][num_workers_by_level[level]]; + if (!data_collection->execution_times_mins[level] | (score < data_collection->execution_times_mins[level]) | + (num_workers_by_level[level] == data_collection->execution_times_argmins[level])) { + data_collection->execution_times_mins[level] = score; + data_collection->execution_times_argmins[level] = num_workers_by_level[level]; } + } } /** @@ -655,99 +638,93 @@ static void compute_costs(lf_scheduler_t* scheduler, size_t* num_workers_by_leve * @param max_num_workers_by_level The maximum number of workers that could reasonably be used to * execute each level, for any tag. 
*/ -static void data_collection_end_tag( - lf_scheduler_t* scheduler, - size_t* num_workers_by_level, - size_t* max_num_workers_by_level -) { - worker_assignments_t * worker_assignments = scheduler->custom_data->worker_assignments; - data_collection_t* data_collection = scheduler->custom_data->data_collection; - if (data_collection->collecting_data && data_collection->start_times_by_level[0]) { - compute_costs(scheduler, num_workers_by_level); - } - data_collection->data_collection_counter++; - size_t period = 2 + 128 * (data_collection->data_collection_counter > SLOW_EXPERIMENTS); - size_t state = data_collection->data_collection_counter % period; - if (state == 0) { - compute_number_of_workers( - scheduler, - num_workers_by_level, - max_num_workers_by_level, - data_collection->data_collection_counter > START_EXPERIMENTS - ); - data_collection->collecting_data = true; - } else if (state == 1) { - compute_number_of_workers(scheduler, num_workers_by_level, max_num_workers_by_level, false); - data_collection->collecting_data = false; - } +static void data_collection_end_tag(lf_scheduler_t* scheduler, size_t* num_workers_by_level, + size_t* max_num_workers_by_level) { + worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; + data_collection_t* data_collection = scheduler->custom_data->data_collection; + if (data_collection->collecting_data && data_collection->start_times_by_level[0]) { + compute_costs(scheduler, num_workers_by_level); + } + data_collection->data_collection_counter++; + size_t period = 2 + 128 * (data_collection->data_collection_counter > SLOW_EXPERIMENTS); + size_t state = data_collection->data_collection_counter % period; + if (state == 0) { + compute_number_of_workers(scheduler, num_workers_by_level, max_num_workers_by_level, + data_collection->data_collection_counter > START_EXPERIMENTS); + data_collection->collecting_data = true; + } else if (state == 1) { + compute_number_of_workers(scheduler, 
num_workers_by_level, max_num_workers_by_level, false); + data_collection->collecting_data = false; + } } - ///////////////////// Scheduler Init and Destroy API ///////////////////////// void lf_sched_init(environment_t* env, size_t number_of_workers, sched_params_t* params) { - assert(env != GLOBAL_ENVIRONMENT); + assert(env != GLOBAL_ENVIRONMENT); - // TODO: Instead of making this a no-op, crash the program. If this gets called twice, then that - // is a bug that should be fixed. - if(!init_sched_instance(env, &env->scheduler, number_of_workers, params)) { - // Already initialized - return; - } - - lf_scheduler_t* scheduler = env->scheduler; - scheduler->custom_data = (custom_scheduler_data_t *) calloc(1, sizeof(custom_scheduler_data_t)); - LF_ASSERT_NON_NULL(scheduler->custom_data); - scheduler->custom_data->worker_states = (worker_states_t *) calloc(1, sizeof(worker_states_t)); - LF_ASSERT_NON_NULL(scheduler->custom_data->worker_states); - scheduler->custom_data->worker_assignments = (worker_assignments_t *) calloc(1, sizeof(worker_assignments_t)); - LF_ASSERT_NON_NULL(scheduler->custom_data->worker_assignments); - scheduler->custom_data->data_collection = (data_collection_t *) calloc(1, sizeof(data_collection_t)); - LF_ASSERT_NON_NULL(scheduler->custom_data->data_collection); - - worker_states_init(scheduler, number_of_workers); - worker_assignments_init(scheduler, number_of_workers, params); - - data_collection_init(scheduler, params); + // TODO: Instead of making this a no-op, crash the program. If this gets called twice, then that + // is a bug that should be fixed. 
+ if (!init_sched_instance(env, &env->scheduler, number_of_workers, params)) { + // Already initialized + return; + } + + lf_scheduler_t* scheduler = env->scheduler; + scheduler->custom_data = (custom_scheduler_data_t*)calloc(1, sizeof(custom_scheduler_data_t)); + LF_ASSERT_NON_NULL(scheduler->custom_data); + scheduler->custom_data->worker_states = (worker_states_t*)calloc(1, sizeof(worker_states_t)); + LF_ASSERT_NON_NULL(scheduler->custom_data->worker_states); + scheduler->custom_data->worker_assignments = (worker_assignments_t*)calloc(1, sizeof(worker_assignments_t)); + LF_ASSERT_NON_NULL(scheduler->custom_data->worker_assignments); + scheduler->custom_data->data_collection = (data_collection_t*)calloc(1, sizeof(data_collection_t)); + LF_ASSERT_NON_NULL(scheduler->custom_data->data_collection); + + worker_states_init(scheduler, number_of_workers); + worker_assignments_init(scheduler, number_of_workers, params); + + data_collection_init(scheduler, params); } void lf_sched_free(lf_scheduler_t* scheduler) { - worker_states_free(scheduler); - worker_assignments_free(scheduler); - data_collection_free(scheduler); - free(scheduler->custom_data); - lf_semaphore_destroy(scheduler->semaphore); + worker_states_free(scheduler); + worker_assignments_free(scheduler); + data_collection_free(scheduler); + free(scheduler->custom_data); + lf_semaphore_destroy(scheduler->semaphore); } ///////////////////////// Scheduler Worker API /////////////////////////////// reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_number) { - assert(worker_number >= 0); - reaction_t* ret; - while (true) { - size_t level_counter_snapshot = scheduler->custom_data->level_counter; - ret = worker_assignments_get_or_lock(scheduler, worker_number); - if (ret) return ret; - if (worker_states_finished_with_level_locked(scheduler, worker_number)) { - advance_level_and_unlock(scheduler, worker_number); - } else { - worker_states_sleep_and_unlock(scheduler, worker_number, 
level_counter_snapshot); - } - if (scheduler->custom_data->should_stop) { - return NULL; - } + assert(worker_number >= 0); + reaction_t* ret; + while (true) { + size_t level_counter_snapshot = scheduler->custom_data->level_counter; + ret = worker_assignments_get_or_lock(scheduler, worker_number); + if (ret) + return ret; + if (worker_states_finished_with_level_locked(scheduler, worker_number)) { + advance_level_and_unlock(scheduler, worker_number); + } else { + worker_states_sleep_and_unlock(scheduler, worker_number, level_counter_snapshot); + } + if (scheduler->custom_data->should_stop) { + return NULL; } - return (reaction_t*) ret; + } + return (reaction_t*)ret; } void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction) { - assert(worker_number >= 0); - assert(done_reaction->status != inactive); - done_reaction->status = inactive; + assert(worker_number >= 0); + assert(done_reaction->status != inactive); + done_reaction->status = inactive; } void lf_scheduler_trigger_reaction(lf_scheduler_t* scheduler, reaction_t* reaction, int worker_number) { - assert(worker_number >= -1); - if (!lf_atomic_bool_compare_and_swap32((int32_t *) &reaction->status, inactive, queued)) return; - worker_assignments_put(scheduler, reaction); + assert(worker_number >= -1); + if (!lf_atomic_bool_compare_and_swap32((int32_t*)&reaction->status, inactive, queued)) + return; + worker_assignments_put(scheduler, reaction); } #endif // defined SCHEDULER && SCHEDULER == SCHED_ADAPTIVE diff --git a/core/threaded/scheduler_instance.c b/core/threaded/scheduler_instance.c index a8aac7ff6..5487ead65 100644 --- a/core/threaded/scheduler_instance.c +++ b/core/threaded/scheduler_instance.c @@ -5,46 +5,39 @@ #include "lf_types.h" #include "util.h" - -bool init_sched_instance( - environment_t * env, - lf_scheduler_t** instance, - size_t number_of_workers, - sched_params_t* params -) { - - assert(env != GLOBAL_ENVIRONMENT); - LF_ASSERT(env, "`init_sched_instance` called without env 
pointer being set"); - - // Check if the instance is already initialized - LF_CRITICAL_SECTION_ENTER(env); - if (*instance != NULL) { - // Already initialized - LF_CRITICAL_SECTION_EXIT(env); - return false; - } else { - *instance = - (lf_scheduler_t*)calloc(1, sizeof(lf_scheduler_t)); - } - LF_MUTEX_UNLOCK(&env->mutex); - - if (params == NULL || params->num_reactions_per_level_size == 0) { - (*instance)->max_reaction_level = DEFAULT_MAX_REACTION_LEVEL; - } - - if (params != NULL) { - if (params->num_reactions_per_level != NULL) { - (*instance)->max_reaction_level = - params->num_reactions_per_level_size - 1; - } +bool init_sched_instance(environment_t* env, lf_scheduler_t** instance, size_t number_of_workers, + sched_params_t* params) { + + assert(env != GLOBAL_ENVIRONMENT); + LF_ASSERT(env, "`init_sched_instance` called without env pointer being set"); + + // Check if the instance is already initialized + LF_CRITICAL_SECTION_ENTER(env); + if (*instance != NULL) { + // Already initialized + LF_CRITICAL_SECTION_EXIT(env); + return false; + } else { + *instance = (lf_scheduler_t*)calloc(1, sizeof(lf_scheduler_t)); + } + LF_MUTEX_UNLOCK(&env->mutex); + + if (params == NULL || params->num_reactions_per_level_size == 0) { + (*instance)->max_reaction_level = DEFAULT_MAX_REACTION_LEVEL; + } + + if (params != NULL) { + if (params->num_reactions_per_level != NULL) { + (*instance)->max_reaction_level = params->num_reactions_per_level_size - 1; } + } - (*instance)->semaphore = lf_semaphore_new(0); - (*instance)->number_of_workers = number_of_workers; - (*instance)->next_reaction_level = 1; + (*instance)->semaphore = lf_semaphore_new(0); + (*instance)->number_of_workers = number_of_workers; + (*instance)->next_reaction_level = 1; - (*instance)->should_stop = false; - (*instance)->env = env; + (*instance)->should_stop = false; + (*instance)->env = env; - return true; + return true; } diff --git a/core/threaded/scheduler_sync_tag_advance.c 
b/core/threaded/scheduler_sync_tag_advance.c index 28d3fa458..1b0556ba1 100644 --- a/core/threaded/scheduler_sync_tag_advance.c +++ b/core/threaded/scheduler_sync_tag_advance.c @@ -38,7 +38,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "scheduler_sync_tag_advance.h" #include "rti_local.h" #include "environment.h" -#include "trace.h" +#include "tracepoint.h" #include "util.h" /////////////////// External Functions ///////////////////////// @@ -58,16 +58,16 @@ static bool _latest_tag_completed = false; * Return true if the worker should stop now; false otherwise. * This function assumes the caller holds the mutex lock. */ -bool should_stop_locked(lf_scheduler_t * sched) { - // If this is not the very first step, check against the stop tag to see whether this is the last step. - if (_latest_tag_completed) { - // If we are at the stop tag, do not call _lf_next_locked() - // to prevent advancing the logical time. - if (lf_tag_compare(sched->env->current_tag, sched->env->stop_tag) >= 0) { - return true; - } +bool should_stop_locked(lf_scheduler_t* sched) { + // If this is not the very first step, check against the stop tag to see whether this is the last step. + if (_latest_tag_completed) { + // If we are at the stop tag, do not call _lf_next_locked() + // to prevent advancing the logical time. + if (lf_tag_compare(sched->env->current_tag, sched->env->stop_tag) >= 0) { + return true; } - return false; + } + return false; } /** @@ -78,30 +78,30 @@ bool should_stop_locked(lf_scheduler_t * sched) { * * @return should_exit True if the worker thread should exit. False otherwise. */ -bool _lf_sched_advance_tag_locked(lf_scheduler_t * sched) { - environment_t* env = sched->env; - logical_tag_complete(env->current_tag); +bool _lf_sched_advance_tag_locked(lf_scheduler_t* sched) { + environment_t* env = sched->env; + logical_tag_complete(env->current_tag); - // If we are using scheduling enclaves. 
Notify the local RTI of the time - // advancement. - #if defined LF_ENCLAVES - rti_logical_tag_complete_locked(env->enclave_info, env->current_tag); - #endif +// If we are using scheduling enclaves. Notify the local RTI of the time +// advancement. +#if defined LF_ENCLAVES + rti_logical_tag_complete_locked(env->enclave_info, env->current_tag); +#endif - if (should_stop_locked(sched)) { - return true; - } + if (should_stop_locked(sched)) { + return true; + } - _latest_tag_completed = true; + _latest_tag_completed = true; - // Advance time. - // _lf_next_locked() may block waiting for real time to pass or events to appear. - // to appear on the event queue. Note that we already - tracepoint_scheduler_advancing_time_starts(env->trace); - _lf_next_locked(env); - tracepoint_scheduler_advancing_time_ends(env->trace); + // Advance time. + // _lf_next_locked() may block waiting for real time to pass or events to appear. + // to appear on the event queue. Note that we already + tracepoint_scheduler_advancing_time_starts(env); + _lf_next_locked(env); + tracepoint_scheduler_advancing_time_ends(env); - LF_PRINT_DEBUG("Scheduler: Done waiting for _lf_next_locked()."); - return false; + LF_PRINT_DEBUG("Scheduler: Done waiting for _lf_next_locked()."); + return false; } #endif diff --git a/core/threaded/watchdog.c b/core/threaded/watchdog.c index 2eedcfb71..4f26f26e7 100644 --- a/core/threaded/watchdog.c +++ b/core/threaded/watchdog.c @@ -26,44 +26,42 @@ static void* watchdog_thread_main(void* arg); * variable which enables the safe termination of a running watchdog. * Finally it starts of a non-termminating thread for each watchdog. 
*/ -void _lf_initialize_watchdogs(environment_t *env) { - for (int i = 0; i < env->watchdogs_size; i++) { - watchdog_t *watchdog = env->watchdogs[i]; - if (watchdog->base->reactor_mutex != NULL) { - LF_MUTEX_INIT((lf_mutex_t*)(watchdog->base->reactor_mutex)); - } - LF_COND_INIT(&watchdog->cond, watchdog->base->reactor_mutex); - - int ret = lf_thread_create(&watchdog->thread_id, watchdog_thread_main, (void *) watchdog); - LF_ASSERTN(ret, "Could not create watchdog thread"); +void _lf_initialize_watchdogs(environment_t* env) { + for (int i = 0; i < env->watchdogs_size; i++) { + watchdog_t* watchdog = env->watchdogs[i]; + if (watchdog->base->reactor_mutex != NULL) { + LF_MUTEX_INIT((lf_mutex_t*)(watchdog->base->reactor_mutex)); } + LF_COND_INIT(&watchdog->cond, watchdog->base->reactor_mutex); + + int ret = lf_thread_create(&watchdog->thread_id, watchdog_thread_main, (void*)watchdog); + LF_ASSERTN(ret, "Could not create watchdog thread"); + } } /** - * @brief Terminate all watchdog threads. + * @brief Terminate all watchdog threads. 
*/ -void _lf_watchdog_terminate_all(environment_t *env) { - void *thread_return; - for (int i = 0; i < env->watchdogs_size; i++) { - watchdog_t *watchdog = env->watchdogs[i]; - LF_MUTEX_LOCK(watchdog->base->reactor_mutex); - _lf_watchdog_terminate(watchdog); - LF_MUTEX_UNLOCK(watchdog->base->reactor_mutex); - void *thread_ret; - lf_thread_join(watchdog->thread_id, &thread_ret); - } +void _lf_watchdog_terminate_all(environment_t* env) { + void* thread_return; + for (int i = 0; i < env->watchdogs_size; i++) { + watchdog_t* watchdog = env->watchdogs[i]; + LF_MUTEX_LOCK(watchdog->base->reactor_mutex); + _lf_watchdog_terminate(watchdog); + LF_MUTEX_UNLOCK(watchdog->base->reactor_mutex); + void* thread_ret; + lf_thread_join(watchdog->thread_id, &thread_ret); + } } -void watchdog_wait(watchdog_t *watchdog) { - watchdog->active = true; - instant_t physical_time = lf_time_physical(); - while ( watchdog->expiration != NEVER && - physical_time < watchdog->expiration && - !watchdog->terminate) { - // Wait for expiration, or a signal to stop or terminate. - lf_clock_cond_timedwait(&watchdog->cond, watchdog->expiration); - physical_time = lf_time_physical(); - } +void watchdog_wait(watchdog_t* watchdog) { + watchdog->active = true; + instant_t physical_time = lf_time_physical(); + while (watchdog->expiration != NEVER && physical_time < watchdog->expiration && !watchdog->terminate) { + // Wait for expiration, or a signal to stop or terminate. + lf_clock_cond_timedwait(&watchdog->cond, watchdog->expiration); + physical_time = lf_time_physical(); + } } /** @@ -85,74 +83,76 @@ void watchdog_wait(watchdog_t *watchdog) { * @return NULL */ static void* watchdog_thread_main(void* arg) { - watchdog_t* watchdog = (watchdog_t*)arg; - self_base_t* base = watchdog->base; - LF_PRINT_DEBUG("Starting Watchdog %p", (void *) watchdog); - LF_ASSERT(base->reactor_mutex, "reactor-mutex not alloc'ed but has watchdogs."); - - // Grab reactor-mutex and start infinite loop. 
- LF_MUTEX_LOCK((lf_mutex_t*)(base->reactor_mutex)); - while (! watchdog->terminate) { - - // Step 1: Wait for a timeout to start watching for. - if(watchdog->expiration == NEVER) { - // Watchdog has been stopped. - // Let the runtime know that we are in an inactive/stopped state. - watchdog->active = false; - // Wait here until the watchdog is started and we can enter the active state. - LF_COND_WAIT(&watchdog->cond); - continue; - } else { - // Watchdog has been started. - watchdog_wait(watchdog); - - // At this point we have returned from the watchdog wait. But it could - // be that it was to terminate the watchdog. - if (watchdog->terminate) break; - - // It could also be that the watchdog was stopped - if (watchdog->expiration == NEVER) continue; - - // If we reach here, the watchdog actually timed out. Handle it. - LF_PRINT_DEBUG("Watchdog %p timed out", (void *) watchdog); - watchdog_function_t watchdog_func = watchdog->watchdog_function; - (*watchdog_func)(base); - watchdog->expiration = NEVER; - } + initialize_lf_thread_id(); + watchdog_t* watchdog = (watchdog_t*)arg; + self_base_t* base = watchdog->base; + LF_PRINT_DEBUG("Starting Watchdog %p", (void*)watchdog); + LF_ASSERT(base->reactor_mutex, "reactor-mutex not alloc'ed but has watchdogs."); + + // Grab reactor-mutex and start infinite loop. + LF_MUTEX_LOCK((lf_mutex_t*)(base->reactor_mutex)); + while (!watchdog->terminate) { + + // Step 1: Wait for a timeout to start watching for. + if (watchdog->expiration == NEVER) { + // Watchdog has been stopped. + // Let the runtime know that we are in an inactive/stopped state. + watchdog->active = false; + // Wait here until the watchdog is started and we can enter the active state. + LF_COND_WAIT(&watchdog->cond); + continue; + } else { + // Watchdog has been started. + watchdog_wait(watchdog); + + // At this point we have returned from the watchdog wait. But it could + // be that it was to terminate the watchdog. 
+ if (watchdog->terminate) + break; + + // It could also be that the watchdog was stopped + if (watchdog->expiration == NEVER) + continue; + + // If we reach here, the watchdog actually timed out. Handle it. + LF_PRINT_DEBUG("Watchdog %p timed out", (void*)watchdog); + watchdog_function_t watchdog_func = watchdog->watchdog_function; + (*watchdog_func)(base); + watchdog->expiration = NEVER; } + } - // Here the thread terminates. - watchdog->active = false; - LF_MUTEX_UNLOCK(base->reactor_mutex); - return NULL; + // Here the thread terminates. + watchdog->active = false; + LF_MUTEX_UNLOCK(base->reactor_mutex); + return NULL; } void lf_watchdog_start(watchdog_t* watchdog, interval_t additional_timeout) { - // Assumes reactor mutex is already held. - self_base_t* base = watchdog->base; - watchdog->terminate = false; - watchdog->expiration = base->environment->current_tag.time + watchdog->min_expiration + additional_timeout; - - // If the watchdog is inactive, signal it to start waiting. - if (!watchdog->active) { - LF_COND_SIGNAL(&watchdog->cond); - } -} - -void lf_watchdog_stop(watchdog_t* watchdog) { - // If the watchdog isnt active, then it is no reason to stop it. - if (!watchdog->active) { - return; - } + // Assumes reactor mutex is already held. + self_base_t* base = watchdog->base; + watchdog->terminate = false; + watchdog->expiration = base->environment->current_tag.time + watchdog->min_expiration + additional_timeout; - // Assumes reactor mutex is already held. - watchdog->expiration = NEVER; + // If the watchdog is inactive, signal it to start waiting. + if (!watchdog->active) { LF_COND_SIGNAL(&watchdog->cond); + } } +void lf_watchdog_stop(watchdog_t* watchdog) { + // If the watchdog isnt active, then it is no reason to stop it. + if (!watchdog->active) { + return; + } + + // Assumes reactor mutex is already held. 
+ watchdog->expiration = NEVER; + LF_COND_SIGNAL(&watchdog->cond); +} static void _lf_watchdog_terminate(watchdog_t* watchdog) { - watchdog->terminate = true; - watchdog->expiration = NEVER; - LF_COND_SIGNAL(&watchdog->cond); + watchdog->terminate = true; + watchdog->expiration = NEVER; + LF_COND_SIGNAL(&watchdog->cond); } diff --git a/core/trace.c b/core/trace.c deleted file mode 100644 index afd3a6c92..000000000 --- a/core/trace.c +++ /dev/null @@ -1,648 +0,0 @@ -/** - * @file - * @author Edward A. Lee - * - * @section LICENSE -Copyright (c) 2020, The University of California at Berkeley and TU Dresden - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL -THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF -THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - * @section DESCRIPTION - * Include this file instead of trace.h to get tracing. - * See trace.h file for instructions. 
- */ - -#include "trace.h" - -#ifdef LF_TRACE - -#include -#include -#include - -#include "platform.h" - -#ifdef RTI_TRACE -#include "net_common.h" // Defines message types -#endif // RTI_TRACE - -#include "reactor_common.h" -#include "util.h" - -/** Macro to use when access to trace file fails. */ -#define _LF_TRACE_FAILURE(trace) \ - do { \ - fprintf(stderr, "WARNING: Access to trace file failed.\n"); \ - fclose(trace->_lf_trace_file); \ - trace->_lf_trace_file = NULL; \ - return -1; \ - } while(0) - - -trace_t* trace_new(environment_t* env, const char * filename) { - trace_t * trace = (trace_t *) calloc(1, sizeof(trace_t)); - LF_ASSERT_NON_NULL(trace); - - trace->_lf_trace_stop=1; - trace->env = env; - - // Determine length of the filename - size_t len = strlen(filename) + 1; - - // Allocate memory for the filename on the trace struct - trace->filename = (char*) malloc(len * sizeof(char)); - LF_ASSERT_NON_NULL(trace->filename); - - // Copy it to the struct - strncpy(trace->filename, filename, len); - - return trace; -} - -void trace_free(trace_t *trace) { - free(trace->filename); - free(trace); -} - - -int _lf_register_trace_event(trace_t* trace, void* pointer1, void* pointer2, _lf_trace_object_t type, char* description) { - LF_CRITICAL_SECTION_ENTER(trace->env); - if (trace->_lf_trace_object_descriptions_size >= TRACE_OBJECT_TABLE_SIZE) { - LF_CRITICAL_SECTION_EXIT(trace->env); - fprintf(stderr, "WARNING: Exceeded trace object table size. 
Trace file will be incomplete.\n"); - return 0; - } - trace->_lf_trace_object_descriptions[trace->_lf_trace_object_descriptions_size].pointer = pointer1; - trace->_lf_trace_object_descriptions[trace->_lf_trace_object_descriptions_size].trigger = pointer2; - trace->_lf_trace_object_descriptions[trace->_lf_trace_object_descriptions_size].type = type; - trace->_lf_trace_object_descriptions[trace->_lf_trace_object_descriptions_size].description = description; - trace->_lf_trace_object_descriptions_size++; - LF_CRITICAL_SECTION_EXIT(trace->env); - return 1; -} - -int register_user_trace_event(void *self, char* description) { - LF_ASSERT(self, "Need a pointer to a self struct to register a user trace event"); - trace_t * trace = ((self_base_t *) self)->environment->trace; - return _lf_register_trace_event(trace, description, NULL, trace_user, description); -} - - -/** - * Write the trace header information. - * See trace.h. - * @return The number of items written to the object table or -1 for failure. - */ -int write_trace_header(trace_t* trace) { - if (trace->_lf_trace_file != NULL) { - // The first item in the header is the start time. - // This is both the starting physical time and the starting logical time. - instant_t start_time = lf_time_start(); - // printf("DEBUG: Start time written to trace file is %lld.\n", start_time); - size_t items_written = fwrite( - &start_time, - sizeof(instant_t), - 1, - trace->_lf_trace_file - ); - if (items_written != 1) _LF_TRACE_FAILURE(trace); - - // The next item in the header is the size of the - // _lf_trace_object_descriptions table. - // printf("DEBUG: Table size written to trace file is %d.\n", _lf_trace_object_descriptions_size); - items_written = fwrite( - &trace->_lf_trace_object_descriptions_size, - sizeof(int), - 1, - trace->_lf_trace_file - ); - if (items_written != 1) _LF_TRACE_FAILURE(trace); - - // Next we write the table. 
- for (int i = 0; i < trace->_lf_trace_object_descriptions_size; i++) { - // printf("DEBUG: Object pointer: %p.\n", _lf_trace_object_descriptions[i].pointer); - // Write the pointer to the self struct. - items_written = fwrite( - &trace->_lf_trace_object_descriptions[i].pointer, - sizeof(void*), - 1, - trace->_lf_trace_file - ); - if (items_written != 1) _LF_TRACE_FAILURE(trace); - - // Write the pointer to the trigger_t struct. - items_written = fwrite( - &trace->_lf_trace_object_descriptions[i].trigger, - sizeof(trigger_t*), - 1, - trace->_lf_trace_file - ); - if (items_written != 1) _LF_TRACE_FAILURE(trace); - - // Write the object type. - items_written = fwrite( - &trace->_lf_trace_object_descriptions[i].type, // Write the pointer value. - sizeof(_lf_trace_object_t), - 1, - trace->_lf_trace_file - ); - if (items_written != 1) _LF_TRACE_FAILURE(trace); - - // Write the description. - int description_size = strlen(trace->_lf_trace_object_descriptions[i].description); - // printf("DEBUG: Object description: %s.\n", trace->_lf_trace_object_descriptions[i].description); - items_written = fwrite( - trace->_lf_trace_object_descriptions[i].description, - sizeof(char), - description_size + 1, // Include null terminator. - trace->_lf_trace_file - ); - if (items_written != description_size + 1) _LF_TRACE_FAILURE(trace); - } - } - return trace->_lf_trace_object_descriptions_size; -} - -/** - * @brief Flush the specified buffer to a file. - * This assumes the caller has entered a critical section. - * @param worker Index specifying the trace to flush. - */ -void flush_trace_locked(trace_t* trace, int worker) { - if (trace->_lf_trace_stop == 0 - && trace->_lf_trace_file != NULL - && trace->_lf_trace_buffer_size[worker] > 0 - ) { - // If the trace header has not been written, write it now. - // This is deferred to here so that user trace objects can be - // registered in startup reactions. 
- if (!trace->_lf_trace_header_written) { - if (write_trace_header(trace) < 0) { - lf_print_error("Failed to write trace header. Trace file will be incomplete."); - return; - } - trace->_lf_trace_header_written = true; - } - - // Write first the length of the array. - size_t items_written = fwrite( - &trace->_lf_trace_buffer_size[worker], - sizeof(int), - 1, - trace->_lf_trace_file - ); - if (items_written != 1) { - fprintf(stderr, "WARNING: Access to trace file failed.\n"); - fclose(trace->_lf_trace_file); - trace->_lf_trace_file = NULL; - } else { - // Write the contents. - items_written = fwrite( - trace->_lf_trace_buffer[worker], - sizeof(trace_record_t), - trace->_lf_trace_buffer_size[worker], - trace->_lf_trace_file - ); - if (items_written != trace->_lf_trace_buffer_size[worker]) { - fprintf(stderr, "WARNING: Access to trace file failed.\n"); - fclose(trace->_lf_trace_file); - trace->_lf_trace_file = NULL; - } - } - trace->_lf_trace_buffer_size[worker] = 0; - } -} - -/** - * @brief Flush the specified buffer to a file. - * @param worker Index specifying the trace to flush. - */ -void flush_trace(trace_t* trace, int worker) { - // To avoid having more than one worker writing to the file at the same time, - // enter a critical section. - LF_CRITICAL_SECTION_ENTER(GLOBAL_ENVIRONMENT); - flush_trace_locked(trace, worker); - LF_CRITICAL_SECTION_EXIT(GLOBAL_ENVIRONMENT); -} - -void start_trace(trace_t* trace) { - // FIXME: location of trace file should be customizable. - trace->_lf_trace_file = fopen(trace->filename, "w"); - if (trace->_lf_trace_file == NULL) { - fprintf(stderr, "WARNING: Failed to open log file with error code %d." - "No log will be written.\n", errno); - } - // Do not write the trace header information to the file yet - // so that startup reactions can register user-defined trace objects. 
- // write_trace_header(); - trace->_lf_trace_header_written = false; - - // Allocate an array of arrays of trace records, one per worker thread plus one - // for the 0 thread (the main thread, or in an single-threaded program, the only - // thread). - trace->_lf_number_of_trace_buffers = _lf_number_of_workers + 1; - trace->_lf_trace_buffer = (trace_record_t**)malloc(sizeof(trace_record_t*) * trace->_lf_number_of_trace_buffers); - for (int i = 0; i < trace->_lf_number_of_trace_buffers; i++) { - trace->_lf_trace_buffer[i] = (trace_record_t*)malloc(sizeof(trace_record_t) * TRACE_BUFFER_CAPACITY); - } - // Array of counters that track the size of each trace record (per thread). - trace->_lf_trace_buffer_size = (int*)calloc(sizeof(int), trace->_lf_number_of_trace_buffers); - - trace->_lf_trace_stop = 0; - LF_PRINT_DEBUG("Started tracing."); -} - -void tracepoint( - trace_t* trace, - trace_event_t event_type, - void* reactor, - tag_t* tag, - int worker, - int src_id, - int dst_id, - instant_t* physical_time, - trigger_t* trigger, - interval_t extra_delay, - bool is_interval_start -) { - instant_t time; - if (!is_interval_start && physical_time == NULL) { - time = lf_time_physical(); - physical_time = &time; - } - - environment_t *env = trace->env; - // Worker argument determines which buffer to write to. - int index = (worker >= 0) ? worker : 0; - - // Flush the buffer if it is full. - if (trace->_lf_trace_buffer_size[index] >= TRACE_BUFFER_CAPACITY) { - // No more room in the buffer. Write the buffer to the file. - flush_trace(trace, index); - } - // The above flush_trace resets the write pointer. - int i = trace->_lf_trace_buffer_size[index]; - // Write to memory buffer. 
- // Get the correct time of the event - - trace->_lf_trace_buffer[index][i].event_type = event_type; - trace->_lf_trace_buffer[index][i].pointer = reactor; - trace->_lf_trace_buffer[index][i].src_id = src_id; - trace->_lf_trace_buffer[index][i].dst_id = dst_id; - if (tag != NULL) { - trace->_lf_trace_buffer[index][i].logical_time = tag->time; - trace->_lf_trace_buffer[index][i].microstep = tag->microstep; - } else if (env != NULL) { - trace->_lf_trace_buffer[index][i].logical_time = ((environment_t *)env)->current_tag.time; - trace->_lf_trace_buffer[index][i].microstep = ((environment_t*)env)->current_tag.microstep; - } - - trace->_lf_trace_buffer[index][i].trigger = trigger; - trace->_lf_trace_buffer[index][i].extra_delay = extra_delay; - if (is_interval_start && physical_time == NULL) { - time = lf_time_physical(); - physical_time = &time; - } - trace->_lf_trace_buffer[index][i].physical_time = *physical_time; - trace->_lf_trace_buffer_size[index]++; -} - -/** - * Trace the start of a reaction execution. - * @param reaction Pointer to the reaction_t struct for the reaction. - * @param worker The thread number of the worker thread or 0 for single-threaded execution. - */ -void tracepoint_reaction_starts(trace_t* trace, reaction_t* reaction, int worker) { - tracepoint(trace, reaction_starts, reaction->self, NULL, worker, worker, reaction->number, NULL, NULL, 0, true); -} - -/** - * Trace the end of a reaction execution. - * @param reaction Pointer to the reaction_t struct for the reaction. - * @param worker The thread number of the worker thread or 0 for single-threaded execution. - */ -void tracepoint_reaction_ends(trace_t* trace, reaction_t* reaction, int worker) { - tracepoint(trace, reaction_ends, reaction->self, NULL, worker, worker, reaction->number, NULL, NULL, 0, false); -} - -/** - * Trace a call to schedule. - * @param trigger Pointer to the trigger_t struct for the trigger. - * @param extra_delay The extra delay passed to schedule(). 
- */ -void tracepoint_schedule(trace_t* trace, trigger_t* trigger, interval_t extra_delay) { - // schedule() can only trigger reactions within the same reactor as the action - // or timer. If there is such a reaction, find its reactor's self struct and - // put that into the tracepoint. We only have to look at the first reaction. - // If there is no reaction, insert NULL for the reactor. - void* reactor = NULL; - if (trigger->number_of_reactions > 0 - && trigger->reactions[0] != NULL) { - reactor = trigger->reactions[0]->self; - } - // NOTE: The -1 argument indicates no worker. - // This is OK because it is called only while holding the mutex lock. - // True argument specifies to record physical time as late as possible, when - // the event is already on the event queue. - tracepoint(trace, schedule_called, reactor, NULL, -1, 0, 0, NULL, trigger, extra_delay, true); -} - -/** - * Trace a user-defined event. Before calling this, you must call - * register_user_trace_event() with a pointer to the same string - * or else the event will not be recognized. - * @param self Pointer to the self struct of the reactor from which we want - * to trace this event. This pointer is used to get the correct environment and - * thus the correct logical tag of the event. - * @param description Pointer to the description string. - */ -void tracepoint_user_event(void* self, char* description) { - // -1s indicate unknown reaction number and worker thread. - // NOTE: We currently have no way to get the number of the worker that - // is executing the reaction that calls this, so we can't pass a worker - // number to the tracepoint function. We pass -1, indicating no worker. - // But to be safe, then, we have acquire a mutex before calling this - // because multiple reactions might be calling the same tracepoint function. - // There will be a performance hit for this. 
- LF_ASSERT(self, "A pointer to the self struct is needed to trace an event"); - environment_t *env = ((self_base_t *)self)->environment; - trace_t *trace = env->trace; - LF_CRITICAL_SECTION_ENTER(env); - tracepoint(trace, user_event, description, NULL, -1, -1, -1, NULL, NULL, 0, false); - LF_CRITICAL_SECTION_EXIT(env); -} - -/** - * Trace a user-defined event with a value. - * Before calling this, you must call - * register_user_trace_event() with a pointer to the same string - * or else the event will not be recognized. - * @param self Pointer to the self struct of the reactor from which we want - * to trace this event. This pointer is used to get the correct environment and - * thus the correct logical tag of the event. - * @param description Pointer to the description string. - * @param value The value of the event. This is a long long for - * convenience so that time values can be passed unchanged. - * But int values work as well. - */ -void tracepoint_user_value(void* self, char* description, long long value) { - // -1s indicate unknown reaction number and worker thread. - // NOTE: We currently have no way to get the number of the worker that - // is executing the reaction that calls this, so we can't pass a worker - // number to the tracepoint function. We pass -1, indicating no worker. - // But to be safe, then, we have acquire a mutex before calling this - // because multiple reactions might be calling the same tracepoint function. - // There will be a performance hit for this. - environment_t *env = ((self_base_t *)self)->environment; - trace_t *trace = env->trace; - LF_CRITICAL_SECTION_ENTER(env); - tracepoint(trace, user_value, description, NULL, -1, -1, -1, NULL, NULL, value, false); - LF_CRITICAL_SECTION_EXIT(env); -} - -/** - * Trace the start of a worker waiting for something to change on the event or reaction queue. - * @param worker The thread number of the worker thread or 0 for single-threaded execution. 
- */ -void tracepoint_worker_wait_starts(trace_t* trace, int worker) { - tracepoint(trace, worker_wait_starts, NULL, NULL, worker, worker, -1, NULL, NULL, 0, true); -} - -/** - * Trace the end of a worker waiting for something to change on the event or reaction queue. - * @param worker The thread number of the worker thread or 0 for single-threaded execution. - */ -void tracepoint_worker_wait_ends(trace_t* trace, int worker) { - tracepoint(trace, worker_wait_ends, NULL, NULL, worker, worker, -1, NULL, NULL, 0, false); -} - -/** - * Trace the start of the scheduler waiting for logical time to advance or an event to - * appear on the event queue. - */ -void tracepoint_scheduler_advancing_time_starts(trace_t* trace) { - tracepoint(trace, scheduler_advancing_time_starts, NULL, NULL, -1, -1, -1, NULL, NULL, 0, true); -} - -/** - * Trace the end of the scheduler waiting for logical time to advance or an event to - * appear on the event queue. - */ -void tracepoint_scheduler_advancing_time_ends(trace_t* trace) { - tracepoint(trace, scheduler_advancing_time_ends, NULL, NULL, -1, -1, -1, NULL, NULL, 0, false); -} - -/** - * Trace the occurrence of a deadline miss. - * @param reaction Pointer to the reaction_t struct for the reaction. - * @param worker The thread number of the worker thread or 0 for single-threaded execution. - */ -void tracepoint_reaction_deadline_missed(trace_t* trace, reaction_t *reaction, int worker) { - tracepoint(trace, reaction_deadline_missed, reaction->self, NULL, worker, worker, reaction->number, NULL, NULL, 0, false); -} - -void stop_trace(trace_t* trace) { - LF_CRITICAL_SECTION_ENTER(trace->env); - stop_trace_locked(trace); - LF_CRITICAL_SECTION_EXIT(trace->env); -} - -void stop_trace_locked(trace_t* trace) { - if (trace->_lf_trace_stop) { - // Trace was already stopped. Nothing to do. - return; - } - // In multithreaded execution, thread 0 invokes wrapup reactions, so we - // put that trace last. 
However, it could also include some startup events. - // In any case, the trace file does not guarantee any ordering. - for (int i = 1; i < trace->_lf_number_of_trace_buffers; i++) { - // Flush the buffer if it has data. - // printf("DEBUG: Trace buffer %d has %d records.\n", i, trace->_lf_trace_buffer_size[i]); - if (trace->_lf_trace_buffer_size && trace->_lf_trace_buffer_size[i] > 0) { - flush_trace_locked(trace, i); - } - } - if (trace->_lf_trace_buffer_size && trace->_lf_trace_buffer_size[0] > 0) { - flush_trace_locked(trace, 0); - } - trace->_lf_trace_stop = 1; - if (trace->_lf_trace_file != NULL) { - fclose(trace->_lf_trace_file); - trace->_lf_trace_file = NULL; - } - LF_PRINT_DEBUG("Stopped tracing."); -} - -//////////////////////////////////////////////////////////// -//// For federated execution - -#if defined FEDERATED || defined LF_ENCLAVES - -/** - * Trace federate sending a message to the RTI. - * @param event_type The type of event. Possible values are: - * - * @param fed_id The federate identifier. - * @param tag Pointer to the tag that has been sent, or NULL. - */ -void tracepoint_federate_to_rti(trace_t *trace, trace_event_t event_type, int fed_id, tag_t* tag) { - tracepoint( - trace, - event_type, - NULL, // void* pointer, - tag, // tag* tag, - -1, // int worker, // no worker ID needed because this is called within a mutex - fed_id, // int src_id, - -1, // int dst_id, - NULL, // instant_t* physical_time (will be generated) - NULL, // trigger_t* trigger, - 0, // interval_t extra_delay - true // is_interval_start - ); -} - -/** - * Trace federate receiving a message from the RTI. - * @param event_type The type of event. Possible values are: - * - * @param fed_id The federate identifier. - * @param tag Pointer to the tag that has been received, or NULL. - */ -void tracepoint_federate_from_rti(trace_t* trace, trace_event_t event_type, int fed_id, tag_t* tag) { - // trace_event_t event_type = (type == MSG_TYPE_TAG_ADVANCE_GRANT)? 
federate_TAG : federate_PTAG; - tracepoint( - trace, - event_type, - NULL, // void* pointer, - tag, // tag* tag, - -1, // int worker, // no worker ID needed because this is called within a mutex - fed_id, // int src_id, - -1, // int dst_id, - NULL, // instant_t* physical_time (will be generated) - NULL, // trigger_t* trigger, - 0, // interval_t extra_delay - false // is_interval_start - ); -} - -/** - * Trace federate sending a message to another federate. - * @param event_type The type of event. Possible values are: - * - * @param fed_id The federate identifier. - * @param partner_id The partner federate identifier. - * @param tag Pointer to the tag that has been sent, or NULL. - */ -void tracepoint_federate_to_federate(trace_t* trace, trace_event_t event_type, int fed_id, int partner_id, tag_t *tag) { - tracepoint( - trace, - event_type, - NULL, // void* pointer, - tag, // tag* tag, - -1, // int worker, // no worker ID needed because this is called within a mutex - fed_id, // int src_id, - partner_id, // int dst_id, - NULL, // instant_t* physical_time (will be generated) - NULL, // trigger_t* trigger, - 0, // interval_t extra_delay - true // is_interval_start - ); -} - -/** - * Trace federate receiving a message from another federate. - * @param event_type The type of event. Possible values are: - * - * @param fed_id The federate identifier. - * @param partner_id The partner federate identifier. - * @param tag Pointer to the tag that has been received, or NULL. 
- */ -void tracepoint_federate_from_federate(trace_t* trace, trace_event_t event_type, int fed_id, int partner_id, tag_t *tag) { - tracepoint( - trace, - event_type, - NULL, // void* pointer, - tag, // tag* tag, - -1, // int worker, // no worker ID needed because this is called within a mutex - fed_id, // int src_id, - partner_id, // int dst_id, - NULL, // instant_t* physical_time (will be generated) - NULL, // trigger_t* trigger, - 0, // interval_t extra_delay - false // is_interval_start - ); -} -#endif // FEDERATED - -//////////////////////////////////////////////////////////// -//// For RTI execution - -#ifdef RTI_TRACE - -/** - * Trace RTI sending a message to a federate. - * @param event_type The type of event. Possible values are: - * - * @param fed_id The fedaerate ID. - * @param tag Pointer to the tag that has been sent, or NULL. - */ -void tracepoint_rti_to_federate(trace_t* trace, trace_event_t event_type, int fed_id, tag_t* tag) { - tracepoint( - trace, - event_type, - NULL, // void* pointer, - tag, // tag_t* tag, - fed_id, // int worker (one thread per federate) - -1, // int src_id - fed_id, // int dst_id - NULL, // instant_t* physical_time (will be generated) - NULL, // trigger_t* trigger, - 0, // interval_t extra_delay - true // is_interval_start - ); -} - -/** - * Trace RTI receiving a message from a federate. - * @param event_type The type of event. Possible values are: - * - * @param fed_id The fedaerate ID. - * @param tag Pointer to the tag that has been sent, or NULL. 
- */ -void tracepoint_rti_from_federate(trace_t* trace, trace_event_t event_type, int fed_id, tag_t* tag) { - tracepoint( - trace, - event_type, - NULL, // void* pointer, - tag, // tag_t* tag, - fed_id, // int worker (one thread per federate) - -1, // int src_id (RTI is the source of the tracepoint) - fed_id, // int dst_id - NULL, // instant_t* physical_time (will be generated) - NULL, // trigger_t* trigger, - 0, // interval_t extra_delay - false // is_interval_start - ); -} - -#endif // RTI_TRACE - -#endif // LF_TRACE diff --git a/core/tracepoint.c b/core/tracepoint.c new file mode 100644 index 000000000..24b2f2434 --- /dev/null +++ b/core/tracepoint.c @@ -0,0 +1,292 @@ +/** + * @file + * @author Edward A. Lee + * + * @section LICENSE +Copyright (c) 2020, The University of California at Berkeley and TU Dresden + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL +THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + * @section DESCRIPTION + * Include this file instead of trace.h to get tracing. + * See trace.h file for instructions. + */ + +#include "tracepoint.h" + +#ifdef LF_TRACE + +#include +#include +#include + +#include "low_level_platform.h" + +#ifdef RTI_TRACE +#include "net_common.h" // Defines message types +#endif // RTI_TRACE + +#include "reactor_common.h" +#include "util.h" + +int _lf_register_trace_event(void* pointer1, void* pointer2, _lf_trace_object_t type, char* description) { + object_description_t desc = {.pointer = pointer1, .trigger = pointer2, .type = type, .description = description}; + lf_tracing_register_trace_event(desc); + return 1; +} + +int register_user_trace_event(void* self, char* description) { + LF_ASSERT( + self, + "Need a pointer to a self struct to register a user trace event"); // FIXME: Not needed. 
self not needed either + return _lf_register_trace_event(description, NULL, trace_user, description); +} + +void call_tracepoint(int event_type, void* reactor, tag_t tag, int worker, int src_id, int dst_id, + instant_t* physical_time, trigger_t* trigger, interval_t extra_delay, bool is_interval_start) { + instant_t local_time; + if (physical_time == NULL) { + local_time = lf_time_physical(); + physical_time = &local_time; + } + trace_record_t tr = {.event_type = event_type, + .pointer = reactor, + .src_id = src_id, + .dst_id = dst_id, + .logical_time = tag.time, + .microstep = tag.microstep, + .trigger = trigger, + .extra_delay = extra_delay, + .physical_time = *physical_time}; + lf_tracing_tracepoint(worker, (trace_record_nodeps_t*)&tr); +} + +/** + * Trace a call to schedule. + * @param trigger Pointer to the trigger_t struct for the trigger. + * @param extra_delay The extra delay passed to schedule(). + */ +void tracepoint_schedule(environment_t* env, trigger_t* trigger, interval_t extra_delay) { + // schedule() can only trigger reactions within the same reactor as the action + // or timer. If there is such a reaction, find its reactor's self struct and + // put that into the tracepoint. We only have to look at the first reaction. + // If there is no reaction, insert NULL for the reactor. + void* reactor = NULL; + if (trigger->number_of_reactions > 0 && trigger->reactions[0] != NULL) { + reactor = trigger->reactions[0]->self; + } + // NOTE: The -1 argument indicates no worker. + // This is OK because it is called only while holding the mutex lock. + // True argument specifies to record physical time as late as possible, when + // the event is already on the event queue. + call_tracepoint(schedule_called, reactor, env->current_tag, -1, 0, 0, NULL, trigger, extra_delay, true); +} + +/** + * Trace a user-defined event. Before calling this, you must call + * register_user_trace_event() with a pointer to the same string + * or else the event will not be recognized. 
+ * @param self Pointer to the self struct of the reactor from which we want + * to trace this event. This pointer is used to get the correct environment and + * thus the correct logical tag of the event. + * @param description Pointer to the description string. + */ +void tracepoint_user_event(void* self, char* description) { + // -1s indicate unknown reaction number and worker thread. + // NOTE: We currently have no way to get the number of the worker that + // is executing the reaction that calls this, so we can't pass a worker + // number to the tracepoint function. We pass -1, indicating no worker. + // But to be safe, then, we have to acquire a mutex before calling this + // because multiple reactions might be calling the same tracepoint function. + // There will be a performance hit for this. + LF_ASSERT(self, "A pointer to the self struct is needed to trace an event"); + environment_t* env = ((self_base_t*)self)->environment; + call_tracepoint(user_event, description, env->current_tag, -1, -1, -1, NULL, NULL, 0, false); +} + +/** + * Trace a user-defined event with a value. + * Before calling this, you must call + * register_user_trace_event() with a pointer to the same string + * or else the event will not be recognized. + * @param self Pointer to the self struct of the reactor from which we want + * to trace this event. This pointer is used to get the correct environment and + * thus the correct logical tag of the event. + * @param description Pointer to the description string. + * @param value The value of the event. This is a long long for + * convenience so that time values can be passed unchanged. + * But int values work as well. + */ +void tracepoint_user_value(void* self, char* description, long long value) { + // -1s indicate unknown reaction number and worker thread. + // NOTE: We currently have no way to get the number of the worker that + // is executing the reaction that calls this, so we can't pass a worker + // number to the tracepoint function.
We pass -1, indicating no worker. + // But to be safe, then, we have to acquire a mutex before calling this + // because multiple reactions might be calling the same tracepoint function. + // There will be a performance hit for this. + environment_t* env = ((self_base_t*)self)->environment; + call_tracepoint(user_value, description, env->current_tag, -1, -1, -1, NULL, NULL, value, false); +} + +//////////////////////////////////////////////////////////// +//// For federated execution + +#if defined FEDERATED || defined LF_ENCLAVES + +/** + * Trace federate sending a message to the RTI. + * @param event_type The type of event. Possible values are: + * + * @param fed_id The federate identifier. + * @param tag Pointer to the tag that has been sent, or NULL. + */ +void tracepoint_federate_to_rti(trace_event_t event_type, int fed_id, tag_t* tag) { + call_tracepoint(event_type, + NULL, // void* pointer, + tag ? *tag : NEVER_TAG, // tag* tag, + -1, // int worker, // no worker ID needed because this is called within a mutex + fed_id, // int src_id, + -1, // int dst_id, + NULL, // instant_t* physical_time (will be generated) + NULL, // trigger_t* trigger, + 0, // interval_t extra_delay + true // is_interval_start + ); +} + +/** + * Trace federate receiving a message from the RTI. + * @param event_type The type of event. Possible values are: + * + * @param fed_id The federate identifier. + * @param tag Pointer to the tag that has been received, or NULL. + */ +void tracepoint_federate_from_rti(trace_event_t event_type, int fed_id, tag_t* tag) { + // trace_event_t event_type = (type == MSG_TYPE_TAG_ADVANCE_GRANT)? federate_TAG : federate_PTAG; + call_tracepoint(event_type, + NULL, // void* pointer, + tag ?
*tag : NEVER_TAG, // tag* tag, + -1, // int worker, // no worker ID needed because this is called within a mutex + fed_id, // int src_id, + -1, // int dst_id, + NULL, // instant_t* physical_time (will be generated) + NULL, // trigger_t* trigger, + 0, // interval_t extra_delay + false // is_interval_start + ); +} + +/** + * Trace federate sending a message to another federate. + * @param event_type The type of event. Possible values are: + * + * @param fed_id The federate identifier. + * @param partner_id The partner federate identifier. + * @param tag Pointer to the tag that has been sent, or NULL. + */ +void tracepoint_federate_to_federate(trace_event_t event_type, int fed_id, int partner_id, tag_t* tag) { + call_tracepoint(event_type, + NULL, // void* pointer, + tag ? *tag : NEVER_TAG, // tag* tag, + -1, // int worker, // no worker ID needed because this is called within a mutex + fed_id, // int src_id, + partner_id, // int dst_id, + NULL, // instant_t* physical_time (will be generated) + NULL, // trigger_t* trigger, + 0, // interval_t extra_delay + true // is_interval_start + ); +} + +/** + * Trace federate receiving a message from another federate. + * @param event_type The type of event. Possible values are: + * + * @param fed_id The federate identifier. + * @param partner_id The partner federate identifier. + * @param tag Pointer to the tag that has been received, or NULL. + */ +void tracepoint_federate_from_federate(trace_event_t event_type, int fed_id, int partner_id, tag_t* tag) { + call_tracepoint(event_type, + NULL, // void* pointer, + tag ? 
*tag : NEVER_TAG, // tag* tag, + -1, // int worker, // no worker ID needed because this is called within a mutex + fed_id, // int src_id, + partner_id, // int dst_id, + NULL, // instant_t* physical_time (will be generated) + NULL, // trigger_t* trigger, + 0, // interval_t extra_delay + false // is_interval_start + ); +} +#endif // FEDERATED + +//////////////////////////////////////////////////////////// +//// For RTI execution + +#ifdef RTI_TRACE + +/** + * Trace RTI sending a message to a federate. + * @param event_type The type of event. Possible values are: + * + * @param fed_id The federate ID. + * @param tag Pointer to the tag that has been sent, or NULL. + */ +void tracepoint_rti_to_federate(trace_event_t event_type, int fed_id, tag_t* tag) { + call_tracepoint(event_type, + NULL, // void* pointer, + tag ? *tag : NEVER_TAG, // tag_t* tag, + fed_id, // int worker (one thread per federate) + -1, // int src_id + fed_id, // int dst_id + NULL, // instant_t* physical_time (will be generated) + NULL, // trigger_t* trigger, + 0, // interval_t extra_delay + true // is_interval_start + ); +} + +/** + * Trace RTI receiving a message from a federate. + * @param event_type The type of event. Possible values are: + * + * @param fed_id The federate ID. + * @param tag Pointer to the tag that has been received, or NULL. + */ +void tracepoint_rti_from_federate(trace_event_t event_type, int fed_id, tag_t* tag) { + call_tracepoint(event_type, + NULL, // void* pointer, + tag ?
*tag : NEVER_TAG, // tag_t* tag, + fed_id, // int worker (one thread per federate) + -1, // int src_id (RTI is the source of the tracepoint) + fed_id, // int dst_id + NULL, // instant_t* physical_time (will be generated) + NULL, // trigger_t* trigger, + 0, // interval_t extra_delay + false // is_interval_start + ); +} + +#endif // RTI_TRACE + +#endif // LF_TRACE diff --git a/core/utils/hashset/hashset.c b/core/utils/hashset/hashset.c index 9f519be55..fa67aacdc 100644 --- a/core/utils/hashset/hashset.c +++ b/core/utils/hashset/hashset.c @@ -13,7 +13,7 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * Modified in 2022 by Edward A. Lee (eal@berkeley.edu) so that stored items are * consistently of type void*. Note that the void* value 1 is used to mark a deleted * item and therefore cannot be stored in the hashset. @@ -27,126 +27,124 @@ static const unsigned int prime_1 = 73; static const unsigned int prime_2 = 5009; hashset_t hashset_create(unsigned short nbits) { - hashset_t set = calloc(1, sizeof(struct hashset_st)); - - if (set == NULL) { - return NULL; - } - set->nbits = nbits; - set->capacity = (size_t)(1 << set->nbits); - set->mask = set->capacity - 1; - set->items = (void**)calloc(set->capacity, sizeof(void*)); - if (set->items == NULL) { - hashset_destroy(set); - return NULL; - } - set->nitems = 0; - set->n_deleted_items = 0; - return set; + hashset_t set = calloc(1, sizeof(struct hashset_st)); + + if (set == NULL) { + return NULL; + } + set->nbits = nbits; + set->capacity = (size_t)(1 << set->nbits); + set->mask = set->capacity - 1; + set->items = (void**)calloc(set->capacity, sizeof(void*)); + if (set->items == NULL) { + hashset_destroy(set); + return NULL; + } + set->nitems = 0; + set->n_deleted_items = 0; + return set; } -size_t hashset_num_items(hashset_t set) { - return set->nitems; -} +size_t 
hashset_num_items(hashset_t set) { return set->nitems; } void hashset_destroy(hashset_t set) { - if (set && set->items) { - free(set->items); - } - free(set); + if (set && set->items) { + free(set->items); + } + free(set); } // FIXME #include -static int hashset_add_member(hashset_t set, void *item) { - size_t ii; - - if (item == 0 || item == (void*)1) { - return -1; - } - ii = set->mask & (prime_1 * (size_t)item); - - // Search chain of possible locations and stop when slot is empty. - // Chain of possibilities always ends with an empty (0) even - // if some items in the chain have been deleted (1). If a - // deleted slot is found along the way, remember it to use it. - int available = -1; - while (set->items[ii] != 0) { - if (set->items[ii] == item) { - return 0; - } else { - if (set->items[ii] == (void*)1 && available < 0) { - // Slot is available from deletion. - available = (int)ii; - } - /* search the next slot */ - ii = set->mask & (ii + prime_2); - } - } - set->nitems++; - if (available >= 0) { - // Use the slot available from a deletion. - set->n_deleted_items--; - set->items[available] = item; +static int hashset_add_member(hashset_t set, void* item) { + size_t ii; + + if (item == 0 || item == (void*)1) { + return -1; + } + ii = set->mask & (prime_1 * (size_t)item); + + // Search chain of possible locations and stop when slot is empty. + // Chain of possibilities always ends with an empty (0) even + // if some items in the chain have been deleted (1). If a + // deleted slot is found along the way, remember it to use it. + int available = -1; + while (set->items[ii] != 0) { + if (set->items[ii] == item) { + return 0; } else { - set->items[ii] = item; + if (set->items[ii] == (void*)1 && available < 0) { + // Slot is available from deletion. + available = (int)ii; + } + /* search the next slot */ + ii = set->mask & (ii + prime_2); } - return 1; + } + set->nitems++; + if (available >= 0) { + // Use the slot available from a deletion. 
+ set->n_deleted_items--; + set->items[available] = item; + } else { + set->items[ii] = item; + } + return 1; } static void maybe_rehash(hashset_t set) { - void** old_items; - size_t old_capacity, ii; - - if (set->nitems + set->n_deleted_items >= (float)set->capacity * 0.85f) { - old_items = set->items; - old_capacity = set->capacity; - set->nbits++; - set->capacity = (size_t)(1 << set->nbits); - set->mask = set->capacity - 1; - set->items = (void**)calloc(set->capacity, sizeof(void*)); - set->nitems = 0; - set->n_deleted_items = 0; - assert(set->items); - for (ii = 0; ii < old_capacity; ii++) { - hashset_add_member(set, old_items[ii]); - } - free(old_items); + void** old_items; + size_t old_capacity, ii; + + if (set->nitems + set->n_deleted_items >= (float)set->capacity * 0.85f) { + old_items = set->items; + old_capacity = set->capacity; + set->nbits++; + set->capacity = (size_t)(1 << set->nbits); + set->mask = set->capacity - 1; + set->items = (void**)calloc(set->capacity, sizeof(void*)); + set->nitems = 0; + set->n_deleted_items = 0; + assert(set->items); + for (ii = 0; ii < old_capacity; ii++) { + hashset_add_member(set, old_items[ii]); } + free(old_items); + } } -int hashset_add(hashset_t set, void *item) { - int rv = hashset_add_member(set, item); - maybe_rehash(set); - return rv; +int hashset_add(hashset_t set, void* item) { + int rv = hashset_add_member(set, item); + maybe_rehash(set); + return rv; } -int hashset_remove(hashset_t set, void *item) { - size_t ii = set->mask & (prime_1 * (size_t)item); - - while (set->items[ii] != 0) { - if (set->items[ii] == item) { - set->items[ii] = (void*)1; - set->nitems--; - set->n_deleted_items++; - return 1; - } else { - ii = set->mask & (ii + prime_2); - } +int hashset_remove(hashset_t set, void* item) { + size_t ii = set->mask & (prime_1 * (size_t)item); + + while (set->items[ii] != 0) { + if (set->items[ii] == item) { + set->items[ii] = (void*)1; + set->nitems--; + set->n_deleted_items++; + return 1; + } else { + ii 
= set->mask & (ii + prime_2); } - return 0; + } + return 0; } -int hashset_is_member(hashset_t set, void *item) { - size_t ii = set->mask & (prime_1 * (size_t)item); +int hashset_is_member(hashset_t set, void* item) { + size_t ii = set->mask & (prime_1 * (size_t)item); - while (set->items[ii] != 0) { - if (set->items[ii] == item) { - return 1; - } else { - ii = set->mask & (ii + prime_2); - } + while (set->items[ii] != 0) { + if (set->items[ii] == item) { + return 1; + } else { + ii = set->mask & (ii + prime_2); } - return 0; + } + return 0; } diff --git a/core/utils/hashset/hashset_itr.c b/core/utils/hashset/hashset_itr.c index 5d18b372b..75763b0cc 100644 --- a/core/utils/hashset/hashset_itr.c +++ b/core/utils/hashset/hashset_itr.c @@ -43,9 +43,9 @@ int hashset_iterator_has_next(hashset_itr_t itr) { return 0; } /* peek to find another entry */ - while(index < itr->set->capacity) { + while (index < itr->set->capacity) { void* value = itr->set->items[index++]; - if(value != 0 && value != (void*)1) + if (value != 0 && value != (void*)1) return 1; } @@ -82,4 +82,3 @@ void* hashset_iterator_value(hashset_itr_t itr) { return itr->set->items[itr->index]; } - diff --git a/core/utils/lf_semaphore.c b/core/utils/lf_semaphore.c index a4295d47b..2d0255ecb 100644 --- a/core/utils/lf_semaphore.c +++ b/core/utils/lf_semaphore.c @@ -33,7 +33,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "lf_semaphore.h" #include -#include "util.h" // Defines macros LF_MUTEX_LOCK, etc. +#include "util.h" // Defines macros LF_MUTEX_LOCK, etc. /** * @brief Create a new semaphore. @@ -42,11 +42,11 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * @return lf_semaphore_t* Can be NULL on error. 
*/ lf_semaphore_t* lf_semaphore_new(int count) { - lf_semaphore_t* semaphore = (lf_semaphore_t*)malloc(sizeof(lf_semaphore_t)); - LF_MUTEX_INIT(&semaphore->mutex); - LF_COND_INIT(&semaphore->cond, &semaphore->mutex); - semaphore->count = count; - return semaphore; + lf_semaphore_t* semaphore = (lf_semaphore_t*)malloc(sizeof(lf_semaphore_t)); + LF_MUTEX_INIT(&semaphore->mutex); + LF_COND_INIT(&semaphore->cond, &semaphore->mutex); + semaphore->count = count; + return semaphore; } /** @@ -56,11 +56,11 @@ lf_semaphore_t* lf_semaphore_new(int count) { * @param i The count to add. */ void lf_semaphore_release(lf_semaphore_t* semaphore, int i) { - assert(semaphore != NULL); - LF_MUTEX_LOCK(&semaphore->mutex); - semaphore->count += i; - lf_cond_broadcast(&semaphore->cond); - LF_MUTEX_UNLOCK(&semaphore->mutex); + assert(semaphore != NULL); + LF_MUTEX_LOCK(&semaphore->mutex); + semaphore->count += i; + lf_cond_broadcast(&semaphore->cond); + LF_MUTEX_UNLOCK(&semaphore->mutex); } /** @@ -69,13 +69,13 @@ void lf_semaphore_release(lf_semaphore_t* semaphore, int i) { * @param semaphore Instance of a semaphore. */ void lf_semaphore_acquire(lf_semaphore_t* semaphore) { - assert(semaphore != NULL); - LF_MUTEX_LOCK(&semaphore->mutex); - while (semaphore->count == 0) { - lf_cond_wait(&semaphore->cond); - } - semaphore->count--; - LF_MUTEX_UNLOCK(&semaphore->mutex); + assert(semaphore != NULL); + LF_MUTEX_LOCK(&semaphore->mutex); + while (semaphore->count == 0) { + lf_cond_wait(&semaphore->cond); + } + semaphore->count--; + LF_MUTEX_UNLOCK(&semaphore->mutex); } /** @@ -84,12 +84,12 @@ void lf_semaphore_acquire(lf_semaphore_t* semaphore) { * @param semaphore Instance of a semaphore. 
*/ void lf_semaphore_wait(lf_semaphore_t* semaphore) { - assert(semaphore != NULL); - LF_MUTEX_LOCK(&semaphore->mutex); - while (semaphore->count == 0) { - lf_cond_wait(&semaphore->cond); - } - LF_MUTEX_UNLOCK(&semaphore->mutex); + assert(semaphore != NULL); + LF_MUTEX_LOCK(&semaphore->mutex); + while (semaphore->count == 0) { + lf_cond_wait(&semaphore->cond); + } + LF_MUTEX_UNLOCK(&semaphore->mutex); } /** @@ -98,7 +98,7 @@ void lf_semaphore_wait(lf_semaphore_t* semaphore) { * @param semaphore Instance of a semaphore. */ void lf_semaphore_destroy(lf_semaphore_t* semaphore) { - assert(semaphore != NULL); - free(semaphore); + assert(semaphore != NULL); + free(semaphore); } #endif diff --git a/core/utils/pqueue.c b/core/utils/pqueue.c index f7fe4bb67..e73e3ed48 100644 --- a/core/utils/pqueue.c +++ b/core/utils/pqueue.c @@ -4,64 +4,41 @@ * @author Edward A. Lee * @copyright (c) 2020-2023, The University of California at Berkeley. * License: BSD 2-clause - * + * * @brief Priority queue definitions for the event queue and reaction queue. 
*/ -#include "platform.h" +#include "low_level_platform.h" #include "pqueue.h" #include "util.h" #include "lf_types.h" +int in_reverse_order(pqueue_pri_t thiz, pqueue_pri_t that) { return (thiz > that); } -int in_reverse_order(pqueue_pri_t thiz, pqueue_pri_t that) { - return (thiz > that); -} +int in_no_particular_order(pqueue_pri_t thiz, pqueue_pri_t that) { return 0; } -int in_no_particular_order(pqueue_pri_t thiz, pqueue_pri_t that) { - return 0; -} +int event_matches(void* event1, void* event2) { return (((event_t*)event1)->trigger == ((event_t*)event2)->trigger); } -int event_matches(void* event1, void* event2) { - return (((event_t*)event1)->trigger == ((event_t*)event2)->trigger); -} +int reaction_matches(void* a, void* b) { return (a == b); } -int reaction_matches(void* a, void* b) { - return (a == b); -} +pqueue_pri_t get_event_time(void* event) { return (pqueue_pri_t)(((event_t*)event)->time); } -pqueue_pri_t get_event_time(void *event) { - return (pqueue_pri_t)(((event_t*) event)->time); -} +pqueue_pri_t get_reaction_index(void* reaction) { return ((reaction_t*)reaction)->index; } -pqueue_pri_t get_reaction_index(void *reaction) { - return ((reaction_t*) reaction)->index; -} +size_t get_event_position(void* event) { return ((event_t*)event)->pos; } -size_t get_event_position(void *event) { - return ((event_t*) event)->pos; -} +size_t get_reaction_position(void* reaction) { return ((reaction_t*)reaction)->pos; } -size_t get_reaction_position(void *reaction) { - return ((reaction_t*) reaction)->pos; -} +void set_event_position(void* event, size_t pos) { ((event_t*)event)->pos = pos; } -void set_event_position(void *event, size_t pos) { - ((event_t*) event)->pos = pos; -} - -void set_reaction_position(void *reaction, size_t pos) { - ((reaction_t*) reaction)->pos = pos; -} +void set_reaction_position(void* reaction, size_t pos) { ((reaction_t*)reaction)->pos = pos; } -void print_reaction(void *reaction) { - reaction_t *r = (reaction_t*)reaction; - 
LF_PRINT_DEBUG("%s: chain_id: %llu, index: %llx, reaction: %p", - r->name, r->chain_id, r->index, r); +void print_reaction(void* reaction) { + reaction_t* r = (reaction_t*)reaction; + LF_PRINT_DEBUG("%s: chain_id: %llu, index: %llx, reaction: %p", r->name, r->chain_id, r->index, r); } -void print_event(void *event) { - event_t *e = (event_t*)event; - LF_PRINT_DEBUG("time: " PRINTF_TIME ", trigger: %p, token: %p", - e->time, e->trigger, e->token); +void print_event(void* event) { + event_t* e = (event_t*)event; + LF_PRINT_DEBUG("time: " PRINTF_TIME ", trigger: %p, token: %p", e->time, e->trigger, e->token); } diff --git a/core/utils/pqueue_base.c b/core/utils/pqueue_base.c index 9bba5289e..30d84286e 100644 --- a/core/utils/pqueue_base.c +++ b/core/utils/pqueue_base.c @@ -40,322 +40,304 @@ #include "pqueue_base.h" #include "util.h" -#define LF_LEFT(i) ((i) << 1) -#define LF_RIGHT(i) (((i) << 1) + 1) +#define LF_LEFT(i) ((i) << 1) +#define LF_RIGHT(i) (((i) << 1) + 1) #define LF_PARENT(i) ((i) >> 1) -void* find_equal(pqueue_t *q, void *e, int pos, pqueue_pri_t max) { - if (pos < 0) { - lf_print_error_and_exit("find_equal() called with a negative pos index."); - } +void* find_equal(pqueue_t* q, void* e, int pos, pqueue_pri_t max) { + if (pos < 0) { + lf_print_error_and_exit("find_equal() called with a negative pos index."); + } - // Stop the recursion when we've reached the end of the - // queue. This has to be done before accessing the queue - // to avoid segmentation fault. - if (!q || (size_t)pos >= q->size) { - return NULL; - } + // Stop the recursion when we've reached the end of the + // queue. This has to be done before accessing the queue + // to avoid segmentation fault. + if (!q || (size_t)pos >= q->size) { + return NULL; + } - void* rval; - void* curr = q->d[pos]; + void* rval; + void* curr = q->d[pos]; - // Stop the recursion when we've surpassed the maximum priority. 
- if (!curr || q->cmppri(q->getpri(curr), max)) { - return NULL; - } - - if (q->eqelem(curr, e)) { - return curr; - } else { - rval = find_equal(q, e, LF_LEFT(pos), max); - if (rval) - return rval; - else - return find_equal(q, e, LF_RIGHT(pos), max); - } + // Stop the recursion when we've surpassed the maximum priority. + if (!curr || q->cmppri(q->getpri(curr), max)) { return NULL; + } + + if (q->eqelem(curr, e)) { + return curr; + } else { + rval = find_equal(q, e, LF_LEFT(pos), max); + if (rval) + return rval; + else + return find_equal(q, e, LF_RIGHT(pos), max); + } + return NULL; } -void* find_equal_same_priority(pqueue_t *q, void *e, int pos) { - if (pos < 0) { - lf_print_error_and_exit("find_equal_same_priority() called with a negative pos index."); - } - - // Stop the recursion when we've reached the end of the - // queue. This has to be done before accessing the queue - // to avoid segmentation fault. - if (!q || (size_t)pos >= q->size) { - return NULL; - } +void* find_equal_same_priority(pqueue_t* q, void* e, int pos) { + if (pos < 0) { + lf_print_error_and_exit("find_equal_same_priority() called with a negative pos index."); + } - void* rval; - void* curr = q->d[pos]; + // Stop the recursion when we've reached the end of the + // queue. This has to be done before accessing the queue + // to avoid segmentation fault. + if (!q || (size_t)pos >= q->size) { + return NULL; + } - // Stop the recursion once we've surpassed the priority of the element - // we're looking for. 
- if (!curr || q->cmppri(q->getpri(curr), q->getpri(e))) { - return NULL; - } + void* rval; + void* curr = q->d[pos]; - if (q->getpri(curr) == q->getpri(e) && q->eqelem(curr, e)) { - return curr; - } else { - rval = find_equal_same_priority(q, e, LF_LEFT(pos)); - if (rval) - return rval; - else - return find_equal_same_priority(q, e, LF_RIGHT(pos)); - } - - // for (int i=1; i < q->size; i++) { - // if (q->d[i] == e) { - // return q->d[i]; - // } - // } + // Stop the recursion once we've surpassed the priority of the element + // we're looking for. + if (!curr || q->cmppri(q->getpri(curr), q->getpri(e))) { return NULL; + } + + if (q->getpri(curr) == q->getpri(e) && q->eqelem(curr, e)) { + return curr; + } else { + rval = find_equal_same_priority(q, e, LF_LEFT(pos)); + if (rval) + return rval; + else + return find_equal_same_priority(q, e, LF_RIGHT(pos)); + } + + // for (int i=1; i < q->size; i++) { + // if (q->d[i] == e) { + // return q->d[i]; + // } + // } + return NULL; } -pqueue_t * pqueue_init(size_t n, - pqueue_cmp_pri_f cmppri, - pqueue_get_pri_f getpri, - pqueue_get_pos_f getpos, - pqueue_set_pos_f setpos, - pqueue_eq_elem_f eqelem, - pqueue_print_entry_f prt) { - pqueue_t *q; - - if (!(q = (pqueue_t*)malloc(sizeof(pqueue_t)))) - return NULL; - - /* Need to allocate n+1 elements since element 0 isn't used. 
*/ - if (!(q->d = (void**)malloc((n + 1) * sizeof(void *)))) { - free(q); - return NULL; - } +pqueue_t* pqueue_init(size_t n, pqueue_cmp_pri_f cmppri, pqueue_get_pri_f getpri, pqueue_get_pos_f getpos, + pqueue_set_pos_f setpos, pqueue_eq_elem_f eqelem, pqueue_print_entry_f prt) { + pqueue_t* q; - q->size = 1; - q->avail = q->step = (n+1); /* see comment above about n+1 */ - q->cmppri = cmppri; - q->getpri = getpri; - q->getpos = getpos; - q->setpos = setpos; - q->eqelem = eqelem; - q->prt = prt; - return q; -} + if (!(q = (pqueue_t*)malloc(sizeof(pqueue_t)))) + return NULL; -void pqueue_free(pqueue_t *q) { - free(q->d); + /* Need to allocate n+1 elements since element 0 isn't used. */ + if (!(q->d = (void**)malloc((n + 1) * sizeof(void*)))) { free(q); + return NULL; + } + + q->size = 1; + q->avail = q->step = (n + 1); /* see comment above about n+1 */ + q->cmppri = cmppri; + q->getpri = getpri; + q->getpos = getpos; + q->setpos = setpos; + q->eqelem = eqelem; + q->prt = prt; + return q; } -size_t pqueue_size(pqueue_t *q) { - if (!q) return 0; - // Queue element 0 exists but doesn't count since it isn't used. - return (q->size - 1); +void pqueue_free(pqueue_t* q) { + free(q->d); + free(q); } -static size_t maxchild(pqueue_t *q, size_t i) { - size_t child_node = LF_LEFT(i); +size_t pqueue_size(pqueue_t* q) { + if (!q) + return 0; + // Queue element 0 exists but doesn't count since it isn't used. 
+ return (q->size - 1); +} + +static size_t maxchild(pqueue_t* q, size_t i) { + size_t child_node = LF_LEFT(i); - if (child_node >= q->size) - return 0; + if (child_node >= q->size) + return 0; - if ((child_node+1) < q->size && - (q->cmppri(q->getpri(q->d[child_node]), q->getpri(q->d[child_node+1])))) - child_node++; /* use right child instead of left */ + if ((child_node + 1) < q->size && (q->cmppri(q->getpri(q->d[child_node]), q->getpri(q->d[child_node + 1])))) + child_node++; /* use right child instead of left */ - return child_node; + return child_node; } -static size_t bubble_up(pqueue_t *q, size_t i) { - size_t parent_node; - void *moving_node = q->d[i]; - pqueue_pri_t moving_pri = q->getpri(moving_node); - - for (parent_node = LF_PARENT(i); - ((i > 1) && q->cmppri(q->getpri(q->d[parent_node]), moving_pri)); - i = parent_node, parent_node = LF_PARENT(i)) - { - q->d[i] = q->d[parent_node]; - q->setpos(q->d[i], i); - } +static size_t bubble_up(pqueue_t* q, size_t i) { + size_t parent_node; + void* moving_node = q->d[i]; + pqueue_pri_t moving_pri = q->getpri(moving_node); + + for (parent_node = LF_PARENT(i); ((i > 1) && q->cmppri(q->getpri(q->d[parent_node]), moving_pri)); + i = parent_node, parent_node = LF_PARENT(i)) { + q->d[i] = q->d[parent_node]; + q->setpos(q->d[i], i); + } - q->d[i] = moving_node; - q->setpos(moving_node, i); - return i; + q->d[i] = moving_node; + q->setpos(moving_node, i); + return i; } -static void percolate_down(pqueue_t *q, size_t i) { - size_t child_node; - void *moving_node = q->d[i]; - pqueue_pri_t moving_pri = q->getpri(moving_node); - - while ((child_node = maxchild(q, i)) && - q->cmppri(moving_pri, q->getpri(q->d[child_node]))) - { - q->d[i] = q->d[child_node]; - q->setpos(q->d[i], i); - i = child_node; - } +static void percolate_down(pqueue_t* q, size_t i) { + size_t child_node; + void* moving_node = q->d[i]; + pqueue_pri_t moving_pri = q->getpri(moving_node); - q->d[i] = moving_node; - q->setpos(moving_node, i); -} + while 
((child_node = maxchild(q, i)) && q->cmppri(moving_pri, q->getpri(q->d[child_node]))) { + q->d[i] = q->d[child_node]; + q->setpos(q->d[i], i); + i = child_node; + } -void* pqueue_find_equal_same_priority(pqueue_t *q, void *e) { - return find_equal_same_priority(q, e, 1); + q->d[i] = moving_node; + q->setpos(moving_node, i); } -void* pqueue_find_equal(pqueue_t *q, void *e, pqueue_pri_t max) { - return find_equal(q, e, 1, max); -} +void* pqueue_find_equal_same_priority(pqueue_t* q, void* e) { return find_equal_same_priority(q, e, 1); } -int pqueue_insert(pqueue_t *q, void *d) { - void **tmp; - size_t i; - size_t newsize; +void* pqueue_find_equal(pqueue_t* q, void* e, pqueue_pri_t max) { return find_equal(q, e, 1, max); } - if (!q) return 1; +int pqueue_insert(pqueue_t* q, void* d) { + void** tmp; + size_t i; + size_t newsize; - /* allocate more memory if necessary */ - if (q->size >= q->avail) { - newsize = q->size + q->step; - if (!(tmp = (void**)realloc(q->d, sizeof(void *) * newsize))) - return 1; - q->d = tmp; - q->avail = newsize; - } - /* insert item and organize the tree */ - i = q->size++; - q->d[i] = d; - bubble_up(q, i); + if (!q) + return 1; - return 0; + /* allocate more memory if necessary */ + if (q->size >= q->avail) { + newsize = q->size + q->step; + if (!(tmp = (void**)realloc(q->d, sizeof(void*) * newsize))) + return 1; + q->d = tmp; + q->avail = newsize; + } + /* insert item and organize the tree */ + i = q->size++; + q->d[i] = d; + bubble_up(q, i); + + return 0; } -int pqueue_remove(pqueue_t *q, void *d) { - if (q->size == 1) return 0; // Nothing to remove - size_t posn = q->getpos(d); - q->d[posn] = q->d[--q->size]; - if (q->cmppri(q->getpri(d), q->getpri(q->d[posn]))) - bubble_up(q, posn); - else - percolate_down(q, posn); - - return 0; +int pqueue_remove(pqueue_t* q, void* d) { + if (q->size == 1) + return 0; // Nothing to remove + size_t posn = q->getpos(d); + q->d[posn] = q->d[--q->size]; + if (q->cmppri(q->getpri(d), q->getpri(q->d[posn]))) 
+ bubble_up(q, posn); + else + percolate_down(q, posn); + + return 0; } -void* pqueue_pop(pqueue_t *q) { - if (!q || q->size == 1) - return NULL; +void* pqueue_pop(pqueue_t* q) { + if (!q || q->size == 1) + return NULL; - void* head; + void* head; - head = q->d[1]; - q->d[1] = q->d[--q->size]; - percolate_down(q, 1); + head = q->d[1]; + q->d[1] = q->d[--q->size]; + percolate_down(q, 1); - return head; + return head; } void pqueue_empty_into(pqueue_t** dest, pqueue_t** src) { - assert(src); - assert(dest); - assert(*src); - assert(*dest); - void* item; - if ((*dest)->size >= (*src)->size) { - while ((item = pqueue_pop(*src))) { - pqueue_insert(*dest, item); - } - } else { - while ((item = pqueue_pop(*dest))) { - pqueue_insert(*src, item); - } - - pqueue_t* tmp = *dest; - *dest = *src; - *src = tmp; + assert(src); + assert(dest); + assert(*src); + assert(*dest); + void* item; + if ((*dest)->size >= (*src)->size) { + while ((item = pqueue_pop(*src))) { + pqueue_insert(*dest, item); + } + } else { + while ((item = pqueue_pop(*dest))) { + pqueue_insert(*src, item); } -} -void* pqueue_peek(pqueue_t *q) { - void *d; - if (!q || q->size == 1) - return NULL; - d = q->d[1]; - return d; + pqueue_t* tmp = *dest; + *dest = *src; + *src = tmp; + } } -void pqueue_dump(pqueue_t *q, pqueue_print_entry_f print) { - size_t i; - - LF_PRINT_DEBUG("posn\tleft\tright\tparent\tmaxchild\t..."); - for (i = 1; i < q->size ;i++) { - LF_PRINT_DEBUG("%zu\t%zu\t%zu\t%zu\t%ul\t", - i, - LF_LEFT(i), LF_RIGHT(i), LF_PARENT(i), - (unsigned int)maxchild(q, i)); - print(q->d[i]); - } +void* pqueue_peek(pqueue_t* q) { + void* d; + if (!q || q->size == 1) + return NULL; + d = q->d[1]; + return d; } -void pqueue_print(pqueue_t *q, pqueue_print_entry_f print) { - pqueue_t *dup; - void *e; - - dup = pqueue_init(q->size, - q->cmppri, q->getpri, - q->getpos, q->setpos, q->eqelem, q->prt); - dup->size = q->size; - dup->avail = q->avail; - dup->step = q->step; - - memcpy(dup->d, q->d, (q->size * sizeof(void 
*))); - - while ((e = pqueue_pop(dup))) { - if (print == NULL) { - q->prt(e); - } else { - print(e); - } - } - pqueue_free(dup); +void pqueue_dump(pqueue_t* q, pqueue_print_entry_f print) { + size_t i; + + LF_PRINT_DEBUG("posn\tleft\tright\tparent\tmaxchild\t..."); + for (i = 1; i < q->size; i++) { + LF_PRINT_DEBUG("%zu\t%zu\t%zu\t%zu\t%ul\t", i, LF_LEFT(i), LF_RIGHT(i), LF_PARENT(i), (unsigned int)maxchild(q, i)); + print(q->d[i]); + } } -static int subtree_is_valid(pqueue_t *q, int pos) { - if (pos < 0) { - lf_print_error_and_exit("subtree_is_valid() called with a negative pos index."); - } +void pqueue_print(pqueue_t* q, pqueue_print_entry_f print) { + pqueue_t* dup; + void* e; - int left_pos = LF_LEFT(pos); - if (left_pos < 0) { - lf_print_error_and_exit("subtree_is_valid(): index overflow detected."); - } + dup = pqueue_init(q->size, q->cmppri, q->getpri, q->getpos, q->setpos, q->eqelem, q->prt); + dup->size = q->size; + dup->avail = q->avail; + dup->step = q->step; - if ((size_t)left_pos < q->size) { - /* has a left child */ - if (q->cmppri(q->getpri(q->d[pos]), q->getpri(q->d[LF_LEFT(pos)]))) - return 0; - if (!subtree_is_valid(q, LF_LEFT(pos))) - return 0; - } + memcpy(dup->d, q->d, (q->size * sizeof(void*))); - int right_pos = LF_RIGHT(pos); - if (right_pos < 0) { - lf_print_error_and_exit("subtree_is_valid(): index overflow detected."); - } - if ((size_t)right_pos < q->size) { - /* has a right child */ - if (q->cmppri(q->getpri(q->d[pos]), q->getpri(q->d[LF_RIGHT(pos)]))) - return 0; - if (!subtree_is_valid(q, LF_RIGHT(pos))) - return 0; + while ((e = pqueue_pop(dup))) { + if (print == NULL) { + q->prt(e); + } else { + print(e); } - return 1; + } + pqueue_free(dup); } -int pqueue_is_valid(pqueue_t *q) { - return subtree_is_valid(q, 1); +static int subtree_is_valid(pqueue_t* q, int pos) { + if (pos < 0) { + lf_print_error_and_exit("subtree_is_valid() called with a negative pos index."); + } + + int left_pos = LF_LEFT(pos); + if (left_pos < 0) { + 
lf_print_error_and_exit("subtree_is_valid(): index overflow detected."); + } + + if ((size_t)left_pos < q->size) { + /* has a left child */ + if (q->cmppri(q->getpri(q->d[pos]), q->getpri(q->d[LF_LEFT(pos)]))) + return 0; + if (!subtree_is_valid(q, LF_LEFT(pos))) + return 0; + } + + int right_pos = LF_RIGHT(pos); + if (right_pos < 0) { + lf_print_error_and_exit("subtree_is_valid(): index overflow detected."); + } + if ((size_t)right_pos < q->size) { + /* has a right child */ + if (q->cmppri(q->getpri(q->d[pos]), q->getpri(q->d[LF_RIGHT(pos)]))) + return 0; + if (!subtree_is_valid(q, LF_RIGHT(pos))) + return 0; + } + return 1; } + +int pqueue_is_valid(pqueue_t* q) { return subtree_is_valid(q, 1); } diff --git a/core/utils/pqueue_support.h b/core/utils/pqueue_support.h index f4b37c846..b7c0a08c1 100644 --- a/core/utils/pqueue_support.h +++ b/core/utils/pqueue_support.h @@ -32,7 +32,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifndef PQUEUE_SUPPORT_H #define PQUEUE_SUPPORT_H - #include "../reactor.h" // ********** Priority Queue Support Start @@ -40,97 +39,75 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** * Return whether the first and second argument are given in reverse order. */ -static int in_reverse_order(pqueue_pri_t thiz, pqueue_pri_t that) { - return (thiz > that); -} +static int in_reverse_order(pqueue_pri_t thiz, pqueue_pri_t that) { return (thiz > that); } /** * Return false (0) regardless of reaction order. */ -static int in_no_particular_order(pqueue_pri_t thiz, pqueue_pri_t that) { - return false; -} +static int in_no_particular_order(pqueue_pri_t thiz, pqueue_pri_t that) { return false; } /** * Return whether or not the given events have matching triggers. 
*/ -static int event_matches(void* next, void* curr) { - return (((event_t*)next)->trigger == ((event_t*)curr)->trigger); -} +static int event_matches(void* next, void* curr) { return (((event_t*)next)->trigger == ((event_t*)curr)->trigger); } /** - * Return whether or not the given reaction_t pointers + * Return whether or not the given reaction_t pointers * point to the same struct. */ -static int reaction_matches(void* next, void* curr) { - return (next == curr); -} +static int reaction_matches(void* next, void* curr) { return (next == curr); } /** * Report a priority equal to the time of the given event. * Used for sorting pointers to event_t structs in the event queue. */ -static pqueue_pri_t get_event_time(void *a) { - return (pqueue_pri_t)(((event_t*) a)->time); -} +static pqueue_pri_t get_event_time(void* a) { return (pqueue_pri_t)(((event_t*)a)->time); } /** * Report a priority equal to the index of the given reaction. - * Used for sorting pointers to reaction_t structs in the + * Used for sorting pointers to reaction_t structs in the * blocked and executing queues. */ -static pqueue_pri_t get_reaction_index(void *a) { - return ((reaction_t*) a)->index; -} +static pqueue_pri_t get_reaction_index(void* a) { return ((reaction_t*)a)->index; } /** * Return the given event's position in the queue. */ -static size_t get_event_position(void *a) { - return ((event_t*) a)->pos; -} +static size_t get_event_position(void* a) { return ((event_t*)a)->pos; } /** * Return the given reaction's position in the queue. */ -static size_t get_reaction_position(void *a) { - return ((reaction_t*) a)->pos; -} +static size_t get_reaction_position(void* a) { return ((reaction_t*)a)->pos; } /** * Set the given event's position in the queue. */ -static void set_event_position(void *a, size_t pos) { - ((event_t*) a)->pos = pos; -} +static void set_event_position(void* a, size_t pos) { ((event_t*)a)->pos = pos; } /** * Return the given reaction's position in the queue. 
*/ -static void set_reaction_position(void *a, size_t pos) { - ((reaction_t*) a)->pos = pos; -} +static void set_reaction_position(void* a, size_t pos) { ((reaction_t*)a)->pos = pos; } /** * Print some information about the given reaction. - * + * * DEBUG function only. */ -static void print_reaction(void *reaction) { - reaction_t *r = (reaction_t*)reaction; - LF_PRINT_DEBUG("%s: chain_id:%llu, index: %llx, reaction: %p", - r->name, r->chain_id, r->index, r); +static void print_reaction(void* reaction) { + reaction_t* r = (reaction_t*)reaction; + LF_PRINT_DEBUG("%s: chain_id:%llu, index: %llx, reaction: %p", r->name, r->chain_id, r->index, r); } /** * Print some information about the given event. - * + * * DEBUG function only. */ -static void print_event(void *event) { - event_t *e = (event_t*)event; - LF_PRINT_DEBUG("time: " PRINTF_TIME ", trigger: %p, token: %p", - e->time, e->trigger, e->token); +static void print_event(void* event) { + event_t* e = (event_t*)event; + LF_PRINT_DEBUG("time: " PRINTF_TIME ", trigger: %p, token: %p", e->time, e->trigger, e->token); } // ********** Priority Queue Support End diff --git a/core/utils/pqueue_tag.c b/core/utils/pqueue_tag.c index 2d05af7bc..9406ca1ca 100644 --- a/core/utils/pqueue_tag.c +++ b/core/utils/pqueue_tag.c @@ -4,15 +4,15 @@ * @author Edward A. Lee * @copyright (c) 2023, The University of California at Berkeley * License in [BSD 2-clause](https://github.com/lf-lang/reactor-c/blob/main/LICENSE.md) - * + * * @brief Priority queue that uses tags for sorting. */ #include #include "pqueue_tag.h" -#include "util.h" // For lf_print -#include "platform.h" // For PRINTF_TAG +#include "util.h" // For lf_print +#include "low_level_platform.h" // For PRINTF_TAG ////////////////// // Local functions, not intended for use outside this file. @@ -23,9 +23,7 @@ * element is also the priority. This function is of type pqueue_get_pri_f. * @param element A pointer to a pqueue_tag_element_t, cast to void*. 
*/ -static pqueue_pri_t pqueue_tag_get_priority(void *element) { - return (pqueue_pri_t) element; -} +static pqueue_pri_t pqueue_tag_get_priority(void* element) { return (pqueue_pri_t)element; } /** * @brief Callback comparison function for the tag-based priority queue. @@ -33,9 +31,9 @@ static pqueue_pri_t pqueue_tag_get_priority(void *element) { * This function is of type pqueue_cmp_pri_f. * @param priority1 A pointer to a pqueue_tag_element_t, cast to pqueue_pri_t. * @param priority2 A pointer to a pqueue_tag_element_t, cast to pqueue_pri_t. -*/ + */ static int pqueue_tag_compare(pqueue_pri_t priority1, pqueue_pri_t priority2) { - return (lf_tag_compare(((pqueue_tag_element_t*) priority1)->tag, ((pqueue_tag_element_t*) priority2)->tag) > 0); + return (lf_tag_compare(((pqueue_tag_element_t*)priority1)->tag, ((pqueue_tag_element_t*)priority2)->tag) > 0); } /** @@ -46,7 +44,7 @@ static int pqueue_tag_compare(pqueue_pri_t priority1, pqueue_pri_t priority2) { * @param element2 A pointer to a pqueue_tag_element_t, cast to void*. */ static int pqueue_tag_matches(void* element1, void* element2) { - return lf_tag_compare(((pqueue_tag_element_t*) element1)->tag, ((pqueue_tag_element_t*) element2)->tag) == 0; + return lf_tag_compare(((pqueue_tag_element_t*)element1)->tag, ((pqueue_tag_element_t*)element2)->tag) == 0; } /** @@ -54,9 +52,7 @@ static int pqueue_tag_matches(void* element1, void* element2) { * This function is of type pqueue_get_pos_f. * @param element A pointer to a pqueue_tag_element_t, cast to void*. */ -static size_t pqueue_tag_get_position(void *element) { - return ((pqueue_tag_element_t*)element)->pos; -} +static size_t pqueue_tag_get_position(void* element) { return ((pqueue_tag_element_t*)element)->pos; } /** * @brief Callback function to set the position of an element. @@ -64,106 +60,92 @@ static size_t pqueue_tag_get_position(void *element) { * @param element A pointer to a pqueue_tag_element_t, cast to void*. * @param pos The position. 
*/ -static void pqueue_tag_set_position(void *element, size_t pos) { - ((pqueue_tag_element_t*)element)->pos = pos; -} +static void pqueue_tag_set_position(void* element, size_t pos) { ((pqueue_tag_element_t*)element)->pos = pos; } /** * @brief Callback function to print information about an element. * This is a function of type pqueue_print_entry_f. * @param element A pointer to a pqueue_tag_element_t, cast to void*. */ -static void pqueue_tag_print_element(void *element) { - tag_t tag = ((pqueue_tag_element_t*) element)->tag; - lf_print("Element with tag " PRINTF_TAG ".", tag.time, tag.microstep); +static void pqueue_tag_print_element(void* element) { + tag_t tag = ((pqueue_tag_element_t*)element)->tag; + lf_print("Element with tag " PRINTF_TAG ".", tag.time, tag.microstep); } ////////////////// // Functions defined in pqueue_tag.h. pqueue_tag_t* pqueue_tag_init(size_t initial_size) { - return (pqueue_tag_t*) pqueue_init( - initial_size, - pqueue_tag_compare, - pqueue_tag_get_priority, - pqueue_tag_get_position, - pqueue_tag_set_position, - pqueue_tag_matches, - pqueue_tag_print_element); + return (pqueue_tag_t*)pqueue_init(initial_size, pqueue_tag_compare, pqueue_tag_get_priority, pqueue_tag_get_position, + pqueue_tag_set_position, pqueue_tag_matches, pqueue_tag_print_element); } -void pqueue_tag_free(pqueue_tag_t *q) { - for (int i = 1; i < q->size ;i++) { - if (q->d[i] != NULL && ((pqueue_tag_element_t*)q->d[i])->is_dynamic) { - free(q->d[i]); - } +void pqueue_tag_free(pqueue_tag_t* q) { + for (int i = 1; i < q->size; i++) { + if (q->d[i] != NULL && ((pqueue_tag_element_t*)q->d[i])->is_dynamic) { + free(q->d[i]); } - pqueue_free((pqueue_t*)q); + } + pqueue_free((pqueue_t*)q); } -size_t pqueue_tag_size(pqueue_tag_t *q) { - return pqueue_size((pqueue_t*)q); -} +size_t pqueue_tag_size(pqueue_tag_t* q) { return pqueue_size((pqueue_t*)q); } -int pqueue_tag_insert(pqueue_tag_t* q, pqueue_tag_element_t* d) { - return pqueue_insert((pqueue_t*)q, (void*)d); -} +int 
pqueue_tag_insert(pqueue_tag_t* q, pqueue_tag_element_t* d) { return pqueue_insert((pqueue_t*)q, (void*)d); } int pqueue_tag_insert_tag(pqueue_tag_t* q, tag_t t) { - pqueue_tag_element_t* d = (pqueue_tag_element_t*) malloc(sizeof(pqueue_tag_element_t)); - d->is_dynamic = 1; - d->tag = t; - return pqueue_tag_insert(q, d); + pqueue_tag_element_t* d = (pqueue_tag_element_t*)malloc(sizeof(pqueue_tag_element_t)); + d->is_dynamic = 1; + d->tag = t; + return pqueue_tag_insert(q, d); } -pqueue_tag_element_t* pqueue_tag_find_with_tag(pqueue_tag_t *q, tag_t t) { - // Create elements on the stack. These elements are only needed during - // the duration of this function call, so putting them on the stack is OK. - pqueue_tag_element_t element = {.tag = t, .pos = 0, .is_dynamic = false}; - pqueue_tag_element_t forever = {.tag = FOREVER_TAG, .pos = 0, .is_dynamic = false}; - return pqueue_find_equal((pqueue_t*)q, (void*)&element, (pqueue_pri_t)&forever); +pqueue_tag_element_t* pqueue_tag_find_with_tag(pqueue_tag_t* q, tag_t t) { + // Create elements on the stack. These elements are only needed during + // the duration of this function call, so putting them on the stack is OK. 
+ pqueue_tag_element_t element = {.tag = t, .pos = 0, .is_dynamic = false}; + pqueue_tag_element_t forever = {.tag = FOREVER_TAG, .pos = 0, .is_dynamic = false}; + return pqueue_find_equal((pqueue_t*)q, (void*)&element, (pqueue_pri_t)&forever); } int pqueue_tag_insert_if_no_match(pqueue_tag_t* q, tag_t t) { - if (pqueue_tag_find_with_tag(q, t) == NULL) { - return pqueue_tag_insert_tag(q, t); - } else { - return 1; - } + if (pqueue_tag_find_with_tag(q, t) == NULL) { + return pqueue_tag_insert_tag(q, t); + } else { + return 1; + } } -pqueue_tag_element_t* pqueue_tag_peek(pqueue_tag_t* q) { - return (pqueue_tag_element_t*) pqueue_peek((pqueue_t*)q); -} +pqueue_tag_element_t* pqueue_tag_peek(pqueue_tag_t* q) { return (pqueue_tag_element_t*)pqueue_peek((pqueue_t*)q); } tag_t pqueue_tag_peek_tag(pqueue_tag_t* q) { - pqueue_tag_element_t* element = (pqueue_tag_element_t*)pqueue_tag_peek(q); - if (element == NULL) return FOREVER_TAG; - else return element->tag; + pqueue_tag_element_t* element = (pqueue_tag_element_t*)pqueue_tag_peek(q); + if (element == NULL) + return FOREVER_TAG; + else + return element->tag; } -pqueue_tag_element_t* pqueue_tag_pop(pqueue_tag_t* q) { - return (pqueue_tag_element_t*)pqueue_pop((pqueue_t*)q); -} +pqueue_tag_element_t* pqueue_tag_pop(pqueue_tag_t* q) { return (pqueue_tag_element_t*)pqueue_pop((pqueue_t*)q); } tag_t pqueue_tag_pop_tag(pqueue_tag_t* q) { - pqueue_tag_element_t* element = (pqueue_tag_element_t*)pqueue_tag_pop(q); - if (element == NULL) return FOREVER_TAG; - else { - tag_t result = element->tag; - if (element->is_dynamic) free(element); - return result; - } + pqueue_tag_element_t* element = (pqueue_tag_element_t*)pqueue_tag_pop(q); + if (element == NULL) + return FOREVER_TAG; + else { + tag_t result = element->tag; + if (element->is_dynamic) + free(element); + return result; + } +} + +void pqueue_tag_remove(pqueue_tag_t* q, pqueue_tag_element_t* e) { pqueue_remove((pqueue_t*)q, (void*)e); } + +void 
pqueue_tag_remove_up_to(pqueue_tag_t* q, tag_t t) { + tag_t head = pqueue_tag_peek_tag(q); + while (lf_tag_compare(head, FOREVER_TAG) < 0 && lf_tag_compare(head, t) <= 0) { + pqueue_tag_pop(q); + head = pqueue_tag_peek_tag(q); + } } - -void pqueue_tag_remove(pqueue_tag_t* q, pqueue_tag_element_t* e) { - pqueue_remove((pqueue_t*) q, (void*) e); -} - -void pqueue_tag_remove_up_to(pqueue_tag_t* q, tag_t t){ - tag_t head = pqueue_tag_peek_tag(q); - while (lf_tag_compare(head, FOREVER_TAG) < 0 && lf_tag_compare(head, t) <= 0) { - pqueue_tag_pop(q); - head = pqueue_tag_peek_tag(q); - } -} \ No newline at end of file diff --git a/core/utils/util.c b/core/utils/util.c index f03403eaf..554707edb 100644 --- a/core/utils/util.c +++ b/core/utils/util.c @@ -41,10 +41,10 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include #include #include -#include // Defines memcpy() -#include // Defines va_list -#include // Defines nanosleep() -#include +#include // Defines memcpy() +#include // Defines va_list +#include // Defines nanosleep() +#include #ifndef NUMBER_OF_FEDERATES #define NUMBER_OF_FEDERATES 1 @@ -54,7 +54,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define SOCKET_READ_RETRY_INTERVAL 1000000 /** - * The ID of this federate. For a non-federated execution, this will be -1. + * The ID of this federate. For a non-federated execution, this will be -1. * For a federated execution, it will be assigned in the generated code. */ int _lf_my_fed_id = -1; @@ -68,163 +68,152 @@ print_message_function_t* print_message_function = NULL; /** The level of messages to redirect to print_message_function. 
*/ int print_message_level = -1; -int lf_fed_id() { - return _lf_my_fed_id; -} +int lf_fed_id() { return _lf_my_fed_id; } // Declaration needed to attach attributes to suppress warnings of the form: // "warning: function '_lf_message_print' might be a candidate for 'gnu_printf' // format attribute [-Wsuggest-attribute=format]" -void _lf_message_print( - int is_error, const char* prefix, const char* format, va_list args, int log_level -) ATTRIBUTE_FORMAT_PRINTF(3, 0); +void _lf_message_print(int is_error, const char* prefix, const char* format, va_list args, int log_level) + ATTRIBUTE_FORMAT_PRINTF(3, 0); /** * Print a fatal error message. Internal function. */ static void lf_vprint_fatal_error(const char* format, va_list args) { - _lf_message_print(1, "FATAL ERROR: ", format, args, LOG_LEVEL_ERROR); + _lf_message_print(1, "FATAL ERROR: ", format, args, LOG_LEVEL_ERROR); } /** * Internal implementation of the next few reporting functions. */ -void _lf_message_print( - int is_error, const char* prefix, const char* format, va_list args, int log_level -) { // Disable warnings about format check. - // The logging level may be set either by a LOG_LEVEL #define - // (which is code generated based on the logging target property) - // or by a lf_register_print_function() call. Honor both. If neither - // has been set, then assume LOG_LEVEL_INFO. If both have been set, - // then honor the maximum. - int print_level = -1; +void _lf_message_print(int is_error, const char* prefix, const char* format, va_list args, + int log_level) { // Disable warnings about format check. + // The logging level may be set either by a LOG_LEVEL #define + // (which is code generated based on the logging target property) + // or by a lf_register_print_function() call. Honor both. If neither + // has been set, then assume LOG_LEVEL_INFO. If both have been set, + // then honor the maximum. 
+ int print_level = -1; #ifdef LOG_LEVEL - print_level = LOG_LEVEL; + print_level = LOG_LEVEL; #endif - if (print_level < print_message_level) { - print_level = print_message_level; - } - if (print_level < 0) { - // Neither has been set. - print_level = LOG_LEVEL_INFO; - } - if (log_level <= print_level) { - // Rather than calling printf() multiple times, we need to call it just - // once because this function is invoked by multiple threads. - // If we make multiple calls to printf(), then the results could be - // interleaved between threads. - // vprintf() is a version that takes an arg list rather than multiple args. - char* message; - if (_lf_my_fed_id < 0) { - size_t length = strlen(prefix) + strlen(format) + 32; - message = (char*) malloc(length + 1); - snprintf(message, length, "%s%s\n", - prefix, format); - } else { + if (print_level < print_message_level) { + print_level = print_message_level; + } + if (print_level < 0) { + // Neither has been set. + print_level = LOG_LEVEL_INFO; + } + if (log_level <= print_level) { + // Rather than calling printf() multiple times, we need to call it just + // once because this function is invoked by multiple threads. + // If we make multiple calls to printf(), then the results could be + // interleaved between threads. + // vprintf() is a version that takes an arg list rather than multiple args. 
+ char* message; + if (_lf_my_fed_id < 0) { + size_t length = strlen(prefix) + strlen(format) + 32; + message = (char*)malloc(length + 1); + snprintf(message, length, "%s%s\n", prefix, format); + } else { #if defined STANDALONE_RTI - size_t length = strlen(prefix) + strlen(format) + 37; - message = (char*) malloc(length + 1); - snprintf(message, length, "RTI: %s%s\n", - prefix, format); + size_t length = strlen(prefix) + strlen(format) + 37; + message = (char*)malloc(length + 1); + snprintf(message, length, "RTI: %s%s\n", prefix, format); #else - // Get the federate name from the top-level environment, which by convention is the first. - environment_t *envs; - _lf_get_environments(&envs); - char* name = envs->name; - size_t length = strlen(prefix) + strlen(format) + +strlen(name) + 32; - message = (char*) malloc(length + 1); - // If the name has prefix "federate__", strip that out. - if (strncmp(name, "federate__", 10) == 0) name += 10; - - snprintf(message, length, "Fed %d (%s): %s%s\n", - _lf_my_fed_id, name, prefix, format); + // Get the federate name from the top-level environment, which by convention is the first. + environment_t* envs; + _lf_get_environments(&envs); + char* name = envs->name; + size_t length = strlen(prefix) + strlen(format) + +strlen(name) + 32; + message = (char*)malloc(length + 1); + // If the name has prefix "federate__", strip that out. + if (strncmp(name, "federate__", 10) == 0) + name += 10; + + snprintf(message, length, "Fed %d (%s): %s%s\n", _lf_my_fed_id, name, prefix, format); #endif // STANDALONE_RTI - } - if (print_message_function == NULL) { - // NOTE: Send all messages to stdout, not to stderr, so that ordering makes sense. - vfprintf(stdout, message, args); - } else { - (*print_message_function)(message, args); - } - free(message); - } + } + if (print_message_function == NULL) { + // NOTE: Send all messages to stdout, not to stderr, so that ordering makes sense. 
+ vfprintf(stdout, message, args); + } else { + (*print_message_function)(message, args); + } + free(message); + } } void lf_print(const char* format, ...) { - va_list args; - va_start (args, format); - lf_vprint(format, args); - va_end (args); + va_list args; + va_start(args, format); + lf_vprint(format, args); + va_end(args); } -void lf_vprint(const char* format, va_list args) { - _lf_message_print(0, "", format, args, LOG_LEVEL_INFO); -} +void lf_vprint(const char* format, va_list args) { _lf_message_print(0, "", format, args, LOG_LEVEL_INFO); } void lf_print_log(const char* format, ...) { - va_list args; - va_start (args, format); - lf_vprint_log(format, args); - va_end (args); + va_list args; + va_start(args, format); + lf_vprint_log(format, args); + va_end(args); } -void lf_vprint_log(const char* format, va_list args) { - _lf_message_print(0, "LOG: ", format, args, LOG_LEVEL_LOG); -} +void lf_vprint_log(const char* format, va_list args) { _lf_message_print(0, "LOG: ", format, args, LOG_LEVEL_LOG); } void lf_print_debug(const char* format, ...) { - va_list args; - va_start (args, format); - lf_vprint_debug(format, args); - va_end (args); + va_list args; + va_start(args, format); + lf_vprint_debug(format, args); + va_end(args); } void lf_vprint_debug(const char* format, va_list args) { - _lf_message_print(0, "DEBUG: ", format, args, LOG_LEVEL_DEBUG); + _lf_message_print(0, "DEBUG: ", format, args, LOG_LEVEL_DEBUG); } void lf_print_error(const char* format, ...) { - va_list args; - va_start (args, format); - lf_vprint_error(format, args); - va_end (args); + va_list args; + va_start(args, format); + lf_vprint_error(format, args); + va_end(args); } void lf_vprint_error(const char* format, va_list args) { - _lf_message_print(1, "ERROR: ", format, args, LOG_LEVEL_ERROR); + _lf_message_print(1, "ERROR: ", format, args, LOG_LEVEL_ERROR); } void lf_print_warning(const char* format, ...) 
{ - va_list args; - va_start (args, format); - lf_vprint_warning(format, args); - va_end (args); + va_list args; + va_start(args, format); + lf_vprint_warning(format, args); + va_end(args); } void lf_vprint_warning(const char* format, va_list args) { - _lf_message_print(1, "WARNING: ", format, args, LOG_LEVEL_WARNING); + _lf_message_print(1, "WARNING: ", format, args, LOG_LEVEL_WARNING); } void lf_print_error_and_exit(const char* format, ...) { - va_list args; - va_start (args, format); - lf_vprint_fatal_error(format, args); - va_end (args); - fflush(stdout); - exit(EXIT_FAILURE); + va_list args; + va_start(args, format); + lf_vprint_fatal_error(format, args); + va_end(args); + fflush(stdout); + exit(EXIT_FAILURE); } void lf_print_error_system_failure(const char* format, ...) { - va_list args; - va_start (args, format); - lf_vprint_error(format, args); - va_end (args); - lf_print_error_and_exit("Error %d: %s", errno, strerror(errno)); - exit(EXIT_FAILURE); + va_list args; + va_start(args, format); + lf_vprint_error(format, args); + va_end(args); + lf_print_error_and_exit("Error %d: %s", errno, strerror(errno)); + exit(EXIT_FAILURE); } void lf_register_print_function(print_message_function_t* function, int log_level) { - print_message_function = function; - print_message_level = log_level; + print_message_function = function; + print_message_level = log_level; } - diff --git a/core/utils/vector.c b/core/utils/vector.c index a249174a7..2124f1213 100644 --- a/core/utils/vector.c +++ b/core/utils/vector.c @@ -19,15 +19,13 @@ static void vector_resize(vector_t* v, size_t new_capacity); * @param initial_capacity The desired initial capacity to allocate. 
*/ vector_t vector_new(size_t initial_capacity) { - void** start = (void**) malloc(initial_capacity * sizeof(void*)); - assert(start); - return (vector_t) { - .start = start, - .next = start, - .end = start + initial_capacity, - .votes_required = REQUIRED_VOTES_TO_SHRINK, - .votes = 0 - }; + void** start = (void**)malloc(initial_capacity * sizeof(void*)); + assert(start); + return (vector_t){.start = start, + .next = start, + .end = start + initial_capacity, + .votes_required = REQUIRED_VOTES_TO_SHRINK, + .votes = 0}; } /** @@ -35,8 +33,8 @@ vector_t vector_new(size_t initial_capacity) { * @param v Any vector. */ void vector_free(vector_t* v) { - assert(v); - free(v->start); + assert(v); + free(v->start); } /** @@ -45,11 +43,11 @@ void vector_free(vector_t* v) { * @param element An element that the vector should contain. */ void vector_push(vector_t* v, void* element) { - if (v->next == v->end) { - v->votes_required++; - vector_resize(v, (v->end - v->start) * SCALE_FACTOR); - } - *(v->next++) = element; + if (v->next == v->end) { + v->votes_required++; + vector_resize(v, (v->end - v->start) * SCALE_FACTOR); + } + *(v->next++) = element; } /** @@ -60,15 +58,15 @@ void vector_push(vector_t* v, void* element) { * @param size The size of the given array. */ void vector_pushall(vector_t* v, void** array, size_t size) { - void** required_end = v->next + size; - if (required_end > v->end) { - vector_resize(v, (required_end - v->start) * SCALE_FACTOR); - } - for (size_t i = 0; i < size; i++) { - assert(array[i]); - v->next[i] = array[i]; - } - v->next += size; + void** required_end = v->next + size; + if (required_end > v->end) { + vector_resize(v, (required_end - v->start) * SCALE_FACTOR); + } + for (size_t i = 0; i < size; i++) { + assert(array[i]); + v->next[i] = array[i]; + } + v->next += size; } /** @@ -77,16 +75,16 @@ void vector_pushall(vector_t* v, void** array, size_t size) { * @param v Any vector. 
*/ void* vector_pop(vector_t* v) { - if (v->next == v->start) { - if (v->votes >= v->votes_required) { - size_t new_capacity = (v->end - v->start) / SCALE_FACTOR; - if (new_capacity > 0) { - vector_resize(v, new_capacity); - } - } - return NULL; + if (v->next == v->start) { + if (v->votes >= v->votes_required) { + size_t new_capacity = (v->end - v->start) / SCALE_FACTOR; + if (new_capacity > 0) { + vector_resize(v, new_capacity); + } } - return *(--v->next); + return NULL; + } + return *(--v->next); } /** @@ -96,40 +94,38 @@ void* vector_pop(vector_t* v) { * is automatically expanded and filled with NULL pointers as needed. * If no element at `idx` has been previously set, then the value * pointed to by the returned pointer will be NULL. - * + * * @param v The vector. * @param idx The index into the vector. - * + * * @return A pointer to the element at 'idx', which is itself a pointer. */ void** vector_at(vector_t* v, size_t idx) { - void** vector_position = v->start + idx; - if ((vector_position + 1) > v->next) { - v->next = vector_position + 1; + void** vector_position = v->start + idx; + if ((vector_position + 1) > v->next) { + v->next = vector_position + 1; + } + if (v->next >= v->end) { + v->votes_required++; + size_t new_size = (v->end - v->start) * SCALE_FACTOR; + // Find a size that includes idx + while (new_size <= idx) { + new_size *= SCALE_FACTOR; } - if (v->next >= v->end) { - v->votes_required++; - size_t new_size = (v->end - v->start) * SCALE_FACTOR; - // Find a size that includes idx - while (new_size <= idx) { - new_size *= SCALE_FACTOR; - } - vector_resize(v, new_size); - } - // Note: Can't re-use vector_position because v->start can move after - // resizing. - return v->start + idx; + vector_resize(v, new_size); + } + // Note: Can't re-use vector_position because v->start can move after + // resizing. + return v->start + idx; } /** * @brief Return the size of the vector. - * + * * @param v Any vector * @return size_t The size of the vector. 
*/ -size_t vector_size(vector_t* v) { - return (v->next - v->start); -} +size_t vector_size(vector_t* v) { return (v->next - v->start); } /** * Vote on whether this vector should be given less memory. @@ -140,9 +136,9 @@ size_t vector_size(vector_t* v) { * @param v Any vector. */ void vector_vote(vector_t* v) { - size_t size = v->next - v->start; - int vote = size * CAPACITY_TO_SIZE_RATIO_FOR_SHRINK_VOTE <= (size_t) (v->end - v->start); - v->votes = vote * v->votes + vote; + size_t size = v->next - v->start; + int vote = size * CAPACITY_TO_SIZE_RATIO_FOR_SHRINK_VOTE <= (size_t)(v->end - v->start); + v->votes = vote * v->votes + vote; } /** @@ -151,16 +147,16 @@ void vector_vote(vector_t* v) { * @param v A vector that should have a different capacity. */ static void vector_resize(vector_t* v, size_t new_capacity) { - if (new_capacity == 0) { - // Don't shrink the queue further - return; - } - size_t size = v->next - v->start; - assert(size <= new_capacity); - void** start = (void**) realloc(v->start, new_capacity * sizeof(void*)); - assert(start); - v->votes = 0; - v->start = start; - v->next = start + size; - v->end = start + new_capacity; + if (new_capacity == 0) { + // Don't shrink the queue further + return; + } + size_t size = v->next - v->start; + assert(size <= new_capacity); + void** start = (void**)realloc(v->start, new_capacity * sizeof(void*)); + assert(start); + v->votes = 0; + v->start = start; + v->next = start + size; + v->end = start + new_capacity; } diff --git a/include/api/reaction_macros.h b/include/api/reaction_macros.h index 554d3be8b..37d90258d 100644 --- a/include/api/reaction_macros.h +++ b/include/api/reaction_macros.h @@ -4,23 +4,23 @@ * @copyright (c) 2020-2024, The University of California at Berkeley. * License: BSD 2-clause * @brief Macros providing an API for use in inline reaction bodies. - * + * * This set of macros is defined prior to each reaction body and undefined after the reaction body * using the set_undef.h header file. 
If you wish to use these macros in external code, such as * that implementing a bodiless reaction, then you can include this header file (and at least * reactor.h, plus possibly a few other header files) in your code. - * + * * The purpose for these macros is to provide a semblance of polymorphism even though C does not support * polymorphism. For example, `lf_set(port, value)` is a macro where the first argument is a specific * port struct and the second type is a value with a type corresponding to the port's type. It is not * possible in C to provide a function that can be called with a port struct and a value of any type. - * + * * Some of the macros are provided for convenience. For example, the macro can automatically provide * common arguments such as the environment and can cast arguments to required base types to suppress * warning. - * - * Note for target language developers. This is one way of developing a target language where - * the C core runtime is adopted. This file is a translation layer that implements Lingua Franca + * + * Note for target language developers. This is one way of developing a target language where + * the C core runtime is adopted. This file is a translation layer that implements Lingua Franca * APIs which interact with the internal APIs. */ @@ -39,10 +39,10 @@ * @brief Mark a port present. * * This sets the is_present field of the specified output to true. - * + * * This macro is a thin wrapper around the lf_set_present() function. * It simply casts the argument to `lf_port_base_t*` to suppress warnings. - * + * * @param out The output port (by name). */ #define lf_set_present(out) lf_set_present((lf_port_base_t*)out) @@ -51,34 +51,34 @@ * @brief Set the specified output (or input of a contained reactor) to the specified value. * * If the value argument is a primitive type such as int, - * double, etc. as well as the built-in types bool and string, + * double, etc. 
as well as the built-in types bool and string, * the value is copied and therefore the variable carrying the * value can be subsequently modified without changing the output. * This also applies to structs with a type defined by a typedef * so that the type designating string does not end in '*'. - * + * * If the value argument is a pointer * to memory that the calling reaction has dynamically allocated, * the memory will be automatically freed once all downstream * reactions no longer need the value. * If 'lf_set_destructor' is called on 'out', then that destructor - * will be used to free 'value'. + * will be used to free 'value'. * Otherwise, the default void free(void*) function is used. - * + * * @param out The output port (by name) or input of a contained * reactor in form input_name.port_name. * @param value The value to insert into the self struct. */ -#define lf_set(out, val) \ -do { \ - out->value = val; \ - lf_set_present(out); \ - if (((token_template_t*)out)->token != NULL) { \ - /* The cast "*((void**) &out->value)" is a hack to make the code */ \ - /* compile with non-token types where value is not a pointer. */ \ - lf_token_t* token = _lf_initialize_token_with_value((token_template_t*)out, *((void**) &out->value), 1); \ - } \ -} while(0) +#define lf_set(out, val) \ + do { \ + out->value = val; \ + lf_set_present(out); \ + if (((token_template_t*)out)->token != NULL) { \ + /* The cast "*((void**) &out->value)" is a hack to make the code */ \ + /* compile with non-token types where value is not a pointer. */ \ + lf_token_t* token = _lf_initialize_token_with_value((token_template_t*)out, *((void**)&out->value), 1); \ + } \ + } while (0) /** * @brief Set the specified output (or input of a contained reactor) @@ -93,72 +93,72 @@ do { \ * @param length The length of the array to send. 
*/ #ifndef __cplusplus -#define lf_set_array(out, val, len) \ -do { \ - lf_set_present(out); \ - lf_token_t* token = _lf_initialize_token_with_value((token_template_t*)out, val, len); \ - out->value = token->value; \ - out->length = len; \ -} while(0) +#define lf_set_array(out, val, len) \ + do { \ + lf_set_present(out); \ + lf_token_t* token = _lf_initialize_token_with_value((token_template_t*)out, val, len); \ + out->value = token->value; \ + out->length = len; \ + } while (0) #else -#define lf_set_array(out, val, len) \ -do { \ - lf_set_present(out); \ - lf_token_t* token = _lf_initialize_token_with_value((token_template_t*)out, val, len); \ - out->value = static_castvalue)>(token->value); \ - out->length = len; \ -} while(0) +#define lf_set_array(out, val, len) \ + do { \ + lf_set_present(out); \ + lf_token_t* token = _lf_initialize_token_with_value((token_template_t*)out, val, len); \ + out->value = static_castvalue)>(token->value); \ + out->length = len; \ + } while (0) #endif /** * @brief Set the specified output (or input of a contained reactor) * to the specified token value. - * + * * Tokens in the C runtime wrap messages that are in dynamically allocated memory and * perform reference counting to ensure that memory is not freed prematurely. - * + * * @param out The output port (by name). * @param token A pointer to token obtained from an input, an action, or from `lf_new_token()`. 
*/ #ifndef __cplusplus -#define lf_set_token(out, newtoken) \ -do { \ - lf_set_present(out); \ - _lf_replace_template_token((token_template_t*)out, newtoken); \ - out->value = newtoken->value; \ - out->length = newtoken->length; \ -} while(0) +#define lf_set_token(out, newtoken) \ + do { \ + lf_set_present(out); \ + _lf_replace_template_token((token_template_t*)out, newtoken); \ + out->value = newtoken->value; \ + out->length = newtoken->length; \ + } while (0) #else -#define lf_set_token(out, newtoken) \ -do { \ - lf_set_present(out); \ - _lf_replace_template_token((token_template_t*)out, newtoken); \ - out->value = static_castvalue)>(newtoken->value); \ - out->length = newtoken->length; \ -} while(0) +#define lf_set_token(out, newtoken) \ + do { \ + lf_set_present(out); \ + _lf_replace_template_token((token_template_t*)out, newtoken); \ + out->value = static_castvalue)>(newtoken->value); \ + out->length = newtoken->length; \ + } while (0) #endif /** * @brief Set the destructor associated with the specified port. - * + * * The destructor will be used to free any value sent through the specified port when all * downstream users of the value are finished with it. - * + * * @param out The output port (by name) or input of a contained reactor in form reactor.port_name. * @param dtor A pointer to a void function that takes a pointer argument - * (or NULL to use the default void free(void*) function. + * (or NULL to use the default void free(void*) function. */ #define lf_set_destructor(out, dtor) ((token_type_t*)out)->destructor = dtor /** * @brief Set the copy constructor associated with the specified port. - * + * * The copy constructor will be used to copy any value sent through the specified port whenever * a downstream user of the value declares a mutable input port or calls `lf_writable_copy()`. - * + * * @param out The output port (by name) or input of a contained reactor in form reactor.port_name. 
* @param dtor A pointer to a void function that takes a pointer argument - * (or NULL to use the default void `memcpy()` function. + * (or NULL to use the default void `memcpy()` function. */ #define lf_set_copy_constructor(out, cpy_ctor) ((token_type_t*)out)->copy_constructor = cpy_ctor diff --git a/include/api/reaction_macros_undef.h b/include/api/reaction_macros_undef.h index 833d91edc..601ea34d3 100644 --- a/include/api/reaction_macros_undef.h +++ b/include/api/reaction_macros_undef.h @@ -4,7 +4,7 @@ * @copyright (c) 2020-2024, The University of California at Berkeley. * License: BSD 2-clause * @brief Undefine macros defined in api/reaction_macros.h. - * + * * This file is included at the end of each reaction body to undefine the macros used in reaction bodies. */ @@ -23,7 +23,7 @@ #endif #undef lf_tag -#undef lf_time_logical -#undef lf_time_logical_elapsed +#undef lf_time_logical +#undef lf_time_logical_elapsed #endif // REACTION_MACROS_H diff --git a/include/api/schedule.h b/include/api/schedule.h index d8d0c7ec5..8021814fd 100644 --- a/include/api/schedule.h +++ b/include/api/schedule.h @@ -7,7 +7,7 @@ * License: BSD 2-clause * * @brief API functions for scheduling actions. - * + * * Most of these functions take a `void*` pointer to an action, which will be internally cast to * a `lf_action_base_t*` pointer. The cast could be done by macros in reaction_macros.h, but unlike * the macros defined there, it is common for `lf_schedule` functions to be invoked outside of reaction @@ -24,7 +24,7 @@ /** * @brief Schedule an action to occur with the specified time offset with no payload (no value conveyed). - * + * * The later tag will depend on whether the action is logical or physical. If it is logical, * the time of the event will be the current logical time of the environment associated with * the action plus the minimum delay of the action plus the extra delay. 
If that time is equal @@ -42,14 +42,14 @@ trigger_handle_t lf_schedule(void* action, interval_t offset); /** * @brief Schedule the specified action with an integer value at a later logical time. - * + * * The later tag will depend on whether the action is logical or physical. If it is logical, * the time of the event will be the current logical time of the environment associated with * the action plus the minimum delay of the action plus the extra delay. If that time is equal * to the current time, then the tag will be one microstep beyond the current tag. * If the action is physical, the time will be the current physical time plus the extra delay, * and the microstep will be zero. - * + * * This wraps a copy of the integer value in a token. See lf_schedule_token() for more details. * * @param action The action to be triggered (a pointer to an `lf_action_base_t`). @@ -61,16 +61,16 @@ trigger_handle_t lf_schedule_int(void* action, interval_t extra_delay, int value /** * @brief Schedule the specified action at a later tag with the specified token as a payload. - * + * * The later tag will depend on whether the action is logical or physical. If it is logical, * the time of the event will be the current logical time of the environment associated with * the action plus the minimum delay of the action plus the extra delay. If that time is equal * to the current time, then the tag will be one microstep beyond the current tag. * If the action is physical, the time will be the current physical time plus the extra delay, * and the microstep will be zero. - * + * * For a logical action: - * + * * A logical action has a minimum delay (default is zero) and a minimum spacing, which also * defaults to zero. 
The logical time at which this scheduled event will trigger is the current time * of the environment associated with the action plus the offset plus the delay argument given to @@ -116,7 +116,7 @@ trigger_handle_t lf_schedule_token(void* action, interval_t extra_delay, lf_toke /** * @brief Schedule an action to occur with the specified value and time offset with a * copy of the specified value. - * + * * If the value is non-null, then it will be copied * into newly allocated memory under the assumption that its size is given in * the trigger's token object's element_size field multiplied by the specified @@ -138,12 +138,11 @@ trigger_handle_t lf_schedule_token(void* action, interval_t extra_delay, lf_toke * @return A handle to the event, or 0 if no event was scheduled, or -1 for * error. */ -trigger_handle_t lf_schedule_copy( - void* action, interval_t offset, void* value, size_t length); +trigger_handle_t lf_schedule_copy(void* action, interval_t offset, void* value, size_t length); /** * @brief Variant of lf_schedule_token that creates a token to carry the specified value. - * + * * The value is required to be malloc'd memory with a size equal to the * element_size of the specified action times the length parameter. * @@ -162,10 +161,10 @@ trigger_handle_t lf_schedule_value(void* action, interval_t extra_delay, void* v /** * @brief Schedule the specified trigger to execute in the specified environment with given delay and token. - * + * * This is the most flexible version of the schedule functions and is used in the implementation * of many of the others. End users would rarely use it. - * + * * This will schedule the specified trigger at env->current_tag.time plus the offset of the * specified trigger plus the delay. The value is required to be either * NULL or a pointer to a token wrapping the payload. 
The token carries @@ -199,11 +198,11 @@ trigger_handle_t lf_schedule_trigger(environment_t* env, trigger_t* trigger, int /** * @brief Check the deadline of the currently executing reaction against the * current physical time. - * + * * If the deadline has passed, invoke the deadline * handler (if invoke_deadline_handler parameter is set true) and return true. * Otherwise, return false. - * + * * This function is intended to be used within a reaction that has been invoked without a deadline * violation, but that wishes to check whether the deadline gets violated _during_ the execution of * the reaction. This can be used, for example, to implement a timeout mechanism that bounds the diff --git a/include/core/clock.h b/include/core/clock.h index 947df048a..114b7792e 100644 --- a/include/core/clock.h +++ b/include/core/clock.h @@ -13,7 +13,7 @@ #ifndef CLOCK_H #define CLOCK_H -#include "platform.h" +#include "low_level_platform.h" /** * Block the calling thread until wakeup_time is reached or the thread is @@ -24,8 +24,7 @@ * @return 0 on success or -1 if interrupted. */ -int lf_clock_interruptable_sleep_until_locked(environment_t *env, instant_t wakeup_time); - +int lf_clock_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_time); /** * Retrieve the current physical time from the platform API. This adds any clock synchronization offset @@ -34,7 +33,7 @@ int lf_clock_interruptable_sleep_until_locked(environment_t *env, instant_t wake * @param now A pointer to the location in which to store the result. * @return 0 on success, -1 on failure to read the platform clock. */ -int lf_clock_gettime(instant_t *now); +int lf_clock_gettime(instant_t* now); #if !defined(LF_SINGLE_THREADED) /** @@ -46,7 +45,7 @@ int lf_clock_gettime(instant_t *now); * @return 0 on success, LF_TIMEOUT on timeout, platform-specific error * otherwise. 
*/ -int lf_clock_cond_timedwait(lf_cond_t *cond, instant_t wakeup_time); +int lf_clock_cond_timedwait(lf_cond_t* cond, instant_t wakeup_time); #endif #endif diff --git a/include/core/environment.h b/include/core/environment.h index 68437fc5f..98753c6fb 100644 --- a/include/core/environment.h +++ b/include/core/environment.h @@ -27,18 +27,18 @@ * * @section DESCRIPTION API for creating and destroying environments. An environment is the * "context" within which the reactors are executed. The environment contains data structures - * which are shared among the reactors such as priority queues, the current logical tag, + * which are shared among the reactors such as priority queues, the current logical tag, * the worker scheduler, and a lot of meta data. Each reactor stores a pointer to its * environment on its self-struct. If a LF program has multiple scheduling enclaves, * then each enclave will have its own environment. - * + * */ #ifndef ENVIRONMENT_H #define ENVIRONMENT_H #include "lf_types.h" -#include "platform.h" -#include "trace.h" +#include "low_level_platform.h" +#include "tracepoint.h" // Forward declarations so that a pointers can appear in the environment struct. typedef struct lf_scheduler_t lf_scheduler_t; @@ -67,82 +67,70 @@ typedef struct watchdog_t watchdog_t; * scheduling enclaves, then there will be one for each enclave. */ typedef struct environment_t { - bool initialized; - bool execution_started; // Events at the start tag have been pulled from the event queue. 
- char *name; - int id; - tag_t current_tag; - tag_t stop_tag; - pqueue_t *event_q; - pqueue_t *recycle_q; - pqueue_t *next_q; - bool** is_present_fields; - int is_present_fields_size; - bool** is_present_fields_abbreviated; - int is_present_fields_abbreviated_size; - vector_t sparse_io_record_sizes; - trigger_handle_t _lf_handle; - trigger_t** timer_triggers; - int timer_triggers_size; - reaction_t** startup_reactions; - int startup_reactions_size; - reaction_t** shutdown_reactions; - int shutdown_reactions_size; - reaction_t** reset_reactions; - int reset_reactions_size; - mode_environment_t* modes; - int watchdogs_size; - watchdog_t **watchdogs; - trace_t* trace; - int worker_thread_count; + bool initialized; + bool execution_started; // Events at the start tag have been pulled from the event queue. + char* name; + int id; + tag_t current_tag; + tag_t stop_tag; + pqueue_t* event_q; + pqueue_t* recycle_q; + pqueue_t* next_q; + bool** is_present_fields; + int is_present_fields_size; + bool** is_present_fields_abbreviated; + int is_present_fields_abbreviated_size; + vector_t sparse_io_record_sizes; + trigger_handle_t _lf_handle; + trigger_t** timer_triggers; + int timer_triggers_size; + reaction_t** startup_reactions; + int startup_reactions_size; + reaction_t** shutdown_reactions; + int shutdown_reactions_size; + reaction_t** reset_reactions; + int reset_reactions_size; + mode_environment_t* modes; + int watchdogs_size; + watchdog_t** watchdogs; + int worker_thread_count; #if defined(LF_SINGLE_THREADED) - pqueue_t *reaction_q; + pqueue_t* reaction_q; #else - int num_workers; - lf_thread_t* thread_ids; - lf_mutex_t mutex; - lf_cond_t event_q_changed; - lf_scheduler_t* scheduler; - _lf_tag_advancement_barrier barrier; - lf_cond_t global_tag_barrier_requestors_reached_zero; + int num_workers; + lf_thread_t* thread_ids; + lf_mutex_t mutex; + lf_cond_t event_q_changed; + lf_scheduler_t* scheduler; + _lf_tag_advancement_barrier barrier; + lf_cond_t 
global_tag_barrier_requestors_reached_zero; #endif // LF_SINGLE_THREADED #if defined(FEDERATED) - tag_t** _lf_intended_tag_fields; - int _lf_intended_tag_fields_size; -#endif // FEDERATED + tag_t** _lf_intended_tag_fields; + int _lf_intended_tag_fields_size; +#endif // FEDERATED #ifdef LF_ENCLAVES // TODO: Consider dropping #ifdef - enclave_info_t *enclave_info; + enclave_info_t* enclave_info; #endif } environment_t; #if defined(MODAL_REACTORS) struct mode_environment_t { - uint8_t triggered_reactions_request; - reactor_mode_state_t** modal_reactor_states; - int modal_reactor_states_size; - mode_state_variable_reset_data_t* state_resets; - int state_resets_size; + uint8_t triggered_reactions_request; + reactor_mode_state_t** modal_reactor_states; + int modal_reactor_states_size; + mode_state_variable_reset_data_t* state_resets; + int state_resets_size; }; #endif /** * @brief Initialize an environment struct with parameters given in the arguments. */ -int environment_init( - environment_t* env, - const char * name, - int id, - int num_workers, - int num_timers, - int num_startup_reactions, - int num_shutdown_reactions, - int num_reset_reactions, - int num_is_present_fields, - int num_modes, - int num_state_resets, - int num_watchdogs, - const char * trace_file_name -); +int environment_init(environment_t* env, const char* name, int id, int num_workers, int num_timers, + int num_startup_reactions, int num_shutdown_reactions, int num_reset_reactions, + int num_is_present_fields, int num_modes, int num_state_resets, int num_watchdogs, + const char* trace_file_name); /** * @brief Free the dynamically allocated memory on the environment struct. @@ -153,9 +141,7 @@ void environment_free(environment_t* env); /** * @brief Initialize the start and stop tags on the environment struct. 
*/ -void environment_init_tags( - environment_t *env, instant_t start_time, interval_t duration -); +void environment_init_tags(environment_t* env, instant_t start_time, interval_t duration); /** * @brief Will update the argument to point to the beginning of the array of environments in this program @@ -163,6 +149,6 @@ void environment_init_tags( * @param envs A double pointer which will be dereferenced and modified * @return int The number of environments in the array */ -int _lf_get_environments(environment_t **envs); +int _lf_get_environments(environment_t** envs); #endif diff --git a/include/core/federated/clock-sync.h b/include/core/federated/clock-sync.h index 8fd1886f5..72263e6bc 100644 --- a/include/core/federated/clock-sync.h +++ b/include/core/federated/clock-sync.h @@ -33,11 +33,11 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifndef CLOCK_SYNC_H #define CLOCK_SYNC_H -#include "platform.h" +#include "low_level_platform.h" /** * Number of required clock sync T4 messages per synchronization - * interval. The offset to the clock will not be adjusted until + * interval. The offset to the clock will not be adjusted until * this number of T4 clock synchronization messages have been received. */ #ifndef _LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL @@ -67,35 +67,34 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * estimated as: (T4 - T1) - (T3 - T2). */ typedef struct socket_stat_t { - instant_t remote_physical_clock_snapshot_T1; // T1 in PTP. The first snapshot of the physical - // clock of the remote device (the RTI). - instant_t local_physical_clock_snapshot_T2; // T2 in PTP. The first snapshot of the physical - // clock of the local device (the federate). - interval_t local_delay; // T3 - T2. Estimated delay between a consecutive - // receive and send on the socket for one byte. 
- int received_T4_messages_in_current_sync_window; // Checked against _LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL - // Must be reset to 0 every time it reaches the threshold. - interval_t history; // A history of clock synchronization data. For AVG - // strategy, this is a running partially computed average. - - /***** The following stats can be used to calculate an automated STP offset **************/ - /** FIXME: TODO: A federate should create a socket_stat_t for every federate it is connected to and keep record - of the following stats **/ - /*** Network stats ****/ - interval_t network_stat_round_trip_delay_max; // Maximum estimated delay between the local socket and the - // remote socket. - int network_stat_sample_index; // Current index of network_stat_samples - /*** Clock sync stats ***/ - interval_t clock_synchronization_error_bound; // A bound on the differences between this federate's clock and - // the remote clock. - // Note: The following array should come last because g++ will not allow - // designated initialization (e.g., .network_stat_sample_index = 0) out of - // order and we do not want to (and cannot) initialize this array statically - interval_t network_stat_samples[_LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL]; // Samples gathered during a clock sync - // period + instant_t remote_physical_clock_snapshot_T1; // T1 in PTP. The first snapshot of the physical + // clock of the remote device (the RTI). + instant_t local_physical_clock_snapshot_T2; // T2 in PTP. The first snapshot of the physical + // clock of the local device (the federate). + interval_t local_delay; // T3 - T2. Estimated delay between a consecutive + // receive and send on the socket for one byte. + int received_T4_messages_in_current_sync_window; // Checked against _LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL + // Must be reset to 0 every time it reaches the threshold. + interval_t history; // A history of clock synchronization data. 
For AVG + // strategy, this is a running partially computed average. + + /***** The following stats can be used to calculate an automated STP offset **************/ + /** FIXME: TODO: A federate should create a socket_stat_t for every federate it is connected to and keep record + of the following stats **/ + /*** Network stats ****/ + interval_t network_stat_round_trip_delay_max; // Maximum estimated delay between the local socket and the + // remote socket. + int network_stat_sample_index; // Current index of network_stat_samples + /*** Clock sync stats ***/ + interval_t clock_synchronization_error_bound; // A bound on the differences between this federate's clock and + // the remote clock. + // Note: The following array should come last because g++ will not allow + // designated initialization (e.g., .network_stat_sample_index = 0) out of + // order and we do not want to (and cannot) initialize this array statically + interval_t network_stat_samples[_LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL]; // Samples gathered during a clock sync + // period } socket_stat_t; - #ifdef _LF_CLOCK_SYNC_COLLECT_STATS /** * To hold statistics @@ -103,20 +102,21 @@ typedef struct socket_stat_t { struct lf_stat_ll; /** - * Update statistics on the socket based on the newly calculated network delay + * Update statistics on the socket based on the newly calculated network delay * and clock synchronization error - * + * * @param socket_stat The socket_stat_t struct that keeps track of stats for a given connection * @param network_round_trip_delay The newly calculated round trip delay to the remote federate/RTI * @param clock_synchronization_error The newly calculated clock synchronization error relative to * the remote federate/RTI */ -void update_socket_stat(struct socket_stat_t* socket_stat, long long network_delay, long long clock_synchronization_error); +void update_socket_stat(struct socket_stat_t* socket_stat, long long network_delay, + long long clock_synchronization_error); /** * 
Calculate statistics of the socket. * The releavent information is returned as a lf_stat_ll struct. - * + * * @param socket_stat The socket_stat_t struct that keeps track of stats for a given connection * @return An lf_stat_ll struct with relevant information. */ @@ -131,7 +131,7 @@ void reset_socket_stat(struct socket_stat_t* socket_stat); /** * Setup necessary functionalities to synchronize clock with the RTI. - * + * * @return port number to be sent to the RTI */ uint16_t setup_clock_synchronization_with_rti(void); @@ -148,7 +148,7 @@ uint16_t setup_clock_synchronization_with_rti(void); * physical clock with the RTI. * Failing to complete this protocol is treated as a catastrophic * error that causes the federate to exit. - * + * * @param rti_socket_TCP Pointer to the RTI's socket */ void synchronize_initial_physical_clock_with_rti(int* rti_socket_TCP); @@ -184,7 +184,7 @@ int handle_T1_clock_sync_message(unsigned char* buffer, int socket, instant_t t2 */ void handle_T4_clock_sync_message(unsigned char* buffer, int socket, instant_t r4); -/** +/** * Thread that listens for UDP inputs from the RTI. */ void* listen_to_rti_UDP_thread(void* args); @@ -202,13 +202,13 @@ int create_clock_sync_thread(lf_thread_t* thread_id); * @brief Add the current clock synchronization offset to a specified timestamp. * @param t Pointer to the timestamp to which to add the offset. */ -void clock_sync_apply_offset(instant_t *t); +void clock_sync_apply_offset(instant_t* t); /** * @brief Subtract the clock synchronization offset from a timestamp. * @param t The timestamp from which to subtract the current clock sync offset. */ -void clock_sync_remove_offset(instant_t *t); +void clock_sync_remove_offset(instant_t* t); /** * Set a fixed offset to the physical clock. 
@@ -218,5 +218,4 @@ void clock_sync_remove_offset(instant_t *t); */ void clock_sync_set_constant_bias(interval_t offset); - #endif // CLOCK_SYNC_H diff --git a/include/core/federated/federate.h b/include/core/federated/federate.h index f9eec6112..e8dc4bf0d 100644 --- a/include/core/federated/federate.h +++ b/include/core/federated/federate.h @@ -17,7 +17,7 @@ #include "tag.h" #include "lf_types.h" #include "environment.h" -#include "platform.h" +#include "low_level_platform.h" #ifndef ADVANCE_MESSAGE_INTERVAL #define ADVANCE_MESSAGE_INTERVAL MSEC(10) @@ -30,198 +30,187 @@ * Structure that a federate instance uses to keep track of its own state. */ typedef struct federate_instance_t { - /** - * The TCP socket descriptor for this federate to communicate with the RTI. - * This is set by lf_connect_to_rti(), which must be called before other - * functions that communicate with the rti are called. - */ - int socket_TCP_RTI; - - /** - * Thread listening for incoming TCP messages from the RTI. - */ - lf_thread_t RTI_socket_listener; - - /** - * Number of inbound physical connections to the federate. - * This can be either physical connections, or logical connections - * in the decentralized coordination, or both. - */ - size_t number_of_inbound_p2p_connections; - - /** - * Array of thread IDs for threads that listen for incoming messages. - * This is NULL if there are none and otherwise has size given by - * number_of_inbound_p2p_connections. - */ - lf_thread_t *inbound_socket_listeners; - - /** - * Number of outbound peer-to-peer connections from the federate. - * This can be either physical connections, or logical connections - * in the decentralized coordination, or both. - */ - size_t number_of_outbound_p2p_connections; - - /** - * An array that holds the socket descriptors for inbound - * connections from each federate. The index will be the federate - * ID of the remote sending federate. 
This is initialized at startup - * to -1 and is set to a socket ID by lf_handle_p2p_connections_from_federates() - * when the socket is opened. - * - * @note There will not be an inbound socket unless a physical connection - * or a p2p logical connection (by setting the coordination target property - * to "distributed") is specified in the Lingua Franca program where this - * federate is the destination. Multiple incoming p2p connections from the - * same remote federate will use the same socket. - */ - int sockets_for_inbound_p2p_connections[NUMBER_OF_FEDERATES]; - - /** - * An array that holds the socket descriptors for outbound direct - * connections to each remote federate. The index will be the federate - * ID of the remote receiving federate. This is initialized at startup - * to -1 and is set to a socket ID by lf_connect_to_federate() - * when the socket is opened. - * - * @note This federate will not open an outbound socket unless a physical - * connection or a p2p logical connection (by setting the coordination target - * property to "distributed") is specified in the Lingua Franca - * program where this federate acts as the source. Multiple outgoing p2p - * connections to the same remote federate will use the same socket. - */ - int sockets_for_outbound_p2p_connections[NUMBER_OF_FEDERATES]; - - /** - * Thread ID for a thread that accepts sockets and then supervises - * listening to those sockets for incoming P2P (physical) connections. - */ - lf_thread_t inbound_p2p_handling_thread_id; - - /** - * A socket descriptor for the socket server of the federate. - * This is assigned in lf_create_server(). - * This socket is used to listen to incoming physical connections from - * remote federates. Once an incoming connection is accepted, the - * opened socket will be stored in - * federate_sockets_for_inbound_p2p_connections. - */ - int server_socket; - - /** - * The port used for the server socket to listen for messages from other federates. 
- * The federate informs the RTI of this port once it has created its socket server by - * sending an ADDRESS_AD message (@see rti.h). - */ - int server_port; - - /** - * Most recent tag advance grant (TAG) received from the RTI, or NEVER if none - * has been received. This variable should only be accessed while holding the - * mutex lock on the top-level environment. - */ - tag_t last_TAG; - - /** - * Indicates whether the last TAG received is provisional or an ordinary TAG. - * If the last TAG has been provisional, network port absent reactions must be inserted. - * This variable should only be accessed while holding the mutex lock. - */ - bool is_last_TAG_provisional; - - /** - * Indicator of whether this federate has upstream federates. - * The default value of false may be overridden in _lf_initialize_trigger_objects. - */ - bool has_upstream; - - /** - * Indicator of whether this federate has downstream federates. - * The default value of false may be overridden in _lf_initialize_trigger_objects. - */ - bool has_downstream; - - /** - * - */ - tag_t last_skipped_LTC; - - /** - * - */ - tag_t last_DNET; - - /** - * Used to prevent the federate from sending a REQUEST_STOP - * message if it has already received a stop request from the RTI. - * This variable should only be accessed while holding a mutex lock. - */ - bool received_stop_request_from_rti; - - /** - * A record of the most recently sent LTC (latest tag complete) message. - * In some situations, federates can send logical_tag_complete for - * the same tag twice or more in-a-row to the RTI. For example, when - * _lf_next() returns without advancing tag. To prevent overwhelming - * the RTI with extra messages, record the last sent logical tag - * complete message and check against it in lf_latest_tag_complete(). 
- * - * @note Here, the underlying assumption is that the TCP stack will - * deliver the Logical TAG Complete message to the RTI eventually - * if it is deliverable - */ - tag_t last_sent_LTC; - - /** - * A record of the most recently sent NET (next event tag) message. - */ - tag_t last_sent_NET; - - /** - * For use in federates with centralized coordination, the minimum - * time delay between a physical action within this federate and an - * output from this federate. This is NEVER if there is causal - * path from a physical action to any output. - */ - instant_t min_delay_from_physical_action_to_federate_output; - - /** - * Trace object for this federate, used if tracing is enabled. - */ - trace_t* trace; - - #ifdef FEDERATED_DECENTRALIZED - /** - * Thread responsible for setting ports to absent by an STAA offset if they - * aren't already known. - */ - lf_thread_t staaSetter; - #endif + /** + * The TCP socket descriptor for this federate to communicate with the RTI. + * This is set by lf_connect_to_rti(), which must be called before other + * functions that communicate with the rti are called. + */ + int socket_TCP_RTI; + + /** + * Thread listening for incoming TCP messages from the RTI. + */ + lf_thread_t RTI_socket_listener; + + /** + * Number of inbound physical connections to the federate. + * This can be either physical connections, or logical connections + * in the decentralized coordination, or both. + */ + size_t number_of_inbound_p2p_connections; + + /** + * Array of thread IDs for threads that listen for incoming messages. + * This is NULL if there are none and otherwise has size given by + * number_of_inbound_p2p_connections. + */ + lf_thread_t* inbound_socket_listeners; + + /** + * Number of outbound peer-to-peer connections from the federate. + * This can be either physical connections, or logical connections + * in the decentralized coordination, or both. 
+ */ + size_t number_of_outbound_p2p_connections; + + /** + * An array that holds the socket descriptors for inbound + * connections from each federate. The index will be the federate + * ID of the remote sending federate. This is initialized at startup + * to -1 and is set to a socket ID by lf_handle_p2p_connections_from_federates() + * when the socket is opened. + * + * @note There will not be an inbound socket unless a physical connection + * or a p2p logical connection (by setting the coordination target property + * to "distributed") is specified in the Lingua Franca program where this + * federate is the destination. Multiple incoming p2p connections from the + * same remote federate will use the same socket. + */ + int sockets_for_inbound_p2p_connections[NUMBER_OF_FEDERATES]; + + /** + * An array that holds the socket descriptors for outbound direct + * connections to each remote federate. The index will be the federate + * ID of the remote receiving federate. This is initialized at startup + * to -1 and is set to a socket ID by lf_connect_to_federate() + * when the socket is opened. + * + * @note This federate will not open an outbound socket unless a physical + * connection or a p2p logical connection (by setting the coordination target + * property to "distributed") is specified in the Lingua Franca + * program where this federate acts as the source. Multiple outgoing p2p + * connections to the same remote federate will use the same socket. + */ + int sockets_for_outbound_p2p_connections[NUMBER_OF_FEDERATES]; + + /** + * Thread ID for a thread that accepts sockets and then supervises + * listening to those sockets for incoming P2P (physical) connections. + */ + lf_thread_t inbound_p2p_handling_thread_id; + + /** + * A socket descriptor for the socket server of the federate. + * This is assigned in lf_create_server(). + * This socket is used to listen to incoming physical connections from + * remote federates. 
Once an incoming connection is accepted, the + * opened socket will be stored in + * federate_sockets_for_inbound_p2p_connections. + */ + int server_socket; + + /** + * The port used for the server socket to listen for messages from other federates. + * The federate informs the RTI of this port once it has created its socket server by + * sending an ADDRESS_AD message (@see rti.h). + */ + int server_port; + + /** + * Most recent tag advance grant (TAG) received from the RTI, or NEVER if none + * has been received. This variable should only be accessed while holding the + * mutex lock on the top-level environment. + */ + tag_t last_TAG; + + /** + * Indicates whether the last TAG received is provisional or an ordinary TAG. + * If the last TAG has been provisional, network port absent reactions must be inserted. + * This variable should only be accessed while holding the mutex lock. + */ + bool is_last_TAG_provisional; + + /** + * Indicator of whether this federate has upstream federates. + * The default value of false may be overridden in _lf_initialize_trigger_objects. + */ + bool has_upstream; + + /** + * Indicator of whether this federate has downstream federates. + * The default value of false may be overridden in _lf_initialize_trigger_objects. + */ + bool has_downstream; + + /** + * + */ + tag_t last_skipped_LTC; + + /** + * + */ + tag_t last_DNET; + + /** + * Used to prevent the federate from sending a REQUEST_STOP + * message if it has already received a stop request from the RTI. + * This variable should only be accessed while holding a mutex lock. + */ + bool received_stop_request_from_rti; + + /** + * A record of the most recently sent LTC (latest tag complete) message. + * In some situations, federates can send logical_tag_complete for + * the same tag twice or more in-a-row to the RTI. For example, when + * _lf_next() returns without advancing tag. 
To prevent overwhelming + * the RTI with extra messages, record the last sent logical tag + * complete message and check against it in lf_latest_tag_complete(). + * + * @note Here, the underlying assumption is that the TCP stack will + * deliver the Logical TAG Complete message to the RTI eventually + * if it is deliverable + */ + tag_t last_sent_LTC; + + /** + * A record of the most recently sent NET (next event tag) message. + */ + tag_t last_sent_NET; + + /** + * For use in federates with centralized coordination, the minimum + * time delay between a physical action within this federate and an + * output from this federate. This is NEVER if there is causal + * path from a physical action to any output. + */ + instant_t min_delay_from_physical_action_to_federate_output; + +#ifdef FEDERATED_DECENTRALIZED + /** + * Thread responsible for setting ports to absent by an STAA offset if they + * aren't already known. + */ + lf_thread_t staaSetter; +#endif } federate_instance_t; #ifdef FEDERATED_DECENTRALIZED typedef struct staa_t { - lf_action_base_t** actions; - size_t STAA; - size_t num_actions; + lf_action_base_t** actions; + size_t STAA; + size_t num_actions; } staa_t; #endif typedef struct federation_metadata_t { - const char* federation_id; - char* rti_host; - int rti_port; - char* rti_user; + const char* federation_id; + char* rti_host; + int rti_port; + char* rti_user; } federation_metadata_t; -typedef enum parse_rti_code_t { - SUCCESS, - INVALID_PORT, - INVALID_HOST, - INVALID_USER, - FAILED_TO_PARSE -} parse_rti_code_t; +typedef enum parse_rti_code_t { SUCCESS, INVALID_PORT, INVALID_HOST, INVALID_USER, FAILED_TO_PARSE } parse_rti_code_t; ////////////////////////////////////////////////////////////////////////////////// // Global variables @@ -237,8 +226,8 @@ extern lf_mutex_t lf_outbound_socket_mutex; extern lf_cond_t lf_port_status_changed; /** - * Condition variable for blocking on tag advance in -*/ + * Condition variable for blocking on tag advance in + */ 
extern lf_cond_t lf_current_tag_changed; ////////////////////////////////////////////////////////////////////////////////// @@ -246,7 +235,7 @@ extern lf_cond_t lf_current_tag_changed; /** * @brief Connect to the federate with the specified id. - * + * * The established connection will then be used in functions such as lf_send_tagged_message() * to send messages directly to the specified federate. * This function first sends an MSG_TYPE_ADDRESS_QUERY message to the RTI to obtain @@ -261,7 +250,7 @@ void lf_connect_to_federate(uint16_t); /** * @brief Connect to the RTI at the specified host and port. - * + * * This will return the socket descriptor for the connection. * If port_number is 0, then start at DEFAULT_PORT and increment * the port number on each attempt. If an attempt fails, wait CONNECT_RETRY_INTERVAL @@ -275,7 +264,7 @@ void lf_connect_to_rti(const char* hostname, int port_number); /** * @brief Create a server to listen to incoming P2P connections. - * + * * Such connections are used for physical connections or any connection if using * decentralized coordination. This function only handles the creation of the server socket. * The bound port for the server socket is then sent to the RTI by sending an @@ -293,7 +282,7 @@ void lf_create_server(int specified_port); /** * @brief Enqueue port absent reactions. - * + * * These reactions will send a MSG_TYPE_PORT_ABSENT * message to downstream federates if a given network output port is not present. * @param env The environment of the federate @@ -302,7 +291,7 @@ void lf_enqueue_port_absent_reactions(environment_t* env); /** * @brief Thread to accept connections from other federates. - * + * * This thread accepts connections from federates that send messages directly * to this one (not through the RTI). 
This thread starts a thread for * each accepted socket connection to read messages and, once it has opened all expected @@ -313,9 +302,9 @@ void* lf_handle_p2p_connections_from_federates(void*); /** * @brief Send a latest tag complete (LTC) signal to the RTI. - * + * * This avoids the send if an equal or later LTC has previously been sent. - * + * * This function assumes the caller holds the mutex lock * on the top-level environment. * @@ -343,11 +332,11 @@ void lf_reset_status_fields_on_input_port_triggers(); /** * @brief Send a message to another federate. - * + * * This function is used for physical connections * between federates. If the socket connection to the remote federate or the RTI has been broken, * then this returns -1 without sending. Otherwise, it returns 0. - * + * * This method assumes that the caller does not hold the lf_outbound_socket_mutex lock, * which it acquires to perform the send. * @@ -359,16 +348,12 @@ void lf_reset_status_fields_on_input_port_triggers(); * @param message The message. * @return 0 if the message has been sent, -1 otherwise. */ -int lf_send_message(int message_type, - unsigned short port, - unsigned short federate, - const char* next_destination_str, - size_t length, - unsigned char* message); +int lf_send_message(int message_type, unsigned short port, unsigned short federate, const char* next_destination_str, + size_t length, unsigned char* message); /** * @brief Send information about connections to the RTI. - * + * * This is a generated function that sends information about connections between this federate * and other federates where messages are routed through the RTI. Currently, this * only includes logical connections when the coordination is centralized. This @@ -379,7 +364,7 @@ void lf_send_neighbor_structure_to_RTI(int); /** * @brief Send a next event tag (NET) signal. 
- * + * * If this federate depends on upstream federates or sends data to downstream * federates, then send to the RTI a NET, which will give the tag of the * earliest event on the event queue, or, if the queue is empty, the timeout @@ -437,7 +422,7 @@ tag_t lf_send_next_event_tag(environment_t* env, tag_t tag, bool wait_for_reply) /** * @brief Send a port absent message. - * + * * This informs the remote federate that it will not receive a message with tag less than the * current tag of the specified environment delayed by the additional_delay. * @@ -446,15 +431,12 @@ tag_t lf_send_next_event_tag(environment_t* env, tag_t tag, bool wait_for_reply) * @param port_ID The ID of the receiving port. * @param fed_ID The fed ID of the receiving federate. */ -void lf_send_port_absent_to_federate( - environment_t* env, - interval_t additional_delay, - unsigned short port_ID, - unsigned short fed_ID); +void lf_send_port_absent_to_federate(environment_t* env, interval_t additional_delay, unsigned short port_ID, + unsigned short fed_ID); /** * @brief Send a MSG_TYPE_STOP_REQUEST message to the RTI. - * + * * The payload is the specified tag plus one microstep. If this federate has previously * received a stop request from the RTI, then do not send the message and * return 1. Return -1 if the socket is disconnected. Otherwise, return 0. @@ -464,7 +446,7 @@ int lf_send_stop_request_to_rti(tag_t stop_tag); /** * @brief Send a tagged message to the specified port of the specified federate. - * + * * The tag will be the current tag of the specified environment delayed by the specified additional_delay. * If the delayed tag falls after the timeout time, then the message is not sent and -1 is returned. * The caller can reuse or free the memory storing the message after this returns. @@ -494,15 +476,9 @@ int lf_send_stop_request_to_rti(tag_t stop_tag); * @param message The message. * @return 0 if the message has been sent, 1 otherwise. 
*/ -int lf_send_tagged_message( - environment_t* env, - interval_t additional_delay, - int message_type, - unsigned short port, - unsigned short federate, - const char* next_destination_str, - size_t length, - unsigned char* message); +int lf_send_tagged_message(environment_t* env, interval_t additional_delay, int message_type, unsigned short port, + unsigned short federate, const char* next_destination_str, size_t length, + unsigned char* message); /** * @brief Set the federation_id of this federate. @@ -510,17 +486,10 @@ int lf_send_tagged_message( */ void lf_set_federation_id(const char* fid); -/** - * @brief Set the trace object for this federate (used when tracing is enabled). - * - * @param The trace object. - */ -void lf_set_federation_trace_object(trace_t * trace); - #ifdef FEDERATED_DECENTRALIZED /** * @brief Spawn a thread to iterate through STAA structs. - * + * * This will set their associated ports absent * at an offset if the port is not present with a value by a certain physical time. */ @@ -529,7 +498,7 @@ void lf_spawn_staa_thread(void); /** * @brief Wait until inputs statuses are known up to and including the specified level. - * + * * Specifically, wait until the specified level is less that the max level allowed to * advance (MLAA). * @param env The environment (which should always be the top-level environment). @@ -539,7 +508,7 @@ void lf_stall_advance_level_federation(environment_t* env, size_t level); /** * @brief Synchronize the start with other federates via the RTI. - * + * * This assumes that a connection to the RTI is already made * and _lf_rti_socket_TCP is valid. It then sends the current logical * time to the RTI and waits for the RTI to respond with a specified @@ -549,7 +518,7 @@ void lf_synchronize_with_other_federates(); /** * @brief Update the max level allowed to advance (MLAA). 
- * + * * If the specified tag is greater than the current_tag of the top-level environment * (or equal and is_provisional is false), then set the MLAA to INT_MAX and return. * This removes any barriers on execution at the current tag due to network inputs. diff --git a/include/core/federated/network/net_common.h b/include/core/federated/network/net_common.h index 4ee5ab68f..11bbc47f3 100644 --- a/include/core/federated/network/net_common.h +++ b/include/core/federated/network/net_common.h @@ -61,7 +61,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * When the federation IDs match, the RTI will respond with an * MSG_TYPE_ACK. - * + * * The next message to the RTI will be a MSG_TYPE_NEIGHBOR_STRUCTURE message * that informs the RTI about connections between this federate and other * federates where messages are routed through the RTI. Currently, this only @@ -155,9 +155,9 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * each federate report a reading of its physical clock to the RTI on a * `MSG_TYPE_TIMESTAMP`. The RTI broadcasts the maximum of these readings plus * `DELAY_START` to all federates as the start time, again on a `MSG_TYPE_TIMESTAMP`. - * - * The next step depends on the coordination type. - * + * + * The next step depends on the coordination type. + * * Under centralized coordination, each federate will send a * `MSG_TYPE_NEXT_EVENT_TAG` to the RTI with the start tag. That is to say that * each federate has a valid event at the start tag (start time, 0) and it will @@ -168,10 +168,10 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * have to wait for a `MSG_TYPE_TAG_ADVANCE_GRANT` or a * `MSG_TYPE_PROVISIONAL_TAG_ADVANCE_GRANT` before it can advance to a * particular tag. - * + * * Under decentralized coordination, the coordination is governed by STA and * STAAs, as further explained in https://doi.org/10.48550/arXiv.2109.07771. 
- * + * * FIXME: Expand this. Explain port absent reactions. * */ @@ -327,7 +327,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * The message contains, in this order: * * One byte equal to MSG_TYPE_RTI_RESPONSE. * * Eight bytes for RTI's nonce. - * * 32 bytes for HMAC tag based on SHA256. + * * 32 bytes for HMAC tag based on SHA256. * The HMAC tag is composed of the following order: * * One byte equal to MSG_TYPE_RTI_RESPONSE. * * Two bytes (ushort) giving the received federate ID. @@ -382,11 +382,11 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define MSG_TYPE_RESIGN 4 -/** +/** * Byte identifying a timestamped message to forward to another federate. * The next two bytes will be the ID of the destination reactor port. * The next two bytes are the destination federate ID. - * The four bytes after that will be the length of the message. + * The four bytes after that will be the length of the message (as an unsigned 32-bit int). * The next eight bytes will be the timestamp of the message. * The next four bytes will be the microstep of the message. * The remaining bytes are the message. @@ -397,7 +397,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define MSG_TYPE_TAGGED_MESSAGE 5 -/** +/** * Byte identifying a next event tag (NET) message sent from a federate in * centralized coordination. The next eight bytes will be the timestamp. The * next four bytes will be the microstep. This message from a federate tells the @@ -413,7 +413,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define MSG_TYPE_NEXT_EVENT_TAG 6 -/** +/** * Byte identifying a time advance grant (TAG) sent by the RTI to a federate * in centralized coordination. 
This message is a promise by the RTI to the federate * that no later message sent to the federate will have a tag earlier than or @@ -423,7 +423,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define MSG_TYPE_TAG_ADVANCE_GRANT 7 -/** +/** * Byte identifying a provisional time advance grant (PTAG) sent by the RTI to a federate * in centralized coordination. This message is a promise by the RTI to the federate * that no later message sent to the federate will have a tag earlier than the tag @@ -433,7 +433,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define MSG_TYPE_PROVISIONAL_TAG_ADVANCE_GRANT 8 -/** +/** * Byte identifying a latest tag complete (LTC) message sent by a federate * to the RTI. * The next eight bytes will be the timestep of the completed tag. @@ -473,18 +473,19 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * that specifies the stop time on all other federates, then every federate * depends on every other federate and time cannot be advanced. * Hence, the actual stop time may be nondeterministic. - * + * * If, on the other hand, the federate requesting the stop is upstream of every * other federate, then it should be possible to respect its requested stop tag. 
*/ #define MSG_TYPE_STOP_REQUEST 10 #define MSG_TYPE_STOP_REQUEST_LENGTH (1 + sizeof(instant_t) + sizeof(microstep_t)) -#define ENCODE_STOP_REQUEST(buffer, time, microstep) do { \ - buffer[0] = MSG_TYPE_STOP_REQUEST; \ - encode_int64(time, &(buffer[1])); \ - assert(microstep >= 0); \ - encode_int32((int32_t)microstep, &(buffer[1 + sizeof(instant_t)])); \ -} while(0) +#define ENCODE_STOP_REQUEST(buffer, time, microstep) \ + do { \ + buffer[0] = MSG_TYPE_STOP_REQUEST; \ + encode_int64(time, &(buffer[1])); \ + assert(microstep >= 0); \ + encode_int32((int32_t)microstep, &(buffer[1 + sizeof(instant_t)])); \ + } while (0) /** * Byte indicating a federate's reply to a MSG_TYPE_STOP_REQUEST that was sent @@ -496,28 +497,30 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define MSG_TYPE_STOP_REQUEST_REPLY 11 #define MSG_TYPE_STOP_REQUEST_REPLY_LENGTH (1 + sizeof(instant_t) + sizeof(microstep_t)) -#define ENCODE_STOP_REQUEST_REPLY(buffer, time, microstep) do { \ - buffer[0] = MSG_TYPE_STOP_REQUEST_REPLY; \ - encode_int64(time, &(buffer[1])); \ - assert(microstep >= 0); \ - encode_int32((int32_t)microstep, &(buffer[1 + sizeof(instant_t)])); \ -} while(0) +#define ENCODE_STOP_REQUEST_REPLY(buffer, time, microstep) \ + do { \ + buffer[0] = MSG_TYPE_STOP_REQUEST_REPLY; \ + encode_int64(time, &(buffer[1])); \ + assert(microstep >= 0); \ + encode_int32((int32_t)microstep, &(buffer[1 + sizeof(instant_t)])); \ + } while (0) /** * Byte sent by the RTI indicating that the stop request from some federate * has been granted. The payload is the tag at which all federates have * agreed that they can stop. - * The next 8 bytes will be the time at which the federates will stop. * + * The next 8 bytes will be the time at which the federates will stop. * * The next 4 bytes will be the microstep at which the federates will stop.. 
*/ #define MSG_TYPE_STOP_GRANTED 12 #define MSG_TYPE_STOP_GRANTED_LENGTH (1 + sizeof(instant_t) + sizeof(microstep_t)) -#define ENCODE_STOP_GRANTED(buffer, time, microstep) do { \ - buffer[0] = MSG_TYPE_STOP_GRANTED; \ - encode_int64(time, &(buffer[1])); \ - assert(microstep >= 0); \ - encode_int32((int32_t)microstep, &(buffer[1 + sizeof(instant_t)])); \ -} while(0) +#define ENCODE_STOP_GRANTED(buffer, time, microstep) \ + do { \ + buffer[0] = MSG_TYPE_STOP_GRANTED; \ + encode_int64(time, &(buffer[1])); \ + assert(microstep >= 0); \ + encode_int32((int32_t)microstep, &(buffer[1 + sizeof(instant_t)])); \ + } while (0) /////////// End of lf_request_stop() messages //////////////// @@ -525,12 +528,20 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * Byte identifying a address query message, sent by a federate to RTI * to ask for another federate's address and port number. * The next two bytes are the other federate's ID. - * The reply from the RTI will a port number (an int32_t), which is -1 + */ +#define MSG_TYPE_ADDRESS_QUERY 13 + +/** + * Byte identifying a address query message reply, sent by a RTI to a federate + * to reply with a remote federate's address and port number. + * The reply from the RTI will be a port number (an int32_t), which is -1 * if the RTI does not know yet (it has not received MSG_TYPE_ADDRESS_ADVERTISEMENT from * the other federate), followed by the IP address of the other * federate (an IPV4 address, which has length INET_ADDRSTRLEN). + * The next four bytes (or sizeof(int32_t)) will be the port number. + * The next four bytes (or sizeof(in_addr), which is uint32_t) will be the ip address. */ -#define MSG_TYPE_ADDRESS_QUERY 13 +#define MSG_TYPE_ADDRESS_QUERY_REPLY 14 /** * Byte identifying a message advertising the port for the TCP connection server @@ -540,7 +551,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* The sending federate will not wait for a response from the RTI and assumes its * request will be processed eventually by the RTI. */ -#define MSG_TYPE_ADDRESS_ADVERTISEMENT 14 +#define MSG_TYPE_ADDRESS_ADVERTISEMENT 15 /** * Byte identifying a first message that is sent by a federate directly to another federate @@ -551,25 +562,25 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * federate does not expect this federate or federation to connect, it will respond * instead with MSG_TYPE_REJECT. */ -#define MSG_TYPE_P2P_SENDING_FED_ID 15 +#define MSG_TYPE_P2P_SENDING_FED_ID 16 /** * Byte identifying a message to send directly to another federate. - * + * * The next two bytes will be the ID of the destination port. * The next two bytes are the destination federate ID. This is checked against * the _lf_my_fed_id of the receiving federate to ensure the message was intended for * The four bytes after will be the length of the message. * The ramaining bytes are the message. */ -#define MSG_TYPE_P2P_MESSAGE 16 +#define MSG_TYPE_P2P_MESSAGE 17 /** * Byte identifying a timestamped message to send directly to another federate. * This is a variant of @see MSG_TYPE_TAGGED_MESSAGE that is used in P2P connections between * federates. Having a separate message type for P2P connections between federates * will be useful in preventing crosstalk. - * + * * The next two bytes will be the ID of the destination port. * The next two bytes are the destination federate ID. This is checked against * the _lf_my_fed_id of the receiving federate to ensure the message was intended for @@ -579,7 +590,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * The next four bytes will be the microstep of the sender. * The ramaining bytes are the message. 
*/ -#define MSG_TYPE_P2P_TAGGED_MESSAGE 17 +#define MSG_TYPE_P2P_TAGGED_MESSAGE 18 //////////////////////////////////////////////// /** @@ -616,11 +627,10 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define MSG_TYPE_CLOCK_SYNC_CODED_PROBE 22 - /** * A port absent message, informing the receiver that a given port * will not have event for the current logical time. - * + * * The next 2 bytes is the port id. * The next 2 bytes will be the federate id of the destination federate. * This is needed for the centralized coordination so that the RTI knows where @@ -630,21 +640,19 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define MSG_TYPE_PORT_ABSENT 23 - - /** * A message that informs the RTI about connections between this federate and * other federates where messages are routed through the RTI. Currently, this * only includes logical connections when the coordination is centralized. This * information is needed for the RTI to perform the centralized coordination. - * + * * @note Only information about the immediate neighbors is required. The RTI can * transitively obtain the structure of the federation based on each federate's * immediate neighbor information. * - * The next 4 bytes is the number of upstream federates. + * The next 4 bytes is the number of upstream federates. * The next 4 bytes is the number of downstream federates. - * + * * Depending on the first four bytes, the next bytes are pairs of (fed ID (2 * bytes), delay (8 bytes)) for this federate's connection to upstream federates * (by direct connection). The delay is the minimum "after" delay of all diff --git a/include/core/federated/network/net_util.h b/include/core/federated/network/net_util.h index 6346e21d3..555c8df89 100644 --- a/include/core/federated/network/net_util.h +++ b/include/core/federated/network/net_util.h @@ -48,8 +48,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include #include -#include "../../platform.h" -#include "../../tag.h" +#include "low_level_platform.h" +#include "tag.h" #define NUM_SOCKET_RETRIES 10 #define DELAY_BETWEEN_SOCKET_RETRIES MSEC(100) @@ -57,7 +57,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define HOST_LITTLE_ENDIAN 1 #define HOST_BIG_ENDIAN 2 -/** +/** * Return true (1) if the host is big endian. Otherwise, * return false. */ @@ -122,12 +122,8 @@ int read_from_socket_close_on_error(int* socket, size_t num_bytes, unsigned char * @return The number of bytes read, or 0 if an EOF is received, or * a negative number for an error. */ -void read_from_socket_fail_on_error( - int* socket, - size_t num_bytes, - unsigned char* buffer, - lf_mutex_t* mutex, - char* format, ...); +void read_from_socket_fail_on_error(int* socket, size_t num_bytes, unsigned char* buffer, lf_mutex_t* mutex, + char* format, ...); /** * Without blocking, peek at the specified socket and, if there is @@ -181,12 +177,8 @@ int write_to_socket_close_on_error(int* socket, size_t num_bytes, unsigned char* * fields that will be used to fill the format string as in printf, or NULL * to print a generic error message. */ -void write_to_socket_fail_on_error( - int* socket, - size_t num_bytes, - unsigned char* buffer, - lf_mutex_t* mutex, - char* format, ...); +void write_to_socket_fail_on_error(int* socket, size_t num_bytes, unsigned char* buffer, lf_mutex_t* mutex, + char* format, ...); #endif // FEDERATED @@ -199,7 +191,7 @@ void write_to_socket_fail_on_error( */ void encode_int64(int64_t data, unsigned char* buffer); -/** +/** * Write the specified data as a sequence of bytes starting * at the specified address. This encodes the data in little-endian * order (lowest order byte first). This works for int32_t. 
@@ -217,7 +209,7 @@ void encode_int32(int32_t data, unsigned char* buffer); */ void encode_uint32(uint32_t data, unsigned char* buffer); -/** +/** * Write the specified data as a sequence of bytes starting * at the specified address. This encodes the data in little-endian * order (lowest order byte first). @@ -226,7 +218,7 @@ void encode_uint32(uint32_t data, unsigned char* buffer); */ void encode_uint16(uint16_t data, unsigned char* buffer); -/** +/** * If this host is little endian, then reverse the order of * the bytes of the argument. Otherwise, return the argument * unchanged. This can be used to convert the argument to @@ -239,7 +231,7 @@ void encode_uint16(uint16_t data, unsigned char* buffer); */ int32_t swap_bytes_if_big_endian_int32(int32_t src); -/** +/** * If this host is little endian, then reverse the order of * the bytes of the argument. Otherwise, return the argument * unchanged. This can be used to convert the argument to @@ -252,7 +244,7 @@ int32_t swap_bytes_if_big_endian_int32(int32_t src); */ int64_t swap_bytes_if_big_endian_int64(int64_t src); -/** +/** * If this host is little endian, then reverse the order of * the bytes of the argument. Otherwise, return the argument * unchanged. This can be used to convert the argument to @@ -277,7 +269,7 @@ int32_t extract_int32(unsigned char* bytes); */ int64_t extract_int64(unsigned char* bytes); -/** +/** * Extract an uint16_t from the specified byte sequence. * This will swap the order of the bytes if this machine is big endian. * @param bytes The address of the start of the sequence of bytes. @@ -296,12 +288,7 @@ uint16_t extract_uint16(unsigned char* bytes); * @param federate_id The place to put the federate ID. * @param length The place to put the length. 
*/ -void extract_header( - unsigned char* buffer, - uint16_t* port_id, - uint16_t* federate_id, - size_t* length -); +void extract_header(unsigned char* buffer, uint16_t* port_id, uint16_t* federate_id, size_t* length); /** * Extract the timed header information for timed messages between @@ -315,13 +302,7 @@ void extract_header( * @param length The place to put the length. * @param tag The place to put the tag. */ -void extract_timed_header( - unsigned char* buffer, - uint16_t* port_id, - uint16_t* federate_id, - size_t* length, - tag_t* tag -); +void extract_timed_header(unsigned char* buffer, uint16_t* port_id, uint16_t* federate_id, size_t* length, tag_t* tag); /** * Extract tag information from buffer. @@ -332,9 +313,7 @@ void extract_timed_header( * @param buffer The buffer to read from. * @return The extracted tag. */ -tag_t extract_tag( - unsigned char* buffer -); +tag_t extract_tag(unsigned char* buffer); /** * Encode tag information into buffer. @@ -344,21 +323,18 @@ tag_t extract_tag( * @param buffer The buffer to encode into. * @param tag The tag to encode into 'buffer'. */ -void encode_tag( - unsigned char* buffer, - tag_t tag -); +void encode_tag(unsigned char* buffer, tag_t tag); /** * A helper struct for passing rti_addr information between lf_parse_rti_addr and extract_rti_addr_info */ typedef struct rti_addr_info_t { - char rti_host_str[256]; - char rti_port_str[6]; - char rti_user_str[256]; - bool has_host; - bool has_port; - bool has_user; + char rti_host_str[256]; + char rti_port_str[6]; + char rti_user_str[256]; + bool has_host; + bool has_port; + bool has_user; } rti_addr_info_t; /** @@ -389,14 +365,15 @@ bool validate_user(const char* user); * Extract one match group from the rti_addr regex . * @return true if SUCCESS, else false. 
*/ -bool extract_match_group(const char* rti_addr, char* dest, regmatch_t group, - int max_len, int min_len, const char* err_msg); +bool extract_match_group(const char* rti_addr, char* dest, regmatch_t group, int max_len, int min_len, + const char* err_msg); /** * Extract match groups from the rti_addr regex. * @return true if success, else false. */ -bool extract_match_groups(const char* rti_addr, char** rti_addr_strs, bool** rti_addr_flags, regmatch_t* group_array, int* gids, int* max_lens, int* min_lens, const char** err_msgs); +bool extract_match_groups(const char* rti_addr, char** rti_addr_strs, bool** rti_addr_flags, regmatch_t* group_array, + int* gids, int* max_lens, int* min_lens, const char** err_msgs); /** * Extract the host, port and user from rti_addr. diff --git a/include/core/lf_token.h b/include/core/lf_token.h index 1c6207f06..f60fd18b5 100644 --- a/include/core/lf_token.h +++ b/include/core/lf_token.h @@ -8,7 +8,7 @@ * This header file supports token objects, which are reference-counted wrappers * around values that are carried by events scheduled on the event queue and held * in ports and actions when the type is not a primitive type. - * + * * A token has type lf_token_t. It points to a value, a dynamically allocated * chunk of memory on the heap. It has a length field, which enables its value * to be interpreted as an array of the given length. It has a pointer to type @@ -18,13 +18,13 @@ * and copy constructor. These must be specified if the payload (value) is a complex * struct that cannot be freed by a simple call to free() or copied by a call * to memcpy(). - * + * * An instance of a port struct and trigger_t struct (an action or an input port) * can be cast to token_template_t, which has a token_type_t field called type * and a pointer to a token (which may be NULL). The same instance can also be * cast to token_type_t, which has an element_size field and (possibly) function * pointers to a destructor and a copy constructor. 
- * + * * A "template token" is one pointed to by a token_template_t (an action or a port). * This template token ensures that port an action values persist until they are * overwritten, and hence they can be read at a tag even if not present. @@ -50,10 +50,10 @@ struct environment_t; /** Possible return values for _lf_done_using and _lf_free_token. */ typedef enum token_freed { - NOT_FREED = 0, // Nothing was freed. - VALUE_FREED, // The value (payload) was freed. - TOKEN_FREED, // The token was freed but not the value. - TOKEN_AND_VALUE_FREED // Both were freed + NOT_FREED = 0, // Nothing was freed. + VALUE_FREED, // The value (payload) was freed. + TOKEN_FREED, // The token was freed but not the value. + TOKEN_AND_VALUE_FREED // Both were freed } token_freed; ////////////////////////////////////////////////////////// @@ -65,12 +65,12 @@ typedef enum token_freed { * token types, which carry dynamically allocated data. */ typedef struct token_type_t { - /** Size of the struct or array element. */ - size_t element_size; - /** The destructor or NULL to use the default free(). */ - void (*destructor) (void* value); - /** The copy constructor or NULL to use memcpy. */ - void* (*copy_constructor) (void* value); + /** Size of the struct or array element. */ + size_t element_size; + /** The destructor or NULL to use the default free(). */ + void (*destructor)(void* value); + /** The copy constructor or NULL to use memcpy. */ + void* (*copy_constructor)(void* value); } token_type_t; /** @@ -92,25 +92,25 @@ typedef struct token_type_t { * in the preamble that masks the trailing *. */ typedef struct lf_token_t { - /** Pointer to dynamically allocated memory containing a message. */ - void* value; - /** Length of the array or 1 for a non-array. */ - size_t length; - /** Pointer to the port or action defining the type of the data carried. */ - token_type_t* type; - /** The number of times this token is on the event queue. 
*/ - size_t ref_count; - /** Convenience for constructing a temporary list of tokens. */ - struct lf_token_t* next; + /** Pointer to dynamically allocated memory containing a message. */ + void* value; + /** Length of the array or 1 for a non-array. */ + size_t length; + /** Pointer to the port or action defining the type of the data carried. */ + token_type_t* type; + /** The number of times this token is on the event queue. */ + size_t ref_count; + /** Convenience for constructing a temporary list of tokens. */ + struct lf_token_t* next; } lf_token_t; /** * A record of the subset of channels of a multiport that have present inputs. */ typedef struct lf_sparse_io_record_t { - int size; // -1 if overflowed. 0 if empty. - size_t capacity; // Max number of writes to be considered sparse. - size_t present_channels[]; // Array of channel indices that are present. + int size; // -1 if overflowed. 0 if empty. + size_t capacity; // Max number of writes to be considered sparse. + size_t present_channels[]; // Array of channel indices that are present. } lf_sparse_io_record_t; /** @@ -119,10 +119,10 @@ typedef struct lf_sparse_io_record_t { * so that they can be cast to this struct to access these fields in a uniform way. */ typedef struct token_template_t { - /** Instances of this struct can be cast to token_type_t. */ - token_type_t type; - lf_token_t* token; - size_t length; // The token's length, for convenient access in reactions. + /** Instances of this struct can be cast to token_type_t. */ + token_type_t type; + lf_token_t* token; + size_t length; // The token's length, for convenient access in reactions. } token_template_t; // Forward declaration for self_base_t @@ -138,14 +138,14 @@ typedef struct self_base_t self_base_t; * CPortGenerator.java generateAuxiliaryStruct(). */ typedef struct lf_port_base_t { - token_template_t tmplt; // Type and token information (template is a C++ keyword). 
- bool is_present; - lf_sparse_io_record_t* sparse_record; // NULL if there is no sparse record. - int destination_channel; // -1 if there is no destination. - int num_destinations; // The number of destination reactors this port writes to. - self_base_t* source_reactor; // Pointer to the self struct of the reactor that provides data to this port. - // If this is an input, that reactor will normally be the container of the - // output port that sends it data. + token_template_t tmplt; // Type and token information (template is a C++ keyword). + bool is_present; + lf_sparse_io_record_t* sparse_record; // NULL if there is no sparse record. + int destination_channel; // -1 if there is no destination. + int num_destinations; // The number of destination reactors this port writes to. + self_base_t* source_reactor; // Pointer to the self struct of the reactor that provides data to this port. + // If this is an input, that reactor will normally be the container of the + // output port that sends it data. } lf_port_base_t; ////////////////////////////////////////////////////////// @@ -193,7 +193,7 @@ extern int _lf_count_token_allocations; * @param port_or_action A port or action. * @param val The value. * @param len The length, or 1 if it not an array. - * @return A pointer to a lf_token_t struct. + * @return A pointer to a lf_token_t struct. */ lf_token_t* lf_new_token(void* port_or_action, void* val, size_t len); @@ -218,7 +218,7 @@ lf_token_t* lf_writable_copy(lf_port_base_t* port); /** * @brief Free the specified token, if appropriate. - * If the reference count is greater than 0, then do not free + * If the reference count is greater than 0, then do not free * anything. Otherwise, the token value (payload) will be freed, * if there is one. Then the token itself will be freed. * The freed token will be put on the recycling bin unless that @@ -243,7 +243,7 @@ token_freed _lf_free_token(lf_token_t* token); * @param value The value, or NULL to have no value. 
* @param length The array length of the value, 1 to not be an array, * or 0 to have no value. - * @return lf_token_t* + * @return lf_token_t* */ lf_token_t* _lf_new_token(token_type_t* type, void* value, size_t length); diff --git a/include/core/lf_types.h b/include/core/lf_types.h index 023172545..5598cf820 100644 --- a/include/core/lf_types.h +++ b/include/core/lf_types.h @@ -7,7 +7,7 @@ * @copyright (c) 2020-2024, The University of California at Berkeley. * License: BSD 2-clause * @brief Type definitions that are widely used across different parts of the runtime. - * + * * IMPORTANT: Many of the structs defined here require matching layouts * and, if changed, will require changes in the code generator. * See @@ -55,14 +55,14 @@ typedef unsigned short int ushort; * 2- Value of horizon is not (FOREVER, 0) */ typedef struct _lf_tag_advancement_barrier { - int requestors; // Used to indicate the number of - // requestors that have asked - // for a barrier to be raised - // on tag. - tag_t horizon; // If semaphore is larger than 0 - // then the runtime should not - // advance its tag beyond the - // horizon. + int requestors; // Used to indicate the number of + // requestors that have asked + // for a barrier to be raised + // on tag. + tag_t horizon; // If semaphore is larger than 0 + // then the runtime should not + // advance its tag beyond the + // horizon. } _lf_tag_advancement_barrier; /** @@ -77,7 +77,7 @@ typedef struct _lf_tag_advancement_barrier { * preceding event has already been popped off the event queue, the * `defer` policy is fallen back to. */ -typedef enum {defer, drop, replace} lf_spacing_policy_t; +typedef enum { defer, drop, replace } lf_spacing_policy_t; /** * Status of a given port at a given logical time. @@ -91,7 +91,7 @@ typedef enum {defer, drop, replace} lf_spacing_policy_t; * respectively because for non-network ports, the status can either be present * or absent (no possibility of unknown). 
*/ -typedef enum {absent = false, present = true, unknown} port_status_t; +typedef enum { absent = false, present = true, unknown } port_status_t; /** * Status of a given reaction at a given logical time. @@ -105,7 +105,7 @@ typedef enum {absent = false, present = true, unknown} port_status_t; * with default values using calloc. * FIXME: The running state does not seem to be read. */ -typedef enum {inactive = 0, queued, running} reaction_status_t; +typedef enum { inactive = 0, queued, running } reaction_status_t; /** * Handles for scheduled triggers. These handles are returned @@ -135,7 +135,7 @@ typedef pqueue_pri_t index_t; * these reaction functions is a pointer to the self struct * for the reactor. */ -typedef void(*reaction_function_t)(void*); +typedef void (*reaction_function_t)(void*); /** Trigger struct representing an output, timer, action, or input. See below. */ typedef struct trigger_t trigger_t; @@ -152,40 +152,42 @@ typedef struct trigger_t trigger_t; */ typedef struct reaction_t reaction_t; struct reaction_t { - reaction_function_t function; // The reaction function. COMMON. - void* self; // Pointer to a struct with the reactor's state. INSTANCE. - int number; // The number of the reaction in the reactor (0 is the first reaction). - index_t index; // Inverse priority determined by dependency analysis. INSTANCE. - // Binary encoding of the branches that this reaction has upstream in the dependency graph. INSTANCE. - unsigned long long chain_id; - size_t pos; // Current position in the priority queue. RUNTIME. - reaction_t* last_enabling_reaction; // The last enabling reaction, or NULL if there is none. Used for optimization. INSTANCE. - size_t num_outputs; // Number of outputs that may possibly be produced by this function. COMMON. - bool** output_produced; // Array of pointers to booleans indicating whether outputs were produced. COMMON. - int* triggered_sizes; // Pointer to array of ints with number of triggers per output. INSTANCE. 
- trigger_t ***triggers; // Array of pointers to arrays of pointers to triggers triggered by each output. INSTANCE. - reaction_status_t status; // Indicator of whether the reaction is inactive, queued, or running. RUNTIME. - interval_t deadline; // Deadline relative to the time stamp for invocation of the reaction. INSTANCE. - bool is_STP_violated; // Indicator of STP violation in one of the input triggers to this reaction. default = false. - // Value of True indicates to the runtime that this reaction contains trigger(s) - // that are triggered at a later logical time that was originally anticipated. - // Currently, this is only possible if logical - // connections are used in a decentralized federated - // execution. COMMON. - reaction_function_t deadline_violation_handler; // Deadline violation handler. COMMON. - reaction_function_t STP_handler; // STP handler. Invoked when a trigger to this reaction - // was triggered at a later logical time than originally - // intended. Currently, this is only possible if logical - // connections are used in a decentralized federated - // execution. COMMON. - bool is_an_input_reaction; // Indicates whether this reaction is a network input reaction of a federate. Default is false. - size_t worker_affinity; // The worker number of the thread that scheduled this reaction. Used - // as a suggestion to the scheduler. - const char* name; // If logging is set to LOG or higher, then this will - // point to the full name of the reactor followed by - // the reaction number. - reactor_mode_t* mode; // The enclosing mode of this reaction (if exists). - // If enclosed in multiple, this will point to the innermost mode. + reaction_function_t function; // The reaction function. COMMON. + void* self; // Pointer to a struct with the reactor's state. INSTANCE. + int number; // The number of the reaction in the reactor (0 is the first reaction). + index_t index; // Inverse priority determined by dependency analysis. INSTANCE. 
+ // Binary encoding of the branches that this reaction has upstream in the dependency graph. INSTANCE. + unsigned long long chain_id; + size_t pos; // Current position in the priority queue. RUNTIME. + reaction_t* + last_enabling_reaction; // The last enabling reaction, or NULL if there is none. Used for optimization. INSTANCE. + size_t num_outputs; // Number of outputs that may possibly be produced by this function. COMMON. + bool** output_produced; // Array of pointers to booleans indicating whether outputs were produced. COMMON. + int* triggered_sizes; // Pointer to array of ints with number of triggers per output. INSTANCE. + trigger_t*** triggers; // Array of pointers to arrays of pointers to triggers triggered by each output. INSTANCE. + reaction_status_t status; // Indicator of whether the reaction is inactive, queued, or running. RUNTIME. + interval_t deadline; // Deadline relative to the time stamp for invocation of the reaction. INSTANCE. + bool is_STP_violated; // Indicator of STP violation in one of the input triggers to this reaction. default = false. + // Value of True indicates to the runtime that this reaction contains trigger(s) + // that are triggered at a later logical time that was originally anticipated. + // Currently, this is only possible if logical + // connections are used in a decentralized federated + // execution. COMMON. + reaction_function_t deadline_violation_handler; // Deadline violation handler. COMMON. + reaction_function_t STP_handler; // STP handler. Invoked when a trigger to this reaction + // was triggered at a later logical time than originally + // intended. Currently, this is only possible if logical + // connections are used in a decentralized federated + // execution. COMMON. + bool is_an_input_reaction; // Indicates whether this reaction is a network input reaction of a federate. Default is + // false. + size_t worker_affinity; // The worker number of the thread that scheduled this reaction. 
Used + // as a suggestion to the scheduler. + const char* name; // If logging is set to LOG or higher, then this will + // point to the full name of the reactor followed by + // the reaction number. + reactor_mode_t* mode; // The enclosing mode of this reaction (if exists). + // If enclosed in multiple, this will point to the innermost mode. }; /** Typedef for event_t struct, used for storing activation records. */ @@ -193,58 +195,59 @@ typedef struct event_t event_t; /** Event activation record to push onto the event queue. */ struct event_t { - instant_t time; // Time of release. - trigger_t* trigger; // Associated trigger, NULL if this is a dummy event. - size_t pos; // Position in the priority queue. - lf_token_t* token; // Pointer to the token wrapping the value. - bool is_dummy; // Flag to indicate whether this event is merely a placeholder or an actual event. + instant_t time; // Time of release. + trigger_t* trigger; // Associated trigger, NULL if this is a dummy event. + size_t pos; // Position in the priority queue. + lf_token_t* token; // Pointer to the token wrapping the value. + bool is_dummy; // Flag to indicate whether this event is merely a placeholder or an actual event. #ifdef FEDERATED - tag_t intended_tag; // The intended tag. + tag_t intended_tag; // The intended tag. #endif - event_t* next; // Pointer to the next event lined up in superdense time. + event_t* next; // Pointer to the next event lined up in superdense time. }; /** * Trigger struct representing an output, timer, action, or input. */ struct trigger_t { - token_template_t tmplt; // Type and token information (template is a C++ keyword). - reaction_t** reactions; // Array of pointers to reactions sensitive to this trigger. - int number_of_reactions; // Number of reactions sensitive to this trigger. - bool is_timer; // True if this is a timer (a special kind of action), false otherwise. - interval_t offset; // Minimum delay of an action. For a timer, this is also the maximum delay. 
- interval_t period; // Minimum interarrival time of an action. For a timer, this is also the maximal interarrival time. - bool is_physical; // Indicator that this denotes a physical action. - tag_t last_tag; // Tag of the last event that was scheduled for this action. - // This is only used for actions and will otherwise be NEVER. - lf_spacing_policy_t policy; // Indicates which policy to use when an event is scheduled too early. - port_status_t status; // Determines the status of the port at the current logical time. Therefore, this + token_template_t tmplt; // Type and token information (template is a C++ keyword). + reaction_t** reactions; // Array of pointers to reactions sensitive to this trigger. + int number_of_reactions; // Number of reactions sensitive to this trigger. + bool is_timer; // True if this is a timer (a special kind of action), false otherwise. + interval_t offset; // Minimum delay of an action. For a timer, this is also the maximum delay. + interval_t period; // Minimum interarrival time of an action. For a timer, this is also the maximal interarrival time. + bool is_physical; // Indicator that this denotes a physical action. + tag_t last_tag; // Tag of the last event that was scheduled for this action. + // This is only used for actions and will otherwise be NEVER. + lf_spacing_policy_t policy; // Indicates which policy to use when an event is scheduled too early. + port_status_t status; // Determines the status of the port at the current logical time. Therefore, this // value needs to be reset at the beginning of each logical time. // - // This status is especially needed for the distributed execution because the receiver logic will need - // to know what it should do if it receives a message with 'intended tag = current tag' from another - // federate. 
- // - If status is 'unknown', it means that the federate has still no idea what the status of - // this port is and thus has refrained from executing any reaction that has that port as its input. - // This means that the receiver logic can directly inject the triggered reactions into the reaction - // queue at the current logical time. - // - If the status is absent, it means that the federate has assumed that the port is 'absent' - // for the current logical time. Therefore, receiving a message with 'intended tag = current tag' - // is an error that should be handled, for example, as a violation of the STP offset in the decentralized - // coordination. - // - Finally, if status is 'present', then this is an error since multiple - // downstream messages have been produced for the same port for the same logical time. - reactor_mode_t* mode; // The enclosing mode of this reaction (if exists). - // If enclosed in multiple, this will point to the innermost mode. + // This status is especially needed for the distributed execution because the receiver logic + // will need to know what it should do if it receives a message with 'intended tag = current + // tag' from another federate. + // - If status is 'unknown', it means that the federate has still no idea what the status of + // this port is and thus has refrained from executing any reaction that has that port as its + // input. This means that the receiver logic can directly inject the triggered reactions into + // the reaction queue at the current logical time. + // - If the status is absent, it means that the federate has assumed that the port is 'absent' + // for the current logical time. Therefore, receiving a message with 'intended tag = current + // tag' is an error that should be handled, for example, as a violation of the STP offset in + // the decentralized coordination. 
+ // - Finally, if status is 'present', then this is an error since multiple + // downstream messages have been produced for the same port for the same logical time. + reactor_mode_t* mode; // The enclosing mode of this reaction (if exists). + // If enclosed in multiple, this will point to the innermost mode. #ifdef FEDERATED - tag_t last_known_status_tag; // Last known status of the port, either via a timed message, a port absent, or a - // TAG from the RTI. - tag_t intended_tag; // The amount of discrepency in logical time between the original intended - // trigger time of this trigger and the actual trigger time. This currently - // can only happen when logical connections are used using a decentralized coordination - // mechanism (@see https://github.com/icyphy/lingua-franca/wiki/Logical-Connections). - instant_t physical_time_of_arrival; // The physical time at which the message has been received on the network according to the local clock. - // Note: The physical_time_of_arrival is only passed down one level of the hierarchy. Default: NEVER. + tag_t last_known_status_tag; // Last known status of the port, either via a timed message, a port absent, or a + // TAG from the RTI. + tag_t intended_tag; // The amount of discrepency in logical time between the original intended + // trigger time of this trigger and the actual trigger time. This currently + // can only happen when logical connections are used using a decentralized coordination + // mechanism (@see https://github.com/icyphy/lingua-franca/wiki/Logical-Connections). + instant_t physical_time_of_arrival; // The physical time at which the message has been received on the network + // according to the local clock. Note: The physical_time_of_arrival is only passed + // down one level of the hierarchy. Default: NEVER. #endif }; @@ -256,15 +259,15 @@ struct trigger_t { * pointer to allocated memory, rather than directly to the allocated memory. 
*/ typedef struct allocation_record_t { - void* allocated; - struct allocation_record_t *next; + void* allocated; + struct allocation_record_t* next; } allocation_record_t; typedef struct environment_t environment_t; /** * @brief The base type for all reactor self structs. - * + * * The first element of every self struct defined in generated code * will be a pointer to an allocation record, which is either NULL * or the head of a NULL-terminated linked list of allocation records. @@ -275,15 +278,15 @@ typedef struct environment_t environment_t; * it also records the current mode. */ typedef struct self_base_t { - struct allocation_record_t *allocations; - struct reaction_t *executing_reaction; // The currently executing reaction of the reactor. - environment_t * environment; + struct allocation_record_t* allocations; + struct reaction_t* executing_reaction; // The currently executing reaction of the reactor. + environment_t* environment; #if !defined(LF_SINGLE_THREADED) - void* reactor_mutex; // If not null, this is expected to point to an lf_mutex_t. - // It is not declared as such to avoid a dependence on platform.h. + void* reactor_mutex; // If not null, this is expected to point to an lf_mutex_t. + // It is not declared as such to avoid a dependence on platform.h. #endif #if defined(MODAL_REACTORS) - reactor_mode_state_t _lf__mode_state; // The current mode (for modal models). + reactor_mode_state_t _lf__mode_state; // The current mode (for modal models). #endif } self_base_t; @@ -294,31 +297,31 @@ typedef struct self_base_t { * to token_template_t, or to token_type_t to access these common fields. */ typedef struct { - token_template_t tmplt; // Type and token information (template is a C++ keyword). - bool is_present; - trigger_t* trigger; // THIS HAS TO MATCH lf_action_internal_t - self_base_t* parent; - bool has_value; + token_template_t tmplt; // Type and token information (template is a C++ keyword). 
+ bool is_present; + trigger_t* trigger; // THIS HAS TO MATCH lf_action_internal_t + self_base_t* parent; + bool has_value; } lf_action_base_t; /** * Internal part of the action structs. */ typedef struct { - trigger_t* trigger; + trigger_t* trigger; } lf_action_internal_t; /** - * @brief Internal part of the port structs. - * HAS TO MATCH lf_port_base_t after tmplt and is_present. - */ + * @brief Internal part of the port structs. + * HAS TO MATCH lf_port_base_t after tmplt and is_present. + */ typedef struct { - lf_sparse_io_record_t* sparse_record; // NULL if there is no sparse record. - int destination_channel; // -1 if there is no destination. - int num_destinations; // The number of destination reactors this port writes to. - self_base_t* source_reactor; // Pointer to the self struct of the reactor that provides data to this port. - // If this is an input, that reactor will normally be the container of the - // output port that sends it data. + lf_sparse_io_record_t* sparse_record; // NULL if there is no sparse record. + int destination_channel; // -1 if there is no destination. + int num_destinations; // The number of destination reactors this port writes to. + self_base_t* source_reactor; // Pointer to the self struct of the reactor that provides data to this port. + // If this is an input, that reactor will normally be the container of the + // output port that sends it data. } lf_port_internal_t; #endif diff --git a/include/core/mixed_radix.h b/include/core/mixed_radix.h index 0754ba32a..a597dfa0c 100644 --- a/include/core/mixed_radix.h +++ b/include/core/mixed_radix.h @@ -27,12 +27,12 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. @section DESCRIPTION @brief Header file for permuted mixed-radix numbers used in Lingua Franca programs. - + A mixed radix number is a number representation where each digit can have a distinct radix. The radixes are given by a list of numbers, r0, r1, ... 
, rn, where r0 is the radix of the lowest-order digit and rn is the radix of the highest order digit that has a specified radix. - + A permuted mixed radix number is a mixed radix number that, when incremented, increments the digits in the order given by the permutation matrix. For an ordinary mixed radix number, the permutation matrix is @@ -42,14 +42,14 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. set to 0 and the d1 digit will be incremented. If it overflows, the next digit is incremented. If the last digit overflows, then the number wraps around so that all digits become zero. - + The functions defined here are pretty limited and assume that the caller is well behaved. These functions are used in code generated by the Lingua-Franca compiler and are not intended to be used by end users. For example, there is very limited error checking and misuse of the functions is likely to result in assertion errors and/or segmentation faults that will cause the program to exit. - + To use these functions, you can create the arrays on the stack as follows: ``` @@ -99,10 +99,10 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * assumed to have the same size as given by the size field. */ typedef struct mixed_radix_int_t { - int size; - int* digits; - int* radixes; - int* permutation; + int size; + int* digits; + int* radixes; + int* permutation; } mixed_radix_int_t; /** diff --git a/include/core/modal_models/modes.h b/include/core/modal_models/modes.h index ae7af357c..dd46b0319 100644 --- a/include/core/modal_models/modes.h +++ b/include/core/modal_models/modes.h @@ -66,51 +66,53 @@ typedef struct trigger_t trigger_t; * @param mode The target mode to set for activation. * @param change_type The change type of the transition. 
*/ -#define _LF_SET_MODE_WITH_TYPE(mode, change_type) \ -do { \ - ((self_base_t*)self)->_lf__mode_state.next_mode = mode; \ - ((self_base_t*)self)->_lf__mode_state.mode_change = change_type; \ -} while(0) +#define _LF_SET_MODE_WITH_TYPE(mode, change_type) \ + do { \ + ((self_base_t*)self)->_lf__mode_state.next_mode = mode; \ + ((self_base_t*)self)->_lf__mode_state.mode_change = change_type; \ + } while (0) //////////////////////////////////////////////////////////// //// Type definitions for modal infrastructure. /** Typedef for reactor_mode_t struct, used for representing a mode. */ typedef struct reactor_mode_t reactor_mode_t; -/** Typedef for reactor_mode_state_t struct, used for storing modal state of reactor and/or its relation to enclosing modes. */ +/** Typedef for reactor_mode_state_t struct, used for storing modal state of reactor and/or its relation to enclosing + * modes. */ typedef struct reactor_mode_state_t reactor_mode_state_t; -/** Typedef for mode_state_variable_reset_data_t struct, used for storing data for resetting state variables nested in modes. */ +/** Typedef for mode_state_variable_reset_data_t struct, used for storing data for resetting state variables nested in + * modes. */ typedef struct mode_state_variable_reset_data_t mode_state_variable_reset_data_t; /** Type of the mode change. */ -typedef enum {no_transition, reset_transition, history_transition} lf_mode_change_type_t; +typedef enum { no_transition, reset_transition, history_transition } lf_mode_change_type_t; /** A struct to represent a single mode instace in a reactor instance. */ struct reactor_mode_t { - reactor_mode_state_t* state; // Pointer to a struct with the reactor's mode state. INSTANCE. - char* name; // Name of this mode. - instant_t deactivation_time; // Time when the mode was left. - uint8_t flags; // Bit vector for several internal flags related to the mode. + reactor_mode_state_t* state; // Pointer to a struct with the reactor's mode state. INSTANCE. 
+ char* name; // Name of this mode. + instant_t deactivation_time; // Time when the mode was left. + uint8_t flags; // Bit vector for several internal flags related to the mode. }; /** A struct to store state of the modes in a reactor instance and/or its relation to enclosing modes. */ struct reactor_mode_state_t { - reactor_mode_t* parent_mode; // Pointer to the next enclosing mode (if exists). - reactor_mode_t* initial_mode; // Pointer to the initial mode. - reactor_mode_t* current_mode; // Pointer to the currently active mode (only locally active). - reactor_mode_t* next_mode; // Pointer to the next mode to activate at the end of this step (if set). - lf_mode_change_type_t mode_change; // A mode change type flag. + reactor_mode_t* parent_mode; // Pointer to the next enclosing mode (if exists). + reactor_mode_t* initial_mode; // Pointer to the initial mode. + reactor_mode_t* current_mode; // Pointer to the currently active mode (only locally active). + reactor_mode_t* next_mode; // Pointer to the next mode to activate at the end of this step (if set). + lf_mode_change_type_t mode_change; // A mode change type flag. }; /** A struct to store data for resetting state variables nested in modes. */ struct mode_state_variable_reset_data_t { - reactor_mode_t* mode; // Pointer to the enclosing mode. - void* target; // Pointer to the target variable. - void* source; // Pointer to the data source. - size_t size; // The size of the variable. + reactor_mode_t* mode; // Pointer to the enclosing mode. + void* target; // Pointer to the target variable. + void* source; // Pointer to the data source. + size_t size; // The size of the variable. 
}; //////////////////////////////////////////////////////////// -//// Forward declaration +//// Forward declaration typedef struct environment_t environment_t; //////////////////////////////////////////////////////////// @@ -119,37 +121,19 @@ void _lf_initialize_modes(environment_t* env); void _lf_handle_mode_changes(environment_t* env); void _lf_handle_mode_triggered_reactions(environment_t* env); bool _lf_mode_is_active(reactor_mode_t* mode); -void _lf_initialize_mode_states( - environment_t* env, - reactor_mode_state_t* states[], - int states_size); -void _lf_process_mode_changes( - environment_t* env, - reactor_mode_state_t* states[], - int states_size, - mode_state_variable_reset_data_t reset_data[], - int reset_data_size, - trigger_t* timer_triggers[], - int timer_triggers_size -); +void _lf_initialize_mode_states(environment_t* env, reactor_mode_state_t* states[], int states_size); +void _lf_process_mode_changes(environment_t* env, reactor_mode_state_t* states[], int states_size, + mode_state_variable_reset_data_t reset_data[], int reset_data_size, + trigger_t* timer_triggers[], int timer_triggers_size); void _lf_add_suspended_event(event_t* event); -void _lf_handle_mode_startup_reset_reactions( - environment_t* env, - reaction_t** startup_reactions, - int startup_reactions_size, - reaction_t** reset_reactions, - int reset_reactions_size, - reactor_mode_state_t* states[], - int states_size -); -void _lf_handle_mode_shutdown_reactions( - environment_t* env, - reaction_t** shutdown_reactions, - int shutdown_reactions_size -); +void _lf_handle_mode_startup_reset_reactions(environment_t* env, reaction_t** startup_reactions, + int startup_reactions_size, reaction_t** reset_reactions, + int reset_reactions_size, reactor_mode_state_t* states[], int states_size); +void _lf_handle_mode_shutdown_reactions(environment_t* env, reaction_t** shutdown_reactions, + int shutdown_reactions_size); void _lf_terminate_modal_reactors(environment_t* env); -#else /* IF NOT 
MODAL_REACTORS */ +#else /* IF NOT MODAL_REACTORS */ /* * Reactions and triggers must have a mode pointer to set up connection to enclosing modes, * also when they are precompiled without modal reactors in order to later work in modal reactors. diff --git a/include/core/platform/lf_zephyr_board_support.h b/include/core/platform/lf_zephyr_board_support.h deleted file mode 100644 index d5dd16c64..000000000 --- a/include/core/platform/lf_zephyr_board_support.h +++ /dev/null @@ -1,107 +0,0 @@ -/************* -Copyright (c) 2023, Norwegian University of Science and Technology. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL -THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF -THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-***************/ - -/** - * @brief Provide preprocessor flags for the particular board that was chosen - * - * @author{Erling Jellum } - */ - -#ifndef LF_ZEPHYR_BOARD_SUPPORT_H -#define LF_ZEPHYR_BOARD_SUPPORT_H - -// Default options -#define LF_ZEPHYR_THREAD_PRIORITY_DEFAULT 5 -#define LF_ZEPHYR_STACK_SIZE_DEFAULT 2048 - -// Unless the user explicitly asks for the kernel clock, then we use a counter -// clock because it is more precise. -#if !defined(LF_ZEPHYR_CLOCK_KERNEL) - #if defined(CONFIG_SOC_FAMILY_NRF) - #define LF_ZEPHYR_CLOCK_COUNTER - #define LF_TIMER DT_NODELABEL(timer1) - #define LF_WAKEUP_OVERHEAD_US 100 - #define LF_MIN_SLEEP_US 10 - #define LF_RUNTIME_OVERHEAD_US 19 - #elif defined(CONFIG_BOARD_ATSAMD20_XPRO) - #define LF_TIMER DT_NODELABEL(tc4) - #define LF_ZEPHYR_CLOCK_COUNTER - #elif defined(CONFIG_SOC_FAMILY_SAM) - #define LF_TIMER DT_NODELABEL(tc0) - #define LF_ZEPHYR_CLOCK_COUNTER - #elif defined(CONFIG_COUNTER_MICROCHIP_MCP7940N) - #define LF_ZEPHYR_CLOCK_COUNTER - #define LF_TIMER DT_NODELABEL(extrtc0) - #elif defined(CONFIG_COUNTER_RTC0) - #define LF_ZEPHYR_CLOCK_COUNTER - #define LF_TIMER DT_NODELABEL(rtc0) - #elif defined(CONFIG_COUNTER_RTC_STM32) - #define LF_TIMER DT_INST(0, st_stm32_rtc) - #define LF_ZEPHYR_CLOCK_COUNTER - #elif defined(CONFIG_COUNTER_XLNX_AXI_TIMER) - #define LF_TIMER DT_INST(0, xlnx_xps_timer_1_00_a) - #define LF_ZEPHYR_CLOCK_COUNTER - #elif defined(CONFIG_COUNTER_TMR_ESP32) - #define LF_TIMER DT_NODELABEL(timer0) - #define LF_ZEPHYR_CLOCK_COUNTER - #elif defined(CONFIG_COUNTER_MCUX_CTIMER) - #define LF_TIMER DT_NODELABEL(ctimer0) - #define LF_ZEPHYR_CLOCK_COUNTER - #elif defined(CONFIG_SOC_MIMXRT1176_CM7) - #define LF_TIMER DT_NODELABEL(gpt2) - #define LF_ZEPHYR_CLOCK_COUNTER - #else - // This board does not have support for the counter clock. If the user - // explicitly asked for this cock, then throw an error. 
- #if defined(LF_ZEPHYR_CLOCK_COUNTER) - #error "LF_ZEPHYR_CLOCK_COUNTER was requested but it is not supported by the board" - #else - #define LF_ZEPHYR_CLOCK_KERNEL - #endif - #endif // BOARD -#endif - -#if defined(LF_ZEPHYR_CLOCK_COUNTER) - #ifndef LF_WAKEUP_OVERHEAD_US - #define LF_WAKEUP_OVERHEAD_US 0 - #endif - - #ifndef LF_MIN_SLEEP_US - #define LF_MIN_SLEEP_US 10 - #endif - - #ifndef LF_RUNTIME_OVERHEAD_US - #define LF_RUNTIME_OVERHEAD_US 0 - #endif - - #ifndef LF_TIMER_ALARM_CHANNEL - #define LF_TIMER_ALARM_CHANNEL 0 - #endif -#else - #if !defined(LF_ZEPHYR_CLOCK_KERNEL) - #error Neither hi-res nor lo-res clock specified - #endif -#endif // LF_ZEPHYR_CLOCK_COUNTER - -#endif diff --git a/include/core/port.h b/include/core/port.h index a8888c8bc..625d27cec 100644 --- a/include/core/port.h +++ b/include/core/port.h @@ -67,7 +67,7 @@ #include #include -#include "lf_token.h" // Defines token types and lf_port_base_t, lf_sparse_io_record +#include "lf_token.h" // Defines token types and lf_port_base_t, lf_sparse_io_record /** Threshold for width of multiport s.t. sparse reading is supported. */ #define LF_SPARSE_WIDTH_THRESHOLD 10 @@ -86,10 +86,10 @@ * number of a present input (or -1 if there is no next present input). */ typedef struct lf_multiport_iterator_t { - int next; - int idx; // Index in the record of next or -1 if lf_multiport_next has not been called. - lf_port_base_t** port; - int width; + int next; + int idx; // Index in the record of next or -1 if lf_multiport_next has not been called. + lf_port_base_t** port; + int width; } lf_multiport_iterator_t; /** @@ -107,9 +107,8 @@ lf_multiport_iterator_t _lf_multiport_iterator_impl(lf_port_base_t** port, int w * lf_multiport_iterator_t on the stack, a pointer to which should be * passed to lf_multiport_iterator_next() to advance. 
*/ -#define lf_multiport_iterator(in) (_lf_multiport_iterator_impl( \ - (lf_port_base_t**)self->_lf_ ## in, \ - self->_lf_ ## in ## _width)) +#define lf_multiport_iterator(in) \ + (_lf_multiport_iterator_impl((lf_port_base_t**)self->_lf_##in, self->_lf_##in##_width)) /** * Return the channel number of the next present input on the multiport diff --git a/include/core/reactor.h b/include/core/reactor.h index 14e1bdc40..fffa9ba19 100644 --- a/include/core/reactor.h +++ b/include/core/reactor.h @@ -7,11 +7,11 @@ * @copyright (c) 2020-2024, The University of California at Berkeley. * License: BSD 2-clause * @brief Definitions for the C target of Lingua Franca shared by threaded and unthreaded versions. - * + * * This header file defines functions that programmers use in the body of reactions for reading and * writing inputs and outputs and scheduling future events. Other functions that might be useful to * application programmers are also defined here. - * + * * Many of these functions have macro wrappers defined in reaction_macros.h. */ @@ -19,11 +19,11 @@ #define REACTOR_H #include "lf_types.h" -#include "modes.h" // Modal model support +#include "modes.h" // Modal model support #include "port.h" -#include "tag.h" // Time-related functions. -#include "clock.h" // Time-related functions. -#include "trace.h" +#include "tag.h" // Time-related functions. +#include "clock.h" // Time-related functions. +#include "tracepoint.h" #include "util.h" /** @@ -69,7 +69,7 @@ void lf_set_stp_offset(interval_t offset); /** * @brief Print a snapshot of the priority queues used during execution (for debugging). - * + * * This function implementation will be empty if the NDEBUG macro is defined; that macro * is normally defined for release builds. * @param env The environment in which we are executing. @@ -78,7 +78,7 @@ void lf_print_snapshot(environment_t* env); /** * @brief Request a stop to execution as soon as possible. 
- * + * * In a non-federated execution with only a single enclave, this will occur * one microstep later than the current tag. In a federated execution or when * there is more than one enclave, it will likely occur at a later tag determined @@ -88,11 +88,11 @@ void lf_request_stop(void); /** * @brief Allocate memory and record on the specified allocation record (a self struct). - * + * * This will allocate memory using calloc (so the allocated memory is zeroed out) * and record the allocated memory on the specified self struct so that * it will be freed when calling {@link free_reactor(self_base_t)}. - * + * * @param count The number of items of size 'size' to accomodate. * @param size The size of each item. * @param head Pointer to the head of a list on which to record @@ -103,13 +103,13 @@ void* lf_allocate(size_t count, size_t size, struct allocation_record_t** head); /** * @brief Allocate memory for a new runtime instance of a reactor. - * + * * This records the reactor on the list of reactors to be freed at * termination of the program. If you plan to free the reactor before * termination of the program, use * {@link lf_allocate(size_t, size_t, allocation_record_t**)} * with a null last argument instead. - * + * * @param size The size of the self struct, obtained with sizeof(). */ self_base_t* lf_new_reactor(size_t size); @@ -121,12 +121,12 @@ void lf_free_all_reactors(void); /** * @brief Free the specified reactor. - * + * * This will free the memory recorded on the allocations list of the specified reactor * and then free the specified self struct. * @param self The self struct of the reactor. 
*/ -void lf_free_reactor(self_base_t *self); +void lf_free_reactor(self_base_t* self); #endif /* REACTOR_H */ /** @} */ diff --git a/include/core/reactor_common.h b/include/core/reactor_common.h index 97ec976b8..fc1451a96 100644 --- a/include/core/reactor_common.h +++ b/include/core/reactor_common.h @@ -9,7 +9,7 @@ * @copyright (c) 2020-2024, The University of California at Berkeley. * License: BSD 2-clause * @brief Declarations of functions with implementations in reactor.c and reactor_threaded.c. - * + * * The functions declared in this file, as opposed to the ones in reactor.h, are not meant to be * called by application programmers. They should be viewed as private functions that make up the * C runtime. In some cases, the implementation of these functions is in reactor_common.c, and in @@ -34,7 +34,7 @@ /** * @brief Constant giving the minimum amount of time to sleep to wait * for physical time to reach a logical time. - * + * * Unless the "fast" option is given, an LF program will wait until * physical time matches logical time before handling an event with * a given logical time. The amount of time is less than this given @@ -65,6 +65,16 @@ extern struct allocation_record_t* _lf_reactors_to_free; ////////////////////// Functions ////////////////////// +/** + * @brief Combine a deadline and a level into a single index for sorting in the reaction queue. + * + * This shifts the deadline left by 16 bits and inserts the level in the low-order 16 bits. + * If the deadline is larger than `ULLONG_MAX >> 16`, then it is treated as the largest possible deadline. + * @param deadline The deadline. + * @param level The level in the reaction graph. + */ +index_t lf_combine_deadline_and_level(interval_t deadline, int level); + /** * @brief Create and initialize the required number of environments for the program. * @note This function will be code generated by the compiler.
@@ -73,10 +83,10 @@ void lf_create_environments(void); /** * @brief Free memory on the specified allocation record (a self struct). - * + * * This will mark the allocation record empty by setting `*head` to NULL. * If the argument is NULL, do nothing. - * + * * @param head Pointer to the head of a list on which allocations are recorded. */ void lf_free(struct allocation_record_t** head); @@ -90,7 +100,7 @@ event_t* lf_get_new_event(environment_t* env); /** * @brief Recycle the given event. - * + * * This will zero out the event and push it onto the recycle queue. * @param env Environment in which we are executing. * @param e The event to recycle. @@ -112,20 +122,20 @@ void lf_set_default_command_line_options(void); /** * @brief Perform whatever is needed to start a time step. - * + * * For example, this function resets outputs to be absent at the start of a new time step. - * + * * @param env The environment in which we are executing */ -void _lf_start_time_step(environment_t *env); +void _lf_start_time_step(environment_t* env); /** * @brief Function that is called when the program is about to exit. - * + * * This function will be invoked after all shutdown actions have completed. * For non-federated programs, the code generator generates an empty function to implement this. * For federated programs, the function is implemented in federate.c. - * + * * @param env The environment in which we are executing */ void lf_terminate_execution(environment_t* env); @@ -138,7 +148,7 @@ void _lf_initialize_trigger_objects(); /** * @brief Perform final wrap-up on exit. - * + * * This function will be registered to execute on exit. * It reports elapsed logical and physical times and reports if any * memory allocated for tokens has not been freed. @@ -182,7 +192,7 @@ void _lf_trigger_startup_reactions(environment_t* env); * @brief Trigger all the shutdown reactions in the specified environment. * @param env Environment in which we are executing. 
*/ -void _lf_trigger_shutdown_reactions(environment_t *env); +void _lf_trigger_shutdown_reactions(environment_t* env); /** * Create dummy events to be used as spacers in the event queue. @@ -193,13 +203,8 @@ void _lf_trigger_shutdown_reactions(environment_t *env); * @param offset The number of dummy events to insert. * @return A pointer to the first dummy event. */ -event_t* _lf_create_dummy_events( - environment_t* env, - trigger_t* trigger, - instant_t time, - event_t* next, - microstep_t offset -); +event_t* _lf_create_dummy_events(environment_t* env, trigger_t* trigger, instant_t time, event_t* next, + microstep_t offset); /** * @brief Schedule an event at a specific tag (time, microstep). @@ -247,20 +252,20 @@ trigger_handle_t _lf_insert_reactions_for_trigger(environment_t* env, trigger_t* * @param env The environment in which we are executing * @param next_time The time step to advance to. */ -void _lf_advance_logical_time(environment_t *env, instant_t next_time); +void _lf_advance_logical_time(environment_t* env, instant_t next_time); /** * @brief Pop all events from event_q with tag equal to current tag. - * + * * This will extract all the reactions triggered by these events and stick them onto the * reaction queue. 
- * + * * @param env The environment in which we are executing */ -void _lf_pop_events(environment_t *env); +void _lf_pop_events(environment_t* env); void _lf_invoke_reaction(environment_t* env, reaction_t* reaction, int worker); -void schedule_output_reactions(environment_t *env, reaction_t* reaction, int worker); +void schedule_output_reactions(environment_t* env, reaction_t* reaction, int worker); int process_args(int argc, const char* argv[]); /** diff --git a/include/core/threaded/reactor_threaded.h b/include/core/threaded/reactor_threaded.h index a62e5b7e4..6971cec17 100644 --- a/include/core/threaded/reactor_threaded.h +++ b/include/core/threaded/reactor_threaded.h @@ -5,7 +5,7 @@ * @author{Soroush Bateni } * @copyright (c) 2020-2024, The University of California at Berkeley. * License: BSD 2-clause - * @brief Runtime infrastructure for the threaded version of the C target of Lingua Franca. + * @brief Runtime infrastructure for the threaded version of the C target of Lingua Franca. */ #ifndef REACTOR_THREADED_H #define REACTOR_THREADED_H @@ -58,7 +58,7 @@ void lf_enqueue_port_absent_reactions(environment_t* env); * If future_tag is in the past (or equals to current logical time), the runtime * will freeze advancement of logical time. */ -void _lf_increment_tag_barrier(environment_t *env, tag_t future_tag); +void _lf_increment_tag_barrier(environment_t* env, tag_t future_tag); /** * @brief Version of _lf_increment_tag_barrier to call when the caller holds the mutex. @@ -70,7 +70,7 @@ void _lf_increment_tag_barrier(environment_t *env, tag_t future_tag); * If future_tag is in the past (or equals to current logical time), the runtime * will freeze advancement of logical time. */ -void _lf_increment_tag_barrier_locked(environment_t *env, tag_t future_tag); +void _lf_increment_tag_barrier_locked(environment_t* env, tag_t future_tag); /** * Decrement the total number of pending barrier requests for the environment tag barrier. 
diff --git a/include/core/threaded/scheduler.h b/include/core/threaded/scheduler.h index ab4bec48e..ea9f008c2 100644 --- a/include/core/threaded/scheduler.h +++ b/include/core/threaded/scheduler.h @@ -48,7 +48,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * `lf_sched_init`. */ - /** * @brief Initialize the scheduler. * @@ -61,11 +60,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * @param option Pointer to a `sched_params_t` struct containing additional * scheduler parameters. Can be NULL. */ -void lf_sched_init( - environment_t* env, - size_t number_of_workers, - sched_params_t* parameters -); +void lf_sched_init(environment_t* env, size_t number_of_workers, sched_params_t* parameters); /** * @brief Free the memory used by the scheduler. @@ -100,7 +95,6 @@ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_nu */ void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction); - /** * @brief Inform the scheduler that worker thread 'worker_number' would like to * trigger 'reaction' at the current tag. diff --git a/include/core/threaded/scheduler_instance.h b/include/core/threaded/scheduler_instance.h index 8a8a40905..f664066e6 100644 --- a/include/core/threaded/scheduler_instance.h +++ b/include/core/threaded/scheduler_instance.h @@ -40,7 +40,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifndef NUMBER_OF_WORKERS // Enable thread-related platform functions #define NUMBER_OF_WORKERS 1 -#endif // NUMBER_OF_WORKERS +#endif // NUMBER_OF_WORKERS #include "lf_semaphore.h" #include @@ -57,84 +57,84 @@ typedef struct custom_scheduler_data_t custom_scheduler_data_t; * @note Members of this struct are added based on existing schedulers' needs. * These should be expanded to accommodate new schedulers. */ -typedef struct lf_scheduler_t { - struct environment_t * env; - /** - * @brief Maximum number of levels for reactions in the program. 
- * - */ - size_t max_reaction_level; - - /** - * @brief Used by the scheduler to signal the maximum number of worker - * threads that should be executing work at the same time. - * - * Initially, the count is set to 0. Maximum value of count should be - * `number_of_workers`. - * - * For example, if the scheduler releases the semaphore with a count of 4, - * no more than 4 worker threads should wake up to process reactions. - * - * FIXME: specific comment - */ - lf_semaphore_t* semaphore; - - /** - * @brief Indicate whether the program should stop - */ - volatile bool should_stop; - - /** - * @brief Hold triggered reactions. - */ - void* triggered_reactions; - - /** - * @brief An array of mutexes. - * - * Can be used to avoid race conditions. Schedulers are allowed to - * initialize as many mutexes as they deem fit. - */ - lf_mutex_t* array_of_mutexes; - - /** - * @brief An array of atomic indexes. - * - * Can be used to avoid race conditions. Schedulers are allowed to to use as - * many indexes as they deem fit. - */ - volatile int* indexes; - - /** - * @brief Hold currently executing reactions. - */ - void* executing_reactions; - - /** - * @brief Hold reactions temporarily. - */ - void* transfer_reactions; - - /** - * @brief Number of workers that this scheduler is managing. - */ - size_t number_of_workers; - - /** - * @brief Number of workers that are idle. - * Adding to/subtracting from this variable must be done atomically. - */ - volatile size_t number_of_idle_workers; - - /** - * @brief The next level of reactions to execute. - */ - volatile size_t next_reaction_level; - - // Pointer to an optional custom data structure that each scheduler can define. 
- // The type is forward declared here and must be declared again in the scheduler source file - // Is not touched by `init_sched_instance` and must be initialized by each scheduler that needs it - custom_scheduler_data_t * custom_data; +typedef struct lf_scheduler_t { + struct environment_t* env; + /** + * @brief Maximum number of levels for reactions in the program. + * + */ + size_t max_reaction_level; + + /** + * @brief Used by the scheduler to signal the maximum number of worker + * threads that should be executing work at the same time. + * + * Initially, the count is set to 0. Maximum value of count should be + * `number_of_workers`. + * + * For example, if the scheduler releases the semaphore with a count of 4, + * no more than 4 worker threads should wake up to process reactions. + * + * FIXME: specific comment + */ + lf_semaphore_t* semaphore; + + /** + * @brief Indicate whether the program should stop + */ + volatile bool should_stop; + + /** + * @brief Hold triggered reactions. + */ + void* triggered_reactions; + + /** + * @brief An array of mutexes. + * + * Can be used to avoid race conditions. Schedulers are allowed to + * initialize as many mutexes as they deem fit. + */ + lf_mutex_t* array_of_mutexes; + + /** + * @brief An array of atomic indexes. + * + * Can be used to avoid race conditions. Schedulers are allowed to to use as + * many indexes as they deem fit. + */ + volatile int* indexes; + + /** + * @brief Hold currently executing reactions. + */ + void* executing_reactions; + + /** + * @brief Hold reactions temporarily. + */ + void* transfer_reactions; + + /** + * @brief Number of workers that this scheduler is managing. + */ + size_t number_of_workers; + + /** + * @brief Number of workers that are idle. + * Adding to/subtracting from this variable must be done atomically. + */ + volatile size_t number_of_idle_workers; + + /** + * @brief The next level of reactions to execute. 
+ */ + volatile size_t next_reaction_level; + + // Pointer to an optional custom data structure that each scheduler can define. + // The type is forward declared here and must be declared again in the scheduler source file + // Is not touched by `init_sched_instance` and must be initialized by each scheduler that needs it + custom_scheduler_data_t* custom_data; } lf_scheduler_t; /** @@ -153,8 +153,8 @@ typedef struct lf_scheduler_t { * `DEFAULT_MAX_REACTION_LEVEL` will be used. */ typedef struct { - size_t* num_reactions_per_level; - size_t num_reactions_per_level_size; + size_t* num_reactions_per_level; + size_t num_reactions_per_level_size; } sched_params_t; /** @@ -170,10 +170,7 @@ typedef struct { * @return `true` if initialization was performed. `false` if instance is already * initialized (checked in a thread-safe way). */ -bool init_sched_instance( - struct environment_t* env, - lf_scheduler_t** instance, - size_t number_of_workers, - sched_params_t* params); +bool init_sched_instance(struct environment_t* env, lf_scheduler_t** instance, size_t number_of_workers, + sched_params_t* params); #endif // LF_SCHEDULER_PARAMS_H diff --git a/include/core/threaded/scheduler_sync_tag_advance.h b/include/core/threaded/scheduler_sync_tag_advance.h index d86d26583..3de92e540 100644 --- a/include/core/threaded/scheduler_sync_tag_advance.h +++ b/include/core/threaded/scheduler_sync_tag_advance.h @@ -42,7 +42,7 @@ void _lf_next_locked(struct environment_t* env); * @param tag_to_send The tag to send. 
*/ void logical_tag_complete(tag_t tag_to_send); -bool should_stop_locked(lf_scheduler_t * sched); -bool _lf_sched_advance_tag_locked(lf_scheduler_t * sched); +bool should_stop_locked(lf_scheduler_t* sched); +bool _lf_sched_advance_tag_locked(lf_scheduler_t* sched); #endif // LF_C11_THREADS_SUPPORT_H diff --git a/include/core/threaded/watchdog.h b/include/core/threaded/watchdog.h index 0b8d0d12c..652304164 100644 --- a/include/core/threaded/watchdog.h +++ b/include/core/threaded/watchdog.h @@ -12,41 +12,41 @@ #include "lf_types.h" #include "environment.h" -#include "platform.h" // For lf_thread_t. +#include "platform.h" // For lf_thread_t. #ifdef __cplusplus extern "C" { #endif -/** - * Watchdog function type. The argument passed to one of +/** + * Watchdog function type. The argument passed to one of * these watchdog functions is a pointer to the self struct * for the reactor. */ -typedef void(*watchdog_function_t)(void*); +typedef void (*watchdog_function_t)(void*); /** Typdef for watchdog_t struct, used to call watchdog handler. */ typedef struct watchdog_t { - struct self_base_t* base; // The reactor that contains the watchdog. - trigger_t* trigger; // The trigger associated with this watchdog. - instant_t expiration; // The expiration instant for the watchdog. (Initialized to NEVER) - interval_t min_expiration; // The minimum expiration interval for the watchdog. - lf_thread_t thread_id; // The thread that the watchdog is meant to run on. - lf_cond_t cond; // Condition variable used for sleeping and termination. - bool active; // Boolean indicating whether or not thread is active. - bool terminate; // Boolean indicating whether termination of the thread has been requested. - watchdog_function_t watchdog_function; // The function/handler for the watchdog. + struct self_base_t* base; // The reactor that contains the watchdog. + trigger_t* trigger; // The trigger associated with this watchdog. + instant_t expiration; // The expiration instant for the watchdog. 
(Initialized to NEVER) + interval_t min_expiration; // The minimum expiration interval for the watchdog. + lf_thread_t thread_id; // The thread that the watchdog is meant to run on. + lf_cond_t cond; // Condition variable used for sleeping and termination. + bool active; // Boolean indicating whether or not thread is active. + bool terminate; // Boolean indicating whether termination of the thread has been requested. + watchdog_function_t watchdog_function; // The function/handler for the watchdog. } watchdog_t; -/** +/** * @brief Start or restart the watchdog timer. - * + * * This function sets the expiration time of the watchdog to the current logical time * plus the minimum timeout of the watchdog plus the specified `additional_timeout`. * This function assumes the reactor mutex is held when it is called; this assumption * is satisfied whenever this function is called from within a reaction that declares * the watchdog as an effect. - * + * * @param watchdog The watchdog to be started * @param additional_timeout Additional timeout to be added to the watchdog's * minimum expiration. @@ -56,12 +56,11 @@ void lf_watchdog_start(watchdog_t* watchdog, interval_t additional_timeout); /** * @brief Stop the specified watchdog without invoking the expiration handler. * This function sets the expiration time of the watchdog to `NEVER`. - * + * * @param watchdog The watchdog. */ void lf_watchdog_stop(watchdog_t* watchdog); - ///////////////////// Internal functions ///////////////////// // The following functions are internal to the runtime and should not be documented by Doxygen. /// \cond INTERNAL // Doxygen conditional. @@ -69,10 +68,10 @@ void lf_watchdog_stop(watchdog_t* watchdog); /** * Function to initialize mutexes for watchdogs */ -void _lf_initialize_watchdogs(environment_t *env); +void _lf_initialize_watchdogs(environment_t* env); /** Terminates all watchdogs inside the environment. 
*/ -void _lf_watchdog_terminate_all(environment_t *env); +void _lf_watchdog_terminate_all(environment_t* env); /// \endcond // INTERNAL diff --git a/include/core/trace.h b/include/core/trace.h deleted file mode 100644 index 6b7b95e49..000000000 --- a/include/core/trace.h +++ /dev/null @@ -1,562 +0,0 @@ -/** - * @file - * @author Edward A. Lee - * - * @section LICENSE -Copyright (c) 2020, The University of California at Berkeley and TU Dresden - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL -THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF -THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - * @section DESCRIPTION - * Definitions of tracepoint events for use with the C code generator and any other - * code generator that uses the C infrastructure (such as the Python code generator). 
- * - * See: https://www.lf-lang.org/docs/handbook/tracing?target=c - * - * The trace file is named trace.lft and is a binary file with the following format: - * - * Header: - * * instant_t: The start time. This is both the starting physical time and the starting logical time. - * * int: Size N of the table mapping pointers to descriptions. - * This is followed by N records each of which has: - * * A pointer value (the key). - * * A null-terminated string (the description). - * - * Traces: - * A sequence of traces, each of which begins with an int giving the length of the trace - * followed by binary representations of the trace_record struct written using fwrite(). - */ - -#ifdef RTI_TRACE -#define LF_TRACE -#endif - -#ifndef TRACE_H -#define TRACE_H - -#include "lf_types.h" -#include - -#ifdef FEDERATED -#include "net_common.h" -#endif // FEDERATED - -/** - * Trace event types. If you update this, be sure to update the - * string representation below. Also, create a tracepoint function - * for each event type. - */ -typedef enum -{ - reaction_starts, - reaction_ends, - reaction_deadline_missed, - schedule_called, - user_event, - user_value, - worker_wait_starts, - worker_wait_ends, - scheduler_advancing_time_starts, - scheduler_advancing_time_ends, - federated, // Everything below this is for tracing federated interactions. 
- // Sending messages - send_ACK, - send_FAILED, - send_TIMESTAMP, - send_NET, - send_LTC, - send_STOP_REQ, - send_STOP_REQ_REP, - send_STOP_GRN, - send_FED_ID, - send_PTAG, - send_TAG, - send_REJECT, - send_RESIGN, - send_PORT_ABS, - send_CLOSE_RQ, - send_TAGGED_MSG, - send_P2P_TAGGED_MSG, - send_MSG, - send_P2P_MSG, - send_ADR_AD, - send_ADR_QR, - send_DNET, - // Receiving messages - receive_ACK, - receive_FAILED, - receive_TIMESTAMP, - receive_NET, - receive_LTC, - receive_STOP_REQ, - receive_STOP_REQ_REP, - receive_STOP_GRN, - receive_FED_ID, - receive_PTAG, - receive_TAG, - receive_REJECT, - receive_RESIGN, - receive_PORT_ABS, - receive_CLOSE_RQ, - receive_TAGGED_MSG, - receive_P2P_TAGGED_MSG, - receive_MSG, - receive_P2P_MSG, - receive_ADR_AD, - receive_ADR_QR, - receive_DNET, - receive_UNIDENTIFIED, - NUM_EVENT_TYPES -} trace_event_t; - -#ifdef LF_TRACE - -/** - * String description of event types. - */ -static const char *trace_event_names[] = { - "Reaction starts", - "Reaction ends", - "Reaction deadline missed", - "Schedule called", - "User-defined event", - "User-defined valued event", - "Worker wait starts", - "Worker wait ends", - "Scheduler advancing time starts", - "Scheduler advancing time ends", - "Federated marker", - // Sending messages - "Sending ACK", - "Sending FAILED", - "Sending TIMESTAMP", - "Sending NET", - "Sending LTC", - "Sending STOP_REQ", - "Sending STOP_REQ_REP", - "Sending STOP_GRN", - "Sending FED_ID", - "Sending PTAG", - "Sending TAG", - "Sending REJECT", - "Sending RESIGN", - "Sending PORT_ABS", - "Sending CLOSE_RQ", - "Sending TAGGED_MSG", - "Sending P2P_TAGGED_MSG", - "Sending MSG", - "Sending P2P_MSG", - "Sending ADR_AD", - "Sending ADR_QR", - "Sending DNET", - // Receiving messages - "Receiving ACK", - "Receiving FAILED", - "Receiving TIMESTAMP", - "Receiving NET", - "Receiving LTC", - "Receiving STOP_REQ", - "Receiving STOP_REQ_REP", - "Receiving STOP_GRN", - "Receiving FED_ID", - "Receiving PTAG", - "Receiving TAG", - 
"Receiving REJECT", - "Receiving RESIGN", - "Receiving PORT_ABS", - "Receiving CLOSE_RQ", - "Receiving TAGGED_MSG", - "Receiving P2P_TAGGED_MSG", - "Receiving MSG", - "Receiving P2P_MSG", - "Receiving ADR_AD", - "Receiving ADR_QR", - "Receiving DNET", - "Receiving UNIDENTIFIED", -}; - -// FIXME: Target property should specify the capacity of the trace buffer. -#define TRACE_BUFFER_CAPACITY 2048 - -/** Size of the table of trace objects. */ -#define TRACE_OBJECT_TABLE_SIZE 1024 - -/** - * @brief A trace record that is written in binary to the trace file. - */ -typedef struct trace_record_t { - trace_event_t event_type; - void* pointer; // pointer identifying the record, e.g. to self struct for a reactor. - int src_id; // The ID number of the source (e.g. worker or federate) or -1 for no ID number. - int dst_id; // The ID number of the destination (e.g. reaction or federate) or -1 for no ID number. - instant_t logical_time; - microstep_t microstep; - instant_t physical_time; - trigger_t* trigger; - interval_t extra_delay; -} trace_record_t; - -/** - * Identifier for what is in the object table. - */ -typedef enum { - trace_reactor, // Self struct. - trace_trigger, // Timer or action (argument to schedule()). - trace_user // User-defined trace object. -} _lf_trace_object_t; - -/** - * Struct for table of pointers to a description of the object. - */ -typedef struct object_description_t object_description_t; -struct object_description_t { - void* pointer; // Pointer to the reactor self struct or other identifying pointer. - void* trigger; // Pointer to the trigger (action or timer) or other secondary ID, if any. - _lf_trace_object_t type; // The type of trace object. - char* description; // A NULL terminated string. -}; -/** - * - * @brief This struct holds all the state associated with tracing in a single environment. - * Each environment which has tracing enabled will have such a struct on its environment struct. 
- * - */ -typedef struct trace_t { - /** - * Array of buffers into which traces are written. - * When a buffer becomes full, the contents is flushed to the file, - * which will create a significant pause in the calling thread. - */ - trace_record_t** _lf_trace_buffer; - int* _lf_trace_buffer_size; - - /** The number of trace buffers allocated when tracing starts. */ - int _lf_number_of_trace_buffers; - - /** Marker that tracing is stopping or has stopped. */ - int _lf_trace_stop; - - /** The file into which traces are written. */ - FILE* _lf_trace_file; - - /** The file name where the traces are written*/ - char *filename; - - /** Table of pointers to a description of the object. */ - object_description_t _lf_trace_object_descriptions[TRACE_OBJECT_TABLE_SIZE]; - int _lf_trace_object_descriptions_size; - - /** Indicator that the trace header information has been written to the file. */ - bool _lf_trace_header_written; - - /** Pointer back to the environment which we are tracing within*/ - environment_t* env; -} trace_t; - - -/** - * @brief Dynamically allocate a new tracing object. - * - * @param env The environment in which we are tracing. If passed NULL we use the GLOBAL_ENVIRONMENT - * @param filename Name of the file in which to store the trace - * @return trace_t* A newly allocated trace object with environment pointer and filename initialized - */ -trace_t* trace_new(environment_t *env, const char *filename); - -/** - * @brief Free the memory allocated for the trace object - * - * @param trace - */ -void trace_free(trace_t *trace); - - -/** - * Register a trace object. - * @param env Pointer to the environment in which the event is traced - * @param pointer1 Pointer that identifies the object, typically to a reactor self struct. - * @param pointer2 Further identifying pointer, typically to a trigger (action or timer) or NULL if irrelevant. - * @param type The type of trace object. - * @param description The human-readable description of the object. 
- * @return 1 if successful, 0 if the trace object table is full. - */ -int _lf_register_trace_event(trace_t* trace, void* pointer1, void* pointer2, _lf_trace_object_t type, char* description); - -/** - * Register a user trace event. This should be called once, providing a pointer to a string - * that describes a phenomenon being traced. Use the same pointer as the first argument to - * tracepoint_user_event() and tracepoint_user_value(). - * @param description Pointer to a human-readable description of the event. - * @return 1 if successful, 0 if the trace object table is full. - */ -int register_user_trace_event(void* self, char* description); - -/** - * Open a trace file and start tracing. - * @param filename The filename for the trace file. - */ -void start_trace(trace_t* trace); - -/** - * Trace an event identified by a type and a pointer to the self struct of the reactor instance. - * This is a generic tracepoint function. It is better to use one of the specific functions. - * The worker argument determines which buffer to write to. - * Hence, as long as this argument is distinct for each caller, the callers can be in - * different threads without the need for a mutex lock. - * @param event_type The type of event (see trace_event_t in trace.h) - * @param reactor The pointer to the self struct of the reactor instance in the trace table. - * @param tag Pointer to a tag or NULL to use current tag. - * @param worker The ID of the worker thread (which determines which buffer to write to). - * @param src_id The ID number of the source (e.g. worker or federate) or -1 for no ID number. - * @param dst_id The ID number of the destination (e.g. reaction or federate) or -1 for no ID number. - * @param physical_time If the caller has already accessed physical time, provide it here. - * Otherwise, provide NULL. This argument avoids a second call to lf_time_physical() - * and ensures that the physical time in the trace is the same as that used by the caller. 
- * @param trigger Pointer to the trigger_t struct for calls to schedule or NULL otherwise. - * @param extra_delay The extra delay passed to schedule(). If not relevant for this event - * type, pass 0. - * @param is_interval_start True to indicate that this tracepoint is at the beginning of - * time interval, such as reaction invocation, so that physical time is captured as late - * as possible. False to indicate that it is at the end of an interval, such as the end - * of a reaction invocation, so that physical time is captured as early as possible. - */ -void tracepoint( - trace_t* trace, - trace_event_t event_type, - void* reactor, - tag_t* tag, - int worker, - int src_id, - int dst_id, - instant_t* physical_time, - trigger_t* trigger, - interval_t extra_delay, - bool is_interval_start -); - -/** - * Trace the start of a reaction execution. - * @param env The environment in which we are executing - * @param reaction Pointer to the reaction_t struct for the reaction. - * @param worker The thread number of the worker thread or 0 for single-threaded execution. - */ -void tracepoint_reaction_starts(trace_t* trace, reaction_t* reaction, int worker); - -/** - * Trace the end of a reaction execution. - * @param env The environment in which we are executing - * @param reaction Pointer to the reaction_t struct for the reaction. - * @param worker The thread number of the worker thread or 0 for single-threaded execution. - */ -void tracepoint_reaction_ends(trace_t* trace, reaction_t* reaction, int worker); - -/** - * Trace a call to schedule. - * @param env The environment in which we are executing - * @param trigger Pointer to the trigger_t struct for the trigger. - * @param extra_delay The extra delay passed to schedule(). - */ -void tracepoint_schedule(trace_t* trace, trigger_t* trigger, interval_t extra_delay); - -/** - * Trace a user-defined event. 
Before calling this, you must call - * register_user_trace_event() with a pointer to the same string - * or else the event will not be recognized. - * @param self Pointer to the self struct of the reactor from which we want - * to trace this event. This pointer is used to get the correct environment and - * thus the correct logical tag of the event. - * @param description Pointer to the description string. - */ -void tracepoint_user_event(void* self, char* description); - -/** - * Trace a user-defined event with a value. - * Before calling this, you must call - * register_user_trace_event() with a pointer to the same string - * or else the event will not be recognized. - * @param self Pointer to the self struct of the reactor from which we want - * to trace this event. This pointer is used to get the correct environment and - * thus the correct logical tag of the event. - * @param description Pointer to the description string. - * @param value The value of the event. This is a long long for - * convenience so that time values can be passed unchanged. - * But int values work as well. - */ -void tracepoint_user_value(void* self, char* description, long long value); - -/** - * Trace the start of a worker waiting for something to change on the reaction queue. - * @param env The environment in which we are executing - * @param worker The thread number of the worker thread or 0 for single-threaded execution. - */ -void tracepoint_worker_wait_starts(trace_t* trace, int worker); - -/** - * Trace the end of a worker waiting for something to change on reaction queue. - * @param env The environment in which we are executing - * @param worker The thread number of the worker thread or 0 for single-threaded execution. - */ -void tracepoint_worker_wait_ends(trace_t* trace, int worker); - -/** - * Trace the start of the scheduler waiting for logical time to advance or an event to - * appear on the event queue. 
- * @param env The environment in which we are executing - */ -void tracepoint_scheduler_advancing_time_starts(trace_t* trace); - -/** - * Trace the end of the scheduler waiting for logical time to advance or an event to - * appear on the event queue. - * @param env The environment in which we are executing - */ -void tracepoint_scheduler_advancing_time_ends(trace_t* trace); - -/** - * Trace the occurence of a deadline miss. - * @param env The environment in which we are executing - * @param reaction Pointer to the reaction_t struct for the reaction. - * @param worker The thread number of the worker thread or 0 for single-threaded execution. - */ -void tracepoint_reaction_deadline_missed(trace_t* trace, reaction_t *reaction, int worker); - -/** - * Flush any buffered trace records to the trace file and - * close the files. - */ -void stop_trace(trace_t* trace); - -/** - * Version of stop_trace() that does not lock the trace mutex. - */ -void stop_trace_locked(trace_t* trace); - -//////////////////////////////////////////////////////////// -//// For federated execution - -#if defined(FEDERATED) || defined(LF_ENCLAVES) - -/** - * Trace federate sending a message to the RTI. - * @param event_type The type of event. Possible values are: - * - * @param fed_id The federate identifier. - * @param tag Pointer to the tag that has been sent, or NULL. - */ -void tracepoint_federate_to_rti(trace_t* trace, trace_event_t event_type, int fed_id, tag_t* tag); - -/** - * Trace federate receiving a message from the RTI. - * @param event_type The type of event. Possible values are: - * - * @param fed_id The federate identifier. - * @param tag Pointer to the tag that has been received, or NULL. - */ -void tracepoint_federate_from_rti(trace_t* trace, trace_event_t event_type, int fed_id, tag_t* tag); - -/** - * Trace federate sending a message to another federate. - * @param event_type The type of event. Possible values are: - * - * @param fed_id The federate identifier. 
- * @param partner_id The partner federate identifier. - * @param tag Pointer to the tag that has been sent, or NULL. - */ -void tracepoint_federate_to_federate(trace_t* trace, trace_event_t event_type, int fed_id, int partner_id, tag_t *tag); - -/** - * Trace federate receiving a message from another federate. - * @param event_type The type of event. Possible values are: - * - * @param fed_id The federate identifier. - * @param partner_id The partner federate identifier. - * @param tag Pointer to the tag that has been received, or NULL. - */ -void tracepoint_federate_from_federate(trace_t* trace, trace_event_t event_type, int fed_id, int partner_id, tag_t *tag); - -#else -#define tracepoint_federate_to_rti(...); -#define tracepoint_federate_from_rti(...); -#define tracepoint_federate_to_federate(...); -#define tracepoint_federate_from_federate(...); -#endif // FEDERATED - -//////////////////////////////////////////////////////////// -//// For RTI execution - -#ifdef RTI_TRACE - -/** - * Trace RTI sending a message to a federate. - * @param event_type The type of event. Possible values are: - * - * @param fed_id The fedaerate ID. - * @param tag Pointer to the tag that has been sent, or NULL. - */ -void tracepoint_rti_to_federate(trace_t* trace, trace_event_t event_type, int fed_id, tag_t* tag); - -/** - * Trace RTI receiving a message from a federate. - * @param event_type The type of event. Possible values are: - * - * @param fed_id The fedaerate ID. - * @param tag Pointer to the tag that has been sent, or NULL. - */ -void tracepoint_rti_from_federate(trace_t* trace, trace_event_t event_type, int fed_id, tag_t* tag); - -#else -#define tracepoint_rti_to_federate(...); -#define tracepoint_rti_from_federate(...) ; -#endif // RTI_TRACE - -#else -typedef struct trace_t trace_t; - -// empty definition in case we compile without tracing -#define _lf_register_trace_event(...) -#define register_user_trace_event(...) -#define tracepoint(...) 
-#define tracepoint_reaction_starts(...) -#define tracepoint_reaction_ends(...) -#define tracepoint_schedule(...) -#define tracepoint_user_event(...) -#define tracepoint_user_value(...) -#define tracepoint_worker_wait_starts(...) -#define tracepoint_worker_wait_ends(...) -#define tracepoint_scheduler_advancing_time_starts(...); -#define tracepoint_scheduler_advancing_time_ends(...); -#define tracepoint_reaction_deadline_missed(...); -#define tracepoint_federate_to_rti(...); -#define tracepoint_federate_from_rti(...); -#define tracepoint_federate_to_federate(...) ; -#define tracepoint_federate_from_federate(...) ; -#define tracepoint_rti_to_federate(...); -#define tracepoint_rti_from_federate(...) ; - -#define start_trace(...) -#define stop_trace(...) -#define stop_trace_locked(...) -#define trace_new(...) NULL -#define trace_free(...) - - -#endif // LF_TRACE -#endif // TRACE_H diff --git a/include/core/tracepoint.h b/include/core/tracepoint.h new file mode 100644 index 000000000..f28c59f9d --- /dev/null +++ b/include/core/tracepoint.h @@ -0,0 +1,438 @@ +/** + * @file + * @author Edward A. Lee + * @author Peter Donovan + * @copyright (c) 2020-2024, The University of California at Berkeley. + * License: BSD 2-clause + * @brief Definitions of tracepoint functions for use with the C code generator and any other + * code generator that uses the C infrastructure (such as the Python code generator). + * + * See: https://www.lf-lang.org/docs/handbook/tracing?target=c + * + * The trace file is named trace.lft and is a binary file with the following format: + * + * Header: + * * instant_t: The start time. This is both the starting physical time and the starting logical time. + * * int: Size N of the table mapping pointers to descriptions. + * This is followed by N records each of which has: + * * A pointer value (the key). + * * A null-terminated string (the description). 
+ *
+ * Traces:
+ * A sequence of traces, each of which begins with an int giving the length of the trace
+ * followed by binary representations of the trace_record struct written using fwrite().
+ */
+
+#ifdef RTI_TRACE
+#define LF_TRACE
+#endif
+
+#ifndef TRACEPOINT_H
+#define TRACEPOINT_H
+
+#include "lf_types.h"
+#include <stdio.h>
+
+#ifdef FEDERATED
+#include "net_common.h"
+#endif // FEDERATED
+
+/**
+ * Trace event types. If you update this, be sure to update the
+ * string representation below. Also, create a tracepoint function
+ * for each event type.
+ */
+typedef enum {
+  reaction_starts,
+  reaction_ends,
+  reaction_deadline_missed,
+  schedule_called,
+  user_event,
+  user_value,
+  worker_wait_starts,
+  worker_wait_ends,
+  scheduler_advancing_time_starts,
+  scheduler_advancing_time_ends,
+  federated, // Everything below this is for tracing federated interactions.
+  // Sending messages
+  send_ACK,
+  send_FAILED,
+  send_TIMESTAMP,
+  send_NET,
+  send_LTC,
+  send_STOP_REQ,
+  send_STOP_REQ_REP,
+  send_STOP_GRN,
+  send_FED_ID,
+  send_PTAG,
+  send_TAG,
+  send_REJECT,
+  send_RESIGN,
+  send_PORT_ABS,
+  send_CLOSE_RQ,
+  send_TAGGED_MSG,
+  send_P2P_TAGGED_MSG,
+  send_MSG,
+  send_P2P_MSG,
+  send_ADR_AD,
+  send_ADR_QR,
+  send_DNET,
+  // Receiving messages
+  receive_ACK,
+  receive_FAILED,
+  receive_TIMESTAMP,
+  receive_NET,
+  receive_LTC,
+  receive_STOP_REQ,
+  receive_STOP_REQ_REP,
+  receive_STOP_GRN,
+  receive_FED_ID,
+  receive_PTAG,
+  receive_TAG,
+  receive_REJECT,
+  receive_RESIGN,
+  receive_PORT_ABS,
+  receive_CLOSE_RQ,
+  receive_TAGGED_MSG,
+  receive_P2P_TAGGED_MSG,
+  receive_MSG,
+  receive_P2P_MSG,
+  receive_ADR_AD,
+  receive_ADR_QR,
+  receive_DNET,
+  receive_UNIDENTIFIED,
+  NUM_EVENT_TYPES
+} trace_event_t;
+
+#ifdef LF_TRACE
+
+#include "trace.h"
+
+/**
+ * String description of event types. 
+ */ +static const char* trace_event_names[] = { + "Reaction starts", + "Reaction ends", + "Reaction deadline missed", + "Schedule called", + "User-defined event", + "User-defined valued event", + "Worker wait starts", + "Worker wait ends", + "Scheduler advancing time starts", + "Scheduler advancing time ends", + "Federated marker", + // Sending messages + "Sending ACK", + "Sending FAILED", + "Sending TIMESTAMP", + "Sending NET", + "Sending LTC", + "Sending STOP_REQ", + "Sending STOP_REQ_REP", + "Sending STOP_GRN", + "Sending FED_ID", + "Sending PTAG", + "Sending TAG", + "Sending REJECT", + "Sending RESIGN", + "Sending PORT_ABS", + "Sending CLOSE_RQ", + "Sending TAGGED_MSG", + "Sending P2P_TAGGED_MSG", + "Sending MSG", + "Sending P2P_MSG", + "Sending ADR_AD", + "Sending ADR_QR", + "Sending DNET", + // Receiving messages + "Receiving ACK", + "Receiving FAILED", + "Receiving TIMESTAMP", + "Receiving NET", + "Receiving LTC", + "Receiving STOP_REQ", + "Receiving STOP_REQ_REP", + "Receiving STOP_GRN", + "Receiving FED_ID", + "Receiving PTAG", + "Receiving TAG", + "Receiving REJECT", + "Receiving RESIGN", + "Receiving PORT_ABS", + "Receiving CLOSE_RQ", + "Receiving TAGGED_MSG", + "Receiving P2P_TAGGED_MSG", + "Receiving MSG", + "Receiving P2P_MSG", + "Receiving ADR_AD", + "Receiving ADR_QR", + "Receiving DNET", + "Receiving UNIDENTIFIED", +}; + +/** + * @brief A trace record that gets written in binary to the trace file in the default implementation. + */ +typedef struct trace_record_t { + trace_event_t event_type; + void* pointer; // pointer identifying the record, e.g. to self struct for a reactor. + int src_id; // The ID number of the source (e.g. worker or federate) or -1 for no ID number. + int dst_id; // The ID number of the destination (e.g. reaction or federate) or -1 for no ID number. 
+ instant_t logical_time; + microstep_t microstep; + instant_t physical_time; + trigger_t* trigger; + interval_t extra_delay; +} trace_record_t; + +/** + * @brief Pass the provided info to the tracing module. + * + * @param event_type The kind of tracepoint. + * @param reactor A pointer used as an opaque ID of the source reactor, if one exists. + * @param tag The tag associated with the tracepoint. + * @param worker The worker thread where the tracepoint was reached. + * @param src_id The ID of the source federate/enclave, if applicable. + * @param dst_id The ID of the destination federate/enclave, if applicable. + * @param physical_time The time at which the tracepoint was reached, or NULL if not applicable. + * @param trigger The trigger, if this tracepoint signifies scheduling of an event. + * @param extra_delay The delay passed to schedule(), if applicable. + * @param is_interval_start Whether this is the start of a time interval being measured (this + * argument is currently unused) + */ +void call_tracepoint(int event_type, void* reactor, tag_t tag, int worker, int src_id, int dst_id, + instant_t* physical_time, trigger_t* trigger, interval_t extra_delay, bool is_interval_start); + +/** + * Register a trace object. + * @param env Pointer to the environment in which the event is traced + * @param pointer1 Pointer that identifies the object, typically to a reactor self struct. + * @param pointer2 Further identifying pointer, typically to a trigger (action or timer) or NULL if irrelevant. + * @param type The type of trace object. + * @param description The human-readable description of the object. + * @return 1 if successful, 0 if the trace object table is full. + */ +int _lf_register_trace_event(void* pointer1, void* pointer2, _lf_trace_object_t type, char* description); + +/** + * Register a user trace event. This should be called once, providing a pointer to a string + * that describes a phenomenon being traced. 
Use the same pointer as the first argument to + * tracepoint_user_event() and tracepoint_user_value(). + * @param description Pointer to a human-readable description of the event. + * @return 1 if successful, 0 if the trace object table is full. + */ +int register_user_trace_event(void* self, char* description); + +/** + * Trace the start of a reaction execution. + * @param env The environment in which we are executing + * @param reaction Pointer to the reaction_t struct for the reaction. + * @param worker The thread number of the worker thread or 0 for single-threaded execution. + */ +#define tracepoint_reaction_starts(env, reaction, worker) \ + call_tracepoint(reaction_starts, reaction->self, env->current_tag, worker, worker, reaction->number, NULL, NULL, 0, \ + true) + +/** + * Trace the end of a reaction execution. + * @param env The environment in which we are executing + * @param reaction Pointer to the reaction_t struct for the reaction. + * @param worker The thread number of the worker thread or 0 for single-threaded execution. + */ +#define tracepoint_reaction_ends(env, reaction, worker) \ + call_tracepoint(reaction_ends, reaction->self, env->current_tag, worker, worker, reaction->number, NULL, NULL, 0, \ + false) + +/** + * Trace a call to schedule. + * @param env The environment in which we are executing + * @param trigger Pointer to the trigger_t struct for the trigger. + * @param extra_delay The extra delay passed to schedule(). + */ +void tracepoint_schedule(environment_t* env, trigger_t* trigger, interval_t extra_delay); + +/** + * Trace a user-defined event. Before calling this, you must call + * register_user_trace_event() with a pointer to the same string + * or else the event will not be recognized. + * @param self Pointer to the self struct of the reactor from which we want + * to trace this event. This pointer is used to get the correct environment and + * thus the correct logical tag of the event. 
+ * @param description Pointer to the description string. + */ +void tracepoint_user_event(void* self, char* description); + +/** + * Trace a user-defined event with a value. + * Before calling this, you must call + * register_user_trace_event() with a pointer to the same string + * or else the event will not be recognized. + * @param self Pointer to the self struct of the reactor from which we want + * to trace this event. This pointer is used to get the correct environment and + * thus the correct logical tag of the event. + * @param description Pointer to the description string. + * @param value The value of the event. This is a long long for + * convenience so that time values can be passed unchanged. + * But int values work as well. + */ +void tracepoint_user_value(void* self, char* description, long long value); + +/** + * Trace the start of a worker waiting for something to change on the reaction queue. + * @param trace The trace object. + * @param worker The thread number of the worker thread or 0 for single-threaded execution. + */ +#define tracepoint_worker_wait_starts(env, worker) \ + call_tracepoint(worker_wait_starts, NULL, env->current_tag, worker, worker, -1, NULL, NULL, 0, true) + +/** + * Trace the end of a worker waiting for something to change on the event or reaction queue. + * @param trace The trace object. + * @param worker The thread number of the worker thread or 0 for single-threaded execution. + */ +#define tracepoint_worker_wait_ends(env, worker) \ + call_tracepoint(worker_wait_ends, NULL, env->current_tag, worker, worker, -1, NULL, NULL, 0, false) + +/** + * Trace the start of the scheduler waiting for logical time to advance or an event to + * appear on the event queue. + * @param trace The trace object. 
+ */ +#define tracepoint_scheduler_advancing_time_starts(env) \ + call_tracepoint(scheduler_advancing_time_starts, NULL, env->current_tag, -1, -1, -1, NULL, NULL, 0, true); + +/** + * Trace the end of the scheduler waiting for logical time to advance or an event to + * appear on the event queue. + * @param trace The trace object. + */ +#define tracepoint_scheduler_advancing_time_ends(env) \ + call_tracepoint(scheduler_advancing_time_ends, NULL, env->current_tag, -1, -1, -1, NULL, NULL, 0, false) + +/** + * Trace the occurrence of a deadline miss. + * @param trace The trace object. + * @param reaction Pointer to the reaction_t struct for the reaction. + * @param worker The thread number of the worker thread or 0 for single-threaded execution. + */ +#define tracepoint_reaction_deadline_missed(env, reaction, worker) \ + call_tracepoint(reaction_deadline_missed, reaction->self, env->current_tag, worker, worker, reaction->number, NULL, \ + NULL, 0, false) + +/** + * @brief Check if the tracing library is compatible with the current version + * of the runtime. + */ +void lf_tracing_check_version(); + +//////////////////////////////////////////////////////////// +//// For federated execution + +#if defined(FEDERATED) || defined(LF_ENCLAVES) + +/** + * Trace federate sending a message to the RTI. + * @param event_type The type of event. Possible values are: + * + * @param fed_id The federate identifier. + * @param tag Pointer to the tag that has been sent, or NULL. + */ +void tracepoint_federate_to_rti(trace_event_t event_type, int fed_id, tag_t* tag); + +/** + * Trace federate receiving a message from the RTI. + * @param event_type The type of event. Possible values are: + * + * @param fed_id The federate identifier. + * @param tag Pointer to the tag that has been received, or NULL. + */ +void tracepoint_federate_from_rti(trace_event_t event_type, int fed_id, tag_t* tag); + +/** + * Trace federate sending a message to another federate. 
+ * @param event_type The type of event. Possible values are:
+ *
+ * @param fed_id The federate identifier.
+ * @param partner_id The partner federate identifier.
+ * @param tag Pointer to the tag that has been sent, or NULL.
+ */
+void tracepoint_federate_to_federate(trace_event_t event_type, int fed_id, int partner_id, tag_t* tag);
+
+/**
+ * Trace federate receiving a message from another federate.
+ * @param event_type The type of event. Possible values are:
+ *
+ * @param fed_id The federate identifier.
+ * @param partner_id The partner federate identifier.
+ * @param tag Pointer to the tag that has been received, or NULL.
+ */
+void tracepoint_federate_from_federate(trace_event_t event_type, int fed_id, int partner_id, tag_t* tag);
+
+#else
+#define tracepoint_federate_to_rti(...) ;
+#define tracepoint_federate_from_rti(...) ;
+#define tracepoint_federate_to_federate(...) ;
+#define tracepoint_federate_from_federate(...) ;
+#endif // FEDERATED
+
+////////////////////////////////////////////////////////////
+//// For RTI execution
+
+#ifdef RTI_TRACE
+
+/**
+ * Trace RTI sending a message to a federate.
+ * @param event_type The type of event. Possible values are:
+ *
+ * @param fed_id The federate ID.
+ * @param tag Pointer to the tag that has been sent, or NULL.
+ */
+void tracepoint_rti_to_federate(trace_event_t event_type, int fed_id, tag_t* tag);
+
+/**
+ * Trace RTI receiving a message from a federate.
+ * @param event_type The type of event. Possible values are:
+ *
+ * @param fed_id The federate ID.
+ * @param tag Pointer to the tag that has been sent, or NULL.
+ */
+void tracepoint_rti_from_federate(trace_event_t event_type, int fed_id, tag_t* tag);
+
+#else
+#define tracepoint_rti_to_federate(...) ;
+#define tracepoint_rti_from_federate(...) ;
+#endif // RTI_TRACE
+
+#else
+typedef struct trace_t trace_t;
+
+// empty definition in case we compile without tracing
+#define _lf_register_trace_event(...) 1
+#define register_user_trace_event(...) 
1 +#define tracepoint_reaction_starts(...) +#define tracepoint_reaction_ends(...) +#define tracepoint_schedule(...) +#define tracepoint_user_event(...) +#define tracepoint_user_value(...) +#define tracepoint_worker_wait_starts(...) +#define tracepoint_worker_wait_ends(...) +#define tracepoint_scheduler_advancing_time_starts(...) ; +#define tracepoint_scheduler_advancing_time_ends(...) ; +#define tracepoint_reaction_deadline_missed(...) ; +#define tracepoint_federate_to_rti(...) ; +#define tracepoint_federate_from_rti(...) ; +#define tracepoint_federate_to_federate(...) ; +#define tracepoint_federate_from_federate(...) ; +#define tracepoint_rti_to_federate(...) ; +#define tracepoint_rti_from_federate(...) ; + +#define lf_tracing_register_trace_event(...) ; +#define lf_tracing_set_start_time(...) ; +#define tracepoint(...) ; +#define lf_tracing_global_init(...) ; +#define lf_tracing_global_shutdown(...) ; + +#endif // LF_TRACE +#endif // TRACEPOINT_H diff --git a/include/core/utils/hashset/hashset.h b/include/core/utils/hashset/hashset.h index 9efc95efd..b48c32edf 100644 --- a/include/core/utils/hashset/hashset.h +++ b/include/core/utils/hashset/hashset.h @@ -13,7 +13,7 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * Modified in 2022 by Edward A. Lee to conform to documentation standards. * Also, changed so that hashset_create() takes an initial capacity argument. */ @@ -27,56 +27,56 @@ extern "C" { #endif - struct hashset_st { - size_t nbits; - size_t mask; +struct hashset_st { + size_t nbits; + size_t mask; - size_t capacity; - void** items; - size_t nitems; - size_t n_deleted_items; - }; + size_t capacity; + void** items; + size_t nitems; + size_t n_deleted_items; +}; - typedef struct hashset_st *hashset_t; +typedef struct hashset_st* hashset_t; - /** - * @brief Create a hashset instance. 
- * The returned value is a pointer. - * The caller must call hashset_destroy() to free allocated memory. - * @param nbits The log base 2 of the initial capacity of the hashset. - */ - hashset_t hashset_create(unsigned short nbits); +/** + * @brief Create a hashset instance. + * The returned value is a pointer. + * The caller must call hashset_destroy() to free allocated memory. + * @param nbits The log base 2 of the initial capacity of the hashset. + */ +hashset_t hashset_create(unsigned short nbits); - /** - * @brief Destroy the hashset instance, freeing allocated memory. - */ - void hashset_destroy(hashset_t set); +/** + * @brief Destroy the hashset instance, freeing allocated memory. + */ +void hashset_destroy(hashset_t set); - /** - * @brief Return the number of items in the hashset. - */ - size_t hashset_num_items(hashset_t set); +/** + * @brief Return the number of items in the hashset. + */ +size_t hashset_num_items(hashset_t set); - /** - * @brief Add a pointer to the hashset. - * Note that 0 and 1 are special values, meaning nil and deleted items. - * This function will return -1 indicating error if you try to add 0 or 1. - * This function may resize the hashset if it is approaching capacity. - * Returns zero if the item is already in the set and non-zero otherwise. - */ - int hashset_add(hashset_t set, void *item); +/** + * @brief Add a pointer to the hashset. + * Note that 0 and 1 are special values, meaning nil and deleted items. + * This function will return -1 indicating error if you try to add 0 or 1. + * This function may resize the hashset if it is approaching capacity. + * Returns zero if the item is already in the set and non-zero otherwise. + */ +int hashset_add(hashset_t set, void* item); - /** - * @brief Remove an item from the hashset. - * Return non-zero if the item was removed and zero if the item - * is not on the hashset. - */ - int hashset_remove(hashset_t set, void *item); +/** + * @brief Remove an item from the hashset. 
+ * Return non-zero if the item was removed and zero if the item + * is not on the hashset. + */ +int hashset_remove(hashset_t set, void* item); - /** - * @brief Returns non-zero if the item is in the hashset and zero otherwise. - */ - int hashset_is_member(hashset_t set, void *item); +/** + * @brief Returns non-zero if the item is in the hashset and zero otherwise. + */ +int hashset_is_member(hashset_t set, void* item); #ifdef __cplusplus } diff --git a/include/core/utils/hashset/hashset_itr.h b/include/core/utils/hashset/hashset_itr.h index 8de7e4209..af928d0ab 100644 --- a/include/core/utils/hashset/hashset_itr.h +++ b/include/core/utils/hashset/hashset_itr.h @@ -31,7 +31,7 @@ struct hashset_itr_st { int index; }; -typedef struct hashset_itr_st *hashset_itr_t; +typedef struct hashset_itr_st* hashset_itr_t; /** * @brief Create a hashset iterator. @@ -44,7 +44,7 @@ typedef struct hashset_itr_st *hashset_itr_t; * } * free(iterator); * ``` - * The caller must call free() on this iterator after using it. + * The caller must call free() on this iterator after using it. 
*/ hashset_itr_t hashset_iterator(hashset_t set); diff --git a/include/core/utils/impl/hashmap.h b/include/core/utils/impl/hashmap.h index c998df8c4..8490e3f62 100644 --- a/include/core/utils/impl/hashmap.h +++ b/include/core/utils/impl/hashmap.h @@ -22,7 +22,7 @@ #define HASH_OF(key) (size_t) key #endif #ifndef HASHMAP -#define HASHMAP(token) hashmap ## _ ## token +#define HASHMAP(token) hashmap##_##token #endif #include @@ -32,15 +32,15 @@ ////////////////////////// Type definitions /////////////////////////// typedef struct HASHMAP(entry_t) { - K key; - V value; + K key; + V value; } HASHMAP(entry_t); typedef struct HASHMAP(t) { - HASHMAP(entry_t)* entries; - size_t capacity; - size_t num_entries; - K nothing; + HASHMAP(entry_t) * entries; + size_t capacity; + size_t num_entries; + K nothing; } HASHMAP(t); //////////////////////// Function declarations //////////////////////// @@ -51,27 +51,27 @@ typedef struct HASHMAP(t) { * hashmap will contain. Insufficient surplus capacity will cause poor performance. * @param nothing A key that is guaranteed never to be used. */ -HASHMAP(t)* HASHMAP(new)(size_t capacity, K nothing); +HASHMAP(t) * HASHMAP(new)(size_t capacity, K nothing); /** @brief Free all memory used by the given hashmap. */ -void HASHMAP(free)(HASHMAP(t)* hashmap); +void HASHMAP(free)(HASHMAP(t) * hashmap); /** @brief Associate a value with the given key. */ -void HASHMAP(put)(HASHMAP(t)* hashmap, K key, V value); +void HASHMAP(put)(HASHMAP(t) * hashmap, K key, V value); /** * @brief Get the value associated with the given key. * Precondition: The key must be present in the map. 
*/ -V HASHMAP(get)(HASHMAP(t)* hashmap, K key); +V HASHMAP(get)(HASHMAP(t) * hashmap, K key); /////////////////////////// Private helpers /////////////////////////// -static HASHMAP(entry_t)* HASHMAP(get_ideal_address)(HASHMAP(t)* hashmap, K key) { - HASHMAP(entry_t)* address = hashmap->entries + (HASH_OF(key) % hashmap->capacity); - assert(address >= hashmap->entries); - assert(address < hashmap->entries + hashmap->capacity); - return address; +static HASHMAP(entry_t) * HASHMAP(get_ideal_address)(HASHMAP(t) * hashmap, K key) { + HASHMAP(entry_t)* address = hashmap->entries + (HASH_OF(key) % hashmap->capacity); + assert(address >= hashmap->entries); + assert(address < hashmap->entries + hashmap->capacity); + return address; } /** @@ -80,56 +80,59 @@ static HASHMAP(entry_t)* HASHMAP(get_ideal_address)(HASHMAP(t)* hashmap, K key) * @param key The key from which to begin a search. * @param desired The key that the desired returnable entry should have. */ -static HASHMAP(entry_t)* HASHMAP(get_actual_address)(HASHMAP(t)* hashmap, K key) { - HASHMAP(entry_t)* address = HASHMAP(get_ideal_address)(hashmap, key); - HASHMAP(entry_t)* upper_limit = hashmap->entries + hashmap->capacity; - while ((address->key != hashmap->nothing) & (address->key != key)) address++; - if (address == upper_limit) { - address = hashmap->entries; - while ((address->key != hashmap->nothing) & (address->key != key)) address++; - if (address == upper_limit) return NULL; - } - assert(address->key == key || address->key == hashmap->nothing); - return address; +static HASHMAP(entry_t) * HASHMAP(get_actual_address)(HASHMAP(t) * hashmap, K key) { + HASHMAP(entry_t)* address = HASHMAP(get_ideal_address)(hashmap, key); + HASHMAP(entry_t)* upper_limit = hashmap->entries + hashmap->capacity; + while ((address->key != hashmap->nothing) & (address->key != key)) + address++; + if (address == upper_limit) { + address = hashmap->entries; + while ((address->key != hashmap->nothing) & (address->key != key)) + 
address++; + if (address == upper_limit) + return NULL; + } + assert(address->key == key || address->key == hashmap->nothing); + return address; } //////////////////////// Function definitions ///////////////////////// -HASHMAP(t)* HASHMAP(new)(size_t capacity, K nothing) { - HASHMAP(entry_t)* entries = (HASHMAP(entry_t)*) malloc( - (capacity + 1) * sizeof(HASHMAP(entry_t)) - ); - if (!entries) exit(1); - HASHMAP(t)* ret = (HASHMAP(t)*) malloc(sizeof(HASHMAP(t))); - if (!ret) exit(1); - // The entry at the end is used as a boundary. It will never again be written to. - for (size_t i = 0; i < capacity + 1; i++) { - entries[i].key = nothing; - } - ret->entries = entries; - ret->capacity = capacity; - ret->num_entries = 0; - // A second nothing may be required if removal is to be supported and we want to make removal - // a constant-time operation. - ret->nothing = nothing; - return ret; +HASHMAP(t) * HASHMAP(new)(size_t capacity, K nothing) { + HASHMAP(entry_t)* entries = (HASHMAP(entry_t)*)malloc((capacity + 1) * sizeof(HASHMAP(entry_t))); + if (!entries) + exit(1); + HASHMAP(t)* ret = (HASHMAP(t)*)malloc(sizeof(HASHMAP(t))); + if (!ret) + exit(1); + // The entry at the end is used as a boundary. It will never again be written to. + for (size_t i = 0; i < capacity + 1; i++) { + entries[i].key = nothing; + } + ret->entries = entries; + ret->capacity = capacity; + ret->num_entries = 0; + // A second nothing may be required if removal is to be supported and we want to make removal + // a constant-time operation. 
+ ret->nothing = nothing; + return ret; } -void HASHMAP(free)(HASHMAP(t)* hashmap) { - free(hashmap->entries); - free(hashmap); +void HASHMAP(free)(HASHMAP(t) * hashmap) { + free(hashmap->entries); + free(hashmap); } -void HASHMAP(put)(HASHMAP(t)* hashmap, K key, V value) { - assert(key != hashmap->nothing); - assert(key >= 0); - HASHMAP(entry_t)* write_to = HASHMAP(get_actual_address)(hashmap, key); - write_to->key = key; - write_to->value = value; +void HASHMAP(put)(HASHMAP(t) * hashmap, K key, V value) { + assert(key != hashmap->nothing); + assert(key >= 0); + HASHMAP(entry_t)* write_to = HASHMAP(get_actual_address)(hashmap, key); + write_to->key = key; + write_to->value = value; } -V HASHMAP(get)(HASHMAP(t)* hashmap, K key) { - assert(key != hashmap->nothing); - HASHMAP(entry_t)* read_from = HASHMAP(get_actual_address)(hashmap, key); - return read_from->value; // Crash the program if the key cannot be found +V HASHMAP(get)(HASHMAP(t) * hashmap, K key) { + assert(key != hashmap->nothing); + HASHMAP(entry_t)* read_from = HASHMAP(get_actual_address)(hashmap, key); + return read_from->value; // Crash the program if the key cannot be found } diff --git a/include/core/utils/impl/pointer_hashmap.h b/include/core/utils/impl/pointer_hashmap.h index f68be0c2d..2184518b3 100644 --- a/include/core/utils/impl/pointer_hashmap.h +++ b/include/core/utils/impl/pointer_hashmap.h @@ -27,7 +27,7 @@ * See hashmap.h for documentation on how to declare other hashmap types. */ -#define HASHMAP(token) hashmap_object2int ## _ ## token +#define HASHMAP(token) hashmap_object2int##_##token #define K void* #define V int #define HASH_OF(key) (size_t) key diff --git a/include/core/utils/lf_semaphore.h b/include/core/utils/lf_semaphore.h index 625dda84f..73d3e4eb4 100644 --- a/include/core/utils/lf_semaphore.h +++ b/include/core/utils/lf_semaphore.h @@ -37,13 +37,13 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define NUMBER_OF_WORKERS 1 #endif // NUMBER_OF_WORKERS -#include "platform.h" +#include "low_level_platform.h" #include typedef struct { - int count; - lf_mutex_t mutex; - lf_cond_t cond; + int count; + lf_mutex_t mutex; + lf_cond_t cond; } lf_semaphore_t; /** diff --git a/include/core/utils/pqueue.h b/include/core/utils/pqueue.h index edfd4968c..e317acbcd 100644 --- a/include/core/utils/pqueue.h +++ b/include/core/utils/pqueue.h @@ -4,7 +4,7 @@ * @author Edward A. Lee * @copyright (c) 2020-2023, The University of California at Berkeley. * License: BSD 2-clause - * + * * @brief Priority queue declarations for the event queue and reaction queue. */ @@ -46,7 +46,7 @@ int reaction_matches(void* a, void* b); * This is used for sorting pointers to event_t structs in the event queue. * @param a A pointer to an event_t. */ -pqueue_pri_t get_event_time(void *event); +pqueue_pri_t get_event_time(void* event); /** * Report a priority equal to the index of the given reaction. @@ -54,46 +54,46 @@ pqueue_pri_t get_event_time(void *event); * blocked and executing queues. * @param reaction A pointer to a reaction_t. */ -pqueue_pri_t get_reaction_index(void *reaction_t); +pqueue_pri_t get_reaction_index(void* reaction_t); /** * Return the given event's position in the queue. * @param event A pointer to an event_t. */ -size_t get_event_position(void *event); +size_t get_event_position(void* event); /** * Return the given reaction's position in the queue. * @param reaction A pointer to a reaction_t. */ -size_t get_reaction_position(void *reaction); +size_t get_reaction_position(void* reaction); /** * Set the given event's position in the queue. * @param event A pointer to an event_t * @param pos The position. */ -void set_event_position(void *event, size_t pos); +void set_event_position(void* event, size_t pos); /** * Set the given reaction's position in the queue. * @param event A pointer to a reaction_t. * @param pos The position. 
*/ -void set_reaction_position(void *reaction, size_t pos); +void set_reaction_position(void* reaction, size_t pos); /** * Print some information about the given reaction. * This only prints something if logging is set to DEBUG. * @param reaction A pointer to a reaction_t. */ -void print_reaction(void *reaction); +void print_reaction(void* reaction); /** * Print some information about the given event. * This only prints something if logging is set to DEBUG. * @param event A pointer to an event_t. */ -void print_event(void *event); +void print_event(void* event); #endif /* PQUEUE_H */ diff --git a/include/core/utils/pqueue_base.h b/include/core/utils/pqueue_base.h index 210cc0eec..8c9fc8f2c 100644 --- a/include/core/utils/pqueue_base.h +++ b/include/core/utils/pqueue_base.h @@ -21,7 +21,7 @@ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * + * * Modified by Marten Lohstroh (May, 2019). * Changes: * - Require implementation of a pqueue_eq_elem_f function to determine @@ -35,7 +35,6 @@ * @{ */ - #ifndef PQUEUE_BASE_H #define PQUEUE_BASE_H @@ -45,7 +44,7 @@ typedef unsigned long long pqueue_pri_t; /** Callback to get the priority of an element. */ -typedef pqueue_pri_t (*pqueue_get_pri_f)(void *a); +typedef pqueue_pri_t (*pqueue_get_pri_f)(void* a); /** Callback to compare two priorities. */ typedef int (*pqueue_cmp_pri_f)(pqueue_pri_t next, pqueue_pri_t curr); @@ -54,27 +53,26 @@ typedef int (*pqueue_cmp_pri_f)(pqueue_pri_t next, pqueue_pri_t curr); typedef int (*pqueue_eq_elem_f)(void* next, void* curr); /** Callback functions to get the position of an element. */ -typedef size_t (*pqueue_get_pos_f)(void *a); +typedef size_t (*pqueue_get_pos_f)(void* a); /** Callback functions to set the position of an element. 
*/ -typedef void (*pqueue_set_pos_f)(void *a, size_t pos); +typedef void (*pqueue_set_pos_f)(void* a, size_t pos); /** Debug callback function to print a entry. */ -typedef void (*pqueue_print_entry_f)(void *a); +typedef void (*pqueue_print_entry_f)(void* a); /** The priority queue handle. */ -typedef struct pqueue_t -{ - size_t size; /**< number of elements in this queue plus 1 */ - size_t avail; /**< slots available in this queue */ - size_t step; /**< growth stepping setting */ - pqueue_cmp_pri_f cmppri; /**< callback to compare priorities */ - pqueue_get_pri_f getpri; /**< callback to get priority of a node */ - pqueue_get_pos_f getpos; /**< callback to get position of a node */ - pqueue_set_pos_f setpos; /**< callback to set position of a node */ - pqueue_eq_elem_f eqelem; /**< callback to compare elements */ - pqueue_print_entry_f prt; /**< callback to print elements */ - void **d; /**< The actual queue in binary heap form */ +typedef struct pqueue_t { + size_t size; /**< number of elements in this queue plus 1 */ + size_t avail; /**< slots available in this queue */ + size_t step; /**< growth stepping setting */ + pqueue_cmp_pri_f cmppri; /**< callback to compare priorities */ + pqueue_get_pri_f getpri; /**< callback to get priority of a node */ + pqueue_get_pos_f getpos; /**< callback to get position of a node */ + pqueue_set_pos_f setpos; /**< callback to set position of a node */ + pqueue_eq_elem_f eqelem; /**< callback to compare elements */ + pqueue_print_entry_f prt; /**< callback to print elements */ + void** d; /**< The actual queue in binary heap form */ } pqueue_t; /** @@ -93,26 +91,20 @@ typedef struct pqueue_t * * @return The handle or NULL for insufficent memory. 
*/ -pqueue_t * -pqueue_init(size_t n, - pqueue_cmp_pri_f cmppri, - pqueue_get_pri_f getpri, - pqueue_get_pos_f getpos, - pqueue_set_pos_f setpos, - pqueue_eq_elem_f eqelem, - pqueue_print_entry_f prt); +pqueue_t* pqueue_init(size_t n, pqueue_cmp_pri_f cmppri, pqueue_get_pri_f getpri, pqueue_get_pos_f getpos, + pqueue_set_pos_f setpos, pqueue_eq_elem_f eqelem, pqueue_print_entry_f prt); /** * free all memory used by the queue * @param q the queue */ -void pqueue_free(pqueue_t *q); +void pqueue_free(pqueue_t* q); /** * return the size of the queue. * @param q the queue */ -size_t pqueue_size(pqueue_t *q); +size_t pqueue_size(pqueue_t* q); /** * Insert an element into the queue. @@ -120,7 +112,7 @@ size_t pqueue_size(pqueue_t *q); * @param e the element * @return 0 on success */ -int pqueue_insert(pqueue_t *q, void *d); +int pqueue_insert(pqueue_t* q, void* d); /** * Move an existing entry to a different priority. @@ -128,17 +120,14 @@ int pqueue_insert(pqueue_t *q, void *d); * @param new_pri the new priority * @param d the entry */ -void -pqueue_change_priority(pqueue_t *q, - pqueue_pri_t new_pri, - void *d); +void pqueue_change_priority(pqueue_t* q, pqueue_pri_t new_pri, void* d); /** * Pop the highest-ranking item from the queue. * @param q the queue * @return NULL on error, otherwise the entry */ -void *pqueue_pop(pqueue_t *q); +void* pqueue_pop(pqueue_t* q); /** * @brief Empty 'src' into 'dest'. 
@@ -157,7 +146,7 @@ void pqueue_empty_into(pqueue_t** dest, pqueue_t** src); * @param e the entry to compare against * @return NULL if no matching event has been found, otherwise the entry */ -void* pqueue_find_equal_same_priority(pqueue_t *q, void *e); +void* pqueue_find_equal_same_priority(pqueue_t* q, void* e); /** * Find the highest-ranking item with priority up to and including the given @@ -167,7 +156,7 @@ void* pqueue_find_equal_same_priority(pqueue_t *q, void *e); * @param max_priority the maximum priority to consider * @return NULL if no matching event has been found, otherwise the entry */ -void* pqueue_find_equal(pqueue_t *q, void *e, pqueue_pri_t max_priority); +void* pqueue_find_equal(pqueue_t* q, void* e, pqueue_pri_t max_priority); /** * Remove an item from the queue. @@ -175,22 +164,21 @@ void* pqueue_find_equal(pqueue_t *q, void *e, pqueue_pri_t max_priority); * @param e the entry * @return 0 on success */ -int pqueue_remove(pqueue_t *q, void *e); +int pqueue_remove(pqueue_t* q, void* e); /** * Access highest-ranking item without removing it. * @param q the queue * @return NULL on error, otherwise the entry */ -void *pqueue_peek(pqueue_t *q); - +void* pqueue_peek(pqueue_t* q); /** * Print the contents of the queue. * @param q The queue. * @param print The callback function to print the entry or NULL to use the default. */ -void pqueue_print(pqueue_t *q, pqueue_print_entry_f print); +void pqueue_print(pqueue_t* q, pqueue_print_entry_f print); /** * Dump the queue and it's internal structure. @@ -199,9 +187,7 @@ void pqueue_print(pqueue_t *q, pqueue_print_entry_f print); * @param q the queue * @param the callback function to print the entry */ -void -pqueue_dump(pqueue_t *q, - pqueue_print_entry_f print); +void pqueue_dump(pqueue_t* q, pqueue_print_entry_f print); /** * Check that the all entries are in the right order, etc. 
@@ -209,7 +195,7 @@ pqueue_dump(pqueue_t *q, * debug function only * @param q the queue */ -int pqueue_is_valid(pqueue_t *q); +int pqueue_is_valid(pqueue_t* q); #endif /* PQUEUE_BASE_H */ /** @} */ diff --git a/include/core/utils/pqueue_tag.h b/include/core/utils/pqueue_tag.h index ad4ac84d1..d69de5e56 100644 --- a/include/core/utils/pqueue_tag.h +++ b/include/core/utils/pqueue_tag.h @@ -5,7 +5,7 @@ * @copyright (c) 2023, The University of California at Berkeley * License in [BSD 2-clause](https://github.com/lf-lang/reactor-c/blob/main/LICENSE.md) * @brief Priority queue that uses tags for sorting. - * + * * This file extends the pqueue infrastructure with support for queues that are sorted * by tag instead of by a long long. Elements in this queue are structs of type * pqueue_tag_element_t or a derived struct, as explained below. What you put onto the @@ -21,23 +21,23 @@ /** * @brief The type for an element in a priority queue that is sorted by tag. - * + * * In this design, a pointer to this struct is also a "priority" (it can be * cast to pqueue_pri_t). The actual priority is the tag field of the struct, * in that the queue is sorted from least tag to largest. - * + * * If your struct is dynamically allocated using malloc or calloc, and you * would like the memory freed when the queue is freed, then set the is_dynamic * field of the element to a non-zero value. - * + * * For a priority queue that contains only tags with no payload, you can * avoid creating the element struct by using the functions * pqueue_tag_insert_tag, pqueue_tag_insert_if_no_match, and pqueue_tag_pop_tag. - * + * * To customize the element you put onto the queue, for example to carry * a payload, you can create your own element struct type by simply declaring * the first field to be a pqueue_tag_element_t. 
For example, if you want an - * element of the queue to include a pointer to your own payload, you can + * element of the queue to include a pointer to your own payload, you can * declare the following struct type: *
  *     typedef struct {
@@ -50,9 +50,9 @@
  * simply cast the result to (my_element_type_t*);
  */
 typedef struct {
-    tag_t tag;
-    size_t pos;       // Needed by any pqueue element.
-    int is_dynamic;   // Non-zero to free this struct when the queue is freed.
+  tag_t tag;
+  size_t pos;     // Needed by any pqueue element.
+  int is_dynamic; // Non-zero to free this struct when the queue is freed.
 } pqueue_tag_element_t;
 
 /**
@@ -62,7 +62,7 @@ typedef pqueue_t pqueue_tag_t;
 
 /**
  * @brief Create a priority queue sorted by tags.
- * 
+ *
  * The elements of the priority queue will be of type pqueue_tag_element_t.
  * The caller should call pqueue_tag_free() when finished with the queue.
  * @return A dynamically allocated priority queue or NULL if memory allocation fails.
@@ -71,21 +71,21 @@ pqueue_tag_t* pqueue_tag_init(size_t initial_size);
 
 /**
  * @brief Free all memory used by the queue including elements that are marked dynamic.
- * 
+ *
  * @param q The queue.
  */
-void pqueue_tag_free(pqueue_tag_t *q);
+void pqueue_tag_free(pqueue_tag_t* q);
 
 /**
  * @brief Return the size of the queue.
- * 
+ *
  * @param q The queue.
  */
-size_t pqueue_tag_size(pqueue_tag_t *q);
+size_t pqueue_tag_size(pqueue_tag_t* q);
 
 /**
  * @brief Insert an element into the queue.
- * 
+ *
  * @param q The queue.
  * @param e The element to insert.
  * @return 0 on success
@@ -94,7 +94,7 @@ int pqueue_tag_insert(pqueue_tag_t* q, pqueue_tag_element_t* d);
 
 /**
  * @brief Insert a tag into the queue.
- * 
+ *
  * This automatically creates a dynamically allocated element in the queue
  * and ensures that if the element is still on the queue when pqueue_tag_free
  * is called, then that memory will be freed.
@@ -106,7 +106,7 @@ int pqueue_tag_insert_tag(pqueue_tag_t* q, tag_t t);
 
 /**
  * @brief Insert a tag into the queue if the tag is not already in the queue.
- * 
+ *
  * This automatically creates a dynamically allocated element in the queue
  * and ensures that if the element is still on the queue when pqueue_tag_free
  * is called, then that memory will be freed.
@@ -122,7 +122,7 @@ int pqueue_tag_insert_if_no_match(pqueue_tag_t* q, tag_t t);
  * @param t The tag.
  * @return An entry with the specified tag or NULL if there isn't one.
  */
-pqueue_tag_element_t* pqueue_tag_find_with_tag(pqueue_tag_t *q, tag_t t);
+pqueue_tag_element_t* pqueue_tag_find_with_tag(pqueue_tag_t* q, tag_t t);
 
 /**
  * @brief Return highest-ranking item (the one with the least tag) without removing it.
@@ -140,7 +140,7 @@ tag_t pqueue_tag_peek_tag(pqueue_tag_t* q);
 
 /**
  * @brief Pop the least-tag element from the queue.
- * 
+ *
  * If the entry was dynamically allocated, then it is now up to the caller
  * to ensure that it is freed. It will not be freed by pqueue_tag_free.
  * @param q The queue.
@@ -150,8 +150,8 @@ pqueue_tag_element_t* pqueue_tag_pop(pqueue_tag_t* q);
 
 /**
  * @brief Pop the least-tag element from the queue and return its tag.
- * 
- * If the queue is empty, return FOREVER_TAG. This function handles freeing 
+ *
+ * If the queue is empty, return FOREVER_TAG. This function handles freeing
  * the element struct if it was dynamically allocated.
  * @param q The queue.
  * @return NULL on error, otherwise the entry
@@ -160,7 +160,7 @@ tag_t pqueue_tag_pop_tag(pqueue_tag_t* q);
 
 /**
  * @brief Remove an item from the queue.
- * 
+ *
  * @param q The queue.
  * @param e The entry to remove.
  */
@@ -168,7 +168,7 @@ void pqueue_tag_remove(pqueue_tag_t* q, pqueue_tag_element_t* e);
 
 /**
  * @brief Remove items from the queue with tags up to and including the specified tag.
- * 
+ *
  * If the specified tag is FOREVER_TAG, then all items will be removed.
  * @param q The queue.
  * @param t The specified tag.
diff --git a/include/core/utils/util.h b/include/core/utils/util.h
index 070874345..2d9998a72 100644
--- a/include/core/utils/util.h
+++ b/include/core/utils/util.h
@@ -33,28 +33,20 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #ifndef UTIL_H
 #define UTIL_H
 
-#include    // Defines va_list
+#include  // Defines va_list
 #include 
-#include    // Defines int64_t
-
-// To silence warnings about a function being a candidate for format checking
-// with gcc, add an attribute.
-// The arguments are the position of the format string (starting with 1)
-// and the start of the remaining arguments, or 0 for vprintf style functions.
-#if defined(__GNUC__)
-#define ATTRIBUTE_FORMAT_PRINTF(f, s) __attribute__((format (printf, f, s)))
-#else
-#define ATTRIBUTE_FORMAT_PRINTF(f, s)
-#endif
+#include  // Defines int64_t
+
+#include "logging_macros.h"
 
 /**
  * Holds generic statistical data
  */
 typedef struct lf_stat_ll {
-    int64_t average;
-    int64_t standard_deviation;
-    int64_t variance;
-    int64_t max;
+  int64_t average;
+  int64_t standard_deviation;
+  int64_t variance;
+  int64_t max;
 } lf_stat_ll;
 
 /**
@@ -88,29 +80,6 @@ typedef struct lf_stat_ll {
 #define LF_MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
 #endif
 
-/**
- * LOG_LEVEL is set in generated code to 0 through 4 if the target
- * logging property is error, warning, info, log, or debug.
- * The default level is info (2). Currently, 0, 1, and 2 are
- * treated identically and lf_print_error, lf_print_warning, and lf_print
- * all result in printed output.
- * If log is set (3), then LOG_DEBUG messages
- * will be printed as well.
- * If debug is set (4), the LF_PRINT_DEBUG messages will
- * be printed as well.
- */
-#define LOG_LEVEL_ERROR 0
-#define LOG_LEVEL_WARNING 1
-#define LOG_LEVEL_INFO 2
-#define LOG_LEVEL_LOG 3
-#define LOG_LEVEL_DEBUG 4
-#define LOG_LEVEL_ALL 255
-
-/** Default log level. */
-#ifndef LOG_LEVEL
-#define LOG_LEVEL LOG_LEVEL_INFO
-#endif
-
 /**
  * The ID of this federate. For a non-federated execution, this will
  * be -1.  For a federated execution, it will be assigned when the generated function
@@ -124,193 +93,42 @@ extern int _lf_my_fed_id;
  */
 int lf_fed_id(void);
 
-/**
- * Report an informational message on stdout with a newline appended at the end.
- * If this execution is federated, then the message will be prefaced by identifying
- * information for the federate. The arguments are just like printf().
- */
-void lf_print(const char* format, ...) ATTRIBUTE_FORMAT_PRINTF(1, 2);
-
 /**
  * varargs alternative of "lf_print"
  */
-void lf_vprint(const char* format, va_list args)  ATTRIBUTE_FORMAT_PRINTF(1, 0);
-
-/**
- * Report an log message on stdout with the prefix "LOG: " and a newline appended
- * at the end. If this execution is federated, then the message will be prefaced by
- * identifying information for the federate. The arguments are just like printf().
- */
-void lf_print_log(const char* format, ...) ATTRIBUTE_FORMAT_PRINTF(1, 2);
+void lf_vprint(const char* format, va_list args) ATTRIBUTE_FORMAT_PRINTF(1, 0);
 
 /**
  * varargs alternative of "lf_print_log"
  */
 void lf_vprint_log(const char* format, va_list args) ATTRIBUTE_FORMAT_PRINTF(1, 0);
 
-/**
- * A macro used to print useful logging information. It can be enabled
- * by setting the target property 'logging' to 'LOG' or
- * by defining LOG_LEVEL to LOG_LEVEL_LOG or
- * LOG_LEVEL_DEBUG in the top-level preamble.
- * The input to this macro is exactly like printf: (format, ...).
- * "LOG: " is prepended to the beginning of the message
- * and a newline is appended to the end of the message.
- *
- * @note This macro is non-empty even if LOG_LEVEL is not defined in
- * user-code. This is to ensure that the compiler will still parse
- * the predicate inside (...) to prevent LF_PRINT_LOG statements
- * to fall out of sync with the rest of the code. This should have
- * a negligible impact on performance if compiler optimization
- * (e.g., -O2 for gcc) is used as long as the arguments passed to
- * it do not themselves incur significant overhead to evaluate.
- */
-#define LF_PRINT_LOG(format, ...) \
-            do { if(LOG_LEVEL >= LOG_LEVEL_LOG) { \
-                    lf_print_log(format, ##__VA_ARGS__); \
-                } } while (0)
-
-/**
- * Report an debug message on stdout with the prefix "DEBUG: " and a newline appended
- * at the end. If this execution is federated, then the message will be prefaced by
- * identifying information for the federate. The arguments are just like printf().
- */
-void lf_print_debug(const char* format, ...) ATTRIBUTE_FORMAT_PRINTF(1, 2);
-
 /**
  * varargs alternative of "lf_print_debug"
  */
 void lf_vprint_debug(const char* format, va_list args) ATTRIBUTE_FORMAT_PRINTF(1, 0);
 
-/**
- * A macro used to print useful debug information. It can be enabled
- * by setting the target property 'logging' to 'DEBUG' or
- * by defining LOG_LEVEL to 2 in the top-level preamble.
- * The input to this macro is exactly like printf: (format, ...).
- * "DEBUG: " is prepended to the beginning of the message
- * and a newline is appended to the end of the message.
- *
- * @note This macro is non-empty even if LOG_LEVEL is not defined in
- * user-code. This is to ensure that the compiler will still parse
- * the predicate inside (...) to prevent LF_PRINT_DEBUG statements
- * to fall out of sync with the rest of the code. This should have
- * a negligible impact on performance if compiler optimization
- * (e.g., -O2 for gcc) is used as long as the arguments passed to
- * it do not themselves incur significant overhead to evaluate.
- */
-#define LF_PRINT_DEBUG(format, ...) \
-            do { if(LOG_LEVEL >= LOG_LEVEL_DEBUG) { \
-                    lf_print_debug(format, ##__VA_ARGS__); \
-                } } while (0)
-
 /**
  * Print the error defined by the errno variable with the
  * specified message as a prefix, then exit with error code 1.
  * @param msg The prefix to the message.
  */
-void error(const char *msg);
-
-/**
- * Report an error with the prefix "ERROR: " and a newline appended
- * at the end.  The arguments are just like printf().
- */
-void lf_print_error(const char* format, ...) ATTRIBUTE_FORMAT_PRINTF(1, 2);
+void error(const char* msg);
 
 /**
  * varargs alternative of "lf_print_error"
  */
 void lf_vprint_error(const char* format, va_list args) ATTRIBUTE_FORMAT_PRINTF(1, 0);
 
-/**
- * Report a warning with the prefix "WARNING: " and a newline appended
- * at the end.  The arguments are just like printf().
- */
-void lf_print_warning(const char* format, ...) ATTRIBUTE_FORMAT_PRINTF(1, 2);
-
 /**
  * varargs alternative of "lf_print_warning"
  */
 void lf_vprint_warning(const char* format, va_list args) ATTRIBUTE_FORMAT_PRINTF(1, 0);
 
-/**
- * Report an error with the prefix "ERROR: " and a newline appended
- * at the end, then exit with the failure code EXIT_FAILURE.
- * The arguments are just like printf().
- */
-void lf_print_error_and_exit(const char* format, ...) ATTRIBUTE_FORMAT_PRINTF(1, 2);
-
-/**
- * Report an error and exit just like lf_print_error_and_exit(), but
- * also print the system error message associated with the error.
- */
-void lf_print_error_system_failure(const char* format, ...);
-
 /**
  * varargs alternative of "lf_print_error_and_exit"
  */
-void lf_vprint_error_and_exit(const char* format, va_list args)
-		ATTRIBUTE_FORMAT_PRINTF(1, 0);
-
-/**
- * Message print function type. The arguments passed to one of
- * these print functions are a printf-style format string followed
- * by a printf-style argument list collected into a va_list
- * (variable argument list).
- */
-typedef void(print_message_function_t)(const char*, va_list);
-
-/**
- * Register a function to display messages. After calling this,
- * all messages passed to the above print functions will be
- * printed using the specified function rather than printf
- * if their log level is greater than the specified level.
- * The level should be one of LOG_LEVEL_ERROR, LOG_LEVEL_WARNING,
- * LOG_LEVEL_INFO, LOG_LEVEL_LOG, or LOG_LEVEL_DEBUG.
- *
- * @param function The print message function or NULL to revert
- *  to using printf.
- * @param log_level The level of messages to redirect.
- */
-void lf_register_print_function(print_message_function_t* function, int log_level);
-
-/**
- * Assertion handling. LF_ASSERT can be used as a shorthand for verifying
- * a condition and calling `lf_print_error_and_exit` if it is not true.
- * The LF_ASSERT version requires that the condition evaluate to true
- * (non-zero), whereas the LF_ASSERTN version requires that the condition
- * evaluate to false (zero).
- * These are optimized to execute the condition argument but not
- * check the result if the NDEBUG flag is defined.
- * The NDEBUG flag will be defined if the user specifies `build-type: Release`
- * in the target properties of the LF program.
- * 
- * LF_ASSERT_NON_NULL can be used to verify that a pointer is not NULL.
- * It differs from LF_ASSERT in that it does nothing at all if the NDEBUG flag is defined.
- */
-#if defined(NDEBUG)
-#define LF_ASSERT(condition, format, ...) (void)(condition)
-#define LF_ASSERTN(condition, format, ...) (void)(condition)
-#define LF_ASSERT_NON_NULL(pointer)
-#else
-#define LF_ASSERT(condition, format, ...) \
-	do { \
-		if (!(condition)) { \
-				lf_print_error_and_exit(format, ##__VA_ARGS__); \
-		} \
-	} while(0)
-#define LF_ASSERTN(condition, format, ...) \
-	do { \
-		if (condition) { \
-				lf_print_error_and_exit(format, ##__VA_ARGS__); \
-		} \
-	} while(0)
-#define LF_ASSERT_NON_NULL(pointer) \
-    do { \
-        if (!(pointer)) { \
-            lf_print_error_and_exit("Assertion failed: pointer is NULL Out of memory?."); \
-        } \
-    } while(0)
-#endif // NDEBUG
+void lf_vprint_error_and_exit(const char* format, va_list args) ATTRIBUTE_FORMAT_PRINTF(1, 0);
 
 /**
  * Initialize mutex with error checking.
diff --git a/include/core/utils/vector.h b/include/core/utils/vector.h
index 281e65a59..af9acc679 100644
--- a/include/core/utils/vector.h
+++ b/include/core/utils/vector.h
@@ -2,7 +2,7 @@
  * This file defines a minimal vector (resizing array) data type.
  * It is intended to be the simplest way of storing a collection of
  * pointers that is frequently filled and then completely emptied.
- * 
+ *
  * @author Peter Donovan (peterdonovan@berkeley.edu)
  * @author Soroush Bateni (soroush@utdallas.edu)
  */
@@ -14,12 +14,12 @@
 #include 
 
 typedef struct vector_t {
-    void** start; /* The start of the underlying array. */
-    void** next;  /* The element after the last element in the underlying array.
-                        start <= next <= end. */
-    void** end;   /* The end of the underlying array. */
-    int votes_required;  /* The number of votes required to shrink this vector. */
-    int votes;    /* The number of votes to shrink this vector. */
+  void** start;       /* The start of the underlying array. */
+  void** next;        /* The element after the last element in the underlying array.
+                            start <= next <= end. */
+  void** end;         /* The end of the underlying array. */
+  int votes_required; /* The number of votes required to shrink this vector. */
+  int votes;          /* The number of votes to shrink this vector. */
 } vector_t;
 
 /**
@@ -66,17 +66,17 @@ void* vector_pop(vector_t* v);
  * is automatically expanded and filled with NULL pointers as needed.
  * If no element at `idx` has been previously set, then the value
  * pointed to by the returned pointer will be NULL.
- * 
+ *
  * @param v The vector.
  * @param idx The index into the vector.
- * 
+ *
  * @return A pointer to the element at 'idx', which is itself a pointer.
  */
 void** vector_at(vector_t* v, size_t idx);
 
 /**
  * @brief Return the size of the vector.
- * 
+ *
  * @param v Any vector
  * @return size_t  The size of the vector.
  */
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index aaf9011e8..f0b2c18bc 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -1,2 +1,3 @@
 add_library(lib schedule.c)
-target_include_directories(lib PRIVATE ${PROJECT_SOURCE_DIR}/include)
+target_link_libraries(lib PRIVATE lf::low-level-platform-api)
+target_link_libraries(lib PRIVATE lf::logging-api)
diff --git a/lib/schedule.c b/lib/schedule.c
index 28ac9b120..645bb41ec 100644
--- a/lib/schedule.c
+++ b/lib/schedule.c
@@ -15,84 +15,80 @@
 #include  // Defines memcpy.
 
 trigger_handle_t lf_schedule(void* action, interval_t offset) {
-    return lf_schedule_token((lf_action_base_t*)action, offset, NULL);
+  return lf_schedule_token((lf_action_base_t*)action, offset, NULL);
 }
 
 trigger_handle_t lf_schedule_int(void* action, interval_t extra_delay, int value) {
-    token_template_t* template = (token_template_t*)action;
-
-    // NOTE: This doesn't acquire the mutex lock in the multithreaded version
-    // until schedule_value is called. This should be OK because the element_size
-    // does not change dynamically.
-    if (template->type.element_size != sizeof(int)) {
-        lf_print_error("Action type is not an integer. element_size is %zu", template->type.element_size);
-        return -1;
-    }
-    int* container = (int*)malloc(sizeof(int));
-    *container = value;
-    return lf_schedule_value(action, extra_delay, container, 1);
+  token_template_t* template = (token_template_t*)action;
+
+  // NOTE: This doesn't acquire the mutex lock in the multithreaded version
+  // until schedule_value is called. This should be OK because the element_size
+  // does not change dynamically.
+  if (template->type.element_size != sizeof(int)) {
+    lf_print_error("Action type is not an integer. element_size is %zu", template->type.element_size);
+    return -1;
+  }
+  int* container = (int*)malloc(sizeof(int));
+  *container = value;
+  return lf_schedule_value(action, extra_delay, container, 1);
 }
 
 trigger_handle_t lf_schedule_token(void* action, interval_t extra_delay, lf_token_t* token) {
-    environment_t* env = ((lf_action_base_t*)action)->parent->environment;
-    
-    LF_CRITICAL_SECTION_ENTER(env);
-    int return_value = lf_schedule_trigger(env, ((lf_action_base_t*)action)->trigger, extra_delay, token);
-    // Notify the main thread in case it is waiting for physical time to elapse.
-    lf_notify_of_event(env);
-    LF_CRITICAL_SECTION_EXIT(env);
-    return return_value;
+  environment_t* env = ((lf_action_base_t*)action)->parent->environment;
+
+  LF_CRITICAL_SECTION_ENTER(env);
+  int return_value = lf_schedule_trigger(env, ((lf_action_base_t*)action)->trigger, extra_delay, token);
+  // Notify the main thread in case it is waiting for physical time to elapse.
+  lf_notify_of_event(env);
+  LF_CRITICAL_SECTION_EXIT(env);
+  return return_value;
 }
 
 trigger_handle_t lf_schedule_copy(void* action, interval_t offset, void* value, size_t length) {
-    if (length < 0) {
-        lf_print_error(
-            "schedule_copy():"
-            " Ignoring request to copy a value with a negative length (%zu).",
-            length
-        );
-        return -1;
-    }
-    if (value == NULL) {
-        return lf_schedule_token(action, offset, NULL);
-    }
-    environment_t* env = ((lf_action_base_t*)action)->parent->environment;
-    token_template_t* template = (token_template_t*)action;
-    if (action == NULL || template->type.element_size <= 0) {
-        lf_print_error("schedule: Invalid element size.");
-        return -1;
-    }
-    LF_CRITICAL_SECTION_ENTER(env);
-    // Initialize token with an array size of length and a reference count of 0.
-    lf_token_t* token = _lf_initialize_token(template, length);
-    // Copy the value into the newly allocated memory.
-    memcpy(token->value, value, template->type.element_size * length);
-    // The schedule function will increment the reference count.
-    trigger_handle_t result = lf_schedule_trigger(env, ((lf_action_base_t*)action)->trigger, offset, token);
-    // Notify the main thread in case it is waiting for physical time to elapse.
-    lf_notify_of_event(env);
-    LF_CRITICAL_SECTION_EXIT(env);
-    return result;
+  if (length < 0) {
+    lf_print_error("schedule_copy():"
+                   " Ignoring request to copy a value with a negative length (%zu).",
+                   length);
+    return -1;
+  }
+  if (value == NULL) {
+    return lf_schedule_token(action, offset, NULL);
+  }
+  environment_t* env = ((lf_action_base_t*)action)->parent->environment;
+  token_template_t* template = (token_template_t*)action;
+  if (action == NULL || template->type.element_size <= 0) {
+    lf_print_error("schedule: Invalid element size.");
+    return -1;
+  }
+  LF_CRITICAL_SECTION_ENTER(env);
+  // Initialize token with an array size of length and a reference count of 0.
+  lf_token_t* token = _lf_initialize_token(template, length);
+  // Copy the value into the newly allocated memory.
+  memcpy(token->value, value, template->type.element_size * length);
+  // The schedule function will increment the reference count.
+  trigger_handle_t result = lf_schedule_trigger(env, ((lf_action_base_t*)action)->trigger, offset, token);
+  // Notify the main thread in case it is waiting for physical time to elapse.
+  lf_notify_of_event(env);
+  LF_CRITICAL_SECTION_EXIT(env);
+  return result;
 }
 
 trigger_handle_t lf_schedule_value(void* action, interval_t extra_delay, void* value, int length) {
-    if (length < 0) {
-        lf_print_error(
-            "schedule_value():"
-            " Ignoring request to schedule an action with a value that has a negative length (%d).",
-            length
-        );
-        return -1;
-    }
-    token_template_t* template = (token_template_t*)action;
-    environment_t* env = ((lf_action_base_t*)action)->parent->environment;
-    LF_CRITICAL_SECTION_ENTER(env);
-    lf_token_t* token = _lf_initialize_token_with_value(template, value, length);
-    int return_value = lf_schedule_trigger(env, ((lf_action_base_t*)action)->trigger, extra_delay, token);
-    // Notify the main thread in case it is waiting for physical time to elapse.
-    lf_notify_of_event(env);
-    LF_CRITICAL_SECTION_EXIT(env);
-    return return_value;
+  if (length < 0) {
+    lf_print_error("schedule_value():"
+                   " Ignoring request to schedule an action with a value that has a negative length (%d).",
+                   length);
+    return -1;
+  }
+  token_template_t* template = (token_template_t*)action;
+  environment_t* env = ((lf_action_base_t*)action)->parent->environment;
+  LF_CRITICAL_SECTION_ENTER(env);
+  lf_token_t* token = _lf_initialize_token_with_value(template, value, length);
+  int return_value = lf_schedule_trigger(env, ((lf_action_base_t*)action)->trigger, extra_delay, token);
+  // Notify the main thread in case it is waiting for physical time to elapse.
+  lf_notify_of_event(env);
+  LF_CRITICAL_SECTION_EXIT(env);
+  return return_value;
 }
 
 /**
@@ -107,239 +103,239 @@ trigger_handle_t lf_schedule_value(void* action, interval_t extra_delay, void* v
  * @return True if the specified deadline has passed and false otherwise.
  */
 bool lf_check_deadline(void* self, bool invoke_deadline_handler) {
-    reaction_t* reaction = ((self_base_t*)self)->executing_reaction;
-    if (lf_time_physical() > (lf_time_logical(((self_base_t *)self)->environment) + reaction->deadline)) {
-        if (invoke_deadline_handler) {
-            reaction->deadline_violation_handler(self);
-        }
-        return true;
+  reaction_t* reaction = ((self_base_t*)self)->executing_reaction;
+  if (lf_time_physical() > (lf_time_logical(((self_base_t*)self)->environment) + reaction->deadline)) {
+    if (invoke_deadline_handler) {
+      reaction->deadline_violation_handler(self);
     }
-    return false;
+    return true;
+  }
+  return false;
 }
 
-trigger_handle_t lf_schedule_trigger(environment_t *env, trigger_t* trigger, interval_t extra_delay, lf_token_t* token) {
-    assert(env != GLOBAL_ENVIRONMENT);
-    if (lf_is_tag_after_stop_tag(env, env->current_tag)) {
-        // If schedule is called after stop_tag
-        // This is a critical condition.
-        _lf_done_using(token);
-        lf_print_warning("lf_schedule() called after stop tag.");
-        return 0;
-    }
+trigger_handle_t lf_schedule_trigger(environment_t* env, trigger_t* trigger, interval_t extra_delay,
+                                     lf_token_t* token) {
+  assert(env != GLOBAL_ENVIRONMENT);
+  if (lf_is_tag_after_stop_tag(env, env->current_tag)) {
+    // If schedule is called after stop_tag
+    // This is a critical condition.
+    _lf_done_using(token);
+    lf_print_warning("lf_schedule() called after stop tag.");
+    return 0;
+  }
 
-    if (extra_delay < 0LL) {
-        lf_print_warning("schedule called with a negative extra_delay " PRINTF_TIME ". Replacing with zero.", extra_delay);
-        extra_delay = 0LL;
-    }
+  if (extra_delay < 0LL) {
+    lf_print_warning("schedule called with a negative extra_delay " PRINTF_TIME ". Replacing with zero.", extra_delay);
+    extra_delay = 0LL;
+  }
 
-    LF_PRINT_DEBUG("lf_schedule_trigger: scheduling trigger %p with delay " PRINTF_TIME " and token %p.",
-            trigger, extra_delay, token);
+  LF_PRINT_DEBUG("lf_schedule_trigger: scheduling trigger %p with delay " PRINTF_TIME " and token %p.", trigger,
+                 extra_delay, token);
 
-    // Increment the reference count of the token.
-    if (token != NULL) {
-        token->ref_count++;
-        LF_PRINT_DEBUG("lf_schedule_trigger: Incremented ref_count of %p to %zu.",
-                token, token->ref_count);
-    }
+  // Increment the reference count of the token.
+  if (token != NULL) {
+    token->ref_count++;
+    LF_PRINT_DEBUG("lf_schedule_trigger: Incremented ref_count of %p to %zu.", token, token->ref_count);
+  }
 
-    // The trigger argument could be null, meaning that nothing is triggered.
-    // Doing this after incrementing the reference count ensures that the
-    // payload will be freed, if there is one.
-    if (trigger == NULL) {
-        _lf_done_using(token);
-        return 0;
-    }
+  // The trigger argument could be null, meaning that nothing is triggered.
+  // Doing this after incrementing the reference count ensures that the
+  // payload will be freed, if there is one.
+  if (trigger == NULL) {
+    _lf_done_using(token);
+    return 0;
+  }
 
-    // Compute the tag (the logical timestamp for the future event).
-    // We first do this assuming it is logical action and then, if it is a
-    // physical action, modify it if physical time exceeds the result.
-    interval_t delay = extra_delay;
-    // Add the offset if this is not a timer because, in that case,
-    // it is the minimum delay.
-    if (!trigger->is_timer) {
-        delay += trigger->offset;
-    }
-    tag_t intended_tag = (tag_t){.time = env->current_tag.time + delay, .microstep = 0};
-    
-    LF_PRINT_DEBUG("lf_schedule_trigger: env->current_tag.time = " PRINTF_TIME ". Total logical delay = " PRINTF_TIME "",
-            env->current_tag.time, delay);
-    interval_t min_spacing = trigger->period;
-
-    event_t* e = lf_get_new_event(env);
-
-    // Initialize the next pointer.
-    e->next = NULL;
-
-    // Set the payload.
-    e->token = token;
-
-    // Make sure the event points to this trigger so when it is
-    // dequeued, it will trigger this trigger.
-    e->trigger = trigger;
-
-    // If the trigger is physical, then we need to check whether
-    // physical time is larger than the intended time and, if so,
-    // modify the intended time.
-    if (trigger->is_physical) {
-        // Get the current physical time and assign it as the intended time.
-        intended_tag.time = lf_time_physical() + delay;
-    } else {
-        // FIXME: We need to verify that we are executing within a reaction?
-        // See reactor_threaded.
-        // If a logical action is scheduled asynchronously (which should never be
-        // done) the computed tag can be smaller than the current tag, in which case
-        // it needs to be adjusted.
-        // FIXME: This can go away once:
-        // - we have eliminated the possibility to have a negative additional delay; and
-        // - we detect the asynchronous use of logical actions
-        #ifndef NDEBUG
-        if (intended_tag.time < env->current_tag.time) {
-            lf_print_warning("Attempting to schedule an event earlier than current time by " PRINTF_TIME " nsec! "
-                    "Revising to the current time " PRINTF_TIME ".",
-                    env->current_tag.time - intended_tag.time, env->current_tag.time);
-            intended_tag.time = env->current_tag.time;
-        }
-        #endif
+  // Compute the tag (the logical timestamp for the future event).
+  // We first do this assuming it is a logical action and then, if it is a
+  // physical action, modify it if physical time exceeds the result.
+  interval_t delay = extra_delay;
+  // Add the offset if this is not a timer because, in that case,
+  // it is the minimum delay.
+  if (!trigger->is_timer) {
+    delay += trigger->offset;
+  }
+  tag_t intended_tag = (tag_t){.time = env->current_tag.time + delay, .microstep = 0};
+
+  LF_PRINT_DEBUG("lf_schedule_trigger: env->current_tag.time = " PRINTF_TIME ". Total logical delay = " PRINTF_TIME "",
+                 env->current_tag.time, delay);
+  interval_t min_spacing = trigger->period;
+
+  event_t* e = lf_get_new_event(env);
+
+  // Initialize the next pointer.
+  e->next = NULL;
+
+  // Set the payload.
+  e->token = token;
+
+  // Make sure the event points to this trigger so when it is
+  // dequeued, it will trigger this trigger.
+  e->trigger = trigger;
+
+  // If the trigger is physical, then we need to check whether
+  // physical time is larger than the intended time and, if so,
+  // modify the intended time.
+  if (trigger->is_physical) {
+    // Get the current physical time and assign it as the intended time.
+    intended_tag.time = lf_time_physical() + delay;
+  } else {
+// FIXME: We need to verify that we are executing within a reaction?
+// See reactor_threaded.
+// If a logical action is scheduled asynchronously (which should never be
+// done) the computed tag can be smaller than the current tag, in which case
+// it needs to be adjusted.
+// FIXME: This can go away once:
+// - we have eliminated the possibility to have a negative additional delay; and
+// - we detect the asynchronous use of logical actions
+#ifndef NDEBUG
+    if (intended_tag.time < env->current_tag.time) {
+      lf_print_warning("Attempting to schedule an event earlier than current time by " PRINTF_TIME " nsec! "
+                       "Revising to the current time " PRINTF_TIME ".",
+                       env->current_tag.time - intended_tag.time, env->current_tag.time);
+      intended_tag.time = env->current_tag.time;
     }
+#endif
+  }
 
 #ifdef FEDERATED_DECENTRALIZED
-    // Event inherits the original intended_tag of the trigger
-    // set by the network stack (or the default, which is (NEVER,0))
-    e->intended_tag = trigger->intended_tag;
+  // Event inherits the original intended_tag of the trigger
+  // set by the network stack (or the default, which is (NEVER,0))
+  e->intended_tag = trigger->intended_tag;
 #endif
 
-    // Check for conflicts (a queued event with the same trigger and time).
-    if (min_spacing <= 0) {
-        // No minimum spacing defined.
-        e->time = intended_tag.time;
-        event_t* found = (event_t *)pqueue_find_equal_same_priority(env->event_q, e);
-        // Check for conflicts. Let events pile up in super dense time.
+  // Check for conflicts (a queued event with the same trigger and time).
+  if (min_spacing <= 0) {
+    // No minimum spacing defined.
+    e->time = intended_tag.time;
+    event_t* found = (event_t*)pqueue_find_equal_same_priority(env->event_q, e);
+    // Check for conflicts. Let events pile up in super dense time.
+    if (found != NULL) {
+      intended_tag.microstep++;
+      // Skip to the last node in the linked list.
+      while (found->next != NULL) {
+        found = found->next;
+        intended_tag.microstep++;
+      }
+      if (lf_is_tag_after_stop_tag(env, intended_tag)) {
+        LF_PRINT_DEBUG("Attempt to schedule an event after stop_tag was rejected.");
+        // Scheduling an event will incur a microstep
+        // after the stop tag.
+        lf_recycle_event(env, e);
+        return 0;
+      }
+      // Hook the event into the list.
+      found->next = e;
+      trigger->last_tag = intended_tag;
+      return (0); // FIXME: return value
+    }
+    // If there are no conflicts, schedule as usual. If intended time is
+    // equal to the current logical time, the event will effectively be
+    // scheduled at the next microstep.
+  } else if (!trigger->is_timer && trigger->last_tag.time != NEVER) {
+    // There is a min_spacing and there exists a previously
+    // scheduled event. It determines the
+    // earliest time at which the new event can be scheduled.
+    // Check to see whether the event is too early.
+    instant_t earliest_time = trigger->last_tag.time + min_spacing;
+    LF_PRINT_DEBUG("There is a previously scheduled event; earliest possible time "
+                   "with min spacing: " PRINTF_TIME,
+                   earliest_time);
+    // If the event is early, see which policy applies.
+    if (earliest_time > intended_tag.time) {
+      LF_PRINT_DEBUG("Event is early.");
+      switch (trigger->policy) {
+      case drop:
+        LF_PRINT_DEBUG("Policy is drop. Dropping the event.");
+        // Recycle the new event and decrement the
+        // reference count of the token.
+        _lf_done_using(token);
+        lf_recycle_event(env, e);
+        return (0);
+      case replace:
+        LF_PRINT_DEBUG("Policy is replace. Replacing the previous event.");
+        // If the event with the previous time is still on the event
+        // queue, then replace the token.  To find this event, we have
+        // to construct a dummy event_t struct.
+        event_t* dummy = lf_get_new_event(env);
+        dummy->next = NULL;
+        dummy->trigger = trigger;
+        dummy->time = trigger->last_tag.time;
+        event_t* found = (event_t*)pqueue_find_equal_same_priority(env->event_q, dummy);
+
         if (found != NULL) {
-            intended_tag.microstep++;
-            // Skip to the last node in the linked list.
-            while(found->next != NULL) {
-                found = found->next;
-                intended_tag.microstep++;
-            }
-            if (lf_is_tag_after_stop_tag(env, intended_tag)) {
-                LF_PRINT_DEBUG("Attempt to schedule an event after stop_tag was rejected.");
-                // Scheduling an event will incur a microstep
-                // after the stop tag.
-                lf_recycle_event(env, e);
-                return 0;
-            }
-            // Hook the event into the list.
-            found->next = e;
-            trigger->last_tag = intended_tag;
-            return(0); // FIXME: return value
-        }
-        // If there are not conflicts, schedule as usual. If intended time is
-        // equal to the current logical time, the event will effectively be
-        // scheduled at the next microstep.
-    } else if (!trigger->is_timer && trigger->last_tag.time != NEVER) {
-        // There is a min_spacing and there exists a previously
-        // scheduled event. It determines the
-        // earliest time at which the new event can be scheduled.
-        // Check to see whether the event is too early.
-        instant_t earliest_time = trigger->last_tag.time + min_spacing;
-        LF_PRINT_DEBUG("There is a previously scheduled event; earliest possible time "
-                "with min spacing: " PRINTF_TIME,
-                earliest_time);
-        // If the event is early, see which policy applies.
-        if (earliest_time > intended_tag.time) {
-            LF_PRINT_DEBUG("Event is early.");
-            switch(trigger->policy) {
-                case drop:
-                    LF_PRINT_DEBUG("Policy is drop. Dropping the event.");
-                    // Recycle the new event and decrement the
-                    // reference count of the token.
-                    _lf_done_using(token);
-                    lf_recycle_event(env, e);
-                    return(0);
-                case replace:
-                    LF_PRINT_DEBUG("Policy is replace. Replacing the previous event.");
-                    // If the event with the previous time is still on the event
-                    // queue, then replace the token.  To find this event, we have
-                    // to construct a dummy event_t struct.
-                    event_t* dummy = lf_get_new_event(env);
-                    dummy->next = NULL;
-                    dummy->trigger = trigger;
-                    dummy->time = trigger->last_tag.time;
-                    event_t* found = (event_t *)pqueue_find_equal_same_priority(env->event_q, dummy);
-
-                    if (found != NULL) {
-                        // Recycle the existing token and the new event
-                        // and update the token of the existing event.
-                        lf_replace_token(found, token);
-                        lf_recycle_event(env, e);
-                        lf_recycle_event(env, dummy);
-                        // Leave the last_tag the same.
-                        return(0);
-                    }
-                    lf_recycle_event(env, dummy);
-
-                    // If the preceding event _has_ been handled, then adjust
-                    // the tag to defer the event.
-                    intended_tag = (tag_t){.time = earliest_time, .microstep = 0};
-                    break;
-                default:
-                    // Default policy is defer
-                    intended_tag = (tag_t){.time = earliest_time, .microstep = 0};
-                    break;
-            }
+          // Recycle the existing token and the new event
+          // and update the token of the existing event.
+          lf_replace_token(found, token);
+          lf_recycle_event(env, e);
+          lf_recycle_event(env, dummy);
+          // Leave the last_tag the same.
+          return (0);
         }
-    }
+        lf_recycle_event(env, dummy);
 
-    // Check if the intended time is in the future
-    // This is a sanity check for the logic above
-    // FIXME: This is a development assertion and might
-    // not be necessary for end-user LF programs
-    #ifndef NDEBUG
-    if (intended_tag.time < env->current_tag.time) {
-        lf_print_error("Attempting to schedule an event earlier than current time by " PRINTF_TIME " nsec! "
-                "Revising to the current time " PRINTF_TIME ".",
-                env->current_tag.time - intended_tag.time, env->current_tag.time);
-        intended_tag.time = env->current_tag.time;
+        // If the preceding event _has_ been handled, then adjust
+        // the tag to defer the event.
+        intended_tag = (tag_t){.time = earliest_time, .microstep = 0};
+        break;
+      default:
+        // Default policy is defer
+        intended_tag = (tag_t){.time = earliest_time, .microstep = 0};
+        break;
+      }
     }
-    #endif
+  }
 
-    // Set the tag of the event.
-    e->time = intended_tag.time;
+// Check if the intended time is in the future
+// This is a sanity check for the logic above
+// FIXME: This is a development assertion and might
+// not be necessary for end-user LF programs
+#ifndef NDEBUG
+  if (intended_tag.time < env->current_tag.time) {
+    lf_print_error("Attempting to schedule an event earlier than current time by " PRINTF_TIME " nsec! "
+                   "Revising to the current time " PRINTF_TIME ".",
+                   env->current_tag.time - intended_tag.time, env->current_tag.time);
+    intended_tag.time = env->current_tag.time;
+  }
+#endif
 
-    // Do not schedule events if if the event time is past the stop time
-    // (current microsteps are checked earlier).
-    LF_PRINT_DEBUG("Comparing event with elapsed time " PRINTF_TIME " against stop time " PRINTF_TIME ".", e->time - lf_time_start(), env->stop_tag.time - lf_time_start());
-    if (e->time > env->stop_tag.time) {
-        LF_PRINT_DEBUG("lf_schedule_trigger: event time is past the timeout. Discarding event.");
-        _lf_done_using(token);
-        lf_recycle_event(env, e);
-        return(0);
-    }
+  // Set the tag of the event.
+  e->time = intended_tag.time;
 
-    // Store the time in order to check the min spacing
-    // between this and any following event.
-    trigger->last_tag = intended_tag;
-
-    // Queue the event.
-    // NOTE: There is no need for an explicit microstep because
-    // when this is called, all events at the current tag
-    // (time and microstep) have been pulled from the queue,
-    // and any new events added at this tag will go into the reaction_q
-    // rather than the event_q, so anything put in the event_q with this
-    // same time will automatically be executed at the next microstep.
-    LF_PRINT_LOG("Inserting event in the event queue with elapsed time " PRINTF_TIME ".",
-            e->time - lf_time_start());
-    pqueue_insert(env->event_q, e);
-
-    tracepoint_schedule(env->trace, trigger, e->time - env->current_tag.time);
-
-    // FIXME: make a record of handle and implement unschedule.
-    // NOTE: Rather than wrapping around to get a negative number,
-    // we reset the handle on the assumption that much earlier
-    // handles are irrelevant.
-    trigger_handle_t return_value = env->_lf_handle++;
-    if (env->_lf_handle < 0) {
-        env->_lf_handle = 1;
-    }
-    return return_value;
+  // Do not schedule events if the event time is past the stop time
+  // (current microsteps are checked earlier).
+  LF_PRINT_DEBUG("Comparing event with elapsed time " PRINTF_TIME " against stop time " PRINTF_TIME ".",
+                 e->time - lf_time_start(), env->stop_tag.time - lf_time_start());
+  if (e->time > env->stop_tag.time) {
+    LF_PRINT_DEBUG("lf_schedule_trigger: event time is past the timeout. Discarding event.");
+    _lf_done_using(token);
+    lf_recycle_event(env, e);
+    return (0);
+  }
+
+  // Store the time in order to check the min spacing
+  // between this and any following event.
+  trigger->last_tag = intended_tag;
+
+  // Queue the event.
+  // NOTE: There is no need for an explicit microstep because
+  // when this is called, all events at the current tag
+  // (time and microstep) have been pulled from the queue,
+  // and any new events added at this tag will go into the reaction_q
+  // rather than the event_q, so anything put in the event_q with this
+  // same time will automatically be executed at the next microstep.
+  LF_PRINT_LOG("Inserting event in the event queue with elapsed time " PRINTF_TIME ".", e->time - lf_time_start());
+  pqueue_insert(env->event_q, e);
+
+  tracepoint_schedule(env, trigger, e->time - env->current_tag.time);
+
+  // FIXME: make a record of handle and implement unschedule.
+  // NOTE: Rather than wrapping around to get a negative number,
+  // we reset the handle on the assumption that much earlier
+  // handles are irrelevant.
+  trigger_handle_t return_value = env->_lf_handle++;
+  if (env->_lf_handle < 0) {
+    env->_lf_handle = 1;
+  }
+  return return_value;
 }
diff --git a/lingua-franca-ref.txt b/lingua-franca-ref.txt
index e7dda3def..1f7391f92 100644
--- a/lingua-franca-ref.txt
+++ b/lingua-franca-ref.txt
@@ -1 +1 @@
-windows-c11
+master
diff --git a/logging/api/CMakeLists.txt b/logging/api/CMakeLists.txt
new file mode 100644
index 000000000..540c7992b
--- /dev/null
+++ b/logging/api/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_library(lf-logging-api INTERFACE)
+target_include_directories(lf-logging-api INTERFACE ${CMAKE_CURRENT_LIST_DIR})
+add_library(lf::logging-api ALIAS lf-logging-api)
diff --git a/logging/api/logging.h b/logging/api/logging.h
new file mode 100644
index 000000000..77444b06a
--- /dev/null
+++ b/logging/api/logging.h
@@ -0,0 +1,97 @@
+#include <stdarg.h>
+
+// To silence warnings about a function being a candidate for format checking
+// with gcc, add an attribute.
+// The arguments are the position of the format string (starting with 1)
+// and the start of the remaining arguments, or 0 for vprintf style functions.
+#if defined(__GNUC__)
+#define ATTRIBUTE_FORMAT_PRINTF(f, s) __attribute__((format(printf, f, s)))
+#else
+#define ATTRIBUTE_FORMAT_PRINTF(f, s)
+#endif
+
+/**
+ * LOG_LEVEL is set in generated code to 0 through 4 if the target
+ * logging property is error, warning, info, log, or debug.
+ * The default level is info (2). Currently, 0, 1, and 2 are
+ * treated identically and lf_print_error, lf_print_warning, and lf_print
+ * all result in printed output.
+ * If log is set (3), then LOG_DEBUG messages
+ * will be printed as well.
+ * If debug is set (4), the LF_PRINT_DEBUG messages will
+ * be printed as well.
+ */
+#define LOG_LEVEL_ERROR 0
+#define LOG_LEVEL_WARNING 1
+#define LOG_LEVEL_INFO 2
+#define LOG_LEVEL_LOG 3
+#define LOG_LEVEL_DEBUG 4
+#define LOG_LEVEL_ALL 255
+
+/**
+ * Report an informational message on stdout with a newline appended at the end.
+ * If this execution is federated, then the message will be prefaced by identifying
+ * information for the federate. The arguments are just like printf().
+ */
+void lf_print(const char* format, ...) ATTRIBUTE_FORMAT_PRINTF(1, 2);
+
+/**
+ * Report a log message on stdout with the prefix "LOG: " and a newline appended
+ * at the end. If this execution is federated, then the message will be prefaced by
+ * identifying information for the federate. The arguments are just like printf().
+ */
+void lf_print_log(const char* format, ...) ATTRIBUTE_FORMAT_PRINTF(1, 2);
+
+/**
+ * Report a debug message on stdout with the prefix "DEBUG: " and a newline appended
+ * at the end. If this execution is federated, then the message will be prefaced by
+ * identifying information for the federate. The arguments are just like printf().
+ */
+void lf_print_debug(const char* format, ...) ATTRIBUTE_FORMAT_PRINTF(1, 2);
+
+/**
+ * Report an error with the prefix "ERROR: " and a newline appended
+ * at the end.  The arguments are just like printf().
+ */
+void lf_print_error(const char* format, ...) ATTRIBUTE_FORMAT_PRINTF(1, 2);
+
+/**
+ * Report a warning with the prefix "WARNING: " and a newline appended
+ * at the end.  The arguments are just like printf().
+ */
+void lf_print_warning(const char* format, ...) ATTRIBUTE_FORMAT_PRINTF(1, 2);
+
+/**
+ * Report an error with the prefix "ERROR: " and a newline appended
+ * at the end, then exit with the failure code EXIT_FAILURE.
+ * The arguments are just like printf().
+ */
+void lf_print_error_and_exit(const char* format, ...) ATTRIBUTE_FORMAT_PRINTF(1, 2);
+
+/**
+ * Report an error and exit just like lf_print_error_and_exit(), but
+ * also print the system error message associated with the error.
+ */
+void lf_print_error_system_failure(const char* format, ...);
+
+/**
+ * Message print function type. The arguments passed to one of
+ * these print functions are a printf-style format string followed
+ * by a printf-style argument list collected into a va_list
+ * (variable argument list).
+ */
+typedef void(print_message_function_t)(const char*, va_list);
+
+/**
+ * Register a function to display messages. After calling this,
+ * all messages passed to the above print functions will be
+ * printed using the specified function rather than printf
+ * if their log level is greater than the specified level.
+ * The level should be one of LOG_LEVEL_ERROR, LOG_LEVEL_WARNING,
+ * LOG_LEVEL_INFO, LOG_LEVEL_LOG, or LOG_LEVEL_DEBUG.
+ *
+ * @param function The print message function or NULL to revert
+ *  to using printf.
+ * @param log_level The level of messages to redirect.
+ */
+void lf_register_print_function(print_message_function_t* function, int log_level);
diff --git a/logging/api/logging_macros.h b/logging/api/logging_macros.h
new file mode 100644
index 000000000..73939f576
--- /dev/null
+++ b/logging/api/logging_macros.h
@@ -0,0 +1,102 @@
+#include "logging.h"
+
+/**
+ * Non-C implementations (which cannot benefit from the C preprocessor) should
+ * ignore this file, or merely use it as a suggestion for similar behavior
+ * that they should implement using whatever metaprogramming facilities their
+ * implementation provides in place of the preprocessor.
+ */
+
+/** Default log level. */
+#ifndef LOG_LEVEL
+#define LOG_LEVEL LOG_LEVEL_INFO
+#endif
+
+/**
+ * A macro used to print useful logging information. It can be enabled
+ * by setting the target property 'logging' to 'LOG' or
+ * by defining LOG_LEVEL to LOG_LEVEL_LOG or
+ * LOG_LEVEL_DEBUG in the top-level preamble.
+ * The input to this macro is exactly like printf: (format, ...).
+ * "LOG: " is prepended to the beginning of the message
+ * and a newline is appended to the end of the message.
+ *
+ * @note This macro is non-empty even if LOG_LEVEL is not defined in
+ * user-code. This is to ensure that the compiler will still parse
+ * the predicate inside (...) to prevent LF_PRINT_LOG statements
+ * to fall out of sync with the rest of the code. This should have
+ * a negligible impact on performance if compiler optimization
+ * (e.g., -O2 for gcc) is used as long as the arguments passed to
+ * it do not themselves incur significant overhead to evaluate.
+ */
+#define LF_PRINT_LOG(format, ...)                                                                                      \
+  do {                                                                                                                 \
+    if (LOG_LEVEL >= LOG_LEVEL_LOG) {                                                                                  \
+      lf_print_log(format, ##__VA_ARGS__);                                                                             \
+    }                                                                                                                  \
+  } while (0)
+
+/**
+ * A macro used to print useful debug information. It can be enabled
+ * by setting the target property 'logging' to 'DEBUG' or
+ * by defining LOG_LEVEL to 2 in the top-level preamble.
+ * The input to this macro is exactly like printf: (format, ...).
+ * "DEBUG: " is prepended to the beginning of the message
+ * and a newline is appended to the end of the message.
+ *
+ * @note This macro is non-empty even if LOG_LEVEL is not defined in
+ * user-code. This is to ensure that the compiler will still parse
+ * the predicate inside (...) to prevent LF_PRINT_DEBUG statements
+ * to fall out of sync with the rest of the code. This should have
+ * a negligible impact on performance if compiler optimization
+ * (e.g., -O2 for gcc) is used as long as the arguments passed to
+ * it do not themselves incur significant overhead to evaluate.
+ */
+#define LF_PRINT_DEBUG(format, ...)                                                                                    \
+  do {                                                                                                                 \
+    if (LOG_LEVEL >= LOG_LEVEL_DEBUG) {                                                                                \
+      lf_print_debug(format, ##__VA_ARGS__);                                                                           \
+    }                                                                                                                  \
+  } while (0)
+
+/**
+ * Assertion handling. LF_ASSERT can be used as a shorthand for verifying
+ * a condition and calling `lf_print_error_and_exit` if it is not true.
+ * The LF_ASSERT version requires that the condition evaluate to true
+ * (non-zero), whereas the LF_ASSERTN version requires that the condition
+ * evaluate to false (zero).
+ * These are optimized to execute the condition argument but not
+ * check the result if the NDEBUG flag is defined.
+ * The NDEBUG flag will be defined if the user specifies `build-type: Release`
+ * in the target properties of the LF program.
+ *
+ * LF_ASSERT_NON_NULL can be used to verify that a pointer is not NULL.
+ * It differs from LF_ASSERT in that it does nothing at all if the NDEBUG flag is defined.
+ */
+#if defined(NDEBUG)
+#define LF_ASSERT(condition, format, ...) (void)(condition)
+#define LF_ASSERTN(condition, format, ...) (void)(condition)
+#define LF_ASSERT_NON_NULL(pointer)
+#else
+#define LF_ASSERT(condition, format, ...)                                                                              \
+  do {                                                                                                                 \
+    if (!(condition)) {                                                                                                \
+      lf_print_error_and_exit("`" format "`. Failed assertion in %s:%d(%s):(" #condition ") != true`", ##__VA_ARGS__,  \
+                              __FILE__, __LINE__, __func__);                                                           \
+    }                                                                                                                  \
+  } while (0)
+#define LF_ASSERTN(condition, format, ...)                                                                             \
+  do {                                                                                                                 \
+    if (condition) {                                                                                                   \
+      lf_print_error_and_exit("`" format "`. Failed assertion in %s:%d(%s):(" #condition ") != false`", ##__VA_ARGS__, \
+                              __FILE__, __LINE__, __func__);                                                           \
+    }                                                                                                                  \
+  } while (0)
+#define LF_ASSERT_NON_NULL(pointer)                                                                                    \
+  do {                                                                                                                 \
+    if (!(pointer)) {                                                                                                  \
+      lf_print_error_and_exit("`Out of memory?` Assertion failed in %s:%d(%s):`" #pointer " == NULL`", __FILE__,       \
+                              __LINE__, __func__);                                                                     \
+    }                                                                                                                  \
+  } while (0)
+#endif // NDEBUG
diff --git a/low_level_platform/README.md b/low_level_platform/README.md
new file mode 100644
index 000000000..e9934271c
--- /dev/null
+++ b/low_level_platform/README.md
@@ -0,0 +1,8 @@
+This sub-project defines the platform abstraction used by reactor-c.
+
+It exposes an interface that does include compile-time constructs such as
+typedefs and preprocessor definitions. Use the `platform` subproject if the
+simplified interface that appears there is sufficient.
+
+Strongly prefer to depend on the `platform` subproject if the module you are
+building needs to be compiled using a separate toolchain.
diff --git a/low_level_platform/api/CMakeLists.txt b/low_level_platform/api/CMakeLists.txt
new file mode 100644
index 000000000..b4598ed9c
--- /dev/null
+++ b/low_level_platform/api/CMakeLists.txt
@@ -0,0 +1,12 @@
+add_library(lf-low-level-platform-api INTERFACE)
+target_include_directories(lf-low-level-platform-api INTERFACE ${CMAKE_CURRENT_LIST_DIR})
+add_library(lf::low-level-platform-api ALIAS lf-low-level-platform-api)
+target_link_libraries(lf-low-level-platform-api INTERFACE lf::tag-api)
+
+if(${CMAKE_SYSTEM_NAME} STREQUAL "Nrf52")
+    target_compile_definitions(lf-low-level-platform-api INTERFACE PLATFORM_NRF52)
+elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Zephyr")
+    target_compile_definitions(lf-low-level-platform-api INTERFACE PLATFORM_ZEPHYR)
+elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Rp2040")
+    target_compile_definitions(lf-low-level-platform-api INTERFACE PLATFORM_RP2040)
+endif()
diff --git a/include/core/platform.h b/low_level_platform/api/low_level_platform.h
similarity index 70%
rename from include/core/platform.h
rename to low_level_platform/api/low_level_platform.h
index 5c05ed1f2..e37a166a2 100644
--- a/include/core/platform.h
+++ b/low_level_platform/api/low_level_platform.h
@@ -19,7 +19,7 @@ extern "C" {
 
 #include "tag.h"
 #include 
-#include "lf_atomic.h"
+#include "platform/lf_atomic.h"
 
 // Forward declarations
 typedef struct environment_t environment_t;
@@ -42,62 +42,63 @@ int lf_critical_section_enter(environment_t* env);
  */
 int lf_critical_section_exit(environment_t* env);
 
-
-
 #if defined(PLATFORM_ARDUINO)
-    #include "platform/lf_arduino_support.h"
+#include "platform/lf_arduino_support.h"
 #elif defined(PLATFORM_ZEPHYR)
-    #include "platform/lf_zephyr_support.h"
+#include "platform/lf_zephyr_support.h"
 #elif defined(PLATFORM_NRF52)
-    #include "platform/lf_nrf52_support.h"
+#include "platform/lf_nrf52_support.h"
 #elif defined(PLATFORM_RP2040)
-    #include "platform/lf_rp2040_support.h"
+#include "platform/lf_rp2040_support.h"
 #elif defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__NT__)
-   // Windows platforms
-   #include "lf_windows_support.h"
+// Windows platforms
+#include "platform/lf_windows_support.h"
 #elif __APPLE__
-    // Apple platforms
-    #include "lf_macos_support.h"
+// Apple platforms
+#include "platform/lf_macos_support.h"
 #elif __linux__
-    // Linux
-    #include "lf_linux_support.h"
+// Linux
+#include "platform/lf_linux_support.h"
 #elif __unix__ // all unices not caught above
-    // Unix
-    #include "lf_POSIX_threads_support.h"
+// Unix
+#include "platform/lf_POSIX_threads_support.h"
 #elif defined(_POSIX_VERSION)
-    // POSIX
-    #include "lf_POSIX_threads_support.h"
+// POSIX
+#include "platform/lf_POSIX_threads_support.h"
 #elif defined(__riscv) || defined(__riscv__)
-    // RISC-V (see https://github.com/riscv/riscv-toolchain-conventions)
-    #error "RISC-V not supported"
+// RISC-V (see https://github.com/riscv/riscv-toolchain-conventions)
+#error "RISC-V not supported"
 #else
 #error "Platform not supported"
 #endif
 
 #define LF_TIMEOUT 1
 
-
 // To support the single-threaded runtime, we need the following functions. They
 //  are not required by the threaded runtime and is thus hidden behind a #ifdef.
-#if defined (LF_SINGLE_THREADED)
-    typedef void lf_mutex_t;
-    /** 
-     * @brief Disable interrupts with support for nested calls
-     * @return 0 on success
-     */
-    int lf_disable_interrupts_nested();
-    /**
-     * @brief  Enable interrupts after potentially multiple callse to `lf_disable_interrupts_nested`
-     * @return 0 on success
-     */
-    int lf_enable_interrupts_nested();
-
-    /**
-     * @brief Notify sleeping single-threaded context of new event
-     * @return 0 on success
-     */
-    int _lf_single_threaded_notify_of_event();
-#else 
+#if defined(LF_SINGLE_THREADED)
+typedef void lf_mutex_t;
+/**
+ * @brief Disable interrupts with support for nested calls
+ * @return 0 on success
+ */
+int lf_disable_interrupts_nested();
+/**
+ * @brief Enable interrupts after potentially multiple calls to `lf_disable_interrupts_nested`
+ * @return 0 on success
+ */
+int lf_enable_interrupts_nested();
+
+/**
+ * @brief Notify sleeping single-threaded context of new event
+ * @return 0 on success
+ */
+int _lf_single_threaded_notify_of_event();
+
+int lf_mutex_unlock(lf_mutex_t* mutex);
+int lf_mutex_init(lf_mutex_t* mutex);
+int lf_mutex_lock(lf_mutex_t* mutex);
+#else
 // For platforms with threading support, the following functions
 // abstract the API so that the LF runtime remains portable.
 
@@ -113,7 +114,7 @@ int lf_available_cores();
  * @return 0 on success, platform-specific error number otherwise.
  *
  */
-int lf_thread_create(lf_thread_t* thread, void *(*lf_thread) (void *), void* arguments);
+int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments);
 
 /**
  * Make calling thread wait for termination of the thread.  The
@@ -186,7 +192,34 @@ int lf_cond_wait(lf_cond_t* cond);
  *  number otherwise.
  */
 int _lf_cond_timedwait(lf_cond_t* cond, instant_t wakeup_time);
+
+/**
+ * @brief Cross-platform version of the C11 thread_local keyword.
+ */
+#ifndef thread_local
+#if __STDC_VERSION__ >= 201112 && !defined __STDC_NO_THREADS__
+#define thread_local _Thread_local
+#elif defined _WIN32 && (defined _MSC_VER || defined __ICL || defined __DMC__ || defined __BORLANDC__)
+#define thread_local __declspec(thread)
+/* note that ICC (linux) and Clang are covered by __GNUC__ */
+#elif defined __GNUC__ || defined __SUNPRO_C || defined __xlC__
+#define thread_local __thread
+#else
+#error "Cannot define thread_local"
 #endif
+#endif // thread_local
+
+/**
+ * @brief The ID of the current thread. The only guarantee is that these IDs will be a contiguous range of numbers
+ * starting at 0.
+ */
+int lf_thread_id();
+
+/**
+ * @brief Initialize the thread ID for the current thread.
+ */
+void initialize_lf_thread_id();
+#endif // !defined(LF_SINGLE_THREADED)
 
 /**
  * Initialize the LF clock. Must be called before using other clock-related APIs.
@@ -197,7 +230,7 @@ void _lf_initialize_clock(void);
  * Fetch the value of an internal (and platform-specific) physical clock.
  * Ideally, the underlying platform clock should be monotonic. However, the core
  * lib enforces monotonicity at higher level APIs (see clock.h).
- * 
+ *
  * This should not be used directly as it does not apply clock synchronization
  * offsets.
  *
@@ -207,7 +240,7 @@ int _lf_clock_gettime(instant_t* t);
 
 /**
  * Pause execution for a given duration.
- * 
+ *
  * @return 0 for success, or -1 for failure.
  */
 int lf_sleep(interval_t sleep_duration);
@@ -228,11 +261,11 @@ int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_ti
  * Macros for marking function as deprecated
  */
 #ifdef __GNUC__
-    #define DEPRECATED(X) X __attribute__((deprecated))
+#define DEPRECATED(X) X __attribute__((deprecated))
 #elif defined(_MSC_VER)
-    #define DEPRECATED(X) __declspec(deprecated) X
+#define DEPRECATED(X) __declspec(deprecated) X
 #else
-    #define DEPRECATED(X) X
+#define DEPRECATED(X) X
 #endif
 
 /**
diff --git a/include/core/platform/arduino_mbed/ConditionWrapper.h b/low_level_platform/api/platform/arduino_mbed/ConditionWrapper.h
similarity index 87%
rename from include/core/platform/arduino_mbed/ConditionWrapper.h
rename to low_level_platform/api/platform/arduino_mbed/ConditionWrapper.h
index 5eaedc494..94f6bb4a2 100644
--- a/include/core/platform/arduino_mbed/ConditionWrapper.h
+++ b/low_level_platform/api/platform/arduino_mbed/ConditionWrapper.h
@@ -23,7 +23,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 ***************/
 
 /* Adds condition variable support in RTOS-enabled Arduino Boards (MBED)
- *  
+ *
  *  @author{Anirudh Rengarajan }
  */
 
@@ -36,12 +36,12 @@ struct condition;
 extern "C" {
 #endif
 
-    void* condition_new(void*);
-    void condition_delete(void*);
-    int condition_wait_for(void*, uint64_t);
-    int condition_wait(void*);
-    void condition_notify_one(void*);
-    void condition_notify_all(void*);
+void* condition_new(void*);
+void condition_delete(void*);
+int condition_wait_for(void*, uint64_t);
+int condition_wait(void*);
+void condition_notify_one(void*);
+void condition_notify_all(void*);
 
 #ifdef __cplusplus
 }
diff --git a/include/core/platform/arduino_mbed/MutexWrapper.h b/low_level_platform/api/platform/arduino_mbed/MutexWrapper.h
similarity index 91%
rename from include/core/platform/arduino_mbed/MutexWrapper.h
rename to low_level_platform/api/platform/arduino_mbed/MutexWrapper.h
index 7eb891dd7..89cb236f5 100644
--- a/include/core/platform/arduino_mbed/MutexWrapper.h
+++ b/low_level_platform/api/platform/arduino_mbed/MutexWrapper.h
@@ -23,7 +23,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 ***************/
 
 /* Adds mutex support in RTOS-enabled Arduino Boards (MBED)
- *  
+ *
  *  @author{Anirudh Rengarajan }
  */
 
@@ -34,12 +34,12 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 extern "C" {
 #endif
 
-    void *mutex_new();
-    void mutex_delete();
-    void mutex_lock();
-    bool mutex_trylock();
-    void mutex_unlock();
-    void *mutex_get_owner();
+void* mutex_new();
+void mutex_delete();
+void mutex_lock();
+bool mutex_trylock();
+void mutex_unlock();
+void* mutex_get_owner();
 
 #ifdef __cplusplus
 }
diff --git a/include/core/platform/arduino_mbed/ThreadWrapper.h b/low_level_platform/api/platform/arduino_mbed/ThreadWrapper.h
similarity index 86%
rename from include/core/platform/arduino_mbed/ThreadWrapper.h
rename to low_level_platform/api/platform/arduino_mbed/ThreadWrapper.h
index 1946b24f8..19bfc12ab 100644
--- a/include/core/platform/arduino_mbed/ThreadWrapper.h
+++ b/low_level_platform/api/platform/arduino_mbed/ThreadWrapper.h
@@ -23,7 +23,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 ***************/
 
 /* Adds threading support in RTOS-enabled Arduino Boards (MBED)
- *  
+ *
  *  @author{Anirudh Rengarajan }
  */
 
@@ -34,11 +34,11 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 extern "C" {
 #endif
 
-    void *thread_new();
-    void thread_delete(void* thread);
-    int thread_start(void* thread, void *(*function) (void *), void* arguments);
-    int thread_join(void* thread, int* thread_return);
-    int thread_terminate(void* thread);
+void* thread_new();
+void thread_delete(void* thread);
+int thread_start(void* thread, void* (*function)(void*), void* arguments);
+int thread_join(void* thread, int* thread_return);
+int thread_terminate(void* thread);
 
 #ifdef __cplusplus
 }
diff --git a/include/core/platform/lf_C11_threads_support.h b/low_level_platform/api/platform/lf_C11_threads_support.h
similarity index 97%
rename from include/core/platform/lf_C11_threads_support.h
rename to low_level_platform/api/platform/lf_C11_threads_support.h
index 52423de7c..64a25797f 100644
--- a/include/core/platform/lf_C11_threads_support.h
+++ b/low_level_platform/api/platform/lf_C11_threads_support.h
@@ -36,8 +36,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 typedef mtx_t lf_mutex_t;
 typedef struct {
-    lf_mutex_t* mutex;
-    cnd_t condition;
+  lf_mutex_t* mutex;
+  cnd_t condition;
 } lf_cond_t;
 typedef thrd_t lf_thread_t;
 
diff --git a/include/core/platform/lf_POSIX_threads_support.h b/low_level_platform/api/platform/lf_POSIX_threads_support.h
similarity index 97%
rename from include/core/platform/lf_POSIX_threads_support.h
rename to low_level_platform/api/platform/lf_POSIX_threads_support.h
index 83f0bd100..d27e7a16f 100644
--- a/include/core/platform/lf_POSIX_threads_support.h
+++ b/low_level_platform/api/platform/lf_POSIX_threads_support.h
@@ -39,8 +39,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 typedef pthread_mutex_t lf_mutex_t;
 typedef struct {
-    lf_mutex_t* mutex;
-    pthread_cond_t condition;
+  lf_mutex_t* mutex;
+  pthread_cond_t condition;
 } lf_cond_t;
 typedef pthread_t lf_thread_t;
 
diff --git a/include/core/platform/lf_arduino_support.h b/low_level_platform/api/platform/lf_arduino_support.h
similarity index 79%
rename from include/core/platform/lf_arduino_support.h
rename to low_level_platform/api/platform/lf_arduino_support.h
index ad9adde1c..94c5d4933 100644
--- a/include/core/platform/lf_arduino_support.h
+++ b/low_level_platform/api/platform/lf_arduino_support.h
@@ -25,7 +25,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 ***************/
 
 /* Arduino Platform API support for the C target of Lingua Franca.
- *  
+ *
  *  @author{Anirudh Rengarajan }
  */
 
@@ -44,57 +44,57 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #ifndef BOARD
 #if defined(ARDUINO_AVR_ADK)
-    #define BOARD AVR
-#elif defined(ARDUINO_AVR_BT)    // Bluetooth
-    #define BOARD AVR
+#define BOARD AVR
+#elif defined(ARDUINO_AVR_BT) // Bluetooth
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_DUEMILANOVE)
-    #define BOARD AVR
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_ESPLORA)
-    #define BOARD AVR
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_ETHERNET)
-    #define BOARD AVR
-#elif defined(ARDUINO_AVR_FIO)      
-    #define BOARD AVR
+#define BOARD AVR
+#elif defined(ARDUINO_AVR_FIO)
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_GEMMA)
-    #define BOARD AVR
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_LEONARDO)
-    #define BOARD AVR
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_LILYPAD)
-    #define BOARD AVR
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_LILYPAD_USB)
-    #define BOARD AVR
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_MEGA)
-    #define BOARD AVR
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_MEGA2560)
-    #define BOARD AVR
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_MICRO)
-    #define BOARD AVR
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_MINI)
-    #define BOARD AVR
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_NANO)
-    #define BOARD AVR
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_NG)
-    #define BOARD AVR
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_PRO)
-    #define BOARD AVR
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_ROBOT_CONTROL)
-    #define BOARD AVR
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_ROBOT_MOTOR)
-    #define BOARD AVR
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_UNO) || defined(__AVR_ATmega4809__)
-    #define BOARD AVR
+#define BOARD AVR
 #elif defined(ARDUINO_AVR_YUN)
-    #define BOARD AVR
+#define BOARD AVR
 
 // These boards must be installed separately:
 #elif defined(ARDUINO_SAM_DUE)
-    #define BOARD SAM
+#define BOARD SAM
 #elif defined(ARDUINO_SAMD_ZERO)
-    #define BOARD SAMD
+#define BOARD SAMD
 #elif defined(ARDUINO_ARC32_TOOLS)
-    #define BOARD SAM
+#define BOARD SAM
 #elif defined(ARDUINO_ARDUINO_NANO33BLE)
-    #define BOARD MBED
+#define BOARD MBED
 #endif
 #endif
 
@@ -102,12 +102,12 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include 
 
 #ifndef __timespec_defined
-#define	__timespec_defined
+#define __timespec_defined
 #ifndef _SYS__TIMESPEC_H_
 #define _SYS__TIMESPEC_H_
 struct timespec {
-	long long	tv_sec;		/* seconds */
-	long	tv_nsec;	/* and nanoseconds */
+  long long tv_sec; /* seconds */
+  long tv_nsec;     /* and nanoseconds */
 };
 #endif
 #endif
diff --git a/include/core/platform/lf_atomic.h b/low_level_platform/api/platform/lf_atomic.h
similarity index 90%
rename from include/core/platform/lf_atomic.h
rename to low_level_platform/api/platform/lf_atomic.h
index 24c24a88e..391678293 100644
--- a/include/core/platform/lf_atomic.h
+++ b/low_level_platform/api/platform/lf_atomic.h
@@ -18,7 +18,7 @@
  * @param val The value to be added.
  * @return The value previously in memory.
  */
-int32_t lf_atomic_fetch_add32(int32_t * ptr, int32_t val);
+int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t val);
 
 /**
  * @brief Atomically fetch 64-bit integer from memory and add a value to it.
@@ -28,7 +28,7 @@ int32_t lf_atomic_fetch_add32(int32_t * ptr, int32_t val);
  * @param val The value to be added.
  * @return The value previously in memory.
  */
-int64_t lf_atomic_fetch_add64(int64_t * ptr, int64_t val);
+int64_t lf_atomic_fetch_add64(int64_t* ptr, int64_t val);
 
 /**
  * @brief Atomically fetch a 32-bit integer from memory and add a value to it.
@@ -38,7 +38,7 @@ int64_t lf_atomic_fetch_add64(int64_t * ptr, int64_t val);
  * @param val The value to be added.
  * @return The new value in memory.
  */
-int32_t lf_atomic_add_fetch32(int32_t * ptr, int32_t val);
+int32_t lf_atomic_add_fetch32(int32_t* ptr, int32_t val);
 
 /**
  * @brief Atomically fetch a 64-bit integer from memory and add a value to it.
@@ -48,7 +48,7 @@ int32_t lf_atomic_add_fetch32(int32_t * ptr, int32_t val);
  * @param val The value to be added.
  * @return The new value in memory.
  */
-int64_t lf_atomic_add_fetch64(int64_t * ptr, int64_t val);
+int64_t lf_atomic_add_fetch64(int64_t* ptr, int64_t val);
 
 /**
  * @brief Atomically perform a compare-and-swap operation on a 32 bit integer in
@@ -85,7 +85,7 @@ bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t new
  * @param newval The value to swap in.
  * @return The value in memory prior to the swap.
  */
-int32_t lf_atomic_val_compare_and_swap32(int32_t *ptr, int32_t oldval, int32_t newval);
+int32_t lf_atomic_val_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval);
 
 /**
  * @brief Atomically perform a compare-and-swap operation on a 64 bit integer in
@@ -98,6 +98,6 @@ int32_t lf_atomic_val_compare_and_swap32(int32_t *ptr, int32_t oldval, int32_t n
  * @param newval The value to swap in.
  * @return The value in memory prior to the swap.
  */
-int64_t lf_atomic_val_compare_and_swap64(int64_t *ptr, int64_t oldval, int64_t newval);
+int64_t lf_atomic_val_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval);
 
 #endif
diff --git a/include/core/platform/lf_linux_support.h b/low_level_platform/api/platform/lf_linux_support.h
similarity index 87%
rename from include/core/platform/lf_linux_support.h
rename to low_level_platform/api/platform/lf_linux_support.h
index 52ede8a4d..18f68b2aa 100644
--- a/include/core/platform/lf_linux_support.h
+++ b/low_level_platform/api/platform/lf_linux_support.h
@@ -40,16 +40,16 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "lf_tag_64_32.h"
 
 #if !defined LF_SINGLE_THREADED
-    #if __STDC_VERSION__ < 201112L || defined (__STDC_NO_THREADS__)
-        // (Not C++11 or later) or no threads support
-        #include "lf_POSIX_threads_support.h"
-    #else
-        #include "lf_C11_threads_support.h"
-    #endif
+#if __STDC_VERSION__ < 201112L || defined(__STDC_NO_THREADS__)
+// (Not C++11 or later) or no threads support
+#include "lf_POSIX_threads_support.h"
+#else
+#include "lf_C11_threads_support.h"
+#endif
 #endif
 
 #if !defined(_POSIX_TIMERS) || _POSIX_TIMERS <= 0
-    #error Linux platform misses clock support
+#error Linux platform misses clock support
 #endif
 
 #endif // LF_LINUX_SUPPORT_H
diff --git a/include/core/platform/lf_macos_support.h b/low_level_platform/api/platform/lf_macos_support.h
similarity index 88%
rename from include/core/platform/lf_macos_support.h
rename to low_level_platform/api/platform/lf_macos_support.h
index 60da3c299..357729f08 100644
--- a/include/core/platform/lf_macos_support.h
+++ b/low_level_platform/api/platform/lf_macos_support.h
@@ -38,12 +38,12 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "lf_tag_64_32.h"
 
 #if !defined LF_SINGLE_THREADED
-    #if __STDC_VERSION__ < 201112L || defined (__STDC_NO_THREADS__)
-        // (Not C++11 or later) or no threads support
-        #include "lf_POSIX_threads_support.h"
-    #else
-        #include "lf_C11_threads_support.h"
-    #endif
+#if __STDC_VERSION__ < 201112L || defined(__STDC_NO_THREADS__)
+// (Not C++11 or later) or no threads support
+#include "lf_POSIX_threads_support.h"
+#else
+#include "lf_C11_threads_support.h"
+#endif
 #endif
 
 #endif // LF_MACOS_SUPPORT_H
diff --git a/include/core/platform/lf_nrf52_support.h b/low_level_platform/api/platform/lf_nrf52_support.h
similarity index 96%
rename from include/core/platform/lf_nrf52_support.h
rename to low_level_platform/api/platform/lf_nrf52_support.h
index e9e7fdef5..b93edaf8e 100644
--- a/include/core/platform/lf_nrf52_support.h
+++ b/low_level_platform/api/platform/lf_nrf52_support.h
@@ -25,7 +25,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 ***************/
 
 /** nrf52 API support for the C target of Lingua Franca.
- *  
+ *
  *  @author{Soroush Bateni }
  *  @author{Abhi Gundrala }
  *  @author{Erling Rennemo Jellum }
@@ -35,12 +35,12 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define LF_NRF52_SUPPORT_H
 
 // This embedded platform has no TTY suport
-#define NO_TTY 
+#define NO_TTY
 
 #include  // For fixed-width integral types
 #include 
 
-#include   // Needed to define PRId64 and PRIu32
+#include  // Needed to define PRId64 and PRIu32
 #define PRINTF_TIME "%" PRId64
 #define PRINTF_MICROSTEP "%" PRIu32
 #define PRINTF_TAG "(%" PRId64 ", %" PRIu32 ")"
diff --git a/include/core/platform/lf_rp2040_support.h b/low_level_platform/api/platform/lf_rp2040_support.h
similarity index 100%
rename from include/core/platform/lf_rp2040_support.h
rename to low_level_platform/api/platform/lf_rp2040_support.h
diff --git a/include/core/platform/lf_tag_64_32.h b/low_level_platform/api/platform/lf_tag_64_32.h
similarity index 100%
rename from include/core/platform/lf_tag_64_32.h
rename to low_level_platform/api/platform/lf_tag_64_32.h
diff --git a/include/core/platform/lf_unix_clock_support.h b/low_level_platform/api/platform/lf_unix_clock_support.h
similarity index 95%
rename from include/core/platform/lf_unix_clock_support.h
rename to low_level_platform/api/platform/lf_unix_clock_support.h
index ae70753d7..0a2c80163 100644
--- a/include/core/platform/lf_unix_clock_support.h
+++ b/low_level_platform/api/platform/lf_unix_clock_support.h
@@ -1,8 +1,6 @@
 #include 
 #include 
 
-#include "lf_types.h"
-
 /**
  * @brief Convert a _lf_time_spec_t ('tp') to an instant_t representation in
  * nanoseconds.
diff --git a/include/core/platform/lf_windows_support.h b/low_level_platform/api/platform/lf_windows_support.h
similarity index 74%
rename from include/core/platform/lf_windows_support.h
rename to low_level_platform/api/platform/lf_windows_support.h
index b45420936..ea6ccffb5 100644
--- a/include/core/platform/lf_windows_support.h
+++ b/low_level_platform/api/platform/lf_windows_support.h
@@ -28,10 +28,10 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *  @author{Soroush Bateni }
  *  @author{Erling Jellum }
- *  
+ *
  * The API is implemented in the header files. This is also the case for Linux
- * and macos. 
- *  
+ * and macos.
+ *
  * All functions return 0 on success.
  *
  * @see https://gist.github.com/Soroosh129/127d1893fa4c1da6d3e1db33381bb273
@@ -44,27 +44,26 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include 
 
 #if !defined LF_SINGLE_THREADED
-        /**
-         * On Windows, one could use both a mutex or
-         * a critical section for the same purpose. However,
-         * critical sections are lighter and limited to one process
-         * and thus fit the requirements of Lingua Franca.
-         */
-        typedef CRITICAL_SECTION lf_mutex_t;
-        /**
-         * For compatibility with other platform APIs, we assume
-         * that mutex is analogous to critical section.
-         */
-        typedef lf_mutex_t _lf_critical_section_t;
-        typedef struct {
-            _lf_critical_section_t* critical_section;
-            CONDITION_VARIABLE condition;
-        } lf_cond_t;
-        typedef HANDLE lf_thread_t;
+/**
+ * On Windows, one could use both a mutex or
+ * a critical section for the same purpose. However,
+ * critical sections are lighter and limited to one process
+ * and thus fit the requirements of Lingua Franca.
+ */
+typedef CRITICAL_SECTION lf_mutex_t;
+/**
+ * For compatibility with other platform APIs, we assume
+ * that mutex is analogous to critical section.
+ */
+typedef lf_mutex_t _lf_critical_section_t;
+typedef struct {
+  _lf_critical_section_t* critical_section;
+  CONDITION_VARIABLE condition;
+} lf_cond_t;
+typedef HANDLE lf_thread_t;
 #endif
 
 // Use 64-bit times and 32-bit unsigned microsteps
 #include "lf_tag_64_32.h"
 
 #endif // LF_WINDOWS_SUPPORT_H
-
diff --git a/low_level_platform/api/platform/lf_zephyr_board_support.h b/low_level_platform/api/platform/lf_zephyr_board_support.h
new file mode 100644
index 000000000..77834e985
--- /dev/null
+++ b/low_level_platform/api/platform/lf_zephyr_board_support.h
@@ -0,0 +1,107 @@
+/*************
+Copyright (c) 2023, Norwegian University of Science and Technology.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+***************/
+
+/**
+ * @brief Provide preprocessor flags for the particular board that was chosen
+ *
+ * @author{Erling Jellum }
+ */
+
+#ifndef LF_ZEPHYR_BOARD_SUPPORT_H
+#define LF_ZEPHYR_BOARD_SUPPORT_H
+
+// Default options
+#define LF_ZEPHYR_THREAD_PRIORITY_DEFAULT 5
+#define LF_ZEPHYR_STACK_SIZE_DEFAULT 2048
+
+// Unless the user explicitly asks for the kernel clock, we use a counter
+//  clock because it is more precise.
+#if !defined(LF_ZEPHYR_CLOCK_KERNEL)
+#if defined(CONFIG_SOC_FAMILY_NRF)
+#define LF_ZEPHYR_CLOCK_COUNTER
+#define LF_TIMER DT_NODELABEL(timer1)
+#define LF_WAKEUP_OVERHEAD_US 100
+#define LF_MIN_SLEEP_US 10
+#define LF_RUNTIME_OVERHEAD_US 19
+#elif defined(CONFIG_BOARD_ATSAMD20_XPRO)
+#define LF_TIMER DT_NODELABEL(tc4)
+#define LF_ZEPHYR_CLOCK_COUNTER
+#elif defined(CONFIG_SOC_FAMILY_SAM)
+#define LF_TIMER DT_NODELABEL(tc0)
+#define LF_ZEPHYR_CLOCK_COUNTER
+#elif defined(CONFIG_COUNTER_MICROCHIP_MCP7940N)
+#define LF_ZEPHYR_CLOCK_COUNTER
+#define LF_TIMER DT_NODELABEL(extrtc0)
+#elif defined(CONFIG_COUNTER_RTC0)
+#define LF_ZEPHYR_CLOCK_COUNTER
+#define LF_TIMER DT_NODELABEL(rtc0)
+#elif defined(CONFIG_COUNTER_RTC_STM32)
+#define LF_TIMER DT_INST(0, st_stm32_rtc)
+#define LF_ZEPHYR_CLOCK_COUNTER
+#elif defined(CONFIG_COUNTER_XLNX_AXI_TIMER)
+#define LF_TIMER DT_INST(0, xlnx_xps_timer_1_00_a)
+#define LF_ZEPHYR_CLOCK_COUNTER
+#elif defined(CONFIG_COUNTER_TMR_ESP32)
+#define LF_TIMER DT_NODELABEL(timer0)
+#define LF_ZEPHYR_CLOCK_COUNTER
+#elif defined(CONFIG_COUNTER_MCUX_CTIMER)
+#define LF_TIMER DT_NODELABEL(ctimer0)
+#define LF_ZEPHYR_CLOCK_COUNTER
+#elif defined(CONFIG_SOC_MIMXRT1176_CM7)
+#define LF_TIMER DT_NODELABEL(gpt2)
+#define LF_ZEPHYR_CLOCK_COUNTER
+#else
+// This board does not have support for the counter clock. If the user
+//  explicitly asked for this clock, then throw an error.
+#if defined(LF_ZEPHYR_CLOCK_COUNTER)
+#error "LF_ZEPHYR_CLOCK_COUNTER was requested but it is not supported by the board"
+#else
+#define LF_ZEPHYR_CLOCK_KERNEL
+#endif
+#endif // BOARD
+#endif
+
+#if defined(LF_ZEPHYR_CLOCK_COUNTER)
+#ifndef LF_WAKEUP_OVERHEAD_US
+#define LF_WAKEUP_OVERHEAD_US 0
+#endif
+
+#ifndef LF_MIN_SLEEP_US
+#define LF_MIN_SLEEP_US 10
+#endif
+
+#ifndef LF_RUNTIME_OVERHEAD_US
+#define LF_RUNTIME_OVERHEAD_US 0
+#endif
+
+#ifndef LF_TIMER_ALARM_CHANNEL
+#define LF_TIMER_ALARM_CHANNEL 0
+#endif
+#else
+#if !defined(LF_ZEPHYR_CLOCK_KERNEL)
+#error Neither hi-res nor lo-res clock specified
+#endif
+#endif // LF_ZEPHYR_CLOCK_COUNTER
+
+#endif
diff --git a/include/core/platform/lf_zephyr_support.h b/low_level_platform/api/platform/lf_zephyr_support.h
similarity index 86%
rename from include/core/platform/lf_zephyr_support.h
rename to low_level_platform/api/platform/lf_zephyr_support.h
index 1e318417e..49172eb21 100644
--- a/include/core/platform/lf_zephyr_support.h
+++ b/low_level_platform/api/platform/lf_zephyr_support.h
@@ -44,36 +44,34 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 typedef struct k_mutex lf_mutex_t;
 typedef struct {
-    lf_mutex_t* mutex;
-    struct k_condvar condition;
+  lf_mutex_t* mutex;
+  struct k_condvar condition;
 } lf_cond_t;
 typedef struct k_thread* lf_thread_t;
 
 /**
- * @brief Add `value` to `*ptr` and return original value of `*ptr` 
+ * @brief Add `value` to `*ptr` and return original value of `*ptr`
  */
-int _zephyr_atomic_fetch_add(int *ptr, int value);
+int _zephyr_atomic_fetch_add(int* ptr, int value);
 /**
  * @brief Add `value` to `*ptr` and return new updated value of `*ptr`
  */
-int _zephyr_atomic_add_fetch(int *ptr, int value);
+int _zephyr_atomic_add_fetch(int* ptr, int value);
 
 /**
  * @brief Compare and swap for boolaen value.
- * If `*ptr` is equal to `value` then overwrite it 
+ * If `*ptr` is equal to `value` then overwrite it
  * with `newval`. If not do nothing. Retruns true on overwrite.
  */
-bool _zephyr_bool_compare_and_swap(bool *ptr, bool value, bool newval);
+bool _zephyr_bool_compare_and_swap(bool* ptr, bool value, bool newval);
 
 /**
  * @brief Compare and swap for integers. If `*ptr` is equal
  * to `value`, it is updated to `newval`. The function returns
  * the original value of `*ptr`.
  */
-int  _zephyr_val32_compare_and_swap(uint32_t *ptr, int value, int newval);
+int _zephyr_val32_compare_and_swap(uint32_t* ptr, int value, int newval);
 
 #endif // !LF_SINGLE_THREADED
 
-
-
 #endif // LF_ZEPHYR_SUPPORT_H
diff --git a/low_level_platform/impl/CMakeLists.txt b/low_level_platform/impl/CMakeLists.txt
new file mode 100644
index 000000000..7641dd663
--- /dev/null
+++ b/low_level_platform/impl/CMakeLists.txt
@@ -0,0 +1,47 @@
+# Check which system we are running on to select the correct platform support
+# file and assign the file's path to LF_PLATFORM_FILE
+set(LF_ROOT ${CMAKE_CURRENT_LIST_DIR}/../..)
+set(LF_LOW_LEVEL_PLATFORM_FILES
+    ${CMAKE_CURRENT_LIST_DIR}/src/lf_unix_clock_support.c
+    ${CMAKE_CURRENT_LIST_DIR}/src/lf_unix_syscall_support.c
+    ${CMAKE_CURRENT_LIST_DIR}/src/lf_linux_support.c
+    ${CMAKE_CURRENT_LIST_DIR}/src/lf_macos_support.c
+    ${CMAKE_CURRENT_LIST_DIR}/src/lf_windows_support.c
+    ${CMAKE_CURRENT_LIST_DIR}/src/lf_nrf52_support.c
+    ${CMAKE_CURRENT_LIST_DIR}/src/lf_zephyr_support.c
+    ${CMAKE_CURRENT_LIST_DIR}/src/lf_zephyr_clock_counter.c
+    ${CMAKE_CURRENT_LIST_DIR}/src/lf_zephyr_clock_kernel.c
+    ${CMAKE_CURRENT_LIST_DIR}/src/lf_rp2040_support.c
+    ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_windows.c
+    ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_gcc_clang.c
+    ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_irq.c
+    ${CMAKE_CURRENT_LIST_DIR}/src/platform_internal.c
+)
+if(PLATFORM_ZEPHYR)
+    message("--- Building Zephyr library")
+    zephyr_library_named(lf-low-level-platform-impl)
+    zephyr_library_sources(${LF_LOW_LEVEL_PLATFORM_FILES})
+    zephyr_library_link_libraries(kernel)
+else()
+    message("--- Building non-Zephyr library")
+    add_library(lf-low-level-platform-impl STATIC ${LF_LOW_LEVEL_PLATFORM_FILES})
+endif()
+add_library(lf::low-level-platform-impl ALIAS lf-low-level-platform-impl)
+
+target_link_libraries(lf-low-level-platform-impl PRIVATE lf::low-level-platform-api)
+target_link_libraries(lf-low-level-platform-impl PUBLIC lf-logging-api)
+
+target_compile_definitions(lf-low-level-platform-impl PUBLIC PLATFORM_${CMAKE_SYSTEM_NAME})
+message(STATUS "Applying preprocessor definitions to platform...")
+macro(low_level_platform_define X)
+    if(DEFINED ${X})
+        message(STATUS ${X}=${${X}})
+        target_compile_definitions(lf-low-level-platform-impl PUBLIC ${X}=${${X}})
+    endif(DEFINED ${X})
+endmacro()
+low_level_platform_define(LF_SINGLE_THREADED)
+low_level_platform_define(LOG_LEVEL)
+low_level_platform_define(MODAL_REACTORS)
+low_level_platform_define(USER_THREADS)
+low_level_platform_define(NUMBER_OF_WORKERS)
+low_level_platform_define(NUMBER_OF_WATCHDOGS)
diff --git a/core/platform/Platform.cmake b/low_level_platform/impl/Platform.cmake
similarity index 100%
rename from core/platform/Platform.cmake
rename to low_level_platform/impl/Platform.cmake
diff --git a/core/platform/arduino_mbed/ConditionWrapper.cpp b/low_level_platform/impl/src/arduino_mbed/ConditionWrapper.cpp
similarity index 100%
rename from core/platform/arduino_mbed/ConditionWrapper.cpp
rename to low_level_platform/impl/src/arduino_mbed/ConditionWrapper.cpp
diff --git a/core/platform/arduino_mbed/MutexWrapper.cpp b/low_level_platform/impl/src/arduino_mbed/MutexWrapper.cpp
similarity index 100%
rename from core/platform/arduino_mbed/MutexWrapper.cpp
rename to low_level_platform/impl/src/arduino_mbed/MutexWrapper.cpp
diff --git a/core/platform/arduino_mbed/ThreadWrapper.cpp b/low_level_platform/impl/src/arduino_mbed/ThreadWrapper.cpp
similarity index 100%
rename from core/platform/arduino_mbed/ThreadWrapper.cpp
rename to low_level_platform/impl/src/arduino_mbed/ThreadWrapper.cpp
diff --git a/low_level_platform/impl/src/lf_C11_threads_support.c b/low_level_platform/impl/src/lf_C11_threads_support.c
new file mode 100644
index 000000000..527ce28d3
--- /dev/null
+++ b/low_level_platform/impl/src/lf_C11_threads_support.c
@@ -0,0 +1,52 @@
+#if !defined(LF_SINGLE_THREADED) && !defined(PLATFORM_ARDUINO)
+#include "low_level_platform.h"
+#include "platform/lf_C11_threads_support.h"
+#include <threads.h>
+#include <stdlib.h>
+#include <stdint.h> // For fixed-width integral types
+
+int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) {
+  return thrd_create((thrd_t*)thread, (thrd_start_t)lf_thread, arguments);
+}
+
+int lf_thread_join(lf_thread_t thread, void** thread_return) {
+  // thrd_join wants the second argument to be an int* rather than a void**
+  return thrd_join((thrd_t)thread, (int*)thread_return);
+}
+
+int lf_mutex_init(lf_mutex_t* mutex) {
+  // Set up a timed and recursive mutex (default behavior)
+  return mtx_init((mtx_t*)mutex, mtx_timed | mtx_recursive);
+}
+
+int lf_mutex_lock(lf_mutex_t* mutex) { return mtx_lock((mtx_t*)mutex); }
+
+int lf_mutex_unlock(lf_mutex_t* mutex) { return mtx_unlock((mtx_t*)mutex); }
+
+int lf_cond_init(lf_cond_t* cond, lf_mutex_t* mutex) {
+  cond->mutex = mutex;
+  return cnd_init((cnd_t*)&cond->condition);
+}
+
+int lf_cond_broadcast(lf_cond_t* cond) { return cnd_broadcast((cnd_t*)&cond->condition); }
+
+int lf_cond_signal(lf_cond_t* cond) { return cnd_signal((cnd_t*)&cond->condition); }
+
+int lf_cond_wait(lf_cond_t* cond) { return cnd_wait((cnd_t*)&cond->condition, (mtx_t*)cond->mutex); }
+
+int _lf_cond_timedwait(lf_cond_t* cond, instant_t wakeup_time) {
+  struct timespec timespec_absolute_time = {.tv_sec = wakeup_time / BILLION, .tv_nsec = wakeup_time % BILLION};
+
+  int return_value = cnd_timedwait((cnd_t*)&cond->condition, (mtx_t*)cond->mutex, &timespec_absolute_time);
+
+  switch (return_value) {
+  case thrd_timedout:
+    return_value = LF_TIMEOUT;
+    break;
+
+  default:
+    break;
+  }
+  return return_value;
+}
+#endif
diff --git a/low_level_platform/impl/src/lf_POSIX_threads_support.c b/low_level_platform/impl/src/lf_POSIX_threads_support.c
new file mode 100644
index 000000000..57f3a6811
--- /dev/null
+++ b/low_level_platform/impl/src/lf_POSIX_threads_support.c
@@ -0,0 +1,70 @@
+#if !defined(LF_SINGLE_THREADED) && !defined(PLATFORM_ARDUINO)
+#include "low_level_platform.h"
+#include "platform/lf_POSIX_threads_support.h"
+#include "platform/lf_unix_clock_support.h"
+
+#include <pthread.h>
+#include <errno.h>
+#include <stdint.h> // For fixed-width integral types
+
+int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) {
+  return pthread_create((pthread_t*)thread, NULL, lf_thread, arguments);
+}
+
+int lf_thread_join(lf_thread_t thread, void** thread_return) { return pthread_join((pthread_t)thread, thread_return); }
+
+int lf_mutex_init(lf_mutex_t* mutex) {
+  // Set up a recursive mutex
+  pthread_mutexattr_t attr;
+  pthread_mutexattr_init(&attr);
+  // Initialize the mutex to be recursive, meaning that it is OK
+  // for the same thread to lock and unlock the mutex even if it already holds
+  // the lock.
+  // FIXME: This is dangerous. The docs say this: "It is advised that an
+  // application should not use a PTHREAD_MUTEX_RECURSIVE mutex with
+  // condition variables because the implicit unlock performed for a
+  // pthread_cond_wait() or pthread_cond_timedwait() may not actually
+  // release the mutex (if it had been locked multiple times).
+  // If this happens, no other thread can satisfy the condition
+  // of the predicate.”  This seems like a bug in the implementation of
+  // pthreads. Maybe it has been fixed?
+  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+  return pthread_mutex_init((pthread_mutex_t*)mutex, &attr);
+}
+
+int lf_mutex_lock(lf_mutex_t* mutex) { return pthread_mutex_lock((pthread_mutex_t*)mutex); }
+
+int lf_mutex_unlock(lf_mutex_t* mutex) { return pthread_mutex_unlock((pthread_mutex_t*)mutex); }
+
+int lf_cond_init(lf_cond_t* cond, lf_mutex_t* mutex) {
+  cond->mutex = mutex;
+  pthread_condattr_t cond_attr;
+  pthread_condattr_init(&cond_attr);
+  // Limit the scope of the condition variable to this process (default)
+  pthread_condattr_setpshared(&cond_attr, PTHREAD_PROCESS_PRIVATE);
+  return pthread_cond_init(&cond->condition, &cond_attr);
+}
+
+int lf_cond_broadcast(lf_cond_t* cond) { return pthread_cond_broadcast((pthread_cond_t*)&cond->condition); }
+
+int lf_cond_signal(lf_cond_t* cond) { return pthread_cond_signal((pthread_cond_t*)&cond->condition); }
+
+int lf_cond_wait(lf_cond_t* cond) {
+  return pthread_cond_wait((pthread_cond_t*)&cond->condition, (pthread_mutex_t*)cond->mutex);
+}
+
+int _lf_cond_timedwait(lf_cond_t* cond, instant_t wakeup_time) {
+  struct timespec timespec_absolute_time = convert_ns_to_timespec(wakeup_time);
+  int return_value =
+      pthread_cond_timedwait((pthread_cond_t*)&cond->condition, (pthread_mutex_t*)cond->mutex, &timespec_absolute_time);
+  switch (return_value) {
+  case ETIMEDOUT:
+    return_value = LF_TIMEOUT;
+    break;
+
+  default:
+    break;
+  }
+  return return_value;
+}
+#endif
diff --git a/core/platform/lf_arduino_support.c b/low_level_platform/impl/src/lf_arduino_support.c
similarity index 63%
rename from core/platform/lf_arduino_support.c
rename to low_level_platform/impl/src/lf_arduino_support.c
index a13cfab52..ed9391205 100644
--- a/core/platform/lf_arduino_support.c
+++ b/low_level_platform/impl/src/lf_arduino_support.c
@@ -27,17 +27,16 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *  @author{Erling Rennemo Jellum }
  */
 
-
+#include <time.h>
+#include <errno.h>
+#include <assert.h>
 
-#include "lf_arduino_support.h"
-#include "../platform.h"
+#include "platform/lf_arduino_support.h"
+#include "low_level_platform.h"
 #include "Arduino.h"
 
 // Combine 2 32bit values into a 64bit
-#define COMBINE_HI_LO(hi,lo) ((((uint64_t) hi) << 32) | ((uint64_t) lo))
+#define COMBINE_HI_LO(hi, lo) ((((uint64_t)hi) << 32) | ((uint64_t)lo))
 
 // Keep track of physical actions being entered into the system
 static volatile bool _lf_async_event = false;
@@ -67,36 +66,36 @@ static volatile uint32_t _lf_time_us_low_last = 0;
  * @return int 0 if successful sleep, -1 if awoken by async event
  */
 int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup) {
-    instant_t now;
+  instant_t now;
 
-    _lf_async_event = false;
-    lf_enable_interrupts_nested();
+  _lf_async_event = false;
+  lf_enable_interrupts_nested();
 
-    // Do busy sleep
-    do {
-        _lf_clock_gettime(&now);
-    } while ((now < wakeup) && !_lf_async_event);
+  // Do busy sleep
+  do {
+    _lf_clock_gettime(&now);
+  } while ((now < wakeup) && !_lf_async_event);
 
-    lf_disable_interrupts_nested();
+  lf_disable_interrupts_nested();
 
-    if (_lf_async_event) {
-        _lf_async_event = false;
-        return -1;
-    } else {
-        return 0;
-    }
+  if (_lf_async_event) {
+    _lf_async_event = false;
+    return -1;
+  } else {
+    return 0;
+  }
 }
 
 int lf_sleep(interval_t sleep_duration) {
-    instant_t now;
-    _lf_clock_gettime(&now);
-    instant_t wakeup = now + sleep_duration;
+  instant_t now;
+  _lf_clock_gettime(&now);
+  instant_t wakeup = now + sleep_duration;
 
-    // Do busy sleep
-    do {
-        _lf_clock_gettime(&now);
-    } while ((now < wakeup));
-    return 0;
+  // Do busy sleep
+  do {
+    _lf_clock_gettime(&now);
+  } while ((now < wakeup));
+  return 0;
 }
 
 /**
@@ -112,49 +111,49 @@ void _lf_initialize_clock() {}
  */
 int _lf_clock_gettime(instant_t* t) {
 
-    assert(t != NULL);
+  assert(t != NULL);
 
-    uint32_t now_us_low = micros();
+  uint32_t now_us_low = micros();
 
-    // Detect whether overflow has occured since last read
-    // TODO: This assumes that we _lf_clock_gettime is called at least once per overflow
-    if (now_us_low < _lf_time_us_low_last) {
-        _lf_time_us_high++;
-    }
+  // Detect whether overflow has occurred since last read
+  // TODO: This assumes that _lf_clock_gettime is called at least once per overflow
+  if (now_us_low < _lf_time_us_low_last) {
+    _lf_time_us_high++;
+  }
 
-    *t = COMBINE_HI_LO(_lf_time_us_high, now_us_low) * 1000ULL;
-    return 0;
+  *t = COMBINE_HI_LO(_lf_time_us_high, now_us_low) * 1000ULL;
+  return 0;
 }
 
 int lf_enable_interrupts_nested() {
-    if (_lf_num_nested_critical_sections++ == 0) {
-        // First nested entry into a critical section.
-        // If interrupts are not initially enabled, then increment again to prevent
-        // TODO: Do we need to check whether the interrupts were enabled to
-        //  begin with? AFAIK there is no Arduino API for that
-        noInterrupts();
-    }
-    return 0;
+  if (_lf_num_nested_critical_sections++ == 0) {
+    // First nested entry into a critical section.
+    // If interrupts are not initially enabled, then increment again to prevent
+    // TODO: Do we need to check whether the interrupts were enabled to
+    //  begin with? AFAIK there is no Arduino API for that
+    noInterrupts();
+  }
+  return 0;
 }
 
 int lf_disable_interrupts_nested() {
-    if (_lf_num_nested_critical_sections <= 0) {
-        return 1;
-    }
-    if (--_lf_num_nested_critical_sections == 0) {
-        interrupts();
-    }
-    return 0;
+  if (_lf_num_nested_critical_sections <= 0) {
+    return 1;
+  }
+  if (--_lf_num_nested_critical_sections == 0) {
+    interrupts();
+  }
+  return 0;
 }
 
 #if defined(LF_SINGLE_THREADED)
 /**
  * Handle notifications from the runtime of changes to the event queue.
  * If a sleep is in progress, it should be interrupted.
-*/
+ */
 int _lf_single_threaded_notify_of_event() {
-   _lf_async_event = true;
-   return 0;
+  _lf_async_event = true;
+  return 0;
 }
 
 #else
@@ -164,71 +163,67 @@ int _lf_single_threaded_notify_of_event() {
 #include "ThreadWrapper.h"
 
 // Typedef that represents the function pointers passed by LF runtime into lf_thread_create
-typedef void *(*lf_function_t) (void *);
+typedef void* (*lf_function_t)(void*);
 
 /**
  * @brief Get the number of cores on the host machine.
  */
-int lf_available_cores() {
-    return 1;
-}
+int lf_available_cores() { return 1; }
 
-int lf_thread_create(lf_thread_t* thread, void *(*lf_thread) (void *), void* arguments) {
-    lf_thread_t t = thread_new();
-    long int start = thread_start(t, *lf_thread, arguments);
-    *thread = t;
-    return start;
+int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) {
+  lf_thread_t t = thread_new();
+  long int start = thread_start(t, *lf_thread, arguments);
+  *thread = t;
+  return start;
 }
 
-int lf_thread_join(lf_thread_t thread, void** thread_return) {
-   return thread_join(thread, thread_return);
-}
+int lf_thread_join(lf_thread_t thread, void** thread_return) { return thread_join(thread, thread_return); }
 
 int lf_mutex_init(lf_mutex_t* mutex) {
-    *mutex = (lf_mutex_t) mutex_new();
-    return 0;
+  *mutex = (lf_mutex_t)mutex_new();
+  return 0;
 }
 
 int lf_mutex_lock(lf_mutex_t* mutex) {
-    mutex_lock(*mutex);
-    return 0;
+  mutex_lock(*mutex);
+  return 0;
 }
 
 int lf_mutex_unlock(lf_mutex_t* mutex) {
-    mutex_unlock(*mutex);
-    return 0;
+  mutex_unlock(*mutex);
+  return 0;
 }
 
 int lf_cond_init(lf_cond_t* cond, lf_mutex_t* mutex) {
-    *cond = (lf_cond_t) condition_new (*mutex);
-    return 0;
+  *cond = (lf_cond_t)condition_new(*mutex);
+  return 0;
 }
 
 int lf_cond_broadcast(lf_cond_t* cond) {
-    condition_notify_all(*cond);
-    return 0;
+  condition_notify_all(*cond);
+  return 0;
 }
 
 int lf_cond_signal(lf_cond_t* cond) {
-    condition_notify_one(*cond);
-    return 0;
+  condition_notify_one(*cond);
+  return 0;
 }
 
 int lf_cond_wait(lf_cond_t* cond) {
-    condition_wait(*cond);
-    return 0;
+  condition_wait(*cond);
+  return 0;
 }
 
 int _lf_cond_timedwait(lf_cond_t* cond, instant_t wakeup_time) {
-    instant_t now;
-    _lf_clock_gettime(&now);
-    interval_t sleep_duration_ns = wakeup_time - now;
-    bool res = condition_wait_for(*cond, sleep_duration_ns);
-    if (!res) {
-        return 0;
-    } else {
-        return LF_TIMEOUT;
-    }
+  instant_t now;
+  _lf_clock_gettime(&now);
+  interval_t sleep_duration_ns = wakeup_time - now;
+  bool res = condition_wait_for(*cond, sleep_duration_ns);
+  if (!res) {
+    return 0;
+  } else {
+    return LF_TIMEOUT;
+  }
 }
 
 #endif
diff --git a/low_level_platform/impl/src/lf_atomic_gcc_clang.c b/low_level_platform/impl/src/lf_atomic_gcc_clang.c
new file mode 100644
index 000000000..30d671a8a
--- /dev/null
+++ b/low_level_platform/impl/src/lf_atomic_gcc_clang.c
@@ -0,0 +1,32 @@
+#if defined(PLATFORM_Linux) || defined(PLATFORM_Darwin)
+#if defined(__GNUC__) || defined(__clang__)
+/**
+ * @author Soroush Bateni
+ * @author Erling Rennemo Jellum
+ * @copyright (c) 2023
+ * License: BSD 2-clause
+ * @brief Implements the atomics API using GCC/Clang APIs.
+ */
+
+#include "platform/lf_atomic.h"
+#include "low_level_platform.h"
+
+int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t value) { return __sync_fetch_and_add(ptr, value); }
+int64_t lf_atomic_fetch_add64(int64_t* ptr, int64_t value) { return __sync_fetch_and_add(ptr, value); }
+int32_t lf_atomic_add_fetch32(int32_t* ptr, int32_t value) { return __sync_add_and_fetch(ptr, value); }
+int64_t lf_atomic_add_fetch64(int64_t* ptr, int64_t value) { return __sync_add_and_fetch(ptr, value); }
+bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
+  return __sync_bool_compare_and_swap(ptr, oldval, newval);
+}
+bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) {
+  return __sync_bool_compare_and_swap(ptr, oldval, newval);
+}
+int32_t lf_atomic_val_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
+  return __sync_val_compare_and_swap(ptr, oldval, newval);
+}
+int64_t lf_atomic_val_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) {
+  return __sync_val_compare_and_swap(ptr, oldval, newval);
+}
+
+#endif
+#endif
diff --git a/low_level_platform/impl/src/lf_atomic_irq.c b/low_level_platform/impl/src/lf_atomic_irq.c
new file mode 100644
index 000000000..7be9aff34
--- /dev/null
+++ b/low_level_platform/impl/src/lf_atomic_irq.c
@@ -0,0 +1,93 @@
+#if defined(PLATFORM_ARDUINO) || defined(PLATFORM_NRF52) || defined(PLATFORM_ZEPHYR) || defined(PLATFORM_RP2040)
+/**
+ * @author Erling Rennemo Jellum
+ * @copyright (c) 2023
+ * License: BSD 2-clause
+ * @brief Implements the atomics API by disabling interrupts. Typically used for platforms that
+ * do not support atomic operations. The platforms need to implement `lf_enable_interrupts_nested`
+ * and `lf_disable_interrupts_nested`.
+ */
+
+#include "platform/lf_atomic.h"
+#include "low_level_platform.h"
+
+// Forward declare the functions for enabling/disabling interrupts. Must be
+// implemented in the platform support file of the target.
+int lf_disable_interrupts_nested();
+int lf_enable_interrupts_nested();
+
+int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t value) {
+  lf_disable_interrupts_nested();
+  int32_t res = *ptr;
+  *ptr += value;
+  lf_enable_interrupts_nested();
+  return res;
+}
+
+int64_t lf_atomic_fetch_add64(int64_t* ptr, int64_t value) {
+  lf_disable_interrupts_nested();
+  int64_t res = *ptr;
+  *ptr += value;
+  lf_enable_interrupts_nested();
+  return res;
+}
+
+int32_t lf_atomic_add_fetch32(int32_t* ptr, int32_t value) {
+  lf_disable_interrupts_nested();
+  int res = *ptr + value;
+  *ptr = res;
+  lf_enable_interrupts_nested();
+  return res;
+}
+
+int64_t lf_atomic_add_fetch64(int64_t* ptr, int64_t value) {
+  lf_disable_interrupts_nested();
+  int64_t res = *ptr + value;
+  *ptr = res;
+  lf_enable_interrupts_nested();
+  return res;
+}
+
+bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
+  lf_disable_interrupts_nested();
+  bool res = false;
+  if ((*ptr) == oldval) {
+    *ptr = newval;
+    res = true;
+  }
+  lf_enable_interrupts_nested();
+  return res;
+}
+
+bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) {
+  lf_disable_interrupts_nested();
+  bool res = false;
+  if ((*ptr) == oldval) {
+    *ptr = newval;
+    res = true;
+  }
+  lf_enable_interrupts_nested();
+  return res;
+}
+
+int32_t lf_atomic_val_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
+  lf_disable_interrupts_nested();
+  int res = *ptr;
+  if ((*ptr) == oldval) {
+    *ptr = newval;
+  }
+  lf_enable_interrupts_nested();
+  return res;
+}
+
+int64_t lf_atomic_val_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) {
+  lf_disable_interrupts_nested();
+  int64_t res = *ptr;
+  if ((*ptr) == oldval) {
+    *ptr = newval;
+  }
+  lf_enable_interrupts_nested();
+  return res;
+}
+
+#endif
diff --git a/low_level_platform/impl/src/lf_atomic_windows.c b/low_level_platform/impl/src/lf_atomic_windows.c
new file mode 100644
index 000000000..1db0fa2de
--- /dev/null
+++ b/low_level_platform/impl/src/lf_atomic_windows.c
@@ -0,0 +1,29 @@
+#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__NT__)
+/**
+ * @author Soroush Bateni
+ * @author Erling Rennemo Jellum
+ * @copyright (c) 2023
+ * License: BSD 2-clause
+ * @brief Implements the atomic API for Windows machines.
+ */
+
+#include "platform/lf_atomic.h"
+#include <windows.h>
+
+int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t value) { return InterlockedExchangeAdd(ptr, value); }
+int64_t lf_atomic_fetch_add64(int64_t* ptr, int64_t value) { return InterlockedExchangeAdd64(ptr, value); }
+int32_t lf_atomic_add_fetch32(int32_t* ptr, int32_t value) { return InterlockedAdd(ptr, value); }
+int64_t lf_atomic_add_fetch64(int64_t* ptr, int64_t value) { return InterlockedAdd64(ptr, value); }
+bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
+  return (InterlockedCompareExchange(ptr, newval, oldval) == oldval);
+}
+bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) {
+  return (InterlockedCompareExchange64(ptr, newval, oldval) == oldval);
+}
+int32_t lf_atomic_val_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
+  return InterlockedCompareExchange(ptr, newval, oldval);
+}
+int64_t lf_atomic_val_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) {
+  return InterlockedCompareExchange64(ptr, newval, oldval);
+}
+#endif
diff --git a/core/platform/lf_linux_support.c b/low_level_platform/impl/src/lf_linux_support.c
similarity index 69%
rename from core/platform/lf_linux_support.c
rename to low_level_platform/impl/src/lf_linux_support.c
index a2847ad37..3edf8d7ea 100644
--- a/core/platform/lf_linux_support.c
+++ b/low_level_platform/impl/src/lf_linux_support.c
@@ -27,47 +27,44 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 /**
  * @brief Platform support for the Linux operating system.
- * 
+ *
  * @author{Soroush Bateni }
  * @author{Marten Lohstroh }
  */
- 
-#include "lf_linux_support.h"
-#include "platform.h"
-#include "tag.h"
+
+#include "platform/lf_linux_support.h"
+#include "low_level_platform.h"
 
 #if defined LF_SINGLE_THREADED
-    #include "lf_os_single_threaded_support.c"
+#include "lf_os_single_threaded_support.c"
 #endif
 
 #if !defined LF_SINGLE_THREADED
-    #if __STDC_VERSION__ < 201112L || defined (__STDC_NO_THREADS__)
-        // (Not C++11 or later) or no threads support
-        #include "lf_POSIX_threads_support.c"
-    #else
-        #include "lf_C11_threads_support.c"
-    #endif
+#if __STDC_VERSION__ < 201112L || defined(__STDC_NO_THREADS__)
+// (Not C++11 or later) or no threads support
+#include "lf_POSIX_threads_support.c"
+#else
+#include "lf_C11_threads_support.c"
+#endif
 #endif
 
-#include "lf_unix_clock_support.h"
+#include "platform/lf_unix_clock_support.h"
 
 int lf_sleep(interval_t sleep_duration) {
-    const struct timespec tp = convert_ns_to_timespec(sleep_duration);
-    struct timespec remaining;
-    return nanosleep((const struct timespec*)&tp, (struct timespec*)&remaining);
+  const struct timespec tp = convert_ns_to_timespec(sleep_duration);
+  struct timespec remaining;
+  return nanosleep((const struct timespec*)&tp, (struct timespec*)&remaining);
 }
 
 int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_time) {
-    interval_t sleep_duration = wakeup_time - lf_time_physical();
+  interval_t sleep_duration = wakeup_time - lf_time_physical();
 
-    if (sleep_duration <= 0) {
-        return 0;
-    } else {
-        return lf_sleep(sleep_duration);
-    }
-}
-
-int lf_nanosleep(interval_t sleep_duration) {
+  if (sleep_duration <= 0) {
+    return 0;
+  } else {
     return lf_sleep(sleep_duration);
+  }
 }
+
+int lf_nanosleep(interval_t sleep_duration) { return lf_sleep(sleep_duration); }
 #endif
diff --git a/core/platform/lf_macos_support.c b/low_level_platform/impl/src/lf_macos_support.c
similarity index 70%
rename from core/platform/lf_macos_support.c
rename to low_level_platform/impl/src/lf_macos_support.c
index 84e883b37..54fcfd296 100644
--- a/core/platform/lf_macos_support.c
+++ b/low_level_platform/impl/src/lf_macos_support.c
@@ -30,45 +30,41 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *  @author{Soroush Bateni }
  */
 
-#include "lf_macos_support.h"
-#include "platform.h"
+#include "platform/lf_macos_support.h"
+#include "low_level_platform.h"
 #include "tag.h"
 
 #if defined LF_SINGLE_THREADED
-    #include "lf_os_single_threaded_support.c"
+#include "lf_os_single_threaded_support.c"
 #endif
 
 #if !defined LF_SINGLE_THREADED
-    #if __STDC_VERSION__ < 201112L || defined (__STDC_NO_THREADS__)
-        // (Not C++11 or later) or no threads support
-        #include "lf_POSIX_threads_support.c"
-    #else
-        #include "lf_C11_threads_support.c"
-    #endif
+#if __STDC_VERSION__ < 201112L || defined(__STDC_NO_THREADS__)
+// (Not C++11 or later) or no threads support
+#include "lf_POSIX_threads_support.c"
+#else
+#include "lf_C11_threads_support.c"
+#endif
 #endif
 
-
-
-#include "lf_unix_clock_support.h"
+#include "platform/lf_unix_clock_support.h"
 
 // See `man 2 clock_nanosleep` for return values
 int lf_sleep(interval_t sleep_duration) {
-    const struct timespec tp = convert_ns_to_timespec(sleep_duration);
-    struct timespec remaining;
-    return nanosleep((const struct timespec*)&tp, (struct timespec*)&remaining);
+  const struct timespec tp = convert_ns_to_timespec(sleep_duration);
+  struct timespec remaining;
+  return nanosleep((const struct timespec*)&tp, (struct timespec*)&remaining);
 }
 
 int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_time) {
-    interval_t sleep_duration = wakeup_time - lf_time_physical();
+  interval_t sleep_duration = wakeup_time - lf_time_physical();
 
-    if (sleep_duration <= 0) {
-        return 0;
-    } else {
-        return lf_sleep(sleep_duration);
-    }
-}
-
-int lf_nanosleep(interval_t sleep_duration) {
+  if (sleep_duration <= 0) {
+    return 0;
+  } else {
     return lf_sleep(sleep_duration);
+  }
 }
+
+int lf_nanosleep(interval_t sleep_duration) { return lf_sleep(sleep_duration); }
 #endif
diff --git a/core/platform/lf_nrf52_support.c b/low_level_platform/impl/src/lf_nrf52_support.c
similarity index 52%
rename from core/platform/lf_nrf52_support.c
rename to low_level_platform/impl/src/lf_nrf52_support.c
index ba364108a..f0147a7d3 100644
--- a/core/platform/lf_nrf52_support.c
+++ b/low_level_platform/impl/src/lf_nrf52_support.c
@@ -38,7 +38,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include 
 #include 
 
-#include "lf_nrf52_support.h"
+#include "platform/lf_nrf52_support.h"
 #include "../platform.h"
 #include "../utils/util.h"
 #include "../tag.h"
@@ -61,11 +61,11 @@ static volatile bool _lf_async_event = false;
 static const nrfx_timer_t g_lf_timer_inst = NRFX_TIMER_INSTANCE(3);
 
 // Combine 2 32bit works to a 64 bit word
-#define COMBINE_HI_LO(hi,lo) ((((uint64_t) hi) << 32) | ((uint64_t) lo))
+#define COMBINE_HI_LO(hi, lo) ((((uint64_t)hi) << 32) | ((uint64_t)lo))
 
 // Maximum and minimum sleep possible
 #define LF_MAX_SLEEP_NS USEC(UINT32_MAX)
-#define LF_MIN_SLEEP_NS USEC(5) 
+#define LF_MIN_SLEEP_NS USEC(5)
 
 /**
  * Variable tracking the higher 32bits of the time.
@@ -88,41 +88,41 @@ uint8_t _lf_nested_region = 0;
  *      channel that fired interrupt on timer
  * [in] p_context
  *      context passed to handler
- * 
+ *
  */
-void lf_timer_event_handler(nrf_timer_event_t event_type, void *p_context) {
-    
-    if (event_type == NRF_TIMER_EVENT_COMPARE2) {
-        _lf_sleep_interrupted = false;
-    } else if (event_type == NRF_TIMER_EVENT_COMPARE3) {
-        _lf_time_us_high =+ 1;
-    }
+void lf_timer_event_handler(nrf_timer_event_t event_type, void* p_context) {
+
+  if (event_type == NRF_TIMER_EVENT_COMPARE2) {
+    _lf_sleep_interrupted = false;
+  } else if (event_type == NRF_TIMER_EVENT_COMPARE3) {
+    _lf_time_us_high++;
+  }
 }
 
 void _lf_initialize_clock() {
-    ret_code_t error_code;
-    _lf_time_us_high = 0;
-
-    // Initialize TIMER3 as a free running timer
-    // 1) Set to be a 32 bit timer
-    // 2) Set to count at 1MHz
-    // 3) Clear the timer
-    // 4) Start the timer
-
-    nrfx_timer_config_t timer_conf = {
-        .frequency = NRF_TIMER_FREQ_1MHz,
-        .mode = NRF_TIMER_MODE_TIMER,
-        .bit_width = NRF_TIMER_BIT_WIDTH_32,
-        .interrupt_priority = 7, // lowest
-        .p_context = NULL,
-    };
-
-    error_code = nrfx_timer_init(&g_lf_timer_inst, &timer_conf, &lf_timer_event_handler);
-    APP_ERROR_CHECK(error_code);
-    // Enable an interrupt to occur on channel NRF_TIMER_CC_CHANNEL3
-    // when the timer reaches its maximum value and is about to overflow.
-    nrfx_timer_compare(&g_lf_timer_inst, NRF_TIMER_CC_CHANNEL3, 0x0, true);
-    nrfx_timer_enable(&g_lf_timer_inst);
+  ret_code_t error_code;
+  _lf_time_us_high = 0;
+
+  // Initialize TIMER3 as a free running timer
+  // 1) Set to be a 32 bit timer
+  // 2) Set to count at 1MHz
+  // 3) Clear the timer
+  // 4) Start the timer
+
+  nrfx_timer_config_t timer_conf = {
+      .frequency = NRF_TIMER_FREQ_1MHz,
+      .mode = NRF_TIMER_MODE_TIMER,
+      .bit_width = NRF_TIMER_BIT_WIDTH_32,
+      .interrupt_priority = 7, // lowest
+      .p_context = NULL,
+  };
+
+  error_code = nrfx_timer_init(&g_lf_timer_inst, &timer_conf, &lf_timer_event_handler);
+  APP_ERROR_CHECK(error_code);
+  // Enable an interrupt to occur on channel NRF_TIMER_CC_CHANNEL3
+  // when the timer reaches its maximum value and is about to overflow.
+  nrfx_timer_compare(&g_lf_timer_inst, NRF_TIMER_CC_CHANNEL3, 0x0, true);
+  nrfx_timer_enable(&g_lf_timer_inst);
 }
 
 /**
@@ -130,66 +130,66 @@ void _lf_initialize_clock() {
  * timestamp value in 't' will will be the number of nanoseconds since the board was reset.
  * The timers on the board have only 32 bits and their resolution is in microseconds, so
  * the time returned will always be an integer number of microseconds. Moreover, after about 71
- * minutes of operation, the timer overflows. 
- * 
+ * minutes of operation, the timer overflows.
+ *
  * The function reads out the upper word before and after reading the timer.
  * If the upper word has changed (i.e. there was an overflow in between),
- * we cannot simply combine them. We read once more to be sure that 
+ * we cannot simply combine them. We read once more to be sure that
  * we read after the overflow.
  *
  * @return 0 for success, or -1 for failure. In case of failure, errno will be
  *  set appropriately (see `man 2 clock_gettime`).
  */
 int _lf_clock_gettime(instant_t* t) {
-    assert(t);
-    
-    uint32_t now_us_hi_pre = _lf_time_us_high;
-    uint32_t now_us_low = nrfx_timer_capture(&g_lf_timer_inst, NRF_TIMER_CC_CHANNEL1);
-    uint32_t now_us_hi_post = _lf_time_us_high; 
-
-    // Check if we read the time during a wrap
-    if (now_us_hi_pre != now_us_hi_post) {
-        // There was a wrap. read again and return
-        now_us_low = nrfx_timer_capture(&g_lf_timer_inst, NRF_TIMER_CC_CHANNEL1);
-    }
-    uint64_t now_us = COMBINE_HI_LO(now_us_hi_post, now_us_low);
+  assert(t);
 
-    *t = ((instant_t)now_us) * 1000;
-    return 0;
+  uint32_t now_us_hi_pre = _lf_time_us_high;
+  uint32_t now_us_low = nrfx_timer_capture(&g_lf_timer_inst, NRF_TIMER_CC_CHANNEL1);
+  uint32_t now_us_hi_post = _lf_time_us_high;
+
+  // Check if we read the time during a wrap
+  if (now_us_hi_pre != now_us_hi_post) {
+    // There was a wrap. read again and return
+    now_us_low = nrfx_timer_capture(&g_lf_timer_inst, NRF_TIMER_CC_CHANNEL1);
+  }
+  uint64_t now_us = COMBINE_HI_LO(now_us_hi_post, now_us_low);
+
+  *t = ((instant_t)now_us) * 1000;
+  return 0;
 }
 
 /**
  * @brief Pause execution for a given duration.
- * 
+ *
  * This implementation performs a busy-wait because it is unclear what will
  * happen if this function is called from within an ISR.
- * 
- * @param sleep_duration 
+ *
+ * @param sleep_duration
  * @return 0 for success, or -1 for failure.
  */
 int lf_sleep(interval_t sleep_duration) {
-    instant_t target_time;
-    instant_t current_time;
+  instant_t target_time;
+  instant_t current_time;
+  _lf_clock_gettime(&current_time);
+  target_time = current_time + sleep_duration;
+
+  while (current_time <= target_time) {
     _lf_clock_gettime(&current_time);
-    target_time = current_time + sleep_duration;
-    
-    while (current_time <= target_time) {
-        _lf_clock_gettime(&current_time);
-    }
-    return 0;
+  }
+  return 0;
 }
 
 /**
  * @brief Do a busy-wait until a time instant
- * 
- * @param wakeup_time 
+ *
+ * @param wakeup_time
  */
 
 static void lf_busy_wait_until(instant_t wakeup_time) {
-    instant_t now;
-    do {
-        _lf_clock_gettime(&now);
-    } while (now < wakeup_time);
+  instant_t now;
+  do {
+    _lf_clock_gettime(&now);
+  } while (now < wakeup_time);
 }
 
 /**
@@ -197,96 +197,92 @@ static void lf_busy_wait_until(instant_t wakeup_time) {
  *  1. Wakeup time is already past
  *  2. Implied sleep duration is below `LF_MAX_SLEEP_NS` threshold
  *  3. Implied sleep duration is above `LF_MAX_SLEEP_NS` limit
- * 
+ *
  * @param wakeup_time The time instant at which to wake up.
  * @return int 0 if sleep completed, or -1 if it was interrupted.
  */
 int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_time) {
-    instant_t now;
-    _lf_clock_gettime(&now);
-    interval_t duration = wakeup_time - now;
-    if (duration <= 0) {
-        return 0;
-    } else if (duration < LF_MIN_SLEEP_NS) {
-        lf_busy_wait_until(wakeup_time);
-        return 0;
-    } 
-
-    // The sleeping while loop continues until either:
-    // 1) A physical action is scheduled, resulting in a new event on the event queue
-    // 2) Sleep has completed successfully
-    bool sleep_next = true;
-    _lf_sleep_interrupted = false;
-    _lf_async_event = false;
-
-    do {
-        // Schedule a new timer interrupt unless we already have one pending
-        if (!_lf_sleep_interrupted) {
-            uint32_t curr_timer_val = nrfx_timer_capture(&g_lf_timer_inst, NRF_TIMER_CC_CHANNEL2);
-            uint32_t target_timer_val = 0;
-            // If the remaining sleep is longer than the limit, sleep for the maximum possible time.
-            if (duration > LF_MAX_SLEEP_NS) {
-                target_timer_val = curr_timer_val-1;
-                duration -= LF_MAX_SLEEP_NS;
-            } else {
-                target_timer_val = (uint32_t)(wakeup_time / 1000);
-                sleep_next = false;        
-            }
-            // init timer interrupt for sleep time
-            _lf_sleep_interrupted = true;
-            nrfx_timer_compare(&g_lf_timer_inst, NRF_TIMER_CC_CHANNEL2, target_timer_val, true);
-        }
-
-        // Leave critical section
-        lf_enable_interrupts_nested();
-        
-        // wait for exception
-        __WFE();
-
-        // Enter critical section again
-        lf_disable_interrupts_nested();
-
-        // Redo while loop and go back to sleep if:
-        //  1) We didnt have async event AND
-        //  2) We have more sleeps left OR the sleep didnt complete
-        // 
-        // This means we leave the sleep while if:
-        //  1) There was an async event OR
-        //  2) no more sleeps AND sleep not interrupted 
-    } while(!_lf_async_event && (sleep_next || _lf_sleep_interrupted));
-    
-    if (!_lf_async_event) {
-        return 0;
-    } else {
-        LF_PRINT_DEBUG("Sleep got interrupted...\n");
-        return -1;
+  instant_t now;
+  _lf_clock_gettime(&now);
+  interval_t duration = wakeup_time - now;
+  if (duration <= 0) {
+    return 0;
+  } else if (duration < LF_MIN_SLEEP_NS) {
+    lf_busy_wait_until(wakeup_time);
+    return 0;
+  }
+
+  // The sleeping while loop continues until either:
+  // 1) A physical action is scheduled, resulting in a new event on the event queue
+  // 2) Sleep has completed successfully
+  bool sleep_next = true;
+  _lf_sleep_interrupted = false;
+  _lf_async_event = false;
+
+  do {
+    // Schedule a new timer interrupt unless we already have one pending
+    if (!_lf_sleep_interrupted) {
+      uint32_t curr_timer_val = nrfx_timer_capture(&g_lf_timer_inst, NRF_TIMER_CC_CHANNEL2);
+      uint32_t target_timer_val = 0;
+      // If the remaining sleep is longer than the limit, sleep for the maximum possible time.
+      if (duration > LF_MAX_SLEEP_NS) {
+        target_timer_val = curr_timer_val - 1;
+        duration -= LF_MAX_SLEEP_NS;
+      } else {
+        target_timer_val = (uint32_t)(wakeup_time / 1000);
+        sleep_next = false;
+      }
+      // init timer interrupt for sleep time
+      _lf_sleep_interrupted = true;
+      nrfx_timer_compare(&g_lf_timer_inst, NRF_TIMER_CC_CHANNEL2, target_timer_val, true);
     }
+
+    // Leave critical section
+    lf_enable_interrupts_nested();
+
+    // wait for exception
+    __WFE();
+
+    // Enter critical section again
+    lf_disable_interrupts_nested();
+
+    // Redo while loop and go back to sleep if:
+    //  1) We didnt have async event AND
+    //  2) We have more sleeps left OR the sleep didnt complete
+    //
+    // This means we leave the sleep while if:
+    //  1) There was an async event OR
+    //  2) no more sleeps AND sleep not interrupted
+  } while (!_lf_async_event && (sleep_next || _lf_sleep_interrupted));
+
+  if (!_lf_async_event) {
+    return 0;
+  } else {
+    LF_PRINT_DEBUG("Sleep got interrupted...\n");
+    return -1;
+  }
 }
 
 /**
  * @brief Enter critical section. Let NRF Softdevice handle nesting
- * @return int 
+ * @return int
  */
-int lf_enable_interrupts_nested() {
-    return sd_nvic_critical_region_enter(&_lf_nested_region);
-}
+int lf_enable_interrupts_nested() { return sd_nvic_critical_region_enter(&_lf_nested_region); }
 
 /**
  * @brief Exit citical section. Let NRF SoftDevice handle nesting
- * 
- * @return int 
+ *
+ * @return int
  */
-int lf_disable_interrupts_nested() {
-    return sd_nvic_critical_region_exit(_lf_nested_region);
-}
+int lf_disable_interrupts_nested() { return sd_nvic_critical_region_exit(_lf_nested_region); }
 
 /**
- * @brief Set global flag to true so that sleep will return when woken 
- * 
- * @return int 
+ * @brief Set global flag to true so that sleep will return when woken
+ *
+ * @return int
  */
 int _lf_single_threaded_notify_of_event() {
-    _lf_async_event = true;
-    return 0;
+  _lf_async_event = true;
+  return 0;
 }
 #endif
diff --git a/core/platform/lf_os_single_threaded_support.c b/low_level_platform/impl/src/lf_os_single_threaded_support.c
similarity index 78%
rename from core/platform/lf_os_single_threaded_support.c
rename to low_level_platform/impl/src/lf_os_single_threaded_support.c
index 1306d4c17..1958a3ef6 100644
--- a/core/platform/lf_os_single_threaded_support.c
+++ b/low_level_platform/impl/src/lf_os_single_threaded_support.c
@@ -12,26 +12,20 @@
  */
 
 #if defined(_THREADS_H) || defined(_PTHREAD_H)
-    #error Usage of threads in the single-threaded runtime is not safe.
+#error Usage of threads in the single-threaded runtime is not safe.
 #endif
 
 /**
  * @brief Single-threaded support under a OS is a special case in which we assume
  * only a single execution context. Other threads scheduling physical actions
  * are not a use-case. ISRs scheduling physical actions are also not a use-case.
- * 
- * @return int 
+ *
+ * @return int
  */
-int lf_disable_interrupts_nested() {
-    return 0;
-}
+int lf_disable_interrupts_nested() { return 0; }
 
-int lf_enable_interrupts_nested() {
-    return 0;
-}
+int lf_enable_interrupts_nested() { return 0; }
 
-int _lf_single_threaded_notify_of_event() {
-    return 0;
-}
+int _lf_single_threaded_notify_of_event() { return 0; }
 
 #endif
diff --git a/core/platform/lf_rp2040_support.c b/low_level_platform/impl/src/lf_rp2040_support.c
similarity index 61%
rename from core/platform/lf_rp2040_support.c
rename to low_level_platform/impl/src/lf_rp2040_support.c
index 6a0194f5e..88940dd94 100644
--- a/core/platform/lf_rp2040_support.c
+++ b/low_level_platform/impl/src/lf_rp2040_support.c
@@ -24,10 +24,10 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 ***************/
 
 /**
- * @brief RP2040 mcu support for the C target of Lingua Franca. 
+ * @brief RP2040 mcu support for the C target of Lingua Franca.
  * This utilizes the pico-sdk which provides C methods for a light runtime
  * and a hardware abstraction layer.
- * 
+ *
  * @author{Abhi Gundrala }
  */
 
@@ -35,8 +35,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #error "Only the single-threaded runtime has support for RP2040"
 #endif
 
-#include "lf_rp2040_support.h"
-#include "platform.h"
+#include "platform/lf_rp2040_support.h"
+#include "low_level_platform.h"
 #include "utils/util.h"
 #include "tag.h"
 
@@ -44,7 +44,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <pico/stdlib.h>
 #include <pico/multicore.h>
 
-/** 
+/**
  * critical section struct
  * disables external irq and core execution
  * provides mutual exclusion using hardware spin-locks
@@ -52,7 +52,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 static critical_section_t _lf_crit_sec;
 
 /**
- * binary semaphore for lf event notification 
+ * binary semaphore for lf event notification
  * used by external isr or second core thread.
  * used to interact with the lf runtime thread
  */
@@ -62,59 +62,59 @@ static semaphore_t _lf_sem_irq_event;
 static uint32_t _lf_num_nested_crit_sec = 0;
 
 /**
- * Initialize basic runtime infrastructure and 
+ * Initialize basic runtime infrastructure and
  * synchronization structs for an single-threaded runtime.
  */
 void _lf_initialize_clock(void) {
-    // init stdio lib
-    stdio_init_all();
-    // init sync structs
-    critical_section_init(&_lf_crit_sec);
-    sem_init(&_lf_sem_irq_event, 0, 1);
+  // init stdio lib
+  stdio_init_all();
+  // init sync structs
+  critical_section_init(&_lf_crit_sec);
+  sem_init(&_lf_sem_irq_event, 0, 1);
 }
 
 /**
- * Write the time since boot in nanoseconds into 
+ * Write the time since boot in nanoseconds into
  * the time variable pointed to by the argument
  * and return 0.
- * 
+ *
  * @param  t  pointer to the time variable to write to.
- * @return error code or 0 on success. 
+ * @return error code or 0 on success.
  */
 int _lf_clock_gettime(instant_t* t) {
-    if (!t) {
-        return -1;
-    }
-    // time struct
-    absolute_time_t now;
-    uint64_t ns_from_boot;
-
-    now = get_absolute_time();
-    ns_from_boot = to_us_since_boot(now) * 1000;
-    *t = (instant_t) ns_from_boot;
-    return 0; 
+  if (!t) {
+    return -1;
+  }
+  // time struct
+  absolute_time_t now;
+  uint64_t ns_from_boot;
+
+  now = get_absolute_time();
+  ns_from_boot = to_us_since_boot(now) * 1000;
+  *t = (instant_t)ns_from_boot;
+  return 0;
 }
 
 /**
- * Pause execution of the calling core for 
+ * Pause execution of the calling core for
  * a nanosecond duration specified by the argument.
  * Floor the specified duration to the nearest microsecond
  * duration before sleeping and return 0.
  *
  * @param  sleep_duration  time to sleep in nanoseconds
  * @return error code or 0 on success
- */ 
+ */
 int lf_sleep(interval_t sleep_duration) {
-    if (sleep_duration < 0) {
-        return -1;
-    }
-    sleep_us((uint64_t) (sleep_duration / 1000));
-    return 0;
+  if (sleep_duration < 0) {
+    return -1;
+  }
+  sleep_us((uint64_t)(sleep_duration / 1000));
+  return 0;
 }
 
 /**
  * Sleep until the target time since boot in nanoseconds provided
- * by the argument or return early if the binary 
+ * by the argument or return early if the binary
  * _lf_sem_irq_event semaphore is released before the target time.
  *
  * The semaphore is released using the _lf_single_threaded_notify_of_event
@@ -125,28 +125,28 @@ int lf_sleep(interval_t sleep_duration) {
  * @return -1 when interrupted or 0 on successful timeout
  */
 int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_time) {
-    int ret_code = 0;
-    // return immediately
-    if (wakeup_time < 0) {
-        return ret_code;
-    }
-    // time struct
-    absolute_time_t target;
-    
-    // reset event semaphore 
-    sem_reset(&_lf_sem_irq_event, 0);
-    // create us boot wakeup time
-    target = from_us_since_boot((uint64_t) (wakeup_time / 1000));
-    // allow interrupts
-    LF_CRITICAL_SECTION_EXIT(env);
-    // blocked sleep
-    // return on timeout or on processor event
-    if(sem_acquire_block_until(&_lf_sem_irq_event, target)) {
-        ret_code = -1;
-    }
-    // remove interrupts
-    LF_CRITICAL_SECTION_ENTER(env);
+  int ret_code = 0;
+  // return immediately
+  if (wakeup_time < 0) {
     return ret_code;
+  }
+  // time struct
+  absolute_time_t target;
+
+  // reset event semaphore
+  sem_reset(&_lf_sem_irq_event, 0);
+  // create us boot wakeup time
+  target = from_us_since_boot((uint64_t)(wakeup_time / 1000));
+  // allow interrupts
+  LF_CRITICAL_SECTION_EXIT(env);
+  // blocked sleep
+  // return on timeout or on processor event
+  if (sem_acquire_block_until(&_lf_sem_irq_event, target)) {
+    ret_code = -1;
+  }
+  // remove interrupts
+  LF_CRITICAL_SECTION_ENTER(env);
+  return ret_code;
 }
 
 /**
@@ -157,41 +157,40 @@ int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_ti
  * @return error code or 0 on success
  */
 int lf_disable_interrupts_nested() {
-    if (!critical_section_is_initialized(&_lf_crit_sec)) {
-        return 1;
-    }
-    // check crit sec count
-    // enter non-rentrant state by disabling interrupts
-    // lock second core execution
-    if (_lf_num_nested_crit_sec == 0) {
-        // block if associated spin lock in use
-        critical_section_enter_blocking(&_lf_crit_sec);
-    }
-    // add crit sec count
-    _lf_num_nested_crit_sec++;
-    return 0;
+  if (!critical_section_is_initialized(&_lf_crit_sec)) {
+    return 1;
+  }
+  // check crit sec count
+  // enter non-rentrant state by disabling interrupts
+  // lock second core execution
+  if (_lf_num_nested_crit_sec == 0) {
+    // block if associated spin lock in use
+    critical_section_enter_blocking(&_lf_crit_sec);
+  }
+  // add crit sec count
+  _lf_num_nested_crit_sec++;
+  return 0;
 }
 
 /**
- * Exit a critical section which will resume second core 
- * execution and enable interrupts. 
+ * Exit a critical section which will resume second core
+ * execution and enable interrupts.
  * Exit only if no other critical sections are left to exit.
  *
  * @return error code or 0 on success
  */
 int lf_enable_interrupts_nested() {
-    if (!critical_section_is_initialized(&_lf_crit_sec) ||
-        _lf_num_nested_crit_sec <= 0) {
-        return 1;
-    }
-    // remove from crit sec count
-    _lf_num_nested_crit_sec--;
-    // check crit sec count
-    // release spin-lock
-    if (_lf_num_nested_crit_sec == 0) {
-        critical_section_exit(&_lf_crit_sec);
-    }
-    return 0;
+  if (!critical_section_is_initialized(&_lf_crit_sec) || _lf_num_nested_crit_sec <= 0) {
+    return 1;
+  }
+  // remove from crit sec count
+  _lf_num_nested_crit_sec--;
+  // check crit sec count
+  // release spin-lock
+  if (_lf_num_nested_crit_sec == 0) {
+    critical_section_exit(&_lf_crit_sec);
+  }
+  return 0;
 }
 
 #if defined(LF_SINGLE_THREADED)
@@ -202,12 +201,10 @@ int lf_enable_interrupts_nested() {
  * @return error code or 0 on success
  */
 int _lf_single_threaded_notify_of_event() {
-    // notify main sleep loop of event
-    sem_release(&_lf_sem_irq_event);
-    return 0;
+  // notify main sleep loop of event
+  sem_release(&_lf_sem_irq_event);
+  return 0;
 }
 #endif // LF_SINGLE_THREADED
 
-
 #endif // PLATFORM_RP2040
-
diff --git a/low_level_platform/impl/src/lf_unix_clock_support.c b/low_level_platform/impl/src/lf_unix_clock_support.c
new file mode 100644
index 000000000..b9c9fae56
--- /dev/null
+++ b/low_level_platform/impl/src/lf_unix_clock_support.c
@@ -0,0 +1,43 @@
+#if defined(PLATFORM_Linux) || defined(PLATFORM_Darwin)
+#include <time.h>
+#include <errno.h>
+
+#include "low_level_platform.h"
+#include "logging.h"
+#include "platform/lf_unix_clock_support.h"
+
+instant_t convert_timespec_to_ns(struct timespec tp) { return ((instant_t)tp.tv_sec) * BILLION + tp.tv_nsec; }
+
+struct timespec convert_ns_to_timespec(instant_t t) {
+  struct timespec tp;
+  tp.tv_sec = t / BILLION;
+  tp.tv_nsec = (t % BILLION);
+  return tp;
+}
+
+void _lf_initialize_clock() {
+  struct timespec res;
+  int return_value = clock_getres(CLOCK_REALTIME, (struct timespec*)&res);
+  if (return_value < 0) {
+    lf_print_error_and_exit("Could not obtain resolution for CLOCK_REALTIME");
+  }
+
+  lf_print("---- System clock resolution: %ld nsec", res.tv_nsec);
+}
+
+/**
+ * Fetch the value of CLOCK_REALTIME and store it in t.
+ * @return 0 for success, or -1 for failure.
+ */
+int _lf_clock_gettime(instant_t* t) {
+  if (t == NULL)
+    return -1;
+  struct timespec tp;
+  if (clock_gettime(CLOCK_REALTIME, (struct timespec*)&tp) != 0) {
+    return -1;
+  }
+  *t = convert_timespec_to_ns(tp);
+  return 0;
+}
+
+#endif
diff --git a/core/platform/lf_unix_syscall_support.c b/low_level_platform/impl/src/lf_unix_syscall_support.c
similarity index 80%
rename from core/platform/lf_unix_syscall_support.c
rename to low_level_platform/impl/src/lf_unix_syscall_support.c
index 331975846..992824c33 100644
--- a/core/platform/lf_unix_syscall_support.c
+++ b/low_level_platform/impl/src/lf_unix_syscall_support.c
@@ -5,14 +5,12 @@
  * @brief Platform support for syscalls in Unix-like systems.
  * @version 0.1
  * @date 2022-03-09
- * 
+ *
  * @copyright Copyright (c) 2022 The University of Texas at Dallas
- * 
+ *
  */
 
 #include <unistd.h>
 
-int lf_available_cores() {
-    return (int)sysconf(_SC_NPROCESSORS_ONLN);
-}
+int lf_available_cores() { return (int)sysconf(_SC_NPROCESSORS_ONLN); }
 #endif
diff --git a/low_level_platform/impl/src/lf_windows_support.c b/low_level_platform/impl/src/lf_windows_support.c
new file mode 100644
index 000000000..1cdadc43c
--- /dev/null
+++ b/low_level_platform/impl/src/lf_windows_support.c
@@ -0,0 +1,287 @@
+#ifdef PLATFORM_Windows
+/* Windows API support for the C target of Lingua Franca. */
+
+/*************
+Copyright (c) 2021, The University of California at Berkeley.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+***************/
+
+/** Windows API support for the C target of Lingua Franca.
+ *
+ *  @author{Soroush Bateni }
+ *
+ * All functions return 0 on success.
+ *
+ * @see https://gist.github.com/Soroosh129/127d1893fa4c1da6d3e1db33381bb273
+ */
+
+#include <windows.h> // Order in which windows.h is included does matter!
+#include <process.h>
+#include <sysinfoapi.h>
+#include <time.h>
+#include <errno.h>
+
+#include "platform/lf_windows_support.h"
+#include "low_level_platform.h"
+#include "tag.h"
+
+/**
+ * Indicate whether or not the underlying hardware
+ * supports Windows' high-resolution counter. It should
+ * always be supported for Windows Xp and later.
+ */
+int _lf_use_performance_counter = 0;
+
+/**
+ * The denominator to convert the performance counter
+ * to nanoseconds.
+ */
+double _lf_frequency_to_ns = 1.0;
+
+void _lf_initialize_clock() {
+  // Check if the performance counter is available
+  LARGE_INTEGER performance_frequency;
+  _lf_use_performance_counter = QueryPerformanceFrequency(&performance_frequency);
+  if (_lf_use_performance_counter) {
+    _lf_frequency_to_ns = (double)performance_frequency.QuadPart / BILLION;
+  } else {
+    lf_print_error("High resolution performance counter is not supported on this machine.");
+    _lf_frequency_to_ns = 0.01;
+  }
+}
+
+/**
+ * Fetch the value of the physical clock (see lf_windows_support.h) and store it in t.
+ * The timestamp value in 't' will be based on QueryPerformanceCounter, adjusted to
+ * reflect time passed in nanoseconds, on most modern Windows systems.
+ *
+ * @return 0 for success, or -1 for failure. In case of failure, errno will be
+ *  set to EINVAL or EFAULT.
+ */
+int _lf_clock_gettime(instant_t* t) {
+  // Adapted from gclib/GResUsage.cpp
+  // (https://github.com/gpertea/gclib/blob/8aee376774ccb2f3bd3f8e3bf1c9df1528ac7c5b/GResUsage.cpp)
+  // License: https://github.com/gpertea/gclib/blob/master/LICENSE.txt
+  int result = -1;
+  if (t == NULL) {
+    // The t argument address references invalid memory
+    errno = EFAULT;
+    return result;
+  }
+  LARGE_INTEGER windows_time;
+  if (_lf_use_performance_counter) {
+    int result = QueryPerformanceCounter(&windows_time);
+    if (result == 0) {
+      lf_print_error("_lf_clock_gettime(): Failed to read the value of the physical clock.");
+      return result;
+    }
+  } else {
+    FILETIME f;
+    GetSystemTimeAsFileTime(&f);
+    windows_time.QuadPart = f.dwHighDateTime;
+    windows_time.QuadPart <<= 32;
+    windows_time.QuadPart |= f.dwLowDateTime;
+  }
+  *t = (instant_t)((double)windows_time.QuadPart / _lf_frequency_to_ns);
+  return (0);
+}
+
+/**
+ * Pause execution for a number of nanoseconds.
+ *
+ * @return 0 for success, or -1 for failure. In case of failure, errno will be
+ *  set to
+ *   - EINTR: The sleep was interrupted by a signal handler
+ *   - EINVAL: All other errors
+ */
+int lf_sleep(interval_t sleep_duration) {
+  /* Declarations */
+  HANDLE timer;     /* Timer handle */
+  LARGE_INTEGER li; /* Time defintion */
+  /* Create timer */
+  if (!(timer = CreateWaitableTimer(NULL, TRUE, NULL))) {
+    return FALSE;
+  }
+  /**
+   * Set timer properties.
+   * A negative number indicates relative time to wait.
+   * The requested sleep duration must be in number of 100 nanoseconds.
+   */
+  li.QuadPart = -1 * (sleep_duration / 100);
+  if (!SetWaitableTimer(timer, &li, 0, NULL, NULL, FALSE)) {
+    CloseHandle(timer);
+    return FALSE;
+  }
+  /* Start & wait for timer */
+  WaitForSingleObject(timer, INFINITE);
+  /* Clean resources */
+  CloseHandle(timer);
+  /* Slept without problems */
+  return TRUE;
+}
+
+int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_time) {
+  interval_t sleep_duration = wakeup_time - lf_time_physical();
+
+  if (sleep_duration <= 0) {
+    return 0;
+  } else {
+    return lf_sleep(sleep_duration);
+  }
+}
+
+int lf_nanosleep(interval_t sleep_duration) { return lf_sleep(sleep_duration); }
+
+#if defined(LF_SINGLE_THREADED)
+#include "lf_os_single_threaded_support.c"
+#endif
+
+#if !defined(LF_SINGLE_THREADED)
+int lf_available_cores() {
+  SYSTEM_INFO sysinfo;
+  GetSystemInfo(&sysinfo);
+  return sysinfo.dwNumberOfProcessors;
+}
+
+int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) {
+  uintptr_t handle = _beginthreadex(NULL, 0, lf_thread, arguments, 0, NULL);
+  *thread = (HANDLE)handle;
+  if (handle == 0) {
+    return errno;
+  } else {
+    return 0;
+  }
+}
+
+/**
+ * Make calling thread wait for termination of the thread.  The
+ * exit status of the thread is stored in thread_return, if thread_return
+ * is not NULL.
+ *
+ * @return 0 on success, EINVAL otherwise.
+ */
+int lf_thread_join(lf_thread_t thread, void** thread_return) {
+  DWORD retvalue = WaitForSingleObject(thread, INFINITE);
+  if (retvalue == WAIT_FAILED) {
+    return EINVAL;
+  }
+  return 0;
+}
+
+int lf_mutex_init(_lf_critical_section_t* critical_section) {
+  // Set up a recursive mutex
+  InitializeCriticalSection((PCRITICAL_SECTION)critical_section);
+  if (critical_section != NULL) {
+    return 0;
+  } else {
+    return 1;
+  }
+}
+
+/**
+ * Lock a critical section.
+ *
+ * From https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-entercriticalsection:
+ *    "This function can raise EXCEPTION_POSSIBLE_DEADLOCK if a wait operation on the critical section times out.
+ *     The timeout interval is specified by the following registry value:
+ *     HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\CriticalSectionTimeout.
+ *     Do not handle a possible deadlock exception; instead, debug the application."
+ *
+ * @return 0
+ */
+int lf_mutex_lock(_lf_critical_section_t* critical_section) {
+  // The following Windows API does not return a value. It can
+  // raise a EXCEPTION_POSSIBLE_DEADLOCK. See synchapi.h.
+  EnterCriticalSection((PCRITICAL_SECTION)critical_section);
+  return 0;
+}
+
+int lf_mutex_unlock(_lf_critical_section_t* critical_section) {
+  // The following Windows API does not return a value.
+  LeaveCriticalSection((PCRITICAL_SECTION)critical_section);
+  return 0;
+}
+
+int lf_cond_init(lf_cond_t* cond, _lf_critical_section_t* critical_section) {
+  // The following Windows API does not return a value.
+  cond->critical_section = critical_section;
+  InitializeConditionVariable((PCONDITION_VARIABLE)&cond->condition);
+  return 0;
+}
+
+int lf_cond_broadcast(lf_cond_t* cond) {
+  // The following Windows API does not return a value.
+  WakeAllConditionVariable((PCONDITION_VARIABLE)&cond->condition);
+  return 0;
+}
+
+int lf_cond_signal(lf_cond_t* cond) {
+  // The following Windows API does not return a value.
+  WakeConditionVariable((PCONDITION_VARIABLE)&cond->condition);
+  return 0;
+}
+
+int lf_cond_wait(lf_cond_t* cond) {
+  // According to synchapi.h, the following Windows API returns 0 on failure,
+  // and non-zero on success.
+  int return_value = (int)SleepConditionVariableCS((PCONDITION_VARIABLE)&cond->condition,
+                                                   (PCRITICAL_SECTION)cond->critical_section, INFINITE);
+  switch (return_value) {
+  case 0:
+    // Error
+    return 1;
+    break;
+
+  default:
+    // Success
+    return 0;
+    break;
+  }
+}
+
+int _lf_cond_timedwait(lf_cond_t* cond, instant_t wakeup_time) {
+  // Convert the absolute time to a relative time.
+  interval_t wait_duration = wakeup_time - lf_time_physical();
+  if (wait_duration <= 0) {
+    // physical time has already caught up sufficiently and we do not need to wait anymore
+    return 0;
+  }
+
+  // convert ns to ms and round up to closest full integer
+  DWORD wait_duration_ms = (wait_duration + 999999LL) / 1000000LL;
+
+  int return_value = (int)SleepConditionVariableCS((PCONDITION_VARIABLE)&cond->condition,
+                                                   (PCRITICAL_SECTION)cond->critical_section, wait_duration_ms);
+  if (return_value == 0) {
+    // Error
+    if (GetLastError() == ERROR_TIMEOUT) {
+      return LF_TIMEOUT;
+    }
+    return -1;
+  }
+
+  // Success
+  return 0;
+}
+#endif
+
+#endif
diff --git a/low_level_platform/impl/src/lf_zephyr_clock_counter.c b/low_level_platform/impl/src/lf_zephyr_clock_counter.c
new file mode 100644
index 000000000..fcb285d44
--- /dev/null
+++ b/low_level_platform/impl/src/lf_zephyr_clock_counter.c
@@ -0,0 +1,217 @@
+#if defined(PLATFORM_ZEPHYR)
+#include "platform/lf_zephyr_board_support.h"
+#if defined(LF_ZEPHYR_CLOCK_COUNTER)
+/*************
+Copyright (c) 2023, Norwegian University of Science and Technology.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+***************/
+
+/**
+ * @brief This implements the timing-related platform API on top of the Zephyr
+ * Counter API. The Counter API is a generic interface to a timer peripheral. It
+ * gives the best timing performance and allows actual sleeping rather than
+ * busy-waiting which is performed with the Kernel API.
+ *
+ * @author{Erling Jellum <erling.r.jellum@ntnu.no>}
+ * @author{Marten Lohstroh <marten@berkeley.edu>}
+ */
+#include <zephyr/drivers/counter.h>
+#include <zephyr/kernel.h>
+
+#include "platform/lf_zephyr_support.h"
+#include "low_level_platform.h"
+#include "logging_macros.h"
+
+static int64_t epoch_duration_nsec;
+static int64_t epoch_duration_usec;
+static uint32_t counter_max_ticks;
+static volatile int64_t last_epoch_nsec = 0;
+static uint32_t counter_freq;
+static volatile bool async_event = false;
+
+K_SEM_DEFINE(semaphore, 0, 1)
+
+static struct counter_alarm_cfg alarm_cfg;
+const struct device* const counter_dev = DEVICE_DT_GET(LF_TIMER);
+static volatile bool alarm_fired;
+
+/**
+ * This callback is invoked when the underlying Timer peripheral overflows.
+ * Handled by incrementing the epoch variable.
+ */
+static void overflow_callback(const struct device* dev, void* user_data) { last_epoch_nsec += epoch_duration_nsec; }
+
+/**
+ * This callback is invoked when the alarm configured for sleeping expires.
+ * The sleeping thread is released by giving it the semaphore.
+ */
+static void alarm_callback(const struct device* counter_dev, uint8_t chan_id, uint32_t ticks, void* user_data) {
+  alarm_fired = true;
+  k_sem_give(&semaphore);
+}
+
+/**
+ * Initialize the Counter device. Check its frequency and compute epoch
+ * durations.
+ */
+void _lf_initialize_clock() {
+  struct counter_top_cfg counter_top_cfg;
+  uint32_t counter_max_ticks = 0;
+  int res;
+
+  // Verify that we have the device
+  if (!device_is_ready(counter_dev)) {
+    lf_print_error_and_exit("ERROR: counter device not ready.\n");
+  }
+
+  // Verify that it is working as we think
+  if (!counter_is_counting_up(counter_dev)) {
+    lf_print_error_and_exit("ERROR: Counter is counting down \n");
+  }
+
+  // Get the frequency of the timer
+  counter_freq = counter_get_frequency(counter_dev);
+
+  // Calculate the duration of an epoch. Compute both
+  //  nsec and usec now at boot to avoid these computations later
+  counter_max_ticks = counter_get_max_top_value(counter_dev);
+  epoch_duration_usec = counter_ticks_to_us(counter_dev, counter_max_ticks);
+  epoch_duration_nsec = epoch_duration_usec * 1000LL;
+
+  // Set the max_top value to be the maximum
+  counter_top_cfg.ticks = counter_max_ticks;
+  counter_top_cfg.callback = overflow_callback;
+  res = counter_set_top_value(counter_dev, &counter_top_cfg);
+  if (res != 0) {
+    lf_print_error_and_exit("ERROR: Timer couldnt set top value\n");
+  }
+
+  LF_PRINT_LOG("--- Using LF Zephyr Counter Clock with a frequency of %u Hz and wraps every %u sec\n", counter_freq,
+               counter_max_ticks / counter_freq);
+
+  // Prepare the alarm config
+  alarm_cfg.flags = 0;
+  alarm_cfg.ticks = 0;
+  alarm_cfg.callback = alarm_callback;
+  alarm_cfg.user_data = &alarm_cfg;
+
+  // Start counter
+  counter_start(counter_dev);
+}
+
+/**
+ * The Counter device tracks current physical time. Overflows are handled in an
+ * ISR.
+ */
+int _lf_clock_gettime(instant_t* t) {
+  static uint64_t last_nsec = 0;
+  uint32_t now_cycles;
+  int res;
+  uint64_t now_nsec;
+
+  res = counter_get_value(counter_dev, &now_cycles);
+  now_nsec = counter_ticks_to_us(counter_dev, now_cycles) * 1000ULL + last_epoch_nsec;
+
+  // Make sure that the clock is monotonic. We might have had a wrap but the
+  // epoch has not been updated because interrupts are disabled.
+  if (now_nsec < last_nsec) {
+    now_nsec = last_nsec + 1;
+  }
+
+  *t = now_nsec;
+  last_nsec = now_nsec;
+  return 0;
+}
+
+/**
+ * Handle interruptable sleep by configuring a future alarm callback and waiting
+ * on a semaphore. Make sure we can handle sleeps that exceed an entire epoch
+ * of the Counter.
+ */
+int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup) {
+  // Reset flags
+  alarm_fired = false;
+  async_event = false;
+  k_sem_reset(&semaphore);
+
+  // Calculate the sleep duration
+  uint32_t now_cycles, sleep_duration_ticks;
+  counter_get_value(counter_dev, &now_cycles);
+  instant_t now;
+  _lf_clock_gettime(&now);
+  interval_t sleep_for_us = (wakeup - now) / 1000;
+
+  while (!async_event && sleep_for_us > (LF_WAKEUP_OVERHEAD_US + LF_MIN_SLEEP_US)) {
+    if (sleep_for_us < epoch_duration_usec) {
+      sleep_duration_ticks = counter_us_to_ticks(counter_dev, ((uint64_t)sleep_for_us) - LF_WAKEUP_OVERHEAD_US);
+    } else {
+      sleep_duration_ticks = UINT32_MAX;
+    }
+
+    alarm_cfg.ticks = sleep_duration_ticks;
+    int err = counter_set_channel_alarm(counter_dev, LF_TIMER_ALARM_CHANNEL, &alarm_cfg);
+
+    if (err != 0) {
+      lf_print_error_and_exit("Could not setup alarm for sleeping. Errno %i", err);
+    }
+
+    if (lf_critical_section_exit(env)) {
+      lf_print_error_and_exit("Failed to exit critical section.");
+    }
+    k_sem_take(&semaphore, K_FOREVER);
+    if (lf_critical_section_enter(env)) {
+      lf_print_error_and_exit("Failed to enter critical section.");
+    }
+
+    // Then calculate the remaining sleep, unless we got woken up by an event
+    if (!async_event) {
+      _lf_clock_gettime(&now);
+      sleep_for_us = (wakeup - now) / 1000;
+    }
+  }
+
+  // Do remaining sleep in busy_wait
+  if (!async_event && sleep_for_us > LF_RUNTIME_OVERHEAD_US) {
+    k_busy_wait((uint32_t)(sleep_for_us - LF_RUNTIME_OVERHEAD_US));
+  }
+
+  if (async_event) {
+    // Cancel the outstanding alarm
+    counter_cancel_channel_alarm(counter_dev, LF_TIMER_ALARM_CHANNEL);
+    async_event = false;
+    return -1;
+  } else {
+    return 0;
+  }
+}
+
+/**
+ * We notify of async events by setting the flag and giving the semaphore.
+ */
+int _lf_single_threaded_notify_of_event() {
+  async_event = true;
+  k_sem_give(&semaphore);
+  return 0;
+}
+
+#endif
+#endif
diff --git a/core/platform/lf_zephyr_clock_kernel.c b/low_level_platform/impl/src/lf_zephyr_clock_kernel.c
similarity index 69%
rename from core/platform/lf_zephyr_clock_kernel.c
rename to low_level_platform/impl/src/lf_zephyr_clock_kernel.c
index ab24de586..183eebbbe 100644
--- a/core/platform/lf_zephyr_clock_kernel.c
+++ b/low_level_platform/impl/src/lf_zephyr_clock_kernel.c
@@ -1,5 +1,5 @@
 #if defined(PLATFORM_ZEPHYR)
-#include "lf_zephyr_board_support.h"
+#include "platform/lf_zephyr_board_support.h"
 #if defined(LF_ZEPHYR_CLOCK_KERNEL)
 
 /*************
@@ -37,9 +37,9 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <zephyr/kernel.h>
 
-#include "lf_zephyr_support.h"
-#include "platform.h"
-#include "util.h"
+#include "platform/lf_zephyr_support.h"
+#include "low_level_platform.h"
+#include "logging_macros.h"
 
 static int64_t epoch_duration_nsec;
 static volatile int64_t last_epoch_nsec = 0;
@@ -47,47 +47,51 @@ static uint32_t timer_freq;
 static volatile bool async_event = false;
 
 void _lf_initialize_clock() {
-    timer_freq = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
-    LF_PRINT_LOG("--- Using LF Zephyr Kernel Clock with a frequency of %u Hz\n", timer_freq);
-    last_epoch_nsec = 0;
-    epoch_duration_nsec = ((1LL << 32) * SECONDS(1))/CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
+  timer_freq = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
+  LF_PRINT_LOG("--- Using LF Zephyr Kernel Clock with a frequency of %u Hz\n", timer_freq);
+  last_epoch_nsec = 0;
+  epoch_duration_nsec = ((1LL << 32) * SECONDS(1)) / CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
 }
 
 /**
  * Detect wraps by storing the previous clock readout. When a clock readout is
  * less than the previous we have had a wrap. This only works of `_lf_clock_gettime`
- * is invoked at least once per epoch. 
+ * is invoked at least once per epoch.
  */
 int _lf_clock_gettime(instant_t* t) {
-    static uint32_t last_read_cycles=0;
-    uint32_t now_cycles = k_cycle_get_32();
-    if (now_cycles < last_read_cycles) {
-        last_epoch_nsec += epoch_duration_nsec;
-    }
-    *t = (SECOND(1)/CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC)*now_cycles + last_epoch_nsec;
-    last_read_cycles = now_cycles;
-    return 0;
+  static uint32_t last_read_cycles = 0;
+  uint32_t now_cycles = k_cycle_get_32();
+  if (now_cycles < last_read_cycles) {
+    last_epoch_nsec += epoch_duration_nsec;
+  }
+  *t = (SECOND(1) / CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) * now_cycles + last_epoch_nsec;
+  last_read_cycles = now_cycles;
+  return 0;
 }
 
 /**
  * Interruptable sleep is implemented using busy-waiting.
  */
 int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup) {
-    async_event=false;    
+  async_event = false;
 
-    LF_CRITICAL_SECTION_EXIT(env);
-    instant_t now;
-    do {
+  if (lf_critical_section_exit(env)) {
+    lf_print_error_and_exit("Failed to exit critical section.");
+  }
+  instant_t now;
+  do {
     _lf_clock_gettime(&now);
-    } while ( (now
 #include 
 
-#include "lf_zephyr_support.h"
-#include "lf_zephyr_board_support.h"
-#include "platform.h"
-#include "reactor.h"
-#include "utils/util.h"
+#include "platform/lf_zephyr_support.h"
+#include "platform/lf_zephyr_board_support.h"
+#include "low_level_platform.h"
 #include "tag.h"
 
 #include 
 
 // Keep track of nested critical sections
-static uint32_t num_nested_critical_sections=0;
+static uint32_t num_nested_critical_sections = 0;
 // Keep track of IRQ mask when entering critical section so we can enable again after
 static volatile unsigned irq_mask = 0;
 
 int lf_sleep(interval_t sleep_duration) {
-    k_sleep(K_NSEC(sleep_duration));
-    return 0;
+  k_sleep(K_NSEC(sleep_duration));
+  return 0;
 }
 
-int lf_nanosleep(interval_t sleep_duration) {
-    return lf_sleep(sleep_duration);
-}
+int lf_nanosleep(interval_t sleep_duration) { return lf_sleep(sleep_duration); }
 
 int lf_disable_interrupts_nested() {
-    if (num_nested_critical_sections++ == 0) {
-        irq_mask = irq_lock();
-    }
-    return 0;
+  if (num_nested_critical_sections++ == 0) {
+    irq_mask = irq_lock();
+  }
+  return 0;
 }
 
 int lf_enable_interrupts_nested() {
-    if (num_nested_critical_sections <= 0) {
-        return 1;
-    }
-    
-    if (--num_nested_critical_sections == 0) {
-        irq_unlock(irq_mask);
-    }
-    return 0;
+  if (num_nested_critical_sections <= 0) {
+    return 1;
+  }
+
+  if (--num_nested_critical_sections == 0) {
+    irq_unlock(irq_mask);
+  }
+  return 0;
 }
 
 #if !defined(LF_SINGLE_THREADED)
 #if !defined(LF_ZEPHYR_STACK_SIZE)
-    #define LF_ZEPHYR_STACK_SIZE LF_ZEPHYR_STACK_SIZE_DEFAULT
+#define LF_ZEPHYR_STACK_SIZE LF_ZEPHYR_STACK_SIZE_DEFAULT
 #endif
 
 #if !defined(LF_ZEPHYR_THREAD_PRIORITY)
-    #define LF_ZEPHYR_THREAD_PRIORITY LF_ZEPHYR_THREAD_PRIORITY_DEFAULT
+#define LF_ZEPHYR_THREAD_PRIORITY LF_ZEPHYR_THREAD_PRIORITY_DEFAULT
 #endif
 
 // If NUMBER_OF_WORKERS is not specified, or set to 0, then we default to 1.
-#if !defined(NUMBER_OF_WORKERS) || NUMBER_OF_WORKERS==0
+#if !defined(NUMBER_OF_WORKERS) || NUMBER_OF_WORKERS == 0
 #undef NUMBER_OF_WORKERS
 #define NUMBER_OF_WORKERS 1
 #endif
@@ -98,7 +93,6 @@ int lf_enable_interrupts_nested() {
 #define NUMBER_OF_WATCHDOGS 0
 #endif
 
-
 #define NUMBER_OF_THREADS (NUMBER_OF_WORKERS + USER_THREADS + NUMBER_OF_WATCHDOGS)
 
 K_MUTEX_DEFINE(thread_mutex);
@@ -107,101 +101,97 @@ static K_THREAD_STACK_ARRAY_DEFINE(stacks, NUMBER_OF_THREADS, LF_ZEPHYR_STACK_SI
 static struct k_thread threads[NUMBER_OF_THREADS];
 
 // Typedef that represents the function pointers passed by LF runtime into lf_thread_create
-typedef void *(*lf_function_t) (void *);
+typedef void* (*lf_function_t)(void*);
 
 // Entry point for all worker threads. an intermediate step to connect Zephyr threads with LF runtimes idea of a thread
-static void zephyr_worker_entry(void * func, void * args, void * unused2) {
-    lf_function_t _func = (lf_function_t) func;
-    _func(args);
+static void zephyr_worker_entry(void* func, void* args, void* unused2) {
+  lf_function_t _func = (lf_function_t)func;
+  _func(args);
 }
 
 int lf_available_cores() {
-    #if defined(CONFIG_MP_NUM_CPUS)
-        return CONFIG_MP_NUM_CPUS;
-    #else
-        return 1;
-    #endif
+#if defined(CONFIG_MP_NUM_CPUS)
+  return CONFIG_MP_NUM_CPUS;
+#else
+  return 1;
+#endif
 }
 
-int lf_thread_create(lf_thread_t* thread, void *(*lf_thread) (void *), void* arguments) {
-    k_mutex_lock(&thread_mutex, K_FOREVER);
+int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) {
+  k_mutex_lock(&thread_mutex, K_FOREVER);
 
-    // Use static id to map each created thread to a 
-    static int tid = 0;
+  // Use static id to map each created thread to a k_thread struct and stack.
+  static int tid = 0;
 
-    // Make sure we dont try to create too many threads
-    if (tid > (NUMBER_OF_THREADS-1)) {
-        return -1;
-    }
+  // Make sure we don't try to create too many threads
+  if (tid > (NUMBER_OF_THREADS - 1)) {
+    return -1;
+  }
 
-    k_tid_t my_tid = k_thread_create(&threads[tid], &stacks[tid][0],
-                                    LF_ZEPHYR_STACK_SIZE, zephyr_worker_entry,
-                                 (void *) lf_thread, arguments, NULL,
-                                 LF_ZEPHYR_THREAD_PRIORITY, 0, K_NO_WAIT);
+  k_thread_create(&threads[tid], &stacks[tid][0], LF_ZEPHYR_STACK_SIZE, zephyr_worker_entry, (void*)lf_thread,
+                  arguments, NULL, LF_ZEPHYR_THREAD_PRIORITY, 0, K_NO_WAIT);
 
+  // Pass the pointer to the k_thread struct out. This is needed
+  // to join on the thread later.
+  *thread = &threads[tid];
 
-    // Pass the pointer to the k_thread struct out. This is needed
-    // to join on the thread later.
-    *thread = &threads[tid];   
+  // Increment the tid counter so that next call to `lf_thread_create`
+  // uses the next available k_thread struct and stack.
+  tid++;
+  k_mutex_unlock(&thread_mutex);
 
-    // Increment the tid counter so that next call to `lf_thread_create`
-    // uses the next available k_thread struct and stack.
-    tid++; 
-    k_mutex_unlock(&thread_mutex);
-
-    return 0;
+  return 0;
 }
 
-int lf_thread_join(lf_thread_t thread, void** thread_return) {
-    return k_thread_join(thread, K_FOREVER);
-}
+int lf_thread_join(lf_thread_t thread, void** thread_return) { return k_thread_join(thread, K_FOREVER); }
 
-int lf_mutex_init(lf_mutex_t* mutex) {
-    return k_mutex_init(mutex);    
+void initialize_lf_thread_id() {
+  static int _lf_worker_thread_count = 0;
+  int* thread_id = (int*)malloc(sizeof(int));
+  *thread_id = lf_atomic_fetch_add32(&_lf_worker_thread_count, 1);
+  k_thread_custom_data_set(thread_id);
 }
 
+int lf_thread_id() { return *((int*)k_thread_custom_data_get()); }
+
+int lf_mutex_init(lf_mutex_t* mutex) { return k_mutex_init(mutex); }
+
 int lf_mutex_lock(lf_mutex_t* mutex) {
-    int res = k_mutex_lock(mutex, K_FOREVER);
-    return res;
+  int res = k_mutex_lock(mutex, K_FOREVER);
+  return res;
 }
 
 int lf_mutex_unlock(lf_mutex_t* mutex) {
-    int res = k_mutex_unlock(mutex);
-    return res;
+  int res = k_mutex_unlock(mutex);
+  return res;
 }
 
 int lf_cond_init(lf_cond_t* cond, lf_mutex_t* mutex) {
-    cond->mutex = mutex;
-    return k_condvar_init(&cond->condition);
+  cond->mutex = mutex;
+  return k_condvar_init(&cond->condition);
 }
 
 int lf_cond_broadcast(lf_cond_t* cond) {
-    k_condvar_broadcast(&cond->condition);
-    return 0;
+  k_condvar_broadcast(&cond->condition);
+  return 0;
 }
 
-int lf_cond_signal(lf_cond_t* cond) {
-    return k_condvar_signal(&cond->condition);
-}
+int lf_cond_signal(lf_cond_t* cond) { return k_condvar_signal(&cond->condition); }
 
-int lf_cond_wait(lf_cond_t* cond) {
-    return k_condvar_wait(&cond->condition, cond->mutex, K_FOREVER);
-}
+int lf_cond_wait(lf_cond_t* cond) { return k_condvar_wait(&cond->condition, cond->mutex, K_FOREVER); }
 
 int _lf_cond_timedwait(lf_cond_t* cond, instant_t wakeup_time) {
-    instant_t now;
-    _lf_clock_gettime(&now);
-    interval_t sleep_duration_ns = wakeup_time - now;
-    k_timeout_t timeout = K_NSEC(sleep_duration_ns);
-    int res = k_condvar_wait(&cond->condition, cond->mutex, timeout);
-    if (res == 0) {
-        return 0;
-    } else {
-        return LF_TIMEOUT;
-    }
+  instant_t now;
+  _lf_clock_gettime(&now);
+  interval_t sleep_duration_ns = wakeup_time - now;
+  k_timeout_t timeout = K_NSEC(sleep_duration_ns);
+  int res = k_condvar_wait(&cond->condition, cond->mutex, timeout);
+  if (res == 0) {
+    return 0;
+  } else {
+    return LF_TIMEOUT;
+  }
 }
 
-
-
-#endif // NUMBER_OF_WORKERS
+#endif // !LF_SINGLE_THREADED
 #endif
diff --git a/low_level_platform/impl/src/platform_internal.c b/low_level_platform/impl/src/platform_internal.c
new file mode 100644
index 000000000..fc14c9f22
--- /dev/null
+++ b/low_level_platform/impl/src/platform_internal.c
@@ -0,0 +1,13 @@
+#include "low_level_platform.h"
+
+#ifndef PLATFORM_ZEPHYR // on Zephyr, this is handled separately
+#ifndef LF_SINGLE_THREADED
+static int _lf_worker_thread_count = 0;
+
+static thread_local int lf_thread_id_var = -1;
+
+int lf_thread_id() { return lf_thread_id_var; }
+
+void initialize_lf_thread_id() { lf_thread_id_var = lf_atomic_fetch_add32(&_lf_worker_thread_count, 1); }
+#endif
+#endif
diff --git a/platform/api/CMakeLists.txt b/platform/api/CMakeLists.txt
new file mode 100644
index 000000000..88b8de512
--- /dev/null
+++ b/platform/api/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_library(lf-platform-api INTERFACE)
+add_library(lf::platform-api ALIAS lf-platform-api)
+target_include_directories(lf-platform-api INTERFACE ${CMAKE_CURRENT_LIST_DIR})
diff --git a/platform/api/platform.h b/platform/api/platform.h
new file mode 100644
index 000000000..1b1950e8e
--- /dev/null
+++ b/platform/api/platform.h
@@ -0,0 +1,41 @@
+/**
+ * @file platform.h
+ * @author Peter Donovan (peter@xronos.com)
+ * @brief Platform API for runtime plugins to use while sharing implementation
+ * source code and binaries with the core and with each other.
+ * @version 0.1
+ * @date 2024-01-29
+ *
+ * @copyright Copyright (c) 2024
+ */
+
+/**
+ * @brief Pointer to the platform-specific implementation of a mutex.
+ */
+typedef void* lf_platform_mutex_ptr_t;
+/**
+ * @brief Create a new mutex and return (a pointer to) it.
+ */
+lf_platform_mutex_ptr_t lf_platform_mutex_new();
+/**
+ * @brief Free all resources associated with the provided mutex.
+ */
+void lf_platform_mutex_free(lf_platform_mutex_ptr_t mutex);
+/**
+ * @brief Acquire the given mutex.
+ *
+ * @return 0 on success, platform-specific error number otherwise.
+ */
+int lf_platform_mutex_lock(lf_platform_mutex_ptr_t mutex);
+/**
+ * @brief Release the given mutex.
+ *
+ * @return 0 on success, platform-specific error number otherwise.
+ */
+int lf_platform_mutex_unlock(lf_platform_mutex_ptr_t mutex);
+
+/**
+ * @brief The ID of the current thread. The only guarantee is that these IDs will be a contiguous range of numbers
+ * starting at 0.
+ */
+int lf_thread_id();
diff --git a/platform/impl/CMakeLists.txt b/platform/impl/CMakeLists.txt
new file mode 100644
index 000000000..df24aac27
--- /dev/null
+++ b/platform/impl/CMakeLists.txt
@@ -0,0 +1,17 @@
+set(LF_PLATFORM_FILES ${CMAKE_CURRENT_LIST_DIR}/platform.c)
+
+
+if(PLATFORM_ZEPHYR)
+    message("--- Building Zephyr library")
+    zephyr_library_named(lf-platform-impl)
+    zephyr_library_sources(${LF_PLATFORM_FILES})
+    zephyr_library_link_libraries(kernel)
+else()
+message("--- Building non-Zephyr library")
+    add_library(lf-platform-impl STATIC)
+    target_sources(lf-platform-impl PUBLIC ${LF_PLATFORM_FILES})
+endif()
+
+add_library(lf::platform-impl ALIAS lf-platform-impl)
+target_link_libraries(lf-platform-impl PRIVATE lf::low-level-platform-api)
+target_link_libraries(lf-platform-impl PRIVATE lf::platform-api)
diff --git a/platform/impl/platform.c b/platform/impl/platform.c
new file mode 100644
index 000000000..ddd182404
--- /dev/null
+++ b/platform/impl/platform.c
@@ -0,0 +1,26 @@
+/**
+ * @file platform.c
+ * @author Peter Donovan (peter@xronos.com)
+ * @brief A variant of the platform abstraction whose ABI is
+ * platform-independent.
+ * @version 0.1
+ * @date 2024-01-29
+ *
+ * @copyright Copyright (c) 2024
+ */
+#include <stdlib.h>
+
+#include "low_level_platform.h"
+#include "platform.h"
+
+// MUTEXES *********************************************************************
+
+lf_platform_mutex_ptr_t lf_platform_mutex_new() {
+  lf_platform_mutex_ptr_t mutex = (lf_platform_mutex_ptr_t)malloc(sizeof(lf_mutex_t));
+  if (mutex)
+    lf_mutex_init(mutex);
+  return mutex;
+};
+void lf_platform_mutex_free(lf_platform_mutex_ptr_t mutex) { free((void*)mutex); }
+int lf_platform_mutex_lock(lf_platform_mutex_ptr_t mutex) { return lf_mutex_lock((lf_mutex_t*)mutex); }
+int lf_platform_mutex_unlock(lf_platform_mutex_ptr_t mutex) { return lf_mutex_unlock((lf_mutex_t*)mutex); }
diff --git a/python/include/modal_models/definitions.h b/python/include/modal_models/definitions.h
index fe6a16b98..5ae13c6a5 100644
--- a/python/include/modal_models/definitions.h
+++ b/python/include/modal_models/definitions.h
@@ -11,11 +11,10 @@
 #ifndef PYTHON_MODAL_MODELS_DEFS_H
 #define PYTHON_MODAL_MODELS_DEFS_H
 
-
 #ifdef MODAL_REACTORS
 #include <Python.h>
 #include <structmember.h>
-#include "../include/core/tag.h"
+#include "tag.h"
 #include "../include/api/schedule.h"
 
 /**
@@ -26,26 +25,20 @@
  * that mode and the type of transition (reset or history).
  */
 typedef struct {
-	PyObject_HEAD
-	PyObject* mode;
-	PyObject* lf_self;
-	lf_mode_change_type_t change_type;
+  PyObject_HEAD PyObject* mode;
+  PyObject* lf_self;
+  lf_mode_change_type_t change_type;
 } mode_capsule_struct_t;
 
-
 /**
  * Set a new mode for a modal model.
  */
-static PyObject* py_mode_set(PyObject *self, PyObject *args);
+static PyObject* py_mode_set(PyObject* self, PyObject* args);
 
 /**
  * Convert a `reactor_mode_t` to a `mode_capsule_t`.
  */
-PyObject* convert_C_mode_to_py(
-		reactor_mode_t* mode,
-		self_base_t* lf_self,
-		lf_mode_change_type_t change_type
-);
+PyObject* convert_C_mode_to_py(reactor_mode_t* mode, self_base_t* lf_self, lf_mode_change_type_t change_type);
 
 /**
  * @brief Initialize `mode_capsule_t` in the `current_module`.
diff --git a/python/include/python_action.h b/python/include/python_action.h
index 5e608c29f..880bfe149 100644
--- a/python/include/python_action.h
+++ b/python/include/python_action.h
@@ -54,15 +54,15 @@ extern PyTypeObject py_action_capsule_t;
  *             at the current logical time
  **/
 typedef struct {
-    token_type_t type;
-    lf_token_t* token;
-    size_t length;
-    bool is_present;
-    lf_action_internal_t _base;
-    self_base_t* parent;
-    bool has_value;
-    PyObject* value;
-    FEDERATED_GENERIC_EXTENSION
+  token_type_t type;
+  lf_token_t* token;
+  size_t length;
+  bool is_present;
+  lf_action_internal_t _base;
+  self_base_t* parent;
+  bool has_value;
+  PyObject* value;
+  FEDERATED_GENERIC_EXTENSION
 } generic_action_instance_struct;
 
 /**
@@ -82,13 +82,13 @@ typedef struct {
  * to a Python reaction.
  **/
 typedef struct {
-    PyObject_HEAD
-    PyObject* action; // Hold the void* pointer to a C action instance. However, passing void* directly
-                      // to Python is considered unsafe practice. Instead, this void* pointer to the C action
-                      // will be stored in a PyCapsule. @see https://docs.python.org/3/c-api/capsule.html
-    PyObject* value; // This value will be copied from the C action->value
-    bool is_present; // Same as value, is_present will be copied from the C action->is_present
-    FEDERATED_CAPSULE_EXTENSION
+  PyObject_HEAD PyObject*
+      action;      // Hold the void* pointer to a C action instance. However, passing void* directly
+                   // to Python is considered unsafe practice. Instead, this void* pointer to the C action
+                   // will be stored in a PyCapsule. @see https://docs.python.org/3/c-api/capsule.html
+  PyObject* value; // This value will be copied from the C action->value
+  bool is_present; // Same as value, is_present will be copied from the C action->is_present
+  FEDERATED_CAPSULE_EXTENSION
 } generic_action_capsule_struct;
 
 #endif
diff --git a/python/include/python_capsule_extension.h b/python/include/python_capsule_extension.h
index 203719ab4..694ecca31 100644
--- a/python/include/python_capsule_extension.h
+++ b/python/include/python_capsule_extension.h
@@ -36,44 +36,46 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #ifdef FEDERATED
 #ifdef FEDERATED_DECENTRALIZED
-#define FEDERATED_GENERIC_EXTENSION \
-    tag_t intended_tag; \
-    instant_t physical_time_of_arrival;
+#define FEDERATED_GENERIC_EXTENSION                                                                                    \
+  tag_t intended_tag;                                                                                                  \
+  instant_t physical_time_of_arrival;
 
-#define FEDERATED_CAPSULE_EXTENSION \
-    py_tag_t* intended_tag; \
-    instant_t physical_time_of_arrival;
+#define FEDERATED_CAPSULE_EXTENSION                                                                                    \
+  py_tag_t* intended_tag;                                                                                              \
+  instant_t physical_time_of_arrival;
 
-#define FEDERATED_CAPSULE_MEMBER \
-    {"intended_tag", T_OBJECT, offsetof(generic_port_capsule_struct, intended_tag), READONLY, "Original intended tag of the event."}, \
-    {"physical_time_of_arrival", T_LONG, offsetof(generic_port_capsule_struct, physical_time_of_arrival), READONLY, "Physical time of arrival of the original message."},
+#define FEDERATED_CAPSULE_MEMBER                                                                                       \
+  {"intended_tag", T_OBJECT, offsetof(generic_port_capsule_struct, intended_tag), READONLY,                            \
+   "Original intended tag of the event."},                                                                             \
+      {"physical_time_of_arrival", T_LONG, offsetof(generic_port_capsule_struct, physical_time_of_arrival), READONLY,  \
+       "Physical time of arrival of the original message."},
 
-#define FEDERATED_ASSIGN_FIELDS(py_port, c_port) \
-do { \
-    py_port->intended_tag = convert_C_tag_to_py(c_port->intended_tag); \
-    py_port->physical_time_of_arrival = c_port->physical_time_of_arrival; \
-} while(0)
+#define FEDERATED_ASSIGN_FIELDS(py_port, c_port)                                                                       \
+  do {                                                                                                                 \
+    py_port->intended_tag = convert_C_tag_to_py(c_port->intended_tag);                                                 \
+    py_port->physical_time_of_arrival = c_port->physical_time_of_arrival;                                              \
+  } while (0)
 
 #else // FEDERATED_CENTRALIZED
-#define FEDERATED_GENERIC_EXTENSION \
-    instant_t physical_time_of_arrival;
+#define FEDERATED_GENERIC_EXTENSION instant_t physical_time_of_arrival;
 
 #define FEDERATED_CAPSULE_EXTENSION FEDERATED_GENERIC_EXTENSION
 
-#define FEDERATED_CAPSULE_MEMBER \
-    {"physical_time_of_arrival", T_INT, offsetof(generic_port_capsule_struct, physical_time_of_arrival), READONLY, "Physical time of arrival of the original message."},
+#define FEDERATED_CAPSULE_MEMBER                                                                                       \
+  {"physical_time_of_arrival", T_INT, offsetof(generic_port_capsule_struct, physical_time_of_arrival), READONLY,       \
+   "Physical time of arrival of the original message."},
 
-#define FEDERATED_ASSIGN_FIELDS(py_port, c_port) \
-do { \
-    py_port->physical_time_of_arrival = c_port->physical_time_of_arrival; \
-} while(0)
-#endif // FEDERATED_DECENTRALIZED
-#else  // not FEDERATED
-#define FEDERATED_GENERIC_EXTENSION // Empty
-#define FEDERATED_CAPSULE_EXTENSION // Empty
-#define FEDERATED_CAPSULE_MEMBER // Empty
-#define FEDERATED_ASSIGN_FIELDS(py_port, c_port) // Empty
+#define FEDERATED_ASSIGN_FIELDS(py_port, c_port)                                                                       \
+  do {                                                                                                                 \
+    py_port->physical_time_of_arrival = c_port->physical_time_of_arrival;                                              \
+  } while (0)
+#endif                                            // FEDERATED_DECENTRALIZED
+#else                                             // not FEDERATED
+#define FEDERATED_GENERIC_EXTENSION               // Empty
+#define FEDERATED_CAPSULE_EXTENSION               // Empty
+#define FEDERATED_CAPSULE_MEMBER                  // Empty
+#define FEDERATED_ASSIGN_FIELDS(py_port, c_port)  // Empty
 #define FEDERATED_COPY_FIELDS(py_port1, py_port2) // Empty
-#endif // FEDERATED
+#endif                                            // FEDERATED
 
 #endif
diff --git a/python/include/python_port.h b/python/include/python_port.h
index 269abe756..7752b16c8 100644
--- a/python/include/python_port.h
+++ b/python/include/python_port.h
@@ -55,15 +55,15 @@ extern PyTypeObject py_port_capsule_t;
  * as its first element a token_type_t.
  */
 typedef struct {
-    size_t element_size;                     // token_type_t
-    void (*destructor) (void* value);        // token_type_t
-    void* (*copy_constructor) (void* value); // token_type_t
-    lf_token_t* token;                       // token_template_t
-    size_t length;                           // token_template_t
-    bool is_present;                         // lf_port_base_t
-    lf_port_internal_t _base;                // lf_port_internal_t
-    PyObject* value;
-    FEDERATED_GENERIC_EXTENSION
+  size_t element_size;                    // token_type_t
+  void (*destructor)(void* value);        // token_type_t
+  void* (*copy_constructor)(void* value); // token_type_t
+  lf_token_t* token;                      // token_template_t
+  size_t length;                          // token_template_t
+  bool is_present;                        // lf_port_base_t
+  lf_port_internal_t _base;               // lf_port_internal_t
+  PyObject* value;
+  FEDERATED_GENERIC_EXTENSION
 } generic_port_instance_struct;
 
 /**
@@ -89,13 +89,12 @@ typedef struct {
  * current_index: Used to facilitate iterative functions (@see port_iter)
  **/
 typedef struct {
-    PyObject_HEAD
-    PyObject* port;
-    PyObject* value;
-    bool is_present;
-    int width;
-    long current_index;
-    FEDERATED_CAPSULE_EXTENSION
+  PyObject_HEAD PyObject* port;
+  PyObject* value;
+  bool is_present;
+  int width;
+  long current_index;
+  FEDERATED_CAPSULE_EXTENSION
 } generic_port_capsule_struct;
 
 void python_count_decrement(void* py_object);
diff --git a/python/include/python_tag.h b/python/include/python_tag.h
index dce9d7d9c..039b08c44 100644
--- a/python/include/python_tag.h
+++ b/python/include/python_tag.h
@@ -41,8 +41,7 @@ extern PyTypeObject PyTagType;
  * Python wrapper for the tag_t struct in the C target.
  **/
 typedef struct {
-    PyObject_HEAD
-    tag_t tag;
+  PyObject_HEAD tag_t tag;
 } py_tag_t;
 
 /**
@@ -53,7 +52,7 @@ typedef struct {
  */
 py_tag_t* convert_C_tag_to_py(tag_t c_tag);
 
-PyObject* py_lf_tag(PyObject *self, PyObject *args);
-PyObject* py_tag_compare(PyObject *self, PyObject *args);
+PyObject* py_lf_tag(PyObject* self, PyObject* args);
+PyObject* py_tag_compare(PyObject* self, PyObject* args);
 
 #endif
diff --git a/python/include/python_time.h b/python/include/python_time.h
index d582ea85d..5ec25c456 100644
--- a/python/include/python_time.h
+++ b/python/include/python_time.h
@@ -32,11 +32,11 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 ///////// Time-keeping functions //////////
-PyObject* py_lf_time_logical(PyObject *self, PyObject *args);
-PyObject* py_lf_time_logical_elapsed(PyObject *self, PyObject *args);
-PyObject* py_lf_time_physical(PyObject *self, PyObject *args);
-PyObject* py_lf_time_physical_elapsed(PyObject *self, PyObject *args);
-PyObject* py_lf_time_start(PyObject *self, PyObject *args);
+PyObject* py_lf_time_logical(PyObject* self, PyObject* args);
+PyObject* py_lf_time_logical_elapsed(PyObject* self, PyObject* args);
+PyObject* py_lf_time_physical(PyObject* self, PyObject* args);
+PyObject* py_lf_time_physical_elapsed(PyObject* self, PyObject* args);
+PyObject* py_lf_time_start(PyObject* self, PyObject* args);
 
 extern PyTypeObject PyTimeType;
 
diff --git a/python/include/pythontarget.h b/python/include/pythontarget.h
index d7f2d1177..effbe0344 100644
--- a/python/include/pythontarget.h
+++ b/python/include/pythontarget.h
@@ -43,7 +43,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #ifndef PYTHON_TARGET_H
 #define PYTHON_TARGET_H
 
-
 #define PY_SSIZE_T_CLEAN
 
 #include <Python.h>
@@ -64,15 +63,14 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #error "MODULE_NAME is undefined"
 #endif
 
-#define CONCAT(x,y) x##y
-#define GEN_NAME(x,y) CONCAT(x,y)
+#define CONCAT(x, y) x##y
+#define GEN_NAME(x, y) CONCAT(x, y)
 #define STRINGIFY(X) #X
 #define TOSTRING(x) STRINGIFY(x)
 
-
 ////////////// Global variables ///////////////
-extern PyObject *globalPythonModule;
-extern PyObject *globalPythonModuleDict;
+extern PyObject* globalPythonModule;
+extern PyObject* globalPythonModuleDict;
 extern PyObject* global_pickler;
 extern environment_t* top_level_environment;
 
@@ -87,15 +85,14 @@ extern environment_t* top_level_environment;
  *      - action: Pointer to an action on the self struct.
  *      - offset: The time offset over and above that in the action.
  **/
-PyObject* py_schedule(PyObject *self, PyObject *args);
+PyObject* py_schedule(PyObject* self, PyObject* args);
 
 /**
  * Schedule an action to occur with the specified value and time offset
  * with a copy of the specified value.
  * See reactor.h for documentation.
  */
-PyObject* py_schedule_copy(PyObject *self, PyObject *args);
-
+PyObject* py_schedule_copy(PyObject* self, PyObject* args);
 
 //////////////////////////////////////////////////////////////
 /////////////  Python Helper Functions (called from Python code)
@@ -103,11 +100,11 @@ PyObject* py_schedule_copy(PyObject *self, PyObject *args);
 /**
  * Stop execution at the conclusion of the current logical time.
  */
-PyObject* py_request_stop(PyObject *self, PyObject *args);
+PyObject* py_request_stop(PyObject* self, PyObject* args);
 
 //////////////////////////////////////////////////////////////
 ///////////// Main function callable from Python code
-PyObject* py_main(PyObject *self, PyObject *args);
+PyObject* py_main(PyObject* self, PyObject* args);
 
 //////////////////////////////////////////////////////////////
 /////////////  Python Helper Functions
@@ -143,13 +140,12 @@ PyObject* convert_C_port_to_py(void* port, int width);
  * made by this function, the "value" and "is_present" are passed to the function
  * instead of expecting them to exist.
  *
- * The void* pointer to the C action instance is encapsulated in a PyCapsule instead of passing an exposed pointer through
- * Python. @see https://docs.python.org/3/c-api/capsule.html
- * This encapsulation is done by calling PyCapsule_New(action, "name_of_the_container_in_the_capsule", NULL),
- * where "name_of_the_container_in_the_capsule" is an agreed-upon container name inside the capsule. This
- * capsule can then be treated as a PyObject* and safely passed through Python code. On the other end
- * (which is in schedule functions), PyCapsule_GetPointer(recieved_action,"action") can be called to retrieve
- * the void* pointer into recieved_action.
+ * The void* pointer to the C action instance is encapsulated in a PyCapsule instead of passing an exposed pointer
+ *through Python. @see https://docs.python.org/3/c-api/capsule.html This encapsulation is done by calling
+ *PyCapsule_New(action, "name_of_the_container_in_the_capsule", NULL), where "name_of_the_container_in_the_capsule" is
+ *an agreed-upon container name inside the capsule. This capsule can then be treated as a PyObject* and safely passed
+ *through Python code. On the other end (which is in schedule functions), PyCapsule_GetPointer(received_action,"action")
+ *can be called to retrieve the void* pointer into received_action.
  **/
 PyObject* convert_C_action_to_py(void* action);
 
@@ -182,7 +178,6 @@ PyObject* get_python_function(string module, string class, int instance_id, stri
  * For example for a module named LinguaFrancaFoo, this function
  * will be called PyInit_LinguaFrancaFoo
  */
-PyMODINIT_FUNC
-GEN_NAME(PyInit_,MODULE_NAME)(void);
+PyMODINIT_FUNC GEN_NAME(PyInit_, MODULE_NAME)(void);
 
 #endif // PYTHON_TARGET_H
diff --git a/python/lib/modal_models/impl.c b/python/lib/modal_models/impl.c
index 1b625b68f..c8cdff1f2 100644
--- a/python/lib/modal_models/impl.c
+++ b/python/lib/modal_models/impl.c
@@ -38,25 +38,25 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 /**
  * Set a new mode for a modal model.
  */
-static PyObject* py_mode_set(PyObject *mode_capsule, PyObject *args) {
-    mode_capsule_struct_t* m = (mode_capsule_struct_t*)mode_capsule;
+static PyObject* py_mode_set(PyObject* mode_capsule, PyObject* args) {
+  mode_capsule_struct_t* m = (mode_capsule_struct_t*)mode_capsule;
 
-    reactor_mode_t* mode = PyCapsule_GetPointer(m->mode, "mode");
-    if (mode == NULL) {
-        lf_print_error("Null pointer received.");
-        exit(1);
-    }
+  reactor_mode_t* mode = PyCapsule_GetPointer(m->mode, "mode");
+  if (mode == NULL) {
+    lf_print_error("Null pointer received.");
+    exit(1);
+  }
 
-    self_base_t* self = PyCapsule_GetPointer(m->lf_self, "lf_self");
-    if (self == NULL) {
-        lf_print_error("Null pointer received.");
-        exit(1);
-    }
+  self_base_t* self = PyCapsule_GetPointer(m->lf_self, "lf_self");
+  if (self == NULL) {
+    lf_print_error("Null pointer received.");
+    exit(1);
+  }
 
-    _LF_SET_MODE_WITH_TYPE(mode, m->change_type);
+  _LF_SET_MODE_WITH_TYPE(mode, m->change_type);
 
-    Py_INCREF(Py_None);
-    return Py_None;
+  Py_INCREF(Py_None);
+  return Py_None;
 }
 
 //////////// Python Struct /////////////
@@ -66,8 +66,7 @@ static PyObject* py_mode_set(PyObject *mode_capsule, PyObject *args) {
  * The set function is used to set a new mode.
  */
 static PyMethodDef mode_capsule_methods[] = {
-    {"set", (PyCFunction)py_mode_set, METH_NOARGS, "Set a new mode."},
-    {NULL}  /* Sentinel */
+    {"set", (PyCFunction)py_mode_set, METH_NOARGS, "Set a new mode."}, {NULL} /* Sentinel */
 };
 
 /*
@@ -75,8 +74,7 @@ static PyMethodDef mode_capsule_methods[] = {
  * used to describe how mode_capsule behaves.
  */
 static PyTypeObject mode_capsule_t = {
-    PyVarObject_HEAD_INIT(NULL, 0)
-    .tp_name = "LinguaFranca.mode_capsule",
+    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "LinguaFranca.mode_capsule",
     .tp_doc = "mode_capsule objects",
     .tp_basicsize = sizeof(mode_capsule_struct_t),
     .tp_itemsize = 0,
@@ -85,7 +83,6 @@ static PyTypeObject mode_capsule_t = {
     .tp_methods = mode_capsule_methods,
 };
 
-
 ///////////////// Functions used in mode creation and initialization /////////////
 
 /**
@@ -93,51 +90,46 @@ static PyTypeObject mode_capsule_t = {
  *
  */
 void initialize_mode_capsule_t(PyObject* current_module) {
-    // Initialize the mode_capsule type
-    if (PyType_Ready(&mode_capsule_t) < 0) {
-        return;
-    }
-
-    // Add the mode_capsule type to the module's dictionary.
-    Py_INCREF(&mode_capsule_t);
-    if (PyModule_AddObject(current_module, "mode_capsule", (PyObject *) &mode_capsule_t) < 0) {
-        Py_DECREF(&mode_capsule_t);
-        Py_DECREF(current_module);
-        return;
-    }
+  // Initialize the mode_capsule type
+  if (PyType_Ready(&mode_capsule_t) < 0) {
+    return;
+  }
+
+  // Add the mode_capsule type to the module's dictionary.
+  Py_INCREF(&mode_capsule_t);
+  if (PyModule_AddObject(current_module, "mode_capsule", (PyObject*)&mode_capsule_t) < 0) {
+    Py_DECREF(&mode_capsule_t);
+    Py_DECREF(current_module);
+    return;
+  }
 }
 
 /**
  * Convert a `reactor_mode_t` to a `mode_capsule_t`.
  */
-PyObject* convert_C_mode_to_py(
-		reactor_mode_t* mode,
-		self_base_t* lf_self,
-		lf_mode_change_type_t change_type
-) {
-    // Create the mode struct in Python
-	mode_capsule_struct_t* cap =
-        (mode_capsule_struct_t*)PyObject_GC_New(mode_capsule_struct_t, &mode_capsule_t);
-    if (cap == NULL) {
-        lf_print_error_and_exit("Failed to convert mode.");
-    }
-
-    // Create the capsule to hold the reactor_mode_t* mode
-    PyObject* capsule = PyCapsule_New(mode, "mode", NULL);
-    if (capsule == NULL) {
-        lf_print_error_and_exit("Failed to convert mode.");
-    }
-    // Fill in the Python mode struct.
-    cap->mode = capsule;
-
-    // Create a capsule to point to the self struct.
-    PyObject* self_capsule = PyCapsule_New(lf_self, "lf_self", NULL);
-    if (self_capsule == NULL) {
-        lf_print_error_and_exit("Failed to convert self.");
-    }
-    cap->lf_self = self_capsule;
-
-    cap->change_type = change_type;
-
-	return (PyObject*) cap;
+PyObject* convert_C_mode_to_py(reactor_mode_t* mode, self_base_t* lf_self, lf_mode_change_type_t change_type) {
+  // Create the mode struct in Python
+  mode_capsule_struct_t* cap = (mode_capsule_struct_t*)PyObject_GC_New(mode_capsule_struct_t, &mode_capsule_t);
+  if (cap == NULL) {
+    lf_print_error_and_exit("Failed to convert mode.");
+  }
+
+  // Create the capsule to hold the reactor_mode_t* mode
+  PyObject* capsule = PyCapsule_New(mode, "mode", NULL);
+  if (capsule == NULL) {
+    lf_print_error_and_exit("Failed to convert mode.");
+  }
+  // Fill in the Python mode struct.
+  cap->mode = capsule;
+
+  // Create a capsule to point to the self struct.
+  PyObject* self_capsule = PyCapsule_New(lf_self, "lf_self", NULL);
+  if (self_capsule == NULL) {
+    lf_print_error_and_exit("Failed to convert self.");
+  }
+  cap->lf_self = self_capsule;
+
+  cap->change_type = change_type;
+
+  return (PyObject*)cap;
 }
diff --git a/python/lib/python_action.c b/python/lib/python_action.c
index 096f64ef2..e6d9c556f 100644
--- a/python/lib/python_action.c
+++ b/python/lib/python_action.c
@@ -43,10 +43,10 @@ PyTypeObject py_action_capsule_t;
  * called by the Python grabage collector).
  * @param self
  */
-void py_action_capsule_dealloc(generic_action_capsule_struct *self) {
-    Py_XDECREF(self->action);
-    Py_XDECREF(self->value);
-    Py_TYPE(self)->tp_free((PyObject *) self);
+void py_action_capsule_dealloc(generic_action_capsule_struct* self) {
+  Py_XDECREF(self->action);
+  Py_XDECREF(self->value);
+  Py_TYPE(self)->tp_free((PyObject*)self);
 }
 
 /**
@@ -57,15 +57,15 @@ void py_action_capsule_dealloc(generic_action_capsule_struct *self) {
  * method of type py_action_capsule_t and then assign default values of NULL, NULL, 0
  * to the members of the generic_action_capsule_struct.
  */
-PyObject *py_action_capsule_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {
-    generic_action_capsule_struct *self;
-    self = (generic_action_capsule_struct *) type->tp_alloc(type, 0);
-    if (self != NULL) {
-        self->action = NULL;
-        self->value = NULL;
-        self->is_present = false;
-    }
-    return (PyObject *) self;
+PyObject* py_action_capsule_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
+  generic_action_capsule_struct* self;
+  self = (generic_action_capsule_struct*)type->tp_alloc(type, 0);
+  if (self != NULL) {
+    self->action = NULL;
+    self->value = NULL;
+    self->is_present = false;
+  }
+  return (PyObject*)self;
 }
 
 /**
@@ -83,30 +83,28 @@ PyObject *py_action_capsule_new(PyTypeObject *type, PyObject *args, PyObject *kw
  *                      is present at the current logical time.
  *      - num_destination: Used for reference-keeping inside the C runtime
  */
-int py_action_capsule_init(generic_action_capsule_struct *self, PyObject *args, PyObject *kwds) {
-    static char *kwlist[] = {"action", "value", "is_present", NULL};
-    PyObject *action = NULL, *value = NULL, *tmp;
-
-    if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOi", kwlist,
-                                     &action, &value, &self->is_present)) {
-        return -1;
-    }
-    if (action) {
-        tmp = self->action;
-        Py_INCREF(action);
-        self->action = action;
-        Py_XDECREF(tmp);
-    }
-    if (value) {
-        tmp = self->value;
-        Py_INCREF(value);
-        self->value = value;
-        Py_XDECREF(tmp);
-    }
-    return 0;
+int py_action_capsule_init(generic_action_capsule_struct* self, PyObject* args, PyObject* kwds) {
+  static char* kwlist[] = {"action", "value", "is_present", NULL};
+  PyObject *action = NULL, *value = NULL, *tmp;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOi", kwlist, &action, &value, &self->is_present)) {
+    return -1;
+  }
+  if (action) {
+    tmp = self->action;
+    Py_INCREF(action);
+    self->action = action;
+    Py_XDECREF(tmp);
+  }
+  if (value) {
+    tmp = self->value;
+    Py_INCREF(value);
+    self->value = value;
+    Py_XDECREF(tmp);
+  }
+  return 0;
 }
 
-
 //////////////////////////////////////////////////////////////
 /////////////  Python Structs
 //// Actions /////
@@ -117,17 +115,17 @@ int py_action_capsule_init(generic_action_capsule_struct *self, PyObject *args,
 PyMemberDef py_action_capsule_members[] = {
     {"action", T_OBJECT, offsetof(generic_action_capsule_struct, action), 0, "The pointer to the C action struct"},
     {"value", T_OBJECT, offsetof(generic_action_capsule_struct, value), 0, "Value of the action"},
-    {"is_present", T_BOOL, offsetof(generic_action_capsule_struct, is_present), 0, "Check that shows if action is present"},
-    {NULL}  /* Sentinel */
+    {"is_present", T_BOOL, offsetof(generic_action_capsule_struct, is_present), 0,
+     "Check that shows if action is present"},
+    {NULL} /* Sentinel */
 };
 
-
 /**
  * The function members of action capsule
  */
 PyMethodDef py_action_capsule_methods[] = {
     {"schedule", (PyCFunction)py_schedule, METH_VARARGS, "Schedule the action with the given offset"},
-    {NULL}  /* Sentinel */
+    {NULL} /* Sentinel */
 };
 
 /*
@@ -135,15 +133,14 @@ PyMethodDef py_action_capsule_methods[] = {
  * Used to describe how an action_capsule behaves.
  */
 PyTypeObject py_action_capsule_t = {
-    PyVarObject_HEAD_INIT(NULL, 0)
-    .tp_name = "LinguaFranca.action_instance",
+    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "LinguaFranca.action_instance",
     .tp_doc = "action_instance object",
     .tp_basicsize = sizeof(generic_action_capsule_struct),
     .tp_itemsize = 0,
     .tp_flags = Py_TPFLAGS_DEFAULT,
     .tp_new = py_action_capsule_new,
-    .tp_init = (initproc) py_action_capsule_init,
-    .tp_dealloc = (destructor) py_action_capsule_dealloc,
+    .tp_init = (initproc)py_action_capsule_init,
+    .tp_dealloc = (destructor)py_action_capsule_dealloc,
     .tp_members = py_action_capsule_members,
     .tp_methods = py_action_capsule_methods,
 };
\ No newline at end of file
diff --git a/python/lib/python_port.c b/python/lib/python_port.c
index d523faecb..62f4b4732 100644
--- a/python/lib/python_port.c
+++ b/python/lib/python_port.c
@@ -47,9 +47,7 @@ PyTypeObject py_port_capsule_t;
  * Python can free its memory.
  * @param py_object A PyObject with count 1 or greater.
  */
-void python_count_decrement(void* py_object) {
-    Py_XDECREF((PyObject*)py_object);
-}
+void python_count_decrement(void* py_object) { Py_XDECREF((PyObject*)py_object); }
 
 //////////// set Function(s) /////////////
 /**
@@ -81,38 +79,37 @@ void python_count_decrement(void* py_object) {
  *      - val: The value to insert into the port struct.
  */
 PyObject* py_port_set(PyObject* self, PyObject* args) {
-    generic_port_capsule_struct* p = (generic_port_capsule_struct*)self;
-    PyObject* val = NULL;
-
-    if (!PyArg_ParseTuple(args, "O", &val)) {
-        PyErr_SetString(PyExc_TypeError, "Could not set objects.");
-        return NULL;
-    }
-
-    generic_port_instance_struct* port =
-        PyCapsule_GetPointer(p->port, "port");
-    if (port == NULL) {
-        lf_print_error("Null pointer received.");
-        exit(1);
-    }
-
-    if (val) {
-        LF_PRINT_DEBUG("Setting value %p with reference count %d.", val, (int) Py_REFCNT(val));
-        //Py_INCREF(val);
-        //python_count_decrement(port->value);
-       
-        lf_token_t* token = lf_new_token((void*)port, val, 1);
-        lf_set_destructor(port, python_count_decrement);
-        lf_set_token(port, token);
-        Py_INCREF(val);
-       
-        // Also set the values for the port capsule.
-        p->value = val;
-        p->is_present = true;
-    }
-
-    Py_INCREF(Py_None);
-    return Py_None;
+  generic_port_capsule_struct* p = (generic_port_capsule_struct*)self;
+  PyObject* val = NULL;
+
+  if (!PyArg_ParseTuple(args, "O", &val)) {
+    PyErr_SetString(PyExc_TypeError, "Could not set objects.");
+    return NULL;
+  }
+
+  generic_port_instance_struct* port = PyCapsule_GetPointer(p->port, "port");
+  if (port == NULL) {
+    lf_print_error("Null pointer received.");
+    exit(1);
+  }
+
+  if (val) {
+    LF_PRINT_DEBUG("Setting value %p with reference count %d.", val, (int)Py_REFCNT(val));
+    // Py_INCREF(val);
+    // python_count_decrement(port->value);
+
+    lf_token_t* token = lf_new_token((void*)port, val, 1);
+    lf_set_destructor(port, python_count_decrement);
+    lf_set_token(port, token);
+    Py_INCREF(val);
+
+    // Also set the values for the port capsule.
+    p->value = val;
+    p->is_present = true;
+  }
+
+  Py_INCREF(Py_None);
+  return Py_None;
 }
 
 /**
@@ -120,10 +117,10 @@ PyObject* py_port_set(PyObject* self, PyObject* args) {
  * garbage collector).
  * @param self An instance of generic_port_instance_struct*
  */
-void py_port_capsule_dealloc(generic_port_capsule_struct *self) {
-    Py_XDECREF(self->port);
-    Py_XDECREF(self->value);
-    Py_TYPE(self)->tp_free((PyObject *) self);
+void py_port_capsule_dealloc(generic_port_capsule_struct* self) {
+  Py_XDECREF(self->port);
+  Py_XDECREF(self->value);
+  Py_TYPE(self)->tp_free((PyObject*)self);
 }
 
 /**
@@ -146,18 +143,18 @@ void py_port_capsule_dealloc(generic_port_capsule_struct *self) {
  *                   is not a multiport, this field will be -2.
  * @param kwds Keywords (@see Python keywords)
  */
-PyObject *py_port_capsule_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {
-    generic_port_capsule_struct *self;
-    self = (generic_port_capsule_struct *) type->tp_alloc(type, 0);
-    if (self != NULL) {
-        self->port = NULL;
-        Py_INCREF(Py_None);
-        self->value = Py_None;
-        self->is_present = false;
-        self->current_index = 0;
-        self->width = -2;
-    }
-    return (PyObject *) self;
+PyObject* py_port_capsule_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
+  generic_port_capsule_struct* self;
+  self = (generic_port_capsule_struct*)type->tp_alloc(type, 0);
+  if (self != NULL) {
+    self->port = NULL;
+    Py_INCREF(Py_None);
+    self->value = Py_None;
+    self->is_present = false;
+    self->current_index = 0;
+    self->width = -2;
+  }
+  return (PyObject*)self;
 }
 
 /**
@@ -169,7 +166,7 @@ PyObject *py_port_capsule_new(PyTypeObject *type, PyObject *args, PyObject *kwds
  *         p.set(42)
  * possible in Python.
  */
-PyObject *py_port_iter(PyObject *self) {
+PyObject* py_port_iter(PyObject* self) {
   generic_port_capsule_struct* port = (generic_port_capsule_struct*)self;
   port->current_index = 0;
   Py_INCREF(self);
@@ -183,43 +180,41 @@ PyObject *py_port_iter(PyObject *self) {
  *     for p in foo_multiport:
  *         p.set(42)
  */
-PyObject *py_port_iter_next(PyObject *self) {
-    generic_port_capsule_struct* port = (generic_port_capsule_struct*)self;
-    generic_port_capsule_struct* pyport = (generic_port_capsule_struct*)self->ob_type->tp_new(self->ob_type, NULL, NULL);
-
-    if (port->width < 1) {
-        PyErr_Format(PyExc_TypeError,
-                "Non-multiport type is not iteratable.");
-        return NULL;
-    }
-
-    if (port->current_index >= port->width) {
-        port->current_index = 0;
-        return NULL;
-    }
-
-    generic_port_instance_struct **cport =
-        (generic_port_instance_struct **)PyCapsule_GetPointer(port->port,"port");
-    if (cport == NULL) {
-        lf_print_error_and_exit("Null pointer received.");
-    }
-
-    // Py_XINCREF(cport[index]->value);
-    pyport->port = PyCapsule_New(cport[port->current_index], "port", NULL);
-    pyport->value = cport[port->current_index]->value;
-    pyport->is_present = cport[port->current_index]->is_present;
-    pyport->width = -2;
-    FEDERATED_ASSIGN_FIELDS(pyport, cport[port->current_index]);
-
-    port->current_index++;
-
-    if (pyport->value == NULL) {
-        Py_INCREF(Py_None);
-        pyport->value = Py_None;
-    }
-
-    Py_XINCREF(pyport);
-    return (PyObject*)pyport;
+PyObject* py_port_iter_next(PyObject* self) {
+  generic_port_capsule_struct* port = (generic_port_capsule_struct*)self;
+  generic_port_capsule_struct* pyport = (generic_port_capsule_struct*)self->ob_type->tp_new(self->ob_type, NULL, NULL);
+
+  if (port->width < 1) {
+    PyErr_Format(PyExc_TypeError, "Non-multiport type is not iteratable.");
+    return NULL;
+  }
+
+  if (port->current_index >= port->width) {
+    port->current_index = 0;
+    return NULL;
+  }
+
+  generic_port_instance_struct** cport = (generic_port_instance_struct**)PyCapsule_GetPointer(port->port, "port");
+  if (cport == NULL) {
+    lf_print_error_and_exit("Null pointer received.");
+  }
+
+  // Py_XINCREF(cport[index]->value);
+  pyport->port = PyCapsule_New(cport[port->current_index], "port", NULL);
+  pyport->value = cport[port->current_index]->value;
+  pyport->is_present = cport[port->current_index]->is_present;
+  pyport->width = -2;
+  FEDERATED_ASSIGN_FIELDS(pyport, cport[port->current_index]);
+
+  port->current_index++;
+
+  if (pyport->value == NULL) {
+    Py_INCREF(Py_None);
+    pyport->value = Py_None;
+  }
+
+  Py_XINCREF(pyport);
+  return (PyObject*)pyport;
 }
 /**
  * Get an item from a Linugua Franca port capsule type.
@@ -235,59 +230,51 @@ PyObject *py_port_iter_next(PyObject *self) {
  * @param key The index (key) which is used to retrieve an item from the underlying
  *             C array if the port is a multiport.
  */
-PyObject *py_port_capsule_get_item(PyObject *self, PyObject *key) {
-    generic_port_capsule_struct* port = (generic_port_capsule_struct*)self;
-
-    // Port is not a multiport
-    if (port->width == -2) {
-        return self;
-    }
-
-    if (PyObject_TypeCheck(key, &PyLong_Type) == 0) {
-        PyErr_Format(PyExc_TypeError,
-                     "Multiport indices must be integers, not %.200s",
-                     Py_TYPE(key)->tp_name);
-        return NULL;
-    }
-
-    generic_port_capsule_struct* pyport =
-        (generic_port_capsule_struct*)self->ob_type->tp_new(self->ob_type, NULL, NULL);
-    long long index = -3;
-
-    index = PyLong_AsLong(key);
-    if (index == -3) {
-        PyErr_Format(PyExc_TypeError,
-                     "Multiport indices must be integers, not %.200s",
-                     Py_TYPE(key)->tp_name);
-        return NULL;
-    }
-
-    generic_port_instance_struct **cport =
-        (generic_port_instance_struct **)PyCapsule_GetPointer(port->port,"port");
-    if (cport == NULL) {
-        lf_print_error_and_exit("Null pointer received.");
-    }
-
-    // Py_INCREF(cport[index]->value);
-    pyport->port = PyCapsule_New(cport[index], "port", NULL);
-    pyport->value = cport[index]->value;
-    pyport->is_present = cport[index]->is_present;
-    pyport->width = -2;
-    FEDERATED_ASSIGN_FIELDS(pyport, cport[index]);
-
-
-    LF_PRINT_LOG("Getting item index %lld. Is present is %d.", index, pyport->is_present);
-
-
-    if (pyport->value == NULL) {
-        Py_INCREF(Py_None);
-        pyport->value = Py_None;
-    }
-
-    //Py_INCREF(((generic_port_capsule_struct*)port)->value);
-    Py_XINCREF(pyport);
-    //Py_INCREF(self);
-    return (PyObject*)pyport;
+PyObject* py_port_capsule_get_item(PyObject* self, PyObject* key) {
+  generic_port_capsule_struct* port = (generic_port_capsule_struct*)self;
+
+  // Port is not a multiport
+  if (port->width == -2) {
+    return self;
+  }
+
+  if (PyObject_TypeCheck(key, &PyLong_Type) == 0) {
+    PyErr_Format(PyExc_TypeError, "Multiport indices must be integers, not %.200s", Py_TYPE(key)->tp_name);
+    return NULL;
+  }
+
+  generic_port_capsule_struct* pyport = (generic_port_capsule_struct*)self->ob_type->tp_new(self->ob_type, NULL, NULL);
+  long long index = -3;
+
+  index = PyLong_AsLong(key);
+  if (index == -3) {
+    PyErr_Format(PyExc_TypeError, "Multiport indices must be integers, not %.200s", Py_TYPE(key)->tp_name);
+    return NULL;
+  }
+
+  generic_port_instance_struct** cport = (generic_port_instance_struct**)PyCapsule_GetPointer(port->port, "port");
+  if (cport == NULL) {
+    lf_print_error_and_exit("Null pointer received.");
+  }
+
+  // Py_INCREF(cport[index]->value);
+  pyport->port = PyCapsule_New(cport[index], "port", NULL);
+  pyport->value = cport[index]->value;
+  pyport->is_present = cport[index]->is_present;
+  pyport->width = -2;
+  FEDERATED_ASSIGN_FIELDS(pyport, cport[index]);
+
+  LF_PRINT_LOG("Getting item index %lld. Is present is %d.", index, pyport->is_present);
+
+  if (pyport->value == NULL) {
+    Py_INCREF(Py_None);
+    pyport->value = Py_None;
+  }
+
+  // Py_INCREF(((generic_port_capsule_struct*)port)->value);
+  Py_XINCREF(pyport);
+  // Py_INCREF(self);
+  return (PyObject*)pyport;
 }
 
 /**
@@ -297,32 +284,28 @@ PyObject *py_port_capsule_get_item(PyObject *self, PyObject *key) {
  * @param item The index (which is ignored)
  * @param value The value to be assigned (which is ignored)
  */
-int py_port_capsule_assign_get_item(PyObject *self, PyObject *item, PyObject* value) {
-    PyErr_Format(PyExc_TypeError,
-                     "You cannot assign to ports directly. Please use the .set method.",
-                     Py_TYPE(item)->tp_name);
-    return -1;
+int py_port_capsule_assign_get_item(PyObject* self, PyObject* item, PyObject* value) {
+  PyErr_Format(PyExc_TypeError, "You cannot assign to ports directly. Please use the .set method.",
+               Py_TYPE(item)->tp_name);
+  return -1;
 }
 
 /**
  * A function that allows the invocation of len() on a port.
  * @param self A port of type LinguaFranca.port_capsule
  */
-Py_ssize_t py_port_length(PyObject *self) {
-    generic_port_capsule_struct* port = (generic_port_capsule_struct*)self;
-    LF_PRINT_DEBUG("Getting the length, which is %d.", port->width);
-    return (Py_ssize_t)port->width;
+Py_ssize_t py_port_length(PyObject* self) {
+  generic_port_capsule_struct* port = (generic_port_capsule_struct*)self;
+  LF_PRINT_DEBUG("Getting the length, which is %d.", port->width);
+  return (Py_ssize_t)port->width;
 }
 
 /**
  * Methods that convert a LinguaFranca.port_capsule into a mapping,
  * which allows it to be subscriptble.
  */
-PyMappingMethods py_port_as_mapping = {
-    (lenfunc) py_port_length,
-    (binaryfunc) py_port_capsule_get_item,
-    (objobjargproc) py_port_capsule_assign_get_item
-};
+PyMappingMethods py_port_as_mapping = {(lenfunc)py_port_length, (binaryfunc)py_port_capsule_get_item,
+                                       (objobjargproc)py_port_capsule_assign_get_item};
 
 /**
  * Initialize the port capsule self with the given optional values for
@@ -340,30 +323,29 @@ PyMappingMethods py_port_as_mapping = {
  *      - width: Used to indicate the width of a multiport. If the port
  *                   is not a multiport, this field will be -2.
  */
-int py_port_capsule_init(generic_port_capsule_struct *self, PyObject *args, PyObject *kwds) {
-    static char *kwlist[] = { "port", "value", "is_present", "width", "current_index", NULL};
-    PyObject *value = NULL, *tmp, *port = NULL;
-
-    if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOp", kwlist,
-                                     &port, &value, &self->is_present, &self->width, &self->current_index))
-    {
-        return -1;
-    }
-
-    if (value){
-        tmp = self->value;
-        Py_INCREF(value);
-        self->value = value;
-        Py_XDECREF(tmp);
-    }
-
-    if (port){
-        tmp = self->port;
-        Py_INCREF(port);
-        self->port = port;
-        Py_XDECREF(tmp);
-    }
-    return 0;
+int py_port_capsule_init(generic_port_capsule_struct* self, PyObject* args, PyObject* kwds) {
+  static char* kwlist[] = {"port", "value", "is_present", "width", "current_index", NULL};
+  PyObject *value = NULL, *tmp, *port = NULL;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOp", kwlist, &port, &value, &self->is_present, &self->width,
+                                   &self->current_index)) {
+    return -1;
+  }
+
+  if (value) {
+    tmp = self->value;
+    Py_INCREF(value);
+    self->value = value;
+    Py_XDECREF(tmp);
+  }
+
+  if (port) {
+    tmp = self->port;
+    Py_INCREF(port);
+    self->port = port;
+    Py_XDECREF(tmp);
+  }
+  return 0;
 }
 
 ////// Ports //////
@@ -378,10 +360,10 @@ int py_port_capsule_init(generic_port_capsule_struct *self, PyObject *args, PyOb
 PyMemberDef py_port_capsule_members[] = {
     {"port", T_OBJECT, offsetof(generic_port_capsule_struct, port), READONLY, ""},
     {"value", T_OBJECT, offsetof(generic_port_capsule_struct, value), READONLY, "Value of the port"},
-    {"is_present", T_BOOL, offsetof(generic_port_capsule_struct, is_present), READONLY, "Check if value is present at current logical time"},
+    {"is_present", T_BOOL, offsetof(generic_port_capsule_struct, is_present), READONLY,
+     "Check if value is present at current logical time"},
     {"width", T_INT, offsetof(generic_port_capsule_struct, width), READONLY, "Width of the multiport"},
-    FEDERATED_CAPSULE_MEMBER
-    {NULL}  /* Sentinel */
+    FEDERATED_CAPSULE_MEMBER{NULL} /* Sentinel */
 };
 
 /*
@@ -390,19 +372,17 @@ PyMemberDef py_port_capsule_members[] = {
  * set is used to set a port value and its is_present field.
  */
 PyMethodDef py_port_capsule_methods[] = {
-    {"__getitem__", (PyCFunction)py_port_capsule_get_item, METH_O|METH_COEXIST, "x.__getitem__(y) <==> x[y]"},
+    {"__getitem__", (PyCFunction)py_port_capsule_get_item, METH_O | METH_COEXIST, "x.__getitem__(y) <==> x[y]"},
     {"set", (PyCFunction)py_port_set, METH_VARARGS, "Set value of the port as well as the is_present field"},
-    {NULL}  /* Sentinel */
+    {NULL} /* Sentinel */
 };
 
-
 /*
  * The definition of port_capsule type object, which is
  * used to describe how port_capsule behaves.
  */
 PyTypeObject py_port_capsule_t = {
-    PyVarObject_HEAD_INIT(NULL, 0)
-    .tp_name = "LinguaFranca.port_capsule",
+    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "LinguaFranca.port_capsule",
     .tp_doc = "port_capsule objects",
     .tp_basicsize = sizeof(generic_port_capsule_struct),
     .tp_itemsize = 0,
@@ -411,8 +391,8 @@ PyTypeObject py_port_capsule_t = {
     .tp_iter = py_port_iter,
     .tp_iternext = py_port_iter_next,
     .tp_new = py_port_capsule_new,
-    .tp_init = (initproc) py_port_capsule_init,
-    .tp_dealloc = (destructor) py_port_capsule_dealloc,
+    .tp_init = (initproc)py_port_capsule_init,
+    .tp_dealloc = (destructor)py_port_capsule_dealloc,
     .tp_members = py_port_capsule_members,
     .tp_methods = py_port_capsule_methods,
 };
diff --git a/python/lib/python_tag.c b/python/lib/python_tag.c
index 93a16eaa9..991f94f64 100644
--- a/python/lib/python_tag.c
+++ b/python/lib/python_tag.c
@@ -38,13 +38,13 @@ PyTypeObject PyTagType;
 /**
  * Return the current tag object.
  */
-PyObject* py_lf_tag(PyObject *self, PyObject *args) {
-    py_tag_t *t = (py_tag_t *) PyType_GenericNew(&PyTagType, NULL, NULL);
-    if (t == NULL) {
-        return NULL;
-    }
-    t->tag = lf_tag(top_level_environment);
-    return (PyObject *) t;
+PyObject* py_lf_tag(PyObject* self, PyObject* args) {
+  py_tag_t* t = (py_tag_t*)PyType_GenericNew(&PyTagType, NULL, NULL);
+  if (t == NULL) {
+    return NULL;
+  }
+  t->tag = lf_tag(top_level_environment);
+  return (PyObject*)t;
 }
 
 /**
@@ -57,23 +57,21 @@ PyObject* py_lf_tag(PyObject *self, PyObject *args) {
  * @param tag2
  * @return -1, 0, or 1 depending on the relation.
  */
-PyObject* py_tag_compare(PyObject *self, PyObject *args) {
-    PyObject *tag1;
-    PyObject *tag2;
-    if (!PyArg_UnpackTuple(args, "args", 2, 2, &tag1, &tag2)) {
-        return NULL;
-    }
-    if (!PyObject_IsInstance(tag1, (PyObject *) &PyTagType)
-     || !PyObject_IsInstance(tag2, (PyObject *) &PyTagType)) {
-        PyErr_SetString(PyExc_TypeError, "Arguments must be Tag type.");
-        return NULL;
-    }
-    tag_t tag1_v = ((py_tag_t *) tag1)->tag;
-    tag_t tag2_v = ((py_tag_t *) tag2)->tag;
-    return PyLong_FromLong(lf_tag_compare(tag1_v, tag2_v));
+PyObject* py_tag_compare(PyObject* self, PyObject* args) {
+  PyObject* tag1;
+  PyObject* tag2;
+  if (!PyArg_UnpackTuple(args, "args", 2, 2, &tag1, &tag2)) {
+    return NULL;
+  }
+  if (!PyObject_IsInstance(tag1, (PyObject*)&PyTagType) || !PyObject_IsInstance(tag2, (PyObject*)&PyTagType)) {
+    PyErr_SetString(PyExc_TypeError, "Arguments must be Tag type.");
+    return NULL;
+  }
+  tag_t tag1_v = ((py_tag_t*)tag1)->tag;
+  tag_t tag2_v = ((py_tag_t*)tag2)->tag;
+  return PyLong_FromLong(lf_tag_compare(tag1_v, tag2_v));
 }
 
-
 /**
  * Initialize the Tag object with the given values for "time" and "microstep",
  * both of which are required.
@@ -82,12 +80,12 @@ PyObject* py_tag_compare(PyObject *self, PyObject *args) {
  *      - time: A logical time.
  *      - microstep: A microstep within the logical time "time".
  */
-static int Tag_init(py_tag_t *self, PyObject *args, PyObject *kwds) {
-    static char *kwlist[] = {"time", "microstep", NULL};
-    if (!PyArg_ParseTupleAndKeywords(args, kwds, "Lk", kwlist, &(self->tag.time), &(self->tag.microstep))) {
-        return -1;
-    }
-    return 0;
+static int Tag_init(py_tag_t* self, PyObject* args, PyObject* kwds) {
+  static char* kwlist[] = {"time", "microstep", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "Lk", kwlist, &(self->tag.time), &(self->tag.microstep))) {
+    return -1;
+  }
+  return 0;
 }
 
 /**
@@ -97,49 +95,47 @@ static int Tag_init(py_tag_t *self, PyObject *args, PyObject *kwds) {
  * @param other A py_tag_t object on the right side of the operator.
  * @param op the comparison operator
  */
-static PyObject *Tag_richcompare(py_tag_t *self, PyObject *other, int op) {
-    if (!PyObject_IsInstance(other, (PyObject *) &PyTagType)) {
-        PyErr_SetString(PyExc_TypeError, "Cannot compare a Tag with a non-Tag type.");
-        return NULL;
-    }
-
-    tag_t other_tag = ((py_tag_t *) other)->tag;
-    int c = -1;
-    if (op == Py_LT) {
-        c = (lf_tag_compare(self->tag, other_tag) < 0);
-    } else if (op == Py_LE) {
-        c = (lf_tag_compare(self->tag, other_tag) <= 0);
-    } else if (op == Py_EQ) {
-        c = (lf_tag_compare(self->tag, other_tag) == 0);
-    } else if (op == Py_NE) {
-        c = (lf_tag_compare(self->tag, other_tag) != 0);
-    } else if (op == Py_GT) {
-        c = (lf_tag_compare(self->tag, other_tag) > 0);
-    } else if (op == Py_GE) {
-        c = (lf_tag_compare(self->tag, other_tag) >= 0);
-    }
-    if (c < 0) {
-        PyErr_SetString(PyExc_RuntimeError, "Invalid comparator (This statement should never be reached). ");
-        return NULL;
-    } else if (c) {
-        Py_RETURN_TRUE;
-    } else {
-        Py_RETURN_FALSE;
-    }
+static PyObject* Tag_richcompare(py_tag_t* self, PyObject* other, int op) {
+  if (!PyObject_IsInstance(other, (PyObject*)&PyTagType)) {
+    PyErr_SetString(PyExc_TypeError, "Cannot compare a Tag with a non-Tag type.");
+    return NULL;
+  }
+
+  tag_t other_tag = ((py_tag_t*)other)->tag;
+  int c = -1;
+  if (op == Py_LT) {
+    c = (lf_tag_compare(self->tag, other_tag) < 0);
+  } else if (op == Py_LE) {
+    c = (lf_tag_compare(self->tag, other_tag) <= 0);
+  } else if (op == Py_EQ) {
+    c = (lf_tag_compare(self->tag, other_tag) == 0);
+  } else if (op == Py_NE) {
+    c = (lf_tag_compare(self->tag, other_tag) != 0);
+  } else if (op == Py_GT) {
+    c = (lf_tag_compare(self->tag, other_tag) > 0);
+  } else if (op == Py_GE) {
+    c = (lf_tag_compare(self->tag, other_tag) >= 0);
+  }
+  if (c < 0) {
+    PyErr_SetString(PyExc_RuntimeError, "Invalid comparator (This statement should never be reached). ");
+    return NULL;
+  } else if (c) {
+    Py_RETURN_TRUE;
+  } else {
+    Py_RETURN_FALSE;
+  }
 }
 
 /**
  * Tag getter for the "time" attribute
  **/
-static PyObject* Tag_get_time(py_tag_t *self, void *closure) {
-    return PyLong_FromLongLong(self->tag.time);
-}
+static PyObject* Tag_get_time(py_tag_t* self, void* closure) { return PyLong_FromLongLong(self->tag.time); }
 
 /**
  * Tag getter for the "microstep" attribute
  **/
-static PyObject* Tag_get_microstep(py_tag_t *self, void *closure) {
-    return PyLong_FromUnsignedLong(self->tag.microstep);
+static PyObject* Tag_get_microstep(py_tag_t* self, void* closure) {
+  return PyLong_FromUnsignedLong(self->tag.microstep);
 }
 
 /**
@@ -152,45 +148,38 @@ static PyObject* Tag_get_microstep(py_tag_t *self, void *closure) {
  * >>> t.time = 1  # illegal since setters are omitted.
  **/
 static PyGetSetDef Tag_getsetters[] = {
-    {"time", (getter) Tag_get_time},
-    {"microstep", (getter) Tag_get_microstep},
-    {NULL}  /* Sentinel */
+    {"time", (getter)Tag_get_time}, {"microstep", (getter)Tag_get_microstep}, {NULL} /* Sentinel */
 };
 /**
  * String representation for Tag object
  **/
-PyObject *Tag_str(PyObject *self) {
-    // Get PyLong representation of the "time" attribute. 
-    PyObject *time = Tag_get_time((py_tag_t*)self, NULL);
-    // Get PyLong representation of the "microstep" attribute. 
-    PyObject *microstep = Tag_get_microstep((py_tag_t*)self, NULL);
-
-    // Create the tag's string representation
-    PyObject *str = PyUnicode_FromFormat(
-        "Tag(time=%U, microstep=%U)",
-        PyObject_Str(time),
-        PyObject_Str(microstep) 
-    ); 
-    
-    Py_DECREF(time);
-    Py_DECREF(microstep);
-
-    return str;
+PyObject* Tag_str(PyObject* self) {
+  // Get PyLong representation of the "time" attribute.
+  PyObject* time = Tag_get_time((py_tag_t*)self, NULL);
+  // Get PyLong representation of the "microstep" attribute.
+  PyObject* microstep = Tag_get_microstep((py_tag_t*)self, NULL);
+
+  // Create the tag's string representation
+  PyObject* str = PyUnicode_FromFormat("Tag(time=%U, microstep=%U)", PyObject_Str(time), PyObject_Str(microstep));
+
+  Py_DECREF(time);
+  Py_DECREF(microstep);
+
+  return str;
 }
 
 /**
  * Definition of the PyTagType Object.
  **/
 PyTypeObject PyTagType = {
-    PyVarObject_HEAD_INIT(NULL, 0)
-    .tp_name = "LinguaFranca.Tag",
+    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "LinguaFranca.Tag",
     .tp_doc = "Tag object",
     .tp_basicsize = sizeof(py_tag_t),
     .tp_itemsize = 0,
     .tp_flags = Py_TPFLAGS_DEFAULT,
     .tp_new = PyType_GenericNew,
-    .tp_init = (initproc) Tag_init,
-    .tp_richcompare = (richcmpfunc) Tag_richcompare,
+    .tp_init = (initproc)Tag_init,
+    .tp_richcompare = (richcmpfunc)Tag_richcompare,
     .tp_getset = Tag_getsetters,
     .tp_str = Tag_str,
 };
@@ -202,10 +191,10 @@ PyTypeObject PyTagType = {
  * @return PyObject* The tag in Python.
  */
 py_tag_t* convert_C_tag_to_py(tag_t c_tag) {
-    py_tag_t* py_tag = PyObject_GC_New(py_tag_t, &PyTagType);
-    if (py_tag == NULL) {
-        lf_print_error_and_exit("Failed to convert tag from C to Python.");
-    }
-    py_tag->tag = c_tag;
-    return py_tag;
+  py_tag_t* py_tag = PyObject_GC_New(py_tag_t, &PyTagType);
+  if (py_tag == NULL) {
+    lf_print_error_and_exit("Failed to convert tag from C to Python.");
+  }
+  py_tag->tag = c_tag;
+  return py_tag;
 }
diff --git a/python/lib/python_time.c b/python/lib/python_time.c
index 6acd1ef85..34e4724a2 100644
--- a/python/lib/python_time.c
+++ b/python/lib/python_time.c
@@ -40,55 +40,52 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 /**
  * Return the logical time in nanoseconds.
  */
-PyObject* py_lf_time_logical(PyObject *self, PyObject *args) {
-    return PyLong_FromLongLong(lf_time_logical(top_level_environment));
+PyObject* py_lf_time_logical(PyObject* self, PyObject* args) {
+  return PyLong_FromLongLong(lf_time_logical(top_level_environment));
 }
 
 /**
  * Return the elapsed logical time in nanoseconds.
  */
-PyObject* py_lf_time_logical_elapsed(PyObject *self, PyObject *args) {
-    return PyLong_FromLongLong(lf_time_logical_elapsed(top_level_environment));
+PyObject* py_lf_time_logical_elapsed(PyObject* self, PyObject* args) {
+  return PyLong_FromLongLong(lf_time_logical_elapsed(top_level_environment));
 }
 
 /**
  * Return the physical time in nanoseconds.
  */
-PyObject* py_lf_time_physical(PyObject *self, PyObject *args) {
-    return PyLong_FromLongLong(lf_time_physical());
-}
+PyObject* py_lf_time_physical(PyObject* self, PyObject* args) { return PyLong_FromLongLong(lf_time_physical()); }
 
 /**
  * Return the elapsed physical time in nanoseconds.
  */
-PyObject* py_lf_time_physical_elapsed(PyObject *self, PyObject *args) {
-    return PyLong_FromLongLong(lf_time_physical_elapsed());
+PyObject* py_lf_time_physical_elapsed(PyObject* self, PyObject* args) {
+  return PyLong_FromLongLong(lf_time_physical_elapsed());
 }
 
 /**
  * Return the start time in nanoseconds.
  */
-PyObject* py_lf_time_start(PyObject *self, PyObject *args) {
-    return PyLong_FromLongLong(lf_time_start());
-}
+PyObject* py_lf_time_start(PyObject* self, PyObject* args) { return PyLong_FromLongLong(lf_time_start()); }
 
 PyTypeObject PyTimeType;
 
 PyMethodDef PyTimeTypeMethods[] = {
-    {"logical", (PyCFunction) py_lf_time_logical, METH_NOARGS|METH_STATIC, "Get the current logical time."},
-    {"logical_elapsed", (PyCFunction) py_lf_time_logical_elapsed, METH_NOARGS|METH_STATIC, "Get the current elapsed logical time"},
-    {"physical", (PyCFunction) py_lf_time_physical, METH_NOARGS|METH_STATIC, "Get the current physical time"},
-    {"physical_elapsed", (PyCFunction) py_lf_time_physical_elapsed, METH_NOARGS|METH_STATIC, "Get the current elapsed physical time"},
-    {"start", (PyCFunction) py_lf_time_start, METH_NOARGS|METH_STATIC, "Get the start time"},
-    {NULL}  /* Sentinel */
+    {"logical", (PyCFunction)py_lf_time_logical, METH_NOARGS | METH_STATIC, "Get the current logical time."},
+    {"logical_elapsed", (PyCFunction)py_lf_time_logical_elapsed, METH_NOARGS | METH_STATIC,
+     "Get the current elapsed logical time"},
+    {"physical", (PyCFunction)py_lf_time_physical, METH_NOARGS | METH_STATIC, "Get the current physical time"},
+    {"physical_elapsed", (PyCFunction)py_lf_time_physical_elapsed, METH_NOARGS | METH_STATIC,
+     "Get the current elapsed physical time"},
+    {"start", (PyCFunction)py_lf_time_start, METH_NOARGS | METH_STATIC, "Get the start time"},
+    {NULL} /* Sentinel */
 };
 
 /**
  * Definition of the PyTimeType Object.
  **/
 PyTypeObject PyTimeType = {
-    PyVarObject_HEAD_INIT(NULL, 0)
-    .tp_name = "LinguaFranca.TimeType",
+    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "LinguaFranca.TimeType",
     .tp_doc = "Time object",
     .tp_basicsize = 0,
     .tp_itemsize = 0,
diff --git a/python/lib/pythontarget.c b/python/lib/pythontarget.c
index 026f156a8..a485efaf5 100644
--- a/python/lib/pythontarget.c
+++ b/python/lib/pythontarget.c
@@ -33,7 +33,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "pythontarget.h"
 #include "modal_models/definitions.h"
-#include "platform.h"  // defines MAX_PATH on Windows
+#include "platform.h" // defines MAX_PATH on Windows
 #include "python_action.h"
 #include "python_port.h"
 #include "python_tag.h"
@@ -48,19 +48,17 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 ////////////// Global variables ///////////////
 // The global Python object that holds the .py module that the
 // C runtime interacts with
-PyObject *globalPythonModule = NULL;
+PyObject* globalPythonModule = NULL;
 
 // The dictionary of the Python module that is used to load
 // class objects from
-PyObject *globalPythonModuleDict = NULL;
-
+PyObject* globalPythonModuleDict = NULL;
 
 // Import pickle to enable native serialization
 PyObject* global_pickler = NULL;
 
 environment_t* top_level_environment = NULL;
 
-
 //////////// schedule Function(s) /////////////
 
 /**
@@ -76,78 +74,76 @@ environment_t* top_level_environment = NULL;
  *      - action: Pointer to an action on the self struct.
  *      - offset: The time offset over and above that in the action.
  **/
-PyObject* py_schedule(PyObject *self, PyObject *args) {
-    generic_action_capsule_struct* act = (generic_action_capsule_struct*)self;
-    long long offset;
-    PyObject* value = NULL;
-
-    if (!PyArg_ParseTuple(args, "L|O", &offset, &value))
-        return NULL;
-
-    lf_action_base_t* action = (lf_action_base_t*)PyCapsule_GetPointer(act->action,"action");
-    if (action == NULL) {
-        lf_print_error("Null pointer received.");
-        exit(1);
-    }
-
-    trigger_t* trigger = action->trigger;
-    lf_token_t* t = NULL;
-
-    // Check to see if value exists and token is not NULL
-    if (value && (trigger->tmplt.token != NULL)) {
-        // DEBUG: adjust the element_size (might not be necessary)
-        trigger->tmplt.token->type->element_size = sizeof(PyObject*);
-        trigger->tmplt.type.element_size = sizeof(PyObject*);
-        t = _lf_initialize_token_with_value(&trigger->tmplt, value, 1);
-
-        // Also give the new value back to the Python action itself
-        Py_INCREF(value);
-        act->value = value;
-    }
-
-
-    // Pass the token along
-    lf_schedule_token(action, offset, t);
-
-    // FIXME: handle is not passed to the Python side
-
-    Py_INCREF(Py_None);
-    return Py_None;
+PyObject* py_schedule(PyObject* self, PyObject* args) {
+  generic_action_capsule_struct* act = (generic_action_capsule_struct*)self;
+  long long offset;
+  PyObject* value = NULL;
+
+  if (!PyArg_ParseTuple(args, "L|O", &offset, &value))
+    return NULL;
+
+  lf_action_base_t* action = (lf_action_base_t*)PyCapsule_GetPointer(act->action, "action");
+  if (action == NULL) {
+    lf_print_error("Null pointer received.");
+    exit(1);
+  }
+
+  trigger_t* trigger = action->trigger;
+  lf_token_t* t = NULL;
+
+  // Check to see if value exists and token is not NULL
+  if (value && (trigger->tmplt.token != NULL)) {
+    // DEBUG: adjust the element_size (might not be necessary)
+    trigger->tmplt.token->type->element_size = sizeof(PyObject*);
+    trigger->tmplt.type.element_size = sizeof(PyObject*);
+    t = _lf_initialize_token_with_value(&trigger->tmplt, value, 1);
+
+    // Also give the new value back to the Python action itself
+    Py_INCREF(value);
+    act->value = value;
+  }
+
+  // Pass the token along
+  lf_schedule_token(action, offset, t);
+
+  // FIXME: handle is not passed to the Python side
+
+  Py_INCREF(Py_None);
+  return Py_None;
 }
 
-
 /**
  * Schedule an action to occur with the specified value and time offset
  * with a copy of the specified value.
  * See reactor.h for documentation.
  */
-PyObject* py_schedule_copy(PyObject *self, PyObject *args) {
-    generic_action_capsule_struct* act;
-    long long offset;
-    PyObject* value;
-    int length;
+PyObject* py_schedule_copy(PyObject* self, PyObject* args) {
+  generic_action_capsule_struct* act;
+  long long offset;
+  PyObject* value;
+  int length;
 
-    if (!PyArg_ParseTuple(args, "OLOi" ,&act, &offset, &value, &length))
-        return NULL;
+  if (!PyArg_ParseTuple(args, "OLOi", &act, &offset, &value, &length))
+    return NULL;
 
-    lf_action_base_t* action = (lf_action_base_t*)PyCapsule_GetPointer(act->action,"action");
-    if (action == NULL) {
-        lf_print_error("Null pointer received.");
-        exit(1);
-    }
+  lf_action_base_t* action = (lf_action_base_t*)PyCapsule_GetPointer(act->action, "action");
+  if (action == NULL) {
+    lf_print_error("Null pointer received.");
+    exit(1);
+  }
 
-    lf_schedule_copy(action, offset, value, length);
+  lf_schedule_copy(action, offset, value, length);
 
-    // FIXME: handle is not passed to the Python side
+  // FIXME: handle is not passed to the Python side
 
-    Py_INCREF(Py_None);
-    return Py_None;
+  Py_INCREF(Py_None);
+  return Py_None;
 }
 
 /**
  * Prototype for the main function.
  */
-int lf_reactor_c_main(int argc, const char *argv[]);
+int lf_reactor_c_main(int argc, const char* argv[]);
 
 /**
  * Prototype for lf_request_stop().
@@ -159,11 +155,11 @@ void lf_request_stop(void);
 /**
  * Stop execution at the conclusion of the current logical time.
  */
-PyObject* py_request_stop(PyObject *self, PyObject *args) {
-    lf_request_stop();
+PyObject* py_request_stop(PyObject* self, PyObject* args) {
+  lf_request_stop();
 
-    Py_INCREF(Py_None);
-    return Py_None;
+  Py_INCREF(Py_None);
+  return Py_None;
 }
 
 /**
@@ -181,59 +177,59 @@ PyObject* py_request_stop(PyObject *self, PyObject *args) {
  *  command-line argument.
  */
 const char** _lf_py_parse_argv_impl(PyObject* py_argv, size_t* argc) {
-    if (argc == NULL) {
-        lf_print_error_and_exit("_lf_py_parse_argv_impl called with an unallocated argc argument.");
+  if (argc == NULL) {
+    lf_print_error_and_exit("_lf_py_parse_argv_impl called with an unallocated argc argument.");
+  }
+
+  // List of arguments
+  const char** argv;
+
+  // Read the optional argvs
+  PyObject* py_argv_parsed = NULL;
+
+  if (!PyArg_ParseTuple(py_argv, "|O", &py_argv_parsed)) {
+    PyErr_SetString(PyExc_TypeError, "Could not get argvs.");
+    return NULL;
+  }
+
+  if (py_argv_parsed == NULL) {
+    // Build a generic argv with just one argument, which
+    // is the module name.
+    *argc = 1;
+    argv = malloc(2 * sizeof(char*));
+    argv[0] = TOSTRING(MODULE_NAME);
+    argv[1] = NULL;
+    return argv;
+  }
+
+  Py_ssize_t argv_size = PyList_Size(py_argv_parsed);
+  argv = malloc(argv_size * sizeof(char*));
+  for (Py_ssize_t i = 0; i < argv_size; i++) {
+    PyObject* list_item = PyList_GetItem(py_argv_parsed, i);
+    if (list_item == NULL) {
+      if (PyErr_Occurred()) {
+        PyErr_Print();
+      }
+      lf_print_error_and_exit("Could not get argv list item %zd.", i);
     }
 
-    // List of arguments
-    const char** argv;
-
-    // Read the optional argvs
-    PyObject* py_argv_parsed = NULL;
-
-    if (!PyArg_ParseTuple(py_argv, "|O", &py_argv_parsed)) {
-        PyErr_SetString(PyExc_TypeError, "Could not get argvs.");
-        return NULL;
+    PyObject* encoded_string = PyUnicode_AsEncodedString(list_item, "UTF-8", "strict");
+    if (encoded_string == NULL) {
+      if (PyErr_Occurred()) {
+        PyErr_Print();
+      }
+      lf_print_error_and_exit("Failed to encode argv list item %zd.", i);
     }
 
-    if (py_argv_parsed == NULL) {
-        // Build a generic argv with just one argument, which
-        // is the module name.
-        *argc = 1;
-        argv = malloc(2 * sizeof(char*));
-        argv[0] = TOSTRING(MODULE_NAME);
-        argv[1] = NULL;
-        return argv;
-    }
+    argv[i] = PyBytes_AsString(encoded_string);
 
-    Py_ssize_t argv_size = PyList_Size(py_argv_parsed);
-    argv = malloc(argv_size * sizeof(char *));
-    for (Py_ssize_t i = 0; i < argv_size; i++) {
-        PyObject* list_item = PyList_GetItem(py_argv_parsed, i);
-        if (list_item == NULL) {
-            if (PyErr_Occurred()) {
-                PyErr_Print();
-            }
-            lf_print_error_and_exit("Could not get argv list item %zd.", i);
-        }
-
-        PyObject *encoded_string = PyUnicode_AsEncodedString(list_item, "UTF-8", "strict");
-        if (encoded_string == NULL) {
-            if (PyErr_Occurred()) {
-                PyErr_Print();
-            }
-            lf_print_error_and_exit("Failed to encode argv list item %zd.", i);
-        }
-
-        argv[i] = PyBytes_AsString(encoded_string);
-
-        if (PyErr_Occurred()) {
-            PyErr_Print();
-            lf_print_error_and_exit("Could not convert argv list item %zd to char*.", i);
-        }
+    if (PyErr_Occurred()) {
+      PyErr_Print();
+      lf_print_error_and_exit("Could not convert argv list item %zd to char*.", i);
     }
-    *argc = argv_size;
-    return argv;
+  }
+  *argc = argv_size;
+  return argv;
 }
 
 static bool py_initialized = false;
@@ -242,14 +238,14 @@ static bool py_initialized = false;
  * @brief Initialize the Python interpreter if it hasn't already been.
  */
 void py_initialize_interpreter(void) {
-    if (!py_initialized) {
-        py_initialized = true;
+  if (!py_initialized) {
+    py_initialized = true;
 
-        // Initialize the Python interpreter
-        Py_Initialize();
+    // Initialize the Python interpreter
+    Py_Initialize();
 
-        LF_PRINT_DEBUG("Initialized the Python interpreter.");
-    }
+    LF_PRINT_DEBUG("Initialized the Python interpreter.");
+  }
 }
 
 //////////////////////////////////////////////////////////////
@@ -262,34 +258,33 @@ void py_initialize_interpreter(void) {
  */
 PyObject* py_main(PyObject* self, PyObject* py_args) {
 
-    LF_PRINT_DEBUG("Initializing main.");
+  LF_PRINT_DEBUG("Initializing main.");
 
-    size_t argc;
-    const char** argv = _lf_py_parse_argv_impl(py_args, &argc);
+  size_t argc;
+  const char** argv = _lf_py_parse_argv_impl(py_args, &argc);
 
-    py_initialize_interpreter();
+  py_initialize_interpreter();
 
-    // Load the pickle module
+  // Load the pickle module
+  if (global_pickler == NULL) {
+    global_pickler = PyImport_ImportModule("pickle");
     if (global_pickler == NULL) {
-        global_pickler = PyImport_ImportModule("pickle");
-        if (global_pickler == NULL) {
-            if (PyErr_Occurred()) {
-                PyErr_Print();
-            }
-            lf_print_error_and_exit("Failed to load the module 'pickle'.");
-        }
+      if (PyErr_Occurred()) {
+        PyErr_Print();
+      }
+      lf_print_error_and_exit("Failed to load the module 'pickle'.");
     }
+  }
 
-    // Store a reference to the top-level environment
-    int num_environments = _lf_get_environments(&top_level_environment);
-    LF_ASSERT(num_environments == 1, "Python target only supports programs with a single environment/enclave");
+  // Store a reference to the top-level environment
+  int num_environments = _lf_get_environments(&top_level_environment);
+  LF_ASSERT(num_environments == 1, "Python target only supports programs with a single environment/enclave");
 
-    Py_BEGIN_ALLOW_THREADS
-    lf_reactor_c_main(argc, argv);
-    Py_END_ALLOW_THREADS
+  Py_BEGIN_ALLOW_THREADS lf_reactor_c_main(argc, argv);
+  Py_END_ALLOW_THREADS
 
-    Py_INCREF(Py_None);
-    return Py_None;
+      Py_INCREF(Py_None);
+  return Py_None;
 }
 
 ///// Python Module Built-ins
@@ -304,27 +299,19 @@ PyObject* py_main(PyObject* self, PyObject* py_args) {
  * @see schedule_copy
  * @see request_stop
  */
-static PyMethodDef GEN_NAME(MODULE_NAME,_methods)[] = {
-  {"start", py_main, METH_VARARGS, NULL},
-  {"schedule_copy", py_schedule_copy, METH_VARARGS, NULL},
-  {"tag", py_lf_tag, METH_NOARGS, NULL},
-  {"tag_compare", py_tag_compare, METH_VARARGS, NULL},
-  {"request_stop", py_request_stop, METH_NOARGS, "Request stop"},
-  {NULL, NULL, 0, NULL}
-};
-
+static PyMethodDef GEN_NAME(MODULE_NAME, _methods)[] = {{"start", py_main, METH_VARARGS, NULL},
+                                                        {"schedule_copy", py_schedule_copy, METH_VARARGS, NULL},
+                                                        {"tag", py_lf_tag, METH_NOARGS, NULL},
+                                                        {"tag_compare", py_tag_compare, METH_VARARGS, NULL},
+                                                        {"request_stop", py_request_stop, METH_NOARGS, "Request stop"},
+                                                        {NULL, NULL, 0, NULL}};
 
 /**
  * Define the Lingua Franca module.
  * The MODULE_NAME is given by the generated code.
  */
-static PyModuleDef MODULE_NAME = {
-    PyModuleDef_HEAD_INIT,
-    TOSTRING(MODULE_NAME),
-    "LinguaFranca Python Module",
-    -1,
-    GEN_NAME(MODULE_NAME,_methods)
-};
+static PyModuleDef MODULE_NAME = {PyModuleDef_HEAD_INIT, TOSTRING(MODULE_NAME), "LinguaFranca Python Module", -1,
+                                  GEN_NAME(MODULE_NAME, _methods)};
 
 //////////////////////////////////////////////////////////////
 /////////////  Module Initialization
@@ -338,75 +325,74 @@ static PyModuleDef MODULE_NAME = {
  * For example for a module named LinguaFrancaFoo, this function
  * will be called PyInit_LinguaFrancaFoo
  */
-PyMODINIT_FUNC
-GEN_NAME(PyInit_,MODULE_NAME)(void) {
-
-    PyObject *m;
-
-    // As of Python 11, this function may be called before py_main, so we need to
-    // initialize the interpreter.
-    py_initialize_interpreter();
-
-    // Initialize the port_capsule type
-    if (PyType_Ready(&py_port_capsule_t) < 0) {
-        return NULL;
-    }
-
-    // Initialize the action_capsule type
-    if (PyType_Ready(&py_action_capsule_t) < 0) {
-        return NULL;
-    }
-
-    // Initialize the Tag type
-    if (PyType_Ready(&PyTagType) < 0) {
-        return NULL;
-    }
-
-    // Initialize the Time type
-    if (PyType_Ready(&PyTimeType) < 0) {
-        return NULL;
-    }
-
-    m = PyModule_Create(&MODULE_NAME);
-
-    if (m == NULL) {
-        return NULL;
-    }
-
-    initialize_mode_capsule_t(m);
-
-    // Add the port_capsule type to the module's dictionary
-    Py_INCREF(&py_port_capsule_t);
-    if (PyModule_AddObject(m, "port_capsule", (PyObject *) &py_port_capsule_t) < 0) {
-        Py_DECREF(&py_port_capsule_t);
-        Py_DECREF(m);
-        return NULL;
-    }
-
-    // Add the action_capsule type to the module's dictionary
-    Py_INCREF(&py_action_capsule_t);
-    if (PyModule_AddObject(m, "action_capsule_t", (PyObject *) &py_action_capsule_t) < 0) {
-        Py_DECREF(&py_action_capsule_t);
-        Py_DECREF(m);
-        return NULL;
-    }
-
-    // Add the Tag type to the module's dictionary
-    Py_INCREF(&PyTagType);
-    if (PyModule_AddObject(m, "Tag", (PyObject *) &PyTagType) < 0) {
-        Py_DECREF(&PyTagType);
-        Py_DECREF(m);
-        return NULL;
-    }
-
-    // Add the Time type to the module's dictionary
-    Py_INCREF(&PyTimeType);
-    if (PyModule_AddObject(m, "time", (PyObject *) &PyTimeType) < 0) {
-        Py_DECREF(&PyTimeType);
-        Py_DECREF(m);
-        return NULL;
-    }
-    return m;
+PyMODINIT_FUNC GEN_NAME(PyInit_, MODULE_NAME)(void) {
+
+  PyObject* m;
+
+  // As of Python 11, this function may be called before py_main, so we need to
+  // initialize the interpreter.
+  py_initialize_interpreter();
+
+  // Initialize the port_capsule type
+  if (PyType_Ready(&py_port_capsule_t) < 0) {
+    return NULL;
+  }
+
+  // Initialize the action_capsule type
+  if (PyType_Ready(&py_action_capsule_t) < 0) {
+    return NULL;
+  }
+
+  // Initialize the Tag type
+  if (PyType_Ready(&PyTagType) < 0) {
+    return NULL;
+  }
+
+  // Initialize the Time type
+  if (PyType_Ready(&PyTimeType) < 0) {
+    return NULL;
+  }
+
+  m = PyModule_Create(&MODULE_NAME);
+
+  if (m == NULL) {
+    return NULL;
+  }
+
+  initialize_mode_capsule_t(m);
+
+  // Add the port_capsule type to the module's dictionary
+  Py_INCREF(&py_port_capsule_t);
+  if (PyModule_AddObject(m, "port_capsule", (PyObject*)&py_port_capsule_t) < 0) {
+    Py_DECREF(&py_port_capsule_t);
+    Py_DECREF(m);
+    return NULL;
+  }
+
+  // Add the action_capsule type to the module's dictionary
+  Py_INCREF(&py_action_capsule_t);
+  if (PyModule_AddObject(m, "action_capsule_t", (PyObject*)&py_action_capsule_t) < 0) {
+    Py_DECREF(&py_action_capsule_t);
+    Py_DECREF(m);
+    return NULL;
+  }
+
+  // Add the Tag type to the module's dictionary
+  Py_INCREF(&PyTagType);
+  if (PyModule_AddObject(m, "Tag", (PyObject*)&PyTagType) < 0) {
+    Py_DECREF(&PyTagType);
+    Py_DECREF(m);
+    return NULL;
+  }
+
+  // Add the Time type to the module's dictionary
+  Py_INCREF(&PyTimeType);
+  if (PyModule_AddObject(m, "time", (PyObject*)&PyTimeType) < 0) {
+    Py_DECREF(&PyTimeType);
+    Py_DECREF(m);
+    return NULL;
+  }
+  return m;
 }
 
 //////////////////////////////////////////////////////////////
@@ -417,9 +403,7 @@ GEN_NAME(PyInit_,MODULE_NAME)(void) {
 /**
  * A function that destroys action capsules
  **/
-void destroy_action_capsule(PyObject* capsule) {
-    free(PyCapsule_GetPointer(capsule, "action"));
-}
+void destroy_action_capsule(PyObject* capsule) { free(PyCapsule_GetPointer(capsule, "action")); }
 
 /**
  * A function that is called any time a Python reaction is called with
@@ -437,48 +421,46 @@ void destroy_action_capsule(PyObject* capsule) {
  * Individual ports can then later be accessed in Python code as port[idx].
  */
 PyObject* convert_C_port_to_py(void* port, int width) {
-    // Create the port struct in Python
-    PyObject* cap =
-        (PyObject*)PyObject_GC_New(generic_port_capsule_struct, &py_port_capsule_t);
-    if (cap == NULL) {
-        lf_print_error_and_exit("Failed to convert port.");
+  // Create the port struct in Python
+  PyObject* cap = (PyObject*)PyObject_GC_New(generic_port_capsule_struct, &py_port_capsule_t);
+  if (cap == NULL) {
+    lf_print_error_and_exit("Failed to convert port.");
+  }
+
+  // Create the capsule to hold the void* port
+  PyObject* capsule = PyCapsule_New(port, "port", NULL);
+  if (capsule == NULL) {
+    lf_print_error_and_exit("Failed to convert port.");
+  }
+
+  // Fill in the Python port struct
+  ((generic_port_capsule_struct*)cap)->port = capsule;
+  ((generic_port_capsule_struct*)cap)->width = width;
+
+  if (width == -2) {
+    generic_port_instance_struct* cport = (generic_port_instance_struct*)port;
+    FEDERATED_ASSIGN_FIELDS(((generic_port_capsule_struct*)cap), cport);
+
+    ((generic_port_capsule_struct*)cap)->is_present = cport->is_present;
+
+    if (cport->value == NULL) {
+      // Value is absent
+      Py_INCREF(Py_None);
+      ((generic_port_capsule_struct*)cap)->value = Py_None;
+      return cap;
     }
 
-    // Create the capsule to hold the void* port
-    PyObject* capsule = PyCapsule_New(port, "port", NULL);
-    if (capsule == NULL) {
-        lf_print_error_and_exit("Failed to convert port.");
-    }
-
-    // Fill in the Python port struct
-    ((generic_port_capsule_struct*)cap)->port = capsule;
-    ((generic_port_capsule_struct*)cap)->width = width;
-
-    if (width == -2) {
-        generic_port_instance_struct* cport = (generic_port_instance_struct *) port;
-        FEDERATED_ASSIGN_FIELDS(((generic_port_capsule_struct*)cap), cport);
-
-        ((generic_port_capsule_struct*)cap)->is_present =
-            cport->is_present;
-
-        if (cport->value == NULL) {
-            // Value is absent
-            Py_INCREF(Py_None);
-            ((generic_port_capsule_struct*)cap)->value = Py_None;
-            return cap;
-        }
-
-        //Py_INCREF(cport->value);
-        ((generic_port_capsule_struct*)cap)->value = cport->value;
-    } else {
-        // Multiport. Value of the multiport itself cannot be accessed, so we set it to
-        // None.
-        Py_INCREF(Py_None);
-        ((generic_port_capsule_struct*)cap)->value = Py_None;
-        ((generic_port_capsule_struct*)cap)->is_present = false;
-    }
+    // Py_INCREF(cport->value);
+    ((generic_port_capsule_struct*)cap)->value = cport->value;
+  } else {
+    // Multiport. Value of the multiport itself cannot be accessed, so we set it to
+    // None.
+    Py_INCREF(Py_None);
+    ((generic_port_capsule_struct*)cap)->value = Py_None;
+    ((generic_port_capsule_struct*)cap)->is_present = false;
+  }
 
-    return cap;
+  return cap;
 }
 
 /**
@@ -495,52 +477,51 @@ PyObject* convert_C_port_to_py(void* port, int width) {
  * made by this function, the "value" and "is_present" are passed to the function
  * instead of expecting them to exist.
  *
- * The void* pointer to the C action instance is encapsulated in a PyCapsule instead of passing an exposed pointer through
- * Python. @see https://docs.python.org/3/c-api/capsule.html
- * This encapsulation is done by calling PyCapsule_New(action, "name_of_the_container_in_the_capsule", NULL),
- * where "name_of_the_container_in_the_capsule" is an agreed-upon container name inside the capsule. This
- * capsule can then be treated as a PyObject* and safely passed through Python code. On the other end
- * (which is in schedule functions), PyCapsule_GetPointer(received_action,"action") can be called to retrieve
- * the void* pointer into received_action.
+ * The void* pointer to the C action instance is encapsulated in a PyCapsule instead of passing an exposed pointer
+ *through Python. @see https://docs.python.org/3/c-api/capsule.html This encapsulation is done by calling
+ *PyCapsule_New(action, "name_of_the_container_in_the_capsule", NULL), where "name_of_the_container_in_the_capsule" is
+ *an agreed-upon container name inside the capsule. This capsule can then be treated as a PyObject* and safely passed
+ *through Python code. On the other end (which is in schedule functions), PyCapsule_GetPointer(received_action,"action")
+ *can be called to retrieve the void* pointer into received_action.
  **/
 PyObject* convert_C_action_to_py(void* action) {
-    // Convert to trigger_t
-    trigger_t* trigger = ((lf_action_base_t*)action)->trigger;
-
-    // Create the action struct in Python
-    PyObject* cap = (PyObject*)PyObject_GC_New(generic_action_capsule_struct, &py_action_capsule_t);
-    if (cap == NULL) {
-        lf_print_error_and_exit("Failed to convert action.");
-    }
-
-    // Create the capsule to hold the void* action
-    PyObject* capsule = PyCapsule_New(action, "action", NULL);
-    if (capsule == NULL) {
-        lf_print_error_and_exit("Failed to convert action.");
-    }
-
-    // Fill in the Python action struct
-    ((generic_action_capsule_struct*)cap)->action = capsule;
-    ((generic_action_capsule_struct*)cap)->is_present = trigger->status;
-    FEDERATED_ASSIGN_FIELDS(((generic_port_capsule_struct*)cap), ((generic_action_instance_struct*)action));
-
-    // If token is not initialized, that is all we need to set
-    if (trigger->tmplt.token == NULL) {
-        Py_INCREF(Py_None);
-        ((generic_action_capsule_struct*)cap)->value = Py_None;
-        return cap;
-    }
+  // Convert to trigger_t
+  trigger_t* trigger = ((lf_action_base_t*)action)->trigger;
+
+  // Create the action struct in Python
+  PyObject* cap = (PyObject*)PyObject_GC_New(generic_action_capsule_struct, &py_action_capsule_t);
+  if (cap == NULL) {
+    lf_print_error_and_exit("Failed to convert action.");
+  }
+
+  // Create the capsule to hold the void* action
+  PyObject* capsule = PyCapsule_New(action, "action", NULL);
+  if (capsule == NULL) {
+    lf_print_error_and_exit("Failed to convert action.");
+  }
+
+  // Fill in the Python action struct
+  ((generic_action_capsule_struct*)cap)->action = capsule;
+  ((generic_action_capsule_struct*)cap)->is_present = trigger->status;
+  FEDERATED_ASSIGN_FIELDS(((generic_port_capsule_struct*)cap), ((generic_action_instance_struct*)action));
+
+  // If token is not initialized, that is all we need to set
+  if (trigger->tmplt.token == NULL) {
+    Py_INCREF(Py_None);
+    ((generic_action_capsule_struct*)cap)->value = Py_None;
+    return cap;
+  }
 
-    // Default value is None
-    if (trigger->tmplt.token->value == NULL) {
-        Py_INCREF(Py_None);
-        trigger->tmplt.token->value = Py_None;
-    }
+  // Default value is None
+  if (trigger->tmplt.token->value == NULL) {
+    Py_INCREF(Py_None);
+    trigger->tmplt.token->value = Py_None;
+  }
 
-    // Actions in Python always use token type
-    ((generic_action_capsule_struct*)cap)->value = trigger->tmplt.token->value;
+  // Actions in Python always use token type
+  ((generic_action_capsule_struct*)cap)->value = trigger->tmplt.token->value;
 
-    return cap;
+  return cap;
 }
 
 /**
@@ -562,135 +543,132 @@ PyObject* convert_C_action_to_py(void* action) {
  * @param pArgs the PyList of arguments to be sent to function func()
  * @return The function or NULL on error.
  */
-PyObject*
-get_python_function(string module, string class, int instance_id, string func) {
-    LF_PRINT_DEBUG("Starting the function start().");
-
-    // Necessary PyObject variables to load the react() function from test.py
-    PyObject* pFileName = NULL;
-    PyObject* pModule = NULL;
-    PyObject* pDict = NULL;
-    PyObject* pClasses = NULL;
-    PyObject* pClass = NULL;
-    PyObject* pFunc = NULL;
-
-    // According to
-    // https://docs.python.org/3/c-api/init.html#non-python-created-threads
-    // the following code does the following:
-    // - Register this thread with the interpreter
-    // - Acquire the GIL (Global Interpreter Lock)
-    // - Store (return) the thread pointer
-    // When done, we should always call PyGILState_Release(gstate);
-    PyGILState_STATE gstate;
-    gstate = PyGILState_Ensure();
-
-    // If the Python module is already loaded, skip this.
-    if (globalPythonModule == NULL) {
-        // Decode the MODULE name into a filesystem compatible string
-        pFileName = PyUnicode_DecodeFSDefault(module);
-
-        // Set the Python search path to be the current working directory
-        char cwd[PATH_MAX];
-        if ( getcwd(cwd, sizeof(cwd)) == NULL) {
-            lf_print_error_and_exit("Failed to get the current working directory.");
-        }
-
-        wchar_t wcwd[PATH_MAX];
-
-        mbstowcs(wcwd, cwd, PATH_MAX);
-
-        Py_SetPath(wcwd);
-
-        LF_PRINT_DEBUG("Loading module %s in %s.", module, cwd);
-
-        pModule = PyImport_Import(pFileName);
-
-        LF_PRINT_DEBUG("Loaded module %p.", pModule);
-
-        // Free the memory occupied by pFileName
-        Py_DECREF(pFileName);
-
-        // Check if the module was correctly loaded
-        if (pModule != NULL) {
-            // Get contents of module. pDict is a borrowed reference.
-            pDict = PyModule_GetDict(pModule);
-            if (pDict == NULL) {
-                PyErr_Print();
-                lf_print_error("Failed to load contents of module %s.", module);
-                /* Release the thread. No Python API allowed beyond this point. */
-                PyGILState_Release(gstate);
-                return NULL;
-            }
-
-            Py_INCREF(pModule);
-            globalPythonModule = pModule;
-            Py_INCREF(pDict);
-            globalPythonModuleDict = pDict;
-
-        }
+PyObject* get_python_function(string module, string class, int instance_id, string func) {
+  LF_PRINT_DEBUG("Starting the function start().");
+
+  // Necessary PyObject variables to load the react() function from test.py
+  PyObject* pFileName = NULL;
+  PyObject* pModule = NULL;
+  PyObject* pDict = NULL;
+  PyObject* pClasses = NULL;
+  PyObject* pClass = NULL;
+  PyObject* pFunc = NULL;
+
+  // According to
+  // https://docs.python.org/3/c-api/init.html#non-python-created-threads
+  // the following code does the following:
+  // - Register this thread with the interpreter
+  // - Acquire the GIL (Global Interpreter Lock)
+  // - Store (return) the thread pointer
+  // When done, we should always call PyGILState_Release(gstate);
+  PyGILState_STATE gstate;
+  gstate = PyGILState_Ensure();
+
+  // If the Python module is already loaded, skip this.
+  if (globalPythonModule == NULL) {
+    // Decode the MODULE name into a filesystem compatible string
+    pFileName = PyUnicode_DecodeFSDefault(module);
+
+    // Set the Python search path to be the current working directory
+    char cwd[PATH_MAX];
+    if (getcwd(cwd, sizeof(cwd)) == NULL) {
+      lf_print_error_and_exit("Failed to get the current working directory.");
     }
 
-    if (globalPythonModule != NULL && globalPythonModuleDict != NULL) {
-        Py_INCREF(globalPythonModule);
-        // Convert the class name to a PyObject
-        PyObject* list_name = PyUnicode_DecodeFSDefault(class);
-
-        // Get the class list
-        Py_INCREF(globalPythonModuleDict);
-        pClasses = PyDict_GetItem(globalPythonModuleDict, list_name);
-        if (pClasses == NULL){
-            PyErr_Print();
-            lf_print_error("Failed to load class list \"%s\" in module %s.", class, module);
-            /* Release the thread. No Python API allowed beyond this point. */
-            PyGILState_Release(gstate);
-            return NULL;
-        }
-
-        Py_DECREF(globalPythonModuleDict);
-
-        pClass = PyList_GetItem(pClasses, instance_id);
-        if (pClass == NULL) {
-            PyErr_Print();
-            lf_print_error("Failed to load class \"%s[%d]\" in module %s.", class, instance_id, module);
-            /* Release the thread. No Python API allowed beyond this point. */
-            PyGILState_Release(gstate);
-            return NULL;
-        }
-
-        LF_PRINT_DEBUG("Loading function %s.", func);
-
-        // Get the function react from test.py
-        pFunc = PyObject_GetAttrString(pClass, func);
-
-        LF_PRINT_DEBUG("Loaded function %p.", pFunc);
-
-        // Check if the funciton is loaded properly
-        // and if it is callable
-        if (pFunc && PyCallable_Check(pFunc)) {
-            LF_PRINT_DEBUG("Calling function %s from class %s[%d].", func , class, instance_id);
-            Py_INCREF(pFunc);
-            /* Release the thread. No Python API allowed beyond this point. */
-            PyGILState_Release(gstate);
-            return pFunc;
-        }
-        else {
-            // Function is not found or it is not callable
-            if (PyErr_Occurred()) {
-                PyErr_Print();
-            }
-            lf_print_error("Function %s was not found or is not callable.", func);
-        }
-        Py_XDECREF(pFunc);
-        Py_DECREF(globalPythonModule);
-    } else {
+    wchar_t wcwd[PATH_MAX];
+
+    mbstowcs(wcwd, cwd, PATH_MAX);
+
+    Py_SetPath(wcwd);
+
+    LF_PRINT_DEBUG("Loading module %s in %s.", module, cwd);
+
+    pModule = PyImport_Import(pFileName);
+
+    LF_PRINT_DEBUG("Loaded module %p.", pModule);
+
+    // Free the memory occupied by pFileName
+    Py_DECREF(pFileName);
+
+    // Check if the module was correctly loaded
+    if (pModule != NULL) {
+      // Get contents of module. pDict is a borrowed reference.
+      pDict = PyModule_GetDict(pModule);
+      if (pDict == NULL) {
         PyErr_Print();
-        lf_print_error("Failed to load \"%s\".", module);
+        lf_print_error("Failed to load contents of module %s.", module);
+        /* Release the thread. No Python API allowed beyond this point. */
+        PyGILState_Release(gstate);
+        return NULL;
+      }
+
+      Py_INCREF(pModule);
+      globalPythonModule = pModule;
+      Py_INCREF(pDict);
+      globalPythonModuleDict = pDict;
+    }
+  }
+
+  if (globalPythonModule != NULL && globalPythonModuleDict != NULL) {
+    Py_INCREF(globalPythonModule);
+    // Convert the class name to a PyObject
+    PyObject* list_name = PyUnicode_DecodeFSDefault(class);
+
+    // Get the class list
+    Py_INCREF(globalPythonModuleDict);
+    pClasses = PyDict_GetItem(globalPythonModuleDict, list_name);
+    if (pClasses == NULL) {
+      PyErr_Print();
+      lf_print_error("Failed to load class list \"%s\" in module %s.", class, module);
+      /* Release the thread. No Python API allowed beyond this point. */
+      PyGILState_Release(gstate);
+      return NULL;
     }
 
-    LF_PRINT_DEBUG("Done with start().");
+    Py_DECREF(globalPythonModuleDict);
 
-    Py_INCREF(Py_None);
-    /* Release the thread. No Python API allowed beyond this point. */
-    PyGILState_Release(gstate);
-    return Py_None;
+    pClass = PyList_GetItem(pClasses, instance_id);
+    if (pClass == NULL) {
+      PyErr_Print();
+      lf_print_error("Failed to load class \"%s[%d]\" in module %s.", class, instance_id, module);
+      /* Release the thread. No Python API allowed beyond this point. */
+      PyGILState_Release(gstate);
+      return NULL;
+    }
+
+    LF_PRINT_DEBUG("Loading function %s.", func);
+
+    // Get the function react from test.py
+    pFunc = PyObject_GetAttrString(pClass, func);
+
+    LF_PRINT_DEBUG("Loaded function %p.", pFunc);
+
+    // Check if the funciton is loaded properly
+    // and if it is callable
+    if (pFunc && PyCallable_Check(pFunc)) {
+      LF_PRINT_DEBUG("Calling function %s from class %s[%d].", func, class, instance_id);
+      Py_INCREF(pFunc);
+      /* Release the thread. No Python API allowed beyond this point. */
+      PyGILState_Release(gstate);
+      return pFunc;
+    } else {
+      // Function is not found or it is not callable
+      if (PyErr_Occurred()) {
+        PyErr_Print();
+      }
+      lf_print_error("Function %s was not found or is not callable.", func);
+    }
+    Py_XDECREF(pFunc);
+    Py_DECREF(globalPythonModule);
+  } else {
+    PyErr_Print();
+    lf_print_error("Failed to load \"%s\".", module);
+  }
+
+  LF_PRINT_DEBUG("Done with start().");
+
+  Py_INCREF(Py_None);
+  /* Release the thread. No Python API allowed beyond this point. */
+  PyGILState_Release(gstate);
+  return Py_None;
 }
diff --git a/tag/api/CMakeLists.txt b/tag/api/CMakeLists.txt
new file mode 100644
index 000000000..b8ca986a0
--- /dev/null
+++ b/tag/api/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_library(lf-tag-api INTERFACE)
+target_include_directories(lf-tag-api INTERFACE ${CMAKE_CURRENT_LIST_DIR})
+add_library(lf::tag-api ALIAS lf-tag-api)
diff --git a/include/core/tag.h b/tag/api/tag.h
similarity index 75%
rename from include/core/tag.h
rename to tag/api/tag.h
index 55d83c35d..2ad4cc73c 100644
--- a/include/core/tag.h
+++ b/tag/api/tag.h
@@ -11,39 +11,44 @@
 #ifndef TAG_H
 #define TAG_H
 
-#define NSEC(t)     ((interval_t) (t * 1LL))
-#define NSECS(t)    ((interval_t) (t * 1LL))
-#define USEC(t)     ((interval_t) (t * 1000LL))
-#define USECS(t)    ((interval_t) (t * 1000LL))
-#define MSEC(t)     ((interval_t) (t * 1000000LL))
-#define MSECS(t)    ((interval_t) (t * 1000000LL))
-#define SEC(t)      ((interval_t) (t * 1000000000LL))
-#define SECS(t)     ((interval_t) (t * 1000000000LL))
-#define SECOND(t)   ((interval_t) (t * 1000000000LL))
-#define SECONDS(t)  ((interval_t) (t * 1000000000LL))
-#define MINUTE(t)   ((interval_t) (t * 60000000000LL))
-#define MINUTES(t)  ((interval_t) (t * 60000000000LL))
-#define HOUR(t)     ((interval_t) (t * 3600000000000LL))
-#define HOURS(t)    ((interval_t) (t * 3600000000000LL))
-#define DAY(t)      ((interval_t) (t * 86400000000000LL))
-#define DAYS(t)     ((interval_t) (t * 86400000000000LL))
-#define WEEK(t)     ((interval_t) (t * 604800000000000LL))
-#define WEEKS(t)    ((interval_t) (t * 604800000000000LL))
-
-#define NEVER ((interval_t) LLONG_MIN)
+#define NSEC(t) ((interval_t)(t * 1LL))
+#define NSECS(t) ((interval_t)(t * 1LL))
+#define USEC(t) ((interval_t)(t * 1000LL))
+#define USECS(t) ((interval_t)(t * 1000LL))
+#define MSEC(t) ((interval_t)(t * 1000000LL))
+#define MSECS(t) ((interval_t)(t * 1000000LL))
+#define SEC(t) ((interval_t)(t * 1000000000LL))
+#define SECS(t) ((interval_t)(t * 1000000000LL))
+#define SECOND(t) ((interval_t)(t * 1000000000LL))
+#define SECONDS(t) ((interval_t)(t * 1000000000LL))
+#define MINUTE(t) ((interval_t)(t * 60000000000LL))
+#define MINUTES(t) ((interval_t)(t * 60000000000LL))
+#define HOUR(t) ((interval_t)(t * 3600000000000LL))
+#define HOURS(t) ((interval_t)(t * 3600000000000LL))
+#define DAY(t) ((interval_t)(t * 86400000000000LL))
+#define DAYS(t) ((interval_t)(t * 86400000000000LL))
+#define WEEK(t) ((interval_t)(t * 604800000000000LL))
+#define WEEKS(t) ((interval_t)(t * 604800000000000LL))
+
+#define NEVER ((interval_t)LLONG_MIN)
 #define NEVER_MICROSTEP 0u
-#define FOREVER ((interval_t) LLONG_MAX)
+#define FOREVER ((interval_t)LLONG_MAX)
 #define FOREVER_MICROSTEP UINT_MAX
-#define NEVER_TAG (tag_t) { .time = NEVER, .microstep = NEVER_MICROSTEP }
+#define NEVER_TAG                                                                                                      \
+  (tag_t) { .time = NEVER, .microstep = NEVER_MICROSTEP }
 // Need a separate initializer expression to comply with some C compilers
-#define NEVER_TAG_INITIALIZER { NEVER,  NEVER_MICROSTEP }
-#define FOREVER_TAG (tag_t) { .time = FOREVER, .microstep = FOREVER_MICROSTEP }
+#define NEVER_TAG_INITIALIZER                                                                                          \
+  { NEVER, NEVER_MICROSTEP }
+#define FOREVER_TAG                                                                                                    \
+  (tag_t) { .time = FOREVER, .microstep = FOREVER_MICROSTEP }
 // Need a separate initializer expression to comply with some C compilers
-#define FOREVER_TAG_INITIALIZER { FOREVER,  FOREVER_MICROSTEP }
-#define ZERO_TAG (tag_t) { .time = 0LL, .microstep = 0u }
+#define FOREVER_TAG_INITIALIZER                                                                                        \
+  { FOREVER, FOREVER_MICROSTEP }
+#define ZERO_TAG                                                                                                       \
+  (tag_t) { .time = 0LL, .microstep = 0u }
 
 // Convenience for converting times
-#define BILLION ((instant_t) 1000000000LL)
+#define BILLION ((instant_t)1000000000LL)
 
 #include 
 #include 
@@ -71,8 +76,8 @@ typedef uint32_t microstep_t;
  * A tag is a time, microstep pair.
  */
 typedef struct {
-    instant_t time;
-    microstep_t microstep;
+  instant_t time;
+  microstep_t microstep;
 } tag_t;
 
 ////////////////  Functions
@@ -161,7 +166,7 @@ instant_t lf_time_logical(void* env);
  * @param env The environment from which we want the elapsed logical time.
  * @return A time interval.
  */
-interval_t lf_time_logical_elapsed(void *env);
+interval_t lf_time_logical_elapsed(void* env);
 
 /**
  * Return the current physical time in nanoseconds.
@@ -187,7 +192,6 @@ instant_t lf_time_physical_elapsed(void);
  */
 instant_t lf_time_start(void);
 
-
 /**
  * For user-friendly reporting of time values, the buffer length required.
  * This is calculated as follows, based on 64-bit time in nanoseconds:
@@ -199,21 +203,22 @@ instant_t lf_time_start(void);
  * Maximum number of nanoseconds is 999,999,999
  * Maximum number of microsteps is 4,294,967,295
  * Total number of characters for the above is 24.
- * Text descriptions and spaces add an additional 55,
- * for a total of 79. One more allows for a null terminator.
+ * Text descriptions and spaces add an additional 30,
+ * for a total of 54. One more allows for a null terminator.
+ * Round up to a power of two.
  */
-#define LF_TIME_BUFFER_LENGTH 80
+#define LF_TIME_BUFFER_LENGTH 64
 
 /**
  * Store into the specified buffer a string giving a human-readable
  * rendition of the specified time. The buffer must have length at least
  * equal to LF_TIME_BUFFER_LENGTH. The format is:
  * ```
- *    x weeks, x days, x hours, x minutes, x seconds, x unit
+ *    x weeks, x d, x hr, x min, x s, x unit
  * ```
  * where each `x` is a string of numbers with commas inserted if needed
- * every three numbers and `unit` is nanoseconds, microseconds, or
- * milliseconds.
+ * every three numbers and `unit` is ns, us, or
+ * ms.
  * @param buffer The buffer into which to write the string.
  * @param time The time to write.
  * @return The number of characters written (not counting the null terminator).
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 8c652b01a..89ed5d967 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -1 +1,2 @@
-add_library(test-lib STATIC src_gen_stub.c rand_utils.c)
\ No newline at end of file
+add_library(test-lib STATIC src_gen_stub.c rand_utils.c)
+target_link_libraries(test-lib PRIVATE lf::low-level-platform-api)
diff --git a/test/RTI/rti_common_test.c b/test/RTI/rti_common_test.c
index c34088cf2..86f3faf81 100644
--- a/test/RTI/rti_common_test.c
+++ b/test/RTI/rti_common_test.c
@@ -10,12 +10,13 @@
 // The RTI under test.
 static rti_common_t test_rti;
 
-/******************************************Start of Utility Functions******************************************************/
+/******************************************Start of Utility
+ * Functions******************************************************/
 
 /**
  * Free dynamically allocated memory on the scheduling node.
  * @param node The node to be freed
-*/
+ */
 void delete_scheduling_node(scheduling_node_t* node) {
   if (node->immediate_upstreams != NULL) {
     free(node->immediate_upstreams);
@@ -39,22 +40,19 @@ void delete_scheduling_node(scheduling_node_t* node) {
  * @param immediate_upstreams The array of IDs from immediate upstream nodes.
  * @param immediate_upstream_delays The array of delays from immediate upstream nodes.
  * @param immediate_downstreams The array of IDs from immediate downstream nodes.
-*/
-void set_scheduling_node(
-    int id, 
-    int num_immediate_upstreams, 
-    int num_immediate_downstreams, 
-    int* immediate_upstreams, 
-    interval_t* immediate_upstream_delays, 
-    int* immediate_downstreams) {
+ */
+void set_scheduling_node(int id, int num_immediate_upstreams, int num_immediate_downstreams, int* immediate_upstreams,
+                         interval_t* immediate_upstream_delays, int* immediate_downstreams) {
   // Save the number of immediate upstream and immediate downstream nodes.
   test_rti.scheduling_nodes[id]->num_immediate_upstreams = num_immediate_upstreams;
   test_rti.scheduling_nodes[id]->num_immediate_downstreams = num_immediate_downstreams;
 
   // If there is any immediate upstream nodes, store IDs and delays from the upstream nodes into the structure.
   if (test_rti.scheduling_nodes[id]->num_immediate_upstreams > 0) {
-    test_rti.scheduling_nodes[id]->immediate_upstreams = (uint16_t*) calloc(test_rti.scheduling_nodes[id]->num_immediate_upstreams, sizeof(uint16_t));
-    test_rti.scheduling_nodes[id]->immediate_upstream_delays = (interval_t*) calloc(test_rti.scheduling_nodes[id]->num_immediate_upstreams, sizeof(interval_t));
+    test_rti.scheduling_nodes[id]->immediate_upstreams =
+        (uint16_t*)calloc(test_rti.scheduling_nodes[id]->num_immediate_upstreams, sizeof(uint16_t));
+    test_rti.scheduling_nodes[id]->immediate_upstream_delays =
+        (interval_t*)calloc(test_rti.scheduling_nodes[id]->num_immediate_upstreams, sizeof(interval_t));
     for (int i = 0; i < test_rti.scheduling_nodes[id]->num_immediate_upstreams; i++) {
       test_rti.scheduling_nodes[id]->immediate_upstreams[i] = immediate_upstreams[i];
       test_rti.scheduling_nodes[id]->immediate_upstream_delays[i] = immediate_upstream_delays[i];
@@ -62,7 +60,8 @@ void set_scheduling_node(
   }
   // If there is any immediate downstream nodes, store IDs of the downstream nodes into the structure.
   if (test_rti.scheduling_nodes[id]->num_immediate_downstreams > 0) {
-    test_rti.scheduling_nodes[id]->immediate_downstreams = (uint16_t*) calloc(test_rti.scheduling_nodes[id]->num_immediate_downstreams, sizeof(uint16_t));
+    test_rti.scheduling_nodes[id]->immediate_downstreams =
+        (uint16_t*)calloc(test_rti.scheduling_nodes[id]->num_immediate_downstreams, sizeof(uint16_t));
     for (int i = 0; i < test_rti.scheduling_nodes[id]->num_immediate_downstreams; i++) {
       test_rti.scheduling_nodes[id]->immediate_downstreams[i] = immediate_downstreams[i];
     }
@@ -72,7 +71,7 @@ void set_scheduling_node(
 /**
  * Reset the RTI to re-construct the structure of nodes.
  * This includes freeing every scheduling node and the array of nodes.
-*/
+ */
 void reset_common_RTI() {
   // For every scheduling nodes, delete them and free themselves, too.
   for (uint16_t i = 0; i < test_rti.number_of_scheduling_nodes; i++) {
@@ -90,19 +89,20 @@ void reset_common_RTI() {
  * Set the number of nodes and create an array for scheduling nodes.
  * This includes resetting the previous RTI.
  * @param num_nodes The number of scheduling nodes.
-*/
+ */
 void set_common_RTI(uint16_t num_nodes) {
   reset_common_RTI();
 
   test_rti.number_of_scheduling_nodes = num_nodes;
 
   // Allocate memory for the scheduling nodes
-  test_rti.scheduling_nodes = (scheduling_node_t**)calloc(test_rti.number_of_scheduling_nodes, sizeof(scheduling_node_t*));
-  test_rti.min_delays = (tag_t*)calloc((num_nodes*num_nodes), sizeof(tag_t));
+  test_rti.scheduling_nodes =
+      (scheduling_node_t**)calloc(test_rti.number_of_scheduling_nodes, sizeof(scheduling_node_t*));
+  test_rti.min_delays = (tag_t*)calloc((num_nodes * num_nodes), sizeof(tag_t));
   for (uint16_t i = 0; i < test_rti.number_of_scheduling_nodes; i++) {
-      scheduling_node_t *scheduling_node = (scheduling_node_t *) malloc(sizeof(scheduling_node_t));
-      initialize_scheduling_node(scheduling_node, i);
-      test_rti.scheduling_nodes[i] = scheduling_node;
+    scheduling_node_t* scheduling_node = (scheduling_node_t*)malloc(sizeof(scheduling_node_t));
+    initialize_scheduling_node(scheduling_node, i);
+    test_rti.scheduling_nodes[i] = scheduling_node;
   }
 }
 
@@ -110,21 +110,22 @@ void set_common_RTI(uint16_t num_nodes) {
  * Set the state of every scheduling node. The state can be NOT_CONNECTED, GRANTED,
  * or PENDING.
  * @param state The state that every scheduling node will have.
-*/
+ */
 void set_state_of_nodes(scheduling_node_state_t state) {
   for (uint16_t i = 0; i < test_rti.number_of_scheduling_nodes; i++) {
     test_rti.scheduling_nodes[i]->state = state;
   }
 }
-/******************************************End of Utility Functions******************************************************/
+/******************************************End of Utility
+ * Functions******************************************************/
 
 void valid_cache() {
   set_common_RTI(2);
 
   // Construct the structure illustrated below.
   // node[0] --> node[1]
-  set_scheduling_node(0, 0, 1, NULL, NULL, (int[]) {1});
-  set_scheduling_node(1, 1, 0, (int[]) {0}, (interval_t[]) {NEVER}, NULL);
+  set_scheduling_node(0, 0, 1, NULL, NULL, (int[]){1});
+  set_scheduling_node(1, 1, 0, (int[]){0}, (interval_t[]){NEVER}, NULL);
 
   set_state_of_nodes(GRANTED);
 
@@ -140,8 +141,8 @@ void not_connected() {
 
   // Construct the structure illustrated below.
   // node[0] --> node[1]
-  set_scheduling_node(0, 0, 1, NULL, NULL, (int[]) {1});
-  set_scheduling_node(1, 1, 0, (int[]) {0}, (interval_t[]) {NEVER}, NULL);
+  set_scheduling_node(0, 0, 1, NULL, NULL, (int[]){1});
+  set_scheduling_node(1, 1, 0, (int[]){0}, (interval_t[]){NEVER}, NULL);
 
   set_state_of_nodes(NOT_CONNECTED);
 
@@ -156,8 +157,8 @@ static void two_nodes_no_delay() {
 
   // Construct the structure illustrated below.
   // node[0] --> node[1]
-  set_scheduling_node(0, 0, 1, NULL, NULL, (int[]) {1});
-  set_scheduling_node(1, 1, 0, (int[]) {0}, (interval_t[]) {NEVER}, NULL);
+  set_scheduling_node(0, 0, 1, NULL, NULL, (int[]){1});
+  set_scheduling_node(1, 1, 0, (int[]){0}, (interval_t[]){NEVER}, NULL);
 
   set_state_of_nodes(GRANTED);
 
@@ -167,10 +168,10 @@ static void two_nodes_no_delay() {
 
   update_min_delays_upstream(test_rti.scheduling_nodes[1]);
   assert(test_rti.scheduling_nodes[1]->num_all_upstreams == 1); // node[1] has one upstream nodes.
-  assert(test_rti.scheduling_nodes[1]->all_upstreams[0] == 0); // node[1]'s upstream node is node[0].
+  assert(test_rti.scheduling_nodes[1]->all_upstreams[0] == 0);  // node[1]'s upstream node is node[0].
   // The min_delay between them is node[0] and node[1] which means no delay.
   // assert(lf_tag_compare(test_rti.scheduling_nodes[1]->min_delays[0].min_delay, ZERO_TAG) == 0);
-  assert(lf_tag_compare(test_rti.min_delays[0*n + 1], ZERO_TAG) == 0);
+  assert(lf_tag_compare(test_rti.min_delays[0 * n + 1], ZERO_TAG) == 0);
 }
 
 static void two_nodes_zero_delay() {
@@ -179,8 +180,8 @@ static void two_nodes_zero_delay() {
 
   // Construct the structure illustrated below.
   // node[0] --/0/--> node[1]
-  set_scheduling_node(0, 0, 1, NULL, NULL, (int[]) {1});
-  set_scheduling_node(1, 1, 0, (int[]) {0}, (interval_t[]) {0}, NULL);
+  set_scheduling_node(0, 0, 1, NULL, NULL, (int[]){1});
+  set_scheduling_node(1, 1, 0, (int[]){0}, (interval_t[]){0}, NULL);
 
   set_state_of_nodes(GRANTED);
 
@@ -190,9 +191,9 @@ static void two_nodes_zero_delay() {
 
   update_min_delays_upstream(test_rti.scheduling_nodes[1]);
   assert(test_rti.scheduling_nodes[1]->num_all_upstreams == 1); // node[1] has one upstream nodes.
-  assert(test_rti.scheduling_nodes[1]->all_upstreams[0] == 0); // node[1]'s upstream node is node[0].
+  assert(test_rti.scheduling_nodes[1]->all_upstreams[0] == 0);  // node[1]'s upstream node is node[0].
   // The min_delay between node[0] and node[1] is (0, 1) which means zero delay.
-  assert(lf_tag_compare(test_rti.min_delays[0*n + 1], (tag_t) {.time = 0, .microstep = 1}) == 0);
+  assert(lf_tag_compare(test_rti.min_delays[0 * n + 1], (tag_t){.time = 0, .microstep = 1}) == 0);
 
   // Test update_all_downstreams
   update_all_downstreams(test_rti.scheduling_nodes[0]);
@@ -208,8 +209,8 @@ static void two_nodes_normal_delay() {
 
   // Construct the structure illustrated below.
   // node[0] --/1 nsec/--> node[1]
-  set_scheduling_node(0, 0, 1, NULL, NULL, (int[]) {1});
-  set_scheduling_node(1, 1, 0, (int[]) {0}, (interval_t[]) {NSEC(1)}, NULL);
+  set_scheduling_node(0, 0, 1, NULL, NULL, (int[]){1});
+  set_scheduling_node(1, 1, 0, (int[]){0}, (interval_t[]){NSEC(1)}, NULL);
 
   set_state_of_nodes(GRANTED);
 
@@ -218,9 +219,9 @@ static void two_nodes_normal_delay() {
 
   update_min_delays_upstream(test_rti.scheduling_nodes[1]);
   assert(test_rti.scheduling_nodes[1]->num_all_upstreams == 1); // node[1] has one upstream nodes.
-  assert(test_rti.scheduling_nodes[1]->all_upstreams[0] == 0); // node[1]'s upstream node is node[0].
+  assert(test_rti.scheduling_nodes[1]->all_upstreams[0] == 0);  // node[1]'s upstream node is node[0].
   // The min_delay between node[0] and node[1] is (1 nsec, 0).
-  assert(lf_tag_compare(test_rti.min_delays[0*n + 1], (tag_t) {.time = NSEC(1), .microstep = 0}) == 0);
+  assert(lf_tag_compare(test_rti.min_delays[0 * n + 1], (tag_t){.time = NSEC(1), .microstep = 0}) == 0);
 }
 
 static void multiple_nodes() {
@@ -229,49 +230,49 @@ static void multiple_nodes() {
 
   // Construct the structure illustrated below.
   // node[0] --/1 nsec/--> node[1] --/0/--> node[2] --/2 nsec/--> node[3]
-  set_scheduling_node(0, 0, 1, NULL, NULL, (int[]) {1});
-  set_scheduling_node(1, 1, 1, (int[]) {0}, (interval_t[]) {NSEC(1)}, (int[]) {2});
-  set_scheduling_node(2, 1, 1, (int[]) {1}, (interval_t[]) {0}, (int[]) {3});
-  set_scheduling_node(3, 1, 0, (int[]) {2}, (interval_t[]) {NSEC(2)}, NULL);
+  set_scheduling_node(0, 0, 1, NULL, NULL, (int[]){1});
+  set_scheduling_node(1, 1, 1, (int[]){0}, (interval_t[]){NSEC(1)}, (int[]){2});
+  set_scheduling_node(2, 1, 1, (int[]){1}, (interval_t[]){0}, (int[]){3});
+  set_scheduling_node(3, 1, 0, (int[]){2}, (interval_t[]){NSEC(2)}, NULL);
 
   set_state_of_nodes(GRANTED);
 
   // Test update_min_delays_upstream
   update_min_delays_upstream(test_rti.scheduling_nodes[2]);
   assert(test_rti.scheduling_nodes[2]->num_all_upstreams == 2); // node[2] has two upstream nodes.
-  assert(test_rti.scheduling_nodes[2]->all_upstreams[0] == 0); // node[0] is an upstream node of node[2].
+  assert(test_rti.scheduling_nodes[2]->all_upstreams[0] == 0);  // node[0] is an upstream node of node[2].
   // The min_delay between node[0] and node[2] is (1 nsec, 1) = 1 nsec + zero delay.
-  assert(lf_tag_compare(test_rti.min_delays[0*n + 2], (tag_t) {NSEC(1), 1}) == 0);
+  assert(lf_tag_compare(test_rti.min_delays[0 * n + 2], (tag_t){NSEC(1), 1}) == 0);
   assert(test_rti.scheduling_nodes[2]->all_upstreams[1] == 1); // node[1] is an upstream node of node[2].
   // The min_delay between node[1] and node[2] is (0, 1), which denotes zero delay.
-  assert(lf_tag_compare(test_rti.min_delays[1*n + 2], (tag_t) {0, 1}) == 0);
+  assert(lf_tag_compare(test_rti.min_delays[1 * n + 2], (tag_t){0, 1}) == 0);
 
   update_min_delays_upstream(test_rti.scheduling_nodes[3]);
   assert(test_rti.scheduling_nodes[3]->num_all_upstreams == 3); // node[3] has three upstream nodes.
-  assert(test_rti.scheduling_nodes[3]->all_upstreams[0] == 0); // node[0] is an upstream node of node[3].
+  assert(test_rti.scheduling_nodes[3]->all_upstreams[0] == 0);  // node[0] is an upstream node of node[3].
   // The min_delay between node[0] and node[3] is (3 nsec, 0) = 1 nsec + zero_delay + 2 nsec.
-  assert(lf_tag_compare(test_rti.min_delays[0*n + 3], (tag_t) {NSEC(3), 0}) == 0);
+  assert(lf_tag_compare(test_rti.min_delays[0 * n + 3], (tag_t){NSEC(3), 0}) == 0);
   assert(test_rti.scheduling_nodes[3]->all_upstreams[1] == 1); // node[1] is an upstream node of node[3].
   // The min_delay between node[1] and node[3] is (2 nsec, 0) = zero_delay + 2 nsec.
-  assert(lf_tag_compare(test_rti.min_delays[1*n + 3], (tag_t) {NSEC(2), 0}) == 0);
+  assert(lf_tag_compare(test_rti.min_delays[1 * n + 3], (tag_t){NSEC(2), 0}) == 0);
   assert(test_rti.scheduling_nodes[3]->all_upstreams[2] == 2); // node[2] is an upstream node of node[3].
   // The min_delay between node[2] and node[3] is (2 nsec, 0).
-  assert(lf_tag_compare(test_rti.min_delays[2*n + 3], (tag_t) {NSEC(2), 0}) == 0);
+  assert(lf_tag_compare(test_rti.min_delays[2 * n + 3], (tag_t){NSEC(2), 0}) == 0);
 
   // Test update_all_downstreams
   update_all_downstreams(test_rti.scheduling_nodes[0]);
   assert(test_rti.scheduling_nodes[0]->num_all_downstreams == 3); // node[0] has three downstream nodes.
-  assert(test_rti.scheduling_nodes[0]->all_downstreams[0] == 1); // node[1] is a downstream node of node[3].
-  assert(test_rti.scheduling_nodes[0]->all_downstreams[1] == 2); // node[2] is a downstream node of node[3].
-  assert(test_rti.scheduling_nodes[0]->all_downstreams[2] == 3); // node[3] is a downstream node of node[3].
+  assert(test_rti.scheduling_nodes[0]->all_downstreams[0] == 1);  // node[1] is a downstream node of node[3].
+  assert(test_rti.scheduling_nodes[0]->all_downstreams[1] == 2);  // node[2] is a downstream node of node[3].
+  assert(test_rti.scheduling_nodes[0]->all_downstreams[2] == 3);  // node[3] is a downstream node of node[3].
 
   update_all_downstreams(test_rti.scheduling_nodes[1]);
   assert(test_rti.scheduling_nodes[1]->num_all_downstreams == 2); // node[1] has two downstream nodes.
-  assert(test_rti.scheduling_nodes[1]->all_downstreams[0] == 2); // node[2] is a downstream node of node[3].
-  assert(test_rti.scheduling_nodes[1]->all_downstreams[1] == 3); // node[3] is a downstream node of node[3].
+  assert(test_rti.scheduling_nodes[1]->all_downstreams[0] == 2);  // node[2] is a downstream node of node[3].
+  assert(test_rti.scheduling_nodes[1]->all_downstreams[1] == 3);  // node[3] is a downstream node of node[3].
 }
 
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
   initialize_rti_common(&test_rti);
 
   // Tests for the function update_min_delays_upstream() and update_all_downstreams()
diff --git a/test/Tests.cmake b/test/Tests.cmake
index fef6dc0aa..4130b7c09 100644
--- a/test/Tests.cmake
+++ b/test/Tests.cmake
@@ -4,6 +4,7 @@ include(CTest)
 set(TestLib test-lib)
 set(TEST_DIR ${CMAKE_CURRENT_SOURCE_DIR}/test)
 set(TEST_SUFFIX test.c)  # Files that are tests must have names ending with TEST_SUFFIX.
+set(LF_ROOT ${CMAKE_CURRENT_LIST_DIR}/..)
 
 # Add the test files found in DIR to TEST_FILES.
 function(add_test_dir DIR)
@@ -41,10 +42,12 @@ endforeach(FILE ${TEST_FILES})
 if (NOT DEFINED LF_SINGLE_THREADED)
     # Check which system we are running on to select the correct platform support
     # file and assign the file's path to LF_PLATFORM_FILE
+    # FIXME: This is effectively a second build script for the RTI that we have to maintain. This is code duplication.
+    # FIXME: We should not be reaching into the platform directory and bypassing its CMake build.
     if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
-      set(LF_PLATFORM_FILE ${CoreLibPath}/platform/lf_linux_support.c)
+      set(LF_PLATFORM_FILE ${LF_ROOT}/low_level_platform/impl/src/lf_linux_support.c)
     elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin")
-      set(LF_PLATFORM_FILE ${CoreLibPath}/platform/lf_macos_support.c)
+      set(LF_PLATFORM_FILE ${LF_ROOT}/low_level_platform/impl/src/lf_macos_support.c)
     else()
       message(FATAL_ERROR "Your platform is not supported! RTI supports Linux and MacOS.")
     endif()
@@ -57,10 +60,11 @@ if (NOT DEFINED LF_SINGLE_THREADED)
       ${TEST_DIR}/RTI/rti_common_test.c
       ${RTI_DIR}/rti_common.c
       ${RTI_DIR}/rti_remote.c
-      ${CoreLibPath}/trace.c
+      ${CoreLibPath}/tracepoint.c
       ${LF_PLATFORM_FILE}
-      ${CoreLibPath}/platform/lf_atomic_gcc_clang.c
-      ${CoreLibPath}/platform/lf_unix_clock_support.c
+      ${LF_ROOT}/low_level_platform/impl/src/platform_internal.c
+      ${LF_ROOT}/low_level_platform/impl/src/lf_atomic_gcc_clang.c
+      ${LF_ROOT}/low_level_platform/impl/src/lf_unix_clock_support.c
       ${CoreLibPath}/utils/util.c
       ${CoreLibPath}/tag.c
       ${CoreLibPath}/clock.c
@@ -74,7 +78,8 @@ if (NOT DEFINED LF_SINGLE_THREADED)
     target_include_directories(rti_common_test PUBLIC ${IncludeDir})
     target_include_directories(rti_common_test PUBLIC ${IncludeDir}/federated)
     target_include_directories(rti_common_test PUBLIC ${IncludeDir}/modal_models)
-    target_include_directories(rti_common_test PUBLIC ${IncludeDir}/platform)
+    target_link_libraries(rti_common_test lf::low-level-platform-api)
+    target_link_libraries(rti_common_test lf::logging-api)
     target_include_directories(rti_common_test PUBLIC ${IncludeDir}/utils)
     # Set the STANDALONE_RTI flag to include the rti_remote and rti_common.
     target_compile_definitions(rti_common_test PUBLIC STANDALONE_RTI=1)
diff --git a/test/general/tag_test.c b/test/general/tag_test.c
index 279a79ec3..aff2d6875 100644
--- a/test/general/tag_test.c
+++ b/test/general/tag_test.c
@@ -2,7 +2,7 @@
 #include 
 #include "lf_types.h"
 
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
   char* buf = malloc(sizeof(char) * 128);
   lf_readable_time(buf, 0);
   printf("%s", buf);
diff --git a/test/general/utils/hashmap_test.c b/test/general/utils/hashmap_test.c
index ac9b4ef75..2134071e6 100644
--- a/test/general/utils/hashmap_test.c
+++ b/test/general/utils/hashmap_test.c
@@ -15,36 +15,35 @@ static hashmap_object2int_entry_t mock[CAPACITY];
 static size_t mock_size = 0;
 
 void test_put(hashmap_object2int_t* h) {
-    void* key = NULL;
-    while (!key) key = NULL + (rand() % CAPACITY);  // Generate a dummy pointer.
-    int value = rand();
-    hashmap_object2int_entry_t entry = (hashmap_object2int_entry_t) { .key = key, .value = value };
-    hashmap_object2int_put(h, entry.key, entry.value);
-    // printf("Putting (%p, %d).\n", entry.key, entry.value);
-    mock[mock_size++] = entry;
+  void* key = NULL;
+  while (!key)
+    key = NULL + (rand() % CAPACITY); // Generate a dummy pointer.
+  int value = rand();
+  hashmap_object2int_entry_t entry = (hashmap_object2int_entry_t){.key = key, .value = value};
+  hashmap_object2int_put(h, entry.key, entry.value);
+  // printf("Putting (%p, %d).\n", entry.key, entry.value);
+  mock[mock_size++] = entry;
 }
 
 void test_get(hashmap_object2int_t* h) {
-    if (!mock_size) return;
-    size_t r = rand() % mock_size;
-    hashmap_object2int_entry_t desired = mock[r];
-    int found = hashmap_object2int_get(h, desired.key);
-    // printf("Getting (%p, %d) from %d.\n", desired.key, desired.value, r);
-    if (desired.value != found) {
-        // It is possible that two distinct values were associated with the same key. Search the
-        // "mock" array to check if this is the case.
-        for (size_t i = mock_size - 1; i >= 0; i--) {
-            if (mock[i].key == desired.key) {
-                if (mock[i].value == found) return; // Everything is OK.
-                break;
-            }
-        }
-        lf_print_error_and_exit(
-            "Expected %d but got %d when getting from a hashmap.\n",
-            desired.value,
-            found
-        );
+  if (!mock_size)
+    return;
+  size_t r = rand() % mock_size;
+  hashmap_object2int_entry_t desired = mock[r];
+  int found = hashmap_object2int_get(h, desired.key);
+  // printf("Getting (%p, %d) from %d.\n", desired.key, desired.value, r);
+  if (desired.value != found) {
+    // It is possible that two distinct values were associated with the same key. Search the
+    // "mock" array to check if this is the case.
+    for (size_t i = mock_size - 1; i >= 0; i--) {
+      if (mock[i].key == desired.key) {
+        if (mock[i].value == found)
+          return; // Everything is OK.
+        break;
+      }
     }
+    lf_print_error_and_exit("Expected %d but got %d when getting from a hashmap.\n", desired.value, found);
+  }
 }
 
 /**
@@ -55,32 +54,29 @@ void test_get(hashmap_object2int_t* h) {
  * which each of two actions are performed, expressed as percents.
  */
 void run_test(hashmap_object2int_t* h, int* distribution) {
-    int result = 1;
-    int r = rand();
-    int choice = (r < 0 ? -r : r) % 100;
-    if ((choice = choice - distribution[0]) < 0) {
-        test_put(h);
-    } else {
-        test_get(h);
-    }
+  int result = 1;
+  int r = rand();
+  int choice = (r < 0 ? -r : r) % 100;
+  if ((choice = choice - distribution[0]) < 0) {
+    test_put(h);
+  } else {
+    test_get(h);
+  }
 }
 
 int main() {
-    srand(RANDOM_SEED);
-    for (int i = 0; i < N; i++) {
-        int perturbed[2];
-        perturb(distribution, 2, perturbed);
-        LF_PRINT_DEBUG(
-            "Distribution: %d, %d",
-            perturbed[0], perturbed[1]
-        );
-        hashmap_object2int_t* h = hashmap_object2int_new(CAPACITY, NULL);
-        int j = rand() % (CAPACITY / 2);
-        while (j--) {
-            run_test(h, perturbed);
-        }
-        hashmap_object2int_free(h);
-        mock_size = 0;
+  srand(RANDOM_SEED);
+  for (int i = 0; i < N; i++) {
+    int perturbed[2];
+    perturb(distribution, 2, perturbed);
+    LF_PRINT_DEBUG("Distribution: %d, %d", perturbed[0], perturbed[1]);
+    hashmap_object2int_t* h = hashmap_object2int_new(CAPACITY, NULL);
+    int j = rand() % (CAPACITY / 2);
+    while (j--) {
+      run_test(h, perturbed);
     }
-    return 0;
+    hashmap_object2int_free(h);
+    mock_size = 0;
+  }
+  return 0;
 }
diff --git a/test/general/utils/hashset_test.c b/test/general/utils/hashset_test.c
index 3a59390e7..4b425b848 100644
--- a/test/general/utils/hashset_test.c
+++ b/test/general/utils/hashset_test.c
@@ -4,220 +4,211 @@
 #include "hashset/hashset.h"
 #include "hashset/hashset_itr.h"
 
-static void trivial(void)
-{
-    char *missing = "missing";
-    char *items[] = {"zero", "one", "two", "three", NULL};
-    char *foo = "foo";
-    size_t ii, nitems = 4;
-    hashset_t set = hashset_create(3);
-
-    if (set == NULL) {
-        fprintf(stderr, "failed to create hashset instance\n");
-        abort();
-    }
-
-    for (ii = 0; ii < nitems; ++ii) {
-        hashset_add(set, items[ii]);
-    }
-
-    for (ii = 0; ii < nitems; ++ii) {
-        assert(hashset_is_member(set, items[ii]));
-    }
-    assert(hashset_is_member(set, missing) == 0);
-
-    assert(hashset_remove(set, items[1]) == 1);
-    assert(hashset_num_items(set) == 3);
-    assert(hashset_remove(set, items[1]) == 0);
-
-    assert(hashset_add(set, foo) == 1);
-    assert(hashset_add(set, foo) == 0);
-
-    hashset_destroy(set);
+static void trivial(void) {
+  char* missing = "missing";
+  char* items[] = {"zero", "one", "two", "three", NULL};
+  char* foo = "foo";
+  size_t ii, nitems = 4;
+  hashset_t set = hashset_create(3);
+
+  if (set == NULL) {
+    fprintf(stderr, "failed to create hashset instance\n");
+    abort();
+  }
+
+  for (ii = 0; ii < nitems; ++ii) {
+    hashset_add(set, items[ii]);
+  }
+
+  for (ii = 0; ii < nitems; ++ii) {
+    assert(hashset_is_member(set, items[ii]));
+  }
+  assert(hashset_is_member(set, missing) == 0);
+
+  assert(hashset_remove(set, items[1]) == 1);
+  assert(hashset_num_items(set) == 3);
+  assert(hashset_remove(set, items[1]) == 0);
+
+  assert(hashset_add(set, foo) == 1);
+  assert(hashset_add(set, foo) == 0);
+
+  hashset_destroy(set);
 }
 
-static void test_gaps(void)
-{
-    hashset_t set = hashset_create(3);
+static void test_gaps(void) {
+  hashset_t set = hashset_create(3);
 
-    /* fill the hashset */
-    hashset_add(set, (void *)0xbabe);
-    hashset_add(set, (void *)0xbeef);
-    hashset_add(set, (void *)0xbad);
-    hashset_add(set, (void *)0xf00d);
-    /* 0xf00d (nil) (nil) (nil) (nil) 0xbad 0xbabe 0xbeef */
+  /* fill the hashset */
+  hashset_add(set, (void*)0xbabe);
+  hashset_add(set, (void*)0xbeef);
+  hashset_add(set, (void*)0xbad);
+  hashset_add(set, (void*)0xf00d);
+  /* 0xf00d (nil) (nil) (nil) (nil) 0xbad 0xbabe 0xbeef */
 
-    /* make a gap */
-    hashset_remove(set, (void *)0xbeef);
-    /* 0xf00d (nil) (nil) (nil) (nil) 0xbad 0xbabe 0x1 */
+  /* make a gap */
+  hashset_remove(set, (void*)0xbeef);
+  /* 0xf00d (nil) (nil) (nil) (nil) 0xbad 0xbabe 0x1 */
 
-    /* check that 0xf00d is still reachable */
-    assert(hashset_is_member(set, (void *)0xf00d));
+  /* check that 0xf00d is still reachable */
+  assert(hashset_is_member(set, (void*)0xf00d));
 
-    /* add 0xbeef back */
-    hashset_add(set, (void *)0xbeef);
-    /* 0xf00d (nil) (nil) (nil) (nil) 0xbad 0xbabe 0xbeef */
+  /* add 0xbeef back */
+  hashset_add(set, (void*)0xbeef);
+  /* 0xf00d (nil) (nil) (nil) (nil) 0xbad 0xbabe 0xbeef */
 
-    /* verify */
-    assert(hashset_is_member(set, (void *)0xbeef));
-    assert(hashset_is_member(set, (void *)0xf00d));
+  /* verify */
+  assert(hashset_is_member(set, (void*)0xbeef));
+  assert(hashset_is_member(set, (void*)0xf00d));
 }
 
-static void test_exceptions(void)
-{
-    hashset_t set = hashset_create(3);
+static void test_exceptions(void) {
+  hashset_t set = hashset_create(3);
 
-    assert(hashset_add(set, (void *)0) == -1);
-    assert(hashset_add(set, (void *)1) == -1);
+  assert(hashset_add(set, (void*)0) == -1);
+  assert(hashset_add(set, (void*)1) == -1);
 }
 
-static void test_rehashing_items_placed_beyond_nitems(void)
-{
-    hashset_t set = hashset_create(3);
-
-    assert(hashset_add(set, (void *)20644128) == 1);
-    assert(hashset_add(set, (void *)21747760) == 1);
-    assert(hashset_add(set, (void *)17204864) == 1);
-    assert(hashset_add(set, (void *)22937440) == 1);
-    assert(hashset_add(set, (void *)14734272) == 1);
-    assert(hashset_add(set, (void *)13948320) == 1);
-    assert(hashset_add(set, (void *)18116496) == 1);
-    assert(hashset_add(set, (void *)18229952) == 1);
-    assert(hashset_add(set, (void *)20390128) == 1);
-    assert(hashset_add(set, (void *)23523264) == 1);
-    assert(hashset_add(set, (void *)22866784) == 1);
-    assert(hashset_add(set, (void *)17501248) == 1);
-    assert(hashset_add(set, (void *)17168832) == 1);
-    assert(hashset_add(set, (void *)13389824) == 1);
-    assert(hashset_add(set, (void *)15795136) == 1);
-    assert(hashset_add(set, (void *)15154464) == 1);
-    assert(hashset_add(set, (void *)22507840) == 1);
-    assert(hashset_add(set, (void *)22977920) == 1);
-    assert(hashset_add(set, (void *)20527584) == 1);
-    assert(hashset_add(set, (void *)21557872) == 1);
-    assert(hashset_add(set, (void *)23089952) == 1);
-    assert(hashset_add(set, (void *)21606240) == 1);
-    assert(hashset_add(set, (void *)25168704) == 1);
-    assert(hashset_add(set, (void *)25198096) == 1);
-    assert(hashset_add(set, (void *)25248000) == 1);
-    assert(hashset_add(set, (void *)25260976) == 1);
-    assert(hashset_add(set, (void *)25905520) == 1);
-    assert(hashset_add(set, (void *)25934608) == 1);
-    assert(hashset_add(set, (void *)26015264) == 1);
-    assert(hashset_add(set, (void *)26044352) == 1);
-    assert(hashset_add(set, (void *)24784800) == 1);
-    assert(hashset_add(set, (void *)24813888) == 1);
-    assert(hashset_add(set, (void *)24663936) == 1);
-    assert(hashset_add(set, (void *)24693536) == 1);
-    assert(hashset_add(set, (void *)24743792) == 1);
-    assert(hashset_add(set, (void *)24756480) == 1);
-
-    assert(hashset_is_member(set, (void *)20644128) == 1);
-    assert(hashset_is_member(set, (void *)21747760) == 1);
-    assert(hashset_is_member(set, (void *)17204864) == 1);
-    assert(hashset_is_member(set, (void *)22937440) == 1);
-    assert(hashset_is_member(set, (void *)14734272) == 1);
-    assert(hashset_is_member(set, (void *)13948320) == 1);
-    assert(hashset_is_member(set, (void *)18116496) == 1);
-    assert(hashset_is_member(set, (void *)18229952) == 1);
-    assert(hashset_is_member(set, (void *)20390128) == 1);
-    assert(hashset_is_member(set, (void *)23523264) == 1);
-    assert(hashset_is_member(set, (void *)22866784) == 1);
-    assert(hashset_is_member(set, (void *)17501248) == 1);
-    assert(hashset_is_member(set, (void *)17168832) == 1);
-    assert(hashset_is_member(set, (void *)13389824) == 1);
-    assert(hashset_is_member(set, (void *)15795136) == 1);
-    assert(hashset_is_member(set, (void *)15154464) == 1);
-    assert(hashset_is_member(set, (void *)22507840) == 1);
-    assert(hashset_is_member(set, (void *)22977920) == 1);
-    assert(hashset_is_member(set, (void *)20527584) == 1);
-    assert(hashset_is_member(set, (void *)21557872) == 1);
-    assert(hashset_is_member(set, (void *)23089952) == 1);
-    assert(hashset_is_member(set, (void *)21606240) == 1);
-    assert(hashset_is_member(set, (void *)25168704) == 1);
-    assert(hashset_is_member(set, (void *)25198096) == 1);
-    assert(hashset_is_member(set, (void *)25248000) == 1);
-    assert(hashset_is_member(set, (void *)25260976) == 1);
-    assert(hashset_is_member(set, (void *)25905520) == 1);
-    assert(hashset_is_member(set, (void *)25934608) == 1);
-    assert(hashset_is_member(set, (void *)26015264) == 1);
-    assert(hashset_is_member(set, (void *)26044352) == 1);
-    assert(hashset_is_member(set, (void *)24784800) == 1);
-    assert(hashset_is_member(set, (void *)24813888) == 1);
-    assert(hashset_is_member(set, (void *)24663936) == 1);
-    assert(hashset_is_member(set, (void *)24693536) == 1);
-    assert(hashset_is_member(set, (void *)24743792) == 1);
-    assert(hashset_is_member(set, (void *)24756480) == 1);
+static void test_rehashing_items_placed_beyond_nitems(void) {
+  hashset_t set = hashset_create(3);
+
+  assert(hashset_add(set, (void*)20644128) == 1);
+  assert(hashset_add(set, (void*)21747760) == 1);
+  assert(hashset_add(set, (void*)17204864) == 1);
+  assert(hashset_add(set, (void*)22937440) == 1);
+  assert(hashset_add(set, (void*)14734272) == 1);
+  assert(hashset_add(set, (void*)13948320) == 1);
+  assert(hashset_add(set, (void*)18116496) == 1);
+  assert(hashset_add(set, (void*)18229952) == 1);
+  assert(hashset_add(set, (void*)20390128) == 1);
+  assert(hashset_add(set, (void*)23523264) == 1);
+  assert(hashset_add(set, (void*)22866784) == 1);
+  assert(hashset_add(set, (void*)17501248) == 1);
+  assert(hashset_add(set, (void*)17168832) == 1);
+  assert(hashset_add(set, (void*)13389824) == 1);
+  assert(hashset_add(set, (void*)15795136) == 1);
+  assert(hashset_add(set, (void*)15154464) == 1);
+  assert(hashset_add(set, (void*)22507840) == 1);
+  assert(hashset_add(set, (void*)22977920) == 1);
+  assert(hashset_add(set, (void*)20527584) == 1);
+  assert(hashset_add(set, (void*)21557872) == 1);
+  assert(hashset_add(set, (void*)23089952) == 1);
+  assert(hashset_add(set, (void*)21606240) == 1);
+  assert(hashset_add(set, (void*)25168704) == 1);
+  assert(hashset_add(set, (void*)25198096) == 1);
+  assert(hashset_add(set, (void*)25248000) == 1);
+  assert(hashset_add(set, (void*)25260976) == 1);
+  assert(hashset_add(set, (void*)25905520) == 1);
+  assert(hashset_add(set, (void*)25934608) == 1);
+  assert(hashset_add(set, (void*)26015264) == 1);
+  assert(hashset_add(set, (void*)26044352) == 1);
+  assert(hashset_add(set, (void*)24784800) == 1);
+  assert(hashset_add(set, (void*)24813888) == 1);
+  assert(hashset_add(set, (void*)24663936) == 1);
+  assert(hashset_add(set, (void*)24693536) == 1);
+  assert(hashset_add(set, (void*)24743792) == 1);
+  assert(hashset_add(set, (void*)24756480) == 1);
+
+  assert(hashset_is_member(set, (void*)20644128) == 1);
+  assert(hashset_is_member(set, (void*)21747760) == 1);
+  assert(hashset_is_member(set, (void*)17204864) == 1);
+  assert(hashset_is_member(set, (void*)22937440) == 1);
+  assert(hashset_is_member(set, (void*)14734272) == 1);
+  assert(hashset_is_member(set, (void*)13948320) == 1);
+  assert(hashset_is_member(set, (void*)18116496) == 1);
+  assert(hashset_is_member(set, (void*)18229952) == 1);
+  assert(hashset_is_member(set, (void*)20390128) == 1);
+  assert(hashset_is_member(set, (void*)23523264) == 1);
+  assert(hashset_is_member(set, (void*)22866784) == 1);
+  assert(hashset_is_member(set, (void*)17501248) == 1);
+  assert(hashset_is_member(set, (void*)17168832) == 1);
+  assert(hashset_is_member(set, (void*)13389824) == 1);
+  assert(hashset_is_member(set, (void*)15795136) == 1);
+  assert(hashset_is_member(set, (void*)15154464) == 1);
+  assert(hashset_is_member(set, (void*)22507840) == 1);
+  assert(hashset_is_member(set, (void*)22977920) == 1);
+  assert(hashset_is_member(set, (void*)20527584) == 1);
+  assert(hashset_is_member(set, (void*)21557872) == 1);
+  assert(hashset_is_member(set, (void*)23089952) == 1);
+  assert(hashset_is_member(set, (void*)21606240) == 1);
+  assert(hashset_is_member(set, (void*)25168704) == 1);
+  assert(hashset_is_member(set, (void*)25198096) == 1);
+  assert(hashset_is_member(set, (void*)25248000) == 1);
+  assert(hashset_is_member(set, (void*)25260976) == 1);
+  assert(hashset_is_member(set, (void*)25905520) == 1);
+  assert(hashset_is_member(set, (void*)25934608) == 1);
+  assert(hashset_is_member(set, (void*)26015264) == 1);
+  assert(hashset_is_member(set, (void*)26044352) == 1);
+  assert(hashset_is_member(set, (void*)24784800) == 1);
+  assert(hashset_is_member(set, (void*)24813888) == 1);
+  assert(hashset_is_member(set, (void*)24663936) == 1);
+  assert(hashset_is_member(set, (void*)24693536) == 1);
+  assert(hashset_is_member(set, (void*)24743792) == 1);
+  assert(hashset_is_member(set, (void*)24756480) == 1);
 }
 
-
-static void test_iterating(void)
-{
-    hashset_t set = hashset_create(3);
-    hashset_itr_t iter = hashset_iterator(set);
-    unsigned short step;
-
-    /* fill the hashset */
-    hashset_add(set, (void *)"Bob");
-    hashset_add(set, (void *)"Steve");
-    hashset_add(set, (void *)"Karen");
-    hashset_add(set, (void *)"Ellen");
-
-    step = 0;
-
-    // Check contents independent of ordering.
-    while(hashset_iterator_next(iter) >= 0) {
-        char* value = (char *)hashset_iterator_value(iter);
-        if (strcmp("Bob", value) == 0) {
-            assert((step & 1) == 0);
-            step = step | 1;
-        } else if (strcmp("Steve", value) == 0) {
-            assert((step & 2) == 0);
-            step = step | 2;
-        } else if (strcmp("Karen", value) == 0) {
-            assert((step & 4) == 0);
-            step = step | 4;
-        } else if (strcmp("Ellen", value) == 0) {
-            assert((step & 8) == 0);
-            step = step | 8;
-        }
+static void test_iterating(void) {
+  hashset_t set = hashset_create(3);
+  hashset_itr_t iter = hashset_iterator(set);
+  unsigned short step;
+
+  /* fill the hashset */
+  hashset_add(set, (void*)"Bob");
+  hashset_add(set, (void*)"Steve");
+  hashset_add(set, (void*)"Karen");
+  hashset_add(set, (void*)"Ellen");
+
+  step = 0;
+
+  // Check contents independent of ordering.
+  while (hashset_iterator_next(iter) >= 0) {
+    char* value = (char*)hashset_iterator_value(iter);
+    if (strcmp("Bob", value) == 0) {
+      assert((step & 1) == 0);
+      step = step | 1;
+    } else if (strcmp("Steve", value) == 0) {
+      assert((step & 2) == 0);
+      step = step | 2;
+    } else if (strcmp("Karen", value) == 0) {
+      assert((step & 4) == 0);
+      step = step | 4;
+    } else if (strcmp("Ellen", value) == 0) {
+      assert((step & 8) == 0);
+      step = step | 8;
     }
-    assert(hashset_iterator_has_next(iter) == 0);
-    assert(hashset_iterator_next(iter) == -1);
-    assert(step == 0xf);
+  }
+  assert(hashset_iterator_has_next(iter) == 0);
+  assert(hashset_iterator_next(iter) == -1);
+  assert(step == 0xf);
 }
 
-static void test_fill_with_deleted_items()
-{
-    char *s = "some string";
-    hashset_t set = hashset_create(3);
-    if (set == NULL)
-        abort();
-
-    /* fill `set` with deleted items */
-    for (int i = 0; i < 8; ++i)
-    {
-        hashset_add(set, s + i);
-        hashset_remove(set, s + i);
-    }
+static void test_fill_with_deleted_items() {
+  char* s = "some string";
+  hashset_t set = hashset_create(3);
+  if (set == NULL)
+    abort();
+
+  /* fill `set` with deleted items */
+  for (int i = 0; i < 8; ++i) {
+    hashset_add(set, s + i);
+    hashset_remove(set, s + i);
+  }
 
-    /* this should not cause an infinite loop */
-    assert(hashset_is_member(set, s) == 0);
+  /* this should not cause an infinite loop */
+  assert(hashset_is_member(set, s) == 0);
 
-    hashset_destroy(set);
+  hashset_destroy(set);
 }
 
-int main(int argc, char *argv[])
-{
-    trivial();
-    test_gaps();
-    test_exceptions();
-    test_rehashing_items_placed_beyond_nitems();
-    test_iterating();
-    test_fill_with_deleted_items();
-
-    (void)argc;
-    (void)argv;
-    printf("Tests passed.\n");
-    return 0;
+int main(int argc, char* argv[]) {
+  trivial();
+  test_gaps();
+  test_exceptions();
+  test_rehashing_items_placed_beyond_nitems();
+  test_iterating();
+  test_fill_with_deleted_items();
+
+  (void)argc;
+  (void)argv;
+  printf("Tests passed.\n");
+  return 0;
 }
diff --git a/test/general/utils/pqueue_test.c b/test/general/utils/pqueue_test.c
index e0f252c7d..da8e2c6b7 100644
--- a/test/general/utils/pqueue_test.c
+++ b/test/general/utils/pqueue_test.c
@@ -6,99 +6,99 @@
 #include "tag.h"
 
 static void trivial(void) {
-    // Create an event queue.
-    pqueue_tag_t* q = pqueue_tag_init(1);
-    assert(q != NULL);
-    assert(pqueue_is_valid((pqueue_t*)q));
-    pqueue_print((pqueue_t*)q, NULL);
-    pqueue_tag_free(q);
+  // Create an event queue.
+  pqueue_tag_t* q = pqueue_tag_init(1);
+  assert(q != NULL);
+  assert(pqueue_is_valid((pqueue_t*)q));
+  pqueue_print((pqueue_t*)q, NULL);
+  pqueue_tag_free(q);
 }
 
 static void insert_on_queue(pqueue_tag_t* q) {
-    tag_t t1 = {.time = USEC(3), .microstep = 0};
-    tag_t t2 = {.time = USEC(2), .microstep = 1};
-    tag_t t3 = {.time = USEC(2), .microstep = 0};
-    tag_t t4 = {.time = USEC(1), .microstep = 2};
-    assert(!pqueue_tag_insert_tag(q, t1));
-    assert(!pqueue_tag_insert_tag(q, t2));
-    assert(!pqueue_tag_insert_tag(q, t3));
+  tag_t t1 = {.time = USEC(3), .microstep = 0};
+  tag_t t2 = {.time = USEC(2), .microstep = 1};
+  tag_t t3 = {.time = USEC(2), .microstep = 0};
+  tag_t t4 = {.time = USEC(1), .microstep = 2};
+  assert(!pqueue_tag_insert_tag(q, t1));
+  assert(!pqueue_tag_insert_tag(q, t2));
+  assert(!pqueue_tag_insert_tag(q, t3));
 
-    assert(!pqueue_tag_insert_if_no_match(q, t4));
-    assert(pqueue_tag_insert_if_no_match(q, t1));
-    assert(pqueue_tag_insert_if_no_match(q, t4));
-    printf("======== Contents of the queue:\n");
-    pqueue_print((pqueue_t*)q, NULL);
-    assert(pqueue_tag_size(q) == 4);
+  assert(!pqueue_tag_insert_if_no_match(q, t4));
+  assert(pqueue_tag_insert_if_no_match(q, t1));
+  assert(pqueue_tag_insert_if_no_match(q, t4));
+  printf("======== Contents of the queue:\n");
+  pqueue_print((pqueue_t*)q, NULL);
+  assert(pqueue_tag_size(q) == 4);
 }
 
 static void find_from_queue(pqueue_tag_t* q) {
-    tag_t t1 = {.time = USEC(3), .microstep = 0};
-    tag_t t2 = {.time = USEC(2), .microstep = 1};
-    tag_t t3 = {.time = USEC(2), .microstep = 0};
-    tag_t t4 = {.time = USEC(1), .microstep = 2};
-    tag_t t5 = {.time = USEC(0), .microstep = 0};
-    tag_t t6 = {.time = USEC(3), .microstep = 2};
-    assert(pqueue_tag_find_with_tag(q, t1) != NULL);
-    assert(pqueue_tag_find_with_tag(q, t2) != NULL);
-    assert(pqueue_tag_find_with_tag(q, t3) != NULL);
-    assert(pqueue_tag_find_with_tag(q, t4) != NULL);
-    assert(pqueue_tag_find_with_tag(q, t5) == NULL);
-    assert(pqueue_tag_find_with_tag(q, t6) == NULL);
+  tag_t t1 = {.time = USEC(3), .microstep = 0};
+  tag_t t2 = {.time = USEC(2), .microstep = 1};
+  tag_t t3 = {.time = USEC(2), .microstep = 0};
+  tag_t t4 = {.time = USEC(1), .microstep = 2};
+  tag_t t5 = {.time = USEC(0), .microstep = 0};
+  tag_t t6 = {.time = USEC(3), .microstep = 2};
+  assert(pqueue_tag_find_with_tag(q, t1) != NULL);
+  assert(pqueue_tag_find_with_tag(q, t2) != NULL);
+  assert(pqueue_tag_find_with_tag(q, t3) != NULL);
+  assert(pqueue_tag_find_with_tag(q, t4) != NULL);
+  assert(pqueue_tag_find_with_tag(q, t5) == NULL);
+  assert(pqueue_tag_find_with_tag(q, t6) == NULL);
 }
 
 static void insert_if_no_match(pqueue_tag_t* q) {
-    int size = pqueue_tag_size(q);
-    tag_t t1 = {.time = USEC(3), .microstep = 0};
-    tag_t t4 = {.time = USEC(1), .microstep = 2};
-    // Return value is non-zero on failure to insert:
-    assert(pqueue_tag_insert_if_no_match(q, t1));
-    assert(pqueue_tag_insert_if_no_match(q, t4));
-    assert(size == pqueue_tag_size(q));
+  int size = pqueue_tag_size(q);
+  tag_t t1 = {.time = USEC(3), .microstep = 0};
+  tag_t t4 = {.time = USEC(1), .microstep = 2};
+  // Return value is non-zero on failure to insert:
+  assert(pqueue_tag_insert_if_no_match(q, t1));
+  assert(pqueue_tag_insert_if_no_match(q, t4));
+  assert(size == pqueue_tag_size(q));
 }
 
 static void pop_from_queue(pqueue_tag_t* q) {
-    tag_t t1_back = pqueue_tag_pop_tag(q);
-    assert(t1_back.time == USEC(1));
-    assert(t1_back.microstep == 2);
-    tag_t t2_back = pqueue_tag_pop_tag(q);
-    assert(t2_back.time == USEC(2));
-    assert(t2_back.microstep == 0);
-    tag_t t3_back = pqueue_tag_pop_tag(q);
-    assert(t3_back.time == USEC(2));
-    assert(t3_back.microstep == 1);
-    tag_t t4_back = pqueue_tag_pop_tag(q);
-    assert(t4_back.time == USEC(3));
-    assert(t4_back.microstep == 0);
+  tag_t t1_back = pqueue_tag_pop_tag(q);
+  assert(t1_back.time == USEC(1));
+  assert(t1_back.microstep == 2);
+  tag_t t2_back = pqueue_tag_pop_tag(q);
+  assert(t2_back.time == USEC(2));
+  assert(t2_back.microstep == 0);
+  tag_t t3_back = pqueue_tag_pop_tag(q);
+  assert(t3_back.time == USEC(2));
+  assert(t3_back.microstep == 1);
+  tag_t t4_back = pqueue_tag_pop_tag(q);
+  assert(t4_back.time == USEC(3));
+  assert(t4_back.microstep == 0);
 }
 
 static void pop_empty(pqueue_tag_t* q) {
-    assert(pqueue_tag_size(q) == 0);
-    assert(pqueue_tag_pop(q) == NULL);
+  assert(pqueue_tag_size(q) == 0);
+  assert(pqueue_tag_pop(q) == NULL);
 }
 
 static void remove_from_queue(pqueue_tag_t* q, pqueue_tag_element_t* e1, pqueue_tag_element_t* e2) {
-    assert(pqueue_tag_insert(q, e1) == 0);
-    assert(pqueue_tag_insert(q, e2) == 0);
-    pqueue_tag_remove(q, e1);
-    assert(pqueue_tag_peek(q) == e2);
-    assert(pqueue_tag_size(q) == 1);
+  assert(pqueue_tag_insert(q, e1) == 0);
+  assert(pqueue_tag_insert(q, e2) == 0);
+  pqueue_tag_remove(q, e1);
+  assert(pqueue_tag_peek(q) == e2);
+  assert(pqueue_tag_size(q) == 1);
 }
 
-int main(int argc, char *argv[]) {
-    trivial();
-    // Create an event queue.
-    pqueue_tag_t* q = pqueue_tag_init(2);
+int main(int argc, char* argv[]) {
+  trivial();
+  // Create an event queue.
+  pqueue_tag_t* q = pqueue_tag_init(2);
 
-    insert_on_queue(q);
-    find_from_queue(q);
-    insert_if_no_match(q);
-    pop_from_queue(q);
-    pop_empty(q);
+  insert_on_queue(q);
+  find_from_queue(q);
+  insert_if_no_match(q);
+  pop_from_queue(q);
+  pop_empty(q);
 
-    pqueue_tag_element_t e1 = {.tag = {.time = USEC(3), .microstep = 0}, .pos = 0, .is_dynamic = 0};
-    pqueue_tag_element_t e2 = {.tag = {.time = USEC(2), .microstep = 0}, .pos = 0, .is_dynamic = 0};
+  pqueue_tag_element_t e1 = {.tag = {.time = USEC(3), .microstep = 0}, .pos = 0, .is_dynamic = 0};
+  pqueue_tag_element_t e2 = {.tag = {.time = USEC(2), .microstep = 0}, .pos = 0, .is_dynamic = 0};
 
-    remove_from_queue(q, &e1, &e2);
+  remove_from_queue(q, &e1, &e2);
 
-    pqueue_tag_free(q);
+  pqueue_tag_free(q);
 }
diff --git a/test/general/utils/vector_test.c b/test/general/utils/vector_test.c
index 894698a66..245cf37af 100644
--- a/test/general/utils/vector_test.c
+++ b/test/general/utils/vector_test.c
@@ -21,10 +21,10 @@ static int distribution[4] = {30, 50, 5, 15};
  * @param x Any pointer.
  */
 void test_push(vector_t* v) {
-    LF_PRINT_DEBUG("push.");
-    void* x = mock + rand();
-    vector_push(v, x);
-    mock[mock_size++] = x;
+  LF_PRINT_DEBUG("push.");
+  void* x = mock + rand();
+  vector_push(v, x);
+  mock[mock_size++] = x;
 }
 
 /**
@@ -33,17 +33,12 @@ void test_push(vector_t* v) {
  * @param v A vector.
  */
 void test_pop(vector_t* v) {
-    LF_PRINT_DEBUG("pop.");
-    void* expected;
-    void* found;
-    if (mock_size && (
-        (found = vector_pop(v)) != (expected = mock[--mock_size])
-    )) {
-        lf_print_error_and_exit(
-            "Expected %p but got %p while popping from a vector.",
-            expected, found
-        );
-    }
+  LF_PRINT_DEBUG("pop.");
+  void* expected;
+  void* found;
+  if (mock_size && ((found = vector_pop(v)) != (expected = mock[--mock_size]))) {
+    lf_print_error_and_exit("Expected %p but got %p while popping from a vector.", expected, found);
+  }
 }
 
 /**
@@ -53,14 +48,14 @@ void test_pop(vector_t* v) {
  * @return The number of items pushed to `v`.
  */
 int test_pushall(vector_t* v) {
-    LF_PRINT_DEBUG("pushall.");
-    int count = rand() % MAX_PUSHALL;
-    void** mock_start = mock + mock_size;
-    for (int i = 0; i < count; i++) {
-        mock[mock_size++] = mock - rand();
-    }
-    vector_pushall(v, mock_start, count);
-    return count;
+  LF_PRINT_DEBUG("pushall.");
+  int count = rand() % MAX_PUSHALL;
+  void** mock_start = mock + mock_size;
+  for (int i = 0; i < count; i++) {
+    mock[mock_size++] = mock - rand();
+  }
+  vector_pushall(v, mock_start, count);
+  return count;
 }
 
 /**
@@ -70,15 +65,12 @@ int test_pushall(vector_t* v) {
  * @param v A vector.
  */
 void test_random_access(vector_t* v) {
-    if (mock_size) {
-        int idx = rand() % mock_size;
-        if (v->start[idx] != mock[idx]) {
-            lf_print_error_and_exit(
-                "Expected %p but got %p while randomly accessing a vector.",
-                mock[idx], v->start[idx]
-            );
-        }
+  if (mock_size) {
+    int idx = rand() % mock_size;
+    if (v->start[idx] != mock[idx]) {
+      lf_print_error_and_exit("Expected %p but got %p while randomly accessing a vector.", mock[idx], v->start[idx]);
     }
+  }
 }
 
 /**
@@ -87,8 +79,8 @@ void test_random_access(vector_t* v) {
  * @param v A vector.
  */
 void test_vote(vector_t* v) {
-    LF_PRINT_DEBUG("vote.");
-    vector_vote(v);
+  LF_PRINT_DEBUG("vote.");
+  vector_vote(v);
 }
 
 /**
@@ -101,39 +93,36 @@ void test_vote(vector_t* v) {
  * by the number of items added to `v`.
  */
 int run_test(vector_t* v, int* distribution) {
-    int result = 1;
-    int choice = rand() % 100;
-    if ((choice = choice - distribution[0]) < 0) {
-        test_push(v);
-    } else if ((choice = choice - distribution[1]) < 0) {
-        test_pop(v);
-    } else if ((choice = choice - distribution[2]) < 0) {
-        result += test_pushall(v);
-    } else {
-        test_vote(v);
-    }
-    test_random_access(v);
-    return result;
+  int result = 1;
+  int choice = rand() % 100;
+  if ((choice = choice - distribution[0]) < 0) {
+    test_push(v);
+  } else if ((choice = choice - distribution[1]) < 0) {
+    test_pop(v);
+  } else if ((choice = choice - distribution[2]) < 0) {
+    result += test_pushall(v);
+  } else {
+    test_vote(v);
+  }
+  test_random_access(v);
+  return result;
 }
 
 int main() {
-    srand(RANDOM_SEED);
-    for (int i = 0; i < N; i++) {
-        int perturbed[4];
-        perturb(distribution, 4, perturbed);
-        LF_PRINT_DEBUG(
-            "Distribution: %d, %d, %d, %d",
-            perturbed[0], perturbed[1], perturbed[2], perturbed[3]
-        );
-        // FIXME: Decide whether it should be possible to initialize
-        //  vectors with zero capacity.
-        vector_t v = vector_new(rand() % CAPACITY + 1);
-        mock_size = 0;
-        int j = 0;
-        while (j < CAPACITY) {
-            j += run_test(&v, perturbed);
-        }
-        vector_free(&v);
+  srand(RANDOM_SEED);
+  for (int i = 0; i < N; i++) {
+    int perturbed[4];
+    perturb(distribution, 4, perturbed);
+    LF_PRINT_DEBUG("Distribution: %d, %d, %d, %d", perturbed[0], perturbed[1], perturbed[2], perturbed[3]);
+    // FIXME: Decide whether it should be possible to initialize
+    //  vectors with zero capacity.
+    vector_t v = vector_new(rand() % CAPACITY + 1);
+    mock_size = 0;
+    int j = 0;
+    while (j < CAPACITY) {
+      j += run_test(&v, perturbed);
     }
-    return 0;
+    vector_free(&v);
+  }
+  return 0;
 }
diff --git a/test/rand_utils.c b/test/rand_utils.c
index f0e55c748..49c1b5230 100644
--- a/test/rand_utils.c
+++ b/test/rand_utils.c
@@ -5,17 +5,17 @@
  * @brief Ensures that the expectation of each entry of `out` is equal
  * to the corresponding entry of `src`. Assumes that a random seed has
  * already been set using `srand`.
- * 
+ *
  * @param src An array of integers of size `size`.
  * @param size The size of both `src` and `out`.
  * @param out An array of integers of size `size`.
  */
 void perturb(int* src, size_t size, int* out) {
-    out[size - 1] = src[size - 1];
-    for (int a = 0; a < size - 1; a += 2) {
-        int min = src[a] < src[a + 1] ? src[a] : src[a + 1];
-        int diff = rand() % (min * 2) - min;
-        out[a] = src[a] + diff;
-        out[a + 1] = src[a + 1] - diff;
-    }
+  out[size - 1] = src[size - 1];
+  for (int a = 0; a < size - 1; a += 2) {
+    int min = src[a] < src[a + 1] ? src[a] : src[a + 1];
+    int diff = rand() % (min * 2) - min;
+    out[a] = src[a] + diff;
+    out[a + 1] = src[a + 1] - diff;
+  }
 }
diff --git a/test/rand_utils.h b/test/rand_utils.h
index 43704cee9..7accf5269 100644
--- a/test/rand_utils.h
+++ b/test/rand_utils.h
@@ -4,7 +4,7 @@
  * @brief Ensures that the expectation of each entry of `out` is equal
  * to the corresponding entry of `src`. Assumes that a random seed has
  * already been set using `srand`.
- * 
+ *
  * @param src An array of integers of size `size`.
  * @param size The size of both `src` and `out`.
  * @param out An array of integers of size `size`.
diff --git a/test/src_gen_stub.c b/test/src_gen_stub.c
index ce397822f..d67d238ea 100644
--- a/test/src_gen_stub.c
+++ b/test/src_gen_stub.c
@@ -14,9 +14,9 @@ environment_t _env;
 void _lf_initialize_trigger_objects(void) {}
 void lf_terminate_execution(void) {}
 void lf_set_default_command_line_options(void) {}
-void _lf_initialize_watchdogs(environment_t ** envs) {}
+void _lf_initialize_watchdogs(environment_t** envs) {}
 void logical_tag_complete(tag_t tag_to_send) {}
-int _lf_get_environments(environment_t ** envs) {
+int _lf_get_environments(environment_t** envs) {
   *envs = &_env;
   return 1;
 }
\ No newline at end of file
diff --git a/trace/api/CMakeLists.txt b/trace/api/CMakeLists.txt
new file mode 100644
index 000000000..c639096ea
--- /dev/null
+++ b/trace/api/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_library(lf-trace-api INTERFACE)
+add_library(lf::trace-api ALIAS lf-trace-api)
+target_include_directories(lf-trace-api INTERFACE ${CMAKE_CURRENT_LIST_DIR})
diff --git a/trace/api/trace.h b/trace/api/trace.h
new file mode 100644
index 000000000..614eda541
--- /dev/null
+++ b/trace/api/trace.h
@@ -0,0 +1,84 @@
+#ifndef TRACE_H
+#define TRACE_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "lf_core_version.h"
+
+/**
+ * @brief Return a description of the compile-time properties of the current
+ * plugin.
+ */
+version_t lf_version_tracing();
+
+/**
+ * Identifier for what is in the object table.
+ */
+typedef enum {
+  trace_reactor, // Self struct.
+  trace_trigger, // Timer or action (argument to schedule()).
+  trace_user     // User-defined trace object.
+} _lf_trace_object_t;
+
+/**
+ * Struct for table of pointers to a description of the object.
+ */
+typedef struct object_description_t object_description_t;
+struct object_description_t {
+  void* pointer;           // Pointer-sized value that uniquely identifies the object.
+  void* trigger;           // Pointer to the trigger (action or timer) or other secondary ID, if any.
+  _lf_trace_object_t type; // The type of trace object.
+  char* description;       // A NULL terminated string.
+};
+
+typedef struct {
+  int event_type;
+  void* pointer;
+  int src_id;
+  int dst_id;
+  int64_t logical_time;
+  int64_t microstep;
+  int64_t physical_time;
+  void* trigger;
+  int64_t extra_delay;
+} trace_record_nodeps_t;
+
+/**
+ * @brief Initialize the tracing module. Calling other API functions before
+ * calling this procedure is undefined behavior.
+ *
+ * @param file_name_prefix Prefix to attach to any files that may be produced by
+ * the tracing module.
+ * @param process_id The ID of the current federate, or -1 if this is the RTI. 0
+ * if unfederated.
+ * @param max_num_local_threads An upper bound on the number of threads created
+ * by this process.
+ */
+void lf_tracing_global_init(char* file_name_prefix, int process_id, int max_num_local_threads);
+/**
+ * @brief Register a kind of trace event. This should be called before
+ * tracepoints are reached.
+ *
+ * @param description A description of some trace events which may be received
+ * in the future. This may be invoked after many tracepoints have already been
+ * recorded but should be invoked early.
+ */
+void lf_tracing_register_trace_event(object_description_t description);
+/**
+ * @brief Give the tracing module access to the start time. This may be invoked
+ * after many tracepoints have already been recorded but should be invoked
+ * early.
+ */
+void lf_tracing_set_start_time(int64_t start_time);
+/**
+ * @brief Submit a tracepoint from the given worker to the tracing module.
+ */
+void lf_tracing_tracepoint(int worker, trace_record_nodeps_t* tr);
+/**
+ * @brief Shut down the tracing module. Calling other API functions after
+ * calling this procedure is undefined behavior.
+ */
+void lf_tracing_global_shutdown();
+
+#endif // TRACE_H
diff --git a/trace/impl/CMakeLists.txt b/trace/impl/CMakeLists.txt
new file mode 100644
index 000000000..6aeeb6870
--- /dev/null
+++ b/trace/impl/CMakeLists.txt
@@ -0,0 +1,25 @@
+add_library(lf-trace-impl STATIC)
+add_library(lf::trace-impl ALIAS lf-trace-impl)
+target_link_libraries(lf-trace-impl PRIVATE lf::trace-api)
+target_link_libraries(lf-trace-impl PRIVATE lf::platform-api)
+target_link_libraries(lf-trace-impl PRIVATE lf::logging-api)
+target_link_libraries(lf-trace-impl PRIVATE lf::version-api)
+
+target_sources(lf-trace-impl PUBLIC ${CMAKE_CURRENT_LIST_DIR}/src/trace_impl.c)
+
+target_include_directories(lf-trace-impl PUBLIC ${CMAKE_CURRENT_LIST_DIR}/include)
+
+# handle compile-time parameters
+if(NOT DEFINED LOG_LEVEL)
+    message(FATAL_ERROR "You must set LOG_LEVEL cmake argument")
+endif()
+target_compile_definitions(lf-trace-impl PRIVATE LOG_LEVEL=${LOG_LEVEL})
+# build type parameter (release, debug, etc) is implicitly handled by CMake
+
+# make name platform-independent
+set_target_properties(lf-trace-impl PROPERTIES PREFIX "")
+set_target_properties(lf-trace-impl PROPERTIES OUTPUT_NAME "lf-trace-impl")
+set_target_properties(lf-trace-impl PROPERTIES SUFFIX ".a")
+set_target_properties(lf-trace-impl PROPERTIES ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_CURRENT_LIST_DIR}/lib")
+set_target_properties(lf-trace-impl PROPERTIES ARCHIVE_OUTPUT_DIRECTORY_DEBUG "${CMAKE_CURRENT_LIST_DIR}/lib")
+set_target_properties(lf-trace-impl PROPERTIES ARCHIVE_OUTPUT_DIRECTORY_RELEASE "${CMAKE_CURRENT_LIST_DIR}/lib")
diff --git a/trace/impl/build.sh b/trace/impl/build.sh
new file mode 100755
index 000000000..d96cc5b34
--- /dev/null
+++ b/trace/impl/build.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+cmake -S . -B build -DLOG_LEVEL=4
+cmake --build build
diff --git a/trace/impl/include/trace_impl.h b/trace/impl/include/trace_impl.h
new file mode 100644
index 000000000..3e1bd6fe6
--- /dev/null
+++ b/trace/impl/include/trace_impl.h
@@ -0,0 +1,46 @@
+#include "trace.h"
+
+// FIXME: Target property should specify the capacity of the trace buffer.
+#define TRACE_BUFFER_CAPACITY 2048
+
+/** Size of the table of trace objects. */
+#define TRACE_OBJECT_TABLE_SIZE 1024
+
+// TYPE DEFINITIONS **********************************************************
+
+/**
+ * @brief This struct holds all the state associated with tracing in a single environment.
+ * Each environment which has tracing enabled will have such a struct on its environment struct.
+ *
+ */
+typedef struct trace_t {
+  /**
+   * Array of buffers into which traces are written.
+   * When a buffer becomes full, the contents is flushed to the file,
+   * which will create a significant pause in the calling thread.
+   */
+  trace_record_nodeps_t** _lf_trace_buffer;
+  int* _lf_trace_buffer_size;
+
+  /** The number of trace buffers allocated when tracing starts. */
+  int _lf_number_of_trace_buffers;
+
+  /** Marker that tracing is stopping or has stopped. */
+  int _lf_trace_stop;
+
+  /** The file into which traces are written. */
+  FILE* _lf_trace_file;
+
+  /** The file name where the traces are written*/
+  char* filename;
+
+  /** Table of pointers to a description of the object. */
+  object_description_t _lf_trace_object_descriptions[TRACE_OBJECT_TABLE_SIZE];
+  int _lf_trace_object_descriptions_size;
+
+  /** Indicator that the trace header information has been written to the file. */
+  bool _lf_trace_header_written;
+
+  // /** Pointer back to the environment which we are tracing within*/
+  // environment_t* env;
+} trace_t;
diff --git a/trace/impl/src/trace_impl.c b/trace/impl/src/trace_impl.c
new file mode 100644
index 000000000..d43a36e87
--- /dev/null
+++ b/trace/impl/src/trace_impl.c
@@ -0,0 +1,285 @@
+#include <assert.h> // debugging only
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "trace.h"
+#include "platform.h"
+#include "logging_macros.h"
+#include "trace_impl.h"
+
+/** Macro to use when access to trace file fails. */
+#define _LF_TRACE_FAILURE(trace)                                                                                       \
+  do {                                                                                                                 \
+    fprintf(stderr, "WARNING: Access to trace file failed.\n");                                                        \
+    fclose(trace->_lf_trace_file);                                                                                     \
+    trace->_lf_trace_file = NULL;                                                                                      \
+    return -1;                                                                                                         \
+  } while (0)
+
+// PRIVATE DATA STRUCTURES ***************************************************
+
+static lf_platform_mutex_ptr_t trace_mutex;
+static trace_t trace;
+static int process_id;
+static int64_t start_time;
+
+// PRIVATE HELPERS ***********************************************************
+
+/**
+ * Write the trace header information.
+ * See trace.h.
+ * @return The number of items written to the object table or -1 for failure.
+ */
+static int write_trace_header(trace_t* trace) {
+  if (trace->_lf_trace_file != NULL) {
+    size_t items_written = fwrite(&start_time, sizeof(int64_t), 1, trace->_lf_trace_file);
+    if (items_written != 1)
+      _LF_TRACE_FAILURE(trace);
+
+    // The next item in the header is the size of the
+    // _lf_trace_object_descriptions table.
+    items_written = fwrite(&trace->_lf_trace_object_descriptions_size, sizeof(int), 1, trace->_lf_trace_file);
+    if (items_written != 1)
+      _LF_TRACE_FAILURE(trace);
+
+    // Next we write the table.
+    for (int i = 0; i < trace->_lf_trace_object_descriptions_size; i++) {
+      // Write the pointer to the self struct.
+      items_written = fwrite(&trace->_lf_trace_object_descriptions[i].pointer, sizeof(void*), 1, trace->_lf_trace_file);
+      if (items_written != 1)
+        _LF_TRACE_FAILURE(trace);
+
+      // Write the pointer to the trigger_t struct.
+      items_written = fwrite(&trace->_lf_trace_object_descriptions[i].trigger, sizeof(void*), 1, trace->_lf_trace_file);
+      if (items_written != 1)
+        _LF_TRACE_FAILURE(trace);
+
+      // Write the object type.
+      items_written = fwrite(&trace->_lf_trace_object_descriptions[i].type, // Write the pointer value.
+                             sizeof(_lf_trace_object_t), 1, trace->_lf_trace_file);
+      if (items_written != 1)
+        _LF_TRACE_FAILURE(trace);
+
+      // Write the description.
+      int description_size = strlen(trace->_lf_trace_object_descriptions[i].description);
+      items_written = fwrite(trace->_lf_trace_object_descriptions[i].description, sizeof(char),
+                             description_size + 1, // Include null terminator.
+                             trace->_lf_trace_file);
+      if (items_written != description_size + 1)
+        _LF_TRACE_FAILURE(trace);
+    }
+  }
+  return trace->_lf_trace_object_descriptions_size;
+}
+
+/**
+ * @brief Flush the specified buffer to a file.
+ * This assumes the caller has entered a critical section.
+ * @param worker Index specifying the trace to flush.
+ */
+static void flush_trace_locked(trace_t* trace, int worker) {
+  if (trace->_lf_trace_stop == 0 && trace->_lf_trace_file != NULL && trace->_lf_trace_buffer_size[worker] > 0) {
+    // If the trace header has not been written, write it now.
+    // This is deferred to here so that user trace objects can be
+    // registered in startup reactions.
+    if (!trace->_lf_trace_header_written) {
+      if (write_trace_header(trace) < 0) {
+        lf_print_error("Failed to write trace header. Trace file will be incomplete.");
+        return;
+      }
+      trace->_lf_trace_header_written = true;
+    }
+
+    // Write first the length of the array.
+    size_t items_written = fwrite(&trace->_lf_trace_buffer_size[worker], sizeof(int), 1, trace->_lf_trace_file);
+    if (items_written != 1) {
+      fprintf(stderr, "WARNING: Access to trace file failed.\n");
+      fclose(trace->_lf_trace_file);
+      trace->_lf_trace_file = NULL;
+    } else {
+      // Write the contents.
+      items_written = fwrite(trace->_lf_trace_buffer[worker], sizeof(trace_record_nodeps_t),
+                             trace->_lf_trace_buffer_size[worker], trace->_lf_trace_file);
+      if (items_written != trace->_lf_trace_buffer_size[worker]) {
+        fprintf(stderr, "WARNING: Access to trace file failed.\n");
+        fclose(trace->_lf_trace_file);
+        trace->_lf_trace_file = NULL;
+      }
+    }
+    trace->_lf_trace_buffer_size[worker] = 0;
+  }
+}
+
+/**
+ * @brief Flush the specified buffer to a file.
+ * @param worker Index specifying the trace to flush.
+ */
+static void flush_trace(trace_t* trace, int worker) {
+  // To avoid having more than one worker writing to the file at the same time,
+  // enter a critical section.
+  lf_platform_mutex_lock(trace_mutex);
+  flush_trace_locked(trace, worker);
+  lf_platform_mutex_unlock(trace_mutex);
+}
+
+static void start_trace(trace_t* trace, int max_num_local_threads) {
+  // Do not write the trace header information to the file yet
+  // so that startup reactions can register user-defined trace objects.
+  // write_trace_header();
+  trace->_lf_trace_header_written = false;
+
+  // Allocate an array of arrays of trace records, one per worker thread plus one
+  // for the 0 thread (the main thread, or in an single-threaded program, the only
+  // thread).
+  trace->_lf_number_of_trace_buffers = max_num_local_threads;
+  trace->_lf_trace_buffer =
+      (trace_record_nodeps_t**)malloc(sizeof(trace_record_nodeps_t*) * (trace->_lf_number_of_trace_buffers + 1));
+  trace->_lf_trace_buffer++; // the buffer at index -1 is a fallback for user threads.
+  for (int i = -1; i < trace->_lf_number_of_trace_buffers; i++) {
+    trace->_lf_trace_buffer[i] = (trace_record_nodeps_t*)malloc(sizeof(trace_record_nodeps_t) * TRACE_BUFFER_CAPACITY);
+  }
+  // Array of counters that track the size of each trace record (per thread).
+  trace->_lf_trace_buffer_size = (int*)calloc(sizeof(int), trace->_lf_number_of_trace_buffers + 1);
+  trace->_lf_trace_buffer_size++;
+
+  trace->_lf_trace_stop = 0;
+  LF_PRINT_DEBUG("Started tracing.");
+}
+
+static void trace_new(char* filename) {
+
+  // Determine length of the filename
+  size_t len = strlen(filename) + 1;
+
+  // Allocate memory for the filename on the trace struct
+  trace.filename = (char*)malloc(len * sizeof(char));
+  LF_ASSERT(trace.filename, "Out of memory");
+
+  // Copy it to the struct
+  strncpy(trace.filename, filename, len);
+  // FIXME: location of trace file should be customizable.
+  trace._lf_trace_file = fopen(trace.filename, "w");
+  if (trace._lf_trace_file == NULL) {
+    fprintf(stderr,
+            "WARNING: Failed to open log file with error code %d."
+            "No log will be written.\n",
+            errno);
+  } else {
+    LF_PRINT_DEBUG("Opened trace file %s.", trace.filename);
+  }
+}
+
+static void trace_free(trace_t* trace) { free(trace->filename); }
+
+static void stop_trace_locked(trace_t* trace) {
+  if (trace->_lf_trace_stop) {
+    // Trace was already stopped. Nothing to do.
+    return;
+  }
+  for (int i = -1; i < trace->_lf_number_of_trace_buffers; i++) {
+    // Flush the buffer if it has data.
+    LF_PRINT_DEBUG("Trace buffer %d has %d records.", i, trace->_lf_trace_buffer_size[i]);
+    if (trace->_lf_trace_buffer_size && trace->_lf_trace_buffer_size[i] > 0) {
+      flush_trace_locked(trace, i);
+    }
+  }
+  trace->_lf_trace_stop = 1;
+  if (trace->_lf_trace_file != NULL) {
+    fclose(trace->_lf_trace_file);
+    trace->_lf_trace_file = NULL;
+  }
+  LF_PRINT_DEBUG("Stopped tracing.");
+}
+
+static void stop_trace(trace_t* trace) {
+  lf_platform_mutex_lock(trace_mutex);
+  stop_trace_locked(trace);
+  lf_platform_mutex_unlock(trace_mutex);
+}
+
+// IMPLEMENTATION OF VERSION API *********************************************
+
+version_t lf_version_tracing() {
+  return (version_t){
+      .build_config =
+          (build_config_t){
+              .single_threaded = TRIBOOL_DOES_NOT_MATTER,
+#ifdef NDEBUG
+              .build_type_is_debug = TRIBOOL_FALSE,
+#else
+              .build_type_is_debug = TRIBOOL_TRUE,
+#endif
+              .log_level = LOG_LEVEL,
+          },
+      .core_version_name = NULL,
+  };
+}
+
+// IMPLEMENTATION OF TRACE API ***********************************************
+
+void lf_tracing_register_trace_event(object_description_t description) {
+  lf_platform_mutex_lock(trace_mutex);
+  if (trace._lf_trace_object_descriptions_size >= TRACE_OBJECT_TABLE_SIZE) {
+    lf_platform_mutex_unlock(trace_mutex);
+    fprintf(stderr, "WARNING: Exceeded trace object table size. Trace file will be incomplete.\n");
+    return;
+  }
+  trace._lf_trace_object_descriptions[trace._lf_trace_object_descriptions_size++] = description;
+  lf_platform_mutex_unlock(trace_mutex);
+}
+
+void lf_tracing_tracepoint(int worker, trace_record_nodeps_t* tr) {
+  // Worker argument determines which buffer to write to.
+  int tid = lf_thread_id();
+  if (tid < 0) {
+    // The current thread was created by the user. It is not managed by LF, its ID is not known,
+    // and most importantly it does not count toward the limit on the total number of threads.
+    // Therefore we should fall back to using a mutex.
+    lf_platform_mutex_lock(trace_mutex);
+  }
+  if (tid > trace._lf_number_of_trace_buffers) {
+    lf_print_error_and_exit("the thread id (%d) exceeds the number of trace buffers (%d)", tid,
+                            trace._lf_number_of_trace_buffers);
+  }
+
+  // Flush the buffer if it is full.
+  if (trace._lf_trace_buffer_size[tid] >= TRACE_BUFFER_CAPACITY) {
+    // No more room in the buffer. Write the buffer to the file.
+    flush_trace(&trace, tid);
+  }
+  // The above flush_trace resets the write pointer.
+  int i = trace._lf_trace_buffer_size[tid];
+  // Write to memory buffer.
+  // Get the correct time of the event
+
+  trace._lf_trace_buffer[tid][i] = *tr;
+  trace._lf_trace_buffer_size[tid]++;
+  if (tid < 0) {
+    lf_platform_mutex_unlock(trace_mutex);
+  }
+}
+
+void lf_tracing_global_init(char* file_name_prefix, int fedid, int max_num_local_threads) {
+  trace_mutex = lf_platform_mutex_new();
+  if (!trace_mutex) {
+    fprintf(stderr, "WARNING: Failed to initialize trace mutex.\n");
+    exit(1);
+  }
+  process_id = fedid;
+  char filename[100];
+  if (strcmp(file_name_prefix, "rti") == 0) {
+    sprintf(filename, "%s.lft", file_name_prefix);
+  } else {
+    sprintf(filename, "%s%d.lft", file_name_prefix, process_id);
+  }
+  trace_new(filename);
+  start_trace(&trace, max_num_local_threads);
+}
+void lf_tracing_set_start_time(int64_t time) { start_time = time; }
+void lf_tracing_global_shutdown() {
+  stop_trace(&trace);
+  trace_free(&trace);
+  lf_platform_mutex_free(trace_mutex);
+}
diff --git a/util/audio_loop.h b/util/audio_loop.h
index 011b31eb5..d22a61c49 100644
--- a/util/audio_loop.h
+++ b/util/audio_loop.h
@@ -4,7 +4,7 @@
  * @author Soroush Bateni
  * @copyright (c) 2020-2023, The University of California at Berkeley and UT Dallas.
  * License in [BSD 2-clause](https://github.com/lf-lang/reactor-c/blob/main/LICENSE.md)
- * 
+ *
  * @brief Utility function for playing audio on Linux or MacOS.
  *
  * Audio functions for Linux or MacOS. To start an audio loop, call
@@ -38,17 +38,17 @@
 #define AUDIO_LOOP_H
 
 #include "wave_file_reader.h" // Defines lf_waveform_t.
-#include "tag.h"         // Defines instant_t.
+#include "tag.h"              // Defines instant_t.
 
 // Constants for playback. These are all coupled.
 #define SAMPLE_RATE 44100
-#define AUDIO_BUFFER_SIZE  4410  // 1/10 second, 100 msec
+#define AUDIO_BUFFER_SIZE 4410 // 1/10 second, 100 msec
 #define BUFFER_DURATION_NS 100000000LL
 #define NUM_CHANNELS 1 // 2 for stereo
 
 #define MAX_AMPLITUDE 32765
 
-#define NUM_NOTES 8  // Maximum number of notes that can play simultaneously.
+#define NUM_NOTES 8 // Maximum number of notes that can play simultaneously.
 
 /**
  * Start an audio loop thread that becomes ready to receive
diff --git a/util/audio_loop_linux.c b/util/audio_loop_linux.c
index b6b4ac2f9..c1aead32a 100644
--- a/util/audio_loop_linux.c
+++ b/util/audio_loop_linux.c
@@ -4,11 +4,11 @@
  * @author Soroush Bateni
  * @copyright (c) 2020-2023, The University of California at Berkeley and UT Dallas.
  * License in [BSD 2-clause](https://github.com/lf-lang/reactor-c/blob/main/LICENSE.md)
- * 
+ *
  * @brief Utility function for playing audio on Linux.
- * 
+ *
  * See audio_loop.h for instructions.
- * 
+ *
  * Help from http://equalarea.com/paul/alsa-audio.html
  *
  */
@@ -34,17 +34,17 @@ pthread_cond_t lf_audio_cond = PTHREAD_COND_INITIALIZER;
 int16_t* next_buffer = NULL;
 instant_t next_buffer_start_time = NEVER;
 
-snd_pcm_t *playback_handle;
-snd_async_handler_t *pcm_callback;
+snd_pcm_t* playback_handle;
+snd_async_handler_t* pcm_callback;
 
 struct note {
-    lf_waveform_t* waveform;
-    int position;   // Starts at 0 when note starts.
-    double volume;  // 0.0 for not active.
+  lf_waveform_t* waveform;
+  int position;  // Starts at 0 when note starts.
+  double volume; // 0.0 for not active.
 };
 
 // Array keeping track of notes being played.
-struct note notes[NUM_NOTES] = { 0 };
+struct note notes[NUM_NOTES] = {0};
 
 // Notes are added sequentially.
 // When we reach the end of the notes array, we cycle
@@ -60,74 +60,74 @@ int note_counter = 0;
  * @param value The amplitude to add to whatever amplitude is already there.
  */
 void add_to_sound(int index_offset, double value) {
-    int sample_value = next_buffer[index_offset] + value;
-    if (sample_value > MAX_AMPLITUDE) {
-        sample_value = MAX_AMPLITUDE;
-    } else if (sample_value < -MAX_AMPLITUDE) {
-        sample_value = -MAX_AMPLITUDE;
-    }
-    next_buffer[index_offset] = (int16_t)sample_value;
+  int sample_value = next_buffer[index_offset] + value;
+  if (sample_value > MAX_AMPLITUDE) {
+    sample_value = MAX_AMPLITUDE;
+  } else if (sample_value < -MAX_AMPLITUDE) {
+    sample_value = -MAX_AMPLITUDE;
+  }
+  next_buffer[index_offset] = (int16_t)sample_value;
 }
 
 /**
  * Function that is called by the audio loop to fill the audio buffer
  * with the next batch of audio data.  When this callback occurs,
- * this grabs the mutex lock, copies the buffer that the main program 
+ * this grabs the mutex lock, copies the buffer that the main program
  * has been filling into the destination buffer, clears the next
  * buffer, and updates the start time of the next buffer.
  * @param playback_handle Handle for the audio interface
  * @param buffer_ref Reference to the buffer of size AUDIO_BUFFER_SIZE to be copied to the hardware
  */
-int callback (snd_pcm_t *playback_handle,  int16_t buf_ref[]) {
-    int error_number;
-    pthread_mutex_lock(&lf_audio_mutex);
-
-    // next_buffer = buf_ref;
-    next_buffer = buf_ref;
-    // memset(next_buffer, 0, AUDIO_BUFFER_SIZE * sizeof(int16_t));
-
-    // Clear out the next buffer.
-    next_buffer_start_time += BUFFER_DURATION_NS;
-    
-    // Fill the buffer with any trailing sample data that
-    // didn't fit in the previous buffer.
-    for (int note_to_use = 0; note_to_use < NUM_NOTES; note_to_use++) {
-        struct note* note_instance = &(notes[note_to_use]);
-
-        // Add as much of the note instance into the buffer as will fit.
-        for (int i = 0; i < AUDIO_BUFFER_SIZE; i++) {
-            if (note_instance->waveform == NULL || note_instance->volume == 0.0) {
-                continue;
-            }
-            // Calculate the value to add to the sound by averaging all the channels.
-            int value = 0;
-            for (int channel = 0; channel < note_instance->waveform->num_channels; channel++) {
-                value += note_instance->waveform->waveform[note_instance->position + channel];
-            }
-            value = value / note_instance->waveform->num_channels;
-            add_to_sound(i, value * note_instance->volume);
-
-            note_instance->position += note_instance->waveform->num_channels;
-            if (note_instance->position >= note_instance->waveform->length - note_instance->waveform->num_channels) {
-                // Reached the end of the note. Reset the note.
-                note_instance->volume = 0.0;
-                note_instance->position = 0;
-                note_instance->waveform = NULL;
-                break;
-            }
-        }
-    }
-    
-    // Reinsert this same audio buffer at the end of the queue.
-    if ((error_number = snd_pcm_writei(playback_handle, buf_ref, AUDIO_BUFFER_SIZE)) < 0) {
-        lf_print_error("Writing to sound buffer failed: %s", snd_strerror(error_number));
+int callback(snd_pcm_t* playback_handle, int16_t buf_ref[]) {
+  int error_number;
+  pthread_mutex_lock(&lf_audio_mutex);
+
+  // next_buffer = buf_ref;
+  next_buffer = buf_ref;
+  // memset(next_buffer, 0, AUDIO_BUFFER_SIZE * sizeof(int16_t));
+
+  // Clear out the next buffer.
+  next_buffer_start_time += BUFFER_DURATION_NS;
+
+  // Fill the buffer with any trailing sample data that
+  // didn't fit in the previous buffer.
+  for (int note_to_use = 0; note_to_use < NUM_NOTES; note_to_use++) {
+    struct note* note_instance = &(notes[note_to_use]);
+
+    // Add as much of the note instance into the buffer as will fit.
+    for (int i = 0; i < AUDIO_BUFFER_SIZE; i++) {
+      if (note_instance->waveform == NULL || note_instance->volume == 0.0) {
+        continue;
+      }
+      // Calculate the value to add to the sound by averaging all the channels.
+      int value = 0;
+      for (int channel = 0; channel < note_instance->waveform->num_channels; channel++) {
+        value += note_instance->waveform->waveform[note_instance->position + channel];
+      }
+      value = value / note_instance->waveform->num_channels;
+      add_to_sound(i, value * note_instance->volume);
+
+      note_instance->position += note_instance->waveform->num_channels;
+      if (note_instance->position >= note_instance->waveform->length - note_instance->waveform->num_channels) {
+        // Reached the end of the note. Reset the note.
+        note_instance->volume = 0.0;
+        note_instance->position = 0;
+        note_instance->waveform = NULL;
+        break;
+      }
     }
-
-    // In case the other thread is waiting for this event, notify
-    // (the other thread should not be waiting).
-    pthread_cond_signal(&lf_audio_cond);
-    pthread_mutex_unlock(&lf_audio_mutex);
-    return error_number;
+  }
+
+  // Reinsert this same audio buffer at the end of the queue.
+  if ((error_number = snd_pcm_writei(playback_handle, buf_ref, AUDIO_BUFFER_SIZE)) < 0) {
+    lf_print_error("Writing to sound buffer failed: %s", snd_strerror(error_number));
+  }
+
+  // In case the other thread is waiting for this event, notify
+  // (the other thread should not be waiting).
+  pthread_cond_signal(&lf_audio_cond);
+  pthread_mutex_unlock(&lf_audio_mutex);
+  return error_number;
 }
 
 bool stop_audio = false;
@@ -136,258 +136,236 @@ bool stop_audio = false;
  * Run the audio loop indefinitely.
  */
 void* run_audio_loop(void* ignored) {
-    snd_pcm_hw_params_t *hw_params;
-    snd_pcm_sw_params_t *sw_params;
-    snd_pcm_sframes_t frames_to_deliver;
-    int error_number;
-    unsigned int sample_rate = SAMPLE_RATE;
-    const char* device_name = AUDIO_DEVICE;
-    int buffer_size_bytes = AUDIO_BUFFER_SIZE * 4 * NUM_CHANNELS;
-
-    if ((error_number = snd_pcm_open(&playback_handle, device_name, SND_PCM_STREAM_PLAYBACK, 0)) < 0) {
-        lf_print_error_and_exit("Cannot open audio device %s (%s)\n",
-                AUDIO_DEVICE,
-             snd_strerror(error_number));
-    }
-
-    if ((error_number = snd_pcm_hw_params_malloc(&hw_params)) < 0) {
-        lf_print_error_and_exit("Cannot allocate hardware parameter structure (%s)\n",
-             snd_strerror(error_number));
-    }
-
-    if ((error_number = snd_pcm_hw_params_any(playback_handle, hw_params)) < 0) {
-        lf_print_error_and_exit("Cannot initialize hardware parameter structure (%s)\n",
-             snd_strerror(error_number));
-    }
+  snd_pcm_hw_params_t* hw_params;
+  snd_pcm_sw_params_t* sw_params;
+  snd_pcm_sframes_t frames_to_deliver;
+  int error_number;
+  unsigned int sample_rate = SAMPLE_RATE;
+  const char* device_name = AUDIO_DEVICE;
+  int buffer_size_bytes = AUDIO_BUFFER_SIZE * 4 * NUM_CHANNELS;
+
+  if ((error_number = snd_pcm_open(&playback_handle, device_name, SND_PCM_STREAM_PLAYBACK, 0)) < 0) {
+    lf_print_error_and_exit("Cannot open audio device %s (%s)\n", AUDIO_DEVICE, snd_strerror(error_number));
+  }
+
+  if ((error_number = snd_pcm_hw_params_malloc(&hw_params)) < 0) {
+    lf_print_error_and_exit("Cannot allocate hardware parameter structure (%s)\n", snd_strerror(error_number));
+  }
+
+  if ((error_number = snd_pcm_hw_params_any(playback_handle, hw_params)) < 0) {
+    lf_print_error_and_exit("Cannot initialize hardware parameter structure (%s)\n", snd_strerror(error_number));
+  }
+
+  if ((error_number = snd_pcm_hw_params_set_access(playback_handle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED)) < 0) {
+    lf_print_error_and_exit("Cannot set access type (%s)\n", snd_strerror(error_number));
+  }
+
+  if ((error_number = snd_pcm_hw_params_set_format(playback_handle, hw_params, SND_PCM_FORMAT_S16_LE)) < 0) {
+    lf_print_error_and_exit("Cannot set sample format (%s)\n", snd_strerror(error_number));
+  }
+
+  if ((error_number = snd_pcm_hw_params_set_rate_near(playback_handle, hw_params, &sample_rate, 0)) < 0) {
+    lf_print_error_and_exit("Cannot set sample rate (%s)\n", snd_strerror(error_number));
+  }
+  // FIXME: check sample rate
+
+  if ((error_number = snd_pcm_hw_params_set_channels(playback_handle, hw_params, NUM_CHANNELS)) < 0) {
+    lf_print_error_and_exit("Cannot set channel count (%s)\n", snd_strerror(error_number));
+  }
+  snd_pcm_uframes_t periods = buffer_size_bytes / AUDIO_BUFFER_SIZE;
+  if ((error_number = snd_pcm_hw_params_set_periods(playback_handle, hw_params, periods, 0)) < 0) {
+    lf_print_error_and_exit("Cannot set channel count (%s)\n", snd_strerror(error_number));
+  }
+  snd_pcm_uframes_t size = buffer_size_bytes;
+  if ((error_number = snd_pcm_hw_params_set_buffer_size_near(playback_handle, hw_params, &size)) < 0) {
+    lf_print_error_and_exit("Cannot set channel count (%s)\n", snd_strerror(error_number));
+  }
+  if ((error_number = snd_pcm_hw_params(playback_handle, hw_params)) < 0) {
+    lf_print_error_and_exit("Cannot set parameters (%s)\n", snd_strerror(error_number));
+  }
+
+  snd_pcm_hw_params_free(hw_params);
+
+  /* tell ALSA to wake us up whenever 4096 or more frames
+     of playback data can be delivered. Also, tell
+     ALSA that we'll start the device ourselves.
+  */
+
+  if ((error_number = snd_pcm_sw_params_malloc(&sw_params)) < 0) {
+    lf_print_error_and_exit("Cannot allocate software parameters structure (%s)\n", snd_strerror(error_number));
+  }
+  if ((error_number = snd_pcm_sw_params_current(playback_handle, sw_params)) < 0) {
+    lf_print_error_and_exit("Cannot initialize software parameters structure (%s)\n", snd_strerror(error_number));
+  }
+  if ((error_number = snd_pcm_sw_params_set_avail_min(playback_handle, sw_params, buffer_size_bytes)) < 0) {
+    lf_print_error_and_exit("Cannot set minimum available count (%s)\n", snd_strerror(error_number));
+  }
+  if ((error_number = snd_pcm_sw_params_set_start_threshold(playback_handle, sw_params, AUDIO_BUFFER_SIZE)) < 0) {
+    lf_print_error_and_exit("Cannot set start mode (%s)\n", snd_strerror(error_number));
+  }
+  if ((error_number = snd_pcm_sw_params(playback_handle, sw_params)) < 0) {
+    lf_print_error_and_exit("Cannot set software parameters (%s)\n", snd_strerror(error_number));
+  }
+
+  snd_pcm_sw_params_free(sw_params);
+
+  /*
+   * The interface will interrupt the kernel every AUDIO_BUFFER_SIZE frames, and ALSA
+   * will wake up this program very soon after that.
+   */
+
+  if ((error_number = snd_pcm_prepare(playback_handle)) < 0) {
+    lf_print_error_and_exit("Cannot prepare audio interface for use (%s)\n", snd_strerror(error_number));
+  }
+
+  int16_t buffer[buffer_size_bytes];
+  memset(buffer, 0, buffer_size_bytes * sizeof(int16_t));
+  int head = 0;
+  while (!stop_audio) {
+    /*
+     * Wait until the interface is ready for data, or BUFFER_DURATION_NS
+     * has elapsed.
+     */
 
-    if ((error_number = snd_pcm_hw_params_set_access(playback_handle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED)) < 0) {
-        lf_print_error_and_exit("Cannot set access type (%s)\n",
-             snd_strerror(error_number));
+    if ((error_number = snd_pcm_wait(playback_handle, BUFFER_DURATION_NS / 1000)) < 0) {
+      lf_print_error("Poll failed (%s)\n", strerror(errno));
+      break;
     }
 
-    if ((error_number = snd_pcm_hw_params_set_format(playback_handle, hw_params, SND_PCM_FORMAT_S16_LE)) < 0) {
-        lf_print_error_and_exit("Cannot set sample format (%s)\n",
-             snd_strerror(error_number));
-    }
+    /* Find out how much space is available for playback data */
 
-    if ((error_number = snd_pcm_hw_params_set_rate_near(playback_handle, hw_params, &sample_rate, 0)) < 0) {
-        lf_print_error_and_exit("Cannot set sample rate (%s)\n",
-             snd_strerror(error_number));
+    if ((frames_to_deliver = snd_pcm_avail_update(playback_handle)) < 0) {
+      if (frames_to_deliver == -EPIPE) {
+        lf_print_error("An xrun occured\n");
+        continue;
+      } else {
+        lf_print_error("Unknown ALSA avail update return value (%d)\n", frames_to_deliver);
+        break;
+      }
     }
-    // FIXME: check sample rate
 
-    if ((error_number = snd_pcm_hw_params_set_channels(playback_handle, hw_params, NUM_CHANNELS)) < 0) {
-        lf_print_error_and_exit("Cannot set channel count (%s)\n",
-             snd_strerror(error_number));
-    }
-    snd_pcm_uframes_t periods = buffer_size_bytes / AUDIO_BUFFER_SIZE;
-    if ((error_number = snd_pcm_hw_params_set_periods(playback_handle, hw_params, periods, 0)) < 0) {
-        lf_print_error_and_exit("Cannot set channel count (%s)\n",
-             snd_strerror(error_number));
-    }
-    snd_pcm_uframes_t size = buffer_size_bytes;
-    if ((error_number = snd_pcm_hw_params_set_buffer_size_near(playback_handle, hw_params, &size)) < 0) {
-        lf_print_error_and_exit("Cannot set channel count (%s)\n",
-             snd_strerror(error_number));
-    }
-    if ((error_number = snd_pcm_hw_params(playback_handle, hw_params)) < 0) {
-        lf_print_error_and_exit("Cannot set parameters (%s)\n",
-             snd_strerror(error_number));
+    if (frames_to_deliver < AUDIO_BUFFER_SIZE) {
+      continue;
     }
 
-    snd_pcm_hw_params_free(hw_params);
+    /* deliver the data */
+    callback(playback_handle, &(buffer[head]));
 
-    /* tell ALSA to wake us up whenever 4096 or more frames
-       of playback data can be delivered. Also, tell
-       ALSA that we'll start the device ourselves.
-    */
-
-    if ((error_number = snd_pcm_sw_params_malloc(&sw_params)) < 0) {
-        lf_print_error_and_exit("Cannot allocate software parameters structure (%s)\n",
-             snd_strerror (error_number));
-    }
-    if ((error_number = snd_pcm_sw_params_current(playback_handle, sw_params)) < 0) {
-        lf_print_error_and_exit("Cannot initialize software parameters structure (%s)\n",
-             snd_strerror (error_number));
-    }
-    if ((error_number = snd_pcm_sw_params_set_avail_min(playback_handle, sw_params, buffer_size_bytes)) < 0) {
-        lf_print_error_and_exit("Cannot set minimum available count (%s)\n",
-             snd_strerror (error_number));
-    }
-    if ((error_number = snd_pcm_sw_params_set_start_threshold(playback_handle, sw_params, AUDIO_BUFFER_SIZE)) < 0) {
-        lf_print_error_and_exit("Cannot set start mode (%s)\n",
-             snd_strerror (error_number));
-    }
-    if ((error_number = snd_pcm_sw_params(playback_handle, sw_params)) < 0) {
-        lf_print_error_and_exit("Cannot set software parameters (%s)\n",
-             snd_strerror (error_number));
-    }
-
-    snd_pcm_sw_params_free(sw_params);
-
-    /*
-     * The interface will interrupt the kernel every AUDIO_BUFFER_SIZE frames, and ALSA
-     * will wake up this program very soon after that.
-    */
-
-    if ((error_number = snd_pcm_prepare(playback_handle)) < 0) {
-        lf_print_error_and_exit("Cannot prepare audio interface for use (%s)\n",
-             snd_strerror (error_number));
-    }
-
-
-    int16_t buffer[buffer_size_bytes];
-    memset(buffer, 0, buffer_size_bytes * sizeof(int16_t));
-    int head = 0;
-    while (!stop_audio) {
-        /*
-         * Wait until the interface is ready for data, or BUFFER_DURATION_NS
-         * has elapsed.
-        */
-
-        if ((error_number = snd_pcm_wait(playback_handle, BUFFER_DURATION_NS/1000)) < 0) {
-            lf_print_error("Poll failed (%s)\n", strerror(errno));
-            break;
-        }
-
-        /* Find out how much space is available for playback data */
-
-        if ((frames_to_deliver = snd_pcm_avail_update(playback_handle)) < 0) {
-            if (frames_to_deliver == -EPIPE) {
-                lf_print_error("An xrun occured\n");
-                continue;
-            } else {
-                lf_print_error("Unknown ALSA avail update return value (%d)\n",
-                     frames_to_deliver);
-                break;
-            }
-        }
-
-        if (frames_to_deliver < AUDIO_BUFFER_SIZE) {
-            continue;
-        }
-
-        /* deliver the data */
-        callback(playback_handle, &(buffer[head]));
-
-
-        if (head <= (buffer_size_bytes - (2 * AUDIO_BUFFER_SIZE))) {
-            head += AUDIO_BUFFER_SIZE;
-        } else {
-            head = 0;
-        }
-        // Clear out the next buffer.
-        memset(&(buffer[head]), 0, AUDIO_BUFFER_SIZE * sizeof(int16_t));
-        next_buffer = &(buffer[head]);
+    if (head <= (buffer_size_bytes - (2 * AUDIO_BUFFER_SIZE))) {
+      head += AUDIO_BUFFER_SIZE;
+    } else {
+      head = 0;
     }
+    // Clear out the next buffer.
+    memset(&(buffer[head]), 0, AUDIO_BUFFER_SIZE * sizeof(int16_t));
+    next_buffer = &(buffer[head]);
+  }
 
-    snd_pcm_close(playback_handle);
-
+  snd_pcm_close(playback_handle);
 
-    return NULL;
+  return NULL;
 }
 
 pthread_t loop_thread_id;
 bool loop_thread_started = false;
 
 void lf_start_audio_loop(instant_t start_time) {
-    
-    if (loop_thread_started) return;
-    loop_thread_started = true;
-    
-    // Set the start time of the current buffer to the current time
-    // minus twice the buffer duration. The two calls to callback()
-    // during setup will increment this to equal to the start time.
-    // Then create a thread to
-    // start the audio loop. That thread will place
-    // two empty audio buffers in the queue and will schedule the
-    // audio to start at the current logical time plus the buffer
-    // duration. The current buffer being filled (the second buffer)
-    // will have logical start time 0, but will play later by less
-    // than the buffer duration.
-    next_buffer_start_time = start_time - 2 * BUFFER_DURATION_NS;
-    
-    // Start the audio loop thread.
-    pthread_create(&loop_thread_id, NULL, &run_audio_loop, NULL);
-}
 
-void lf_stop_audio_loop() {
-    stop_audio = true;
+  if (loop_thread_started)
+    return;
+  loop_thread_started = true;
+
+  // Set the start time of the current buffer to the current time
+  // minus twice the buffer duration. The two calls to callback()
+  // during setup will increment this to equal to the start time.
+  // Then create a thread to
+  // start the audio loop. That thread will place
+  // two empty audio buffers in the queue and will schedule the
+  // audio to start at the current logical time plus the buffer
+  // duration. The current buffer being filled (the second buffer)
+  // will have logical start time 0, but will play later by less
+  // than the buffer duration.
+  next_buffer_start_time = start_time - 2 * BUFFER_DURATION_NS;
+
+  // Start the audio loop thread.
+  pthread_create(&loop_thread_id, NULL, &run_audio_loop, NULL);
 }
 
+void lf_stop_audio_loop() { stop_audio = true; }
+
 int lf_play_audio_waveform(lf_waveform_t* waveform, float emphasis, instant_t start_time) {
-    int result = 0;
-    pthread_mutex_lock(&lf_audio_mutex);
-    
-    // If the buffer into which to write has not yet been set up, wait.
-    while (next_buffer == NULL) {
-        pthread_cond_wait(&lf_audio_cond, &lf_audio_mutex);
-    }
-    instant_t time_offset = start_time - next_buffer_start_time;
-    
-    // If this is late, then tick right away.
+  int result = 0;
+  pthread_mutex_lock(&lf_audio_mutex);
+
+  // If the buffer into which to write has not yet been set up, wait.
+  while (next_buffer == NULL) {
+    pthread_cond_wait(&lf_audio_cond, &lf_audio_mutex);
+  }
+  instant_t time_offset = start_time - next_buffer_start_time;
+
+  // If this is late, then tick right away.
+  if (time_offset < 0) {
+    // printf("WARNING: audio has passed the specified time by %lld.\n", time_offset);
+    time_offset = 0;
+    result = 1;
+  }
+  // Calculate the index of the tick.
+  size_t index_offset = (time_offset * SAMPLE_RATE) / BILLION;
+
+  // If the offset is beyond the end of the audio buffer, then the program
+  // has gotten ahead of the audio. Wait for audio to catch up.
+  // This happens when a timestamp is at or close to the start time
+  // for the buffer because the audio system has not yet invoked the
+  // callback to swap buffers.  Here, we wait for the callback to
+  // occur.
+  while (index_offset >= AUDIO_BUFFER_SIZE) {
+    pthread_cond_wait(&lf_audio_cond, &lf_audio_mutex);
+    // next_buffer_start_time has been incremented by BUFFER_DURATION_NS.
+    time_offset = start_time - next_buffer_start_time;
+    // time_offset should be >= 0, but just in case:
     if (time_offset < 0) {
-        // printf("WARNING: audio has passed the specified time by %lld.\n", time_offset);
-        time_offset = 0;
-        result = 1;
+      time_offset = 0;
+      result = 1;
     }
-    // Calculate the index of the tick.
-    size_t index_offset = (time_offset * SAMPLE_RATE) / BILLION;
-    
-    // If the offset is beyond the end of the audio buffer, then the program
-    // has gotten ahead of the audio. Wait for audio to catch up.
-    // This happens when a timestamp is at or close to the start time
-    // for the buffer because the audio system has not yet invoked the
-    // callback to swap buffers.  Here, we wait for the callback to
-    // occur.
-    while (index_offset >= AUDIO_BUFFER_SIZE) {
-        pthread_cond_wait(&lf_audio_cond, &lf_audio_mutex);
-        // next_buffer_start_time has been incremented by BUFFER_DURATION_NS.
-        time_offset = start_time - next_buffer_start_time;
-        // time_offset should be >= 0, but just in case:
-        if (time_offset < 0) {
-            time_offset = 0;
-            result = 1;
-        }
-        index_offset = (time_offset * SAMPLE_RATE) / BILLION;
+    index_offset = (time_offset * SAMPLE_RATE) / BILLION;
+  }
+
+  if (waveform == NULL) {
+    // Waveform ID is out of range. Just emit a tick.
+    add_to_sound(index_offset, MAX_AMPLITUDE * emphasis);
+  } else {
+    int note_to_use = note_counter++; // Increment so that the next note uses a new slot.
+    if (note_counter >= NUM_NOTES) {
+      note_counter = 0; // Wrap around.
     }
-    
-    if (waveform == NULL) {
-        // Waveform ID is out of range. Just emit a tick.
-        add_to_sound(index_offset, MAX_AMPLITUDE * emphasis);
-    } else {
-        int note_to_use = note_counter++; // Increment so that the next note uses a new slot.
-        if (note_counter >= NUM_NOTES) {
-            note_counter = 0; // Wrap around.
+    // Initialize the note instance to start playing.
+    struct note* note_instance = &notes[note_to_use];
+    note_instance->waveform = waveform;
+    // If the waveform length is 0, do not play anything.
+    if (waveform->length > 0) {
+      note_instance->volume = emphasis;
+      note_instance->position = 0;
+
+      // Add as much of the note instance into the buffer as will fit.
+      for (int i = index_offset; i < AUDIO_BUFFER_SIZE; i++) {
+        // Calculate the value to add to the sound by averaging all the channels.
+        int value = 0;
+        for (int channel = 0; channel < waveform->num_channels; channel++) {
+          value += waveform->waveform[note_instance->position + channel];
         }
-        // Initialize the note instance to start playing.
-        struct note* note_instance = &notes[note_to_use];
-        note_instance->waveform = waveform;
-        // If the waveform length is 0, do not play anything.
-        if (waveform->length > 0) {
-            note_instance->volume = emphasis;
-            note_instance->position = 0;
-                        
-            // Add as much of the note instance into the buffer as will fit.
-            for (int i = index_offset; i < AUDIO_BUFFER_SIZE; i++) {
-                // Calculate the value to add to the sound by averaging all the channels.
-                int value = 0;
-                for (int channel = 0; channel < waveform->num_channels; channel++) {
-                    value += waveform->waveform[note_instance->position + channel];
-                }
-                value = value / waveform->num_channels;
-                add_to_sound(i, value * emphasis);
-            
-                note_instance->position += note_instance->waveform->num_channels;
-                if (note_instance->position >= note_instance->waveform->length - note_instance->waveform->num_channels) {
-                    // Reached the end of the note. Reset the note.
-                    note_instance->volume = 0.0;
-                    note_instance->position = 0;
-                    break;
-                }
-            }
+        value = value / waveform->num_channels;
+        add_to_sound(i, value * emphasis);
+
+        note_instance->position += note_instance->waveform->num_channels;
+        if (note_instance->position >= note_instance->waveform->length - note_instance->waveform->num_channels) {
+          // Reached the end of the note. Reset the note.
+          note_instance->volume = 0.0;
+          note_instance->position = 0;
+          break;
         }
+      }
     }
-    pthread_mutex_unlock(&lf_audio_mutex);
-    return result;
+  }
+  pthread_mutex_unlock(&lf_audio_mutex);
+  return result;
 }
diff --git a/util/audio_loop_mac.c b/util/audio_loop_mac.c
index c888d3250..332c434a9 100644
--- a/util/audio_loop_mac.c
+++ b/util/audio_loop_mac.c
@@ -3,11 +3,11 @@
  * @author Edward A. Lee
  * @copyright (c) 2020-2023, The University of California at Berkeley and UT Dallas.
  * License in [BSD 2-clause](https://github.com/lf-lang/reactor-c/blob/main/LICENSE.md)
- * 
+ *
  * @brief Utility function for playing audio on MacOS.
- * 
+ *
  * See audio_loop.h for instructions.
- * 
+ *
  */
 
 #include 
@@ -27,13 +27,13 @@ int16_t* next_buffer = NULL;
 instant_t next_buffer_start_time = NEVER;
 
 struct note {
-    lf_waveform_t* waveform;
-    int position;   // Starts at 0 when note starts.
-    double volume;  // 0.0 for not active.
+  lf_waveform_t* waveform;
+  int position;  // Starts at 0 when note starts.
+  double volume; // 0.0 for not active.
 };
 
 // Array keeping track of notes being played.
-struct note notes[NUM_NOTES] = { 0 };
+struct note notes[NUM_NOTES] = {0};
 
 // Notes are added sequentially.
 // When we reach the end of the notes array, we cycle
@@ -49,133 +49,132 @@ int note_counter = 0;
  * @param value The amplitude to add to whatever amplitude is already there.
  */
 void add_to_sound(int index_offset, double value) {
-    int sample_value = next_buffer[index_offset] + value;
-    if (sample_value > MAX_AMPLITUDE) {
-        sample_value = MAX_AMPLITUDE;
-    } else if (sample_value < -MAX_AMPLITUDE) {
-        sample_value = -MAX_AMPLITUDE;
-    }
-    next_buffer[index_offset] = (int16_t)sample_value;
+  int sample_value = next_buffer[index_offset] + value;
+  if (sample_value > MAX_AMPLITUDE) {
+    sample_value = MAX_AMPLITUDE;
+  } else if (sample_value < -MAX_AMPLITUDE) {
+    sample_value = -MAX_AMPLITUDE;
+  }
+  next_buffer[index_offset] = (int16_t)sample_value;
 }
 
 /**
  * Function that is called by the audio loop to fill the audio buffer
  * with the next batch of audio data.  When this callback occurs,
- * this grabs the mutex lock, copies the buffer that the main program 
+ * this grabs the mutex lock, copies the buffer that the main program
  * has been filling into the destination buffer, clears the next
  * buffer, and updates the start time of the next buffer.
  */
-void callback (void *ignored, AudioQueueRef queue, AudioQueueBufferRef buf_ref) {
-    // Get a C pointer from the reference passed in.
-    AudioQueueBuffer *buf = buf_ref;
-    
-    // Array of samples in the buffer.
-    int16_t *samples = buf->mAudioData;
-    
-    pthread_mutex_lock(&lf_audio_mutex);
-    // Make this the new buffer to write into.
-    next_buffer = buf->mAudioData;
-    // Clear out the next buffer.
-    memset(next_buffer, 0, AUDIO_BUFFER_SIZE * sizeof(int16_t));
-    next_buffer_start_time += BUFFER_DURATION_NS;
-    
-    // Fill the buffer with any trailing sample data that
-    // didn't fit in the previous buffer.
-    for (int note_to_use = 0; note_to_use < NUM_NOTES; note_to_use++) {
-        struct note* note_instance = &(notes[note_to_use]);
-    
-        // Add as much of the note instance into the buffer as will fit.
-        for (int i = 0; i < AUDIO_BUFFER_SIZE; i++) {
-            if (note_instance->waveform == NULL || note_instance->volume == 0.0) {
-                continue;
-            }
-            // Calculate the value to add to the sound by averaging all the channels.
-            int value = 0;
-            for (int channel = 0; channel < note_instance->waveform->num_channels; channel++) {
-                value += note_instance->waveform->waveform[note_instance->position + channel];
-            }
-            value = value / note_instance->waveform->num_channels;
-            add_to_sound(i, value * note_instance->volume);
-        
-            note_instance->position += note_instance->waveform->num_channels;
-            if (note_instance->position >= note_instance->waveform->length - note_instance->waveform->num_channels) {
-                // Reached the end of the note. Reset the note.
-                note_instance->volume = 0.0;
-                note_instance->position = 0;
-                note_instance->waveform = NULL;
-                break;
-            }
-        }
+void callback(void* ignored, AudioQueueRef queue, AudioQueueBufferRef buf_ref) {
+  // Get a C pointer from the reference passed in.
+  AudioQueueBuffer* buf = buf_ref;
+
+  // Array of samples in the buffer.
+  int16_t* samples = buf->mAudioData;
+
+  pthread_mutex_lock(&lf_audio_mutex);
+  // Make this the new buffer to write into.
+  next_buffer = buf->mAudioData;
+  // Clear out the next buffer.
+  memset(next_buffer, 0, AUDIO_BUFFER_SIZE * sizeof(int16_t));
+  next_buffer_start_time += BUFFER_DURATION_NS;
+
+  // Fill the buffer with any trailing sample data that
+  // didn't fit in the previous buffer.
+  for (int note_to_use = 0; note_to_use < NUM_NOTES; note_to_use++) {
+    struct note* note_instance = &(notes[note_to_use]);
+
+    // Add as much of the note instance into the buffer as will fit.
+    for (int i = 0; i < AUDIO_BUFFER_SIZE; i++) {
+      if (note_instance->waveform == NULL || note_instance->volume == 0.0) {
+        continue;
+      }
+      // Calculate the value to add to the sound by averaging all the channels.
+      int value = 0;
+      for (int channel = 0; channel < note_instance->waveform->num_channels; channel++) {
+        value += note_instance->waveform->waveform[note_instance->position + channel];
+      }
+      value = value / note_instance->waveform->num_channels;
+      add_to_sound(i, value * note_instance->volume);
+
+      note_instance->position += note_instance->waveform->num_channels;
+      if (note_instance->position >= note_instance->waveform->length - note_instance->waveform->num_channels) {
+        // Reached the end of the note. Reset the note.
+        note_instance->volume = 0.0;
+        note_instance->position = 0;
+        note_instance->waveform = NULL;
+        break;
+      }
     }
-    
-    // Reinsert this same audio buffer at the end of the queue.
-    AudioQueueEnqueueBuffer (queue, buf_ref, 0, NULL);
-    
-    // In case the other thread is waiting for this event, notify
-    // (the other thread should not be waiting).
-    pthread_cond_signal(&lf_audio_cond);
-    pthread_mutex_unlock(&lf_audio_mutex);
+  }
+
+  // Reinsert this same audio buffer at the end of the queue.
+  AudioQueueEnqueueBuffer(queue, buf_ref, 0, NULL);
+
+  // In case the other thread is waiting for this event, notify
+  // (the other thread should not be waiting).
+  pthread_cond_signal(&lf_audio_cond);
+  pthread_mutex_unlock(&lf_audio_mutex);
 }
 
 /**
  * Run the audio loop indefinitely.
  */
 void* run_audio_loop(void* ignored) {
-    // Create an audio format description.
-    AudioStreamBasicDescription fmt = { 0 };
-    fmt.mSampleRate = 44100;
-    fmt.mFormatID = kAudioFormatLinearPCM;
-    fmt.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
-    fmt.mFramesPerPacket = 1;
-    fmt.mChannelsPerFrame = 1; // 2 for stereo
-    fmt.mBytesPerPacket = fmt.mBytesPerFrame = 2; // x2 for stereo
-    fmt.mBitsPerChannel = 16;
-
-    AudioQueueRef queue;
-
-    // Double buffering. 
-    AudioQueueBufferRef buf_ref1, buf_ref2;
-    
-    int buffer_size_bytes = AUDIO_BUFFER_SIZE * 2;
-    
-    // Create an audio queue output with the specified format.
-    // Third argument is an optional pointer to pass to the callback function.
-    if (AudioQueueNewOutput(&fmt, callback, NULL, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &queue) != 0
-        || AudioQueueAllocateBuffer (queue, buffer_size_bytes, &buf_ref1) != 0
-        || AudioQueueAllocateBuffer (queue, buffer_size_bytes, &buf_ref2) != 0
-    ) {
-        fprintf(stderr, "WARNING: Failed to create audio output. No audio will be produced");
-        return NULL;
-    }
-    // Convert reference to a C pointer.
-    AudioQueueBuffer* buf1 = buf_ref1;
-    AudioQueueBuffer* buf2 = buf_ref2;
-    
-    // Set buffer size
-    buf1->mAudioDataByteSize = buffer_size_bytes;
-    buf2->mAudioDataByteSize = buffer_size_bytes;
-    
-    // Put both buffers in the queue.
-    callback (NULL, queue, buf_ref1);
-    callback (NULL, queue, buf_ref2);
-    // At this point, next_buffer_start_time == start time of the model.
-    
-    // Set the second buffer to be the one being currently written into.
-    next_buffer = buf2->mAudioData;
-    
-    // Set the volume. (Ignoring errors)
-    AudioQueueSetParameter (queue, kAudioQueueParam_Volume, 1.0);
-    
-    // Start audio at start time plus one buffer duration.
-    struct AudioTimeStamp time_stamp = { 0 };
-    time_stamp.mHostTime = next_buffer_start_time + BUFFER_DURATION_NS;
-    
-    // Start as soon as possible.
-    if (AudioQueueStart (queue, &time_stamp) != 0) {
-        fprintf(stderr, "WARNING: Failed to start audio output. No audio will be produced");
-    }
-    CFRunLoopRun();
+  // Create an audio format description.
+  AudioStreamBasicDescription fmt = {0};
+  fmt.mSampleRate = 44100;
+  fmt.mFormatID = kAudioFormatLinearPCM;
+  fmt.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
+  fmt.mFramesPerPacket = 1;
+  fmt.mChannelsPerFrame = 1;                    // 2 for stereo
+  fmt.mBytesPerPacket = fmt.mBytesPerFrame = 2; // x2 for stereo
+  fmt.mBitsPerChannel = 16;
+
+  AudioQueueRef queue;
+
+  // Double buffering.
+  AudioQueueBufferRef buf_ref1, buf_ref2;
+
+  int buffer_size_bytes = AUDIO_BUFFER_SIZE * 2;
+
+  // Create an audio queue output with the specified format.
+  // Third argument is an optional pointer to pass to the callback function.
+  if (AudioQueueNewOutput(&fmt, callback, NULL, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &queue) != 0 ||
+      AudioQueueAllocateBuffer(queue, buffer_size_bytes, &buf_ref1) != 0 ||
+      AudioQueueAllocateBuffer(queue, buffer_size_bytes, &buf_ref2) != 0) {
+    fprintf(stderr, "WARNING: Failed to create audio output. No audio will be produced");
     return NULL;
+  }
+  // Convert reference to a C pointer.
+  AudioQueueBuffer* buf1 = buf_ref1;
+  AudioQueueBuffer* buf2 = buf_ref2;
+
+  // Set buffer size
+  buf1->mAudioDataByteSize = buffer_size_bytes;
+  buf2->mAudioDataByteSize = buffer_size_bytes;
+
+  // Put both buffers in the queue.
+  callback(NULL, queue, buf_ref1);
+  callback(NULL, queue, buf_ref2);
+  // At this point, next_buffer_start_time == start time of the model.
+
+  // Set the second buffer to be the one being currently written into.
+  next_buffer = buf2->mAudioData;
+
+  // Set the volume. (Ignoring errors)
+  AudioQueueSetParameter(queue, kAudioQueueParam_Volume, 1.0);
+
+  // Start audio at start time plus one buffer duration.
+  struct AudioTimeStamp time_stamp = {0};
+  time_stamp.mHostTime = next_buffer_start_time + BUFFER_DURATION_NS;
+
+  // Start as soon as possible.
+  if (AudioQueueStart(queue, &time_stamp) != 0) {
+    fprintf(stderr, "WARNING: Failed to start audio output. No audio will be produced");
+  }
+  CFRunLoopRun();
+  return NULL;
 }
 
 pthread_t loop_thread_id;
@@ -189,123 +188,122 @@ bool loop_thread_started = false;
  *  first audio buffer.
  */
 void lf_start_audio_loop(instant_t start_time) {
-    
-    if (loop_thread_started) return;
-    loop_thread_started = true;
-    
-    // Set the start time of the current buffer to the current time
-    // minus twice the buffer duration. The two calls to callback()
-    // during setup will increment this to equal to the start time.
-    // Then create a thread to
-    // start the audio loop. That thread will place
-    // two empty audio buffers in the queue and will schedule the
-    // audio to start at the current logical time plus the buffer
-    // duration. The current buffer being filled (the second buffer)
-    // will have logical start time 0, but will play later by less
-    // than the buffer duration.
-    next_buffer_start_time = start_time - 2 * BUFFER_DURATION_NS;
-    
-    // Start the audio loop thread.
-    pthread_create(&loop_thread_id, NULL, &run_audio_loop, NULL);
+
+  if (loop_thread_started)
+    return;
+  loop_thread_started = true;
+
+  // Set the start time of the current buffer to the current time
+  // minus twice the buffer duration. The two calls to callback()
+  // during setup will increment this to equal to the start time.
+  // Then create a thread to
+  // start the audio loop. That thread will place
+  // two empty audio buffers in the queue and will schedule the
+  // audio to start at the current logical time plus the buffer
+  // duration. The current buffer being filled (the second buffer)
+  // will have logical start time 0, but will play later by less
+  // than the buffer duration.
+  next_buffer_start_time = start_time - 2 * BUFFER_DURATION_NS;
+
+  // Start the audio loop thread.
+  pthread_create(&loop_thread_id, NULL, &run_audio_loop, NULL);
 }
 
 /**
  * Stop the audio loop thread.
  */
-void lf_stop_audio_loop() {
-    CFRunLoopStop(CFRunLoopGetCurrent());
-}
+void lf_stop_audio_loop() { CFRunLoopStop(CFRunLoopGetCurrent()); }
 
 /**
  * Play the specified waveform with the specified emphasis at
  * the specified time. If the waveform is null, play a simple tick
  * (an impulse). If the waveform has length zero or volume 0,
  * play nothing.
- * 
+ *
  * If the time is too far in the future
  * (beyond the window of the current audio write buffer), then
  * block until the audio output catches up. If the audio playback
  * has already passed the specified point, then play the waveform
  * as soon as possible and return 1.
  * Otherwise, return 0.
- * 
+ *
  * @param waveform The waveform to play or NULL to just play a tick.
  * @param emphasis The emphasis (0.0 for silence, 1.0 for waveform volume).
  * @param start_time The time to start playing the waveform.
  */
 int lf_play_audio_waveform(lf_waveform_t* waveform, float emphasis, instant_t start_time) {
-    int result = 0;
-    pthread_mutex_lock(&lf_audio_mutex);
-    
-    // If the buffer into which to write has not yet been set up, wait.
-    while (next_buffer == NULL) {
-        pthread_cond_wait(&lf_audio_cond, &lf_audio_mutex);
-    }
-    instant_t time_offset = start_time - next_buffer_start_time;
-    
-    // If this is late, then tick right away.
+  int result = 0;
+  pthread_mutex_lock(&lf_audio_mutex);
+
+  // If the buffer into which to write has not yet been set up, wait.
+  while (next_buffer == NULL) {
+    pthread_cond_wait(&lf_audio_cond, &lf_audio_mutex);
+  }
+  instant_t time_offset = start_time - next_buffer_start_time;
+
+  // If this is late, then tick right away.
+  if (time_offset < 0) {
+    // printf("WARNING: audio has passed the specified time by %lld.\n", time_offset);
+    time_offset = 0;
+    result = 1;
+  }
+  // Calculate the index of the tick.
+  size_t index_offset = (time_offset * SAMPLE_RATE) / BILLION;
+
+  // If the offset is beyond the end of the audio buffer, then the program
+  // has gotten ahead of the audio. Wait for audio to catch up.
+  // This happens when a timestamp is at or close to the start time
+  // for the buffer because the audio system has not yet invoked the
+  // callback to swap buffers.  Here, we wait for the callback to
+  // occur.
+  while (index_offset >= AUDIO_BUFFER_SIZE) {
+    pthread_cond_wait(&lf_audio_cond, &lf_audio_mutex);
+    // next_buffer_start_time has been incremented by BUFFER_DURATION_NS.
+    time_offset = start_time - next_buffer_start_time;
+    // time_offset should be >= 0, but just in case:
     if (time_offset < 0) {
-        // printf("WARNING: audio has passed the specified time by %lld.\n", time_offset);
-        time_offset = 0;
-        result = 1;
+      time_offset = 0;
+      result = 1;
     }
-    // Calculate the index of the tick.
-    size_t index_offset = (time_offset * SAMPLE_RATE) / BILLION;
-    
-    // If the offset is beyond the end of the audio buffer, then the program
-    // has gotten ahead of the audio. Wait for audio to catch up.
-    // This happens when a timestamp is at or close to the start time
-    // for the buffer because the audio system has not yet invoked the
-    // callback to swap buffers.  Here, we wait for the callback to
-    // occur.
-    while (index_offset >= AUDIO_BUFFER_SIZE) {
-        pthread_cond_wait(&lf_audio_cond, &lf_audio_mutex);
-        // next_buffer_start_time has been incremented by BUFFER_DURATION_NS.
-        time_offset = start_time - next_buffer_start_time;
-        // time_offset should be >= 0, but just in case:
-        if (time_offset < 0) {
-            time_offset = 0;
-            result = 1;
-        }
-        index_offset = (time_offset * SAMPLE_RATE) / BILLION;
+    index_offset = (time_offset * SAMPLE_RATE) / BILLION;
+  }
+
+  if (waveform == NULL) {
+    // Waveform ID is out of range. Just emit a tick.
+    add_to_sound(index_offset, MAX_AMPLITUDE * emphasis);
+  } else {
+    int note_to_use = note_counter++; // Increment so that the next note uses a new slot.
+    if (note_counter >= NUM_NOTES) {
+      note_counter = 0; // Wrap around.
     }
-    
-    if (waveform == NULL) {
-        // Waveform ID is out of range. Just emit a tick.
-        add_to_sound(index_offset, MAX_AMPLITUDE * emphasis);
-    } else {
-        int note_to_use = note_counter++; // Increment so that the next note uses a new slot.
-        if (note_counter >= NUM_NOTES) {
-            note_counter = 0; // Wrap around.
+    // Initialize the note instance to start playing.
+    struct note* note_instance = ¬es[note_to_use];
+    note_instance->waveform = waveform;
+    // If the waveform length is 0, do not play anything.
+    if (waveform->length > 0) {
+      note_instance->volume = emphasis;
+      note_instance->position = 0;
+
+      // Add as much of the note instance into the buffer as will fit.
+      for (int i = index_offset; i < AUDIO_BUFFER_SIZE; i++) {
+        // Calculate the value to add to the sound by averaging all the channels.
+        int value = 0;
+        for (int channel = 0; channel < waveform->num_channels; channel++) {
+          value += waveform->waveform[note_instance->position + channel];
         }
-        // Initialize the note instance to start playing.
-        struct note* note_instance = ¬es[note_to_use];
-        note_instance->waveform = waveform;
-        // If the waveform length is 0, do not play anything.
-        if (waveform->length > 0) {
-            note_instance->volume = emphasis;
-            note_instance->position = 0;
-                        
-            // Add as much of the note instance into the buffer as will fit.
-            for (int i = index_offset; i < AUDIO_BUFFER_SIZE; i++) {
-                // Calculate the value to add to the sound by averaging all the channels.
-                int value = 0;
-                for (int channel = 0; channel < waveform->num_channels; channel++) {
-                    value += waveform->waveform[note_instance->position + channel];
-                }
-                value = value / waveform->num_channels;
-                add_to_sound(i, value * emphasis);
-            
-                note_instance->position += note_instance->waveform->num_channels;
-                if (note_instance->position >= note_instance->waveform->length - note_instance->waveform->num_channels) {
-                    // Reached the end of the note. Reset the note.
-                    note_instance->volume = 0.0;
-                    note_instance->position = 0;
-                    break;
-                }
-            }
+        value = value / waveform->num_channels;
+        add_to_sound(i, value * emphasis);
+
+        note_instance->position += note_instance->waveform->num_channels;
+        if (note_instance->position >= note_instance->waveform->length - note_instance->waveform->num_channels) {
+          // Reached the end of the note. Reset the note.
+          note_instance->volume = 0.0;
+          note_instance->position = 0;
+          break;
         }
+      }
     }
-    pthread_mutex_unlock(&lf_audio_mutex);
-    return result;
+  }
+  pthread_mutex_unlock(&lf_audio_mutex);
+  return result;
 }
diff --git a/util/deque.c b/util/deque.c
index 4038c3b01..372ad642e 100644
--- a/util/deque.c
+++ b/util/deque.c
@@ -63,9 +63,9 @@ Alternatively, you can call initialize:
  * A node in the queue.
  */
 typedef struct deque_node_t {
-    struct deque_node_t *next;
-    struct deque_node_t *prev;
-    void* value;
+  struct deque_node_t* next;
+  struct deque_node_t* prev;
+  void* value;
 } deque_node_t;
 
 /**
@@ -73,11 +73,11 @@ typedef struct deque_node_t {
  * @param d The deque.
  */
 void deque_initialize(deque_t* d) {
-    if (d != NULL) {
-        d->front = NULL;
-        d->back = NULL;
-        d->size = 0;
-    }
+  if (d != NULL) {
+    d->front = NULL;
+    d->back = NULL;
+    d->size = 0;
+  }
 }
 
 /**
@@ -85,10 +85,10 @@ void deque_initialize(deque_t* d) {
  * @param d The deque.
  */
 bool deque_is_empty(deque_t* d) {
-    if (d != NULL) {
-        return (d->front == NULL);
-    }
-    return true;
+  if (d != NULL) {
+    return (d->front == NULL);
+  }
+  return true;
 }
 
 /**
@@ -96,9 +96,7 @@ bool deque_is_empty(deque_t* d) {
  * @param d The deque.
  * @return The size of the queue.
  */
-size_t deque_size(deque_t* d) {
-	return d->size;
-}
+size_t deque_size(deque_t* d) { return d->size; }
 
 /**
  * Internal function to create a node to insert in the deque.
@@ -108,9 +106,9 @@ size_t deque_size(deque_t* d) {
  * @param value The payload of the node.
  */
 deque_node_t* _deque_create_node(void* value) {
-    deque_node_t *new_node = (deque_node_t *) calloc(1, sizeof(deque_node_t));
-    new_node->value = value;
-    return new_node;
+  deque_node_t* new_node = (deque_node_t*)calloc(1, sizeof(deque_node_t));
+  new_node->value = value;
+  return new_node;
 }
 
 /**
@@ -119,16 +117,16 @@ deque_node_t* _deque_create_node(void* value) {
  * @param value The value to push.
  */
 void deque_push_front(deque_t* d, void* value) {
-    deque_node_t *n = _deque_create_node(value);
-    if (d->front == NULL) {
-    	d->back = d->front = n;
-        d->size = 1;
-    } else {
-        d->front->prev = n;
-        n->next = d->front;
-        d->front = n;
-        d->size++;
-    }
+  deque_node_t* n = _deque_create_node(value);
+  if (d->front == NULL) {
+    d->back = d->front = n;
+    d->size = 1;
+  } else {
+    d->front->prev = n;
+    n->next = d->front;
+    d->front = n;
+    d->size++;
+  }
 }
 
 /**
@@ -137,16 +135,16 @@ void deque_push_front(deque_t* d, void* value) {
  * @param value The value to push.
  */
 void deque_push_back(deque_t* d, void* value) {
-    deque_node_t *n = _deque_create_node(value);
-    if (d->back == NULL) {
-        d->back = d->front = n;
-        d->size++;
-    } else {
-        d->back->next = n;
-        n->prev = d->back;
-        d->back = n;
-        d->size++;
-    }
+  deque_node_t* n = _deque_create_node(value);
+  if (d->back == NULL) {
+    d->back = d->front = n;
+    d->size++;
+  } else {
+    d->back->next = n;
+    n->prev = d->back;
+    d->back = n;
+    d->size++;
+  }
 }
 
 /**
@@ -155,22 +153,22 @@ void deque_push_back(deque_t* d, void* value) {
  * @return The value on the front of the queue or NULL if the queue is empty.
  */
 void* deque_pop_front(deque_t* d) {
-    if (d == NULL || d->front == NULL) {
-        return NULL;
-    }
-
-    void* value = d->front->value;
-    deque_node_t *temp = d->front; // temporary pointer for freeing up memory
-
-    if (d->front == d->back) {
-        // popping last element in deque
-        d->front = d->back = NULL;
-    } else {
-        d->front = d->front->next;
-    }
-    free(temp); // free memory for popped node
-    d->size--;
-    return value;
+  if (d == NULL || d->front == NULL) {
+    return NULL;
+  }
+
+  void* value = d->front->value;
+  deque_node_t* temp = d->front; // temporary pointer for freeing up memory
+
+  if (d->front == d->back) {
+    // popping last element in deque
+    d->front = d->back = NULL;
+  } else {
+    d->front = d->front->next;
+  }
+  free(temp); // free memory for popped node
+  d->size--;
+  return value;
 }
 
 /**
@@ -179,21 +177,21 @@ void* deque_pop_front(deque_t* d) {
  * @return The value on the back of the queue or NULL if the queue is empty.
  */
 void* deque_pop_back(deque_t* d) {
-    if (d == NULL || d->back == NULL) {
-        return NULL;
-    }
-
-    void* value = d->back->value;
-    deque_node_t *temp = d->back; // temporary pointer for freeing up memory
-    if (d->front == d->back) {
-        // popping last element in deque
-        d->front = d->back = NULL;
-    } else {
-        d->back = d->back->prev;
-    }
-    free(temp);
-    d->size--;
-    return value;
+  if (d == NULL || d->back == NULL) {
+    return NULL;
+  }
+
+  void* value = d->back->value;
+  deque_node_t* temp = d->back; // temporary pointer for freeing up memory
+  if (d->front == d->back) {
+    // popping last element in deque
+    d->front = d->back = NULL;
+  } else {
+    d->back = d->back->prev;
+  }
+  free(temp);
+  d->size--;
+  return value;
 }
 
 /**
@@ -202,10 +200,10 @@ void* deque_pop_back(deque_t* d) {
  * @return The value on the front of the queue or NULL if the queue is empty.
  */
 void* deque_peek_back(deque_t* d) {
-    if (d == NULL || d->back == NULL) {
-        return NULL;
-    }
-    return d->back->value;
+  if (d == NULL || d->back == NULL) {
+    return NULL;
+  }
+  return d->back->value;
 }
 
 /**
@@ -214,8 +212,8 @@ void* deque_peek_back(deque_t* d) {
  * @return The value on the back of the queue or NULL if the queue is empty.
  */
 void* deque_peek_front(deque_t* d) {
-    if (d == NULL || d->front == NULL) {
-        return NULL;
-    }
-    return d->front->value;
+  if (d == NULL || d->front == NULL) {
+    return NULL;
+  }
+  return d->front->value;
 }
diff --git a/util/deque.h b/util/deque.h
index 91f9746f7..590f4de34 100644
--- a/util/deque.h
+++ b/util/deque.h
@@ -67,9 +67,9 @@ Alternatively, you can call initialize:
  * A double-ended queue data structure.
  */
 typedef struct deque_t {
-    struct deque_node_t* front;
-    struct deque_node_t* back;
-    size_t size;
+  struct deque_node_t* front;
+  struct deque_node_t* back;
+  size_t size;
 } deque_t;
 
 /**
diff --git a/util/generics.h b/util/generics.h
index 68e63d192..93e29f7e7 100644
--- a/util/generics.h
+++ b/util/generics.h
@@ -50,18 +50,21 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define lf_is_type_equal(typename_a, typename_b) __builtin_types_compatible_p(typename_a, typename_b)
 
 /// Checks if the passed variable `p` is array or a pointer
-#define lf_is_pointer_or_array(p)  (__builtin_classify_type(p) == 5)
+#define lf_is_pointer_or_array(p) (__builtin_classify_type(p) == 5)
 
-#define lf_decay(p)  (&*__builtin_choose_expr(lf_is_pointer_or_array(p), p, NULL))
+#define lf_decay(p) (&*__builtin_choose_expr(lf_is_pointer_or_array(p), p, NULL))
 
 /// Checks if passed variable `p` is a pointer
-#define lf_is_pointer(p)  lf_is_same_type(p, lf_decay(p))
+#define lf_is_pointer(p) lf_is_same_type(p, lf_decay(p))
 
 /// Returns the pointer for specified `p`
 #define lf_get_pointer(p) __builtin_choose_expr(lf_is_pointer(p), p, &p)
 
 /// Checks types for both `left` and `right` and returns appropriate value based on `left` type
-#define lf_to_left_type(left, right) __builtin_choose_expr(lf_is_pointer_or_array(left), __builtin_choose_expr(lf_is_pointer_or_array(right), (right), &(right)), __builtin_choose_expr(lf_is_pointer_or_array(right), *(right), (right)))
+#define lf_to_left_type(left, right)                                                                                   \
+  __builtin_choose_expr(lf_is_pointer_or_array(left),                                                                  \
+                        __builtin_choose_expr(lf_is_pointer_or_array(right), (right), &(right)),                       \
+                        __builtin_choose_expr(lf_is_pointer_or_array(right), *(right), (right)))
 
 #else // buitin are not available
 
@@ -77,4 +80,3 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #endif // __has_builtin
 
 #endif // GENERICS_H
-
diff --git a/util/sensor_simulator.c b/util/sensor_simulator.c
index f96d1b1f7..2ad8b8ef9 100644
--- a/util/sensor_simulator.c
+++ b/util/sensor_simulator.c
@@ -3,7 +3,7 @@
  * @author Edward A. Lee
  * @copyright (c) 2020-2023, The University of California at Berkeley and UT Dallas.
  * License in [BSD 2-clause](https://github.com/lf-lang/reactor-c/blob/main/LICENSE.md)
- * 
+ *
  * @brief Simple terminal-based user interface based on ncurses.
  * See sensor_simulator.h.
  */
@@ -16,7 +16,7 @@
 #include "sensor_simulator.h"
 #include "include/api/schedule.h"
 #include "util.h"
-#include "platform.h"
+#include "low_level_platform.h"
 
 // Maximum number of milliseconds that wgetchr will block for.
 #define WGETCHR_TIMEOUT 1000
@@ -36,59 +36,57 @@ lf_action_base_t* _lf_sensor_any_key_trigger = NULL;
 lf_mutex_t _lf_sensor_mutex;
 lf_cond_t _lf_sensor_simulator_cond_var;
 
-enum _lf_sensor_message_type {
-	_lf_sensor_message, _lf_sensor_tick, _lf_sensor_close_windows
-};
+enum _lf_sensor_message_type { _lf_sensor_message, _lf_sensor_tick, _lf_sensor_close_windows };
 
 typedef struct _lf_sensor_message_t {
-	enum _lf_sensor_message_type type;
-    char* message;
-	struct _lf_sensor_message_t* next;
+  enum _lf_sensor_message_type type;
+  char* message;
+  struct _lf_sensor_message_t* next;
 } _lf_sensor_message_t;
 
 struct {
-	lf_thread_t input_thread_id;
-	lf_thread_t output_thread_id;
-	int thread_created;
+  lf_thread_t input_thread_id;
+  lf_thread_t output_thread_id;
+  int thread_created;
 
-	/**
-	 * Default window from which to get input characters.
-	 * If show_welcome_message() is called, this will be the welcome
-	 * message window. Otherwise, it will be stdscr, the default
-	 * curses window.
-	 */
-	WINDOW* default_window;
+  /**
+   * Default window from which to get input characters.
+   * If show_welcome_message() is called, this will be the welcome
+   * message window. Otherwise, it will be stdscr, the default
+   * curses window.
+   */
+  WINDOW* default_window;
 
-	/** Tick window. */
-	WINDOW* tick_window;
+  /** Tick window. */
+  WINDOW* tick_window;
 
-	/**
-	 * Keep track of the tick cursor position directly so it
-	 * doesn't get as messed up by printf() calls.
-	 */
-	int tick_cursor_x, tick_cursor_y;
-	int print_cursor_x, print_cursor_y;
+  /**
+   * Keep track of the tick cursor position directly so it
+   * doesn't get as messed up by printf() calls.
+   */
+  int tick_cursor_x, tick_cursor_y;
+  int print_cursor_x, print_cursor_y;
 
-	/** Print window. */
-	WINDOW* print_window;
+  /** Print window. */
+  WINDOW* print_window;
 
-	/** The print window height. */
-	int print_window_height;
+  /** The print window height. */
+  int print_window_height;
 
-	/** File to which to write log data, or NULL to send to window. */
-	FILE* log_file;
+  /** File to which to write log data, or NULL to send to window. */
+  FILE* log_file;
 
-	/** The welcome message. */
-	const char** welcome_message;
+  /** The welcome message. */
+  const char** welcome_message;
 
-	/** The length of the welcome message. */
-	int welcome_message_length;
+  /** The length of the welcome message. */
+  int welcome_message_length;
 
-	struct _lf_sensor_message_t* message_q;
-	struct _lf_sensor_message_t* message_recycle_q;
+  struct _lf_sensor_message_t* message_q;
+  struct _lf_sensor_message_t* message_recycle_q;
 
-	/** The width of the tick window. */
-	int tick_window_width;
+  /** The width of the tick window. */
+  int tick_window_width;
 } _lf_sensor;
 
 /**
@@ -101,29 +99,29 @@ struct {
  * @param number_of_lines The number of lines.
  */
 void _lf_show_message(const char* message_lines[], int number_of_lines) {
-    int term_height, term_width;
-    int message_width = 0;
-    // Find the widest message in the list.
-    for (int i = 0; i < number_of_lines; i++) {
-        size_t width = strlen(message_lines[i]);
-        if (width > message_width) {
-            message_width = width;
-        }
+  int term_height, term_width;
+  int message_width = 0;
+  // Find the widest message in the list.
+  for (int i = 0; i < number_of_lines; i++) {
+    size_t width = strlen(message_lines[i]);
+    if (width > message_width) {
+      message_width = width;
     }
-    getmaxyx(stdscr, term_height, term_width);   // Get the size of the terminal window.
-    WINDOW* center_win = newwin(number_of_lines + 2, message_width + 2, 0, 0);
-    box(center_win, 0, 0);
+  }
+  getmaxyx(stdscr, term_height, term_width); // Get the size of the terminal window.
+  WINDOW* center_win = newwin(number_of_lines + 2, message_width + 2, 0, 0);
+  box(center_win, 0, 0);
+  wrefresh(center_win);
+
+  // wattron(center_win, COLOR_PAIR(2));
+
+  for (int i = 0; i < number_of_lines; i++) {
+    mvwprintw(center_win, i + 1, 1, "%s", message_lines[i]);
+    // According to curses docs, the following should not be necessary
+    // after each print. But if I wait and do it later, the output
+    // gets garbled.
     wrefresh(center_win);
-
-    // wattron(center_win, COLOR_PAIR(2));
-
-    for (int i = 0; i < number_of_lines; i++) {
-        mvwprintw(center_win, i + 1, 1, "%s", message_lines[i]);
-        // According to curses docs, the following should not be necessary
-        // after each print. But if I wait and do it later, the output
-        // gets garbled.
-        wrefresh(center_win);
-    }
+  }
 }
 
 /**
@@ -133,14 +131,14 @@ void _lf_show_message(const char* message_lines[], int number_of_lines) {
  * @param width The width of the window.
  */
 void _lf_start_tick_window(int width) {
-    int term_height, term_width;
-    getmaxyx(stdscr, term_height, term_width);   // Get the size of the terminal window.
-    _lf_sensor.tick_window = newwin(term_height, width + 2, 0, term_width - width - 2);
-    box(_lf_sensor.tick_window, 0, 0);
-    wrefresh(_lf_sensor.tick_window);
-    wmove(_lf_sensor.tick_window, 1, 1);  // Ensure to not overwrite the box.
-    _lf_sensor.tick_cursor_x = _lf_sensor.tick_cursor_y = 1;
-    // move(0, 0);
+  int term_height, term_width;
+  getmaxyx(stdscr, term_height, term_width); // Get the size of the terminal window.
+  _lf_sensor.tick_window = newwin(term_height, width + 2, 0, term_width - width - 2);
+  box(_lf_sensor.tick_window, 0, 0);
+  wrefresh(_lf_sensor.tick_window);
+  wmove(_lf_sensor.tick_window, 1, 1); // Ensure to not overwrite the box.
+  _lf_sensor.tick_cursor_x = _lf_sensor.tick_cursor_y = 1;
+  // move(0, 0);
 }
 
 /**
@@ -152,14 +150,14 @@ void _lf_start_tick_window(int width) {
  * @param right Space to leave to the right of the window.
  */
 void _lf_start_print_window(int above, int right) {
-    int term_height, term_width;
-    getmaxyx(stdscr, term_height, term_width);   // Get the size of the terminal window.
-    _lf_sensor.print_window_height = term_height - above;
-    _lf_sensor.print_window = newwin(_lf_sensor.print_window_height, term_width - right, above, 0);
-    wrefresh(_lf_sensor.print_window);
-    wmove(_lf_sensor.print_window, 0, 0);
-    _lf_sensor.print_cursor_y = _lf_sensor.print_cursor_x = 0;
-    _lf_sensor.default_window = _lf_sensor.print_window;
+  int term_height, term_width;
+  getmaxyx(stdscr, term_height, term_width); // Get the size of the terminal window.
+  _lf_sensor.print_window_height = term_height - above;
+  _lf_sensor.print_window = newwin(_lf_sensor.print_window_height, term_width - right, above, 0);
+  wrefresh(_lf_sensor.print_window);
+  wmove(_lf_sensor.print_window, 0, 0);
+  _lf_sensor.print_cursor_y = _lf_sensor.print_cursor_x = 0;
+  _lf_sensor.default_window = _lf_sensor.print_window;
 }
 
 /**
@@ -170,35 +168,35 @@ void _lf_start_print_window(int above, int right) {
  * @param body The message, or NULL for exit type.
  */
 void _lf_sensor_post_message(enum _lf_sensor_message_type type, char* body) {
-    LF_MUTEX_LOCK(&_lf_sensor_mutex);
-    _lf_sensor_message_t* message = _lf_sensor.message_recycle_q;
-    if (message == NULL) {
-    	// Create a new message struct.
-    	message = calloc(1, sizeof(_lf_sensor_message_t));
-    } else {
-    	// Take this item off the recycle queue.
-    	_lf_sensor.message_recycle_q = _lf_sensor.message_recycle_q->next;
+  LF_MUTEX_LOCK(&_lf_sensor_mutex);
+  _lf_sensor_message_t* message = _lf_sensor.message_recycle_q;
+  if (message == NULL) {
+    // Create a new message struct.
+    message = calloc(1, sizeof(_lf_sensor_message_t));
+  } else {
+    // Take this item off the recycle queue.
+    _lf_sensor.message_recycle_q = _lf_sensor.message_recycle_q->next;
+  }
+  message->message = body;
+  message->type = type;
+  message->next = NULL; // Will be the new last message in the queue.
+  // Find the tail of the message queue and put the message there.
+  _lf_sensor_message_t* tail = _lf_sensor.message_q;
+  if (tail == NULL) {
+    _lf_sensor.message_q = message;
+  } else {
+    while (tail != NULL) {
+      if (tail->next == NULL) {
+        // tail is the last message in the queue.
+        tail->next = message;
+        break;
+      }
+      // Not yet at the last message.
+      tail = tail->next;
     }
-    message->message = body;
-    message->type = type;
-	message->next = NULL; // Will be the new last message in the queue.
-	// Find the tail of the message queue and put the message there.
-	_lf_sensor_message_t* tail = _lf_sensor.message_q;
-	if (tail == NULL) {
-		_lf_sensor.message_q = message;
-	} else {
-		while (tail != NULL) {
-			if (tail->next == NULL) {
-				// tail is the last message in the queue.
-				tail->next = message;
-				break;
-			}
-			// Not yet at the last message.
-			tail = tail->next;
-		}
-	}
-	lf_cond_signal(&_lf_sensor_simulator_cond_var);
-    LF_MUTEX_UNLOCK(&_lf_sensor_mutex);
+  }
+  lf_cond_signal(&_lf_sensor_simulator_cond_var);
+  LF_MUTEX_UNLOCK(&_lf_sensor_mutex);
 }
 
 /**
@@ -206,13 +204,13 @@ void _lf_sensor_post_message(enum _lf_sensor_message_type type, char* body) {
  * This acquires the mutex lock.
  */
 void _lf_print_message_function(const char* format, va_list args) {
-	if (_lf_sensor.log_file != NULL) {
-		// Write to a log file in addition to the window.
-		vfprintf(_lf_sensor.log_file, format, args);
-	}
-    char* copy;
-    vasprintf(&copy, format, args);
-    _lf_sensor_post_message(_lf_sensor_message, copy);
+  if (_lf_sensor.log_file != NULL) {
+    // Write to a log file in addition to the window.
+    vfprintf(_lf_sensor.log_file, format, args);
+  }
+  char* copy;
+  vasprintf(&copy, format, args);
+  _lf_sensor_post_message(_lf_sensor_message, copy);
 }
 
 /**
@@ -223,32 +221,32 @@ void _lf_print_message_function(const char* format, va_list args) {
  * Otherwise, the character is ignored.
  */
 void* _lf_sensor_read_input(void* ignored) {
-    while(_lf_sensor.thread_created != 0) {
-        int c = wgetch(_lf_sensor.default_window);
-        if (c == EOF) {
-            // End of file received. Exit thread.
-            break;
-        } else if (c != ERR) {
-            // wgetch returns ERR if it times out, in which case, we continue
-            // and check whether _lf_sensor.thread_created has been set to 0.
-            // So here, ERR was not returned.
-            // It is imperative that we not hold the _lf_sensor_mutex when
-            // calling schedule(), because schedule() acquires another mutex.
-            // We would create a deadlock risk.  The following code is correct
-            // because a _lf_sensor_trigger_table entry, once assigned a value, becomes
-            // immutable.
-            if (c == '\n' && _lf_sensor_sensor_newline_trigger != NULL) {
-                lf_schedule_copy(_lf_sensor_sensor_newline_trigger, 0, &c, 1);
-            } else if (c - 32 >= 0 && c - 32 < LF_SENSOR_TRIGGER_TABLE_SIZE && _lf_sensor_trigger_table[c-32] != NULL) {
-                lf_schedule_copy(_lf_sensor_trigger_table[c-32], 0, &c, 1);
-            }
-            // Any key trigger triggers after specific keys.
-            if (_lf_sensor_any_key_trigger != NULL) {
-                lf_schedule_copy(_lf_sensor_any_key_trigger, 0, &c, 1);
-            }
-        }
+  while (_lf_sensor.thread_created != 0) {
+    int c = wgetch(_lf_sensor.default_window);
+    if (c == EOF) {
+      // End of file received. Exit thread.
+      break;
+    } else if (c != ERR) {
+      // wgetch returns ERR if it times out, in which case, we continue
+      // and check whether _lf_sensor.thread_created has been set to 0.
+      // So here, ERR was not returned.
+      // It is imperative that we not hold the _lf_sensor_mutex when
+      // calling schedule(), because schedule() acquires another mutex.
+      // We would create a deadlock risk.  The following code is correct
+      // because a _lf_sensor_trigger_table entry, once assigned a value, becomes
+      // immutable.
+      if (c == '\n' && _lf_sensor_sensor_newline_trigger != NULL) {
+        lf_schedule_copy(_lf_sensor_sensor_newline_trigger, 0, &c, 1);
+      } else if (c - 32 >= 0 && c - 32 < LF_SENSOR_TRIGGER_TABLE_SIZE && _lf_sensor_trigger_table[c - 32] != NULL) {
+        lf_schedule_copy(_lf_sensor_trigger_table[c - 32], 0, &c, 1);
+      }
+      // Any key trigger triggers after specific keys.
+      if (_lf_sensor_any_key_trigger != NULL) {
+        lf_schedule_copy(_lf_sensor_any_key_trigger, 0, &c, 1);
+      }
     }
-    return NULL;
+  }
+  return NULL;
 }
 
 /**
@@ -256,196 +254,191 @@ void* _lf_sensor_read_input(void* ignored) {
  * message window.
  */
 void* _lf_sensor_simulator_thread(void* ignored) {
-    LF_MUTEX_LOCK(&_lf_sensor_mutex);
-    _lf_sensor.thread_created = 1;
-    // Clean up any previous curses state.
-    if (!isendwin()) {
-        endwin();
+  LF_MUTEX_LOCK(&_lf_sensor_mutex);
+  _lf_sensor.thread_created = 1;
+  // Clean up any previous curses state.
+  if (!isendwin()) {
+    endwin();
+  }
+  // Initialize ncurses.
+  LF_PRINT_DEBUG("Initializing ncurses.");
+  initscr();
+  start_color();                     // Allow colors.
+  noecho();                          // Don't echo input
+  cbreak();                          // Don't wait for Return or Enter
+  wtimeout(stdscr, WGETCHR_TIMEOUT); // Don't wait longer than this for input.
+  keypad(stdscr, TRUE);              // Enable keypad input.
+  refresh();                         // Not documented, but needed?
+
+  _lf_sensor.default_window = stdscr;
+  if (_lf_sensor.welcome_message != NULL && _lf_sensor.welcome_message_length > 0) {
+    _lf_show_message(_lf_sensor.welcome_message, _lf_sensor.welcome_message_length);
+  }
+  _lf_sensor.tick_window = stdscr;
+  if (_lf_sensor.tick_window_width > 0) {
+    _lf_start_tick_window(_lf_sensor.tick_window_width);
+  }
+  _lf_start_print_window(_lf_sensor.welcome_message_length + 2, _lf_sensor.tick_window_width + 2);
+
+  // ncurses is not thread safe, but since the wtimeout option does not work,
+  // there is no way to simultaneously listen for inputs and produce outputs.
+  // Here, we create a thread that produces no output and just listens for input.
+  // This thread is exclusively responsible for producing output.
+  int result = lf_thread_create(&_lf_sensor.input_thread_id, &_lf_sensor_read_input, NULL);
+  if (result != 0) {
+    lf_print_error("Failed to start sensor simulator input listener!");
+  }
+
+  while (_lf_sensor.thread_created != 0) {
+    // Sadly, ncurses is not thread safe, so this thread deals with all messages.
+    while (_lf_sensor.message_q == NULL) {
+      lf_cond_wait(&_lf_sensor_simulator_cond_var);
     }
-    // Initialize ncurses.
-    LF_PRINT_DEBUG("Initializing ncurses.");
-    initscr();
-    start_color();     // Allow colors.
-    noecho();          // Don't echo input
-    cbreak();          // Don't wait for Return or Enter
-    wtimeout(stdscr, WGETCHR_TIMEOUT); // Don't wait longer than this for input.
-    keypad(stdscr, TRUE);  // Enable keypad input.
-    refresh();         // Not documented, but needed?
-
-    _lf_sensor.default_window = stdscr;
-    if (_lf_sensor.welcome_message != NULL && _lf_sensor.welcome_message_length > 0) {
-        _lf_show_message(_lf_sensor.welcome_message, _lf_sensor.welcome_message_length);
-    }
-    _lf_sensor.tick_window = stdscr;
-    if (_lf_sensor.tick_window_width > 0) {
-        _lf_start_tick_window(_lf_sensor.tick_window_width);
-    }
-    _lf_start_print_window(_lf_sensor.welcome_message_length + 2, _lf_sensor.tick_window_width + 2);
-
-    // ncurses is not thread safe, but since the wtimeout option does not work,
-    // there is no way to simultaneously listen for inputs and produce outputs.
-    // Here, we create a thread that produces no output and just listens for input.
-    // This thread is exclusively responsible for producing output.
-    int result = lf_thread_create(&_lf_sensor.input_thread_id, &_lf_sensor_read_input, NULL);
-    if (result != 0) {
-        lf_print_error("Failed to start sensor simulator input listener!");
-    }
-
-    while(_lf_sensor.thread_created != 0) {
-    	// Sadly, ncurses is not thread safe, so this thread deals with all messages.
-    	while (_lf_sensor.message_q == NULL) {
-            lf_cond_wait(&_lf_sensor_simulator_cond_var);
-    	}
-    	// Show all messages in the queue.
-		while (_lf_sensor.message_q != NULL) {
-			if (_lf_sensor.message_q->type == _lf_sensor_close_windows) {
-			    lf_register_print_function(NULL, -1);
-			    endwin();
-			    LF_MUTEX_UNLOCK(&_lf_sensor_mutex);
-				return NULL;
-			} else if (_lf_sensor.message_q->type == _lf_sensor_tick) {
-			    wmove(_lf_sensor.tick_window, _lf_sensor.tick_cursor_y, _lf_sensor.tick_cursor_x);
-			    wprintw(_lf_sensor.tick_window, _lf_sensor.message_q->message);
-			    int tick_height, tick_width;
-			    getmaxyx(_lf_sensor.tick_window, tick_height, tick_width);
-			    _lf_sensor.tick_cursor_x += strlen(_lf_sensor.message_q->message);
-			    if (_lf_sensor.tick_cursor_x >= tick_width - 1) {
-			        _lf_sensor.tick_cursor_x = 1;
-			        _lf_sensor.tick_cursor_y++;
-			    }
-			    if (_lf_sensor.tick_cursor_y >= tick_height - 1) {
-			        _lf_sensor.tick_cursor_y = 1;
-			    }
-			    wmove(_lf_sensor.tick_window, _lf_sensor.tick_cursor_y, _lf_sensor.tick_cursor_x);
-			    wrefresh(_lf_sensor.tick_window);
-			} else if (_lf_sensor.message_q->type == _lf_sensor_message) {
-				wmove(_lf_sensor.print_window, _lf_sensor.print_cursor_y, _lf_sensor.print_cursor_x);
-				wclrtoeol(_lf_sensor.print_window);
-				wprintw(_lf_sensor.print_window, _lf_sensor.message_q->message);
-				_lf_sensor.print_cursor_x = 0;
-				_lf_sensor.print_cursor_y += 1;
-				if (_lf_sensor.print_cursor_y >= _lf_sensor.print_window_height - 1) {
-					_lf_sensor.print_cursor_y = 0;
-				}
-				wmove(_lf_sensor.print_window, _lf_sensor.print_cursor_y, _lf_sensor.print_cursor_x);
-				wclrtoeol(_lf_sensor.print_window);
-				wrefresh(_lf_sensor.print_window);
-
-				free(_lf_sensor.message_q->message);
-			}
-			refresh();
-			_lf_sensor_message_t* tmp_recycle = _lf_sensor.message_recycle_q;
-			_lf_sensor_message_t* tmp_message = _lf_sensor.message_q;
-			_lf_sensor.message_recycle_q = _lf_sensor.message_q;
-			_lf_sensor.message_q = tmp_message->next;
-			_lf_sensor.message_recycle_q->next = tmp_recycle;
-		}
+    // Show all messages in the queue.
+    while (_lf_sensor.message_q != NULL) {
+      if (_lf_sensor.message_q->type == _lf_sensor_close_windows) {
+        lf_register_print_function(NULL, -1);
+        endwin();
+        LF_MUTEX_UNLOCK(&_lf_sensor_mutex);
+        return NULL;
+      } else if (_lf_sensor.message_q->type == _lf_sensor_tick) {
+        wmove(_lf_sensor.tick_window, _lf_sensor.tick_cursor_y, _lf_sensor.tick_cursor_x);
+        wprintw(_lf_sensor.tick_window, _lf_sensor.message_q->message);
+        int tick_height, tick_width;
+        getmaxyx(_lf_sensor.tick_window, tick_height, tick_width);
+        _lf_sensor.tick_cursor_x += strlen(_lf_sensor.message_q->message);
+        if (_lf_sensor.tick_cursor_x >= tick_width - 1) {
+          _lf_sensor.tick_cursor_x = 1;
+          _lf_sensor.tick_cursor_y++;
+        }
+        if (_lf_sensor.tick_cursor_y >= tick_height - 1) {
+          _lf_sensor.tick_cursor_y = 1;
+        }
+        wmove(_lf_sensor.tick_window, _lf_sensor.tick_cursor_y, _lf_sensor.tick_cursor_x);
+        wrefresh(_lf_sensor.tick_window);
+      } else if (_lf_sensor.message_q->type == _lf_sensor_message) {
+        wmove(_lf_sensor.print_window, _lf_sensor.print_cursor_y, _lf_sensor.print_cursor_x);
+        wclrtoeol(_lf_sensor.print_window);
+        wprintw(_lf_sensor.print_window, _lf_sensor.message_q->message);
+        _lf_sensor.print_cursor_x = 0;
+        _lf_sensor.print_cursor_y += 1;
+        if (_lf_sensor.print_cursor_y >= _lf_sensor.print_window_height - 1) {
+          _lf_sensor.print_cursor_y = 0;
+        }
+        wmove(_lf_sensor.print_window, _lf_sensor.print_cursor_y, _lf_sensor.print_cursor_x);
+        wclrtoeol(_lf_sensor.print_window);
+        wrefresh(_lf_sensor.print_window);
+
+        free(_lf_sensor.message_q->message);
+      }
+      refresh();
+      _lf_sensor_message_t* tmp_recycle = _lf_sensor.message_recycle_q;
+      _lf_sensor_message_t* tmp_message = _lf_sensor.message_q;
+      _lf_sensor.message_recycle_q = _lf_sensor.message_q;
+      _lf_sensor.message_q = tmp_message->next;
+      _lf_sensor.message_recycle_q->next = tmp_recycle;
     }
-    LF_MUTEX_UNLOCK(&_lf_sensor_mutex);
-    return NULL;
+  }
+  LF_MUTEX_UNLOCK(&_lf_sensor_mutex);
+  return NULL;
 }
 
 void end_sensor_simulator() {
-    lf_register_print_function(NULL, -1);
-	_lf_sensor_post_message(_lf_sensor_close_windows, NULL);
+  lf_register_print_function(NULL, -1);
+  _lf_sensor_post_message(_lf_sensor_close_windows, NULL);
 
-	void* thread_return;
-	lf_thread_join(_lf_sensor.output_thread_id, &thread_return);
+  void* thread_return;
+  lf_thread_join(_lf_sensor.output_thread_id, &thread_return);
 
-    // Timeout mode should result in the input thread exiting on its own.
-    // pthread_kill(_lf_sensor.input_thread_id, SIGINT);
+  // Timeout mode should result in the input thread exiting on its own.
+  // pthread_kill(_lf_sensor.input_thread_id, SIGINT);
 
-    _lf_sensor.thread_created = 0;
-	if (_lf_sensor.log_file != NULL) {
-		fclose(_lf_sensor.log_file);
-	}
+  _lf_sensor.thread_created = 0;
+  if (_lf_sensor.log_file != NULL) {
+    fclose(_lf_sensor.log_file);
+  }
 }
 
-int start_sensor_simulator(
-		const char* message_lines[],
-		int number_of_lines,
-		int tick_window_width,
-		char* log_file,
-		int log_level
-) {
-    int result = 0;
-    _lf_sensor.welcome_message = message_lines;
-    _lf_sensor.welcome_message_length = number_of_lines;
-    _lf_sensor.tick_window_width = tick_window_width;
-    _lf_sensor.message_q = NULL;
-    _lf_sensor.message_recycle_q = NULL;
-    _lf_sensor.thread_created = 0;
-    LF_COND_INIT(&_lf_sensor_simulator_cond_var, &_lf_sensor_mutex);
-    if (_lf_sensor.thread_created == 0) {
-        // Thread has not been created.
-        // Zero out the trigger table.
-        for (int i = 0; i < LF_SENSOR_TRIGGER_TABLE_SIZE; i++) {
-            _lf_sensor_trigger_table[i] = NULL;
-        }
-        // For some strange reason, this log file has to be opened before
-        // ncurses is initialized, otherwise, ncurses gets disabled (won't
-        // accept input).
-    	if (log_file != NULL) {
-    		_lf_sensor.log_file = fopen(log_file, "w");
-    	} else {
-    		_lf_sensor.log_file = NULL;
-    	}
-    	// Register the print function before starting the thread.
-    	// Subsequent print messages will go into the queue.
-        lf_register_print_function(&_lf_print_message_function, log_level);
-
-        // FIXME: Is this needed? Users should call end_sensor_simulator in
-        // a shutdown reaction.
-        if (atexit(end_sensor_simulator) != 0) {
-            lf_print_warning("sensor_simulator: Failed to register end_sensor_simulator function!");
-        }
+int start_sensor_simulator(const char* message_lines[], int number_of_lines, int tick_window_width, char* log_file,
+                           int log_level) {
+  int result = 0;
+  _lf_sensor.welcome_message = message_lines;
+  _lf_sensor.welcome_message_length = number_of_lines;
+  _lf_sensor.tick_window_width = tick_window_width;
+  _lf_sensor.message_q = NULL;
+  _lf_sensor.message_recycle_q = NULL;
+  _lf_sensor.thread_created = 0;
+  LF_COND_INIT(&_lf_sensor_simulator_cond_var, &_lf_sensor_mutex);
+  if (_lf_sensor.thread_created == 0) {
+    // Thread has not been created.
+    // Zero out the trigger table.
+    for (int i = 0; i < LF_SENSOR_TRIGGER_TABLE_SIZE; i++) {
+      _lf_sensor_trigger_table[i] = NULL;
+    }
+    // For some strange reason, this log file has to be opened before
+    // ncurses is initialized, otherwise, ncurses gets disabled (won't
+    // accept input).
+    if (log_file != NULL) {
+      _lf_sensor.log_file = fopen(log_file, "w");
+    } else {
+      _lf_sensor.log_file = NULL;
+    }
+    // Register the print function before starting the thread.
+    // Subsequent print messages will go into the queue.
+    lf_register_print_function(&_lf_print_message_function, log_level);
+
+    // FIXME: Is this needed? Users should call end_sensor_simulator in
+    // a shutdown reaction.
+    if (atexit(end_sensor_simulator) != 0) {
+      lf_print_warning("sensor_simulator: Failed to register end_sensor_simulator function!");
+    }
 
-        // ncurses is not thread safe, so create a one thread that does all
-        // the writing to the window and one that does all the reading.
-        result = lf_thread_create(&_lf_sensor.output_thread_id, &_lf_sensor_simulator_thread, NULL);
-        if (result != 0) {
-            lf_print_error("Failed to start sensor simulator!");
-        }
+    // ncurses is not thread safe, so create a one thread that does all
+    // the writing to the window and one that does all the reading.
+    result = lf_thread_create(&_lf_sensor.output_thread_id, &_lf_sensor_simulator_thread, NULL);
+    if (result != 0) {
+      lf_print_error("Failed to start sensor simulator!");
     }
-    return result;
+  }
+  return result;
 }
 
 void show_tick(const char* character) {
-    if (character != NULL) {
-        char* copy;
-        asprintf(&copy, "%s", character);
-    	_lf_sensor_post_message(_lf_sensor_tick, copy);
-    }
+  if (character != NULL) {
+    char* copy;
+    asprintf(&copy, "%s", character);
+    _lf_sensor_post_message(_lf_sensor_tick, copy);
+  }
 }
 
 int register_sensor_key(char key, void* action) {
-    if (action == NULL) {
-        return 3;
-    }
-    int index = key - 32;
-    if (key != '\n' && key != '\0' && (index < 0 || index >= LF_SENSOR_TRIGGER_TABLE_SIZE)) {
-        return 2;
+  if (action == NULL) {
+    return 3;
+  }
+  int index = key - 32;
+  if (key != '\n' && key != '\0' && (index < 0 || index >= LF_SENSOR_TRIGGER_TABLE_SIZE)) {
+    return 2;
+  }
+  int result = 0;
+  LF_MUTEX_LOCK(&_lf_sensor_mutex);
+  if (key == '\n') {
+    if (_lf_sensor_sensor_newline_trigger != NULL) {
+      result = 1;
+    } else {
+      _lf_sensor_sensor_newline_trigger = (lf_action_base_t*)action;
     }
-    int result = 0;
-    LF_MUTEX_LOCK(&_lf_sensor_mutex);
-    if (key == '\n') {
-        if (_lf_sensor_sensor_newline_trigger != NULL) {
-            result = 1;
-        } else {
-            _lf_sensor_sensor_newline_trigger = (lf_action_base_t*)action;
-        }
-    } else if (key == '\0') {
-        // Any key trigger.
-        if (_lf_sensor_any_key_trigger != NULL) {
-            result = 1;
-        } else {
-            _lf_sensor_any_key_trigger = (lf_action_base_t*)action;
-        }
-    } else if (_lf_sensor_trigger_table[index] != NULL) {
-        result = 1;
+  } else if (key == '\0') {
+    // Any key trigger.
+    if (_lf_sensor_any_key_trigger != NULL) {
+      result = 1;
     } else {
-        _lf_sensor_trigger_table[index] = (lf_action_base_t*)action;
+      _lf_sensor_any_key_trigger = (lf_action_base_t*)action;
     }
-    LF_MUTEX_UNLOCK(&_lf_sensor_mutex);
-    return result;
+  } else if (_lf_sensor_trigger_table[index] != NULL) {
+    result = 1;
+  } else {
+    _lf_sensor_trigger_table[index] = (lf_action_base_t*)action;
+  }
+  LF_MUTEX_UNLOCK(&_lf_sensor_mutex);
+  return result;
 }
diff --git a/util/sensor_simulator.h b/util/sensor_simulator.h
index 82749f3d3..22086adab 100644
--- a/util/sensor_simulator.h
+++ b/util/sensor_simulator.h
@@ -5,27 +5,27 @@
  * @author Edward A. Lee
  * @copyright (c) 2020-2023, The University of California at Berkeley and UT Dallas.
  * License in [BSD 2-clause](https://github.com/lf-lang/reactor-c/blob/main/LICENSE.md)
- * 
+ *
  * @brief Simple terminal-based user interface based on ncurses.
- * 
+ *
  * When prototyping Lingua Franca programs on a laptop, it is convenient to use
  * the laptop keyboard to simulate asynchronous sensor input. This small library
  * provides a convenient way to do that.
- * 
+ *
  * To use this, include the following flags in your target properties:
  * 
  * target C {
     files: [
-        "/lib/c/reactor-c/util/sensor_simulator.c", 
+        "/lib/c/reactor-c/util/sensor_simulator.c",
         "/lib/c/reactor-c/util/sensor_simulator.h",
     ],
     cmake-include: [
         "/lib/c/reactor-c/util/sensor_simulator.cmake",
-    ] 
+    ]
  * };
  * 
 * This requires `ncurses`, a library providing somewhat portable keyboard access.
- * 
+ *
 * In addition, you need this in your Lingua Franca file:
 *
  * preamble {=
@@ -35,7 +35,7 @@
  * To start the sensor simulator, call `start_sensor_simulator` passing it
  * an array of strings to print and the width of the window to use to display
  * characters using the `show_tick` function.
- * 
+ *
  * To print messages to the screen, rather than using printf(), you should use
  * the messaging functions in util.h, such as lf_print(). Otherwise, your messages
  * will be printed over other information.
@@ -60,13 +60,8 @@
  *  LOG_LEVEL_INFO, LOG_LEVEL_LOG, LOG_LEVEL_DEBUG, or LOG_LEVEL_ALL.
  * @return 0 for success, error code for failure.
  */
-int start_sensor_simulator(
-		const char* message_lines[],
-		int number_of_lines,
-		int tick_window_width,
-		char* log_file,
-		int log_level
-);
+int start_sensor_simulator(const char* message_lines[], int number_of_lines, int tick_window_width, char* log_file,
+                           int log_level);
 
 /**
  * End ncurses control of the terminal.
diff --git a/util/tracing/Makefile b/util/tracing/Makefile
index 15fd0c13e..436087752 100644
--- a/util/tracing/Makefile
+++ b/util/tracing/Makefile
@@ -6,8 +6,14 @@ CURRENT_PATH=$(shell pwd)
 CC=gcc
 CFLAGS=	-I$(REACTOR_C)/include/core/ \
 		-I$(REACTOR_C)/include/core/modal_models \
-		-I$(REACTOR_C)/include/core/platform \
 		-I$(REACTOR_C)/include/core/utils \
+		-I$(REACTOR_C)/include \
+		-I$(REACTOR_C)/low_level_platform/api \
+		-I$(REACTOR_C)/tag/api \
+		-I$(REACTOR_C)/trace/api \
+		-I$(REACTOR_C)/version/api \
+		-I$(REACTOR_C)/logging/api \
+		-I$(REACTOR_C)/trace/impl/include \
 		-DLF_SINGLE_THREADED=1 \
 		-Wall
 DEPS=
diff --git a/util/tracing/influxdb.h b/util/tracing/influxdb.h
index 5d423d633..eef34cf29 100644
--- a/util/tracing/influxdb.h
+++ b/util/tracing/influxdb.h
@@ -24,164 +24,176 @@
             INFLUX_END);
 
   **NOTICE**: For best performance you should sort tags by key before sending them to the database.
-              The sort should match the results from the [Go bytes.Compare function](https://golang.org/pkg/bytes/#Compare).
+              The sort should match the results from the [Go bytes.Compare
+  function](https://golang.org/pkg/bytes/#Compare).
  */
 
-#define INFLUX_MEAS(m)        IF_TYPE_MEAS, (m)
-#define INFLUX_TAG(k, v)      IF_TYPE_TAG, (k), (v)
-#define INFLUX_F_STR(k, v)    IF_TYPE_FIELD_STRING, (k), (v)
+#define INFLUX_MEAS(m) IF_TYPE_MEAS, (m)
+#define INFLUX_TAG(k, v) IF_TYPE_TAG, (k), (v)
+#define INFLUX_F_STR(k, v) IF_TYPE_FIELD_STRING, (k), (v)
 #define INFLUX_F_FLT(k, v, p) IF_TYPE_FIELD_FLOAT, (k), (double)(v), (int)(p)
-#define INFLUX_F_INT(k, v)    IF_TYPE_FIELD_INTEGER, (k), (long long)(v)
-#define INFLUX_F_BOL(k, v)    IF_TYPE_FIELD_BOOLEAN, (k), ((v) ? 1 : 0)
-#define INFLUX_TS(ts)         IF_TYPE_TIMESTAMP, (long long)(ts)
-#define INFLUX_END            IF_TYPE_ARG_END
-
-typedef struct _influx_client_t
-{
-    char* host;
-    int   port;
-    char* db;  // http only
-    char* usr; // http only [optional for auth]
-    char* pwd; // http only [optional for auth]
-    char* token; // http only
+#define INFLUX_F_INT(k, v) IF_TYPE_FIELD_INTEGER, (k), (long long)(v)
+#define INFLUX_F_BOL(k, v) IF_TYPE_FIELD_BOOLEAN, (k), ((v) ? 1 : 0)
+#define INFLUX_TS(ts) IF_TYPE_TIMESTAMP, (long long)(ts)
+#define INFLUX_END IF_TYPE_ARG_END
+
+typedef struct _influx_client_t {
+  char* host;
+  int port;
+  char* db;    // http only
+  char* usr;   // http only [optional for auth]
+  char* pwd;   // http only [optional for auth]
+  char* token; // http only
 } influx_client_t;
 
-typedef struct _influx_v2_client_t
-{
-    char* host;
-    int   port;
-    char* org;  
-    char* bucket;
-    char* precision;
-    char* usr; // http only [optional for auth]
-    char* pwd; // http only [optional for auth]
-    char* token; // http only
+typedef struct _influx_v2_client_t {
+  char* host;
+  int port;
+  char* org;
+  char* bucket;
+  char* precision;
+  char* usr;   // http only [optional for auth]
+  char* pwd;   // http only [optional for auth]
+  char* token; // http only
 } influx_v2_client_t;
 
-int format_line(char **buf, int *len, size_t used, ...);
+int format_line(char** buf, int* len, size_t used, ...);
 int post_http(influx_client_t* c, ...);
 int send_udp(influx_client_t* c, ...);
 int post_curl(influx_v2_client_t* c, ...);
 
-#define IF_TYPE_ARG_END       0
-#define IF_TYPE_MEAS          1
-#define IF_TYPE_TAG           2
-#define IF_TYPE_FIELD_STRING  3
-#define IF_TYPE_FIELD_FLOAT   4
+#define IF_TYPE_ARG_END 0
+#define IF_TYPE_MEAS 1
+#define IF_TYPE_TAG 2
+#define IF_TYPE_FIELD_STRING 3
+#define IF_TYPE_FIELD_FLOAT 4
 #define IF_TYPE_FIELD_INTEGER 5
 #define IF_TYPE_FIELD_BOOLEAN 6
-#define IF_TYPE_TIMESTAMP     7
+#define IF_TYPE_TIMESTAMP 7
 
 int _escaped_append(char** dest, size_t* len, size_t* used, const char* src, const char* escape_seq);
-int _begin_line(char **buf);
+int _begin_line(char** buf);
 int _format_line(char** buf, va_list ap);
-int _format_line2(char** buf, va_list ap, size_t *, size_t);
-int post_http_send_line(influx_client_t *c, char *buf, int len);
-int send_udp_line(influx_client_t* c, char *line, int len);
-
-int post_http_send_line(influx_client_t *c, char *buf, int len)
-{
-    int sock = -1   , ret_code = 0, content_length = 0;
-    struct sockaddr_in addr;
-    struct iovec iv[2];
-    char ch;
-
-    iv[1].iov_base = buf;
-    iv[1].iov_len = len;
-
-    if(!(iv[0].iov_base = (char*)malloc(len = 0x800))) {
-        free(iv[1].iov_base);
-        return -2;
-    }
-
-    for(;;) {
-        iv[0].iov_len = snprintf((char*)iv[0].iov_base, len, 
-            "POST /write?db=%s&u=%s&p=%s HTTP/1.1\r\n"
-            "Host: %s\r\n"
-            "Accept: application/json\r\n"
-            "Content-type: text/plain\r\n"
-            "Authorization: Token %s\r\n"
-            "Content-Length: %zd\r\n"
-            "\r\n", // Final blank line is needed.
-            c->db, c->usr ? c->usr : "", c->pwd ? c->pwd : "", c->host, c->token ? c->token : "", iv[1].iov_len);
-        if((int)iv[0].iov_len >= len && !(iv[0].iov_base = (char*)realloc(iv[0].iov_base, len *= 2))) {
-            free(iv[1].iov_base);
-            free(iv[0].iov_base);
-            return -3;
-        }
-        else
-            break;
-    }
-
-	fprintf(stderr, "influxdb-c::post_http: iv[0] = '%s'\n", (char *)iv[0].iov_base);
-	fprintf(stderr, "influxdb-c::post_http: iv[1] = '%s'\n", (char *)iv[1].iov_base);
-
-    addr.sin_family = AF_INET;
-    addr.sin_port = htons(c->port);
-    // EAL: Rather than just an IP address, allow a hostname, like "localhost"
-    struct hostent* resolved_host = gethostbyname(c->host);
-    if (!resolved_host) {
-        free(iv[1].iov_base);
-        free(iv[0].iov_base);
-        return -4;
-    }
-    memcpy(&addr.sin_addr, resolved_host->h_addr_list[0], resolved_host->h_length);
-    /*
-    if((addr.sin_addr.s_addr = inet_addr(resolved_host->h_addr)) == INADDR_NONE) {
-        free(iv[1].iov_base);
-        free(iv[0].iov_base);
-        return -4;
-    }
-    */
+int _format_line2(char** buf, va_list ap, size_t*, size_t);
+int post_http_send_line(influx_client_t* c, char* buf, int len);
+int send_udp_line(influx_client_t* c, char* line, int len);
 
-    if((sock = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
-        free(iv[1].iov_base);
-        free(iv[0].iov_base);
-        return -5;
-    }
+int post_http_send_line(influx_client_t* c, char* buf, int len) {
+  int sock = -1, ret_code = 0, content_length = 0;
+  struct sockaddr_in addr;
+  struct iovec iv[2];
+  char ch;
 
-    if(connect(sock, (struct sockaddr*)(&addr), sizeof(addr)) < 0) {
-        ret_code = -6;
-        goto END;
-    }
+  iv[1].iov_base = buf;
+  iv[1].iov_len = len;
 
-    if(writev(sock, iv, 2) < (int)(iv[0].iov_len + iv[1].iov_len)) {
-        ret_code = -7;
-        goto END;
+  if (!(iv[0].iov_base = (char*)malloc(len = 0x800))) {
+    free(iv[1].iov_base);
+    return -2;
+  }
+
+  for (;;) {
+    iv[0].iov_len =
+        snprintf((char*)iv[0].iov_base, len,
+                 "POST /write?db=%s&u=%s&p=%s HTTP/1.1\r\n"
+                 "Host: %s\r\n"
+                 "Accept: application/json\r\n"
+                 "Content-type: text/plain\r\n"
+                 "Authorization: Token %s\r\n"
+                 "Content-Length: %zd\r\n"
+                 "\r\n", // Final blank line is needed.
+                 c->db, c->usr ? c->usr : "", c->pwd ? c->pwd : "", c->host, c->token ? c->token : "", iv[1].iov_len);
+    if ((int)iv[0].iov_len >= len && !(iv[0].iov_base = (char*)realloc(iv[0].iov_base, len *= 2))) {
+      free(iv[1].iov_base);
+      free(iv[0].iov_base);
+      return -3;
+    } else
+      break;
+  }
+
+  fprintf(stderr, "influxdb-c::post_http: iv[0] = '%s'\n", (char*)iv[0].iov_base);
+  fprintf(stderr, "influxdb-c::post_http: iv[1] = '%s'\n", (char*)iv[1].iov_base);
+
+  addr.sin_family = AF_INET;
+  addr.sin_port = htons(c->port);
+  // EAL: Rather than just an IP address, allow a hostname, like "localhost"
+  struct hostent* resolved_host = gethostbyname(c->host);
+  if (!resolved_host) {
+    free(iv[1].iov_base);
+    free(iv[0].iov_base);
+    return -4;
+  }
+  memcpy(&addr.sin_addr, resolved_host->h_addr_list[0], resolved_host->h_length);
+  /*
+  if((addr.sin_addr.s_addr = inet_addr(resolved_host->h_addr)) == INADDR_NONE) {
+      free(iv[1].iov_base);
+      free(iv[0].iov_base);
+      return -4;
+  }
+  */
+
+  if ((sock = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
+    free(iv[1].iov_base);
+    free(iv[0].iov_base);
+    return -5;
+  }
+
+  if (connect(sock, (struct sockaddr*)(&addr), sizeof(addr)) < 0) {
+    ret_code = -6;
+    goto END;
+  }
+
+  if (writev(sock, iv, 2) < (int)(iv[0].iov_len + iv[1].iov_len)) {
+    ret_code = -7;
+    goto END;
+  }
+  iv[0].iov_len = len;
+
+#define _GET_NEXT_CHAR()                                                                                               \
+  (ch = (len >= (int)iv[0].iov_len &&                                                                                  \
+                 (iv[0].iov_len = recv(sock, iv[0].iov_base, iv[0].iov_len, len = 0)) == (size_t)(-1)                  \
+             ? 0                                                                                                       \
+             : *((char*)iv[0].iov_base + len++)))
+#define _LOOP_NEXT(statement)                                                                                          \
+  for (;;) {                                                                                                           \
+    if (!(_GET_NEXT_CHAR())) {                                                                                         \
+      ret_code = -8;                                                                                                   \
+      goto END;                                                                                                        \
+    }                                                                                                                  \
+    statement                                                                                                          \
+  }
+#define _UNTIL(c) _LOOP_NEXT(if (ch == c) break;)
+#define _GET_NUMBER(n) _LOOP_NEXT(if (ch >= '0' && ch <= '9') n = n * 10 + (ch - '0'); else break;)
+#define _(c)                                                                                                           \
+  if ((_GET_NEXT_CHAR()) != c)                                                                                         \
+    break;
+
+  _UNTIL(' ') _GET_NUMBER(ret_code) for (;;) {
+    _UNTIL('\n')
+    switch (_GET_NEXT_CHAR()) {
+    case 'C':
+      _('o')
+      _('n')
+      _('t')
+      _('e')
+      _('n') _('t') _('-') _('L') _('e') _('n') _('g') _('t') _('h') _(':') _(' ') _GET_NUMBER(content_length) break;
+    case '\r':
+      _('\n')
+      while (content_length-- > 0 && _GET_NEXT_CHAR())
+        ; // printf("%c", ch);
+      goto END;
     }
-    iv[0].iov_len = len;
-
-#define _GET_NEXT_CHAR() (ch = (len >= (int)iv[0].iov_len && \
-    (iv[0].iov_len = recv(sock, iv[0].iov_base, iv[0].iov_len, len = 0)) == (size_t)(-1) ? \
-     0 : *((char*)iv[0].iov_base + len++)))
-#define _LOOP_NEXT(statement) for(;;) { if(!(_GET_NEXT_CHAR())) { ret_code = -8; goto END; } statement }
-#define _UNTIL(c) _LOOP_NEXT( if(ch == c) break; )
-#define _GET_NUMBER(n) _LOOP_NEXT( if(ch >= '0' && ch <= '9') n = n * 10 + (ch - '0'); else break; )
-#define _(c) if((_GET_NEXT_CHAR()) != c) break;
-
-    _UNTIL(' ')_GET_NUMBER(ret_code)
-    for(;;) {
-        _UNTIL('\n')
-        switch(_GET_NEXT_CHAR()) {
-            case 'C':_('o')_('n')_('t')_('e')_('n')_('t')_('-')
-                _('L')_('e')_('n')_('g')_('t')_('h')_(':')_(' ')
-                _GET_NUMBER(content_length)
-                break;
-            case '\r':_('\n')
-                while(content_length-- > 0 && _GET_NEXT_CHAR());// printf("%c", ch);
-                goto END;
-        }
-        if(!ch) {
-            ret_code = -10;
-            goto END;
-        }
+    if (!ch) {
+      ret_code = -10;
+      goto END;
     }
-    ret_code = -11;
+  }
+  ret_code = -11;
 END:
-    close(sock);
-    free(iv[0].iov_base);
-    free(iv[1].iov_base);
-    return ret_code / 100 == 2 ? 0 : ret_code;
+  close(sock);
+  free(iv[0].iov_base);
+  free(iv[1].iov_base);
+  return ret_code / 100 == 2 ? 0 : ret_code;
 }
 #undef _GET_NEXT_CHAR
 #undef _LOOP_NEXT
@@ -189,259 +201,247 @@ int post_http_send_line(influx_client_t *c, char *buf, int len)
 #undef _GET_NUMBER
 #undef _
 
-int post_http(influx_client_t* c, ...)
-{
-    va_list ap;
-    char *line = NULL;
-    int ret_code = 0, len = 0;
+int post_http(influx_client_t* c, ...) {
+  va_list ap;
+  char* line = NULL;
+  int ret_code = 0, len = 0;
 
-    va_start(ap, c);
-    len = _format_line((char**)&line, ap);
-    va_end(ap);
-    if(len < 0)
-        return -1;
+  va_start(ap, c);
+  len = _format_line((char**)&line, ap);
+  va_end(ap);
+  if (len < 0)
+    return -1;
 
-    ret_code = post_http_send_line(c, line, len);
+  ret_code = post_http_send_line(c, line, len);
 
-    return ret_code;
+  return ret_code;
 }
 
-int send_udp_line(influx_client_t* c, char *line, int len)
-{
-    int sock = -1, ret = 0;
-    struct sockaddr_in addr;
+int send_udp_line(influx_client_t* c, char* line, int len) {
+  int sock = -1, ret = 0;
+  struct sockaddr_in addr;
 
-    addr.sin_family = AF_INET;
-    addr.sin_port = htons(c->port);
-    if((addr.sin_addr.s_addr = inet_addr(c->host)) == INADDR_NONE) {
-        ret = -2;
-        goto END;
-    }
+  addr.sin_family = AF_INET;
+  addr.sin_port = htons(c->port);
+  if ((addr.sin_addr.s_addr = inet_addr(c->host)) == INADDR_NONE) {
+    ret = -2;
+    goto END;
+  }
 
-    if((sock = socket(AF_INET, SOCK_DGRAM, 0)) < 0) {
-        ret = -3;
-        goto END;
-    }
+  if ((sock = socket(AF_INET, SOCK_DGRAM, 0)) < 0) {
+    ret = -3;
+    goto END;
+  }
 
-    if(sendto(sock, line, len, 0, (struct sockaddr *)&addr, sizeof(addr)) < len)
-        ret = -4;
+  if (sendto(sock, line, len, 0, (struct sockaddr*)&addr, sizeof(addr)) < len)
+    ret = -4;
 
 END:
-    if (sock >= 0) {
-        close(sock);
-    }
-    return ret;
+  if (sock >= 0) {
+    close(sock);
+  }
+  return ret;
 }
 
-int send_udp(influx_client_t* c, ...)
-{
-    int ret = 0, len;
-    va_list ap;
-    char* line = NULL;
+int send_udp(influx_client_t* c, ...) {
+  int ret = 0, len;
+  va_list ap;
+  char* line = NULL;
 
-    va_start(ap, c);
-    len = _format_line(&line, ap);
-    va_end(ap);
-    if(len < 0)
-        return -1;
+  va_start(ap, c);
+  len = _format_line(&line, ap);
+  va_end(ap);
+  if (len < 0)
+    return -1;
 
-    ret = send_udp_line(c, line, len);
+  ret = send_udp_line(c, line, len);
 
-    free(line);
-    return ret;
+  free(line);
+  return ret;
 }
 
-int post_curl(influx_v2_client_t* c, ...)
-{
-    va_list ap;
-    char *data = NULL;
-    int len = 0;
-    va_start(ap, c);
-    len = _format_line((char**)&data, ap);
-    va_end(ap);
-
-    CURL *curl;
-
-    /* In windows, this will init the winsock stuff */ 
-    curl_global_init(CURL_GLOBAL_ALL);
-    CURLcode res;
-
-    /* get a curl handle */ 
-    curl = curl_easy_init();
-    if(!curl) {
-        return CURLE_FAILED_INIT;
-    }
-
-    char* url_string = (char*)malloc(len);
-    snprintf(url_string, len, 
-            "http://%s:%d/api/v2/write?org=%s&bucket=%s&precision=%s",
-            c->host ? c->host: "localhost", c->port ? c->port : 8086, c->org, c->bucket, c->precision ? c->precision : "ns");
-
-    curl_easy_setopt(curl, CURLOPT_URL, url_string);   
-    free(url_string);
-            
-    char* token_string = (char*)malloc(120*sizeof(char));
-    sprintf(token_string, "Authorization: Token %s", c->token);
-
-    struct curl_slist *list = NULL;
-    list = curl_slist_append(list, token_string);
-    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, list);
-    free(token_string);
-    
-    curl_easy_setopt(curl, CURLOPT_POSTFIELDS, data);
-    curl_easy_setopt(curl, CURLOPT_USERAGENT, "libcurl-agent/1.0");
-
-    /* Perform the request, res will get the return code */ 
-    res = curl_easy_perform(curl);
-    /* Check for errors */ 
-    if(res != CURLE_OK){
-        fprintf(stderr, "curl_easy_perform() failed: %s\n",
-                curl_easy_strerror(res));   
-    }
-        
-    free(data);
-    curl_easy_cleanup(curl);
-    curl_global_cleanup();
-    return res;
+int post_curl(influx_v2_client_t* c, ...) {
+  va_list ap;
+  char* data = NULL;
+  int len = 0;
+  va_start(ap, c);
+  len = _format_line((char**)&data, ap);
+  va_end(ap);
+
+  CURL* curl;
+
+  /* In windows, this will init the winsock stuff */
+  curl_global_init(CURL_GLOBAL_ALL);
+  CURLcode res;
+
+  /* get a curl handle */
+  curl = curl_easy_init();
+  if (!curl) {
+    return CURLE_FAILED_INIT;
+  }
+
+  // NOTE: "len" is the formatted data-line length, not the URL length; size the URL buffer independently.
+  char url_string[512];
+  snprintf(url_string, sizeof(url_string), "http://%s:%d/api/v2/write?org=%s&bucket=%s&precision=%s",
+           c->host ? c->host : "localhost", c->port ? c->port : 8086, c->org, c->bucket, c->precision ? c->precision : "ns");
+
+  curl_easy_setopt(curl, CURLOPT_URL, url_string);
+
+  char* token_string = (char*)malloc(strlen(c->token) + 32); // "Authorization: Token " + token + NUL; 120 overflowed for long tokens.
+  sprintf(token_string, "Authorization: Token %s", c->token);
+
+  struct curl_slist* list = NULL;
+  list = curl_slist_append(list, token_string);
+  curl_easy_setopt(curl, CURLOPT_HTTPHEADER, list);
+  free(token_string);
+
+  curl_easy_setopt(curl, CURLOPT_POSTFIELDS, data);
+  curl_easy_setopt(curl, CURLOPT_USERAGENT, "libcurl-agent/1.0");
+
+  /* Perform the request, res will get the return code */
+  res = curl_easy_perform(curl);
+  /* Check for errors */
+  if (res != CURLE_OK) {
+    fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
+  }
+
+  free(data);
+  curl_easy_cleanup(curl);
+  curl_global_cleanup();
+  return res;
 }
 
-int format_line(char **buf, int *len, size_t used, ...)
-{
-    va_list ap;
-    va_start(ap, used);
-    used = _format_line2(buf, ap, (size_t *)len, used);
-    va_end(ap);
-    if(*len < 0)
-        return -1;
-    else
-	return used;
+int format_line(char** buf, int* len, size_t used, ...) {
+  va_list ap;
+  // Use a genuine size_t: casting int* to size_t* makes _format_line2's "*_len = len" write 8 bytes into a 4-byte int on LP64.
+  size_t len2 = (*len > 0) ? (size_t)*len : 0;
+  va_start(ap, used);
+  used = _format_line2(buf, ap, &len2, used);
+  va_end(ap);
+  *len = (int)len2;
+  return (*len < 0) ? -1 : (int)used;
+}
 
-int _begin_line(char **buf)
-{
-    int len = 0x100;
-    if(!(*buf = (char*)malloc(len)))
-        return -1;
-    return len;
+int _begin_line(char** buf) {
+  int len = 0x100;
+  if (!(*buf = (char*)malloc(len)))
+    return -1;
+  return len;
 }
 
-int _format_line(char** buf, va_list ap)
-{
-	size_t len = 0;
-	*buf = NULL;
-	return _format_line2(buf, ap, &len, 0);
+int _format_line(char** buf, va_list ap) {
+  size_t len = 0;
+  *buf = NULL;
+  return _format_line2(buf, ap, &len, 0);
 }
 
-int _format_line2(char** buf, va_list ap, size_t *_len, size_t used)
-{
-#define _APPEND(fmter...) \
-    for(;;) {\
-        if((written = snprintf(*buf + used, len - used, ##fmter)) < 0)\
-            goto FAIL;\
-        if(used + written >= len && !(*buf = (char*)realloc(*buf, len *= 2)))\
-            return -1;\
-        else {\
-            used += written;\
-            break;\
-        }\
+int _format_line2(char** buf, va_list ap, size_t* _len, size_t used) {
+#define _APPEND(fmter...)                                                                                              \
+  for (;;) {                                                                                                           \
+    if ((written = snprintf(*buf + used, len - used, ##fmter)) < 0)                                                    \
+      goto FAIL;                                                                                                       \
+    if (used + written >= len && !(*buf = (char*)realloc(*buf, len *= 2)))                                             \
+      return -1;                                                                                                       \
+    else {                                                                                                             \
+      used += written;                                                                                                 \
+      break;                                                                                                           \
+    }                                                                                                                  \
+  }
+
+  size_t len = *_len;
+  int written = 0, type = 0, last_type = 0;
+  unsigned long long i = 0;
+  double d = 0.0;
+
+  if (*buf == NULL) {
+    len = _begin_line(buf);
+    used = 0;
+  }
+
+  type = va_arg(ap, int);
+  while (type != IF_TYPE_ARG_END) {
+    if (type >= IF_TYPE_TAG && type <= IF_TYPE_FIELD_BOOLEAN) {
+      if (last_type < IF_TYPE_MEAS || last_type > (type == IF_TYPE_TAG ? IF_TYPE_TAG : IF_TYPE_FIELD_BOOLEAN))
+        goto FAIL;
+      _APPEND("%c", (last_type <= IF_TYPE_TAG && type > IF_TYPE_TAG) ? ' ' : ',');
+      if (_escaped_append(buf, &len, &used, va_arg(ap, char*), ",= "))
+        return -2;
+      _APPEND("=");
     }
-
-    size_t len = *_len;
-    int written = 0, type = 0, last_type = 0;
-    unsigned long long i = 0;
-    double d = 0.0;
-
-    if (*buf == NULL) {
-	    len = _begin_line(buf);
-	    used = 0;
+    switch (type) {
+    case IF_TYPE_MEAS:
+      if (last_type)
+        _APPEND("\n");
+      if (last_type && last_type <= IF_TYPE_TAG)
+        goto FAIL;
+      if (_escaped_append(buf, &len, &used, va_arg(ap, char*), ", "))
+        return -3;
+      break;
+    case IF_TYPE_TAG:
+      if (_escaped_append(buf, &len, &used, va_arg(ap, char*), ",= "))
+        return -4;
+      break;
+    case IF_TYPE_FIELD_STRING:
+      _APPEND("\"");
+      if (_escaped_append(buf, &len, &used, va_arg(ap, char*), "\""))
+        return -5;
+      _APPEND("\"");
+      break;
+    case IF_TYPE_FIELD_FLOAT:
+      d = va_arg(ap, double);
+      i = va_arg(ap, int);
+      _APPEND("%.*lf", (int)i, d);
+      break;
+    case IF_TYPE_FIELD_INTEGER:
+      i = va_arg(ap, long long);
+      _APPEND("%lldi", i);
+      break;
+    case IF_TYPE_FIELD_BOOLEAN:
+      i = va_arg(ap, int);
+      _APPEND("%c", i ? 't' : 'f');
+      break;
+    case IF_TYPE_TIMESTAMP:
+      if (last_type < IF_TYPE_FIELD_STRING || last_type > IF_TYPE_FIELD_BOOLEAN)
+        goto FAIL;
+      i = va_arg(ap, long long);
+      _APPEND(" %lld", i);
+      break;
+    default:
+      goto FAIL;
     }
-
+    last_type = type;
     type = va_arg(ap, int);
-    while(type != IF_TYPE_ARG_END) {
-        if(type >= IF_TYPE_TAG && type <= IF_TYPE_FIELD_BOOLEAN) {
-            if(last_type < IF_TYPE_MEAS || last_type > (type == IF_TYPE_TAG ? IF_TYPE_TAG : IF_TYPE_FIELD_BOOLEAN))
-                goto FAIL;
-            _APPEND("%c", (last_type <= IF_TYPE_TAG && type > IF_TYPE_TAG) ? ' ' : ',');
-            if(_escaped_append(buf, &len, &used, va_arg(ap, char*), ",= "))
-                return -2;
-            _APPEND("=");
-        }
-        switch(type) {
-            case IF_TYPE_MEAS:
-                if(last_type)
-                    _APPEND("\n");
-                if(last_type && last_type <= IF_TYPE_TAG)
-                    goto FAIL;
-                if(_escaped_append(buf, &len, &used, va_arg(ap, char*), ", "))
-                    return -3;
-                break;
-            case IF_TYPE_TAG:
-                if(_escaped_append(buf, &len, &used, va_arg(ap, char*), ",= "))
-                    return -4;
-                break;
-            case IF_TYPE_FIELD_STRING:
-                _APPEND("\"");
-                if(_escaped_append(buf, &len, &used, va_arg(ap, char*), "\""))
-                    return -5;
-                _APPEND("\"");
-                break;
-            case IF_TYPE_FIELD_FLOAT:
-                d = va_arg(ap, double);
-                i = va_arg(ap, int);
-                _APPEND("%.*lf", (int)i, d);
-                break;
-            case IF_TYPE_FIELD_INTEGER:
-                i = va_arg(ap, long long);
-                _APPEND("%lldi", i);
-                break;
-            case IF_TYPE_FIELD_BOOLEAN:
-                i = va_arg(ap, int);
-                _APPEND("%c", i ? 't' : 'f');
-                break;
-            case IF_TYPE_TIMESTAMP:
-                if(last_type < IF_TYPE_FIELD_STRING || last_type > IF_TYPE_FIELD_BOOLEAN)
-                    goto FAIL;
-                i = va_arg(ap, long long);
-                _APPEND(" %lld", i);
-                break;
-            default:
-                goto FAIL;
-        }
-        last_type = type;
-        type = va_arg(ap, int);
-    }
-    _APPEND("\n");
-    if(last_type <= IF_TYPE_TAG)
-        goto FAIL;
-    *_len = len;
-    return used;
+  }
+  _APPEND("\n");
+  if (last_type <= IF_TYPE_TAG)
+    goto FAIL;
+  *_len = len;
+  return used;
 FAIL:
-    free(*buf);
-    *buf = NULL;
-    return -1;
+  free(*buf);
+  *buf = NULL;
+  return -1;
 }
 #undef _APPEND
 
-int _escaped_append(char** dest, size_t* len, size_t* used, const char* src, const char* escape_seq)
-{
-    size_t i = 0;
-
-    for(;;) {
-        if((i = strcspn(src, escape_seq)) > 0) {
-            if(*used + i > *len && !(*dest = (char*)realloc(*dest, (*len) *= 2)))
-                return -1;
-            strncpy(*dest + *used, src, i);
-            *used += i;
-            src += i;
-        }
-        if(*src) {
-            if(*used + 2 > *len && !(*dest = (char*)realloc(*dest, (*len) *= 2)))
-                return -2;
-            (*dest)[(*used)++] = '\\';
-            (*dest)[(*used)++] = *src++;
-        }
-        else
-            return 0;
+int _escaped_append(char** dest, size_t* len, size_t* used, const char* src, const char* escape_seq) {
+  size_t i = 0;
+
+  for (;;) {
+    if ((i = strcspn(src, escape_seq)) > 0) {
+      if (*used + i > *len && !(*dest = (char*)realloc(*dest, (*len) *= 2)))
+        return -1;
+      strncpy(*dest + *used, src, i);
+      *used += i;
+      src += i;
     }
-    return 0;
+    if (*src) {
+      if (*used + 2 > *len && !(*dest = (char*)realloc(*dest, (*len) *= 2)))
+        return -2;
+      (*dest)[(*used)++] = '\\';
+      (*dest)[(*used)++] = *src++;
+    } else
+      return 0;
+  }
+  return 0;
 }
diff --git a/util/tracing/trace_to_chrome.c b/util/tracing/trace_to_chrome.c
index ec4cb1b1e..ff941cd4a 100644
--- a/util/tracing/trace_to_chrome.c
+++ b/util/tracing/trace_to_chrome.c
@@ -31,14 +31,15 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  * point your chrome browser to chrome://tracing/ and the load the .json file.
  */
 #define LF_TRACE
+#define __STDC_WANT_LIB_EXT2 1 // needed for asprintf
 #include 
 #include 
 #include "reactor.h"
 #include "trace.h"
 #include "trace_util.h"
 
-#define PID_FOR_USER_EVENT 1000000 // Assumes no more than a million reactors.
-#define PID_FOR_WORKER_WAIT 0  // Use 1000001 to show in separate trace.
+#define PID_FOR_USER_EVENT 1000000      // Assumes no more than a million reactors.
+#define PID_FOR_WORKER_WAIT 0           // Use 1000001 to show in separate trace.
 #define PID_FOR_WORKER_ADVANCING_TIME 0 // Use 1000002 to show in separate trace.
 #define PID_FOR_UNKNOWN_EVENT 2000000
 
@@ -55,11 +56,11 @@ FILE* output_file = NULL;
  * Print a usage message.
  */
 void usage() {
-    printf("\nUsage: trace_to_chrome [options] trace_file (with or without .lft extension)\n");
-    printf("Options: \n");
-    printf("  -p, --physical\n");
-    printf("   Use only physical time, not logical time, for all horizontal axes.\n");
-    printf("\n");
+  printf("\nUsage: trace_to_chrome [options] trace_file (with or without .lft extension)\n");
+  printf("Options: \n");
+  printf("  -p, --physical\n");
+  printf("   Use only physical time, not logical time, for all horizontal axes.\n");
+  printf("\n");
 }
 
 /** Maximum reaction number encountered. */
@@ -73,194 +74,183 @@ bool physical_time_only = false;
  * @return The number of records read or 0 upon seeing an EOF.
  */
 size_t read_and_write_trace() {
-    int trace_length = read_trace();
-    if (trace_length == 0) return 0;
-    // Write each line.
-    for (int i = 0; i < trace_length; i++) {
-        char* reaction_name = "\"UNKNOWN\"";
+  int trace_length = read_trace();
+  if (trace_length == 0)
+    return 0;
+  // Write each line.
+  for (int i = 0; i < trace_length; i++) {
+    char* reaction_name = "\"UNKNOWN\"";
 
-        // Ignore federated trace events.
-        if (trace[i].event_type > federated) continue;
+    // Ignore federated trace events.
+    if (trace[i].event_type > federated)
+      continue;
 
-        if (trace[i].dst_id >= 0) {
-            reaction_name = (char*)malloc(4);
-            snprintf(reaction_name, 4, "%d", trace[i].dst_id);
-        }
-        // printf("DEBUG: Reactor's self struct pointer: %p\n", trace[i].pointer);
-        int reactor_index;
-        char* reactor_name = get_object_description(trace[i].pointer, &reactor_index);
-        if (reactor_name == NULL) {
-            if (trace[i].event_type == worker_wait_starts || trace[i].event_type == worker_wait_ends) {
-                reactor_name = "WAIT";
-            } else if (trace[i].event_type == scheduler_advancing_time_starts
-                    || trace[i].event_type == scheduler_advancing_time_starts) {
-                reactor_name = "ADVANCE TIME";
-            } else {
-                reactor_name = "NO REACTOR";
-            }
-        }
-        // Default name is the reactor name.
-        char* name = reactor_name;
+    if (trace[i].dst_id >= 0) {
+      reaction_name = (char*)malloc(12); // Enough for any 32-bit int; 4 truncated dst_id >= 1000.
+      snprintf(reaction_name, 12, "%d", trace[i].dst_id);
+    }
+    // printf("DEBUG: Reactor's self struct pointer: %p\n", trace[i].pointer);
+    int reactor_index;
+    char* reactor_name = get_object_description(trace[i].pointer, &reactor_index);
+    if (reactor_name == NULL) {
+      if (trace[i].event_type == worker_wait_starts || trace[i].event_type == worker_wait_ends) {
+        reactor_name = "WAIT";
+      } else if (trace[i].event_type == scheduler_advancing_time_starts ||
+                 trace[i].event_type == scheduler_advancing_time_ends) {
+        reactor_name = "ADVANCE TIME";
+      } else {
+        reactor_name = "NO REACTOR";
+      }
+    }
+    // Default name is the reactor name.
+    char* name = reactor_name;
 
-        int trigger_index;
-        char* trigger_name = get_trigger_name(trace[i].trigger, &trigger_index);
-        if (trigger_name == NULL) {
-            trigger_name = "NONE";
-        }
-        // By default, the timestamp used in the trace is the elapsed
-        // physical time in microseconds.  But for schedule_called events,
-        // it will instead be the logical time at which the action or timer
-        // is to be scheduled.
-        interval_t elapsed_physical_time = (trace[i].physical_time - start_time)/1000;
-        interval_t timestamp = elapsed_physical_time;
-        interval_t elapsed_logical_time = (trace[i].logical_time - start_time)/1000;
+    int trigger_index;
+    char* trigger_name = get_trigger_name(trace[i].trigger, &trigger_index);
+    if (trigger_name == NULL) {
+      trigger_name = "NONE";
+    }
+    // By default, the timestamp used in the trace is the elapsed
+    // physical time in microseconds.  But for schedule_called events,
+    // it will instead be the logical time at which the action or timer
+    // is to be scheduled.
+    interval_t elapsed_physical_time = (trace[i].physical_time - start_time) / 1000;
+    interval_t timestamp = elapsed_physical_time;
+    interval_t elapsed_logical_time = (trace[i].logical_time - start_time) / 1000;
 
-        if (elapsed_physical_time < 0) {
-            fprintf(stderr, "WARNING: Negative elapsed physical time %lld. Skipping trace entry.\n", elapsed_physical_time);
-            continue;
-        }
-        if (elapsed_logical_time < 0) {
-            fprintf(stderr, "WARNING: Negative elapsed logical time %lld. Skipping trace entry.\n", elapsed_logical_time);
-            continue;
-        }
+    if (elapsed_physical_time < 0) {
+      fprintf(stderr, "WARNING: Negative elapsed physical time %lld. Skipping trace entry.\n",
+              (long long int)elapsed_physical_time);
+      continue;
+    }
+    if (elapsed_logical_time < 0) {
+      fprintf(stderr, "WARNING: Negative elapsed logical time %lld. Skipping trace entry.\n",
+              (long long int)elapsed_logical_time);
+      continue;
+    }
 
-        // Default thread id is the worker number.
-        int thread_id = trace[i].src_id;
+    // Default thread id is the worker number.
+    int thread_id = trace[i].src_id;
 
-        char* args;
-        asprintf(&args, "{"
-                        "\"reaction\": %s,"          // reaction number.
-                        "\"logical time\": %lld,"    // logical time.
-                        "\"physical time\": %lld,"   // physical time.
-                        "\"microstep\": %d"          // microstep.
-                    "}",
-                reaction_name,
-                elapsed_logical_time,
-                elapsed_physical_time,
-                trace[i].microstep
-        );
-        char* phase;
-        int pid;
-        switch(trace[i].event_type) {
-            case reaction_starts:
-                phase = "B";
-                pid = 0; // Process 0 will be named "Execution"
-                break;
-            case reaction_ends:
-                phase = "E";
-                pid = 0; // Process 0 will be named "Execution"
-                break;
-            case schedule_called:
-                phase = "i";
-                pid = reactor_index + 1; // One pid per reactor.
-                if (!physical_time_only) {
-                    timestamp = elapsed_logical_time + trace[i].extra_delay/1000;
-                }
-                thread_id = trigger_index;
-                name = trigger_name;
-                break;
-            case user_event:
-                pid = PID_FOR_USER_EVENT;
-                phase= "i";
-                if (!physical_time_only) {
-                    timestamp = elapsed_logical_time;
-                }
-                thread_id = reactor_index;
-                break;
-            case user_value:
-                pid = PID_FOR_USER_EVENT;
-                phase= "C";
-                if (!physical_time_only) {
-                    timestamp = elapsed_logical_time;
-                }
-                thread_id = reactor_index;
-                free(args);
-                asprintf(&args, "{\"value\": %lld}", trace[i].extra_delay);
-                break;
-            case worker_wait_starts:
-                pid = PID_FOR_WORKER_WAIT;
-                phase = "B";
-                break;
-            case worker_wait_ends:
-                pid = PID_FOR_WORKER_WAIT;
-                phase = "E";
-                break;
-            case scheduler_advancing_time_starts:
-                pid = PID_FOR_WORKER_ADVANCING_TIME;
-                phase = "B";
-                break;
-            case scheduler_advancing_time_ends:
-                pid = PID_FOR_WORKER_ADVANCING_TIME;
-                phase = "E";
-                break;
-            default:
-                fprintf(stderr, "WARNING: Unrecognized event type %d: %s\n",
-                        trace[i].event_type, trace_event_names[trace[i].event_type]);
-                pid = PID_FOR_UNKNOWN_EVENT;
-                phase = "i";
-        }
-        fprintf(output_file, "{"
-                    "\"name\": \"%s\", "   // name is the reactor or trigger name.
-                    "\"cat\": \"%s\", "    // category is the type of event.
-                    "\"ph\": \"%s\", "     // phase is "B" (begin), "E" (end), or "X" (complete).
-                    "\"tid\": %d, "        // thread ID.
-                    "\"pid\": %d, "        // process ID is required.
-                    "\"ts\": %lld, "       // timestamp in microseconds
-                    "\"args\": %s"         // additional arguments from above.
-                    "},\n",
-                name,
-                trace_event_names[trace[i].event_type],
-                phase,
-                thread_id,
-                pid,
-                timestamp,
-                args
-        );
-        free(args);
+    char* args;
+    asprintf(&args,
+             "{"
+             "\"reaction\": %s,"        // reaction number.
+             "\"logical time\": %lld,"  // logical time.
+             "\"physical time\": %lld," // physical time.
+             "\"microstep\": %d"        // microstep.
+             "}",
+             reaction_name, (long long int)elapsed_logical_time, (long long int)elapsed_physical_time, trace[i].microstep);
+    char* phase;
+    int pid;
+    switch (trace[i].event_type) {
+    case reaction_starts:
+      phase = "B";
+      pid = 0; // Process 0 will be named "Execution"
+      break;
+    case reaction_ends:
+      phase = "E";
+      pid = 0; // Process 0 will be named "Execution"
+      break;
+    case schedule_called:
+      phase = "i";
+      pid = reactor_index + 1; // One pid per reactor.
+      if (!physical_time_only) {
+        timestamp = elapsed_logical_time + trace[i].extra_delay / 1000;
+      }
+      thread_id = trigger_index;
+      name = trigger_name;
+      break;
+    case user_event:
+      pid = PID_FOR_USER_EVENT;
+      phase = "i";
+      if (!physical_time_only) {
+        timestamp = elapsed_logical_time;
+      }
+      thread_id = reactor_index;
+      break;
+    case user_value:
+      pid = PID_FOR_USER_EVENT;
+      phase = "C";
+      if (!physical_time_only) {
+        timestamp = elapsed_logical_time;
+      }
+      thread_id = reactor_index;
+      free(args);
+      asprintf(&args, "{\"value\": %lld}", trace[i].extra_delay);
+      break;
+    case worker_wait_starts:
+      pid = PID_FOR_WORKER_WAIT;
+      phase = "B";
+      break;
+    case worker_wait_ends:
+      pid = PID_FOR_WORKER_WAIT;
+      phase = "E";
+      break;
+    case scheduler_advancing_time_starts:
+      pid = PID_FOR_WORKER_ADVANCING_TIME;
+      phase = "B";
+      break;
+    case scheduler_advancing_time_ends:
+      pid = PID_FOR_WORKER_ADVANCING_TIME;
+      phase = "E";
+      break;
+    default:
+      fprintf(stderr, "WARNING: Unrecognized event type %d: %s\n", trace[i].event_type,
+              trace_event_names[trace[i].event_type]);
+      pid = PID_FOR_UNKNOWN_EVENT;
+      phase = "i";
+    }
+    fprintf(output_file,
+            "{"
+            "\"name\": \"%s\", " // name is the reactor or trigger name.
+            "\"cat\": \"%s\", "  // category is the type of event.
+            "\"ph\": \"%s\", "   // phase is "B" (begin), "E" (end), or "X" (complete).
+            "\"tid\": %d, "      // thread ID.
+            "\"pid\": %d, "      // process ID is required.
+            "\"ts\": %lld, "     // timestamp in microseconds
+            "\"args\": %s"       // additional arguments from above.
+            "},\n",
+            name, trace_event_names[trace[i].event_type], phase, thread_id, pid, (long long int)timestamp, args);
+    free(args);
 
-        if (trace[i].src_id > max_thread_id) {
-            max_thread_id = trace[i].src_id;
-        }
-        // If the event is reaction_starts and physical_time_only is not set,
-        // then also generate an instantaneous
-        // event to be shown in the reactor's section, along with timers and actions.
-        if (trace[i].event_type == reaction_starts && !physical_time_only) {
-            phase = "i";
-            pid = reactor_index + 1;
-            reaction_name = (char*)malloc(4);
-            char name[13];
-            snprintf(name, 13, "reaction %d", trace[i].dst_id);
+    if (trace[i].src_id > max_thread_id) {
+      max_thread_id = trace[i].src_id;
+    }
+    // If the event is reaction_starts and physical_time_only is not set,
+    // then also generate an instantaneous
+    // event to be shown in the reactor's section, along with timers and actions.
+    if (trace[i].event_type == reaction_starts && !physical_time_only) {
+      phase = "i";
+      pid = reactor_index + 1;
+      // (no reaction_name allocation needed here; the label is built in the local "name" buffer below)
+      char name[13];
+      snprintf(name, 13, "reaction %d", trace[i].dst_id);
 
-            // NOTE: If the reactor has more than 1024 timers and actions, then
-            // there will be a collision of thread IDs here.
-            thread_id = 1024 + trace[i].dst_id;
-            if (trace[i].dst_id > max_reaction_number) {
-                max_reaction_number = trace[i].dst_id;
-            }
+      // NOTE: If the reactor has more than 1024 timers and actions, then
+      // there will be a collision of thread IDs here.
+      thread_id = 1024 + trace[i].dst_id;
+      if (trace[i].dst_id > max_reaction_number) {
+        max_reaction_number = trace[i].dst_id;
+      }
 
-            fprintf(output_file, "{"
-                    "\"name\": \"%s\", "   // name is the reactor or trigger name.
-                    "\"cat\": \"%s\", "    // category is the type of event.
-                    "\"ph\": \"%s\", "     // phase is "B" (begin), "E" (end), or "X" (complete).
-                    "\"tid\": %d, "        // thread ID.
-                    "\"pid\": %d, "        // process ID is required.
-                    "\"ts\": %lld, "       // timestamp in microseconds
-                    "\"args\": {"
-                        "\"microstep\": %d, "       // microstep.
-                        "\"physical time\": %lld"   // physical time.
-                    "}},\n",
-                name,
-                "Reaction",
-                phase,
-                thread_id,
-                pid,
-                elapsed_logical_time,
-                trace[i].microstep,
-                elapsed_physical_time
-            );
-        }
+      fprintf(output_file,
+              "{"
+              "\"name\": \"%s\", " // name is the reactor or trigger name.
+              "\"cat\": \"%s\", "  // category is the type of event.
+              "\"ph\": \"%s\", "   // phase is "B" (begin), "E" (end), or "X" (complete).
+              "\"tid\": %d, "      // thread ID.
+              "\"pid\": %d, "      // process ID is required.
+              "\"ts\": %lld, "     // timestamp in microseconds
+              "\"args\": {"
+              "\"microstep\": %d, "     // microstep.
+              "\"physical time\": %lld" // physical time.
+              "}},\n",
+              name, "Reaction", phase, thread_id, pid, (long long int)elapsed_logical_time, trace[i].microstep,
+              (long long int)elapsed_physical_time);
     }
-    return trace_length;
+  }
+  return trace_length;
 }
 
 /**
@@ -268,183 +258,189 @@ size_t read_and_write_trace() {
  * @param output_file An open output .json file.
  */
 void write_metadata_events(FILE* output_file) {
-    // Thread 0 is the main thread.
-    fprintf(output_file, "{"
+  // Thread 0 is the main thread.
+  fprintf(output_file, "{"
+                       "\"name\": \"thread_name\", "
+                       "\"ph\": \"M\", " // mark as metadata.
+                       "\"pid\": 0, "
+                       "\"tid\": 0, "
+                       "\"args\": {"
+                       "\"name\": \"Main thread\""
+                       "}},\n");
+
+  // Name the worker threads.
+  for (int i = 1; i <= max_thread_id; i++) {
+    fprintf(output_file,
+            "{"
             "\"name\": \"thread_name\", "
-            "\"ph\": \"M\", "      // mark as metadata.
+            "\"ph\": \"M\", " // mark as metadata.
             "\"pid\": 0, "
-            "\"tid\": 0, "
+            "\"tid\": %d, "
             "\"args\": {"
-                "\"name\": \"Main thread\""
-                    "}},\n"
-        );
-
-    // Name the worker threads.
-    for (int i = 1; i <= max_thread_id; i++) {
-        fprintf(output_file, "{"
-                    "\"name\": \"thread_name\", "
-                    "\"ph\": \"M\", "      // mark as metadata.
-                    "\"pid\": 0, "
-                    "\"tid\": %d, "
-                    "\"args\": {"
-                        "\"name\": \"Worker %d\""
-                    "}},\n",
-                i, i
-        );
-        fprintf(output_file, "{"
-                    "\"name\": \"thread_name\", "
-                    "\"ph\": \"M\", "      // mark as metadata.
-                    "\"pid\": %d, "
-                    "\"tid\": %d, "
-                    "\"args\": {"
-                        "\"name\": \"Worker %d\""
-                    "}},\n",
-                PID_FOR_WORKER_WAIT, i, i
-        );
-        fprintf(output_file, "{"
-                    "\"name\": \"thread_name\", "
-                    "\"ph\": \"M\", "      // mark as metadata.
-                    "\"pid\": %d, "
-                    "\"tid\": %d, "
-                    "\"args\": {"
-                        "\"name\": \"Worker %d\""
-                    "}},\n",
-                PID_FOR_WORKER_ADVANCING_TIME, i, i
-        );
-    }
+            "\"name\": \"Worker %d\""
+            "}},\n",
+            i, i);
+    fprintf(output_file,
+            "{"
+            "\"name\": \"thread_name\", "
+            "\"ph\": \"M\", " // mark as metadata.
+            "\"pid\": %d, "
+            "\"tid\": %d, "
+            "\"args\": {"
+            "\"name\": \"Worker %d\""
+            "}},\n",
+            PID_FOR_WORKER_WAIT, i, i);
+    fprintf(output_file,
+            "{"
+            "\"name\": \"thread_name\", "
+            "\"ph\": \"M\", " // mark as metadata.
+            "\"pid\": %d, "
+            "\"tid\": %d, "
+            "\"args\": {"
+            "\"name\": \"Worker %d\""
+            "}},\n",
+            PID_FOR_WORKER_ADVANCING_TIME, i, i);
+  }
 
-    // Name reactions for each reactor.
-    for (int reactor_index = 1; reactor_index <= object_table_size; reactor_index++) {
-        for (int reaction_number = 0; reaction_number <= max_reaction_number; reaction_number++) {
-            fprintf(output_file, "{"
-                    "\"name\": \"thread_name\", "
-                    "\"ph\": \"M\", "      // mark as metadata.
-                    "\"pid\": %d, "
-                    "\"tid\": %d, "
-                    "\"args\": {"
-                        "\"name\": \"Reaction %d\""
-                    "}},\n",
-                reactor_index, reaction_number + 1024, reaction_number
-            );
-        }
+  // Name reactions for each reactor.
+  for (int reactor_index = 1; reactor_index <= object_table_size; reactor_index++) {
+    for (int reaction_number = 0; reaction_number <= max_reaction_number; reaction_number++) {
+      fprintf(output_file,
+              "{"
+              "\"name\": \"thread_name\", "
+              "\"ph\": \"M\", " // mark as metadata.
+              "\"pid\": %d, "
+              "\"tid\": %d, "
+              "\"args\": {"
+              "\"name\": \"Reaction %d\""
+              "}},\n",
+              reactor_index, reaction_number + 1024, reaction_number);
     }
+  }
 
-    // Write the reactor names for the logical timelines.
-    for (int i = 0; i < object_table_size; i++) {
-        if (object_table[i].type == trace_trigger) {
-            // We need the reactor index (not the name) to set the pid.
-            int reactor_index;
-            get_object_description(object_table[i].pointer, &reactor_index);
-            fprintf(output_file, "{"
-                    "\"name\": \"thread_name\", "   // metadata for thread name.
-                    "\"ph\": \"M\", "       // mark as metadata.
-                    "\"pid\": %d, "         // the "process" to identify by reactor.
-                    "\"tid\": %d,"          // The "thread" to label with action or timer name.
-                    "\"args\": {"
-                        "\"name\": \"Trigger %s\""
-                    "}},\n",
-                reactor_index + 1, // Offset of 1 prevents collision with Execution.
-                i,
-                object_table[i].description);
-        } else if (object_table[i].type == trace_reactor) {
-            fprintf(output_file, "{"
-                    "\"name\": \"process_name\", "   // metadata for process name.
-                    "\"ph\": \"M\", "      // mark as metadata.
-                    "\"pid\": %d, "         // the "process" to label as reactor.
-                    "\"args\": {"
-                        "\"name\": \"Reactor %s reactions, actions, and timers in logical time\""
-                    "}},\n",
-                i + 1,  // Offset of 1 prevents collision with Execution.
-                object_table[i].description);
-        } else if (object_table[i].type == trace_user) {
-            fprintf(output_file, "{"
-                    "\"name\": \"thread_name\", "   // metadata for thread name.
-                    "\"ph\": \"M\", "      // mark as metadata.
-                    "\"pid\": %d, "         // the "process" to label as reactor.
-                    "\"tid\": %d,"          // The "thread" to label with action or timer name.
-                    "\"args\": {"
-                        "\"name\": \"%s\""
-                    "}},\n",
-                PID_FOR_USER_EVENT,
-                i, // This is the index in the object table.
-                object_table[i].description);
-        }
-    }
-    // Name the "process" for "Execution"
-    fprintf(output_file, "{"
-                    "\"name\": \"process_name\", "   // metadata for process name.
-                    "\"ph\": \"M\", "      // mark as metadata.
-                    "\"pid\": 0, "         // the "process" to label "Execution".
-                    "\"args\": {"
-                        "\"name\": \"Execution of %s\""
-                    "}},\n",
-                top_level);
-    // Name the "process" for "Worker Waiting" if the PID is not the main execution one.
-    if (PID_FOR_WORKER_WAIT > 0) {
-        fprintf(output_file, "{"
-                    "\"name\": \"process_name\", "   // metadata for process name.
-                    "\"ph\": \"M\", "      // mark as metadata.
-                    "\"pid\": %d, "        // the "process" to label "Workers waiting for reaction queue".
-                    "\"args\": {"
-                        "\"name\": \"Workers waiting for reaction queue\""
-                    "}},\n",
-                PID_FOR_WORKER_WAIT);
+  // Write the reactor names for the logical timelines.
+  for (int i = 0; i < object_table_size; i++) {
+    if (object_table[i].type == trace_trigger) {
+      // We need the reactor index (not the name) to set the pid.
+      int reactor_index;
+      get_object_description(object_table[i].pointer, &reactor_index);
+      fprintf(output_file,
+              "{"
+              "\"name\": \"thread_name\", " // metadata for thread name.
+              "\"ph\": \"M\", "             // mark as metadata.
+              "\"pid\": %d, "               // the "process" to identify by reactor.
+              "\"tid\": %d,"                // The "thread" to label with action or timer name.
+              "\"args\": {"
+              "\"name\": \"Trigger %s\""
+              "}},\n",
+              reactor_index + 1, // Offset of 1 prevents collision with Execution.
+              i, object_table[i].description);
+    } else if (object_table[i].type == trace_reactor) {
+      fprintf(output_file,
+              "{"
+              "\"name\": \"process_name\", " // metadata for process name.
+              "\"ph\": \"M\", "              // mark as metadata.
+              "\"pid\": %d, "                // the "process" to label as reactor.
+              "\"args\": {"
+              "\"name\": \"Reactor %s reactions, actions, and timers in logical time\""
+              "}},\n",
+              i + 1, // Offset of 1 prevents collision with Execution.
+              object_table[i].description);
+    } else if (object_table[i].type == trace_user) {
+      fprintf(output_file,
+              "{"
+              "\"name\": \"thread_name\", " // metadata for thread name.
+              "\"ph\": \"M\", "             // mark as metadata.
+              "\"pid\": %d, "               // the "process" to label as reactor.
+              "\"tid\": %d,"                // The "thread" to label with action or timer name.
+              "\"args\": {"
+              "\"name\": \"%s\""
+              "}},\n",
+              PID_FOR_USER_EVENT,
+              i, // This is the index in the object table.
+              object_table[i].description);
     }
-    // Name the "process" for "Worker advancing time" if the PID is not the main execution one.
-    if (PID_FOR_WORKER_ADVANCING_TIME > 0) {
-        fprintf(output_file, "{"
-                    "\"name\": \"process_name\", "   // metadata for process name.
-                    "\"ph\": \"M\", "      // mark as metadata.
-                    "\"pid\": %d, "        // the "process" to label "Workers waiting for reaction queue".
-                    "\"args\": {"
-                        "\"name\": \"Workers advancing time\""
-                    "}},\n",
-                PID_FOR_WORKER_ADVANCING_TIME);
-    }
-    // Name the "process" for "User Events"
-    // Last metadata entry lacks a comma.
-    fprintf(output_file, "{"
-                    "\"name\": \"process_name\", "   // metadata for process name.
-                    "\"ph\": \"M\", "      // mark as metadata.
-                    "\"pid\": %d, "        // the "process" to label "User events".
-                    "\"args\": {"
-                        "\"name\": \"User events in %s, shown in physical time:\""
-                    "}}\n",
-                PID_FOR_USER_EVENT, top_level);
+  }
+  // Name the "process" for "Execution"
+  fprintf(output_file,
+          "{"
+          "\"name\": \"process_name\", " // metadata for process name.
+          "\"ph\": \"M\", "              // mark as metadata.
+          "\"pid\": 0, "                 // the "process" to label "Execution".
+          "\"args\": {"
+          "\"name\": \"Execution of %s\""
+          "}},\n",
+          top_level);
+  // Name the "process" for "Worker Waiting" if the PID is not the main execution one.
+  if (PID_FOR_WORKER_WAIT > 0) {
+    fprintf(output_file,
+            "{"
+            "\"name\": \"process_name\", " // metadata for process name.
+            "\"ph\": \"M\", "              // mark as metadata.
+            "\"pid\": %d, "                // the "process" to label "Workers waiting for reaction queue".
+            "\"args\": {"
+            "\"name\": \"Workers waiting for reaction queue\""
+            "}},\n",
+            PID_FOR_WORKER_WAIT);
+  }
+  // Name the "process" for "Worker advancing time" if the PID is not the main execution one.
+  if (PID_FOR_WORKER_ADVANCING_TIME > 0) {
+    fprintf(output_file,
+            "{"
+            "\"name\": \"process_name\", " // metadata for process name.
+            "\"ph\": \"M\", "              // mark as metadata.
+            "\"pid\": %d, "                // the "process" to label "Workers waiting for reaction queue".
+            "\"args\": {"
+            "\"name\": \"Workers advancing time\""
+            "}},\n",
+            PID_FOR_WORKER_ADVANCING_TIME);
+  }
+  // Name the "process" for "User Events"
+  // Last metadata entry lacks a comma.
+  fprintf(output_file,
+          "{"
+          "\"name\": \"process_name\", " // metadata for process name.
+          "\"ph\": \"M\", "              // mark as metadata.
+          "\"pid\": %d, "                // the "process" to label "User events".
+          "\"args\": {"
+          "\"name\": \"User events in %s, shown in physical time:\""
+          "}}\n",
+          PID_FOR_USER_EVENT, top_level);
 }
 
 int main(int argc, char* argv[]) {
-    char* filename = NULL;
-    for (int i = 1; i < argc; i++) {
-        if (strncmp(argv[i], "-p", 2) == 0 || strncmp(argv[i], "--physical", 10) == 0) {
-            physical_time_only = true;
-        } else if (argv[i][0] == '-') {
-            usage();
-            return(1);
-        } else {
-           filename = argv[i];
-        }
-    }
-    if (filename == NULL) {
-        usage();
-        exit(0);
+  char* filename = NULL;
+  for (int i = 1; i < argc; i++) {
+    if (strncmp(argv[i], "-p", 2) == 0 || strncmp(argv[i], "--physical", 10) == 0) {
+      physical_time_only = true;
+    } else if (argv[i][0] == '-') {
+      usage();
+      return (1);
+    } else {
+      filename = argv[i];
     }
+  }
+  if (filename == NULL) {
+    usage();
+    exit(0);
+  }
 
-    // Open the trace file.
-    trace_file = open_file(filename, "r");
+  // Open the trace file.
+  trace_file = open_file(filename, "r");
 
-    // Construct the name of the csv output file and open it.
-    char* root = root_name(filename);
-    char json_filename[strlen(root) + 6];
-    strcpy(json_filename, root);
-    strcat(json_filename, ".json");
-    output_file = open_file(json_filename, "w");
+  // Construct the name of the csv output file and open it.
+  char* root = root_name(filename);
+  char json_filename[strlen(root) + 6];
+  strcpy(json_filename, root);
+  strcat(json_filename, ".json");
+  output_file = open_file(json_filename, "w");
 
-    if (read_header() >= 0) {
-        // Write the opening bracket into the json file.
-        fprintf(output_file, "{ \"traceEvents\": [\n");
-        while (read_and_write_trace() != 0) {};
-        write_metadata_events(output_file);
-        fprintf(output_file, "]}\n");
-   }
+  if (read_header() >= 0) {
+    // Write the opening bracket into the json file.
+    fprintf(output_file, "{ \"traceEvents\": [\n");
+    while (read_and_write_trace() != 0) {
+    };
+    write_metadata_events(output_file);
+    fprintf(output_file, "]}\n");
+  }
 }
diff --git a/util/tracing/trace_to_csv.c b/util/tracing/trace_to_csv.c
index d1002269b..1ddf4a993 100644
--- a/util/tracing/trace_to_csv.c
+++ b/util/tracing/trace_to_csv.c
@@ -35,8 +35,9 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "reactor.h"
 #include "trace.h"
 #include "trace_util.h"
+#include "trace_impl.h"
 
-#define MAX_NUM_REACTIONS 64  // Maximum number of reactions reported in summary stats.
+#define MAX_NUM_REACTIONS 64 // Maximum number of reactions reported in summary stats.
 #define MAX_NUM_WORKERS 64
 
 /** File containing the trace binary data. */
@@ -55,39 +56,39 @@ int table_size;
  * Print a usage message.
  */
 void usage() {
-    printf("\nUsage: trace_to_csv [options] trace_file_root (with .lft extension)\n\n");
-    printf("\nOptions: \n\n");
-    printf("  -s, --start [time_spec] [units]\n");
-    printf("   The target time to begin tracing.\n\n");
-    printf("  -e, --end [time_spec] [units]\n");
-    printf("   The target time to stop tracing.\n\n");
-    printf("\n\n");
+  printf("\nUsage: trace_to_csv [options] trace_file_root (with .lft extension)\n\n");
+  printf("\nOptions: \n\n");
+  printf("  -s, --start [time_spec] [units]\n");
+  printf("   The target time to begin tracing.\n\n");
+  printf("  -e, --end [time_spec] [units]\n");
+  printf("   The target time to stop tracing.\n\n");
+  printf("\n\n");
 }
 
 /**
  * Struct for collecting summary statistics for reaction invocations.
  */
 typedef struct reaction_stats_t {
-    int occurrences;
-    instant_t latest_start_time;
-    interval_t total_exec_time;
-    interval_t max_exec_time;
-    interval_t min_exec_time;
+  int occurrences;
+  instant_t latest_start_time;
+  interval_t total_exec_time;
+  interval_t max_exec_time;
+  interval_t min_exec_time;
 } reaction_stats_t;
 
 /**
  * Struct for collecting summary statistics.
  */
 typedef struct summary_stats_t {
-    trace_event_t event_type; // Use reaction_ends for reactions.
-    const char* description;  // Description in the reaction table (e.g. reactor name).
-    int occurrences;          // Number of occurrences of this description.
-    int num_reactions_seen;
-    reaction_stats_t reactions[MAX_NUM_REACTIONS];
+  trace_event_t event_type; // Use reaction_ends for reactions.
+  const char* description;  // Description in the reaction table (e.g. reactor name).
+  int occurrences;          // Number of occurrences of this description.
+  int num_reactions_seen;
+  reaction_stats_t reactions[MAX_NUM_REACTIONS];
 } summary_stats_t;
 
 /**
- * Sumary stats array. This array has the same size as the
+ * Summary stats array. This array has the same size as the
  * object table. Pointer in the array will be void if there
  * are no stats for the object table item.
  */
@@ -101,421 +102,400 @@ instant_t latest_time = 0LL;
  * @return The number of records read or 0 upon seeing an EOF.
  */
 size_t read_and_write_trace(instant_t trace_start_time, instant_t trace_end_time) {
-    int trace_length = read_trace();
-    if (trace_length == 0) return 0;
-    // Write each line.
-    for (int i = 0; i < trace_length; i++) {
-        // printf("DEBUG: reactor self struct pointer: %p\n", trace[i].pointer);
-        int object_instance = -1;
-        char* reactor_name = get_object_description(trace[i].pointer, &object_instance);
-        if (reactor_name == NULL) {
-            reactor_name = "NO REACTOR";
+  int trace_length = read_trace();
+  if (trace_length == 0)
+    return 0;
+  // Write each line.
+  for (int i = 0; i < trace_length; i++) {
+    // printf("DEBUG: reactor self struct pointer: %p\n", trace[i].pointer);
+    int object_instance = -1;
+    char* reactor_name = get_object_description(trace[i].pointer, &object_instance);
+    if (reactor_name == NULL) {
+      reactor_name = "NO REACTOR";
+    }
+    int trigger_instance = -1;
+    char* trigger_name = get_trigger_name(trace[i].trigger, &trigger_instance);
+    if (trigger_name == NULL) {
+      trigger_name = "NO TRIGGER";
+    }
+    if ((trace[i].logical_time - start_time) >= trace_start_time &&
+        (trace[i].logical_time - start_time) < trace_end_time) {
+      fprintf(output_file, "%s, %s, %d, %d, " PRINTF_TIME ", %d, " PRINTF_TIME ", %s, " PRINTF_TIME "\n",
+              trace_event_names[trace[i].event_type], reactor_name, trace[i].src_id, trace[i].dst_id,
+              trace[i].logical_time - start_time, trace[i].microstep, trace[i].physical_time - start_time, trigger_name,
+              trace[i].extra_delay);
+      // Update summary statistics.
+      if (trace[i].physical_time > latest_time) {
+        latest_time = trace[i].physical_time;
+      }
+      if (object_instance >= 0 && summary_stats[NUM_EVENT_TYPES + object_instance] == NULL) {
+        summary_stats[NUM_EVENT_TYPES + object_instance] = (summary_stats_t*)calloc(1, sizeof(summary_stats_t));
+      }
+      if (trigger_instance >= 0 && summary_stats[NUM_EVENT_TYPES + trigger_instance] == NULL) {
+        summary_stats[NUM_EVENT_TYPES + trigger_instance] = (summary_stats_t*)calloc(1, sizeof(summary_stats_t));
+      }
+
+      summary_stats_t* stats = NULL;
+      interval_t exec_time;
+      reaction_stats_t* rstats;
+      int index;
+
+      // Count of event type.
+      if (summary_stats[trace[i].event_type] == NULL) {
+        summary_stats[trace[i].event_type] = (summary_stats_t*)calloc(1, sizeof(summary_stats_t));
+      }
+      summary_stats[trace[i].event_type]->event_type = trace[i].event_type;
+      summary_stats[trace[i].event_type]->description = trace_event_names[trace[i].event_type];
+      summary_stats[trace[i].event_type]->occurrences++;
+
+      switch (trace[i].event_type) {
+      case reaction_starts:
+      case reaction_ends:
+        // This code relies on the mutual exclusion of reactions in a reactor
+        // and the ordering of reaction_starts and reaction_ends events.
+        if (trace[i].dst_id >= MAX_NUM_REACTIONS) {
+          fprintf(stderr, "WARNING: Too many reactions. Not all will be shown in summary file.\n");
+          continue;
+        }
+        stats = summary_stats[NUM_EVENT_TYPES + object_instance];
+        stats->description = reactor_name;
+        if (trace[i].dst_id >= stats->num_reactions_seen) {
+          stats->num_reactions_seen = trace[i].dst_id + 1;
+        }
+        rstats = &stats->reactions[trace[i].dst_id];
+        if (trace[i].event_type == reaction_starts) {
+          rstats->latest_start_time = trace[i].physical_time;
+        } else {
+          rstats->occurrences++;
+          exec_time = trace[i].physical_time - rstats->latest_start_time;
+          rstats->latest_start_time = 0LL;
+          rstats->total_exec_time += exec_time;
+          if (exec_time > rstats->max_exec_time) {
+            rstats->max_exec_time = exec_time;
+          }
+          if (exec_time < rstats->min_exec_time || rstats->min_exec_time == 0LL) {
+            rstats->min_exec_time = exec_time;
+          }
+        }
+        break;
+      case schedule_called:
+        if (trigger_instance < 0) {
+          // No trigger. Do not report.
+          continue;
+        }
+        stats = summary_stats[NUM_EVENT_TYPES + trigger_instance];
+        stats->description = trigger_name;
+        break;
+      case user_event:
+        // Although these are not exec times and not reactions,
+        // commandeer the first entry in the reactions array to track values.
+        stats = summary_stats[NUM_EVENT_TYPES + object_instance];
+        stats->description = reactor_name;
+        break;
+      case user_value:
+        // Although these are not exec times and not reactions,
+        // commandeer the first entry in the reactions array to track values.
+        stats = summary_stats[NUM_EVENT_TYPES + object_instance];
+        stats->description = reactor_name;
+        rstats = &stats->reactions[0];
+        rstats->occurrences++;
+        // User values are stored in the "extra_delay" field, which is an interval_t.
+        interval_t value = trace[i].extra_delay;
+        rstats->total_exec_time += value;
+        if (value > rstats->max_exec_time) {
+          rstats->max_exec_time = value;
+        }
+        if (value < rstats->min_exec_time || rstats->min_exec_time == 0LL) {
+          rstats->min_exec_time = value;
+        }
+        break;
+      case worker_wait_starts:
+      case worker_wait_ends:
+      case scheduler_advancing_time_starts:
+      case scheduler_advancing_time_ends:
+        // Use the reactions array to store data.
+        // There will be two entries per worker, one for waits on the
+        // reaction queue and one for waits while advancing time.
+        index = trace[i].src_id * 2;
+        // Even numbered indices are used for waits on reaction queue.
+        // Odd numbered indices for waits for time advancement.
+        if (trace[i].event_type == scheduler_advancing_time_starts ||
+            trace[i].event_type == scheduler_advancing_time_ends) {
+          index++;
+        }
+        if (object_table_size + index >= table_size) {
+          fprintf(stderr, "WARNING: Too many workers. Not all will be shown in summary file.\n");
+          continue;
+        }
+        stats = summary_stats[NUM_EVENT_TYPES + object_table_size + index];
+        if (stats == NULL) {
+          stats = (summary_stats_t*)calloc(1, sizeof(summary_stats_t));
+          summary_stats[NUM_EVENT_TYPES + object_table_size + index] = stats;
         }
-        int trigger_instance = -1;
-        char* trigger_name = get_trigger_name(trace[i].trigger, &trigger_instance);
-        if (trigger_name == NULL) {
-            trigger_name = "NO TRIGGER";
+        // num_reactions_seen here will be used to store the number of
+        // entries in the reactions array, which is twice the number of workers.
+        if (index >= stats->num_reactions_seen) {
+          stats->num_reactions_seen = index;
         }
-        if ((trace[i].logical_time - start_time) >= trace_start_time
-            && (trace[i].logical_time - start_time) < trace_end_time) {
-            fprintf(output_file, "%s, %s, %d, %d, %lld, %d, %lld, %s, %lld\n",
-                    trace_event_names[trace[i].event_type],
-                    reactor_name,
-                    trace[i].src_id,
-                    trace[i].dst_id,
-                    trace[i].logical_time - start_time,
-                    trace[i].microstep,
-                    trace[i].physical_time - start_time,
-                    trigger_name,
-                    trace[i].extra_delay
-            );
-            // Update summary statistics.
-            if (trace[i].physical_time > latest_time) {
-                latest_time = trace[i].physical_time;
-            }
-            if (object_instance >= 0 && summary_stats[NUM_EVENT_TYPES + object_instance] == NULL) {
-                summary_stats[NUM_EVENT_TYPES + object_instance] = (summary_stats_t*)calloc(1, sizeof(summary_stats_t));
-            }
-            if (trigger_instance >= 0 && summary_stats[NUM_EVENT_TYPES + trigger_instance] == NULL) {
-                summary_stats[NUM_EVENT_TYPES + trigger_instance] = (summary_stats_t*)calloc(1, sizeof(summary_stats_t));
-            }
-
-            summary_stats_t* stats = NULL;
-            interval_t exec_time;
-            reaction_stats_t* rstats;
-            int index;
-
-            // Count of event type.
-            if (summary_stats[trace[i].event_type] == NULL) {
-                summary_stats[trace[i].event_type] = (summary_stats_t*)calloc(1, sizeof(summary_stats_t));
-            }
-            summary_stats[trace[i].event_type]->event_type = trace[i].event_type;
-            summary_stats[trace[i].event_type]->description = trace_event_names[trace[i].event_type];
-            summary_stats[trace[i].event_type]->occurrences++;
-
-            switch(trace[i].event_type) {
-                case reaction_starts:
-                case reaction_ends:
-                    // This code relies on the mutual exclusion of reactions in a reactor
-                    // and the ordering of reaction_starts and reaction_ends events.
-                    if (trace[i].dst_id >= MAX_NUM_REACTIONS) {
-                        fprintf(stderr, "WARNING: Too many reactions. Not all will be shown in summary file.\n");
-                        continue;
-                    }
-                    stats = summary_stats[NUM_EVENT_TYPES + object_instance];
-                    stats->description = reactor_name;
-                    if (trace[i].dst_id >= stats->num_reactions_seen) {
-                        stats->num_reactions_seen = trace[i].dst_id + 1;
-                    }
-                    rstats = &stats->reactions[trace[i].dst_id];
-                    if (trace[i].event_type == reaction_starts) {
-                        rstats->latest_start_time = trace[i].physical_time;
-                    } else {
-                        rstats->occurrences++;
-                        exec_time = trace[i].physical_time - rstats->latest_start_time;
-                        rstats->latest_start_time = 0LL;
-                        rstats->total_exec_time += exec_time;
-                        if (exec_time > rstats->max_exec_time) {
-                            rstats->max_exec_time = exec_time;
-                        }
-                        if (exec_time < rstats->min_exec_time || rstats->min_exec_time == 0LL) {
-                            rstats->min_exec_time = exec_time;
-                        }
-                    }
-                    break;
-                case schedule_called:
-                    if (trigger_instance < 0) {
-                        // No trigger. Do not report.
-                        continue;
-                    }
-                    stats = summary_stats[NUM_EVENT_TYPES + trigger_instance];
-                    stats->description = trigger_name;
-                    break;
-                case user_event:
-                    // Although these are not exec times and not reactions,
-                    // commandeer the first entry in the reactions array to track values.
-                    stats = summary_stats[NUM_EVENT_TYPES + object_instance];
-                    stats->description = reactor_name;
-                    break;
-                case user_value:
-                    // Although these are not exec times and not reactions,
-                    // commandeer the first entry in the reactions array to track values.
-                    stats = summary_stats[NUM_EVENT_TYPES + object_instance];
-                    stats->description = reactor_name;
-                    rstats = &stats->reactions[0];
-                    rstats->occurrences++;
-                    // User values are stored in the "extra_delay" field, which is an interval_t.
-                    interval_t value = trace[i].extra_delay;
-                    rstats->total_exec_time += value;
-                    if (value > rstats->max_exec_time) {
-                        rstats->max_exec_time = value;
-                    }
-                    if (value < rstats->min_exec_time || rstats->min_exec_time == 0LL) {
-                        rstats->min_exec_time = value;
-                    }
-                    break;
-                case worker_wait_starts:
-                case worker_wait_ends:
-                case scheduler_advancing_time_starts:
-                case scheduler_advancing_time_ends:
-                    // Use the reactions array to store data.
-                    // There will be two entries per worker, one for waits on the
-                    // reaction queue and one for waits while advancing time.
-                    index = trace[i].src_id * 2;
-                    // Even numbered indices are used for waits on reaction queue.
-                    // Odd numbered indices for waits for time advancement.
-                    if (trace[i].event_type == scheduler_advancing_time_starts
-                            || trace[i].event_type == scheduler_advancing_time_ends) {
-                        index++;
-                    }
-                    if (object_table_size + index >= table_size) {
-                        fprintf(stderr, "WARNING: Too many workers. Not all will be shown in summary file.\n");
-                        continue;
-                    }
-                    stats = summary_stats[NUM_EVENT_TYPES + object_table_size + index];
-                    if (stats == NULL) {
-                        stats = (summary_stats_t*)calloc(1, sizeof(summary_stats_t));
-                        summary_stats[NUM_EVENT_TYPES + object_table_size + index] = stats;
-                    }
-                    // num_reactions_seen here will be used to store the number of
-                    // entries in the reactions array, which is twice the number of workers.
-                    if (index >= stats->num_reactions_seen) {
-                        stats->num_reactions_seen = index;
-                    }
-                    rstats = &stats->reactions[index];
-                    if (trace[i].event_type == worker_wait_starts
-                            || trace[i].event_type == scheduler_advancing_time_starts
-                    ) {
-                        rstats->latest_start_time = trace[i].physical_time;
-                    } else {
-                        rstats->occurrences++;
-                        exec_time = trace[i].physical_time - rstats->latest_start_time;
-                        rstats->latest_start_time = 0LL;
-                        rstats->total_exec_time += exec_time;
-                        if (exec_time > rstats->max_exec_time) {
-                            rstats->max_exec_time = exec_time;
-                        }
-                        if (exec_time < rstats->min_exec_time || rstats->min_exec_time == 0LL) {
-                            rstats->min_exec_time = exec_time;
-                        }
-                    }
-                    break;
-                default:
-                    // No special summary statistics for the rest.
-                    break;
-            }
-            // Common stats across event types.
-            if (stats != NULL) {
-                stats->occurrences++;
-                stats->event_type = trace[i].event_type;
-            }
+        rstats = &stats->reactions[index];
+        if (trace[i].event_type == worker_wait_starts || trace[i].event_type == scheduler_advancing_time_starts) {
+          rstats->latest_start_time = trace[i].physical_time;
         } else {
-            // Out of scope.
+          rstats->occurrences++;
+          exec_time = trace[i].physical_time - rstats->latest_start_time;
+          rstats->latest_start_time = 0LL;
+          rstats->total_exec_time += exec_time;
+          if (exec_time > rstats->max_exec_time) {
+            rstats->max_exec_time = exec_time;
+          }
+          if (exec_time < rstats->min_exec_time || rstats->min_exec_time == 0LL) {
+            rstats->min_exec_time = exec_time;
+          }
         }
+        break;
+      default:
+        // No special summary statistics for the rest.
+        break;
+      }
+      // Common stats across event types.
+      if (stats != NULL) {
+        stats->occurrences++;
+        stats->event_type = trace[i].event_type;
+      }
+    } else {
+      // Out of scope.
     }
-    return trace_length;
+  }
+  return trace_length;
 }
 
 /**
  * Write the summary file.
  */
 void write_summary_file() {
-    // Overall stats.
-    fprintf(summary_file, "Start time:, %lld\n", start_time);
-    fprintf(summary_file, "End time:, %lld\n", latest_time);
-    fprintf(summary_file, "Total time:, %lld\n", latest_time - start_time);
-
-    fprintf(summary_file, "\nTotal Event Occurrences\n");
-    for (int i = 0; i < NUM_EVENT_TYPES; i++) {
-        summary_stats_t* stats = summary_stats[i];
-        if (stats != NULL) {
-            fprintf(summary_file, "%s, %d\n",
-                stats->description,
-                stats->occurrences
-            );
-        }
+  // Overall stats.
+  fprintf(summary_file, "Start time:, " PRINTF_TIME "\n", start_time);
+  fprintf(summary_file, "End time:, " PRINTF_TIME "\n", latest_time);
+  fprintf(summary_file, "Total time:, " PRINTF_TIME "\n", latest_time - start_time);
+
+  fprintf(summary_file, "\nTotal Event Occurrences\n");
+  for (int i = 0; i < NUM_EVENT_TYPES; i++) {
+    summary_stats_t* stats = summary_stats[i];
+    if (stats != NULL) {
+      fprintf(summary_file, "%s, %d\n", stats->description, stats->occurrences);
     }
-
-    // First pass looks for reaction invocations.
-    // First print a header.
-    fprintf(summary_file, "\nReaction Executions\n");
-    fprintf(summary_file, "Reactor, Reaction, Occurrences, Total Time, Pct Total Time, Avg Time, Max Time, Min Time\n");
-    for (int i = NUM_EVENT_TYPES; i < table_size; i++) {
-        summary_stats_t* stats = summary_stats[i];
-        if (stats != NULL && stats->num_reactions_seen > 0) {
-            for (int j = 0; j < stats->num_reactions_seen; j++) {
-                reaction_stats_t* rstats = &stats->reactions[j];
-                if (rstats->occurrences > 0) {
-                    fprintf(summary_file, "%s, %d, %d, %lld, %f, %lld, %lld, %lld\n",
-                            stats->description,
-                            j, // Reaction number.
-                            rstats->occurrences,
-                            rstats->total_exec_time,
-                            rstats->total_exec_time * 100.0 / (latest_time - start_time),
-                            rstats->total_exec_time / rstats->occurrences,
-                            rstats->max_exec_time,
-                            rstats->min_exec_time
-                    );
-                }
-            }
+  }
+
+  // First pass looks for reaction invocations.
+  // First print a header.
+  fprintf(summary_file, "\nReaction Executions\n");
+  fprintf(summary_file, "Reactor, Reaction, Occurrences, Total Time, Pct Total Time, Avg Time, Max Time, Min Time\n");
+  for (int i = NUM_EVENT_TYPES; i < table_size; i++) {
+    summary_stats_t* stats = summary_stats[i];
+    if (stats != NULL && stats->num_reactions_seen > 0) {
+      for (int j = 0; j < stats->num_reactions_seen; j++) {
+        reaction_stats_t* rstats = &stats->reactions[j];
+        if (rstats->occurrences > 0) {
+          fprintf(summary_file, "%s, %d, %d, " PRINTF_TIME ", %f, " PRINTF_TIME ", " PRINTF_TIME ", " PRINTF_TIME "\n",
+                  stats->description,
+                  j, // Reaction number.
+                  rstats->occurrences, rstats->total_exec_time,
+                  rstats->total_exec_time * 100.0 / (latest_time - start_time),
+                  rstats->total_exec_time / rstats->occurrences, rstats->max_exec_time, rstats->min_exec_time);
         }
+      }
     }
-
-    // Next pass looks for calls to schedule.
-    bool first = true;
-    for (int i = NUM_EVENT_TYPES; i < table_size; i++) {
-        summary_stats_t* stats = summary_stats[i];
-        if (stats != NULL && stats->event_type == schedule_called && stats->occurrences > 0) {
-            if (first) {
-                first = false;
-                fprintf(summary_file, "\nSchedule calls\n");
-                fprintf(summary_file, "Trigger, Occurrences\n");
-            }
-            fprintf(summary_file, "%s, %d\n", stats->description, stats->occurrences);
-        }
+  }
+
+  // Next pass looks for calls to schedule.
+  bool first = true;
+  for (int i = NUM_EVENT_TYPES; i < table_size; i++) {
+    summary_stats_t* stats = summary_stats[i];
+    if (stats != NULL && stats->event_type == schedule_called && stats->occurrences > 0) {
+      if (first) {
+        first = false;
+        fprintf(summary_file, "\nSchedule calls\n");
+        fprintf(summary_file, "Trigger, Occurrences\n");
+      }
+      fprintf(summary_file, "%s, %d\n", stats->description, stats->occurrences);
     }
-
-    // Next pass looks for user-defined events.
-    first = true;
-    for (int i = NUM_EVENT_TYPES; i < table_size; i++) {
-        summary_stats_t* stats = summary_stats[i];
-        if (stats != NULL
-                && (stats->event_type == user_event || stats->event_type == user_value)
-                && stats->occurrences > 0) {
-            if (first) {
-                first = false;
-                fprintf(summary_file, "\nUser events\n");
-                fprintf(summary_file, "Description, Occurrences, Total Value, Avg Value, Max Value, Min Value\n");
-            }
-            fprintf(summary_file, "%s, %d", stats->description, stats->occurrences);
-            if (stats->event_type == user_value && stats->reactions[0].occurrences > 0) {
-                // This assumes that the first "reactions" entry has been comandeered for this data.
-                fprintf(summary_file, ", %lld, %lld, %lld, %lld\n",
-                        stats->reactions[0].total_exec_time,
-                        stats->reactions[0].total_exec_time / stats->reactions[0].occurrences,
-                        stats->reactions[0].max_exec_time,
-                        stats->reactions[0].min_exec_time
-                );
-            } else {
-                fprintf(summary_file, "\n");
-            }
-        }
+  }
+
+  // Next pass looks for user-defined events.
+  first = true;
+  for (int i = NUM_EVENT_TYPES; i < table_size; i++) {
+    summary_stats_t* stats = summary_stats[i];
+    if (stats != NULL && (stats->event_type == user_event || stats->event_type == user_value) &&
+        stats->occurrences > 0) {
+      if (first) {
+        first = false;
+        fprintf(summary_file, "\nUser events\n");
+        fprintf(summary_file, "Description, Occurrences, Total Value, Avg Value, Max Value, Min Value\n");
+      }
+      fprintf(summary_file, "%s, %d", stats->description, stats->occurrences);
+      if (stats->event_type == user_value && stats->reactions[0].occurrences > 0) {
+        // This assumes that the first "reactions" entry has been commandeered for this data.
+        fprintf(summary_file, ", " PRINTF_TIME ", " PRINTF_TIME ", " PRINTF_TIME ", " PRINTF_TIME "\n",
+                stats->reactions[0].total_exec_time,
+                stats->reactions[0].total_exec_time / stats->reactions[0].occurrences,
+                stats->reactions[0].max_exec_time, stats->reactions[0].min_exec_time);
+      } else {
+        fprintf(summary_file, "\n");
+      }
     }
-
-    // Next pass looks for wait events.
-    first = true;
-    for (int i = NUM_EVENT_TYPES; i < table_size; i++) {
-        summary_stats_t* stats = summary_stats[i];
-        if (stats != NULL && (
-                stats->event_type == worker_wait_ends
-                || stats->event_type == scheduler_advancing_time_ends)
-        ) {
-            if (first) {
-                first = false;
-                fprintf(summary_file, "\nWorkers Waiting\n");
-                fprintf(summary_file, "Worker, Waiting On, Occurrences, Total Time, Pct Total Time, Avg Time, Max Time, Min Time\n");
-            }
-            char* waitee = "reaction queue";
-            if (stats->event_type == scheduler_advancing_time_ends
-                    || stats->event_type == scheduler_advancing_time_starts) {
-                waitee = "advancing time";
-            }
-            for (int j = 0; j <= stats->num_reactions_seen; j++) {
-                reaction_stats_t* rstats = &stats->reactions[j];
-                if (rstats->occurrences > 0) {
-                    fprintf(summary_file, "%d, %s, %d, %lld, %f, %lld, %lld, %lld\n",
-                            j / 2,
-                            waitee,
-                            rstats->occurrences,
-                            rstats->total_exec_time,
-                            rstats->total_exec_time * 100.0 / (latest_time - start_time),
-                            rstats->total_exec_time / rstats->occurrences,
-                            rstats->max_exec_time,
-                            rstats->min_exec_time
-                    );
-                }
-            }
+  }
+
+  // Next pass looks for wait events.
+  first = true;
+  for (int i = NUM_EVENT_TYPES; i < table_size; i++) {
+    summary_stats_t* stats = summary_stats[i];
+    if (stats != NULL &&
+        (stats->event_type == worker_wait_ends || stats->event_type == scheduler_advancing_time_ends)) {
+      if (first) {
+        first = false;
+        fprintf(summary_file, "\nWorkers Waiting\n");
+        fprintf(summary_file,
+                "Worker, Waiting On, Occurrences, Total Time, Pct Total Time, Avg Time, Max Time, Min Time\n");
+      }
+      char* waitee = "reaction queue";
+      if (stats->event_type == scheduler_advancing_time_ends || stats->event_type == scheduler_advancing_time_starts) {
+        waitee = "advancing time";
+      }
+      for (int j = 0; j <= stats->num_reactions_seen; j++) {
+        reaction_stats_t* rstats = &stats->reactions[j];
+        if (rstats->occurrences > 0) {
+          fprintf(summary_file, "%d, %s, %d, " PRINTF_TIME ", %f, " PRINTF_TIME ", " PRINTF_TIME ", " PRINTF_TIME "\n",
+                  j / 2, waitee, rstats->occurrences, rstats->total_exec_time,
+                  rstats->total_exec_time * 100.0 / (latest_time - start_time),
+                  rstats->total_exec_time / rstats->occurrences, rstats->max_exec_time, rstats->min_exec_time);
         }
+      }
     }
+  }
 }
 
 instant_t string_to_instant(const char* time_spec, const char* units) {
-    instant_t duration;
-    #if defined(PLATFORM_ARDUINO)
-    duration = atol(time_spec);
-    #else
-    duration = atoll(time_spec);
-    #endif
-    // A parse error returns 0LL, so check to see whether that is what is meant.
-    if (duration == 0LL && strncmp(time_spec, "0", 1) != 0) {
-        // Parse error.
-        printf("Invalid time value: %s", time_spec);
-        return -1;
-    }
-    if (strncmp(units, "sec", 3) == 0) {
-        duration = SEC(duration);
-    } else if (strncmp(units, "msec", 4) == 0) {
-        duration = MSEC(duration);
-    } else if (strncmp(units, "usec", 4) == 0) {
-        duration = USEC(duration);
-    } else if (strncmp(units, "nsec", 4) == 0) {
-        duration = NSEC(duration);
-    } else if (strncmp(units, "min", 3) == 0) {
-        duration = MINUTE(duration);
-    } else if (strncmp(units, "hour", 4) == 0) {
-        duration = HOUR(duration);
-    } else if (strncmp(units, "day", 3) == 0) {
-        duration = DAY(duration);
-    } else if (strncmp(units, "week", 4) == 0) {
-        duration = WEEK(duration);
-    } else {
-        // Invalid units.
-        printf("Invalid time units: %s", units);
-        return -1;
-    }
-    return duration;
+  instant_t duration;
+#if defined(PLATFORM_ARDUINO)
+  duration = atol(time_spec);
+#else
+  duration = atoll(time_spec);
+#endif
+  // A parse error returns 0LL, so check to see whether that is what is meant.
+  if (duration == 0LL && strncmp(time_spec, "0", 1) != 0) {
+    // Parse error.
+    printf("Invalid time value: %s", time_spec);
+    return -1;
+  }
+  if (strncmp(units, "sec", 3) == 0) {
+    duration = SEC(duration);
+  } else if (strncmp(units, "msec", 4) == 0) {
+    duration = MSEC(duration);
+  } else if (strncmp(units, "usec", 4) == 0) {
+    duration = USEC(duration);
+  } else if (strncmp(units, "nsec", 4) == 0) {
+    duration = NSEC(duration);
+  } else if (strncmp(units, "min", 3) == 0) {
+    duration = MINUTE(duration);
+  } else if (strncmp(units, "hour", 4) == 0) {
+    duration = HOUR(duration);
+  } else if (strncmp(units, "day", 3) == 0) {
+    duration = DAY(duration);
+  } else if (strncmp(units, "week", 4) == 0) {
+    duration = WEEK(duration);
+  } else {
+    // Invalid units.
+    printf("Invalid time units: %s", units);
+    return -1;
+  }
+  return duration;
 }
 
 int process_args(int argc, const char* argv[], char** root, instant_t* start_time, instant_t* end_time) {
-    int i = 1;
-    while (i < argc){
-        const char* arg = argv[i++];
-        if (strcmp(strrchr(arg, '\0') - 4, ".lft") == 0) {
-            // Open the trace file.
-            trace_file = open_file(arg, "r");
-            if (trace_file == NULL) exit(1);
-            *root = root_name(arg);
-        } else if (strcmp(arg, "-s") == 0) {
-            // sscanf(argv[++i], "%ld", start_time);
-            if (argc < i + 2) {
-                printf("-s needs time value and unit.");
-                usage();
-                return -1;
-            }
-            const char* time_spec = argv[i++];
-            const char* units = argv[i++];
-            *start_time = string_to_instant(time_spec, units);
-            if (*start_time == -1) {
-                usage();
-                return -1;
-            }
-        } else if (strcmp(arg, "-e") == 0) {
-            if (argc < i + 2) {
-                printf("-e needs time value and unit.");
-                usage();
-                return -1;
-            }
-            const char* time_spec = argv[i++];
-            const char* units = argv[i++];
-            *end_time = string_to_instant(time_spec, units);
-            if (*end_time == -1) {
-                usage();
-                return -1;
-            }
-        } else {
-            usage();
-            exit(0);
-        }
+  int i = 1;
+  while (i < argc) {
+    const char* arg = argv[i++];
+    if (strcmp(strrchr(arg, '\0') - 4, ".lft") == 0) {
+      // Open the trace file.
+      trace_file = open_file(arg, "r");
+      if (trace_file == NULL)
+        exit(1);
+      *root = root_name(arg);
+    } else if (strcmp(arg, "-s") == 0) {
+      // sscanf(argv[++i], "%ld", start_time);
+      if (argc < i + 2) {
+        printf("-s needs time value and unit.");
+        usage();
+        return -1;
+      }
+      const char* time_spec = argv[i++];
+      const char* units = argv[i++];
+      *start_time = string_to_instant(time_spec, units);
+      if (*start_time == -1) {
+        usage();
+        return -1;
+      }
+    } else if (strcmp(arg, "-e") == 0) {
+      if (argc < i + 2) {
+        printf("-e needs time value and unit.");
+        usage();
+        return -1;
+      }
+      const char* time_spec = argv[i++];
+      const char* units = argv[i++];
+      *end_time = string_to_instant(time_spec, units);
+      if (*end_time == -1) {
+        usage();
+        return -1;
+      }
+    } else {
+      usage();
+      exit(0);
     }
-    return 0;
+  }
+  return 0;
 }
 
 int main(int argc, const char* argv[]) {
-    instant_t trace_start_time = NEVER;
-    instant_t trace_end_time = FOREVER;
-    char* root;
-
-    if (process_args(argc, argv, &root, &trace_start_time, &trace_end_time) != 0) {
-        return -1;
-    }
-
-    // Construct the name of the csv output file and open it.
-    char csv_filename[strlen(root) + 5];
-    strcpy(csv_filename, root);
-    strcat(csv_filename, ".csv");
-    output_file = open_file(csv_filename, "w");
-    if (output_file == NULL) exit(1);
-
-    // Construct the name of the summary output file and open it.
-    char summary_filename[strlen(root) + 13];
-    strcpy(summary_filename, root);
-    strcat(summary_filename, "_summary.csv");
-    summary_file = open_file(summary_filename, "w");
-    if (summary_file == NULL) exit(1);
-
-    free(root);
-
-    if (read_header() >= 0) {
-        // Allocate an array for summary statistics.
-        table_size = NUM_EVENT_TYPES + object_table_size + (MAX_NUM_WORKERS * 2);
-        summary_stats = (summary_stats_t**)calloc(table_size, sizeof(summary_stats_t*));
-
-        // Write a header line into the CSV file.
-        fprintf(output_file, "Event, Reactor, Source, Destination, Elapsed Logical Time, Microstep, Elapsed Physical Time, Trigger, Extra Delay\n");
-        while (read_and_write_trace(trace_start_time, trace_end_time) != 0) {};
-
-        write_summary_file();
-
-        // File closing is handled by termination function.
-    }
-}
\ No newline at end of file
+  instant_t trace_start_time = NEVER;
+  instant_t trace_end_time = FOREVER;
+  char* root;
+
+  if (process_args(argc, argv, &root, &trace_start_time, &trace_end_time) != 0) {
+    return -1;
+  }
+
+  // Construct the name of the csv output file and open it.
+  char csv_filename[strlen(root) + 5];
+  strcpy(csv_filename, root);
+  strcat(csv_filename, ".csv");
+  output_file = open_file(csv_filename, "w");
+  if (output_file == NULL)
+    exit(1);
+
+  // Construct the name of the summary output file and open it.
+  char summary_filename[strlen(root) + 13];
+  strcpy(summary_filename, root);
+  strcat(summary_filename, "_summary.csv");
+  summary_file = open_file(summary_filename, "w");
+  if (summary_file == NULL)
+    exit(1);
+
+  free(root);
+
+  if (read_header() >= 0) {
+    // Allocate an array for summary statistics.
+    table_size = NUM_EVENT_TYPES + object_table_size + (MAX_NUM_WORKERS * 2);
+    summary_stats = (summary_stats_t**)calloc(table_size, sizeof(summary_stats_t*));
+
+    // Write a header line into the CSV file.
+    fprintf(output_file, "Event, Reactor, Source, Destination, Elapsed Logical Time, Microstep, Elapsed Physical Time, "
+                         "Trigger, Extra Delay\n");
+    while (read_and_write_trace(trace_start_time, trace_end_time) != 0) {
+    };
+
+    write_summary_file();
+
+    // File closing is handled by termination function.
+  }
+}
diff --git a/util/tracing/trace_to_influxdb.c b/util/tracing/trace_to_influxdb.c
index c7d6db55b..3d2abde3e 100644
--- a/util/tracing/trace_to_influxdb.c
+++ b/util/tracing/trace_to_influxdb.c
@@ -101,7 +101,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * You can also specify the following command-line options:
  * * -h, --host: The host name running InfluxDB. If not given, this defaults to "localhost".
- * * -p, --port: The port for accessing InfluxDB. This defaults to 8086. If you used 8087, as shown above, then you have to give this option.
+ * * -p, --port: The port for accessing InfluxDB. This defaults to 8086. If you used 8087, as shown above, then you have
+ * to give this option.
  *
  * The data can then be viewed in the InfluxDB browser, or you can configure an external
  * tool such as Grafana to visualize it (see https://grafana.com/docs/grafana/latest/datasources/influxdb/).
@@ -113,7 +114,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "trace_util.h"
 #include "influxdb.h"
 
-#define MAX_NUM_REACTIONS 64  // Maximum number of reactions reported in summary stats.
+#define MAX_NUM_REACTIONS 64 // Maximum number of reactions reported in summary stats.
 #define MAX_NUM_WORKERS 64
 
 /** File containing the trace binary data. */
@@ -126,19 +127,19 @@ influx_v2_client_t influx_v2_client;
  * Print a usage message.
  */
 void usage() {
-    printf("\nUsage: trace_to_influxdb [options] trace_file [options]\n\n");
-    printf("\nOptions: \n\n");
-    printf("   -t, --token TOKEN\n");
-    printf("   The token for access to InfluxDB (required argument).\n\n");
-    printf("   -h, --host HOSTNAME\n");
-    printf("   The host name for access to InfluxDB (default is 'localhost').\n\n");
-    printf("   -p, --port PORT\n");
-    printf("   The port for access to InfluxDB (default is 8086).\n\n");
+    printf("   -o, --org ORGANIZATION\n");
-    printf("   The organization for access to InfluxDB (default is 'iCyPhy').\n\n");
-    printf("   -b, --bucket BUCKET\n");
-    printf("   The bucket into which to put the data (default is 'test').\n\n");
-    printf("\n\n");
+  printf("\nUsage: trace_to_influxdb [options] trace_file [options]\n\n");
+  printf("\nOptions: \n\n");
+  printf("   -t, --token TOKEN\n");
+  printf("   The token for access to InfluxDB (required argument).\n\n");
+  printf("   -h, --host HOSTNAME\n");
+  printf("   The host name for access to InfluxDB (default is 'localhost').\n\n");
+  printf("   -p, --port PORT\n");
+  printf("   The port for access to InfluxDB (default is 8086).\n\n");
+  printf("   -o, --ort ORGANIZATION\n");
+  printf("   The organization for access to InfluxDB (default is 'iCyPhy').\n\n");
+  printf("   -b, --bucket BUCKET\n");
+  printf("   The bucket into which to put the data (default is 'test').\n\n");
+  printf("\n\n");
 }
 
 /** Largest timestamp seen. */
@@ -149,131 +150,127 @@ instant_t latest_time = 0LL;
  * @return The number of records read or 0 upon seeing an EOF.
  */
 size_t read_and_write_trace() {
-    int trace_length = read_trace();
-    if (trace_length == 0) return 0;
-    // Write each line.
-    for (int i = 0; i < trace_length; i++) {
+  int trace_length = read_trace();
+  if (trace_length == 0)
+    return 0;
+  // Write each line.
+  for (int i = 0; i < trace_length; i++) {
 
-        // Ignore federated traces.
-        if (trace[i].event_type > federated) continue;
+    // Ignore federated traces.
+    if (trace[i].event_type > federated)
+      continue;
 
-        char* reaction_name = "none";
-        if (trace[i].dst_id >= 0) {
-            reaction_name = (char*)malloc(4);
-            snprintf(reaction_name, 4, "%d", trace[i].dst_id);
-        }
-        // printf("DEBUG: reactor self struct pointer: %p\n", trace[i].pointer);
-        int object_instance = -1;
-        char* reactor_name = get_object_description(trace[i].pointer, &object_instance);
-        if (reactor_name == NULL) {
-            reactor_name = "NO REACTOR";
-        }
-        int trigger_instance = -1;
-        char* trigger_name = get_trigger_name(trace[i].trigger, &trigger_instance);
-        if (trigger_name == NULL) {
-            trigger_name = "NO TRIGGER";
-        }
-        // FIXME: Treating physical time as the timestamp.
-        // Do we want this to optionally be logical time?
-        // FIXME: What is the difference between a TAG and F_STR (presumably, Field String)?
-        // Presumably, the HTTP post is formatted as a "line protocol" command. See:
-        // https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/
-        int response_code = post_curl(&influx_v2_client,
-            INFLUX_MEAS(trace_event_names[trace[i].event_type]),
-            INFLUX_TAG("Reactor", reactor_name),
-            INFLUX_TAG("Reaction", reaction_name),
-            INFLUX_F_INT("Worker", trace[i].src_id),
-            INFLUX_F_INT("Logical Time", trace[i].logical_time),
-            INFLUX_F_INT("Microstep", trace[i].microstep),
-            INFLUX_F_STR("Trigger Name", trigger_name),
-            INFLUX_F_INT("Extra Delay", trace[i].extra_delay),
-            INFLUX_TS(trace[i].physical_time),
-            INFLUX_END
-        );
-        if (response_code != 0) {
-            fprintf(stderr, "****** response code: %d\n", response_code);
-            return 0;
-        }
+    char* reaction_name = "none";
+    if (trace[i].dst_id >= 0) {
+      reaction_name = (char*)malloc(4);
+      snprintf(reaction_name, 4, "%d", trace[i].dst_id);
     }
-    return trace_length;
+    // printf("DEBUG: reactor self struct pointer: %p\n", trace[i].pointer);
+    int object_instance = -1;
+    char* reactor_name = get_object_description(trace[i].pointer, &object_instance);
+    if (reactor_name == NULL) {
+      reactor_name = "NO REACTOR";
+    }
+    int trigger_instance = -1;
+    char* trigger_name = get_trigger_name(trace[i].trigger, &trigger_instance);
+    if (trigger_name == NULL) {
+      trigger_name = "NO TRIGGER";
+    }
+    // FIXME: Treating physical time as the timestamp.
+    // Do we want this to optionally be logical time?
+    // FIXME: What is the difference between a TAG and F_STR (presumably, Field String)?
+    // Presumably, the HTTP post is formatted as a "line protocol" command. See:
+    // https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/
+    int response_code =
+        post_curl(&influx_v2_client, INFLUX_MEAS(trace_event_names[trace[i].event_type]),
+                  INFLUX_TAG("Reactor", reactor_name), INFLUX_TAG("Reaction", reaction_name),
+                  INFLUX_F_INT("Worker", trace[i].src_id), INFLUX_F_INT("Logical Time", trace[i].logical_time),
+                  INFLUX_F_INT("Microstep", trace[i].microstep), INFLUX_F_STR("Trigger Name", trigger_name),
+                  INFLUX_F_INT("Extra Delay", trace[i].extra_delay), INFLUX_TS(trace[i].physical_time), INFLUX_END);
+    if (response_code != 0) {
+      fprintf(stderr, "****** response code: %d\n", response_code);
+      return 0;
+    }
+  }
+  return trace_length;
 }
 
 int main(int argc, char* argv[]) {
-    if (argc < 2) {
-        usage();
-        exit(0);
-    }
-    // Defaults.
-    influx_v2_client.token = NULL;
-    influx_v2_client.host = "localhost";
-    influx_v2_client.port = 8086;
-    influx_v2_client.org = "iCyPhy";
-    influx_v2_client.bucket = "test";
+  if (argc < 2) {
+    usage();
+    exit(0);
+  }
+  // Defaults.
+  influx_v2_client.token = NULL;
+  influx_v2_client.host = "localhost";
+  influx_v2_client.port = 8086;
+  influx_v2_client.org = "iCyPhy";
+  influx_v2_client.bucket = "test";
 
-    char* filename = NULL;
+  char* filename = NULL;
 
-    for (int i = 1; i < argc; i++) {
-        if (strcmp("-t", argv[i]) == 0 || strcmp("--token", argv[i]) == 0) {
-            if (i++ == argc - 1) {
-                usage();
-                fprintf(stderr, "No token specified.\n");
-                exit(1);
-            }
-            influx_v2_client.token = argv[i];
-        } else if (strcmp("-h", argv[i]) == 0 || strcmp("--host", argv[i]) == 0) {
-            if (i++ == argc - 1) {
-                usage();
-                fprintf(stderr, "No host specified.\n");
-                exit(1);
-            }
-            influx_v2_client.host = argv[i];
-        } else if (strcmp("-p", argv[i]) == 0 || strcmp("--port", argv[i]) == 0) {
-            if (i++ == argc - 1) {
-                usage();
-                fprintf(stderr, "No port specified.\n");
-                exit(1);
-            }
-            influx_v2_client.port = atoi(argv[i]);
-            if (influx_v2_client.port == 0) {
-                fprintf(stderr, "Invalid port: %s.\n", argv[i]);
-            }
-        } else if (strcmp("-o", argv[i]) == 0 || strcmp("--org", argv[i]) == 0) {
-            if (i++ == argc - 1) {
-                usage();
-                fprintf(stderr, "No organization specified.\n");
-                exit(1);
-            }
-            influx_v2_client.org = argv[i];
-        } else if (strcmp("-b", argv[i]) == 0 || strcmp("--bucket", argv[i]) == 0) {
-            if (i++ == argc - 1) {
-                usage();
-                fprintf(stderr, "No bucket specified.\n");
-                exit(1);
-            }
-            influx_v2_client.bucket = argv[i];
-        } else {
-            // Must be the filename.
-            filename = argv[i];
-        }
-    }
-    if (influx_v2_client.token == NULL) {
+  for (int i = 1; i < argc; i++) {
+    if (strcmp("-t", argv[i]) == 0 || strcmp("--token", argv[i]) == 0) {
+      if (i++ == argc - 1) {
+        usage();
         fprintf(stderr, "No token specified.\n");
         exit(1);
-    }
-    if (filename == NULL) {
-        fprintf(stderr, "No trace file specified.\n");
+      }
+      influx_v2_client.token = argv[i];
+    } else if (strcmp("-h", argv[i]) == 0 || strcmp("--host", argv[i]) == 0) {
+      if (i++ == argc - 1) {
+        usage();
+        fprintf(stderr, "No host specified.\n");
+        exit(1);
+      }
+      influx_v2_client.host = argv[i];
+    } else if (strcmp("-p", argv[i]) == 0 || strcmp("--port", argv[i]) == 0) {
+      if (i++ == argc - 1) {
+        usage();
+        fprintf(stderr, "No port specified.\n");
+        exit(1);
+      }
+      influx_v2_client.port = atoi(argv[i]);
+      if (influx_v2_client.port == 0) {
+        fprintf(stderr, "Invalid port: %s.\n", argv[i]);
+      }
+    } else if (strcmp("-o", argv[i]) == 0 || strcmp("--org", argv[i]) == 0) {
+      if (i++ == argc - 1) {
+        usage();
+        fprintf(stderr, "No organization specified.\n");
+        exit(1);
+      }
+      influx_v2_client.org = argv[i];
+    } else if (strcmp("-b", argv[i]) == 0 || strcmp("--bucket", argv[i]) == 0) {
+      if (i++ == argc - 1) {
+        usage();
+        fprintf(stderr, "No bucket specified.\n");
         exit(1);
+      }
+      influx_v2_client.bucket = argv[i];
+    } else {
+      // Must be the filename.
+      filename = argv[i];
     }
+  }
+  if (influx_v2_client.token == NULL) {
+    fprintf(stderr, "No token specified.\n");
+    exit(1);
+  }
+  if (filename == NULL) {
+    fprintf(stderr, "No trace file specified.\n");
+    exit(1);
+  }
 
-    // Open the trace file.
-    trace_file = open_file(filename, "r");
+  // Open the trace file.
+  trace_file = open_file(filename, "r");
 
-    if (read_header() >= 0) {
-        size_t num_records = 0, result;
-        while ((result = read_and_write_trace()) != 0) {
-            num_records = result;
-        };
-        printf("***** %zu records written to InfluxDB.\n", num_records);
-        // File closing is handled by termination function.
-    }
+  if (read_header() >= 0) {
+    size_t num_records = 0, result;
+    while ((result = read_and_write_trace()) != 0) {
+      num_records = result;
+    };
+    printf("***** %zu records written to InfluxDB.\n", num_records);
+    // File closing is handled by termination function.
+  }
 }
diff --git a/util/tracing/trace_util.c b/util/tracing/trace_util.c
index 902a97298..ed32c5baa 100644
--- a/util/tracing/trace_util.c
+++ b/util/tracing/trace_util.c
@@ -36,6 +36,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "reactor.h"
 #include "trace.h"
 #include "trace_util.h"
+#include "trace_impl.h"
 
 /** Buffer for reading object descriptions. Size limit is BUFFER_SIZE bytes. */
 char buffer[BUFFER_SIZE];
@@ -56,8 +57,8 @@ int object_table_size = 0;
 
 typedef struct open_file_t open_file_t;
 typedef struct open_file_t {
-    FILE* file;
-    open_file_t* next;
+  FILE* file;
+  open_file_t* next;
 } open_file_t;
 open_file_t* _open_files = NULL;
 
@@ -65,61 +66,63 @@ open_file_t* _open_files = NULL;
  * Function to be invoked upon exiting.
  */
 void termination() {
-    // Free memory in object description table.
-    for (int i = 0; i < object_table_size; i++) {
-        free(object_table[i].description);
-    }
-    while (_open_files != NULL) {
-        fclose(_open_files->file);
-        open_file_t* tmp = _open_files->next;
-        free(_open_files);
-        _open_files = tmp;
-    }
-    printf("Done!\n");
+  // Free memory in object description table.
+  for (int i = 0; i < object_table_size; i++) {
+    free(object_table[i].description);
+  }
+  while (_open_files != NULL) {
+    fclose(_open_files->file);
+    open_file_t* tmp = _open_files->next;
+    free(_open_files);
+    _open_files = tmp;
+  }
+  printf("Done!\n");
 }
 
 const char PATH_SEPARATOR =
 #ifdef _WIN32
-                            '\\';
+    '\\';
 #else
-                            '/';
+    '/';
 #endif
 
 char* root_name(const char* path) {
-    if (path == NULL) return NULL;
-
-    // Remove any path.
-    char* last_separator = strrchr(path, PATH_SEPARATOR);
-    if (last_separator != NULL) path = last_separator + 1;
-
-    // Allocate and copy name without extension.
-    char* last_period = strrchr(path, '.');
-    size_t length = (last_period == NULL) ?
-        strlen(path) : last_period - path;
-    char* result = (char*)malloc(length + 1);
-    if (result == NULL) return NULL;
-    strncpy(result, path, length);
-    result[length] = '\0';
-
-    return result;
+  if (path == NULL)
+    return NULL;
+
+  // Remove any path.
+  char* last_separator = strrchr(path, PATH_SEPARATOR);
+  if (last_separator != NULL)
+    path = last_separator + 1;
+
+  // Allocate and copy name without extension.
+  char* last_period = strrchr(path, '.');
+  size_t length = (last_period == NULL) ? strlen(path) : last_period - path;
+  char* result = (char*)malloc(length + 1);
+  if (result == NULL)
+    return NULL;
+  strncpy(result, path, length);
+  result[length] = '\0';
+
+  return result;
 }
 
 FILE* open_file(const char* path, const char* mode) {
-    FILE* result = fopen(path, mode);
-    if (result == NULL) {
-        fprintf(stderr, "No file named %s.\n", path);
-        usage();
-        exit(2);
-    }
-    open_file_t* record = (open_file_t*)malloc(sizeof(open_file_t));
-    if (record == NULL) {
-        fprintf(stderr, "Out of memory.\n");
-        exit(3);
-    }
-    record->file = result;
-    record->next = _open_files;
-    _open_files = record;
-    return result;
+  FILE* result = fopen(path, mode);
+  if (result == NULL) {
+    fprintf(stderr, "No file named %s.\n", path);
+    usage();
+    exit(2);
+  }
+  open_file_t* record = (open_file_t*)malloc(sizeof(open_file_t));
+  if (record == NULL) {
+    fprintf(stderr, "Out of memory.\n");
+    exit(3);
+  }
+  record->file = result;
+  record->next = _open_files;
+  _open_files = record;
+  return result;
 }
 
 /**
@@ -134,19 +137,19 @@ FILE* open_file(const char* path, const char* mode) {
  * @param index An optional pointer into which to write the index.
  */
 char* get_object_description(void* pointer, int* index) {
-    // FIXME: Replace with a hash table implementation.
-    for (int i = 0; i < object_table_size; i++) {
-        if (object_table[i].pointer == pointer) {
-            if (index != NULL) {
-                *index = i;
-            }
-            return object_table[i].description;
-        }
+  // FIXME: Replace with a hash table implementation.
+  for (int i = 0; i < object_table_size; i++) {
+    if (object_table[i].pointer == pointer) {
+      if (index != NULL) {
+        *index = i;
+      }
+      return object_table[i].description;
     }
-    if (index != NULL) {
-        *index = 0;
-    }
-    return NULL;
+  }
+  if (index != NULL) {
+    *index = 0;
+  }
+  return NULL;
 }
 
 /**
@@ -159,128 +162,133 @@ char* get_object_description(void* pointer, int* index) {
  * @param index An optional pointer into which to write the index.
  */
 char* get_trigger_name(void* trigger, int* index) {
-    // FIXME: Replace with a hash table implementation.
-    for (int i = 0; i < object_table_size; i++) {
-        if (object_table[i].trigger == trigger && object_table[i].type == trace_trigger) {
-            if (index != NULL) {
-                *index = i;
-            }
-            return object_table[i].description;
-        }
+  // FIXME: Replace with a hash table implementation.
+  for (int i = 0; i < object_table_size; i++) {
+    if (object_table[i].trigger == trigger && object_table[i].type == trace_trigger) {
+      if (index != NULL) {
+        *index = i;
+      }
+      return object_table[i].description;
     }
-    if (index != NULL) {
-        *index = 0;
-    }
-    return NULL;
+  }
+  if (index != NULL) {
+    *index = 0;
+  }
+  return NULL;
 }
 
 /**
  * Print the object to description table.
  */
 void print_table() {
-    printf("------- objects traced:\n");
-    for (int i = 0; i < object_table_size; i++) {
-        char* type;
-        if (object_table[i].type == trace_reactor) {
-            type = "reactor";
-        } else if (object_table[i].type == trace_trigger) {
-            type = "trigger";
-        } else if (object_table[i].type == trace_user) {
-            type = "user-defined";
-        } else {
-            type = "unknown type";
-        }
-        printf("pointer = %p, trigger = %p, type = %s: %s\n",
-            object_table[i].pointer,
-            object_table[i].trigger,
-            type,
-            object_table[i].description);
+  printf("------- objects traced:\n");
+  for (int i = 0; i < object_table_size; i++) {
+    char* type;
+    if (object_table[i].type == trace_reactor) {
+      type = "reactor";
+    } else if (object_table[i].type == trace_trigger) {
+      type = "trigger";
+    } else if (object_table[i].type == trace_user) {
+      type = "user-defined";
+    } else {
+      type = "unknown type";
     }
-    printf("-------\n");
+    printf("pointer = %p, trigger = %p, type = %s: %s\n", object_table[i].pointer, object_table[i].trigger, type,
+           object_table[i].description);
+  }
+  printf("-------\n");
 }
 
 size_t read_header() {
-    // Read the start time.
-    int items_read = fread(&start_time, sizeof(instant_t), 1, trace_file);
-    if (items_read != 1) _LF_TRACE_FAILURE(trace_file);
-
-    printf("Start time is %lld.\n", start_time);
-
-    // Read the table mapping pointers to descriptions.
-    // First read its length.
-    items_read = fread(&object_table_size, sizeof(int), 1, trace_file);
-    if (items_read != 1) _LF_TRACE_FAILURE(trace_file);
-
-    printf("There are %d objects traced.\n", object_table_size);
-
-    object_table = calloc(object_table_size, sizeof(trace_record_t));
-    if (object_table == NULL) {
-        fprintf(stderr, "ERROR: Memory allocation failure %d.\n", errno);
-        return -1;
+  // Read the start time.
+  int items_read = fread(&start_time, sizeof(instant_t), 1, trace_file);
+  if (items_read != 1)
+    _LF_TRACE_FAILURE(trace_file);
+
+  printf("Start time is %lld.\n", (long long int)start_time);
+
+  // Read the table mapping pointers to descriptions.
+  // First read its length.
+  items_read = fread(&object_table_size, sizeof(int), 1, trace_file);
+  if (items_read != 1)
+    _LF_TRACE_FAILURE(trace_file);
+
+  printf("There are %d objects traced.\n", object_table_size);
+
+  object_table = calloc(object_table_size, sizeof(trace_record_t));
+  if (object_table == NULL) {
+    fprintf(stderr, "ERROR: Memory allocation failure %d.\n", errno);
+    return -1;
+  }
+
+  // Next, read each table entry.
+  for (int i = 0; i < object_table_size; i++) {
+    void* reactor;
+    items_read = fread(&reactor, sizeof(void*), 1, trace_file);
+    if (items_read != 1)
+      _LF_TRACE_FAILURE(trace_file);
+    object_table[i].pointer = reactor;
+
+    void* trigger;
+    items_read = fread(&trigger, sizeof(trigger_t*), 1, trace_file);
+    if (items_read != 1)
+      _LF_TRACE_FAILURE(trace_file);
+    object_table[i].trigger = trigger;
+
+    // Next, read the type.
+    _lf_trace_object_t trace_type;
+    items_read = fread(&trace_type, sizeof(_lf_trace_object_t), 1, trace_file);
+    if (items_read != 1)
+      _LF_TRACE_FAILURE(trace_file);
+    object_table[i].type = trace_type;
+
+    // Next, read the string description into the buffer.
+    int description_length = 0;
+    char character;
+    items_read = fread(&character, sizeof(char), 1, trace_file);
+    if (items_read != 1)
+      _LF_TRACE_FAILURE(trace_file);
+    while (character != 0 && description_length < BUFFER_SIZE - 1) {
+      buffer[description_length++] = character;
+      items_read = fread(&character, sizeof(char), 1, trace_file);
+      if (items_read != 1)
+        _LF_TRACE_FAILURE(trace_file);
     }
+    // Terminate with null.
+    buffer[description_length++] = 0;
 
-    // Next, read each table entry.
-    for (int i = 0; i < object_table_size; i++) {
-        void* reactor;
-        items_read = fread(&reactor, sizeof(void*), 1, trace_file);
-        if (items_read != 1) _LF_TRACE_FAILURE(trace_file);
-        object_table[i].pointer = reactor;
-
-        void* trigger;
-        items_read = fread(&trigger, sizeof(trigger_t*), 1, trace_file);
-        if (items_read != 1) _LF_TRACE_FAILURE(trace_file);
-        object_table[i].trigger = trigger;
-
-        // Next, read the type.
-        _lf_trace_object_t trace_type;
-        items_read = fread(&trace_type, sizeof(_lf_trace_object_t), 1, trace_file);
-        if (items_read != 1) _LF_TRACE_FAILURE(trace_file);
-        object_table[i].type = trace_type;
-
-        // Next, read the string description into the buffer.
-        int description_length = 0;
-        char character;
-        items_read = fread(&character, sizeof(char), 1, trace_file);
-        if (items_read != 1) _LF_TRACE_FAILURE(trace_file);
-        while(character != 0 && description_length < BUFFER_SIZE - 1) {
-            buffer[description_length++] = character;
-            items_read = fread(&character, sizeof(char), 1, trace_file);
-            if (items_read != 1) _LF_TRACE_FAILURE(trace_file);
-        }
-        // Terminate with null.
-        buffer[description_length++] = 0;
-
-        // Allocate memory to store the description.
-        object_table[i].description = malloc(description_length);
-        strcpy(object_table[i].description, buffer);
-
-        if (top_level == NULL) {
-            top_level = object_table[i].description;
-        }
+    // Allocate memory to store the description.
+    object_table[i].description = malloc(description_length);
+    strcpy(object_table[i].description, buffer);
+
+    if (top_level == NULL) {
+      top_level = object_table[i].description;
     }
-    print_table();
-    return object_table_size;
+  }
+  print_table();
+  return object_table_size;
 }
 
 int read_trace() {
-    // Read first the int giving the length of the trace.
-    int trace_length;
-    int items_read = fread(&trace_length, sizeof(int), 1, trace_file);
-    if (items_read != 1) {
-        if (feof(trace_file)) return 0;
-        fprintf(stderr, "Failed to read trace length.\n");
-        exit(3);
-    }
-    if (trace_length > TRACE_BUFFER_CAPACITY) {
-        fprintf(stderr, "ERROR: Trace length %d exceeds capacity. File is garbled.\n", trace_length);
-        exit(4);
-    }
-    // printf("DEBUG: Trace of length %d being converted.\n", trace_length);
-
-    items_read = fread(&trace, sizeof(trace_record_t), trace_length, trace_file);
-    if (items_read != trace_length) {
-        fprintf(stderr, "Failed to read trace of length %d.\n", trace_length);
-        exit(5);
-    }
-    return trace_length;
+  // Read first the int giving the length of the trace.
+  int trace_length;
+  int items_read = fread(&trace_length, sizeof(int), 1, trace_file);
+  if (items_read != 1) {
+    if (feof(trace_file))
+      return 0;
+    fprintf(stderr, "Failed to read trace length.\n");
+    exit(3);
+  }
+  if (trace_length > TRACE_BUFFER_CAPACITY) {
+    fprintf(stderr, "ERROR: Trace length %d exceeds capacity. File is garbled.\n", trace_length);
+    exit(4);
+  }
+  // printf("DEBUG: Trace of length %d being converted.\n", trace_length);
+
+  items_read = fread(&trace, sizeof(trace_record_t), trace_length, trace_file);
+  if (items_read != trace_length) {
+    fprintf(stderr, "Failed to read trace of length %d.\n", trace_length);
+    exit(5);
+  }
+  return trace_length;
 }
diff --git a/util/tracing/trace_util.h b/util/tracing/trace_util.h
index 6f8774035..67d3c705b 100644
--- a/util/tracing/trace_util.h
+++ b/util/tracing/trace_util.h
@@ -34,13 +34,13 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "trace.h"
 
 /** Macro to use when access to trace file fails. */
-#define _LF_TRACE_FAILURE(trace_file) \
-    do { \
-        fprintf(stderr, "ERROR: Access to trace file failed.\n"); \
-        fclose(trace_file); \
-        trace_file = NULL; \
-        exit(1); \
-    } while(0)
+#define _LF_TRACE_FAILURE(trace_file)                                                                                  \
+  do {                                                                                                                 \
+    fprintf(stderr, "ERROR: Access to trace file failed.\n");                                                          \
+    fclose(trace_file);                                                                                                \
+    trace_file = NULL;                                                                                                 \
+    exit(1);                                                                                                           \
+  } while (0)
 
 /** Buffer for reading object descriptions. Size limit is BUFFER_SIZE bytes. */
 #define BUFFER_SIZE 1024
@@ -77,7 +77,7 @@ extern char* top_level;
  * Given a path to a file, this function returns a dynamically
  * allocated string (which you must free) that points to the root
  * filename without the preceding path and without the file extension.
- * @param path The path including the full filename. 
+ * @param path The path including the full filename.
  * @return The root name of the file or NULL for failure.
  */
 char* root_name(const char* path);
diff --git a/util/tracing/visualization/fedsd.py b/util/tracing/visualization/fedsd.py
index 2f4b4d389..a680d27c4 100644
--- a/util/tracing/visualization/fedsd.py
+++ b/util/tracing/visualization/fedsd.py
@@ -113,7 +113,7 @@
 # Events matching at the sender and receiver ends depend on whether they are tagged
 # (the elapsed logical time and microstep have to be the same) or not. 
 # Set of tagged events (messages)
-non_tagged_messages = {'FED_ID', 'ACK', 'RESIGN', 'FAILED', 'REJECT', 'ADR_RQ', 'ADR_AD', 'MSG', 'P2P_MSG'}
+non_tagged_messages = {'FED_ID', 'ACK', 'RESIGN', 'FAILED', 'REJECT', 'ADR_QR', 'ADR_AD', 'MSG', 'P2P_MSG'}
 
 
 ################################################################################
@@ -675,7 +675,7 @@ def get_and_convert_lft_files(rti_lft_file, federates_lft_files, start_time, end
             # FIXME: Using microseconds is hardwired here.
             physical_time = f'{int(row["physical_time"]/1000):,}'
 
-            if (row['event'] in {'FED_ID', 'ACK', 'FAILED', 'REJECT', 'ADR_RQ', 'ADR_AD', 'MSG', 'P2P_MSG'}):
+            if (row['event'] in {'FED_ID', 'ACK', 'FAILED', 'REJECT', 'ADR_QR', 'ADR_AD', 'MSG', 'P2P_MSG'}):
                 label = row['event']
             else:
                 label = row['event'] + '(' + f'{int(row["logical_time"]):,}' + ', ' + str(row['microstep']) + ')'
diff --git a/util/type_converter.h b/util/type_converter.h
index 86061f126..6268ddca2 100644
--- a/util/type_converter.h
+++ b/util/type_converter.h
@@ -37,9 +37,9 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #ifndef TYPE_CONVERTER_H_
 #define TYPE_CONVERTER_H_
 
-#define PASTE(x,y) x ## y
+#define PASTE(x, y) x##y
 
-#define RESOLVE(i, o, in)  PASTE(convert__##i, _to__##o)(in)
+#define RESOLVE(i, o, in) PASTE(convert__##i, _to__##o)(in)
 
 /// @name DO_CONVERT
 /// @param fromType Typename of  value  field
diff --git a/util/wave_file_reader.c b/util/wave_file_reader.c
index bb84bd120..e192ee22f 100644
--- a/util/wave_file_reader.c
+++ b/util/wave_file_reader.c
@@ -4,7 +4,7 @@
  * @copyright (c) 2020-2023, The University of California at Berkeley
  * License in [BSD 2-clause](https://github.com/lf-lang/reactor-c/blob/main/LICENSE.md)
  * @brief Utility function for reading WAV audio files.
- * 
+ *
  * See wave_file_reader.h for instructions.
  */
 
@@ -26,9 +26,9 @@
  * by the sequence of four chars 'RIFF'.
  */
 typedef struct {
-    char chunk_id[4];    // "RIFF"
-    uint32_t chunk_size; // 36 + subchunk_size
-    char format[4];      // "WAVE"
+  char chunk_id[4];    // "RIFF"
+  uint32_t chunk_size; // 36 + subchunk_size
+  char format[4];      // "WAVE"
 } lf_wav_riff_t;
 
 /**
@@ -37,14 +37,14 @@ typedef struct {
  * by the sequence of four chars 'fmt '.
  */
 typedef struct {
-    char subchunk_id[4];    // 'fmt '
-    uint32_t subchunk_size; // 16 for linear PCM.
-    uint16_t audio_format;  // 1 for linear PCM
-    uint16_t num_channels;  // 1 for mono = 1, 2 for stereo, etc.
-    uint32_t sample_rate;   // 44100
-    uint32_t byte_rate;     // sample_rate * num_channels * bits_per_sample/8
-    uint16_t BlockAlign;    /* = num_channels * bits_per_sample/8 */
-    uint16_t bits_per_sample; /* 8bits, 16bits, etc. */
+  char subchunk_id[4];      // 'fmt '
+  uint32_t subchunk_size;   // 16 for linear PCM.
+  uint16_t audio_format;    // 1 for linear PCM
+  uint16_t num_channels;    // 1 for mono = 1, 2 for stereo, etc.
+  uint32_t sample_rate;     // 44100
+  uint32_t byte_rate;       // sample_rate * num_channels * bits_per_sample/8
+  uint16_t BlockAlign;      /* = num_channels * bits_per_sample/8 */
+  uint16_t bits_per_sample; /* 8bits, 16bits, etc. */
 } lf_wav_format_t;
 
 /**
@@ -53,109 +53,100 @@ typedef struct {
  * is an int formed by the sequenced of four chars 'data'.
  */
 typedef struct {
-    char subchunk_id[4];    // 'data'
-    uint32_t subchunk_size; // data size in bytes
+  char subchunk_id[4];    // 'data'
+  uint32_t subchunk_size; // data size in bytes
 } lf_wav_data_t;
 
 /**
  * Overall wave data.
  */
 typedef struct {
-   lf_wav_riff_t riff;
-   lf_wav_format_t fmt;
-   lf_wav_data_t data;
+  lf_wav_riff_t riff;
+  lf_wav_format_t fmt;
+  lf_wav_data_t data;
 } lf_wav_t;
 
 lf_waveform_t* read_wave_file(const char* path) {
-    FILE *fp = NULL;
-    
-    lf_wav_t wav;
-    fp = fopen(path, "rb");
+  FILE* fp = NULL;
+
+  lf_wav_t wav;
+  fp = fopen(path, "rb");
+  if (!fp) {
+    // Try prefixing the file name with "src-gen".
+    // On a remote host, the waveform files will be put in that directory.
+    char alt_path[strlen(path) + 9];
+    strcpy(alt_path, "src-gen");
+    alt_path[7] = FILE_PATH_SEPARATOR;
+    strcpy(&(alt_path[8]), path);
+    fp = fopen(alt_path, "rb");
     if (!fp) {
-        // Try prefixing the file name with "src-gen".
-        // On a remote host, the waveform files will be put in that directory.
-        char alt_path[strlen(path) + 9];
-        strcpy(alt_path, "src-gen");
-        alt_path[7] = FILE_PATH_SEPARATOR;
-        strcpy(&(alt_path[8]), path);
-        fp = fopen(alt_path, "rb");
-        if (!fp) {
-            fprintf(stderr, "WARNING: Failed to open waveform sample file: %s\n", path);
-            return NULL;
-        }
+      fprintf(stderr, "WARNING: Failed to open waveform sample file: %s\n", path);
+      return NULL;
     }
- 
-    fread(&wav, 1, sizeof(lf_wav_t), fp);
-     
-    lf_wav_format_t fmt = wav.fmt;
-    lf_wav_data_t data = wav.data;
- 
-    // Wave file format is described here:
-    // https://sites.google.com/site/musicgapi/technical-documents/wav-file-format
-    uint32_t expected_chunk_id = (uint32_t)'FFIR';     // Little-endian version of RIFF.
-    uint32_t expected_format = (uint32_t)'EVAW';       // Little-endian version of WAVE.
-    uint32_t expected_subchunk_id = (uint32_t)' tmf';  // Little-endian version of 'fmt '.
-    if (*(uint32_t*)wav.riff.chunk_id != expected_chunk_id
-        || *(uint32_t*)wav.riff.format != expected_format
-        || *(uint32_t*)fmt.subchunk_id != expected_subchunk_id
-        || fmt.subchunk_size != 16
-        || fmt.audio_format != 1
-        || fmt.sample_rate != 44100
-        || fmt.bits_per_sample != 16
-    ) {
-        fprintf(stderr, "WARNING: Waveform sample not a supported format.\n");
-        fprintf(stderr, "Chunk ID was expected to be 'RIFF'. Got: '%c%c%c%c'.\n",
-                wav.riff.chunk_id[0], wav.riff.chunk_id[1], wav.riff.chunk_id[2], wav.riff.chunk_id[3]);
-        fprintf(stderr, "Format was expected to be 'WAVE'. Got: '%c%c%c%c'.\n",
-                wav.riff.format[0], wav.riff.format[1], wav.riff.format[2], wav.riff.format[3]);
-        fprintf(stderr, "Subchunk ID was expected to be 'fmt '. Got: '%c%c%c%c'.\n",
-                fmt.subchunk_id[0], fmt.subchunk_id[1], fmt.subchunk_id[2], fmt.subchunk_id[3]);
-        fprintf(stderr, "Subchunk size was expected to be 16. Got: '%d'.\n",
-                fmt.subchunk_size);
-        fprintf(stderr, "Audio format was expected to be 1 (LPCM, no compression). Got: '%d'.\n",
-                fmt.audio_format);
-        fprintf(stderr, "Sample rate was expected to be 44100). Got: '%d'.\n",
-                fmt.sample_rate);
-        fprintf(stderr, "Bits per sample was expected to be 16. Got: '%d'.\n",
-                fmt.bits_per_sample);
+  }
+
+  fread(&wav, 1, sizeof(lf_wav_t), fp);
+
+  lf_wav_format_t fmt = wav.fmt;
+  lf_wav_data_t data = wav.data;
+
+  // Wave file format is described here:
+  // https://sites.google.com/site/musicgapi/technical-documents/wav-file-format
+  uint32_t expected_chunk_id = (uint32_t)'FFIR';    // Little-endian version of RIFF.
+  uint32_t expected_format = (uint32_t)'EVAW';      // Little-endian version of WAVE.
+  uint32_t expected_subchunk_id = (uint32_t)' tmf'; // Little-endian version of 'fmt '.
+  if (*(uint32_t*)wav.riff.chunk_id != expected_chunk_id || *(uint32_t*)wav.riff.format != expected_format ||
+      *(uint32_t*)fmt.subchunk_id != expected_subchunk_id || fmt.subchunk_size != 16 || fmt.audio_format != 1 ||
+      fmt.sample_rate != 44100 || fmt.bits_per_sample != 16) {
+    fprintf(stderr, "WARNING: Waveform sample not a supported format.\n");
+    fprintf(stderr, "Chunk ID was expected to be 'RIFF'. Got: '%c%c%c%c'.\n", wav.riff.chunk_id[0],
+            wav.riff.chunk_id[1], wav.riff.chunk_id[2], wav.riff.chunk_id[3]);
+    fprintf(stderr, "Format was expected to be 'WAVE'. Got: '%c%c%c%c'.\n", wav.riff.format[0], wav.riff.format[1],
+            wav.riff.format[2], wav.riff.format[3]);
+    fprintf(stderr, "Subchunk ID was expected to be 'fmt '. Got: '%c%c%c%c'.\n", fmt.subchunk_id[0], fmt.subchunk_id[1],
+            fmt.subchunk_id[2], fmt.subchunk_id[3]);
+    fprintf(stderr, "Subchunk size was expected to be 16. Got: '%d'.\n", fmt.subchunk_size);
+    fprintf(stderr, "Audio format was expected to be 1 (LPCM, no compression). Got: '%d'.\n", fmt.audio_format);
+    fprintf(stderr, "Sample rate was expected to be 44100). Got: '%d'.\n", fmt.sample_rate);
+    fprintf(stderr, "Bits per sample was expected to be 16. Got: '%d'.\n", fmt.bits_per_sample);
+  }
+  // Ignore any intermediate chunks that are not 'data' chunks.
+  // Apparently, Apple software sometimes inserts junk here.
+  uint32_t expected_data_id = (uint32_t)'atad'; // Little-endian version of 'data'.
+  while (*(uint32_t*)data.subchunk_id != expected_data_id) {
+    char junk[data.subchunk_size];
+    size_t bytes_read = fread(junk, 1, data.subchunk_size, fp);
+    if (bytes_read != data.subchunk_size) {
+      fprintf(stderr, "Intermediate junk chunk '%c%c%c%c' could not be read. Giving up.\n", data.subchunk_id[0],
+              data.subchunk_id[1], data.subchunk_id[2], data.subchunk_id[3]);
+      break;
     }
-    // Ignore any intermediate chunks that are not 'data' chunks.
-    // Apparently, Apple software sometimes inserts junk here.
-    uint32_t expected_data_id = (uint32_t)'atad';      // Little-endian version of 'data'.
-    while (*(uint32_t*)data.subchunk_id != expected_data_id) {
-        char junk[data.subchunk_size];
-        size_t bytes_read = fread(junk, 1, data.subchunk_size , fp);
-        if (bytes_read != data.subchunk_size) {
-            fprintf(stderr, "Intermediate junk chunk '%c%c%c%c' could not be read. Giving up.\n",
-                data.subchunk_id[0], data.subchunk_id[1], data.subchunk_id[2], data.subchunk_id[3]);
-            break;
-        }
-        bytes_read = fread(&data, 1, sizeof(lf_wav_data_t) , fp);
-        if (bytes_read != sizeof(lf_wav_data_t)) {
-            fprintf(stderr, "Missing 'data' chunk in file %s.\n", path);
-            break;
-        }
+    bytes_read = fread(&data, 1, sizeof(lf_wav_data_t), fp);
+    if (bytes_read != sizeof(lf_wav_data_t)) {
+      fprintf(stderr, "Missing 'data' chunk in file %s.\n", path);
+      break;
     }
+  }
 
-    uint16_t num_channels = fmt.num_channels;     
+  uint16_t num_channels = fmt.num_channels;
 
-    // Ignoring the following fields. Should we?
-    // printf("byte_rate \t%d\n", fmt.byte_rate);
-    // printf("BlockAlign \t%d\n", fmt.BlockAlign);
+  // Ignoring the following fields. Should we?
+  // printf("byte_rate \t%d\n", fmt.byte_rate);
+  // printf("BlockAlign \t%d\n", fmt.BlockAlign);
 
-    // printf("Data subchunk size \t%d\n", data.subchunk_size);
+  // printf("Data subchunk size \t%d\n", data.subchunk_size);
 
-    lf_waveform_t* result = (lf_waveform_t*)malloc(sizeof(lf_waveform_t));
-    // printf("Size of lf_waveform_t %d", sizeof(lf_waveform_t));
-    result->length = data.subchunk_size/2; // Subchunk size is in bytes, but length is number of samples.
-    result->num_channels = num_channels;
-    result->waveform = (int16_t*)calloc(data.subchunk_size/2, sizeof(int16_t));
+  lf_waveform_t* result = (lf_waveform_t*)malloc(sizeof(lf_waveform_t));
+  // printf("Size of lf_waveform_t %d", sizeof(lf_waveform_t));
+  result->length = data.subchunk_size / 2; // Subchunk size is in bytes, but length is number of samples.
+  result->num_channels = num_channels;
+  result->waveform = (int16_t*)calloc(data.subchunk_size / 2, sizeof(int16_t));
 
-    size_t bytes_read = fread(result->waveform, sizeof(int16_t), data.subchunk_size/2 , fp);
-    if (bytes_read != data.subchunk_size/2) {
-        fprintf(stderr, "WARNING: Expected %d bytes, but got %zu.\n", data.subchunk_size, bytes_read);
-    }
+  size_t bytes_read = fread(result->waveform, sizeof(int16_t), data.subchunk_size / 2, fp);
+  if (bytes_read != data.subchunk_size / 2) {
+    fprintf(stderr, "WARNING: Expected %d bytes, but got %zu.\n", data.subchunk_size, bytes_read);
+  }
 
-    // printf("duration \t%f\n", (data.subchunk_size * 1.0) / fmt.byte_rate);
-    return result;
- }
+  // printf("duration \t%f\n", (data.subchunk_size * 1.0) / fmt.byte_rate);
+  return result;
+}
diff --git a/util/wave_file_reader.h b/util/wave_file_reader.h
index 917d6b4e6..cd072c5dc 100644
--- a/util/wave_file_reader.h
+++ b/util/wave_file_reader.h
@@ -4,15 +4,15 @@
  * @copyright (c) 2020-2023, The University of California at Berkeley
  * License in [BSD 2-clause](https://github.com/lf-lang/reactor-c/blob/main/LICENSE.md)
  * @brief Utility function for reading WAV audio files.
- * 
+ *
  * This defines functions and data types for importing audio files with the
  * wave audio format. The main function is read_wave_file(), which, given
  * a path to a .wav file, reads the file and, if the format of the file is
  * supported, returns an lf_waveform_t struct, which contains the raw
  * audio data in 16-bit linear PCM form.
- * 
+ *
  * This code has few dependencies, so it should run on just about any platform.
- * 
+ *
  * To use this, include the following flags in your target properties:
  * 
 target C {
@@ -25,7 +25,7 @@ target C {
     ]
 }
  * 
- * + * * In addition, you need this in your Lingua Franca file or reactor: *
  * preamble {=
@@ -45,9 +45,9 @@ target C {
  * of samples, a multiple of the number of channels.
  */
 typedef struct lf_waveform_t {
-    uint32_t length;
-    uint16_t num_channels;
-    int16_t* waveform;
+  uint32_t length;
+  uint16_t num_channels;
+  int16_t* waveform;
 } lf_waveform_t;
 
 /**
@@ -59,7 +59,7 @@ typedef struct lf_waveform_t {
  * This implementation supports only 16-bit linear PCM files.
  * On a Mac, you can convert audio files into this format
  * using the afconvert utility.
- * 
+ *
  * @param path The path to the file.
  * @return An array of sample data or NULL if the file can't be opened
  *  or has an usupported format.
diff --git a/version/api/CMakeLists.txt b/version/api/CMakeLists.txt
new file mode 100644
index 000000000..897c21f13
--- /dev/null
+++ b/version/api/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_library(lf-version-api INTERFACE)
+target_include_directories(lf-version-api INTERFACE ${CMAKE_CURRENT_LIST_DIR})
+add_library(lf::version-api ALIAS lf-version-api)
diff --git a/version/api/lf_core_version.h b/version/api/lf_core_version.h
new file mode 100644
index 000000000..0fc561bd9
--- /dev/null
+++ b/version/api/lf_core_version.h
@@ -0,0 +1,33 @@
+/**
+ * @file lf_core_version.h
+ * @author Peter Donovan (peter@xronos.com)
+ * @brief API for runtime plugins to use to sanity-check compatibility with the
+ * core. Plugins APIs can include a function to get information about the
+ * version of the plugin, and the core can use that information to determine if
+ * the plugin is compatible with the core.
+ * @version 0.1
+ * @date 2024-01-29
+ *
+ * @copyright Copyright (c) 2024
+ */
+#ifndef VERSION_H
+#define VERSION_H
+
+typedef enum {
+  TRIBOOL_FALSE = 0,
+  TRIBOOL_TRUE = 1,
+  TRIBOOL_DOES_NOT_MATTER = 2,
+} tribool_t;
+
+typedef struct {
+  tribool_t single_threaded;
+  tribool_t build_type_is_debug;
+  int log_level;
+} build_config_t;
+
+typedef struct {
+  const build_config_t build_config;
+  const char* core_version_name;
+} version_t;
+
+#endif // VERSION_H