diff --git a/.github/workflows/api-docs.yml b/.github/workflows/api-docs.yml index c63befc80..73fccbcc8 100644 --- a/.github/workflows/api-docs.yml +++ b/.github/workflows/api-docs.yml @@ -6,17 +6,18 @@ on: jobs: build: - runs-on: macos-latest - steps: - name: Requirements - run: brew install doxygen - && brew install sphinx-doc - && pip3 install sphinx-rtd-theme - && pip3 install breathe - && pip3 install sphinx-sitemap - && pip3 install exhale + run: | + brew install doxygen + brew install sphinx-doc + python3 -m venv .venv + source .venv/bin/activate + pip3 install sphinx-rtd-theme + pip3 install breathe + pip3 install sphinx-sitemap + pip3 install exhale - name: Checkout repo uses: actions/checkout@1.0.0 - name: Build docs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 39267f34f..4ba3c90c3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -55,6 +55,14 @@ jobs: compiler-ref: ${{ needs.fetch-lf.outputs.ref }} if: ${{ !github.event.pull_request.draft ||contains( github.event.pull_request.labels.*.name, 'zephyr') }} + lf-default-flexpret: + needs: [fetch-lf] + uses: lf-lang/lingua-franca/.github/workflows/c-flexpret-tests.yml@master + with: + runtime-ref: ${{ github.ref }} + compiler-ref: ${{ needs.fetch-lf.outputs.ref }} + if: ${{ !github.event.pull_request.draft ||contains( github.event.pull_request.labels.*.name, 'flexpret') }} + lf-default: needs: [fetch-lf] uses: lf-lang/lingua-franca/.github/workflows/c-tests.yml@master diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index fc8a7123d..1ced71f16 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -11,7 +11,7 @@ jobs: run: strategy: matrix: - platform: [ubuntu-latest, macos-latest, windows-latest] + platform: [ubuntu-latest, macos-latest] runs-on: ${{ matrix.platform }} steps: @@ -24,4 +24,13 @@ jobs: cd build cmake .. ${{ inputs.cmake-args }} cmake --build . - make test + sudo make test + - name: Run RTI unit tests + run: | + cd core/federated/RTI + mkdir build + cd build + cmake .. + cmake --build . + ctest + diff --git a/.gitignore b/.gitignore index b8237101d..d11ccaa7a 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,6 @@ util/tracing/trace_to_csv.o util/tracing/trace_to_influxdb util/tracing/trace_to_influxdb.o util/tracing/trace_util.o + +# Generated trace lib +trace/**/*.a diff --git a/CMakeLists.txt b/CMakeLists.txt index 8a916023f..186e5b670 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -22,10 +22,6 @@ if(DEFINED LF_SINGLE_THREADED) add_compile_definitions(LF_SINGLE_THREADED=1) endif() -# Warnings as errors -add_compile_options(-Werror) - -set(Test test) set(Lib lib) set(CoreLibPath core) set(CoreLib reactor-c) @@ -42,7 +38,6 @@ include_directories(${CMAKE_SOURCE_DIR}/include/core/utils) include_directories(${CMAKE_SOURCE_DIR}/include/api) enable_testing() -add_subdirectory(${Test}) add_subdirectory(${Lib}) add_subdirectory(${CoreLibPath}) diff --git a/README.md b/README.md index 3d1313a1f..8c714a165 100644 --- a/README.md +++ b/README.md @@ -41,6 +41,11 @@ To run tests for the multithreaded runtime, provide a nonzero number of workers when invoking `cmake`. For example: - `cmake .. -DNUMBER_OF_WORKERS=2` +- `cmake --build .` +- `sudo make test` + +Note that one of the tests in the multithreaded test suite requires sudo because +it changes the scheduling policy and priorities. 
To define/undefine other preprocessor definitions such as `LOG_LEVEL`, pass them as arguments to `cmake` in the same way as with `NUMBER_OF_WORKERS`, using the same diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index 19397c16d..3b1a6d0a0 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -55,13 +55,26 @@ lf_enable_compiler_warnings(reactor-c) if (DEFINED LF_TRACE) include(${LF_ROOT}/trace/api/CMakeLists.txt) - if(NOT LF_TRACE_PLUGIN) - set(LF_TRACE_PLUGIN lf::trace-impl) + target_link_libraries(reactor-c PUBLIC lf::trace-api) + # If the user specified an external trace plugin, find it and link with it. + if (LF_TRACE_PLUGIN) + message(STATUS "Linking trace plugin library ${LF_TRACE_PLUGIN}") + find_library(TRACE_LIB NAMES ${LF_TRACE_PLUGIN} HINTS "${LF_ROOT}") + if (NOT TRACE_LIB) + message(FATAL_ERROR "The trace plugin library ${LF_TRACE_PLUGIN} was not found") + endif() + # We also link with libdl because it is needed for some platforms. + # TODO: Figure out why this is the case and how to avoid it. + target_link_libraries(reactor-c PRIVATE ${TRACE_LIB} dl) + else() + # If not, use the default implementation + message(STATUS "Linking with default trace implementation") include(${LF_ROOT}/trace/impl/CMakeLists.txt) + target_link_libraries(reactor-c PRIVATE lf::trace-impl) endif() - message(STATUS "linking trace plugin library ${LF_TRACE_PLUGIN}") - target_link_libraries(reactor-c PUBLIC lf::trace-api) - target_link_libraries(reactor-c PRIVATE "${LF_TRACE_PLUGIN}") +else() + include(${LF_ROOT}/trace/api/types/CMakeLists.txt) + target_link_libraries(reactor-c PUBLIC lf::trace-api-types) endif() include(${LF_ROOT}/version/api/CMakeLists.txt) @@ -109,7 +122,7 @@ if(DEFINED FEDERATED_AUTHENTICATED) target_link_libraries(reactor-c PUBLIC OpenSSL::SSL) endif() -if(DEFINED _LF_CLOCK_SYNC_ON) +if(DEFINED FEDERATED) find_library(MATH_LIBRARY m) if(MATH_LIBRARY) target_link_libraries(reactor-c PUBLIC ${MATH_LIBRARY}) @@ -128,8 +141,9 @@ target_compile_definitions(reactor-c PRIVATE INITIAL_EVENT_QUEUE_SIZE=${INITIAL_ target_compile_definitions(reactor-c PRIVATE INITIAL_REACT_QUEUE_SIZE=${INITIAL_REACT_QUEUE_SIZE}) target_compile_definitions(reactor-c PUBLIC PLATFORM_${CMAKE_SYSTEM_NAME}) -# Macro for translating a command-line argument into compile definition for -# reactor-c lib +# If variable X is defined in CMake (set using SET()) or passed in as a command-line +# argument using -DX=, then make it a compiler flag for reactor-c so that X +# is also defined in the C code for reactor-c. macro(define X) if(DEFINED ${X}) message(STATUS ${X}=${${X}}) @@ -142,9 +156,9 @@ message(STATUS "Applying preprocessor definitions...") define(_LF_CLOCK_SYNC_ATTENUATION) define(_LF_CLOCK_SYNC_COLLECT_STATS) define(_LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL) -define(_LF_CLOCK_SYNC_INITIAL) -define(_LF_CLOCK_SYNC_ON) +define(LF_CLOCK_SYNC) # 1 for OFF, 2 for INIT and 3 for ON. define(_LF_CLOCK_SYNC_PERIOD_NS) +define(_LF_FEDERATE_NAMES_COMMA_SEPARATED) define(ADVANCE_MESSAGE_INTERVAL) define(EXECUTABLE_PREAMBLE) define(FEDERATED_CENTRALIZED) diff --git a/core/clock.c b/core/clock.c index d5b250fc3..8b10297a7 100644 --- a/core/clock.c +++ b/core/clock.c @@ -8,9 +8,14 @@ #include "clock.h" #include "low_level_platform.h" -#if defined(_LF_CLOCK_SYNC_ON) +// If we are federated, include clock-sync API (and implementation) +#if defined(FEDERATED) #include "clock-sync.h" -#endif +#else +// In the unfederated case, just provide empty implementations.
+void clock_sync_add_offset(instant_t* t) { (void)t; } +void clock_sync_subtract_offset(instant_t* t) { (void)t; } +#endif // defined(FEDERATED) static instant_t last_read_physical_time = NEVER; @@ -20,9 +25,7 @@ int lf_clock_gettime(instant_t* now) { if (res != 0) { return -1; } -#if defined(_LF_CLOCK_SYNC_ON) - clock_sync_apply_offset(now); -#endif + clock_sync_add_offset(now); do { // Atomically fetch the last read value. This is done with // atomics to guarantee that it works on 32bit platforms as well. @@ -42,19 +45,15 @@ int lf_clock_gettime(instant_t* now) { } int lf_clock_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_time) { -#if defined(_LF_CLOCK_SYNC_ON) // Remove any clock sync offset and call the Platform API. - clock_sync_remove_offset(&wakeup_time); -#endif + clock_sync_subtract_offset(&wakeup_time); return _lf_interruptable_sleep_until_locked(env, wakeup_time); } #if !defined(LF_SINGLE_THREADED) int lf_clock_cond_timedwait(lf_cond_t* cond, instant_t wakeup_time) { -#if defined(_LF_CLOCK_SYNC_ON) // Remove any clock sync offset and call the Platform API. - clock_sync_remove_offset(&wakeup_time); -#endif + clock_sync_subtract_offset(&wakeup_time); return _lf_cond_timedwait(cond, wakeup_time); } -#endif +#endif // !defined(LF_SINGLE_THREADED) diff --git a/core/environment.c b/core/environment.c index 4523c4721..d2d56a593 100644 --- a/core/environment.c +++ b/core/environment.c @@ -1,32 +1,11 @@ /** * @file - * @author Erling R. Jellum (erling.r.jellum@ntnu.no) + * @author Erling R. Jellum + * @copyright (c) 2023-2024, The Norwegian University of Science and Technology. + * License: BSD 2-clause * - * @section LICENSE - * Copyright (c) 2023, The Norwegian University of Science and Technology. - * - * Redistribution and use in source and binary forms, with or without modification, - * are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF - * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * @section DESCRIPTION Functions intitializing and freeing memory for environments. - * See environment.h for docs. + * This file defines functions initializing and freeing memory for environments. + * See environment.h for docs.
*/ #include "environment.h" diff --git a/core/federated/RTI/CMakeLists.txt b/core/federated/RTI/CMakeLists.txt index 9c4f996d2..5bfbf0196 100644 --- a/core/federated/RTI/CMakeLists.txt +++ b/core/federated/RTI/CMakeLists.txt @@ -1,58 +1,15 @@ -# This is a cmake build script providing a solution for compiling -# the RTI in this directory.. -# -# Usage: -# -# To compile with cmake, run the following commands: -# -# $> mkdir build && cd build -# $> cmake ../ -# $> make -# $> sudo make install -# -# This create a binary RTI in the current working directory. Please put this in -# a directory that is on the path. -# -# To enable DEBUG messages, use the following build commands instead: -# -# $> mkdir build && cd build -# $> cmake -DCMAKE_BUILD_TYPE=DEBUG ../ -# $> make -# $> sudo make install -# -# If you would like to go back to non-DEBUG mode, you would have to remove all -# contents of the `build` folder. - -# To enable simple HMAC-based authentication of federates, -# add `-DAUTH=ON` option to the cmake command as shown below: -# -# $> mkdir build && cd build -# $> cmake -DAUTH=ON ../ -# $> make -# $> sudo make install -# -# If you would like to go back to non-AUTH mode, you would have to remove all -# contents of the `build` folder. - cmake_minimum_required(VERSION 3.12) project(RTI VERSION 1.0.0 LANGUAGES C) set(CoreLib ../../../core) set(LF_ROOT ${CMAKE_CURRENT_LIST_DIR}/../../..) - set(IncludeDir ../../../include/core) -include_directories(../../../include) -include_directories(${IncludeDir}) -include_directories(${IncludeDir}/federated) -include_directories(${IncludeDir}/federated/network) -include_directories(${IncludeDir}/modal_models) -include_directories(${IncludeDir}/utils) - - -# Declare a new executable target and list all its sources -add_executable( - RTI - main.c +set(RTI_LIB rti_lib) +set(RTI_MAIN RTI) + +# Add common RTI functionality to a static library. This is done to simplify +# the building of unit tests. +add_library(${RTI_LIB} STATIC rti_common.c rti_remote.c ${CoreLib}/tracepoint.c @@ -64,6 +21,17 @@ add_executable( ${CoreLib}/utils/pqueue_tag.c ${CoreLib}/utils/pqueue.c ) + +# Add the main target which will link with the library. 
+add_executable(${RTI_MAIN} main.c) + +target_include_directories(${RTI_LIB} PUBLIC ../../../include) +target_include_directories(${RTI_LIB} PUBLIC ${IncludeDir}) +target_include_directories(${RTI_LIB} PUBLIC ${IncludeDir}/federated) +target_include_directories(${RTI_LIB} PUBLIC ${IncludeDir}/federated/network) +target_include_directories(${RTI_LIB} PUBLIC ${IncludeDir}/modal_models) +target_include_directories(${RTI_LIB} PUBLIC ${IncludeDir}/utils) + if (NOT DEFINED LOG_LEVEL) set(LOG_LEVEL 0) ENDIF(NOT DEFINED LOG_LEVEL) @@ -73,62 +41,79 @@ IF(CMAKE_BUILD_TYPE MATCHES DEBUG) message("-- Building RTI with DEBUG messages enabled") set(LOG_LEVEL 4) ENDIF(CMAKE_BUILD_TYPE MATCHES DEBUG) -target_compile_definitions(RTI PUBLIC LOG_LEVEL=${LOG_LEVEL}) +target_compile_definitions(${RTI_LIB} PUBLIC LOG_LEVEL=${LOG_LEVEL}) include(${LF_ROOT}/version/api/CMakeLists.txt) -target_link_libraries(RTI lf::version-api) +target_link_libraries(${RTI_LIB} PUBLIC lf::version-api) include(${LF_ROOT}/logging/api/CMakeLists.txt) -target_link_libraries(RTI lf::logging-api) +target_link_libraries(${RTI_LIB} PUBLIC lf::logging-api) include(${LF_ROOT}/tag/api/CMakeLists.txt) -target_link_libraries(RTI lf::tag-api) +target_link_libraries(${RTI_LIB} PUBLIC lf::tag-api) include(${LF_ROOT}/platform/api/CMakeLists.txt) -target_link_libraries(RTI lf::platform-api) +target_link_libraries(${RTI_LIB} PUBLIC lf::platform-api) include(${LF_ROOT}/platform/impl/CMakeLists.txt) -target_link_libraries(RTI lf::platform-impl) +target_link_libraries(${RTI_LIB} PUBLIC lf::platform-impl) include(${LF_ROOT}/trace/api/CMakeLists.txt) -target_link_libraries(RTI lf::trace-api) +target_link_libraries(${RTI_LIB} PUBLIC lf::trace-api) include(${LF_ROOT}/trace/impl/CMakeLists.txt) -target_link_libraries(RTI lf::trace-impl) +target_link_libraries(${RTI_LIB} PUBLIC lf::trace-impl) include(${LF_ROOT}/low_level_platform/impl/CMakeLists.txt) -target_link_libraries(RTI lf::low-level-platform-impl) +target_link_libraries(${RTI_LIB} PUBLIC lf::low-level-platform-impl) include(${LF_ROOT}/low_level_platform/api/CMakeLists.txt) -target_link_libraries(RTI lf::low-level-platform-api) +target_link_libraries(${RTI_LIB} PUBLIC lf::low-level-platform-api) # Set the STANDALONE_RTI flag to include the rti_remote and rti_common. -target_compile_definitions(RTI PUBLIC STANDALONE_RTI=1) +target_compile_definitions(${RTI_LIB} PUBLIC STANDALONE_RTI=1) # Set FEDERATED to get federated compilation support -target_compile_definitions(RTI PUBLIC FEDERATED=1) - -target_compile_definitions(RTI PUBLIC PLATFORM_${CMAKE_SYSTEM_NAME}) +target_compile_definitions(${RTI_LIB} PUBLIC FEDERATED=1) +target_compile_definitions(${RTI_LIB} PUBLIC PLATFORM_${CMAKE_SYSTEM_NAME}) # Set RTI Tracing -target_compile_definitions(RTI PUBLIC RTI_TRACE) +target_compile_definitions(${RTI_LIB} PUBLIC RTI_TRACE) # Warnings as errors -target_compile_options(RTI PUBLIC -Werror) +target_compile_options(${RTI_LIB} PUBLIC -Werror) + # Find threads and link to it find_package(Threads REQUIRED) -target_link_libraries(RTI Threads::Threads) +target_link_libraries(${RTI_LIB} PUBLIC Threads::Threads) # Option for enabling federate authentication by RTI. option(AUTH "Federate authentication by RTI enabled." 
OFF) IF(AUTH MATCHES ON) - add_compile_definitions(__RTI_AUTH__) + target_compile_definitions(${RTI_LIB} PUBLIC __RTI_AUTH__) # Find OpenSSL and link to it find_package(OpenSSL REQUIRED) - target_link_libraries(RTI OpenSSL::SSL) + target_link_libraries(${RTI_LIB} PUBLIC OpenSSL::SSL) ENDIF(AUTH MATCHES ON) +# Link the main target with the library. +target_link_libraries(${RTI_MAIN} PRIVATE ${RTI_LIB}) + install( TARGETS RTI DESTINATION bin ) + +# Build unit tests +enable_testing() +set(TEST_DIR ${CMAKE_CURRENT_SOURCE_DIR}/test) +set(TEST_SRCS + ${TEST_DIR}/rti_common_test.c +) +foreach(TEST_SRC ${TEST_SRCS}) + get_filename_component(TEST_NAME ${TEST_SRC} NAME_WE) + add_executable(${TEST_NAME} ${TEST_SRC}) + add_test(NAME ${TEST_NAME} COMMAND ${TEST_NAME}) + target_link_libraries(${TEST_NAME} PUBLIC ${RTI_LIB}) + target_include_directories(${TEST_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) +endforeach() diff --git a/core/federated/RTI/README.md b/core/federated/RTI/README.md index 916959f6e..cea8581c5 100644 --- a/core/federated/RTI/README.md +++ b/core/federated/RTI/README.md @@ -8,6 +8,11 @@ make sudo make install ``` +To run the unit tests: +```bash +make test +``` + **Note:** To enable DEBUG messages, use the following build commands instead: ```bash @@ -47,3 +52,4 @@ docker login -u [username] ``` To authenticate, request a PAT on [DockerHub](https://hub.docker.com/settings/security). + diff --git a/core/federated/RTI/main.c b/core/federated/RTI/main.c index 5726dd775..53ee39f80 100644 --- a/core/federated/RTI/main.c +++ b/core/federated/RTI/main.c @@ -227,7 +227,7 @@ int process_args(int argc, const char* argv[]) { } i++; long num_federates = strtol(argv[i], NULL, 10); - if (num_federates == 0L || num_federates == LONG_MAX || num_federates == LONG_MIN) { + if (num_federates <= 0L || num_federates == LONG_MAX || num_federates == LONG_MIN) { lf_print_error("--number_of_federates needs a valid positive integer argument."); usage(argc, argv); return 0; @@ -289,6 +289,7 @@ int process_args(int argc, const char* argv[]) { return 0; } } if (rti.base.number_of_scheduling_nodes == 0) { lf_print_error("--number_of_federates needs a valid positive integer argument."); usage(argc, argv); return 0; } if (rti.base.number_of_scheduling_nodes == 0) { usage(argc, argv); return 0; } return 1; } int main(int argc, const char* argv[]) { @@ -334,7 +337,7 @@ int main(int argc, const char* argv[]) { // sync thread. Add 1 for the thread that responds to erroneous // connections attempted after initialization phase has completed. Add 1 // for the main thread. - lf_tracing_global_init("rti", -1, _lf_number_of_workers * 2 + 3); + lf_tracing_global_init("rti", NULL, -1, _lf_number_of_workers * 2 + 3); lf_print("Tracing the RTI execution in %s file.", rti_trace_file_name); } diff --git a/core/federated/RTI/rti_remote.c b/core/federated/RTI/rti_remote.c index 4cce64d0c..3c5cf9b1f 100644 --- a/core/federated/RTI/rti_remote.c +++ b/core/federated/RTI/rti_remote.c @@ -569,11 +569,10 @@ void handle_timed_message(federate_info_t* sending_federate, unsigned char* buff // issue a TAG before this message has been forwarded. LF_MUTEX_LOCK(&rti_mutex); - // If the destination federate is no longer connected, issue a warning - // and return. + // If the destination federate is no longer connected, issue a warning, + // remove the message from the socket and return.
federate_info_t* fed = GET_FED_INFO(federate_id); if (fed->enclave.state == NOT_CONNECTED) { - LF_MUTEX_UNLOCK(&rti_mutex); lf_print_warning("RTI: Destination federate %d is no longer connected. Dropping message.", federate_id); LF_PRINT_LOG("Fed status: next_event " PRINTF_TAG ", " "completed " PRINTF_TAG ", " @@ -584,6 +583,18 @@ void handle_timed_message(federate_info_t* sending_federate, unsigned char* buff fed->enclave.last_granted.time - start_time, fed->enclave.last_granted.microstep, fed->enclave.last_provisionally_granted.time - start_time, fed->enclave.last_provisionally_granted.microstep); + // If the message was larger than the buffer, we must empty out the remainder also. + size_t total_bytes_read = bytes_read; + while (total_bytes_read < total_bytes_to_read) { + bytes_to_read = total_bytes_to_read - total_bytes_read; + if (bytes_to_read > FED_COM_BUFFER_SIZE) { + bytes_to_read = FED_COM_BUFFER_SIZE; + } + read_from_socket_fail_on_error(&sending_federate->socket, bytes_to_read, buffer, NULL, + "RTI failed to clear message chunks."); + total_bytes_read += bytes_to_read; + } + LF_MUTEX_UNLOCK(&rti_mutex); return; } else { if (lf_tag_compare(intended_tag, fed->effective_start_tag) < 0) { @@ -1382,7 +1393,7 @@ void* federate_info_thread_TCP(void* fed) { int read_failed = read_from_socket(my_fed->socket, 1, buffer); if (read_failed) { // Socket is closed - lf_print_warning("RTI: Socket to federate %d is closed. Exiting the thread.", my_fed->enclave.id); + lf_print_error("RTI: Socket to federate %d is closed. Exiting the thread.", my_fed->enclave.id); my_fed->enclave.state = NOT_CONNECTED; my_fed->socket = -1; // FIXME: We need better error handling here, but do not stop execution here. @@ -1513,6 +1524,9 @@ static int32_t receive_and_check_fed_id_message(int* socket_id, struct sockaddr_ // If the connection is a peer-to-peer connection between two // federates, reject the connection with the WRONG_SERVER error. send_reject(socket_id, WRONG_SERVER); + } else if (buffer[0] == MSG_TYPE_FED_NONCE) { + send_reject(socket_id, RTI_NOT_EXECUTED_WITH_AUTH); + lf_print_error("RTI not executed with HMAC authentication option using -a or --auth."); } else { send_reject(socket_id, UNEXPECTED_MESSAGE); } @@ -2348,14 +2362,26 @@ void initialize_RTI(rti_remote_t* rti) { rti_remote->phase = startup_phase; } +// The RTI includes clock.c, which requires the following functions that are defined +// in clock-sync.c. But clock-sync.c is not included in the standalone RTI. +// Provide empty implementations of these functions. +void clock_sync_add_offset(instant_t* t) { (void)t; } +void clock_sync_subtract_offset(instant_t* t) { (void)t; } + void free_scheduling_nodes(scheduling_node_t** scheduling_nodes, uint16_t number_of_scheduling_nodes) { for (uint16_t i = 0; i < number_of_scheduling_nodes; i++) { - // FIXME: Gives error freeing memory not allocated!!!! 
scheduling_node_t* node = scheduling_nodes[i]; - if (node->upstream != NULL) + if (node->upstream != NULL) { free(node->upstream); - if (node->downstream != NULL) + free(node->upstream_delay); + } + if (node->min_delays != NULL) { + free(node->min_delays); + } + if (node->downstream != NULL) { free(node->downstream); + } + free(node); } free(scheduling_nodes); } diff --git a/test/RTI/rti_common_test.c b/core/federated/RTI/test/rti_common_test.c similarity index 99% rename from test/RTI/rti_common_test.c rename to core/federated/RTI/test/rti_common_test.c index 3d2c73af9..107d08057 100644 --- a/test/RTI/rti_common_test.c +++ b/core/federated/RTI/test/rti_common_test.c @@ -1,4 +1,3 @@ -#if defined STANDALONE_RTI #include #include #include @@ -246,7 +245,7 @@ static void multiple_nodes() { assert(lf_tag_compare(test_rti.scheduling_nodes[3]->min_delays[0].min_delay, (tag_t){NSEC(3), 0}) == 0); } -int main(int argc, char** argv) { +int main() { initialize_rti_common(&test_rti); // Tests for the function update_min_delays_upstream() @@ -257,4 +256,3 @@ int main(int argc, char** argv) { two_nodes_normal_delay(); multiple_nodes(); } -#endif \ No newline at end of file diff --git a/core/federated/clock-sync.c b/core/federated/clock-sync.c index 577d1104a..b18efb650 100644 --- a/core/federated/clock-sync.c +++ b/core/federated/clock-sync.c @@ -85,6 +85,9 @@ static void adjust_lf_clock_sync_offset(interval_t adjustment) { } #ifdef _LF_CLOCK_SYNC_COLLECT_STATS + +#include <math.h> // For sqrtl() + /** * Update statistic on the socket based on the newly calculated network delay * and clock synchronization error @@ -135,7 +138,7 @@ lf_stat_ll calculate_socket_stat(struct socket_stat_t* socket_stat) { return stats; } -#endif +#endif // _LF_CLOCK_SYNC_COLLECT_STATS /** * Reset statistics on the socket. @@ -158,8 +161,8 @@ void reset_socket_stat(struct socket_stat_t* socket_stat) { * will be sent. */ uint16_t setup_clock_synchronization_with_rti() { - uint16_t port_to_return = UINT16_MAX; -#ifdef _LF_CLOCK_SYNC_ON + uint16_t port_to_return = UINT16_MAX; // Default if clock sync is off. +#if (LF_CLOCK_SYNC >= LF_CLOCK_SYNC_ON) // Initialize the UDP socket _lf_rti_socket_UDP = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); // Initialize the necessary information for the UDP address @@ -198,11 +201,9 @@ uint16_t setup_clock_synchronization_with_rti() { if (setsockopt(_lf_rti_socket_UDP, SOL_SOCKET, SO_SNDTIMEO, (const char*)&timeout_time, sizeof(timeout_time)) < 0) { lf_print_error("Failed to set SO_SNDTIMEO option on the socket: %s.", strerror(errno)); } -#else // No runtime clock synchronization. Send port -1 or 0 instead. -#ifdef _LF_CLOCK_SYNC_INITIAL +#elif (LF_CLOCK_SYNC == LF_CLOCK_SYNC_INIT) port_to_return = 0u; -#endif -#endif // _LF_CLOCK_SYNC_ON +#endif // (LF_CLOCK_SYNC >= LF_CLOCK_SYNC_ON) return port_to_return; } @@ -389,7 +390,7 @@ void handle_T4_clock_sync_message(unsigned char* buffer, int socket, instant_t r #ifdef _LF_CLOCK_SYNC_COLLECT_STATS // Enabled by default // Update RTI's socket stats update_socket_stat(&_lf_rti_socket_stat, network_round_trip_delay, estimated_clock_error); -#endif +#endif // _LF_CLOCK_SYNC_COLLECT_STATS // FIXME: Enable alternative regression mechanism here.
LF_PRINT_DEBUG("Clock sync: Adjusting clock offset running average by " PRINTF_TIME ".", @@ -412,13 +413,13 @@ void handle_T4_clock_sync_message(unsigned char* buffer, int socket, instant_t r reset_socket_stat(&_lf_rti_socket_stat); return; } -#endif +#endif // _LF_CLOCK_SYNC_COLLECT_STATS // The number of received T4 messages has reached _LF_CLOCK_SYNC_EXCHANGES_PER_INTERVAL // which means we can now adjust the clock offset. // For the AVG algorithm, history is a running average and can be directly - // applied + // applied. adjust_lf_clock_sync_offset(_lf_rti_socket_stat.history); - // @note AVG and SD will be zero if collect-stats is set to false + // @note AVG and SD will be zero if _LF_CLOCK_SYNC_COLLECT_STATS is set to false LF_PRINT_LOG("Clock sync:" " New offset: " PRINTF_TIME "." " Round trip delay to RTI (now): " PRINTF_TIME "." @@ -527,17 +528,21 @@ void* listen_to_rti_UDP_thread(void* args) { // If clock synchronization is enabled, provide implementations. If not // just empty implementations that should be optimized away. -#if defined(FEDERATED) && defined(_LF_CLOCK_SYNC_ON) -void clock_sync_apply_offset(instant_t* t) { *t += (_lf_clock_sync_offset + _lf_clock_sync_constant_bias); } +#if (LF_CLOCK_SYNC >= LF_CLOCK_SYNC_INIT) +void clock_sync_add_offset(instant_t* t) { + *t = lf_time_add(*t, (_lf_clock_sync_offset + _lf_clock_sync_constant_bias)); +} -void clock_sync_remove_offset(instant_t* t) { *t -= (_lf_clock_sync_offset + _lf_clock_sync_constant_bias); } +void clock_sync_subtract_offset(instant_t* t) { + *t = lf_time_add(*t, -(_lf_clock_sync_offset + _lf_clock_sync_constant_bias)); +} void clock_sync_set_constant_bias(interval_t offset) { _lf_clock_sync_constant_bias = offset; } -#else -void clock_sync_apply_offset(instant_t* t) { (void)t; } -void clock_sync_remove_offset(instant_t* t) { (void)t; } +#else // i.e. (LF_CLOCK_SYNC < LF_CLOCK_SYNC_INIT) +void clock_sync_add_offset(instant_t* t) { (void)t; } +void clock_sync_subtract_offset(instant_t* t) { (void)t; } void clock_sync_set_constant_bias(interval_t offset) { (void)offset; } -#endif +#endif // (LF_CLOCK_SYNC >= LF_CLOCK_SYNC_INIT) /** * Create the thread responsible for handling clock synchronization @@ -548,13 +553,13 @@ void clock_sync_set_constant_bias(interval_t offset) { (void)offset; } * \ingroup agroup */ int create_clock_sync_thread(lf_thread_t* thread_id) { -#ifdef _LF_CLOCK_SYNC_ON +#if (LF_CLOCK_SYNC >= LF_CLOCK_SYNC_ON) // One for UDP messages if clock synchronization is enabled for this federate return lf_thread_create(thread_id, listen_to_rti_UDP_thread, NULL); -#else - (void)thread_id; -#endif // _LF_CLOCK_SYNC_ON +#else // i.e. (LF_CLOCK_SYNC < LF_CLOCK_SYNC_ON) + (void)thread_id; // Suppress unused parameter warning. 
+#endif // (LF_CLOCK_SYNC >= LF_CLOCK_SYNC_ON) return 0; } -#endif +#endif // FEDERATED diff --git a/core/federated/federate.c b/core/federated/federate.c index 2fa7bee54..214e2af50 100644 --- a/core/federated/federate.c +++ b/core/federated/federate.c @@ -864,6 +864,9 @@ static int perform_hmac_authentication() { if (received[0] == MSG_TYPE_FAILED) { lf_print_error("RTI has failed."); return -1; + } else if (received[0] == MSG_TYPE_REJECT && received[1] == RTI_NOT_EXECUTED_WITH_AUTH) { + lf_print_error("RTI is not executed with HMAC option."); + return -1; } else { lf_print_error("Received unexpected response %u from the RTI (see net_common.h).", received[0]); return -1; @@ -2639,14 +2642,18 @@ void lf_set_federation_id(const char* fid) { federation_metadata.federation_id = void lf_spawn_staa_thread() { lf_thread_create(&_fed.staaSetter, update_ports_from_staa_offsets, NULL); } #endif // FEDERATED_DECENTRALIZED -void lf_stall_advance_level_federation(environment_t* env, size_t level) { - LF_PRINT_DEBUG("Acquiring the environment mutex."); - LF_MUTEX_LOCK(&env->mutex); - LF_PRINT_DEBUG("Waiting on MLAA with next_reaction_level %zu and MLAA %d.", level, max_level_allowed_to_advance); +void lf_stall_advance_level_federation_locked(size_t level) { + LF_PRINT_DEBUG("Waiting for MLAA %d to exceed level %zu.", max_level_allowed_to_advance, level); while (((int)level) >= max_level_allowed_to_advance) { lf_cond_wait(&lf_port_status_changed); }; - LF_PRINT_DEBUG("Exiting wait with MLAA %d and next_reaction_level %zu.", max_level_allowed_to_advance, level); + LF_PRINT_DEBUG("Exiting wait with MLAA %d and level %zu.", max_level_allowed_to_advance, level); +} + +void lf_stall_advance_level_federation(environment_t* env, size_t level) { + LF_PRINT_DEBUG("Acquiring the environment mutex."); + LF_MUTEX_LOCK(&env->mutex); + lf_stall_advance_level_federation_locked(level); LF_MUTEX_UNLOCK(&env->mutex); } diff --git a/core/federated/network/net_util.c b/core/federated/network/net_util.c index 3ab04c5a2..61d4804bd 100644 --- a/core/federated/network/net_util.c +++ b/core/federated/network/net_util.c @@ -95,17 +95,17 @@ int read_from_socket(int socket, size_t num_bytes, unsigned char* buffer) { return -1; } ssize_t bytes_read = 0; - int retry_count = 0; while (bytes_read < (ssize_t)num_bytes) { ssize_t more = read(socket, buffer + bytes_read, num_bytes - (size_t)bytes_read); - if (more < 0 && retry_count++ < NUM_SOCKET_RETRIES && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)) { + if (more < 0 && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)) { // Those error codes set by the socket indicates // that we should try again (@see man errno). - lf_print_warning("Reading from socket failed. Will try again."); + LF_PRINT_DEBUG("Reading from socket %d failed with error: `%s`. Will try again.", socket, strerror(errno)); lf_sleep(DELAY_BETWEEN_SOCKET_RETRIES); continue; } else if (more < 0) { // A more serious error occurred. + lf_print_error("Reading from socket %d failed. With error: `%s`", socket, strerror(errno)); return -1; } else if (more == 0) { // EOF received. 
@@ -143,7 +143,9 @@ void read_from_socket_fail_on_error(int* socket, size_t num_bytes, unsigned char LF_MUTEX_UNLOCK(mutex); } if (format != NULL) { + va_start(args, format); lf_print_error_system_failure(format, args); + va_end(args); } else { lf_print_error_system_failure("Failed to read from socket."); } @@ -171,11 +173,12 @@ int write_to_socket(int socket, size_t num_bytes, unsigned char* buffer) { // The error codes EAGAIN or EWOULDBLOCK indicate // that we should try again (@see man errno). // The error code EINTR means the system call was interrupted before completing. - LF_PRINT_DEBUG("Writing to socket was blocked. Will try again."); + LF_PRINT_DEBUG("Writing to socket %d was blocked. Will try again.", socket); lf_sleep(DELAY_BETWEEN_SOCKET_RETRIES); continue; } else if (more < 0) { // A more serious error occurred. + lf_print_error("Writing to socket %d failed. With error: `%s`", socket, strerror(errno)); return -1; } bytes_written += more; @@ -209,7 +212,9 @@ void write_to_socket_fail_on_error(int* socket, size_t num_bytes, unsigned char* LF_MUTEX_UNLOCK(mutex); } if (format != NULL) { + va_start(args, format); lf_print_error_system_failure(format, args); + va_end(args); } else { lf_print_error("Failed to write to socket. Closing it."); } @@ -515,7 +520,7 @@ bool extract_match_groups(const char* rti_addr, char** rti_addr_strs, bool** rti } void extract_rti_addr_info(const char* rti_addr, rti_addr_info_t* rti_addr_info) { - const char* regex_str = "(([a-zA-Z0-9_-]{1,254})@)?([a-zA-Z0-9.]{1,255})(:([0-9]{1,5}))?"; + const char* regex_str = "(([a-zA-Z0-9_-]{1,254})@)?([a-zA-Z0-9._-]{1,255})(:([0-9]{1,5}))?"; size_t max_groups = 6; // The group indices of each field of interest in the regex. int user_gid = 2, host_gid = 3, port_gid = 5; diff --git a/core/modal_models/modes.c b/core/modal_models/modes.c index 922d02f9e..42c35a65f 100644 --- a/core/modal_models/modes.c +++ b/core/modal_models/modes.c @@ -507,7 +507,7 @@ void _lf_process_mode_changes(environment_t* env, reactor_mode_state_t* states[] if (env->modes->triggered_reactions_request) { // Insert a dummy event in the event queue for the next microstep to make // sure startup/reset reactions (if any) are triggered as soon as possible. - tag_t dummy_event_tag = (tag_t){.time = env->current_tag.time, .microstep = 1}; + tag_t dummy_event_tag = (tag_t){.time = env->current_tag.time, .microstep = env->current_tag.microstep + 1}; pqueue_tag_insert(env->event_q, (pqueue_tag_element_t*)_lf_create_dummy_events(env, dummy_event_tag)); } } diff --git a/core/reactor.c b/core/reactor.c index 00df9e07f..6e2fd24a1 100644 --- a/core/reactor.c +++ b/core/reactor.c @@ -18,8 +18,8 @@ #include "reactor_common.h" #include "environment.h" -// Embedded platforms with no TTY shouldnt have signals -#if !defined(NO_TTY) +// Embedded platforms with no command line interface shouldn't have signals +#if !defined(NO_CLI) #include <signal.h> // To trap ctrl-c and invoke termination(). #endif @@ -164,6 +164,7 @@ int _lf_do_step(environment_t* env) { // Deadline violation has occurred. violation = true; // Invoke the local handler, if there is one. + tracepoint_reaction_starts(env, reaction, 0); reaction_function_t handler = reaction->deadline_violation_handler; if (handler != NULL) { (*handler)(reaction->self); @@ -171,6 +172,7 @@ int _lf_do_step(environment_t* env) { // triggered reactions into the queue.
schedule_output_reactions(env, reaction, 0); } + tracepoint_reaction_ends(env, reaction, 0); } } @@ -286,12 +288,6 @@ void lf_request_stop(void) { lf_set_stop_tag(env, new_stop_tag); } -/** - * Return false. - * @param reaction The reaction. - */ -bool _lf_is_blocked_by_executing_reaction(void) { return false; } - /** * The main loop of the LF program. * @@ -319,8 +315,8 @@ int lf_reactor_c_main(int argc, const char* argv[]) { // The above handles only "normal" termination (via a call to exit). // As a consequence, we need to also trap Ctrl-C, which issues a SIGINT, // and cause it to call exit. - // Embedded platforms with NO_TTY have no concept of a signal; for those, we exclude this call. -#ifndef NO_TTY + // Embedded platforms with NO_CLI have no concept of a signal; for those, we exclude this call. +#ifndef NO_CLI signal(SIGINT, exit); #endif // Create and initialize the environment diff --git a/core/reactor_common.c b/core/reactor_common.c index 33e5582f5..55d3342ca 100644 --- a/core/reactor_common.c +++ b/core/reactor_common.c @@ -824,6 +824,7 @@ void schedule_output_reactions(environment_t* env, reaction_t* reaction, int wor violation = true; // Invoke the local handler, if there is one. reaction_function_t handler = downstream_to_execute_now->deadline_violation_handler; + tracepoint_reaction_starts(env, downstream_to_execute_now, worker); if (handler != NULL) { // Assume the mutex is still not held. (*handler)(downstream_to_execute_now->self); @@ -832,6 +833,7 @@ void schedule_output_reactions(environment_t* env, reaction_t* reaction, int wor // triggered reactions into the queue or execute them directly if possible. schedule_output_reactions(env, downstream_to_execute_now, worker); } + tracepoint_reaction_ends(env, downstream_to_execute_now, worker); } } if (!violation) { @@ -852,7 +854,7 @@ void schedule_output_reactions(environment_t* env, reaction_t* reaction, int wor /** * Print a usage message. - * TODO: This is not necessary for NO_TTY + * TODO: This is not necessary for NO_CLI */ void usage(int argc, const char* argv[]) { printf("\nCommand-line arguments: \n\n"); @@ -890,7 +892,7 @@ const char** default_argv = NULL; * Process the command-line arguments. If the command line arguments are not * understood, then print a usage message and return 0. Otherwise, return 1. * @return 1 if the arguments processed successfully, 0 otherwise. - * TODO: Not necessary for NO_TTY + * TODO: Not necessary for NO_CLI */ int process_args(int argc, const char* argv[]) { int i = 1; @@ -1038,26 +1040,26 @@ int process_args(int argc, const char* argv[]) { * core runtime. 
*/ #ifdef LF_TRACE -static void check_version(version_t version) { +static void check_version(const version_t* version) { #ifdef LF_SINGLE_THREADED - LF_ASSERT(version.build_config.single_threaded == TRIBOOL_TRUE || - version.build_config.single_threaded == TRIBOOL_DOES_NOT_MATTER, + LF_ASSERT(version->build_config.single_threaded == TRIBOOL_TRUE || + version->build_config.single_threaded == TRIBOOL_DOES_NOT_MATTER, "expected single-threaded version"); #else - LF_ASSERT(version.build_config.single_threaded == TRIBOOL_FALSE || - version.build_config.single_threaded == TRIBOOL_DOES_NOT_MATTER, + LF_ASSERT(version->build_config.single_threaded == TRIBOOL_FALSE || + version->build_config.single_threaded == TRIBOOL_DOES_NOT_MATTER, "expected multi-threaded version"); #endif #ifdef NDEBUG - LF_ASSERT(version.build_config.build_type_is_debug == TRIBOOL_FALSE || - version.build_config.build_type_is_debug == TRIBOOL_DOES_NOT_MATTER, + LF_ASSERT(version->build_config.build_type_is_debug == TRIBOOL_FALSE || + version->build_config.build_type_is_debug == TRIBOOL_DOES_NOT_MATTER, "expected release version"); #else - LF_ASSERT(version.build_config.build_type_is_debug == TRIBOOL_TRUE || - version.build_config.build_type_is_debug == TRIBOOL_DOES_NOT_MATTER, + LF_ASSERT(version->build_config.build_type_is_debug == TRIBOOL_TRUE || + version->build_config.build_type_is_debug == TRIBOOL_DOES_NOT_MATTER, "expected debug version"); #endif - LF_ASSERT(version.build_config.log_level == LOG_LEVEL || version.build_config.log_level == INT_MAX, + LF_ASSERT(version->build_config.log_level == LOG_LEVEL || version->build_config.log_level == INT_MAX, "expected log level %d", LOG_LEVEL); // assert(!version.core_version_name || strcmp(version.core_version_name, CORE_SHA) == 0); // TODO: provide CORE_SHA } @@ -1079,14 +1081,15 @@ void initialize_global(void) { int num_envs = _lf_get_environments(&envs); int max_threads_tracing = envs[0].num_workers * num_envs + 1; // add 1 for the main thread #endif + #if defined(FEDERATED) // NUMBER_OF_FEDERATES is an upper bound on the number of upstream federates // -- threads are spawned to listen to upstream federates. Add 1 for the // clock sync thread and add 1 for the staa thread max_threads_tracing += NUMBER_OF_FEDERATES + 2; - lf_tracing_global_init("federate__", FEDERATE_ID, max_threads_tracing); + lf_tracing_global_init(envs[0].name, _LF_FEDERATE_NAMES_COMMA_SEPARATED, FEDERATE_ID, max_threads_tracing); #else - lf_tracing_global_init("trace_", 0, max_threads_tracing); + lf_tracing_global_init("main", NULL, 0, max_threads_tracing); #endif // Call the code-generated function to initialize all actions, timers, and ports // This is done for all environments/enclaves at the same time. @@ -1163,6 +1166,7 @@ void termination(void) { } } } + lf_tracing_global_shutdown(); // Skip most cleanup on abnormal termination. if (_lf_normal_termination) { _lf_free_all_tokens(); // Must be done before freeing reactors. 
@@ -1195,7 +1199,6 @@ void termination(void) { free_local_rti(); #endif } - lf_tracing_global_shutdown(); } index_t lf_combine_deadline_and_level(interval_t deadline, int level) { diff --git a/core/tag.c b/core/tag.c index 695bf05db..38419a8e1 100644 --- a/core/tag.c +++ b/core/tag.c @@ -45,20 +45,46 @@ tag_t lf_tag(void* env) { return ((environment_t*)env)->current_tag; } +instant_t lf_time_add(instant_t a, interval_t b) { + if (a == NEVER || b == NEVER) { + return NEVER; + } + if (a == FOREVER || b == FOREVER) { + return FOREVER; + } + instant_t res = a + b; + // Check for overflow + if (res < a && b > 0) { + return FOREVER; + } + // Check for underflow + if (res > a && b < 0) { + return NEVER; + } + return res; +} + tag_t lf_tag_add(tag_t a, tag_t b) { - if (a.time == NEVER || b.time == NEVER) - return NEVER_TAG; - if (a.time == FOREVER || b.time == FOREVER) + instant_t res = lf_time_add(a.time, b.time); + if (res == FOREVER) { return FOREVER_TAG; - if (b.time > 0) + } + if (res == NEVER) { + return NEVER_TAG; + } + + if (b.time > 0) { + // NOTE: The reason for handling this case is to "reset" the microstep counter at each after delay. a.microstep = 0; // Ignore microstep of first arg if time of second is > 0. - tag_t result = {.time = a.time + b.time, .microstep = a.microstep + b.microstep}; - if (result.microstep < a.microstep) - return FOREVER_TAG; - if (result.time < a.time && b.time > 0) + } + tag_t result = {.time = res, .microstep = a.microstep + b.microstep}; + + // If microsteps overflows + // FIXME: What should be the resulting tag in case of microstep overflow. + // see https://github.com/lf-lang/reactor-c/issues/430 + if (result.microstep < a.microstep) { return FOREVER_TAG; - if (result.time > a.time && b.time < 0) - return NEVER_TAG; + } return result; } diff --git a/core/threaded/reactor_threaded.c b/core/threaded/reactor_threaded.c index 5aa38170a..77efed099 100644 --- a/core/threaded/reactor_threaded.c +++ b/core/threaded/reactor_threaded.c @@ -1,8 +1,8 @@ /** * @file - * @author Edward A. Lee (eal@berkeley.edu) - * @author{Marten Lohstroh } - * @author{Soroush Bateni } + * @author Edward A. Lee + * @author Marten Lohstroh + * @author Soroush Bateni * @copyright (c) 2020-2024, The University of California at Berkeley. * License: BSD 2-clause * @brief Runtime infrastructure for the threaded version of the C target of Lingua Franca. @@ -731,6 +731,7 @@ bool _lf_worker_handle_deadline_violation_for_reaction(environment_t* env, int w tracepoint_reaction_deadline_missed(env, reaction, worker_number); violation_occurred = true; // Invoke the local handler, if there is one. + tracepoint_reaction_starts(env, reaction, worker_number); reaction_function_t handler = reaction->deadline_violation_handler; if (handler != NULL) { LF_PRINT_LOG("Worker %d: Deadline violation. Invoking deadline handler.", worker_number); @@ -741,6 +742,7 @@ bool _lf_worker_handle_deadline_violation_for_reaction(environment_t* env, int w schedule_output_reactions(env, reaction, worker_number); // Remove the reaction from the executing queue. 
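The `lf_time_add()` helper introduced in core/tag.c above saturates rather than wrapping: `NEVER` and `FOREVER` are absorbing values, and any overflow or underflow clamps to `FOREVER` or `NEVER` respectively. A small sketch of the intended behavior follows, assuming `tag.h` (where `instant_t`, `NEVER`, and `FOREVER` are defined) is on the include path and declares `lf_time_add()`:

```c
// Sketch of the saturating semantics of lf_time_add(); build against the
// runtime's include directories so that tag.h resolves.
#include <assert.h>
#include "tag.h"

int main(void) {
  assert(lf_time_add(100, -30) == 70);         // Ordinary addition.
  assert(lf_time_add(NEVER, 5) == NEVER);      // NEVER absorbs any addend.
  assert(lf_time_add(FOREVER, -5) == FOREVER); // FOREVER absorbs any addend.
  // Sums that would exceed FOREVER clamp to FOREVER; sums that would fall
  // below NEVER clamp to NEVER.
  return 0;
}
```

This is what lets `lf_tag_add()` above return `FOREVER_TAG` or `NEVER_TAG` instead of a wrapped-around tag when an `after` delay is applied to an extreme tag.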
} + tracepoint_reaction_ends(env, reaction, worker_number); } } return violation_occurred; @@ -852,19 +854,13 @@ void _lf_worker_invoke_reaction(environment_t* env, int worker_number, reaction_ reaction->is_STP_violated = false; } -void try_advance_level(environment_t* env, volatile size_t* next_reaction_level) { -#ifdef FEDERATED - lf_stall_advance_level_federation(env, *next_reaction_level); -#else - (void)env; -#endif - if (*next_reaction_level < SIZE_MAX) - *next_reaction_level += 1; -} - /** - * The main looping logic of each LF worker thread. - * This function assumes the caller holds the mutex lock. + * @brief The main looping logic of each LF worker thread. + * + * This function returns when the scheduler's lf_sched_get_ready_reaction() + * implementation returns NULL, indicating that there are no more reactions to execute. + * + * This function assumes the caller does not hold the mutex lock on the environment. * * @param env Environment within which we are executing. * @param worker_number The number assigned to this worker thread @@ -884,10 +880,9 @@ void _lf_worker_do_work(environment_t* env, int worker_number) { while ((current_reaction_to_execute = lf_sched_get_ready_reaction(env->scheduler, worker_number)) != NULL) { // Got a reaction that is ready to run. LF_PRINT_DEBUG("Worker %d: Got from scheduler reaction %s: " - "level: %lld, is input reaction: %d, chain ID: %llu, and deadline " PRINTF_TIME ".", + "level: %lld, is input reaction: %d, and deadline " PRINTF_TIME ".", worker_number, current_reaction_to_execute->name, LF_LEVEL(current_reaction_to_execute->index), - current_reaction_to_execute->is_an_input_reaction, current_reaction_to_execute->chain_id, - current_reaction_to_execute->deadline); + current_reaction_to_execute->is_an_input_reaction, current_reaction_to_execute->deadline); bool violation = _lf_worker_handle_violations(env, worker_number, current_reaction_to_execute); @@ -984,18 +979,6 @@ void lf_print_snapshot(environment_t* env) { } #endif // NDEBUG -// Start threads in the thread pool. -void start_threads(environment_t* env) { - assert(env != GLOBAL_ENVIRONMENT); - - LF_PRINT_LOG("Starting %u worker threads in environment", env->num_workers); - for (int i = 0; i < env->num_workers; i++) { - if (lf_thread_create(&env->thread_ids[i], worker, env) != 0) { - lf_print_error_and_exit("Could not start thread-%u", i); - } - } -} - /** * @brief Determine the number of workers. */ @@ -1074,8 +1057,13 @@ int lf_reactor_c_main(int argc, const char* argv[]) { LF_PRINT_DEBUG("Start time: " PRINTF_TIME "ns", start_time); struct timespec physical_time_timespec = {start_time / BILLION, start_time % BILLION}; + +#ifdef MINIMAL_STDLIB + lf_print("---- Start execution ----"); +#else lf_print("---- Start execution at time %s---- plus %ld nanoseconds", ctime(&physical_time_timespec.tv_sec), physical_time_timespec.tv_nsec); +#endif // MINIMAL_STDLIB // Create and initialize the environments for each enclave lf_create_environments(); @@ -1121,23 +1109,49 @@ int lf_reactor_c_main(int argc, const char* argv[]) { _lf_initialize_start_tag(env); lf_print("Environment %u: ---- Spawning %d workers.", env->id, env->num_workers); - start_threads(env); + + for (int j = 0; j < env->num_workers; j++) { + if (i == 0 && j == 0) { + // The first worker thread of the first environment will be + // run on the main thread, rather than creating a new thread. + // This is important for bare-metal platforms, who can't + // afford to have the main thread sit idle. 
+ env->thread_ids[j] = lf_thread_self(); + continue; + } + if (lf_thread_create(&env->thread_ids[j], worker, env) != 0) { + lf_print_error_and_exit("Could not start thread-%u", j); + } + } + // Unlock mutex and allow threads proceed LF_MUTEX_UNLOCK(&env->mutex); } + // main thread worker (first worker thread of first environment) + void* main_thread_exit_status = NULL; + if (num_envs > 0 && envs[0].num_workers > 0) { + environment_t* env = &envs[0]; + main_thread_exit_status = worker(env); + } + for (int i = 0; i < num_envs; i++) { // Wait for the worker threads to exit. environment_t* env = &envs[i]; void* worker_thread_exit_status = NULL; int ret = 0; - for (int i = 0; i < env->num_workers; i++) { - int failure = lf_thread_join(env->thread_ids[i], &worker_thread_exit_status); - if (failure) { - lf_print_error("Failed to join thread listening for incoming messages: %s", strerror(failure)); + for (int j = 0; j < env->num_workers; j++) { + if (i == 0 && j == 0) { + // main thread worker + worker_thread_exit_status = main_thread_exit_status; + } else { + int failure = lf_thread_join(env->thread_ids[j], &worker_thread_exit_status); + if (failure) { + lf_print_error("Failed to join thread listening for incoming messages: %s", strerror(failure)); + } } if (worker_thread_exit_status != NULL) { - lf_print_error("---- Worker %d reports error code %p", i, worker_thread_exit_status); + lf_print_error("---- Worker %d reports error code %p", j, worker_thread_exit_status); ret = 1; } } diff --git a/core/threaded/scheduler_GEDF_NP.c b/core/threaded/scheduler_GEDF_NP.c index d590adecb..e77257209 100644 --- a/core/threaded/scheduler_GEDF_NP.c +++ b/core/threaded/scheduler_GEDF_NP.c @@ -1,12 +1,20 @@ /** * @file - * @author{Soroush Bateni } - * @author{Edward A. Lee } - * @author{Marten Lohstroh } + * @author Soroush Bateni + * @author Edward A. Lee + * @author Marten Lohstroh * @copyright (c) 2020-2024, The University of California at Berkeley. * License: BSD 2-clause * @brief Global Earliest Deadline First (GEDF) non-preemptive scheduler for the * threaded runtime of the C target of Lingua Franca. + * + * At each tag, this scheduler prioritizes reactions with the smallest (inferred) deadline. + * An inferred deadline for reaction _R_ is either an explicitly declared deadline or the declared deadline of + * a reaction that depends on _R_. This scheduler is non-preemptive, meaning that once a worker thread starts + * executing a reaction, it will execute that reaction to completion. The underlying thread scheduler, of + * course, could preempt the execution in favor of some other worker thread. + * This scheduler does not take into account execution times of reactions. + * Moreover, it does not prioritize reactions across distinct tags. */ #include "lf_types.h" @@ -25,153 +33,79 @@ #include "scheduler_instance.h" #include "scheduler_sync_tag_advance.h" #include "scheduler.h" -#include "lf_semaphore.h" #include "tracepoint.h" #include "util.h" -/////////////////// Scheduler Private API ///////////////////////// -/** - * @brief Insert 'reaction' into scheduler->triggered_reactions - * at the appropriate level. - * - * @param reaction The reaction to insert. 
- */ -static inline void _lf_sched_insert_reaction(lf_scheduler_t* scheduler, reaction_t* reaction) { - size_t reaction_level = LF_LEVEL(reaction->index); - LF_PRINT_DEBUG("Scheduler: Trying to lock the mutex for level %zu.", reaction_level); - LF_MUTEX_LOCK(&scheduler->array_of_mutexes[reaction_level]); - LF_PRINT_DEBUG("Scheduler: Locked the mutex for level %zu.", reaction_level); - pqueue_insert(((pqueue_t**)scheduler->triggered_reactions)[reaction_level], (void*)reaction); - LF_MUTEX_UNLOCK(&scheduler->array_of_mutexes[reaction_level]); -} +#ifdef FEDERATED +#include "federate.h" +#endif -/** - * @brief Distribute any reaction that is ready to execute to idle worker - * thread(s). - * - * @return Number of reactions that were successfully distributed to worker - * threads. - */ -int _lf_sched_distribute_ready_reactions(lf_scheduler_t* scheduler) { - pqueue_t* tmp_queue = NULL; - // Note: All the threads are idle, which means that they are done inserting - // reactions. Therefore, the reaction queues can be accessed without locking - // a mutex. - - while (scheduler->next_reaction_level <= scheduler->max_reaction_level) { - LF_PRINT_DEBUG("Waiting with curr_reaction_level %zu.", scheduler->next_reaction_level); - try_advance_level(scheduler->env, &scheduler->next_reaction_level); - - tmp_queue = ((pqueue_t**)scheduler->triggered_reactions)[scheduler->next_reaction_level - 1]; - size_t reactions_to_execute = pqueue_size(tmp_queue); - - if (reactions_to_execute) { - scheduler->executing_reactions = tmp_queue; - return reactions_to_execute; - } - } - - return 0; -} +// Data specific to the GEDF scheduler. +typedef struct custom_scheduler_data_t { + pqueue_t* reaction_q; + lf_cond_t reaction_q_changed; + size_t current_level; + bool solo_holds_mutex; // Indicates sole thread holds the mutex. +} custom_scheduler_data_t; -/** - * @brief If there is work to be done, notify workers individually. - * - * This assumes that the caller is not holding any thread mutexes. - */ -void _lf_sched_notify_workers(lf_scheduler_t* scheduler) { - // Note: All threads are idle. Therefore, there is no need to lock the mutex - // while accessing the executing queue (which is pointing to one of the - // reaction queues). - size_t workers_to_awaken = - LF_MIN(scheduler->number_of_idle_workers, pqueue_size((pqueue_t*)scheduler->executing_reactions)); - LF_PRINT_DEBUG("Scheduler: Notifying %zu workers.", workers_to_awaken); - scheduler->number_of_idle_workers -= workers_to_awaken; - LF_PRINT_DEBUG("Scheduler: New number of idle workers: %zu.", scheduler->number_of_idle_workers); - if (workers_to_awaken > 1) { - // Notify all the workers except the worker thread that has called this - // function. - lf_semaphore_release(scheduler->semaphore, (workers_to_awaken - 1)); - } -} +/////////////////// Scheduler Private API ///////////////////////// /** - * @brief Signal all worker threads that it is time to stop. - * + * @brief Mark the calling thread idle and wait for notification of change to the reaction queue. + * @param scheduler The scheduler. + * @param worker_number The number of the worker thread. 
*/ -void _lf_sched_signal_stop(lf_scheduler_t* scheduler) { - scheduler->should_stop = true; - lf_semaphore_release(scheduler->semaphore, (scheduler->number_of_workers - 1)); +inline static void wait_for_reaction_queue_updates(lf_scheduler_t* scheduler, int worker_number) { + scheduler->number_of_idle_workers++; + tracepoint_worker_wait_starts(scheduler->env, worker_number); + LF_COND_WAIT(&scheduler->custom_data->reaction_q_changed); + tracepoint_worker_wait_ends(scheduler->env, worker_number); + scheduler->number_of_idle_workers--; } /** - * @brief Advance tag or distribute reactions to worker threads. - * - * Advance tag if there are no reactions on the reaction queue. If - * there are such reactions, distribute them to worker threads. - * - * This function assumes the caller does not hold the 'mutex' lock. + * @brief Assuming this is the last worker to go idle, advance the tag. + * @param scheduler The scheduler. + * @return Non-zero if the stop tag has been reached. */ -void _lf_scheduler_try_advance_tag_and_distribute(lf_scheduler_t* scheduler) { - environment_t* env = scheduler->env; - - // Executing queue must be empty when this is called. - assert(pqueue_size((pqueue_t*)scheduler->executing_reactions) == 0); - - // Loop until it's time to stop or work has been distributed - while (true) { - if (scheduler->next_reaction_level == (scheduler->max_reaction_level + 1)) { - scheduler->next_reaction_level = 0; - LF_MUTEX_LOCK(&env->mutex); - // Nothing more happening at this tag. - LF_PRINT_DEBUG("Scheduler: Advancing tag."); - // This worker thread will take charge of advancing tag. - if (_lf_sched_advance_tag_locked(scheduler)) { - LF_PRINT_DEBUG("Scheduler: Reached stop tag."); - _lf_sched_signal_stop(scheduler); - LF_MUTEX_UNLOCK(&env->mutex); - break; - } - LF_MUTEX_UNLOCK(&env->mutex); - } - - if (_lf_sched_distribute_ready_reactions(scheduler) > 0) { - _lf_sched_notify_workers(scheduler); - break; - } +static int advance_tag(lf_scheduler_t* scheduler) { + // Set a flag in the scheduler that the lock is held by the sole executing thread. + // This prevents acquiring the mutex in lf_scheduler_trigger_reaction. + scheduler->custom_data->solo_holds_mutex = true; + if (_lf_sched_advance_tag_locked(scheduler)) { + LF_PRINT_DEBUG("Scheduler: Reached stop tag."); + scheduler->should_stop = true; + scheduler->custom_data->solo_holds_mutex = false; + // Notify all threads that the stop tag has been reached. + LF_COND_BROADCAST(&scheduler->custom_data->reaction_q_changed); + return 1; } + scheduler->custom_data->solo_holds_mutex = false; + // Reset the level to 0. + scheduler->custom_data->current_level = 0; +#ifdef FEDERATED + // In case there are blocking network input reactions at this level, stall. + lf_stall_advance_level_federation_locked(scheduler->custom_data->current_level); +#endif + return 0; } /** - * @brief Wait until the scheduler assigns work. - * - * If the calling worker thread is the last to become idle, it will call on the - * scheduler to distribute work. Otherwise, it will wait on - * 'scheduler->semaphore'. - * - * @param worker_number The worker number of the worker thread asking for work - * to be assigned to it. + * @brief Assuming all other workers are idle, advance to the next level. + * @param scheduler The scheduler. */ -void _lf_sched_wait_for_work(lf_scheduler_t* scheduler, size_t worker_number) { - // Increment the number of idle workers by 1 and check if this is the last - // worker thread to become idle. 
- if (((size_t)lf_atomic_add_fetch32((int32_t*)&scheduler->number_of_idle_workers, 1)) == - scheduler->number_of_workers) { - // Last thread to go idle - LF_PRINT_DEBUG("Scheduler: Worker %zu is the last idle thread.", worker_number); - // Call on the scheduler to distribute work or advance tag. - _lf_scheduler_try_advance_tag_and_distribute(scheduler); - } else { - // Not the last thread to become idle. - // Wait for work to be released. - LF_PRINT_DEBUG("Scheduler: Worker %zu is trying to acquire the scheduling " - "semaphore.", - worker_number); - lf_semaphore_acquire(scheduler->semaphore); - LF_PRINT_DEBUG("Scheduler: Worker %zu acquired the scheduling semaphore.", worker_number); +static void advance_level(lf_scheduler_t* scheduler) { + if (++scheduler->custom_data->current_level > scheduler->max_reaction_level) { + // Since the reaction queue is not empty, we must be cycling back to level 0 due to deadlines + // having been given precedence over levels. Reset the current level to 1. + scheduler->custom_data->current_level = 0; } + LF_PRINT_DEBUG("Scheduler: Advancing to next reaction level %zu.", scheduler->custom_data->current_level); +#ifdef FEDERATED + // In case there are blocking network input reactions at this level, stall. + lf_stall_advance_level_federation_locked(scheduler->custom_data->current_level); +#endif } - ///////////////////// Scheduler Init and Destroy API ///////////////////////// /** * @brief Initialize the scheduler. @@ -195,26 +129,17 @@ void lf_sched_init(environment_t* env, size_t number_of_workers, sched_params_t* } lf_scheduler_t* scheduler = env->scheduler; - scheduler->triggered_reactions = calloc((scheduler->max_reaction_level + 1), sizeof(pqueue_t*)); - - scheduler->array_of_mutexes = (lf_mutex_t*)calloc((scheduler->max_reaction_level + 1), sizeof(lf_mutex_t)); + scheduler->custom_data = (custom_scheduler_data_t*)calloc(1, sizeof(custom_scheduler_data_t)); + // Initialize the reaction queue. size_t queue_size = INITIAL_REACT_QUEUE_SIZE; - for (size_t i = 0; i <= scheduler->max_reaction_level; i++) { - if (params != NULL) { - if (params->num_reactions_per_level != NULL) { - queue_size = params->num_reactions_per_level[i]; - } - } - // Initialize the reaction queues - ((pqueue_t**)scheduler->triggered_reactions)[i] = - pqueue_init(queue_size, in_reverse_order, get_reaction_index, get_reaction_position, set_reaction_position, - reaction_matches, print_reaction); - // Initialize the mutexes for the reaction queues - LF_MUTEX_INIT(&scheduler->array_of_mutexes[i]); - } + scheduler->custom_data->reaction_q = + pqueue_init(queue_size, in_reverse_order, get_reaction_index, get_reaction_position, set_reaction_position, + reaction_matches, print_reaction); + + LF_COND_INIT(&scheduler->custom_data->reaction_q_changed, &env->mutex); - scheduler->executing_reactions = ((pqueue_t**)scheduler->triggered_reactions)[0]; + scheduler->custom_data->current_level = 0; } /** @@ -223,91 +148,118 @@ void lf_sched_init(environment_t* env, size_t number_of_workers, sched_params_t* * This must be called when the scheduler is no longer needed. */ void lf_sched_free(lf_scheduler_t* scheduler) { - // for (size_t j = 0; j <= scheduler->max_reaction_level; j++) { - // pqueue_free(scheduler->triggered_reactions[j]); - // FIXME: This is causing weird memory errors. 
- // } - pqueue_free((pqueue_t*)scheduler->executing_reactions); - lf_semaphore_destroy(scheduler->semaphore); + pqueue_free((pqueue_t*)scheduler->custom_data->reaction_q); + free(scheduler->custom_data); } ///////////////////// Scheduler Worker API (public) ///////////////////////// -/** - * @brief Ask the scheduler for one more reaction. - * - * This function blocks until it can return a ready reaction for worker thread - * 'worker_number' or it is time for the worker thread to stop and exit (where a - * NULL value would be returned). - * - * @param worker_number - * @return reaction_t* A reaction for the worker to execute. NULL if the calling - * worker thread should exit. - */ + reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_number) { - // Iterate until the stop_tag is reached or reaction queue is empty - while (!scheduler->should_stop) { - // Need to lock the mutex for the current level - size_t current_level = scheduler->next_reaction_level - 1; - LF_PRINT_DEBUG("Scheduler: Worker %d trying to lock the mutex for level %zu.", worker_number, current_level); - LF_MUTEX_LOCK(&scheduler->array_of_mutexes[current_level]); - LF_PRINT_DEBUG("Scheduler: Worker %d locked the mutex for level %zu.", worker_number, current_level); - reaction_t* reaction_to_return = (reaction_t*)pqueue_pop((pqueue_t*)scheduler->executing_reactions); - LF_MUTEX_UNLOCK(&scheduler->array_of_mutexes[current_level]); + // Need to lock the environment mutex. + LF_PRINT_DEBUG("Scheduler: Worker %d locking environment mutex.", worker_number); + LF_MUTEX_LOCK(&scheduler->env->mutex); + LF_PRINT_DEBUG("Scheduler: Worker %d locked environment mutex.", worker_number); + // Iterate until the stop_tag is reached or the event queue is empty. + while (!scheduler->should_stop) { + reaction_t* reaction_to_return = (reaction_t*)pqueue_peek(scheduler->custom_data->reaction_q); if (reaction_to_return != NULL) { - // Got a reaction - return reaction_to_return; - } + // Found a reaction. Check the level. Notice that because of deadlines, the current level + // may advance to the maximum and then back down to 0. + if (LF_LEVEL(reaction_to_return->index) == scheduler->custom_data->current_level) { + // Found a reaction at the current level. + LF_PRINT_DEBUG("Scheduler: Worker %d found a reaction at level %zu.", worker_number, + scheduler->custom_data->current_level); + // Remove the reaction from the queue. + pqueue_pop(scheduler->custom_data->reaction_q); - LF_PRINT_DEBUG("Worker %d is out of ready reactions.", worker_number); + // If there is another reaction at the current level and an idle thread, then + // notify an idle thread. + reaction_t* next_reaction = (reaction_t*)pqueue_peek(scheduler->custom_data->reaction_q); + if (next_reaction != NULL && LF_LEVEL(next_reaction->index) == scheduler->custom_data->current_level && + scheduler->number_of_idle_workers > 0) { + // Notify an idle thread. Note that we could do a broadcast here, but it's probably not + // a good idea because all workers awakened need to acquire the same mutex to examine the + // reaction queue. Only one of them will acquire the mutex, and that worker can check whether + // there are further reactions on the same level that warrant waking another worker thread. + // So we opt to wake one other worker here rather than broadcasting. 
+ LF_COND_SIGNAL(&scheduler->custom_data->reaction_q_changed); + } + LF_MUTEX_UNLOCK(&scheduler->env->mutex); + return reaction_to_return; + } else { + // Found a reaction at a level other than the current level. + LF_PRINT_DEBUG("Scheduler: Worker %d found a reaction at level %lld. Current level is %zu", worker_number, + LF_LEVEL(reaction_to_return->index), scheduler->custom_data->current_level); + // We need to wait to advance to the next level or get a new reaction at the current level. + if (scheduler->number_of_idle_workers == scheduler->number_of_workers - 1) { + // All other workers are idle. Advance to the next level. + advance_level(scheduler); + } else { + // Some workers are still working on reactions on the current level. + // Wait for them to finish. + wait_for_reaction_queue_updates(scheduler, worker_number); + } + } + } else { + // The reaction queue is empty. + LF_PRINT_DEBUG("Worker %d finds nothing on the reaction queue.", worker_number); - // Ask the scheduler for more work and wait - tracepoint_worker_wait_starts(scheduler->env, worker_number); - _lf_sched_wait_for_work(scheduler, worker_number); - tracepoint_worker_wait_ends(scheduler->env, worker_number); + // If all other workers are idle, then we are done with this tag. + if (scheduler->number_of_idle_workers == scheduler->number_of_workers - 1) { + // Last thread to go idle + LF_PRINT_DEBUG("Scheduler: Worker %d is advancing the tag.", worker_number); + if (advance_tag(scheduler)) { + // Stop tag has been reached. + break; + } + } else { + // Some other workers are still working on reactions on the current level. + // Wait for them to finish. + wait_for_reaction_queue_updates(scheduler, worker_number); + } + } } // It's time for the worker thread to stop and exit. + LF_MUTEX_UNLOCK(&scheduler->env->mutex); return NULL; } -/** - * @brief Inform the scheduler that worker thread 'worker_number' is done - * executing the 'done_reaction'. - * - * @param worker_number The worker number for the worker thread that has - * finished executing 'done_reaction'. - * @param done_reaction The reaction that is done. - */ void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction) { - (void)worker_number; + (void)worker_number; // Suppress unused parameter warning. if (!lf_atomic_bool_compare_and_swap32((int32_t*)&done_reaction->status, queued, inactive)) { lf_print_error_and_exit("Unexpected reaction status: %d. Expected %d.", done_reaction->status, queued); } } -/** - * @brief Inform the scheduler that worker thread 'worker_number' would like to - * trigger 'reaction' at the current tag. - * - * If a worker number is not available (e.g., this function is not called by a - * worker thread), -1 should be passed as the 'worker_number'. - * - * The scheduler will ensure that the same reaction is not triggered twice in - * the same tag. - * - * @param reaction The reaction to trigger at the current tag. - * @param worker_number The ID of the worker that is making this call. 0 should - * be used if there is only one worker (e.g., when the program is using the - * single-threaded C runtime). -1 is used for an anonymous call in a context where a - * worker number does not make sense (e.g., the caller is not a worker thread). - */ void lf_scheduler_trigger_reaction(lf_scheduler_t* scheduler, reaction_t* reaction, int worker_number) { - (void)worker_number; + (void)worker_number; // Suppress unused parameter warning. 
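// Illustrative aside (not part of this patch): a minimal, standalone sketch of the
// "wake one idle worker, not all" pattern that the rewritten GEDF_NP worker loop
// above applies via LF_COND_SIGNAL on reaction_q_changed. It uses plain POSIX
// threads; work_queue_t and its fields are hypothetical names, not reactor-c API.
#include <pthread.h>
#include <stdio.h>
#include <time.h>

typedef struct {
  pthread_mutex_t mutex;
  pthread_cond_t changed; // Plays the role of reaction_q_changed.
  int ready;              // Work items available at the current level.
  int idle;               // Number of workers currently waiting.
  int stop;
} work_queue_t;

static void* worker(void* arg) {
  work_queue_t* q = (work_queue_t*)arg;
  pthread_mutex_lock(&q->mutex);
  while (!q->stop) {
    if (q->ready > 0) {
      q->ready--;
      // If more work remains and someone is idle, wake exactly one worker.
      // A broadcast would wake every worker only to contend for this same mutex.
      if (q->ready > 0 && q->idle > 0) {
        pthread_cond_signal(&q->changed);
      }
      pthread_mutex_unlock(&q->mutex);
      // ... execute the work item here, without holding the lock ...
      pthread_mutex_lock(&q->mutex);
    } else {
      q->idle++;
      pthread_cond_wait(&q->changed, &q->mutex);
      q->idle--;
    }
  }
  pthread_mutex_unlock(&q->mutex);
  return NULL;
}

int main(void) {
  work_queue_t q = {PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, 0};
  pthread_t workers[2];
  for (int i = 0; i < 2; i++) pthread_create(&workers[i], NULL, worker, &q);

  pthread_mutex_lock(&q.mutex);
  q.ready = 4;                     // Four items become ready at once.
  pthread_cond_signal(&q.changed); // Wake one worker; it wakes the next as needed.
  pthread_mutex_unlock(&q.mutex);

  struct timespec pause = {0, 100 * 1000 * 1000}; // Give the workers time to drain.
  nanosleep(&pause, NULL);

  pthread_mutex_lock(&q.mutex);
  q.stop = 1;
  pthread_cond_broadcast(&q.changed); // Stopping is the one case where everyone must wake.
  pthread_mutex_unlock(&q.mutex);
  for (int i = 0; i < 2; i++) pthread_join(workers[i], NULL);
  printf("items left: %d\n", q.ready);
  return 0;
}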
if (reaction == NULL || !lf_atomic_bool_compare_and_swap32((int32_t*)&reaction->status, inactive, queued)) { return; } LF_PRINT_DEBUG("Scheduler: Enqueueing reaction %s, which has level %lld.", reaction->name, LF_LEVEL(reaction->index)); - _lf_sched_insert_reaction(scheduler, reaction); + + // Mutex not needed when pulling from the event queue. + if (!scheduler->custom_data->solo_holds_mutex) { + LF_PRINT_DEBUG("Scheduler: Locking mutex for environment."); + LF_MUTEX_LOCK(&scheduler->env->mutex); + LF_PRINT_DEBUG("Scheduler: Locked mutex for environment."); + } + pqueue_insert(scheduler->custom_data->reaction_q, (void*)reaction); + if (!scheduler->custom_data->solo_holds_mutex) { + // If this is called from a reaction execution, then the triggered reaction + // has one level higher than the current level. No need to notify idle threads. + // But in federated execution, it could be called because of message arrival. + // Also, in modal models, reset and startup reactions may be triggered. +#if defined(FEDERATED) || (defined(MODAL) && !defined(LF_SINGLE_THREADED)) + reaction_t* triggered_reaction = (reaction_t*)pqueue_peek(scheduler->custom_data->reaction_q); + if (LF_LEVEL(triggered_reaction->index) == scheduler->custom_data->current_level) { + LF_COND_SIGNAL(&scheduler->custom_data->reaction_q_changed); + } +#endif // FEDERATED || MODAL + + LF_MUTEX_UNLOCK(&scheduler->env->mutex); + } } #endif // SCHEDULER == SCHED_GEDF_NP diff --git a/core/threaded/scheduler_NP.c b/core/threaded/scheduler_NP.c index 01b510477..7edd41a81 100644 --- a/core/threaded/scheduler_NP.c +++ b/core/threaded/scheduler_NP.c @@ -1,8 +1,8 @@ /** * @file - * @author{Soroush Bateni } - * @author{Edward A. Lee } - * @author{Marten Lohstroh } + * @author Soroush Bateni + * @author Edward A. Lee + * @author Marten Lohstroh * @copyright (c) 2020-2024, The University of California at Berkeley. * License: BSD 2-clause * @brief Non-preemptive scheduler for the threaded runtime of the C target of Lingua Franca. @@ -27,10 +27,26 @@ #include "util.h" #include "reactor_threaded.h" +#ifdef FEDERATED +#include "federate.h" +#endif + +// Data specific to the NP scheduler. +typedef struct custom_scheduler_data_t { + reaction_t** executing_reactions; + lf_mutex_t* array_of_mutexes; + reaction_t*** triggered_reactions; + volatile size_t next_reaction_level; + lf_semaphore_t* semaphore; // Signal the maximum number of worker threads that should + // be executing work at the same time. Initially 0. + // For example, if the scheduler releases the semaphore with a count of 4, + // no more than 4 worker threads should wake up to process reactions. +} custom_scheduler_data_t; + /////////////////// Scheduler Private API ///////////////////////// + /** - * @brief Insert 'reaction' into - * scheduler->triggered_reactions at the appropriate level. + * @brief Insert 'reaction' into scheduler->triggered_reactions at the appropriate level. * * @param reaction The reaction to insert. */ @@ -39,19 +55,19 @@ static inline void _lf_sched_insert_reaction(lf_scheduler_t* scheduler, reaction #ifdef FEDERATED // Lock the mutex if federated because a federate can insert reactions with // a level equal to the current level. - size_t current_level = scheduler->next_reaction_level - 1; + size_t current_level = scheduler->custom_data->next_reaction_level - 1; // There is a race condition here where - // `scheduler->next_reaction_level` can change after it is + // `scheduler->custom_data->next_reaction_level` can change after it is // cached here. 
In that case, if the cached value is equal to
   // `reaction_level`, the cost will be an additional unnecessary mutex lock,
   // but no logic error. If the cached value is not equal to `reaction_level`,
   // it can never become `reaction_level` because the scheduler will only
-  // change the `scheduler->next_reaction_level` if it can
+  // change the `scheduler->custom_data->next_reaction_level` if it can
   // ensure that all worker threads are idle, and thus, none are triggering
   // reactions (and therefore calling this function).
   if (reaction_level == current_level) {
     LF_PRINT_DEBUG("Scheduler: Trying to lock the mutex for level %zu.", reaction_level);
-    LF_MUTEX_LOCK(&scheduler->array_of_mutexes[reaction_level]);
+    LF_MUTEX_LOCK(&scheduler->custom_data->array_of_mutexes[reaction_level]);
     LF_PRINT_DEBUG("Scheduler: Locked the mutex for level %zu.", reaction_level);
   }
   // The level index for the current level can sometimes become negative. Set
@@ -65,11 +81,11 @@ static inline void _lf_sched_insert_reaction(lf_scheduler_t* scheduler, reaction
   assert(reaction_q_level_index >= 0);
   LF_PRINT_DEBUG("Scheduler: Accessing triggered reactions at the level %zu with index %d.", reaction_level,
                  reaction_q_level_index);
-  ((reaction_t***)scheduler->triggered_reactions)[reaction_level][reaction_q_level_index] = reaction;
+  ((reaction_t***)scheduler->custom_data->triggered_reactions)[reaction_level][reaction_q_level_index] = reaction;
   LF_PRINT_DEBUG("Scheduler: Index for level %zu is at %d.", reaction_level, reaction_q_level_index);
 #ifdef FEDERATED
   if (reaction_level == current_level) {
-    LF_MUTEX_UNLOCK(&scheduler->array_of_mutexes[reaction_level]);
+    LF_MUTEX_UNLOCK(&scheduler->custom_data->array_of_mutexes[reaction_level]);
   }
 #endif
 }
@@ -80,20 +96,22 @@ static inline void _lf_sched_insert_reaction(lf_scheduler_t* scheduler, reaction
  *
  * @return 1 if any reaction is ready. 0 otherwise.
  */
-int _lf_sched_distribute_ready_reactions(lf_scheduler_t* scheduler) {
+static int _lf_sched_distribute_ready_reactions(lf_scheduler_t* scheduler) {
   // Note: All the threads are idle, which means that they are done inserting
   // reactions. Therefore, the reaction vectors can be accessed without
   // locking a mutex.
- while (scheduler->next_reaction_level <= scheduler->max_reaction_level) { - LF_PRINT_DEBUG("Waiting with curr_reaction_level %zu.", scheduler->next_reaction_level); - try_advance_level(scheduler->env, &scheduler->next_reaction_level); + while (scheduler->custom_data->next_reaction_level <= scheduler->max_reaction_level) { +#ifdef FEDERATED + lf_stall_advance_level_federation(scheduler->env, scheduler->custom_data->next_reaction_level); +#endif + scheduler->custom_data->executing_reactions = + scheduler->custom_data->triggered_reactions[scheduler->custom_data->next_reaction_level]; + LF_PRINT_DEBUG("Start of rxn queue at %zu is %p", scheduler->custom_data->next_reaction_level, + (void*)((reaction_t**)scheduler->custom_data->executing_reactions)[0]); - scheduler->executing_reactions = - (void*)((reaction_t***)scheduler->triggered_reactions)[scheduler->next_reaction_level - 1]; + scheduler->custom_data->next_reaction_level++; - LF_PRINT_DEBUG("Start of rxn queue at %zu is %p", scheduler->next_reaction_level - 1, - (void*)((reaction_t**)scheduler->executing_reactions)[0]); - if (((reaction_t**)scheduler->executing_reactions)[0] != NULL) { + if (scheduler->custom_data->executing_reactions[0] != NULL) { // There is at least one reaction to execute return 1; } @@ -107,13 +125,13 @@ int _lf_sched_distribute_ready_reactions(lf_scheduler_t* scheduler) { * * This assumes that the caller is not holding any thread mutexes. */ -void _lf_sched_notify_workers(lf_scheduler_t* scheduler) { +static void _lf_sched_notify_workers(lf_scheduler_t* scheduler) { // Calculate the number of workers that we need to wake up, which is the // number of reactions enabled at this level. // Note: All threads are idle. Therefore, there is no need to lock the mutex while accessing the index for the // current level. - size_t workers_to_awaken = - LF_MIN(scheduler->number_of_idle_workers, (size_t)(scheduler->indexes[scheduler->next_reaction_level - 1])); + size_t workers_to_awaken = LF_MIN(scheduler->number_of_idle_workers, + (size_t)(scheduler->indexes[scheduler->custom_data->next_reaction_level - 1])); LF_PRINT_DEBUG("Scheduler: Notifying %zu workers.", workers_to_awaken); scheduler->number_of_idle_workers -= workers_to_awaken; @@ -122,7 +140,7 @@ void _lf_sched_notify_workers(lf_scheduler_t* scheduler) { if (workers_to_awaken > 1) { // Notify all the workers except the worker thread that has called this // function. - lf_semaphore_release(scheduler->semaphore, (workers_to_awaken - 1)); + lf_semaphore_release(scheduler->custom_data->semaphore, (workers_to_awaken - 1)); } } @@ -130,9 +148,9 @@ void _lf_sched_notify_workers(lf_scheduler_t* scheduler) { * @brief Signal all worker threads that it is time to stop. * */ -void _lf_sched_signal_stop(lf_scheduler_t* scheduler) { +static void _lf_sched_signal_stop(lf_scheduler_t* scheduler) { scheduler->should_stop = true; - lf_semaphore_release(scheduler->semaphore, (scheduler->number_of_workers - 1)); + lf_semaphore_release(scheduler->custom_data->semaphore, (scheduler->number_of_workers - 1)); } /** @@ -143,15 +161,15 @@ void _lf_sched_signal_stop(lf_scheduler_t* scheduler) { * * This function assumes the caller does not hold the 'mutex' lock. 
*/ -void _lf_scheduler_try_advance_tag_and_distribute(lf_scheduler_t* scheduler) { +static void _lf_scheduler_try_advance_tag_and_distribute(lf_scheduler_t* scheduler) { // Reset the index environment_t* env = scheduler->env; - scheduler->indexes[scheduler->next_reaction_level - 1] = 0; + scheduler->indexes[scheduler->custom_data->next_reaction_level - 1] = 0; // Loop until it's time to stop or work has been distributed while (true) { - if (scheduler->next_reaction_level == (scheduler->max_reaction_level + 1)) { - scheduler->next_reaction_level = 0; + if (scheduler->custom_data->next_reaction_level == (scheduler->max_reaction_level + 1)) { + scheduler->custom_data->next_reaction_level = 0; LF_MUTEX_LOCK(&env->mutex); // Nothing more happening at this tag. LF_PRINT_DEBUG("Scheduler: Advancing tag."); @@ -177,12 +195,12 @@ void _lf_scheduler_try_advance_tag_and_distribute(lf_scheduler_t* scheduler) { * * If the calling worker thread is the last to become idle, it will call on the * scheduler to distribute work. Otherwise, it will wait on - * 'scheduler->semaphore'. + * 'scheduler->custom_data->semaphore'. * * @param worker_number The worker number of the worker thread asking for work * to be assigned to it. */ -void _lf_sched_wait_for_work(lf_scheduler_t* scheduler, size_t worker_number) { +static void _lf_sched_wait_for_work(lf_scheduler_t* scheduler, size_t worker_number) { // Increment the number of idle workers by 1 and check if this is the last // worker thread to become idle. if (lf_atomic_add_fetch32((int32_t*)&scheduler->number_of_idle_workers, 1) == (int)scheduler->number_of_workers) { @@ -192,15 +210,14 @@ void _lf_sched_wait_for_work(lf_scheduler_t* scheduler, size_t worker_number) { _lf_scheduler_try_advance_tag_and_distribute(scheduler); } else { // Not the last thread to become idle. Wait for work to be released. - LF_PRINT_DEBUG("Scheduler: Worker %zu is trying to acquire the scheduling " - "semaphore.", - worker_number); - lf_semaphore_acquire(scheduler->semaphore); + LF_PRINT_DEBUG("Scheduler: Worker %zu is trying to acquire the scheduling semaphore.", worker_number); + lf_semaphore_acquire(scheduler->custom_data->semaphore); LF_PRINT_DEBUG("Scheduler: Worker %zu acquired the scheduling semaphore.", worker_number); } } ///////////////////// Scheduler Init and Destroy API ///////////////////////// + /** * @brief Initialize the scheduler. 
* @@ -233,9 +250,17 @@ void lf_sched_init(environment_t* env, size_t number_of_workers, sched_params_t* LF_PRINT_DEBUG("Scheduler: Max reaction level: %zu", env->scheduler->max_reaction_level); - env->scheduler->triggered_reactions = calloc((env->scheduler->max_reaction_level + 1), sizeof(reaction_t**)); + env->scheduler->custom_data = (custom_scheduler_data_t*)calloc(1, sizeof(custom_scheduler_data_t)); + + env->scheduler->custom_data->triggered_reactions = + (reaction_t***)calloc((env->scheduler->max_reaction_level + 1), sizeof(reaction_t**)); + + env->scheduler->custom_data->array_of_mutexes = + (lf_mutex_t*)calloc((env->scheduler->max_reaction_level + 1), sizeof(lf_mutex_t)); - env->scheduler->array_of_mutexes = (lf_mutex_t*)calloc((env->scheduler->max_reaction_level + 1), sizeof(lf_mutex_t)); + env->scheduler->custom_data->semaphore = lf_semaphore_new(0); + + env->scheduler->custom_data->next_reaction_level = 1; env->scheduler->indexes = (volatile int*)calloc((env->scheduler->max_reaction_level + 1), sizeof(volatile int)); @@ -247,15 +272,14 @@ void lf_sched_init(environment_t* env, size_t number_of_workers, sched_params_t* } } // Initialize the reaction vectors - ((reaction_t***)env->scheduler->triggered_reactions)[i] = (reaction_t**)calloc(queue_size, sizeof(reaction_t*)); + env->scheduler->custom_data->triggered_reactions[i] = (reaction_t**)calloc(queue_size, sizeof(reaction_t*)); LF_PRINT_DEBUG("Scheduler: Initialized vector of reactions for level %zu with size %zu", i, queue_size); // Initialize the mutexes for the reaction vectors - LF_MUTEX_INIT(&env->scheduler->array_of_mutexes[i]); + LF_MUTEX_INIT(&env->scheduler->custom_data->array_of_mutexes[i]); } - - env->scheduler->executing_reactions = (void*)((reaction_t***)env->scheduler->triggered_reactions)[0]; + env->scheduler->custom_data->executing_reactions = env->scheduler->custom_data->triggered_reactions[0]; } /** @@ -264,14 +288,15 @@ void lf_sched_init(environment_t* env, size_t number_of_workers, sched_params_t* * This must be called when the scheduler is no longer needed. 
*/ void lf_sched_free(lf_scheduler_t* scheduler) { - if (scheduler->triggered_reactions) { + if (scheduler->custom_data->triggered_reactions) { for (size_t j = 0; j <= scheduler->max_reaction_level; j++) { - free(((reaction_t***)scheduler->triggered_reactions)[j]); + free(scheduler->custom_data->triggered_reactions[j]); } - free(scheduler->triggered_reactions); + free(scheduler->custom_data->triggered_reactions); } - - lf_semaphore_destroy(scheduler->semaphore); + free(scheduler->custom_data->array_of_mutexes); + lf_semaphore_destroy(scheduler->custom_data->semaphore); + free(scheduler->custom_data); } ///////////////////// Scheduler Worker API (public) ///////////////////////// @@ -290,23 +315,23 @@ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_nu // Iterate until the stop tag is reached or reaction vectors are empty while (!scheduler->should_stop) { // Calculate the current level of reactions to execute - size_t current_level = scheduler->next_reaction_level - 1; + size_t current_level = scheduler->custom_data->next_reaction_level - 1; reaction_t* reaction_to_return = NULL; #ifdef FEDERATED // Need to lock the mutex because federate.c could trigger reactions at // the current level (if there is a causality loop) - LF_MUTEX_LOCK(&scheduler->array_of_mutexes[current_level]); + LF_MUTEX_LOCK(&scheduler->custom_data->array_of_mutexes[current_level]); #endif int current_level_q_index = lf_atomic_add_fetch32((int32_t*)&scheduler->indexes[current_level], -1); if (current_level_q_index >= 0) { LF_PRINT_DEBUG("Scheduler: Worker %d popping reaction with level %zu, index " "for level: %d.", worker_number, current_level, current_level_q_index); - reaction_to_return = ((reaction_t**)scheduler->executing_reactions)[current_level_q_index]; - ((reaction_t**)scheduler->executing_reactions)[current_level_q_index] = NULL; + reaction_to_return = scheduler->custom_data->executing_reactions[current_level_q_index]; + scheduler->custom_data->executing_reactions[current_level_q_index] = NULL; } #ifdef FEDERATED - lf_mutex_unlock(&scheduler->array_of_mutexes[current_level]); + lf_mutex_unlock(&scheduler->custom_data->array_of_mutexes[current_level]); #endif if (reaction_to_return != NULL) { diff --git a/core/threaded/scheduler_adaptive.c b/core/threaded/scheduler_adaptive.c index 4b0843028..1f90c90a6 100644 --- a/core/threaded/scheduler_adaptive.c +++ b/core/threaded/scheduler_adaptive.c @@ -1,6 +1,6 @@ /** * @file - * @author{Peter Donovan } + * @author Peter Donovan * @copyright (c) 2020-2024, The University of California at Berkeley. * License: BSD 2-clause * @brief This is a non-priority-driven scheduler. See scheduler.h for documentation. 
@@ -21,12 +21,14 @@ #include "environment.h" #include "util.h" +#ifdef FEDERATED +#include "federate.h" +#endif + #ifndef MAX_REACTION_LEVEL #define MAX_REACTION_LEVEL INITIAL_REACT_QUEUE_SIZE #endif -void try_advance_level(environment_t* env, volatile size_t* next_reaction_level); - /////////////////// Forward declarations ///////////////////////// extern bool fast; static void worker_states_lock(lf_scheduler_t* scheduler, size_t worker); @@ -132,7 +134,7 @@ static size_t cond_of(size_t worker) { static void set_level(lf_scheduler_t* scheduler, size_t level) { worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; assert(level < worker_assignments->num_levels); - assert(0 <= level); + assert(0 <= (long long)level); data_collection_end_level(scheduler, worker_assignments->current_level, worker_assignments->num_workers); worker_assignments->current_level = level; worker_assignments->num_reactions_by_worker = worker_assignments->num_reactions_by_worker_by_level[level]; @@ -222,7 +224,7 @@ static reaction_t* get_reaction(lf_scheduler_t* scheduler, size_t worker) { if (old_num_reactions <= 0) return NULL; } while ((current_num_reactions = lf_atomic_val_compare_and_swap32( - ((int32_t*)worker_assignments->num_reactions_by_worker + worker), old_num_reactions, + (int32_t*)(worker_assignments->num_reactions_by_worker + worker), old_num_reactions, (index = old_num_reactions - 1))) != old_num_reactions); return worker_assignments->reactions_by_worker[worker][index]; #endif @@ -236,9 +238,9 @@ static reaction_t* get_reaction(lf_scheduler_t* scheduler, size_t worker) { */ static reaction_t* worker_assignments_get_or_lock(lf_scheduler_t* scheduler, size_t worker) { worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; - assert(worker >= 0); + assert((long long)worker >= 0); // assert(worker < num_workers); // There are edge cases where this doesn't hold. 
- assert(worker_assignments->num_reactions_by_worker[worker] >= 0); + assert((long long)worker_assignments->num_reactions_by_worker[worker] >= 0); reaction_t* ret; while (true) { if ((ret = get_reaction(scheduler, worker))) @@ -423,6 +425,7 @@ static void worker_states_sleep_and_unlock(lf_scheduler_t* scheduler, size_t wor static void advance_level_and_unlock(lf_scheduler_t* scheduler, size_t worker) { worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; size_t max_level = worker_assignments->num_levels - 1; + size_t total_num_reactions; while (true) { if (worker_assignments->current_level == max_level) { data_collection_end_tag(scheduler, worker_assignments->num_workers_by_level, @@ -435,10 +438,16 @@ static void advance_level_and_unlock(lf_scheduler_t* scheduler, size_t worker) { return; } } else { - try_advance_level(scheduler->env, &worker_assignments->current_level); - set_level(scheduler, worker_assignments->current_level); +#ifdef FEDERATED + lf_stall_advance_level_federation_locked(worker_assignments->current_level); +#endif + total_num_reactions = get_num_reactions(scheduler); + if (!total_num_reactions) { + worker_assignments->current_level++; + set_level(scheduler, worker_assignments->current_level); + } } - size_t total_num_reactions = get_num_reactions(scheduler); + total_num_reactions = get_num_reactions(scheduler); if (total_num_reactions) { size_t num_workers_to_awaken = LF_MIN(total_num_reactions, worker_assignments->num_workers); LF_ASSERT(num_workers_to_awaken > 0, ""); @@ -593,6 +602,7 @@ static size_t restrict_to_range(size_t start_inclusive, size_t end_inclusive, si */ static void compute_number_of_workers(lf_scheduler_t* scheduler, size_t* num_workers_by_level, size_t* max_num_workers_by_level, bool jitter) { + data_collection_t* data_collection = scheduler->custom_data->data_collection; for (size_t level = 0; level < data_collection->num_levels; level++) { interval_t this_execution_time = @@ -684,7 +694,6 @@ void lf_sched_free(lf_scheduler_t* scheduler) { worker_assignments_free(scheduler); data_collection_free(scheduler); free(scheduler->custom_data); - lf_semaphore_destroy(scheduler->semaphore); } ///////////////////////// Scheduler Worker API /////////////////////////////// diff --git a/core/threaded/scheduler_instance.c b/core/threaded/scheduler_instance.c index 5487ead65..8146327bf 100644 --- a/core/threaded/scheduler_instance.c +++ b/core/threaded/scheduler_instance.c @@ -1,3 +1,14 @@ +/** + * @file + * @author Soroush Bateni + * @author Edward A. Lee + * @copyright (c) 2022-2024, The University of Texas at Dallas and The University of California at Berkeley. + * License: BSD 2-clause + * @brief Common scheduler functions. + * + * This file defines functions that are common across multiple schedulers. + */ + #include #include "scheduler_instance.h" #include "environment.h" @@ -32,9 +43,7 @@ bool init_sched_instance(environment_t* env, lf_scheduler_t** instance, size_t n } } - (*instance)->semaphore = lf_semaphore_new(0); (*instance)->number_of_workers = number_of_workers; - (*instance)->next_reaction_level = 1; (*instance)->should_stop = false; (*instance)->env = env; diff --git a/core/threaded/scheduler_sync_tag_advance.c b/core/threaded/scheduler_sync_tag_advance.c index 1b0556ba1..cc91c88f0 100644 --- a/core/threaded/scheduler_sync_tag_advance.c +++ b/core/threaded/scheduler_sync_tag_advance.c @@ -1,63 +1,29 @@ -#if !defined(LF_SINGLE_THREADED) -/************* -Copyright (c) 2022, The University of Texas at Dallas. 
-Copyright (c) 2022, The University of California at Berkeley. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL -THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF -THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -***************/ - /** - * @file scheduler_sync_tag_advance.c - * @author Soroush Bateni (soroush@utdallas.edu) - * @author Edward A. Lee - * @author Marten Lohstroh + * @file + * @author Soroush Bateni + * @author Edward A. Lee + * @author Marten Lohstroh * @brief API used to advance tag globally. - * - * @copyright Copyright (c) 2022, The University of Texas at Dallas. - * @copyright Copyright (c) 2022, The University of California at Berkeley. + * @copyright (c) 2020-2024, The University of California at Berkeley and The University of Texas at Dallas + * License: BSD 2-clause */ +#if !defined(LF_SINGLE_THREADED) + #include "scheduler_sync_tag_advance.h" #include "rti_local.h" #include "environment.h" #include "tracepoint.h" #include "util.h" -/////////////////// External Functions ///////////////////////// -/** - * Placeholder for function that will advance tag and initially fill the - * reaction queue. - * - * This does not acquire the mutex lock. It assumes the lock is already held. - */ +// Forward declaration of function defined in reactor_threaded.h +void _lf_next_locked(struct environment_t* env); /** * @brief Indicator that execution of at least one tag has completed. */ static bool _latest_tag_completed = false; -/** - * Return true if the worker should stop now; false otherwise. - * This function assumes the caller holds the mutex lock. - */ bool should_stop_locked(lf_scheduler_t* sched) { // If this is not the very first step, check against the stop tag to see whether this is the last step. if (_latest_tag_completed) { @@ -70,14 +36,6 @@ bool should_stop_locked(lf_scheduler_t* sched) { return false; } -/** - * Advance tag. This will also pop events for the newly acquired tag and put - * the triggered reactions on the '_lf_sched_vector_of_reaction_qs'. - * - * This function assumes the caller holds the 'mutex' lock. - * - * @return should_exit True if the worker thread should exit. False otherwise. 
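// Illustrative aside (not part of this patch): should_stop_locked() above decides
// whether a worker should exit by comparing env->current_tag against env->stop_tag
// with lf_tag_compare() once at least one tag has completed. The standalone sketch
// below restates that ordering: tags compare lexicographically by (time, microstep).
// tag_sketch_t and tag_compare_sketch are hypothetical names, not the runtime's.
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int64_t time;       // Logical time in nanoseconds.
  uint32_t microstep; // Superdense-time index within the same logical time.
} tag_sketch_t;

static int tag_compare_sketch(tag_sketch_t a, tag_sketch_t b) {
  if (a.time != b.time) return a.time < b.time ? -1 : 1;
  if (a.microstep != b.microstep) return a.microstep < b.microstep ? -1 : 1;
  return 0;
}

int main(void) {
  tag_sketch_t current = {1000, 0};
  tag_sketch_t stop = {1000, 0};
  // A worker stops once the tag it just completed is at or beyond the stop tag.
  printf("stop now? %s\n", tag_compare_sketch(current, stop) >= 0 ? "yes" : "no");
  return 0;
}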
- */ bool _lf_sched_advance_tag_locked(lf_scheduler_t* sched) { environment_t* env = sched->env; logical_tag_complete(env->current_tag); diff --git a/core/threaded/watchdog.c b/core/threaded/watchdog.c index 3b9a6d62c..7cb319533 100644 --- a/core/threaded/watchdog.c +++ b/core/threaded/watchdog.c @@ -140,13 +140,15 @@ void lf_watchdog_start(watchdog_t* watchdog, interval_t additional_timeout) { } void lf_watchdog_stop(watchdog_t* watchdog) { - // If the watchdog isnt active, then it is no reason to stop it. + // Assumes reactor mutex is already held. + watchdog->expiration = NEVER; + + // If lf_watchdog_stop is called very close to lf_watchdog_start, it might + // not have had the time to wake up and start sleeping. if (!watchdog->active) { return; } - // Assumes reactor mutex is already held. - watchdog->expiration = NEVER; LF_COND_SIGNAL(&watchdog->cond); } diff --git a/core/utils/pqueue.c b/core/utils/pqueue.c index b2bf05090..65f6dd1d9 100644 --- a/core/utils/pqueue.c +++ b/core/utils/pqueue.c @@ -35,5 +35,5 @@ void set_reaction_position(void* reaction, size_t pos) { ((reaction_t*)reaction) void print_reaction(void* reaction) { reaction_t* r = (reaction_t*)reaction; - LF_PRINT_DEBUG("%s: chain_id: %llu, index: %llx, reaction: %p", r->name, r->chain_id, r->index, reaction); + LF_PRINT_DEBUG("%s: index: %llx, reaction: %p", r->name, r->index, reaction); } diff --git a/core/utils/pqueue_tag.c b/core/utils/pqueue_tag.c index 24899374b..c1abe35ba 100644 --- a/core/utils/pqueue_tag.c +++ b/core/utils/pqueue_tag.c @@ -9,6 +9,7 @@ */ #include +#include #include "pqueue_tag.h" #include "util.h" // For lf_print @@ -23,7 +24,10 @@ * element is also the priority. This function is of type pqueue_get_pri_f. * @param element A pointer to a pqueue_tag_element_t, cast to void*. */ -static pqueue_pri_t pqueue_tag_get_priority(void* element) { return (pqueue_pri_t)element; } +static pqueue_pri_t pqueue_tag_get_priority(void* element) { + // Suppress "error: cast from pointer to integer of different size" by casting to uintptr_t first. + return (pqueue_pri_t)(uintptr_t)element; +} /** * @brief Callback function to determine whether two elements are equivalent. @@ -65,7 +69,9 @@ static void pqueue_tag_print_element(void* element) { // Functions defined in pqueue_tag.h. int pqueue_tag_compare(pqueue_pri_t priority1, pqueue_pri_t priority2) { - return (lf_tag_compare(((pqueue_tag_element_t*)priority1)->tag, ((pqueue_tag_element_t*)priority2)->tag)); + // Suppress "error: cast from pointer to integer of different size" by casting to uintptr_t first. + return (lf_tag_compare(((pqueue_tag_element_t*)(uintptr_t)priority1)->tag, + ((pqueue_tag_element_t*)(uintptr_t)priority2)->tag)); } pqueue_tag_t* pqueue_tag_init(size_t initial_size) { @@ -147,7 +153,7 @@ void pqueue_tag_remove(pqueue_tag_t* q, pqueue_tag_element_t* e) { pqueue_remove void pqueue_tag_remove_up_to(pqueue_tag_t* q, tag_t t) { tag_t head = pqueue_tag_peek_tag(q); while (lf_tag_compare(head, FOREVER_TAG) < 0 && lf_tag_compare(head, t) <= 0) { - pqueue_tag_pop(q); + pqueue_tag_pop_tag(q); head = pqueue_tag_peek_tag(q); } } diff --git a/core/utils/util.c b/core/utils/util.c index 881b6dc05..62de9fd27 100644 --- a/core/utils/util.c +++ b/core/utils/util.c @@ -33,12 +33,13 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
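// Illustrative aside (not part of this patch): the pqueue_tag changes above route the
// pointer-to-priority cast through uintptr_t. uintptr_t is defined to be wide enough
// to hold a pointer, so the round trip below is well defined, whereas a direct cast
// can trigger "cast from pointer to integer of different size" where the widths
// differ. The pqueue_pri_t typedef here is an assumption for the sketch only.
#include <stdint.h>
#include <stdio.h>

typedef unsigned long long pqueue_pri_t; // Assumed integer priority type.

typedef struct {
  int value;
} element_t;

static pqueue_pri_t priority_of(void* element) {
  // The element pointer itself serves as the priority, as in pqueue_tag.
  return (pqueue_pri_t)(uintptr_t)element;
}

int main(void) {
  element_t e = {42};
  element_t* back = (element_t*)(uintptr_t)priority_of(&e);
  printf("%d\n", back->value); // Prints 42: the pointer survives the round trip.
  return 0;
}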
#include "util.h" +#include + #ifndef STANDALONE_RTI #include "environment.h" #endif #include -#include #include #include #include // Defines memcpy() diff --git a/include/core/environment.h b/include/core/environment.h index a776dee95..038f97e4e 100644 --- a/include/core/environment.h +++ b/include/core/environment.h @@ -1,31 +1,11 @@ /** * @file - * @author Erling R. Jellum (erling.r.jellum@ntnu.no) + * @author Erling R. Jellum + * @copyright (c) 2023, The Norwegian University of Science and Technology. + * License: BSD 2-clause + * @brief API for the environment data structure. * - * @section LICENSE - * Copyright (c) 2023, The Norwegian University of Science and Technology. - * - * Redistribution and use in source and binary forms, with or without modification, - * are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF - * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * @section DESCRIPTION API for creating and destroying environments. An environment is the + * This is an API for creating and destroying environments. An environment is the * "context" within which the reactors are executed. The environment contains data structures * which are shared among the reactors such as priority queues, the current logical tag, * the worker scheduler, and a lot of meta data. Each reactor stores a pointer to its diff --git a/include/core/federated/clock-sync.h b/include/core/federated/clock-sync.h index 72263e6bc..b003a3150 100644 --- a/include/core/federated/clock-sync.h +++ b/include/core/federated/clock-sync.h @@ -35,6 +35,15 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "low_level_platform.h" +// Clock synchronization defaults to performing clock synchronization only at initialization. +#define LF_CLOCK_SYNC_OFF 1 +#define LF_CLOCK_SYNC_INIT 2 +#define LF_CLOCK_SYNC_ON 3 + +#ifndef LF_CLOCK_SYNC +#define LF_CLOCK_SYNC LF_CLOCK_SYNC_INIT +#endif + /** * Number of required clock sync T4 messages per synchronization * interval. The offset to the clock will not be adjusted until @@ -49,6 +58,11 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define _LF_CLOCK_SYNC_ATTENUATION 10 #endif +/** By default, collect statistics on clock synchronization. 
*/ +#ifndef _LF_CLOCK_SYNC_COLLECT_STATS +#define _LF_CLOCK_SYNC_COLLECT_STATS true +#endif + /** * Define a guard band to filter clock synchronization * messages based on discrepancies in the network delay. @@ -121,7 +135,7 @@ void update_socket_stat(struct socket_stat_t* socket_stat, long long network_del * @return An lf_stat_ll struct with relevant information. */ struct lf_stat_ll calculate_socket_stat(struct socket_stat_t* socket_stat); -#endif +#endif // _LF_CLOCK_SYNC_COLLECT_STATS /** * Reset statistics on the socket. @@ -202,13 +216,13 @@ int create_clock_sync_thread(lf_thread_t* thread_id); * @brief Add the current clock synchronization offset to a specified timestamp. * @param t Pointer to the timestamp to which to add the offset. */ -void clock_sync_apply_offset(instant_t* t); +void clock_sync_add_offset(instant_t* t); /** * @brief Subtract the clock synchronization offset from a timestamp. * @param t The timestamp from which to subtract the current clock sync offset. */ -void clock_sync_remove_offset(instant_t* t); +void clock_sync_subtract_offset(instant_t* t); /** * Set a fixed offset to the physical clock. diff --git a/include/core/federated/federate.h b/include/core/federated/federate.h index 132ebe34a..dbf85f66a 100644 --- a/include/core/federated/federate.h +++ b/include/core/federated/federate.h @@ -502,6 +502,12 @@ void lf_spawn_staa_thread(void); */ void lf_stall_advance_level_federation(environment_t* env, size_t level); +/** + * @brief Version of lf_stall_advance_level_federation() that assumes the caller holds the mutex lock. + * @param level The level to which we would like to advance. + */ +void lf_stall_advance_level_federation_locked(size_t level); + /** * @brief Synchronize the start with other federates via the RTI. * diff --git a/include/core/federated/network/net_common.h b/include/core/federated/network/net_common.h index 73d71bef5..d9fa5f5ae 100644 --- a/include/core/federated/network/net_common.h +++ b/include/core/federated/network/net_common.h @@ -716,4 +716,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** HMAC authentication failed. */ #define HMAC_DOES_NOT_MATCH 6 +/** RTI not executed using -a or --auth option. */ +#define RTI_NOT_EXECUTED_WITH_AUTH 7 + #endif /* NET_COMMON_H */ diff --git a/include/core/lf_types.h b/include/core/lf_types.h index 75a61e405..a3a103041 100644 --- a/include/core/lf_types.h +++ b/include/core/lf_types.h @@ -157,9 +157,7 @@ struct reaction_t { void* self; // Pointer to a struct with the reactor's state. INSTANCE. int number; // The number of the reaction in the reactor (0 is the first reaction). index_t index; // Inverse priority determined by dependency analysis. INSTANCE. - // Binary encoding of the branches that this reaction has upstream in the dependency graph. INSTANCE. - unsigned long long chain_id; - size_t pos; // Current position in the priority queue. RUNTIME. + size_t pos; // Current position in the priority queue. RUNTIME. reaction_t* last_enabling_reaction; // The last enabling reaction, or NULL if there is none. Used for optimization. INSTANCE. size_t num_outputs; // Number of outputs that may possibly be produced by this function. COMMON. diff --git a/include/core/threaded/reactor_threaded.h b/include/core/threaded/reactor_threaded.h index 96de7ac49..0d58f7431 100644 --- a/include/core/threaded/reactor_threaded.h +++ b/include/core/threaded/reactor_threaded.h @@ -1,8 +1,8 @@ /** * @file - * @author Edward A. 
Lee (eal@berkeley.edu) - * @author{Marten Lohstroh } - * @author{Soroush Bateni } + * @author Edward A. Lee + * @author Marten Lohstroh + * @author Soroush Bateni * @copyright (c) 2020-2024, The University of California at Berkeley. * License: BSD 2-clause * @brief Runtime infrastructure for the threaded version of the C target of Lingua Franca. @@ -12,16 +12,6 @@ #include "lf_types.h" -/** - * @brief Advance to the next level. - * For federated runtimes, this function should - * stall the advance until we know that we can safely execute the next level - * given knowledge about upstream network port statuses. - * @param env The environment. - * @param next_reaction_level The place to store the next reaction level. - */ -void try_advance_level(environment_t* env, volatile size_t* next_reaction_level); - /** * Enqueue port absent reactions that will send a PORT_ABSENT * message to downstream federates if a given network output port is not present. diff --git a/include/core/threaded/scheduler.h b/include/core/threaded/scheduler.h index ea9f008c2..f49f0bc54 100644 --- a/include/core/threaded/scheduler.h +++ b/include/core/threaded/scheduler.h @@ -1,38 +1,13 @@ -/************* -Copyright (c) 2022, The University of Texas at Dallas. -Copyright (c) 2022, The University of California at Berkeley. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL -THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF -THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -***************/ - /** - * @file scheduler.h - * @author Soroush Bateni + * @file + * @author Soroush Bateni + * @author Edward A. Lee + * @copyright (c) 2022-2024, The University of Texas at Dallas and The University of California at Berkeley. + * License: BSD 2-clause * @brief Scheduler API for the threaded C runtime. * * A scheduler for the threaded runtime of reactor-c should provide an * implementation for functions that are defined in this header file. - * - * @copyright Copyright (c) 2022, The University of Texas at Dallas. - * @copyright Copyright (c) 2022, The University of California at Berkeley. */ #ifndef LF_SCHEDULER_H @@ -40,13 +15,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "lf_types.h" #include "scheduler_instance.h" -/** - * @brief Default value that is assumed to be the maximum reaction level in the - * program. 
- * - * Can be overriden by passing the appropriate `parameters` argument to - * `lf_sched_init`. - */ /** * @brief Initialize the scheduler. @@ -76,6 +44,7 @@ void lf_sched_free(lf_scheduler_t* scheduler); * This function blocks until it can return a ready reaction for worker thread * 'worker_number' or it is time for the worker thread to stop and exit (where a * NULL value would be returned). + * This function assumes that the environment mutex is not locked. * * @param scheduler The scheduler * @param worker_number For the calling worker thread. diff --git a/include/core/threaded/scheduler_instance.h b/include/core/threaded/scheduler_instance.h index f664066e6..df55a86be 100644 --- a/include/core/threaded/scheduler_instance.h +++ b/include/core/threaded/scheduler_instance.h @@ -1,38 +1,12 @@ -/************* -Copyright (c) 2022, The University of Texas at Dallas. Copyright (c) 2022, The -University of California at Berkeley. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -***************/ - /** - * @file scheduler_params.h - * @author Soroush Bateni - * @brief Scheduler parameters. - * - * Meant for book-keeping in the threaded schedulers in the reactor C runtime. + * @file + * @author Soroush Bateni + * @author Edward A. Lee + * @copyright (c) 2022-2024, The University of Texas at Dallas and The University of California at Berkeley. + * License: BSD 2-clause + * @brief Common scheduler parameters. * - * @copyright Copyright (c) 2022, The University of Texas at Dallas. - * @copyright Copyright (c) 2022, The University of California at Berkeley. + * This file defines data types and functions that are common across multiple schedulers. */ #ifndef LF_SCHEDULER_PARAMS_H @@ -42,8 +16,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define NUMBER_OF_WORKERS 1 #endif // NUMBER_OF_WORKERS -#include "lf_semaphore.h" #include +#include // for size_t #define DEFAULT_MAX_REACTION_LEVEL 100 @@ -65,38 +39,11 @@ typedef struct lf_scheduler_t { */ size_t max_reaction_level; - /** - * @brief Used by the scheduler to signal the maximum number of worker - * threads that should be executing work at the same time. - * - * Initially, the count is set to 0. Maximum value of count should be - * `number_of_workers`. 
- * - * For example, if the scheduler releases the semaphore with a count of 4, - * no more than 4 worker threads should wake up to process reactions. - * - * FIXME: specific comment - */ - lf_semaphore_t* semaphore; - /** * @brief Indicate whether the program should stop */ volatile bool should_stop; - /** - * @brief Hold triggered reactions. - */ - void* triggered_reactions; - - /** - * @brief An array of mutexes. - * - * Can be used to avoid race conditions. Schedulers are allowed to - * initialize as many mutexes as they deem fit. - */ - lf_mutex_t* array_of_mutexes; - /** * @brief An array of atomic indexes. * @@ -105,11 +52,6 @@ typedef struct lf_scheduler_t { */ volatile int* indexes; - /** - * @brief Hold currently executing reactions. - */ - void* executing_reactions; - /** * @brief Hold reactions temporarily. */ @@ -126,11 +68,6 @@ typedef struct lf_scheduler_t { */ volatile size_t number_of_idle_workers; - /** - * @brief The next level of reactions to execute. - */ - volatile size_t next_reaction_level; - // Pointer to an optional custom data structure that each scheduler can define. // The type is forward declared here and must be declared again in the scheduler source file // Is not touched by `init_sched_instance` and must be initialized by each scheduler that needs it diff --git a/include/core/threaded/scheduler_sync_tag_advance.h b/include/core/threaded/scheduler_sync_tag_advance.h index 3de92e540..309fffd1e 100644 --- a/include/core/threaded/scheduler_sync_tag_advance.h +++ b/include/core/threaded/scheduler_sync_tag_advance.h @@ -1,27 +1,12 @@ -/************* -Copyright (c) 2022, The University of Texas at Dallas. -Copyright (c) 2022, The University of California at Berkeley. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL -THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF -THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -***************/ +/** + * @file + * @author Soroush Bateni + * @author Edward A. Lee + * @author Marten Lohstroh + * @brief API used to advance tag globally. + * @copyright (c) 2020-2024, The University of California at Berkeley and The University of Texas at Dallas + * License: BSD 2-clause + */ #ifndef SCHEDULER_SYNC_TAG_ADVANCE_H #define SCHEDULER_SYNC_TAG_ADVANCE_H @@ -31,8 +16,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
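// Illustrative aside (not part of this patch): the fields removed from lf_scheduler_t
// above (semaphore, triggered_reactions, array_of_mutexes, executing_reactions,
// next_reaction_level) move into each scheduler's own custom_scheduler_data_t, which
// the shared header only forward-declares. A minimal standalone sketch of that
// pattern follows; sched_t and custom_sched_data_t are hypothetical stand-ins.
#include <stdio.h>
#include <stdlib.h>

// Shared header: the custom data type is opaque here.
typedef struct custom_sched_data_t custom_sched_data_t;
typedef struct {
  int number_of_workers;
  custom_sched_data_t* custom_data; // Owned and defined by the concrete scheduler.
} sched_t;

// Scheduler implementation file: the full definition is private to it.
struct custom_sched_data_t {
  size_t current_level;
};

int main(void) {
  sched_t s = {.number_of_workers = 4, .custom_data = NULL};
  s.custom_data = (custom_sched_data_t*)calloc(1, sizeof(custom_sched_data_t));
  s.custom_data->current_level = 2;
  printf("workers=%d level=%zu\n", s.number_of_workers, s.custom_data->current_level);
  free(s.custom_data);
  return 0;
}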
#include "tag.h" #include "scheduler_instance.h" -/////////////////// External Functions ///////////////////////// -void _lf_next_locked(struct environment_t* env); /** * Placeholder for code-generated function that will, in a federated * execution, be used to coordinate the advancement of tag. It will notify @@ -42,7 +25,25 @@ void _lf_next_locked(struct environment_t* env); * @param tag_to_send The tag to send. */ void logical_tag_complete(tag_t tag_to_send); + +/** + * @brief Return true if the worker should stop now; false otherwise. + * + * This function assumes the caller holds the mutex lock. + * @param sched The scheduler instance to check. + */ bool should_stop_locked(lf_scheduler_t* sched); + +/** + * @brief Advance the tag to the next tag on the event queue + * + * This will also pop events for the newly acquired tag and trigger + * the enabled reactions using the scheduler. + * + * This function assumes the caller holds the environment mutex lock. + * @param sched The scheduler instance to check. + * @return True if the worker thread should exit. False otherwise. + */ bool _lf_sched_advance_tag_locked(lf_scheduler_t* sched); #endif // LF_C11_THREADS_SUPPORT_H diff --git a/include/core/tracepoint.h b/include/core/tracepoint.h index 78c95b6fc..4a8a5bc79 100644 --- a/include/core/tracepoint.h +++ b/include/core/tracepoint.h @@ -37,72 +37,7 @@ #include "net_common.h" #endif // FEDERATED -/** - * Trace event types. If you update this, be sure to update the - * string representation below. Also, create a tracepoint function - * for each event type. - */ -typedef enum { - reaction_starts, - reaction_ends, - reaction_deadline_missed, - schedule_called, - user_event, - user_value, - worker_wait_starts, - worker_wait_ends, - scheduler_advancing_time_starts, - scheduler_advancing_time_ends, - federated, // Everything below this is for tracing federated interactions. - // Sending messages - send_ACK, - send_FAILED, - send_TIMESTAMP, - send_NET, - send_LTC, - send_STOP_REQ, - send_STOP_REQ_REP, - send_STOP_GRN, - send_FED_ID, - send_PTAG, - send_TAG, - send_REJECT, - send_RESIGN, - send_PORT_ABS, - send_CLOSE_RQ, - send_TAGGED_MSG, - send_P2P_TAGGED_MSG, - send_MSG, - send_P2P_MSG, - send_ADR_AD, - send_ADR_QR, - // Receiving messages - receive_ACK, - receive_FAILED, - receive_TIMESTAMP, - receive_NET, - receive_LTC, - receive_STOP_REQ, - receive_STOP_REQ_REP, - receive_STOP_GRN, - receive_FED_ID, - receive_PTAG, - receive_TAG, - receive_REJECT, - receive_RESIGN, - receive_PORT_ABS, - receive_CLOSE_RQ, - receive_TAGGED_MSG, - receive_P2P_TAGGED_MSG, - receive_MSG, - receive_P2P_MSG, - receive_ADR_AD, - receive_ADR_QR, - receive_UNIDENTIFIED, - send_STOP, - receive_STOP, - NUM_EVENT_TYPES -} trace_event_t; +#include "trace_types.h" #ifdef LF_TRACE @@ -168,7 +103,8 @@ int register_user_trace_event(void* self, char* description); * @param worker The thread number of the worker thread or 0 for single-threaded execution. */ #define tracepoint_reaction_starts(env, reaction, worker) \ - call_tracepoint(reaction_starts, reaction->self, env->current_tag, worker, worker, reaction->number, NULL, NULL, 0) + call_tracepoint(reaction_starts, reaction->self, env->current_tag, worker, worker, reaction->number, NULL, NULL, \ + reaction->deadline) /** * Trace the end of a reaction execution. @@ -177,7 +113,8 @@ int register_user_trace_event(void* self, char* description); * @param worker The thread number of the worker thread or 0 for single-threaded execution. 
*/ #define tracepoint_reaction_ends(env, reaction, worker) \ - call_tracepoint(reaction_ends, reaction->self, env->current_tag, worker, worker, reaction->number, NULL, NULL, 0) + call_tracepoint(reaction_ends, reaction->self, env->current_tag, worker, worker, reaction->number, NULL, NULL, \ + reaction->deadline) /** * Trace a call to schedule. @@ -418,8 +355,10 @@ static inline void tracepoint_federate_from_federate(trace_event_t event_type, i (void)partner_id; (void)tag; } -static inline void lf_tracing_global_init(char* file_name_prefix, int process_id, int max_num_local_threads) { - (void)file_name_prefix; +static inline void lf_tracing_global_init(char* process_name, char* process_names, int process_id, + int max_num_local_threads) { + (void)process_name; + (void)process_names; (void)process_id; (void)max_num_local_threads; } diff --git a/include/core/utils/pqueue_base.h b/include/core/utils/pqueue_base.h index b913ab64f..4b76fab0e 100644 --- a/include/core/utils/pqueue_base.h +++ b/include/core/utils/pqueue_base.h @@ -122,14 +122,6 @@ size_t pqueue_size(pqueue_t* q); */ int pqueue_insert(pqueue_t* q, void* d); -/** - * Move an existing entry to a different priority. - * @param q the queue - * @param new_pri the new priority - * @param d the entry - */ -void pqueue_change_priority(pqueue_t* q, pqueue_pri_t new_pri, void* d); - /** * Pop the highest-ranking item from the queue. * @param q the queue diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt index 0b805b7e4..6cf727325 100644 --- a/lib/CMakeLists.txt +++ b/lib/CMakeLists.txt @@ -4,5 +4,6 @@ include(${LF_ROOT}/core/lf_utils.cmake) add_library(lib schedule.c) target_link_libraries(lib PRIVATE lf::low-level-platform-api) target_link_libraries(lib PRIVATE lf::logging-api) +target_link_libraries(lib PUBLIC lf::trace-api-types) lf_enable_compiler_warnings(lib) \ No newline at end of file diff --git a/lib/schedule.c b/lib/schedule.c index 5aa9fd528..168352373 100644 --- a/lib/schedule.c +++ b/lib/schedule.c @@ -234,6 +234,7 @@ trigger_handle_t lf_schedule_trigger(environment_t* env, trigger_t* trigger, int // If the event is early, see which policy applies. if (earliest_time > intended_tag.time) { LF_PRINT_DEBUG("Event is early."); + event_t *dummy, *found; switch (trigger->policy) { case drop: LF_PRINT_DEBUG("Policy is drop. Dropping the event."); @@ -247,10 +248,10 @@ trigger_handle_t lf_schedule_trigger(environment_t* env, trigger_t* trigger, int // If the event with the previous tag is still on the event // queue, then replace the token. To find this event, we have // to construct a dummy event_t struct. 
- event_t* dummy = lf_get_new_event(env); + dummy = lf_get_new_event(env); dummy->trigger = trigger; dummy->base.tag = trigger->last_tag; - event_t* found = (event_t*)pqueue_tag_find_equal_same_tag(env->event_q, (pqueue_tag_element_t*)dummy); + found = (event_t*)pqueue_tag_find_equal_same_tag(env->event_q, (pqueue_tag_element_t*)dummy); if (found != NULL) { // Recycle the existing token and the new event diff --git a/low_level_platform/api/CMakeLists.txt b/low_level_platform/api/CMakeLists.txt index b4598ed9c..9f2172bce 100644 --- a/low_level_platform/api/CMakeLists.txt +++ b/low_level_platform/api/CMakeLists.txt @@ -3,10 +3,16 @@ target_include_directories(lf-low-level-platform-api INTERFACE ${CMAKE_CURRENT_L add_library(lf::low-level-platform-api ALIAS lf-low-level-platform-api) target_link_libraries(lf-low-level-platform-api INTERFACE lf::tag-api) -if(${CMAKE_SYSTEM_NAME} STREQUAL "Nrf52") +if(${CMAKE_SYSTEM_NAME} STREQUAL "nRF52") target_compile_definitions(lf-low-level-platform-api INTERFACE PLATFORM_NRF52) elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Zephyr") target_compile_definitions(lf-low-level-platform-api INTERFACE PLATFORM_ZEPHYR) elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Rp2040") target_compile_definitions(lf-low-level-platform-api INTERFACE PLATFORM_RP2040) + target_link_libraries(lf-low-level-platform-api INTERFACE pico_stdlib) + target_link_libraries(lf-low-level-platform-api INTERFACE pico_multicore) + target_link_libraries(lf-low-level-platform-api INTERFACE pico_sync) +elseif(${CMAKE_SYSTEM_NAME} STREQUAL "FlexPRET") + target_compile_definitions(lf-low-level-platform-api INTERFACE PLATFORM_FLEXPRET) + target_link_libraries(lf-low-level-platform-api INTERFACE fp-sdk) endif() diff --git a/low_level_platform/api/low_level_platform.h b/low_level_platform/api/low_level_platform.h index 103801c35..9611870cc 100644 --- a/low_level_platform/api/low_level_platform.h +++ b/low_level_platform/api/low_level_platform.h @@ -50,6 +50,8 @@ int lf_critical_section_exit(environment_t* env); #include "platform/lf_nrf52_support.h" #elif defined(PLATFORM_RP2040) #include "platform/lf_rp2040_support.h" +#elif defined(PLATFORM_FLEXPRET) +#include "platform/lf_flexpret_support.h" #elif defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__NT__) // Windows platforms #include "platform/lf_windows_support.h" @@ -74,6 +76,10 @@ int lf_critical_section_exit(environment_t* env); #define LF_TIMEOUT 1 +// Worker priorities range from 0 to 99 where 99 is the highest priority. +#define LF_SCHED_MAX_PRIORITY 99 +#define LF_SCHED_MIN_PRIORITY 0 + // To support the single-threaded runtime, we need the following functions. They // are not required by the threaded runtime and is thus hidden behind a #ifdef. #if defined(LF_SINGLE_THREADED) @@ -107,12 +113,16 @@ int lf_mutex_lock(lf_mutex_t* mutex); */ int lf_available_cores(); +/** + * @brief Return the lf_thread_t of the calling thread. + */ +lf_thread_t lf_thread_self(); + /** * Create a new thread, starting with execution of lf_thread * getting passed arguments. The new handle is stored in thread_id. * * @return 0 on success, platform-specific error number otherwise. - * */ int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments); @@ -132,6 +142,54 @@ int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* argum */ int lf_thread_join(lf_thread_t thread, void** thread_return); +/** + * @brief The thread scheduling policies. + */ +typedef enum { + LF_SCHED_FAIR, // Non real-time scheduling policy. 
Corresponds to SCHED_OTHER + LF_SCHED_TIMESLICE, // Real-time, time-slicing priority-based policty. Corresponds to SCHED_RR. + LF_SCHED_PRIORITY, // Real-time, priority-only based scheduling. Corresponds to SCHED_FIFO. +} lf_scheduling_policy_type_t; + +typedef struct { + lf_scheduling_policy_type_t policy; // The scheduling policy + int priority; // The priority, if applicable + interval_t time_slice; // The time-slice allocated, if applicable. +} lf_scheduling_policy_t; + +/** + * @brief Pin a thread to a specific CPU. + * + * @param thread The thread + * @param cpu_number the CPU ID + * @return 0 on success, platform-specific error number otherwise. + */ +int lf_thread_set_cpu(lf_thread_t thread, size_t cpu_number); + +/** + * @brief Set the priority of a thread. + * Priority ranges from 0 to 99 where a higher + * number indicates higher priority. Setting the priority of a thread only + * makes sense if the thread is scheduled with LF_SCHED_TIMESLICE or LF_THREAD_PRIORITY + * + * @param thread The thread. + * @param priority The priority. + * @return int 0 on success, platform-specific error otherwise + */ +int lf_thread_set_priority(lf_thread_t thread, int priority); + +/** + * @brief Set the scheduling policy of a thread. This is based on the scheduling + * concept from Linux explained here: https://man7.org/linux/man-pages/man7/sched.7.html + * A scheduling policy is specific to a thread/worker. We have three policies + * LF_SCHED_PRIORITY which corresponds to SCHED_FIFO on Linux. + * LF_SCHED_TIMESLICE which corresponds to SCHED_RR on Linux. + * LF_SCHED_FAIR which corresponds to SCHED_OTHER on Linux. + * + * @return int 0 on success, platform-specific error number otherwise. + */ +int lf_thread_set_scheduling_policy(lf_thread_t thread, lf_scheduling_policy_t* policy); + /** * Initialize a mutex. * @@ -176,7 +234,7 @@ int lf_cond_signal(lf_cond_t* cond); /** * Wait for condition variable "cond" to be signaled or broadcast. - * "mutex" is assumed to be locked before. + * The cond->mutex is assumed to be locked when this is called. * * @return 0 on success, platform-specific error number otherwise. */ diff --git a/low_level_platform/api/platform/lf_C11_threads_support.h b/low_level_platform/api/platform/lf_C11_threads_support.h deleted file mode 100644 index 64a25797f..000000000 --- a/low_level_platform/api/platform/lf_C11_threads_support.h +++ /dev/null @@ -1,44 +0,0 @@ -/* C11 threads support for the C target of Lingua Franca. */ - -/************* -Copyright (c) 2019, The University of California at Berkeley. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL -THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF -THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -***************/ - -/** \file if_c11_threads_support.c - * C11 threads support for the C target of Lingua Franca. - * - * @author{Soroush Bateni } - */ -#ifndef LF_C11_THREADS_SUPPORT_H -#define LF_C11_THREADS_SUPPORT_H - -#include - -typedef mtx_t lf_mutex_t; -typedef struct { - lf_mutex_t* mutex; - cnd_t condition; -} lf_cond_t; -typedef thrd_t lf_thread_t; - -#endif diff --git a/low_level_platform/api/platform/lf_arduino_support.h b/low_level_platform/api/platform/lf_arduino_support.h index 94c5d4933..aa76af8e3 100644 --- a/low_level_platform/api/platform/lf_arduino_support.h +++ b/low_level_platform/api/platform/lf_arduino_support.h @@ -129,7 +129,7 @@ typedef void* lf_thread_t; #define LLONG_MIN (-LLONG_MAX - 1LL) #define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL) -// Arduinos are embedded platforms with no tty -#define NO_TTY +// Arduinos are embedded platforms with no command line interface +#define NO_CLI #endif // LF_ARDUINO_SUPPORT_H diff --git a/low_level_platform/api/platform/lf_flexpret_support.h b/low_level_platform/api/platform/lf_flexpret_support.h new file mode 100644 index 000000000..8a6296ee7 --- /dev/null +++ b/low_level_platform/api/platform/lf_flexpret_support.h @@ -0,0 +1,98 @@ +/* FlexPRET API support for the C target of Lingua Franca. */ + +/************* +Copyright (c) 2021, The University of California at Berkeley. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +***************/ + +/** + * FlexPRET API support for the C target of Lingua Franca. + * + * @author{Magnus Mæhlum } + */ + +#ifndef LF_FLEXPRET_SUPPORT_H +#define LF_FLEXPRET_SUPPORT_H + +#include + +/** + * Like nRF52, for FlexPRET, each mutex will control an interrupt. + * + * The mutex holds the interrupt number. 
+ * For example, a mutex might be defined for the GPIOTE peripheral interrupt number + * + * When initialized, the interrupt is inserted into a global linked list + * for disabling and enabling all interrupts during sleep functions. + * - All interrupts are disabled by default after initialization + * - Priority levels are restricted between (0-7) + * + */ + +#include // Needed to define PRId64 and PRIu32 +#define PRINTF_TIME "%" PRId64 +#define PRINTF_MICROSTEP "%" PRIu32 + +// For convenience, the following string can be inserted in a printf +// format for printing both time and microstep as follows: +// printf("Tag is " PRINTF_TAG "\n", time_value, microstep); +#define PRINTF_TAG "(%" PRId64 ", %" PRIu32 ")" + +#if !defined(LF_SINGLE_THREADED) +typedef fp_lock_t lf_mutex_t; +typedef fp_thread_t lf_thread_t; +typedef fp_cond_t lf_cond_t; +#endif + +// This will filter out some unecessary calls to standard library functions +// and save code space +#define NO_CLI +#define MINIMAL_STDLIB + +/** + * Need to include `stdio` here, because we #define `fprintf` and `vfprintf` below. + * Since stdio.h contains declarations for these functions, including it + * after will result in the following: + * + * #define fprintf(s, f, ...) printf(f, ##__VA_ARGS__) + * + * int fprintf (FILE *__restrict, const char *__restrict, ...) + * _ATTRIBUTE ((__format__ (__printf__, 2, 3))); + * + * Which the preprocessor will replace with: + * + * int printf (FILE *__restrict, const char *__restrict, ...) + * _ATTRIBUTE ((__format__ (__printf__, 2, 3))); + * + * Which will yield an error. + * + */ +#include + +// Likewise, fprintf is used to print to `stderr`, but FlexPRET has no `stderr` +// We instead redirect its output to normal printf +// Note: Most compilers do not support passing this on the command line, so CMake +// will drop it if you try... But that would be the better option. +#define fprintf(stream, fmt, ...) printf(fmt, ##__VA_ARGS__) +#define vfprintf(fp, fmt, args) vprintf(fmt, args) + +#endif // LF_FLEXPRET_SUPPORT_H diff --git a/low_level_platform/api/platform/lf_linux_support.h b/low_level_platform/api/platform/lf_linux_support.h index 18f68b2aa..cdeb17969 100644 --- a/low_level_platform/api/platform/lf_linux_support.h +++ b/low_level_platform/api/platform/lf_linux_support.h @@ -40,12 +40,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "lf_tag_64_32.h" #if !defined LF_SINGLE_THREADED -#if __STDC_VERSION__ < 201112L || defined(__STDC_NO_THREADS__) -// (Not C++11 or later) or no threads support #include "lf_POSIX_threads_support.h" -#else -#include "lf_C11_threads_support.h" -#endif #endif #if !defined(_POSIX_TIMERS) || _POSIX_TIMERS <= 0 diff --git a/low_level_platform/api/platform/lf_macos_support.h b/low_level_platform/api/platform/lf_macos_support.h index 357729f08..e0f4cbf4b 100644 --- a/low_level_platform/api/platform/lf_macos_support.h +++ b/low_level_platform/api/platform/lf_macos_support.h @@ -38,12 +38,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "lf_tag_64_32.h" #if !defined LF_SINGLE_THREADED -#if __STDC_VERSION__ < 201112L || defined(__STDC_NO_THREADS__) -// (Not C++11 or later) or no threads support #include "lf_POSIX_threads_support.h" -#else -#include "lf_C11_threads_support.h" -#endif #endif #endif // LF_MACOS_SUPPORT_H diff --git a/low_level_platform/api/platform/lf_nrf52_support.h b/low_level_platform/api/platform/lf_nrf52_support.h index 18613b2e0..bd657d224 100644 --- a/low_level_platform/api/platform/lf_nrf52_support.h +++ b/low_level_platform/api/platform/lf_nrf52_support.h @@ -1,4 +1,4 @@ -/* nRF52832 API support for the C target of Lingua Franca. */ +/* nRF52 API support for the C target of Lingua Franca. */ /************* Copyright (c) 2021, The University of California at Berkeley. @@ -24,18 +24,20 @@ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ***************/ -/** nrf52 API support for the C target of Lingua Franca. +/** + * nRF52 API support for the C target of Lingua Franca. * - * @author{Soroush Bateni } - * @author{Abhi Gundrala } - * @author{Erling Rennemo Jellum } + * @author{Soroush Bateni } + * @author{Abhi Gundrala } + * @author{Erling Rennemo Jellum } */ #ifndef LF_NRF52_SUPPORT_H #define LF_NRF52_SUPPORT_H -// This embedded platform has no TTY suport -#define NO_TTY +// This embedded platform has no command line interface +#define NO_CLI +#define MINIMAL_STDLIB #include // For fixed-width integral types #include diff --git a/low_level_platform/api/platform/lf_platform_util.h b/low_level_platform/api/platform/lf_platform_util.h new file mode 100644 index 000000000..0874f4cdf --- /dev/null +++ b/low_level_platform/api/platform/lf_platform_util.h @@ -0,0 +1,8 @@ +#ifndef LF_PLATFORM_UTIL_H +#define LF_PLATFORM_UTIL_H +/** + * @brief Maps a priority into a destination priority range. + */ +int map_priorities(int priority, int dest_min, int dest_max); + +#endif \ No newline at end of file diff --git a/low_level_platform/api/platform/lf_rp2040_support.h b/low_level_platform/api/platform/lf_rp2040_support.h index 1b23e3a2e..0cea2b1ea 100644 --- a/low_level_platform/api/platform/lf_rp2040_support.h +++ b/low_level_platform/api/platform/lf_rp2040_support.h @@ -10,7 +10,8 @@ #include #include -#define NO_TTY +#define NO_CLI +#define MINIMAL_STDLIB // Defines for formatting time in printf for pico #define PRINTF_TAG "(" PRINTF_TIME ", " PRINTF_MICROSTEP ")" @@ -20,4 +21,16 @@ #define LF_TIME_BUFFER_LENGTH 80 #define _LF_TIMEOUT 1 +#ifndef LF_SINGLE_THREADED +#warning "Threaded support on rp2040 is still experimental" + +typedef recursive_mutex_t lf_mutex_t; +typedef struct { + semaphore_t notifs[NUM_CORES]; + lf_mutex_t* mutex; +} lf_cond_t; +typedef int lf_thread_t; + +#endif // LF_SINGLE_THREADED + #endif // LF_PICO_SUPPORT_H diff --git a/low_level_platform/api/platform/lf_zephyr_support.h b/low_level_platform/api/platform/lf_zephyr_support.h index 0f7ab6b4d..724bbe4e5 100644 --- a/low_level_platform/api/platform/lf_zephyr_support.h +++ b/low_level_platform/api/platform/lf_zephyr_support.h @@ -39,7 +39,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
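The new map_priorities() helper declared above rescales the portable 0..99 worker priority range into a platform-specific range, which may be inverted (dest_min greater than dest_max) on platforms where a numerically lower value means a higher priority. Below is a small usage sketch, not part of the patch; the concrete results follow the linear mapping implemented in lf_platform_util.c later in this patch, and the 16-level inverted range is only an example.

```c
#include <stdio.h>

// Declared in platform/lf_platform_util.h and implemented in lf_platform_util.c.
int map_priorities(int priority, int dest_min, int dest_max);

int main(void) {
  // POSIX-style range 1..99: LF priority 99 maps to 99, LF priority 0 maps to 1.
  printf("%d %d\n", map_priorities(99, 1, 99), map_priorities(0, 1, 99));
  // Inverted range 15..0 (e.g., 16 preemptible priority levels):
  // LF priority 99 maps to 0, the highest priority on such platforms.
  printf("%d\n", map_priorities(99, 15, 0));
  return 0;
}
```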
#include -#define NO_TTY +#define NO_CLI +#define MINIMAL_STDLIB #if !defined(LF_SINGLE_THREADED) typedef struct k_mutex lf_mutex_t; diff --git a/low_level_platform/impl/CMakeLists.txt b/low_level_platform/impl/CMakeLists.txt index 2322266ec..c0f2d8bb5 100644 --- a/low_level_platform/impl/CMakeLists.txt +++ b/low_level_platform/impl/CMakeLists.txt @@ -13,18 +13,16 @@ if(${CMAKE_SYSTEM_NAME} STREQUAL "Windows") elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") set(LF_LOW_LEVEL_PLATFORM_FILES ${CMAKE_CURRENT_LIST_DIR}/src/lf_unix_clock_support.c - ${CMAKE_CURRENT_LIST_DIR}/src/lf_unix_syscall_support.c ${CMAKE_CURRENT_LIST_DIR}/src/lf_linux_support.c ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_gcc_clang.c ) elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin") set(LF_LOW_LEVEL_PLATFORM_FILES ${CMAKE_CURRENT_LIST_DIR}/src/lf_unix_clock_support.c - ${CMAKE_CURRENT_LIST_DIR}/src/lf_unix_syscall_support.c ${CMAKE_CURRENT_LIST_DIR}/src/lf_macos_support.c ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_gcc_clang.c ) -elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Nrf52") +elseif(${CMAKE_SYSTEM_NAME} STREQUAL "nRF52") set(LF_LOW_LEVEL_PLATFORM_FILES ${CMAKE_CURRENT_LIST_DIR}/src/lf_nrf52_support.c ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_irq.c @@ -41,11 +39,16 @@ elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Rp2040") ${CMAKE_CURRENT_LIST_DIR}/src/lf_rp2040_support.c ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_irq.c ) +elseif(${CMAKE_SYSTEM_NAME} STREQUAL "FlexPRET") + set(LF_LOW_LEVEL_PLATFORM_FILES + ${CMAKE_CURRENT_LIST_DIR}/src/lf_flexpret_support.c + ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_irq.c + ) else() - message(FATAL_ERROR "Your platform is not supported! The C target supports Linux, MacOS, Windows, Zephyr, Nrf52 and RP2040.") + message(FATAL_ERROR "Your platform is not supported! The C target supports FlexPRET, Linux, MacOS, nRF52, RP2040, Windows, and Zephyr.") endif() -list(APPEND LF_LOW_LEVEL_PLATFORM_FILES ${CMAKE_CURRENT_LIST_DIR}/src/platform_internal.c) +list(APPEND LF_LOW_LEVEL_PLATFORM_FILES ${CMAKE_CURRENT_LIST_DIR}/src/lf_platform_util.c) if(${CMAKE_SYSTEM_NAME} STREQUAL "Zephyr") if(${LF_ZEPHYR_CLOCK_COUNTER}) @@ -56,6 +59,40 @@ if(${CMAKE_SYSTEM_NAME} STREQUAL "Zephyr") zephyr_library_named(lf-low-level-platform-impl) zephyr_library_sources(${LF_LOW_LEVEL_PLATFORM_FILES}) zephyr_library_link_libraries(kernel) +elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Rp2040") + add_library(lf-low-level-platform-impl STATIC ${LF_LOW_LEVEL_PLATFORM_FILES}) + if (DEFINED NUMBER_OF_WORKERS) + if (${NUMBER_OF_WORKERS} GREATER 2) + message(FATAL_ERROR "RP2040 can have at most 2 workers (one per core).\ + Number of requested workers is ${NUMBER_OF_WORKERS}.") + endif() + endif() +elseif(${CMAKE_SYSTEM_NAME} STREQUAL "FlexPRET") + add_library(lf-low-level-platform-impl STATIC ${LF_LOW_LEVEL_PLATFORM_FILES}) + target_link_libraries(lf-low-level-platform-impl PRIVATE fp-sdk) + + if (DEFINED NUMBER_OF_WORKERS) + # Verify that FlexPRET has the number of requested workers + # That information is available in the SDK's hwconfig + include($ENV{FP_SDK_PATH}/flexpret/hwconfig.cmake) + if (NOT DEFINED THREADS) + message(FATAL_ERROR + "Missing FlexPRET hardware configuration; check that FlexPRET has \ + been installed to the SDK." + ) + endif() + + math(EXPR FLEXPRET_AVAILABLE_WORKERS "${THREADS} - 1") + + if (${NUMBER_OF_WORKERS} GREATER ${FLEXPRET_AVAILABLE_WORKERS}) + message(FATAL_ERROR + "Number of requested workers (${NUMBER_OF_WORKERS}) is higher \ + than FlexPRET's number of available workers \ + (${FLEXPRET_AVAILABLE_WORKERS}). 
Note that FlexPRET uses \ + hardware threads, not the usual software threads" + ) + endif() + endif() else() add_library(lf-low-level-platform-impl STATIC ${LF_LOW_LEVEL_PLATFORM_FILES}) # Link the platform to a threading library @@ -72,7 +109,7 @@ target_link_libraries(lf-low-level-platform-impl PRIVATE lf::low-level-platform- target_link_libraries(lf-low-level-platform-impl PUBLIC lf-logging-api) target_compile_definitions(lf-low-level-platform-impl PUBLIC PLATFORM_${CMAKE_SYSTEM_NAME}) -message(STATUS "Applying preprocessor definitions to platform...") +message(STATUS "Applying preprocessor definitions to low-level-platform...") macro(low_level_platform_define X) if(DEFINED ${X}) message(STATUS ${X}=${${X}}) diff --git a/low_level_platform/impl/src/lf_C11_threads_support.c b/low_level_platform/impl/src/lf_C11_threads_support.c deleted file mode 100644 index 34ccd3969..000000000 --- a/low_level_platform/impl/src/lf_C11_threads_support.c +++ /dev/null @@ -1,68 +0,0 @@ -#if !defined(LF_SINGLE_THREADED) && !defined(PLATFORM_ARDUINO) -#include "low_level_platform.h" -#include "platform/lf_C11_threads_support.h" -#include -#include -#include // For fixed-width integral types - -struct lf_thread_data { - void* (*thread)(void*); - void* arguments; -}; - -static int lf_thread_c11_wrapper(void* args) { - struct lf_thread_data* thread_data = (struct lf_thread_data*)args; - thread_data->thread(thread_data->arguments); - free(thread_data); - return 0; -} - -int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) { - struct lf_thread_data* thread_data = (struct lf_thread_data*)malloc(sizeof(struct lf_thread_data)); - thread_data->thread = lf_thread; - thread_data->arguments = arguments; - - return thrd_create((thrd_t*)thread, (thrd_start_t)lf_thread_c11_wrapper, thread_data); -} - -int lf_thread_join(lf_thread_t thread, void** thread_return) { - // thrd_join wants the second argument to be an int* rather than a void** - return thrd_join((thrd_t)thread, (int*)thread_return); -} - -int lf_mutex_init(lf_mutex_t* mutex) { - // Set up a timed and recursive mutex (default behavior) - return mtx_init((mtx_t*)mutex, mtx_timed | mtx_recursive); -} - -int lf_mutex_lock(lf_mutex_t* mutex) { return mtx_lock((mtx_t*)mutex); } - -int lf_mutex_unlock(lf_mutex_t* mutex) { return mtx_unlock((mtx_t*)mutex); } - -int lf_cond_init(lf_cond_t* cond, lf_mutex_t* mutex) { - cond->mutex = mutex; - return cnd_init((cnd_t*)&cond->condition); -} - -int lf_cond_broadcast(lf_cond_t* cond) { return cnd_broadcast((cnd_t*)&cond->condition); } - -int lf_cond_signal(lf_cond_t* cond) { return cnd_signal((cnd_t*)&cond->condition); } - -int lf_cond_wait(lf_cond_t* cond) { return cnd_wait((cnd_t*)&cond->condition, (mtx_t*)cond->mutex); } - -int _lf_cond_timedwait(lf_cond_t* cond, instant_t wakeup_time) { - struct timespec timespec_absolute_time = {.tv_sec = wakeup_time / BILLION, .tv_nsec = wakeup_time % BILLION}; - - int return_value = cnd_timedwait((cnd_t*)&cond->condition, (mtx_t*)cond->mutex, ×pec_absolute_time); - - switch (return_value) { - case thrd_timedout: - return_value = LF_TIMEOUT; - break; - - default: - break; - } - return return_value; -} -#endif diff --git a/low_level_platform/impl/src/lf_POSIX_threads_support.c b/low_level_platform/impl/src/lf_POSIX_threads_support.c index 57f3a6811..255f38255 100644 --- a/low_level_platform/impl/src/lf_POSIX_threads_support.c +++ b/low_level_platform/impl/src/lf_POSIX_threads_support.c @@ -6,11 +6,16 @@ #include #include #include // For fixed-width integral 
types +#include + +int lf_available_cores() { return (int)sysconf(_SC_NPROCESSORS_ONLN); } int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) { return pthread_create((pthread_t*)thread, NULL, lf_thread, arguments); } +lf_thread_t lf_thread_self() { return pthread_self(); } + int lf_thread_join(lf_thread_t thread, void** thread_return) { return pthread_join((pthread_t)thread, thread_return); } int lf_mutex_init(lf_mutex_t* mutex) { diff --git a/low_level_platform/impl/src/lf_arduino_support.c b/low_level_platform/impl/src/lf_arduino_support.c index ed9391205..5793bd650 100644 --- a/low_level_platform/impl/src/lf_arduino_support.c +++ b/low_level_platform/impl/src/lf_arduino_support.c @@ -170,6 +170,13 @@ typedef void* (*lf_function_t)(void*); */ int lf_available_cores() { return 1; } +lf_thread_t lf_thread_self() { + // Not implemented. Although Arduino mbed provides a ThisThread API and a + // get_id() function, it does not provide a way to get the current thread as a + // Thread object. + return NULL; +} + int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) { lf_thread_t t = thread_new(); long int start = thread_start(t, *lf_thread, arguments); diff --git a/low_level_platform/impl/src/lf_atomic_irq.c b/low_level_platform/impl/src/lf_atomic_irq.c index 7be9aff34..2854a6f11 100644 --- a/low_level_platform/impl/src/lf_atomic_irq.c +++ b/low_level_platform/impl/src/lf_atomic_irq.c @@ -1,4 +1,5 @@ -#if defined(PLATFORM_ARDUINO) || defined(PLATFORM_NRF52) || defined(PLATFORM_ZEPHYR) || defined(PLATFORM_RP2040) +#if defined(PLATFORM_ARDUINO) || defined(PLATFORM_NRF52) || defined(PLATFORM_ZEPHYR) || defined(PLATFORM_RP2040) || \ + defined(PLATFORM_FLEXPRET) /** * @author Erling Rennemo Jellum * @copyright (c) 2023 diff --git a/low_level_platform/impl/src/lf_flexpret_support.c b/low_level_platform/impl/src/lf_flexpret_support.c new file mode 100644 index 000000000..cf37c1b8a --- /dev/null +++ b/low_level_platform/impl/src/lf_flexpret_support.c @@ -0,0 +1,234 @@ +#if defined(PLATFORM_FLEXPRET) +/************* +Copyright (c) 2021, The University of California at Berkeley. +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +***************/ + +/** Support file for Bare-metal FlexPRET platform. 
+ * + * @author{Shaokai Lin } + * @author{Magnus Mæhlum } + */ + +#include +#include +#include +#include "low_level_platform.h" + +/** + * Used to keep track of the number of nested critical sections. + * + * We should only disable interrupts when this is zero and we enter a critical section + * We should only enable interrupts when we exit a critical section and this is zero + */ +static int critical_section_num_nested[FP_THREADS] = THREAD_ARRAY_INITIALIZER(0); + +static volatile bool _lf_async_event_occurred = false; + +#define EPOCH_DURATION_NS (1ULL << 32) + +int _lf_clock_gettime(instant_t* t) { + *t = (instant_t)rdtime64(); + return 0; +} + +int _lf_sleep_common(instant_t wakeup_time, bool interruptable) { + // Store the number of epochs; i.e., how many times the 32-bit timer + // will overflow + uint32_t wakeup_time_epochs = 0; + uint32_t wakeup_time_after_epochs = 0; + uint32_t sleep_start = rdtime(); + + if (wakeup_time > (instant_t)EPOCH_DURATION_NS) { + wakeup_time_epochs = wakeup_time / EPOCH_DURATION_NS; + wakeup_time_after_epochs = wakeup_time % EPOCH_DURATION_NS; + + if (wakeup_time < sleep_start) { + // This means we need to do another epoch + wakeup_time_epochs++; + } + } else { + wakeup_time_epochs = 0; + wakeup_time_after_epochs = wakeup_time; + if (wakeup_time < sleep_start) { + // Nothing to do; should not happen + // LF_PRINT_DEBUG("FlexPRET: _lf_sleep_common called with wakeup_time < current time\n"); + return 0; + } + } + + const uint32_t max_uint32_value = 0xFFFFFFFF; + _lf_async_event_occurred = false; + + for (uint32_t i = 0; i < wakeup_time_epochs; i++) { + // The first sleep until will only be partial + if (interruptable) { + // Can be interrupted + // NOTE: Does not work until this issue is resolved: + // https://github.com/pretis/flexpret/issues/93 + fp_wait_until(max_uint32_value); + if (_lf_async_event_occurred) + break; + } else { + // Cannot be interrupted + // NOTE: Does not work until this issue is resolved: + // https://github.com/pretis/flexpret/issues/93 + fp_delay_until(max_uint32_value); + } + } + + if (interruptable) { + if (!_lf_async_event_occurred) { + fp_wait_until(wakeup_time_after_epochs); + } + } else { + // Cannot be interrupted + fp_delay_until(wakeup_time_after_epochs); + } + + return _lf_async_event_occurred; +} + +int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_time) { + // Enable interrupts and execute wait until instruction + lf_critical_section_exit(env); + + // Wait until will stop sleep if interrupt occurs + int ret = _lf_sleep_common(wakeup_time, true); + + lf_critical_section_enter(env); + return ret; +} + +int lf_sleep(interval_t sleep_duration) { + interval_t sleep_until = rdtime64() + sleep_duration; + return _lf_sleep_common(sleep_until, false); +} + +/** + * Initialize the LF clock. 
+ */ +void _lf_initialize_clock() { + // FlexPRET clock does not require any initialization +} + +int lf_disable_interrupts_nested() { + // In the special case where this function is called during an interrupt + // subroutine (isr) it should have no effect + if ((read_csr(CSR_STATUS) & 0x04) == 0x04) + return 0; + + uint32_t hartid = read_hartid(); + + fp_assert(critical_section_num_nested[hartid] >= 0, "Number of nested critical sections less than zero."); + if (critical_section_num_nested[hartid]++ == 0) { + fp_interrupt_disable(); + } + return 0; +} + +int lf_enable_interrupts_nested() { + // In the special case where this function is called during an interrupt + // subroutine (isr) it should have no effect + if ((read_csr(CSR_STATUS) & 0x04) == 0x04) + return 0; + + uint32_t hartid = read_hartid(); + + if (--critical_section_num_nested[hartid] == 0) { + fp_interrupt_enable(); + } + fp_assert(critical_section_num_nested[hartid] >= 0, "Number of nested critical sections less than zero."); + return 0; +} + +/** + * Pause execution for a number of nanoseconds. + * + * @return 0 for success, or -1 for failure. In case of failure, errno will be + * set appropriately (see `man 2 clock_nanosleep`). + */ +int lf_nanosleep(interval_t requested_time) { return lf_sleep(requested_time); } + +#if defined(LF_SINGLE_THREADED) + +int _lf_single_threaded_notify_of_event() { + _lf_async_event_occurred = true; + return 0; +} + +#else // Multi threaded + +int lf_available_cores() { + return FP_THREADS - 1; // Return the number of Flexpret HW threads +} + +lf_thread_t lf_thread_self() { + // Not implemented. + return NULL; +} + +int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) { + /** + * Need to select between HRTT or SRTT; see + * https://github.com/lf-lang/reactor-c/issues/421 + */ + return fp_thread_create(HRTT, thread, lf_thread, arguments); +} + +int lf_thread_join(lf_thread_t thread, void** thread_return) { return fp_thread_join(thread, thread_return); } + +int lf_mutex_init(lf_mutex_t* mutex) { + *mutex = (lf_mutex_t)FP_LOCK_INITIALIZER; + return 0; +} + +int lf_mutex_lock(lf_mutex_t* mutex) { + fp_lock_acquire(mutex); + return 0; +} + +int lf_mutex_unlock(lf_mutex_t* mutex) { + fp_lock_release(mutex); + return 0; +} + +int lf_cond_init(lf_cond_t* cond, lf_mutex_t* mutex) { + *cond = (lf_cond_t)FP_COND_INITIALIZER(mutex); + return 0; +} + +int lf_cond_broadcast(lf_cond_t* cond) { return fp_cond_broadcast(cond); } + +int lf_cond_signal(lf_cond_t* cond) { return fp_cond_signal(cond); } + +int lf_cond_wait(lf_cond_t* cond) { return fp_cond_wait(cond); } + +int _lf_cond_timedwait(lf_cond_t* cond, instant_t absolute_time_ns) { + return (fp_cond_timed_wait(cond, absolute_time_ns) == FP_TIMEOUT) ? LF_TIMEOUT : 0; +} + +int lf_thread_id() { return read_hartid(); } + +void initialize_lf_thread_id() { + // Nothing needed here; thread ID's are already available in harware registers + // which can be fetched with `read_hartid`. +} +#endif + +#endif // PLATFORM_FLEXPRET diff --git a/low_level_platform/impl/src/lf_linux_support.c b/low_level_platform/impl/src/lf_linux_support.c index fb2d19cba..a8caa3d47 100644 --- a/low_level_platform/impl/src/lf_linux_support.c +++ b/low_level_platform/impl/src/lf_linux_support.c @@ -25,30 +25,115 @@ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ***************/ -/** +/**i * @brief Platform support for the Linux operating system. 
* * @author{Soroush Bateni } * @author{Marten Lohstroh } + * @author{Erling Jellum } */ +#define _GNU_SOURCE // Needed to get access to Linux thread-scheduling API #include "platform/lf_linux_support.h" +#include "platform/lf_platform_util.h" #include "low_level_platform.h" +#include "platform/lf_unix_clock_support.h" + #if defined LF_SINGLE_THREADED #include "lf_os_single_threaded_support.c" -#endif - -#if !defined LF_SINGLE_THREADED -#if __STDC_VERSION__ < 201112L || defined(__STDC_NO_THREADS__) -// (Not C++11 or later) or no threads support -#include "lf_POSIX_threads_support.c" #else -#include "lf_C11_threads_support.c" -#endif -#endif +#include "lf_POSIX_threads_support.c" -#include "platform/lf_unix_clock_support.h" +int lf_thread_set_cpu(lf_thread_t thread, size_t cpu_number) { + // Create a CPU-set consisting of only the desired CPU + cpu_set_t cpu_set; + CPU_ZERO(&cpu_set); + CPU_SET(cpu_number, &cpu_set); + + return pthread_setaffinity_np(thread, sizeof(cpu_set), &cpu_set); +} + +int lf_thread_set_priority(lf_thread_t thread, int priority) { + int posix_policy, min_pri, max_pri, final_priority, res; + struct sched_param schedparam; + + if (priority > LF_SCHED_MAX_PRIORITY || priority < LF_SCHED_MIN_PRIORITY) { + return -1; + } + + // Get the current scheduling policy + res = pthread_getschedparam(thread, &posix_policy, &schedparam); + if (res != 0) { + return res; + } + + min_pri = sched_get_priority_min(posix_policy); + max_pri = sched_get_priority_max(posix_policy); + if (min_pri == -1 || max_pri == -1) { + return -1; + } + + final_priority = map_priorities(priority, min_pri, max_pri); + if (final_priority < 0) { + return -1; + } + + return pthread_setschedprio(thread, final_priority); +} + +int lf_thread_set_scheduling_policy(lf_thread_t thread, lf_scheduling_policy_t* policy) { + int posix_policy, res; + bool set_priority; + struct sched_param schedparam; + + // Get the current scheduling policy + res = pthread_getschedparam(thread, &posix_policy, &schedparam); + if (res != 0) { + return res; + } + + // Update the policy, and initially set the priority to max. + // The priority value is later updated. Initializing it + // is just to avoid code duplication. + switch (policy->policy) { + case LF_SCHED_FAIR: + posix_policy = SCHED_OTHER; + schedparam.sched_priority = 0; + set_priority = false; + break; + case LF_SCHED_TIMESLICE: + posix_policy = SCHED_RR; + schedparam.sched_priority = sched_get_priority_max(SCHED_RR); + set_priority = true; + break; + case LF_SCHED_PRIORITY: + posix_policy = SCHED_FIFO; + schedparam.sched_priority = sched_get_priority_max(SCHED_FIFO); + set_priority = true; + break; + default: + return -1; + break; + } + + // Write it back + res = pthread_setschedparam(thread, posix_policy, &schedparam); + if (res != 0) { + return res; + } + + // Set the priority of we chose a RT scheduler + if (set_priority) { + res = lf_thread_set_priority(thread, policy->priority); + if (res != 0) { + return res; + } + } + + return 0; +} +#endif int lf_sleep(interval_t sleep_duration) { const struct timespec tp = convert_ns_to_timespec(sleep_duration); diff --git a/low_level_platform/impl/src/lf_macos_support.c b/low_level_platform/impl/src/lf_macos_support.c index 4cb3002cd..bf96362cb 100644 --- a/low_level_platform/impl/src/lf_macos_support.c +++ b/low_level_platform/impl/src/lf_macos_support.c @@ -36,15 +36,17 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
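The Linux implementation above wires the new threading API to pthreads and the sched(7) interface. The following is an illustrative usage sketch only: the priority value 50 and CPU number 1 are arbitrary choices, and switching to a real-time policy on Linux typically requires elevated privileges.

```c
#include "low_level_platform.h"

// Request FIFO scheduling for the calling thread, pinned to one CPU.
static int example_make_self_realtime(void) {
  lf_thread_t self = lf_thread_self();

  // Pin the calling thread to CPU 1 (arbitrary choice for this example).
  if (lf_thread_set_cpu(self, 1) != 0) {
    return -1;
  }

  lf_scheduling_policy_t policy = {
      .policy = LF_SCHED_PRIORITY, // Corresponds to SCHED_FIFO on Linux.
      .priority = 50,              // Within LF_SCHED_MIN_PRIORITY..LF_SCHED_MAX_PRIORITY (0..99).
      .time_slice = 0              // Only relevant for LF_SCHED_TIMESLICE.
  };
  // Returns 0 on success, a platform-specific error code (or -1) otherwise.
  return lf_thread_set_scheduling_policy(self, &policy);
}
```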
#if defined LF_SINGLE_THREADED #include "lf_os_single_threaded_support.c" -#endif - -#if !defined LF_SINGLE_THREADED -#if __STDC_VERSION__ < 201112L || defined(__STDC_NO_THREADS__) -// (Not C++11 or later) or no threads support -#include "lf_POSIX_threads_support.c" #else -#include "lf_C11_threads_support.c" -#endif +#include "lf_POSIX_threads_support.c" + +/** + * Real-time scheduling API not implemented for macOS. + */ +int lf_thread_set_cpu(lf_thread_t thread, size_t cpu_number) { return -1; } + +int lf_thread_set_priority(lf_thread_t thread, int priority) { return -1; } + +int lf_thread_set_scheduling_policy(lf_thread_t thread, lf_scheduling_policy_t* policy) { return -1; } #endif #include "platform/lf_unix_clock_support.h" diff --git a/low_level_platform/impl/src/lf_nrf52_support.c b/low_level_platform/impl/src/lf_nrf52_support.c index f0147a7d3..a5765e859 100644 --- a/low_level_platform/impl/src/lf_nrf52_support.c +++ b/low_level_platform/impl/src/lf_nrf52_support.c @@ -39,9 +39,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include #include "platform/lf_nrf52_support.h" -#include "../platform.h" -#include "../utils/util.h" -#include "../tag.h" +#include "low_level_platform.h" +#include "tag.h" #include "nrf.h" #include "nrfx_timer.h" @@ -74,9 +73,9 @@ static const nrfx_timer_t g_lf_timer_inst = NRFX_TIMER_INSTANCE(3); static volatile uint32_t _lf_time_us_high = 0; /** - * Flag passed to sd_nvic_critical_region_* + * Flag used to count nested interrupt disables. */ -uint8_t _lf_nested_region = 0; +static volatile uint8_t _lf_nested_count = 0; /** * @brief Handle LF timer interrupts @@ -95,7 +94,7 @@ void lf_timer_event_handler(nrf_timer_event_t event_type, void* p_context) { if (event_type == NRF_TIMER_EVENT_COMPARE2) { _lf_sleep_interrupted = false; } else if (event_type == NRF_TIMER_EVENT_COMPARE3) { - _lf_time_us_high = +1; + _lf_time_us_high += 1; } } @@ -193,13 +192,19 @@ static void lf_busy_wait_until(instant_t wakeup_time) { } /** - * @brief Sleep until the given wakeup time. There are a couple of edge cases to consider + * @brief Sleep until the given wakeup time. + * + * There are a couple of edge cases to consider: * 1. Wakeup time is already past * 2. Implied sleep duration is below `LF_MAX_SLEEP_NS` threshold * 3. Implied sleep duration is above `LF_MAX_SLEEP_NS` limit * + * This function assumes the caller is in a critical section, so interrupts are disabled. + * It may exit the critical section while waiting for an event, but it will re-enter the + * critical section before returning. + * * @param wakeup_time The time instant at which to wake up. - * @return int 0 if sleep completed, or -1 if it was interrupted. + * @return 0 if sleep completed, or -1 if it was interrupted. */ int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_time) { instant_t now; @@ -258,23 +263,41 @@ int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_ti if (!_lf_async_event) { return 0; } else { - LF_PRINT_DEBUG("Sleep got interrupted...\n"); + // LF_PRINT_DEBUG("Sleep got interrupted...\n"); return -1; } } +// Definition required by sd_nvic_critical_region_enter() and exit() below. +nrf_nvic_state_t nrf_nvic_state = {0}; + /** * @brief Enter critical section. Let NRF Softdevice handle nesting - * @return int + * @return 0 */ -int lf_enable_interrupts_nested() { return sd_nvic_critical_region_enter(&_lf_nested_region); } +int lf_enable_interrupts_nested() { + if (_lf_nested_count == 0) + return 1; // Error. 
Interrupts have not been disabled. + _lf_nested_count--; + return sd_nvic_critical_region_exit(0); + // FIXME: If softdevice is not enabled, do the following instead of above: + // __enable_irq(); + // return 0; +} /** * @brief Exit citical section. Let NRF SoftDevice handle nesting * * @return int */ -int lf_disable_interrupts_nested() { return sd_nvic_critical_region_exit(_lf_nested_region); } +int lf_disable_interrupts_nested() { + _lf_nested_count++; + uint8_t success = 0; + return sd_nvic_critical_region_enter(&success); + // FIXME: If softdevice is not enabled, do the following instead of the above: + // __disable_irq(); + // return 0; +} /** * @brief Set global flag to true so that sleep will return when woken diff --git a/low_level_platform/impl/src/lf_platform_util.c b/low_level_platform/impl/src/lf_platform_util.c new file mode 100644 index 000000000..0225aa423 --- /dev/null +++ b/low_level_platform/impl/src/lf_platform_util.c @@ -0,0 +1,26 @@ +#include "low_level_platform.h" +#include "platform/lf_platform_util.h" + +int map_priorities(int priority, int dest_min, int dest_max) { + // Check if priority is within the legal range + if (priority < LF_SCHED_MIN_PRIORITY || priority > LF_SCHED_MAX_PRIORITY) { + return -1; + } + + // Perform the linear mapping. Since we are working with integers, it is + // important to multiply before we divide + return dest_min + (((priority - LF_SCHED_MIN_PRIORITY) * (dest_max - dest_min)) / + (LF_SCHED_MAX_PRIORITY - LF_SCHED_MIN_PRIORITY)); +} + +#ifndef PLATFORM_ZEPHYR // on Zephyr, this is handled separately +#ifndef LF_SINGLE_THREADED +static int _lf_worker_thread_count = 0; + +static thread_local int lf_thread_id_var = -1; + +int lf_thread_id() { return lf_thread_id_var; } + +void initialize_lf_thread_id() { lf_thread_id_var = lf_atomic_fetch_add32(&_lf_worker_thread_count, 1); } +#endif +#endif diff --git a/low_level_platform/impl/src/lf_rp2040_support.c b/low_level_platform/impl/src/lf_rp2040_support.c index 88940dd94..a3f0237aa 100644 --- a/low_level_platform/impl/src/lf_rp2040_support.c +++ b/low_level_platform/impl/src/lf_rp2040_support.c @@ -31,13 +31,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * @author{Abhi Gundrala } */ -#if !defined(LF_SINGLE_THREADED) -#error "Only the single-threaded runtime has support for RP2040" -#endif - #include "platform/lf_rp2040_support.h" #include "low_level_platform.h" -#include "utils/util.h" #include "tag.h" #include @@ -67,6 +62,8 @@ static uint32_t _lf_num_nested_crit_sec = 0; */ void _lf_initialize_clock(void) { // init stdio lib + // may fail, but failure may be ok/expected if printing is not needed + // (i.e. if neither USB nor UART are enabled) stdio_init_all(); // init sync structs critical_section_init(&_lf_crit_sec); @@ -137,15 +134,15 @@ int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_ti sem_reset(&_lf_sem_irq_event, 0); // create us boot wakeup time target = from_us_since_boot((uint64_t)(wakeup_time / 1000)); - // allow interrupts - LF_CRITICAL_SECTION_EXIT(env); + // Enable interrupts. + lf_critical_section_exit(env); // blocked sleep // return on timeout or on processor event if (sem_acquire_block_until(&_lf_sem_irq_event, target)) { ret_code = -1; } - // remove interrupts - LF_CRITICAL_SECTION_ENTER(env); + // Disable interrupts. 
+ lf_critical_section_enter(env); return ret_code; } @@ -161,7 +158,7 @@ int lf_disable_interrupts_nested() { return 1; } // check crit sec count - // enter non-rentrant state by disabling interrupts + // enter non-reentrant state by disabling interrupts // lock second core execution if (_lf_num_nested_crit_sec == 0) { // block if associated spin lock in use @@ -202,9 +199,120 @@ int lf_enable_interrupts_nested() { */ int _lf_single_threaded_notify_of_event() { // notify main sleep loop of event - sem_release(&_lf_sem_irq_event); + if (sem_release(&_lf_sem_irq_event)) { + return 0; + } + return 1; +} + +#else // LF_SINGLE_THREADED + +#warning "Threaded runtime on RP2040 is still experimental" + +/** + * @brief Get the number of cores on the host machine. + */ +int lf_available_cores() { return 2; } + +static void* (*thread_1)(void*); +static void* thread_1_args; +static int num_create_threads_called = 0; +static semaphore_t thread_1_done; +static void* thread_1_return; + +#define MAGIC_THREAD1_ID 314159 + +void core1_entry() { + thread_1_return = thread_1(thread_1_args); + sem_reset(&thread_1_done, 1); +} + +int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) { + // make sure this fn is only called once + if (num_create_threads_called != 0) { + return 1; + } + thread_1 = lf_thread; + thread_1_args = arguments; + num_create_threads_called += 1; + sem_init(&thread_1_done, 0, 1); + multicore_launch_core1(core1_entry); + *thread = MAGIC_THREAD1_ID; + return 0; +} + +int lf_thread_join(lf_thread_t thread, void** thread_return) { + if (thread != MAGIC_THREAD1_ID) { + return 1; + } + sem_acquire_blocking(&thread_1_done); + // release in case join is called again + if (!sem_release(&thread_1_done)) { + // shouldn't be possible; lf_thread_join is only called from main thread + return 1; + } + if (thread_return) { + *thread_return = thread_1_return; + } + return 0; +} + +int lf_mutex_init(lf_mutex_t* mutex) { + recursive_mutex_init(mutex); + return 0; +} + +int lf_mutex_lock(lf_mutex_t* mutex) { + recursive_mutex_enter_blocking(mutex); + return 0; +} + +int lf_mutex_unlock(lf_mutex_t* mutex) { + recursive_mutex_exit(mutex); return 0; } -#endif // LF_SINGLE_THREADED + +// condition variables "notify" threads using a semaphore per core. +// although there are only two cores, may not use just a single semaphore +// as a cond_broadcast may be called from within an interrupt +int lf_cond_init(lf_cond_t* cond, lf_mutex_t* mutex) { + for (int i = 0; i < NUM_CORES; i++) { + sem_init(&(cond->notifs[i]), 0, 1); + } + cond->mutex = mutex; + return 0; +} + +int lf_cond_broadcast(lf_cond_t* cond) { + for (int i = 0; i < NUM_CORES; i++) { + sem_reset(&(cond->notifs[i]), 1); + } + return 0; +} + +int lf_cond_signal(lf_cond_t* cond) { + return lf_cond_broadcast(cond); // spurious wakeups, but that's ok +} + +int lf_cond_wait(lf_cond_t* cond) { + semaphore_t* mailbox = &(cond->notifs[get_core_num()]); + lf_mutex_unlock(cond->mutex); + sem_acquire_blocking(mailbox); + lf_mutex_lock(cond->mutex); + return 0; +} + +int _lf_cond_timedwait(lf_cond_t* cond, instant_t absolute_time_ns) { + semaphore_t* mailbox = &(cond->notifs[get_core_num()]); + absolute_time_t a = from_us_since_boot(absolute_time_ns / 1000); + bool acquired_permit = sem_acquire_block_until(mailbox, a); + return acquired_permit ? 
0 : LF_TIMEOUT; +} + +void initialize_lf_thread_id() {} + +int lf_thread_id() { return get_core_num(); } + +#endif // !LF_SINGLE_THREADED #endif // PLATFORM_RP2040 diff --git a/low_level_platform/impl/src/lf_unix_syscall_support.c b/low_level_platform/impl/src/lf_unix_syscall_support.c deleted file mode 100644 index 992824c33..000000000 --- a/low_level_platform/impl/src/lf_unix_syscall_support.c +++ /dev/null @@ -1,16 +0,0 @@ -#if defined(PLATFORM_Linux) || defined(PLATFORM_Darwin) -/** - * @file lf_unix_syscall_support.c - * @author Soroush Bateni (soroush@utdallas.edu) - * @brief Platform support for syscalls in Unix-like systems. - * @version 0.1 - * @date 2022-03-09 - * - * @copyright Copyright (c) 2022 The University of Texas at Dallas - * - */ - -#include - -int lf_available_cores() { return (int)sysconf(_SC_NPROCESSORS_ONLN); } -#endif diff --git a/low_level_platform/impl/src/lf_windows_support.c b/low_level_platform/impl/src/lf_windows_support.c index 1cdadc43c..61424ac7f 100644 --- a/low_level_platform/impl/src/lf_windows_support.c +++ b/low_level_platform/impl/src/lf_windows_support.c @@ -162,6 +162,8 @@ int lf_available_cores() { return sysinfo.dwNumberOfProcessors; } +lf_thread_t lf_thread_self() { return GetCurrentThread(); } + int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) { uintptr_t handle = _beginthreadex(NULL, 0, lf_thread, arguments, 0, NULL); *thread = (HANDLE)handle; @@ -187,6 +189,15 @@ int lf_thread_join(lf_thread_t thread, void** thread_return) { return 0; } +/** + * Real-time scheduling API not implemented for Windows. + */ +int lf_thread_set_cpu(lf_thread_t thread, size_t cpu_number) { return -1; } + +int lf_thread_set_priority(lf_thread_t thread, int priority) { return -1; } + +int lf_thread_set_scheduling_policy(lf_thread_t thread, lf_scheduling_policy_t* policy) { return -1; } + int lf_mutex_init(_lf_critical_section_t* critical_section) { // Set up a recursive mutex InitializeCriticalSection((PCRITICAL_SECTION)critical_section); diff --git a/low_level_platform/impl/src/lf_zephyr_clock_kernel.c b/low_level_platform/impl/src/lf_zephyr_clock_kernel.c index 8c1f5ac1a..e23332f81 100644 --- a/low_level_platform/impl/src/lf_zephyr_clock_kernel.c +++ b/low_level_platform/impl/src/lf_zephyr_clock_kernel.c @@ -36,6 +36,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include +#include #include "platform/lf_zephyr_support.h" #include "low_level_platform.h" @@ -86,6 +87,10 @@ int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup) { return 0; } + // Reset the semaphore. This is safe to do before we leave the critical + // section. + k_sem_reset(&sleeping_sem); + if (lf_critical_section_exit(env)) { lf_print_error_and_exit("Failed to exit critical section."); } @@ -96,11 +101,18 @@ int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup) { lf_print_error_and_exit("Failed to exit critical section."); } - if (res < 0 || async_event == true) { + if (res == 0) { + // We got the semaphore, this means there should be a new event + if (!async_event) { + lf_print_warning("Sleep was interrupted, but no new event"); + } async_event = false; return -1; - } else { + } else if (res == -EAGAIN) { + // This means we timed out and have reached our wakeup instant. 
return 0; + } else { + lf_print_error_and_exit("k_sem_take returned %d", res); } } diff --git a/low_level_platform/impl/src/lf_zephyr_support.c b/low_level_platform/impl/src/lf_zephyr_support.c index ff5c37e59..5e5efb82d 100644 --- a/low_level_platform/impl/src/lf_zephyr_support.c +++ b/low_level_platform/impl/src/lf_zephyr_support.c @@ -33,6 +33,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "platform/lf_zephyr_support.h" #include "platform/lf_zephyr_board_support.h" +#include "platform/lf_platform_util.h" #include "low_level_platform.h" #include "tag.h" @@ -93,7 +94,9 @@ int lf_enable_interrupts_nested() { #define NUMBER_OF_WATCHDOGS 0 #endif -#define NUMBER_OF_THREADS (NUMBER_OF_WORKERS + USER_THREADS + NUMBER_OF_WATCHDOGS) +// Number of additional threads that will be created +// One worker will run on the main thread, so for N workers, only (N - 1) worker threads should be created +#define NUMBER_OF_THREADS ((NUMBER_OF_WORKERS - 1) + USER_THREADS + NUMBER_OF_WATCHDOGS) K_MUTEX_DEFINE(thread_mutex); @@ -154,6 +157,56 @@ void initialize_lf_thread_id() { int lf_thread_id() { return *((int*)k_thread_custom_data_get()); } +lf_thread_t lf_thread_self() { return k_current_get(); } + +int lf_thread_set_cpu(lf_thread_t thread, size_t cpu_number) { return k_thread_cpu_pin(thread, cpu_number); } + +/** + * Real-time scheduling API + */ +int lf_thread_set_priority(lf_thread_t thread, int priority) { + int final_priority; + if (priority > LF_SCHED_MAX_PRIORITY || priority < LF_SCHED_MIN_PRIORITY) { + return -1; + } + + final_priority = map_priorities(priority, CONFIG_NUM_PREEMPT_PRIORITIES - 1, 0); + if (final_priority < 0) { + return -1; + } + + k_thread_priority_set(thread, final_priority); + return 0; +} + +int lf_thread_set_scheduling_policy(lf_thread_t thread, lf_scheduling_policy_t* policy) { + // Update the policy + switch (policy->policy) { + break; + case LF_SCHED_TIMESLICE: { + // This sets timeslicing for all threads on all priorities. I.e. it is not + // set on a per-thread basis. 
+ k_sched_time_slice_set(policy->time_slice / MSEC(1), 0); + if (lf_thread_set_priority(thread, policy->priority) != 0) { + return -1; + } + break; + } + case LF_SCHED_PRIORITY: { + if (lf_thread_set_priority(thread, policy->priority) != 0) { + return -1; + } + break; + } + case LF_SCHED_FAIR: + default: + return -1; + break; + } + + return 0; +} + int lf_mutex_init(lf_mutex_t* mutex) { return k_mutex_init(mutex); } int lf_mutex_lock(lf_mutex_t* mutex) { diff --git a/low_level_platform/impl/src/platform_internal.c b/low_level_platform/impl/src/platform_internal.c deleted file mode 100644 index fc14c9f22..000000000 --- a/low_level_platform/impl/src/platform_internal.c +++ /dev/null @@ -1,13 +0,0 @@ -#include "low_level_platform.h" - -#ifndef PLATFORM_ZEPHYR // on Zephyr, this is handled separately -#ifndef LF_SINGLE_THREADED -static int _lf_worker_thread_count = 0; - -static thread_local int lf_thread_id_var = -1; - -int lf_thread_id() { return lf_thread_id_var; } - -void initialize_lf_thread_id() { lf_thread_id_var = lf_atomic_fetch_add32(&_lf_worker_thread_count, 1); } -#endif -#endif diff --git a/platform/impl/CMakeLists.txt b/platform/impl/CMakeLists.txt index cef66b5ef..32753e7eb 100644 --- a/platform/impl/CMakeLists.txt +++ b/platform/impl/CMakeLists.txt @@ -5,6 +5,10 @@ if(${CMAKE_SYSTEM_NAME} STREQUAL "Zephyr") zephyr_library_named(lf-platform-impl) zephyr_library_sources(${LF_PLATFORM_FILES}) zephyr_library_link_libraries(kernel) +elseif(${CMAKE_SYSTEM_NAME} STREQUAL "FlexPRET") + add_library(lf-platform-impl STATIC) + target_sources(lf-platform-impl PUBLIC ${LF_PLATFORM_FILES}) + target_link_libraries(lf-platform-impl PRIVATE fp-sdk) else() add_library(lf-platform-impl STATIC) target_sources(lf-platform-impl PUBLIC ${LF_PLATFORM_FILES}) @@ -13,3 +17,13 @@ endif() add_library(lf::platform-impl ALIAS lf-platform-impl) target_link_libraries(lf-platform-impl PRIVATE lf::low-level-platform-api) target_link_libraries(lf-platform-impl PRIVATE lf::platform-api) +message(STATUS "Applying preprocessor definitions to platform...") +macro(platform_define X) + if(DEFINED ${X}) + message(STATUS ${X}=${${X}}) + target_compile_definitions(lf-platform-impl PUBLIC ${X}=${${X}}) + endif(DEFINED ${X}) +endmacro() +platform_define(LF_SINGLE_THREADED) +platform_define(LOG_LEVEL) +platform_define(MODAL_REACTORS) diff --git a/python/include/pythontarget.h b/python/include/pythontarget.h index effbe0344..a828e0689 100644 --- a/python/include/pythontarget.h +++ b/python/include/pythontarget.h @@ -102,6 +102,22 @@ PyObject* py_schedule_copy(PyObject* self, PyObject* args); */ PyObject* py_request_stop(PyObject* self, PyObject* args); +/** + * @brief Return the source directory path (where the main .lf file is) as a string. + * @param self The lf object. + * @param args Empty. + * @return PyObject* A Python string. + */ +PyObject* py_source_directory(PyObject* self, PyObject* args); + +/** + * @brief Return the root project directory path as a string. + * @param self The lf object. + * @param args Empty. + * @return PyObject* A Python string. 
+ */ +PyObject* py_package_directory(PyObject* self, PyObject* args); + ////////////////////////////////////////////////////////////// ///////////// Main function callable from Python code PyObject* py_main(PyObject* self, PyObject* args); diff --git a/python/lib/pythontarget.c b/python/lib/pythontarget.c index a485efaf5..3a3e7b2b4 100644 --- a/python/lib/pythontarget.c +++ b/python/lib/pythontarget.c @@ -162,6 +162,26 @@ PyObject* py_request_stop(PyObject* self, PyObject* args) { return Py_None; } +PyObject* py_source_directory(PyObject* self, PyObject* args) { +#ifndef LF_SOURCE_DIRECTORY + // This should not occur. + PyErr_SetString(PyExc_RuntimeError, "LF_SOURCE_DIRECTORY constant is not defined."); + return NULL; +#else + return PyUnicode_DecodeFSDefault(LF_SOURCE_DIRECTORY); +#endif +} + +PyObject* py_package_directory(PyObject* self, PyObject* args) { +#ifndef LF_PACKAGE_DIRECTORY + // This should not occur. + PyErr_SetString(PyExc_RuntimeError, "LF_PACKAGE_DIRECTORY constant is not defined."); + return NULL; +#else + return PyUnicode_DecodeFSDefault(LF_PACKAGE_DIRECTORY); +#endif +} + /** * Parse Python's 'argv' (from sys.argv()) into a pair of C-style * 'argc' (the size of command-line parameters array) @@ -299,12 +319,15 @@ PyObject* py_main(PyObject* self, PyObject* py_args) { * @see schedule_copy * @see request_stop */ -static PyMethodDef GEN_NAME(MODULE_NAME, _methods)[] = {{"start", py_main, METH_VARARGS, NULL}, - {"schedule_copy", py_schedule_copy, METH_VARARGS, NULL}, - {"tag", py_lf_tag, METH_NOARGS, NULL}, - {"tag_compare", py_tag_compare, METH_VARARGS, NULL}, - {"request_stop", py_request_stop, METH_NOARGS, "Request stop"}, - {NULL, NULL, 0, NULL}}; +static PyMethodDef GEN_NAME(MODULE_NAME, _methods)[] = { + {"start", py_main, METH_VARARGS, NULL}, + {"schedule_copy", py_schedule_copy, METH_VARARGS, NULL}, + {"tag", py_lf_tag, METH_NOARGS, NULL}, + {"tag_compare", py_tag_compare, METH_VARARGS, NULL}, + {"request_stop", py_request_stop, METH_NOARGS, "Request stop"}, + {"source_directory", py_source_directory, METH_NOARGS, "Source directory path for .lf file"}, + {"package_directory", py_package_directory, METH_NOARGS, "Root package directory path"}, + {NULL, NULL, 0, NULL}}; /** * Define the Lingua Franca module. diff --git a/tag/api/tag.h b/tag/api/tag.h index c903aaf53..c40e490f8 100644 --- a/tag/api/tag.h +++ b/tag/api/tag.h @@ -104,6 +104,15 @@ tag_t lf_tag(void* env); */ tag_t lf_tag_add(tag_t a, tag_t b); +/** + * @brief Return the sum of an interval and an instant, saturating on overflow and underflow. + * + * @param a + * @param b + * @return instant_t + */ +instant_t lf_time_add(instant_t a, interval_t b); + /** * Compare two tags. Return -1 if the first is less than * the second, 0 if they are equal, and +1 if the first is diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt deleted file mode 100644 index 89ed5d967..000000000 --- a/test/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -add_library(test-lib STATIC src_gen_stub.c rand_utils.c) -target_link_libraries(test-lib PRIVATE lf::low-level-platform-api) diff --git a/test/Tests.cmake b/test/Tests.cmake index 4130b7c09..a5b8c2ccc 100644 --- a/test/Tests.cmake +++ b/test/Tests.cmake @@ -1,10 +1,12 @@ # This adds all tests in the test directory. include(CTest) -set(TestLib test-lib) set(TEST_DIR ${CMAKE_CURRENT_SOURCE_DIR}/test) set(TEST_SUFFIX test.c) # Files that are tests must have names ending with TEST_SUFFIX. set(LF_ROOT ${CMAKE_CURRENT_LIST_DIR}/..) 
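The lf_time_add() function declared in tag/api/tag.h above adds an interval to an instant without wrapping around on overflow. A minimal sketch of such a saturating addition follows, assuming 64-bit signed time values; the actual implementation in the runtime may differ in detail.

```c
#include <stdint.h>

// Saturating addition: clamp to INT64_MAX / INT64_MIN (the FOREVER / NEVER
// sentinels) instead of wrapping on overflow or underflow.
static int64_t saturating_time_add(int64_t a, int64_t b) {
  if (b > 0 && a > INT64_MAX - b) {
    return INT64_MAX;
  }
  if (b < 0 && a < INT64_MIN - b) {
    return INT64_MIN;
  }
  return a + b;
}
```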
+set(TEST_MOCK_SRCS ${TEST_DIR}/src_gen_stub.c ${TEST_DIR}/rand_utils.c) + +include(${LF_ROOT}/core/lf_utils.cmake) # Add the test files found in DIR to TEST_FILES. function(add_test_dir DIR) @@ -21,75 +23,24 @@ endfunction() # Add the appropriate directories for the provided build parameters. add_test_dir(${TEST_DIR}/general) if(NUMBER_OF_WORKERS) - add_test_dir(${TEST_DIR}/multithreaded) -else() - add_test_dir(${TEST_DIR}/single-threaded) + if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + add_test_dir(${TEST_DIR}/scheduling) + endif() endif(NUMBER_OF_WORKERS) # Create executables for each test. foreach(FILE ${TEST_FILES}) string(REGEX REPLACE "[./]" "_" NAME ${FILE}) - add_executable(${NAME} ${TEST_DIR}/${FILE}) + add_executable(${NAME} ${TEST_DIR}/${FILE} ${TEST_MOCK_SRCS}) add_test(NAME ${NAME} COMMAND ${NAME}) + # This is needed for the tests to use the threading API declared in + # low_level_platform.h. Ideally this would not be needed. + target_link_libraries(${NAME} PRIVATE lf::low-level-platform-impl) target_link_libraries( - ${NAME} PUBLIC - ${CoreLib} ${Lib} ${TestLib} + ${NAME} PRIVATE + ${CoreLib} ${Lib} ) target_include_directories(${NAME} PRIVATE ${TEST_DIR}) + # Warnings as errors + lf_enable_compiler_warnings(${NAME}) endforeach(FILE ${TEST_FILES}) - -# Add the test for the RTI. -if (NOT DEFINED LF_SINGLE_THREADED) - # Check which system we are running on to select the correct platform support - # file and assign the file's path to LF_PLATFORM_FILE - # FIXME: This is effectively a second build script for the RTI that we have to maintain. This is code duplication. - # FIXME: We should not be reaching into the platform directory and bypassing its CMake build. - if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") - set(LF_PLATFORM_FILE ${LF_ROOT}/low_level_platform/impl/src/lf_linux_support.c) - elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin") - set(LF_PLATFORM_FILE ${LF_ROOT}/low_level_platform/impl/src/lf_macos_support.c) - else() - message(FATAL_ERROR "Your platform is not supported! RTI supports Linux and MacOS.") - endif() - - set(IncludeDir include/core) - - set(RTI_DIR ${CoreLibPath}/federated/RTI) - add_executable( - rti_common_test - ${TEST_DIR}/RTI/rti_common_test.c - ${RTI_DIR}/rti_common.c - ${RTI_DIR}/rti_remote.c - ${CoreLibPath}/tracepoint.c - ${LF_PLATFORM_FILE} - ${LF_ROOT}/low_level_platform/impl/src/platform_internal.c - ${LF_ROOT}/low_level_platform/impl/src/lf_atomic_gcc_clang.c - ${LF_ROOT}/low_level_platform/impl/src/lf_unix_clock_support.c - ${CoreLibPath}/utils/util.c - ${CoreLibPath}/tag.c - ${CoreLibPath}/clock.c - ${CoreLibPath}/federated/network/net_util.c - ${CoreLibPath}/utils/pqueue_base.c - ${CoreLibPath}/utils/pqueue_tag.c - ${CoreLibPath}/utils/pqueue.c - ) - add_test(NAME rti_common_test COMMAND rti_common_test) - target_include_directories(rti_common_test PUBLIC ${RTI_DIR}) - target_include_directories(rti_common_test PUBLIC ${IncludeDir}) - target_include_directories(rti_common_test PUBLIC ${IncludeDir}/federated) - target_include_directories(rti_common_test PUBLIC ${IncludeDir}/modal_models) - target_link_libraries(rti_common_test lf::low-level-platform-api) - target_link_libraries(rti_common_test lf::logging-api) - target_include_directories(rti_common_test PUBLIC ${IncludeDir}/utils) - # Set the STANDALONE_RTI flag to include the rti_remote and rti_common. 
- target_compile_definitions(rti_common_test PUBLIC STANDALONE_RTI=1) - - # Set FEDERATED to get federated compilation support - target_compile_definitions(rti_common_test PUBLIC FEDERATED=1) - - target_compile_definitions(rti_common_test PUBLIC PLATFORM_${CMAKE_SYSTEM_NAME}) - - # Find threads and link to it - find_package(Threads REQUIRED) - target_link_libraries(rti_common_test Threads::Threads) -endif() diff --git a/test/general/tag_test.c b/test/general/tag_test.c index aff2d6875..24fb7b174 100644 --- a/test/general/tag_test.c +++ b/test/general/tag_test.c @@ -2,7 +2,7 @@ #include #include "lf_types.h" -int main(int argc, char** argv) { +int main() { char* buf = malloc(sizeof(char) * 128); lf_readable_time(buf, 0); printf("%s", buf); diff --git a/test/general/utils/hashmap_test.c b/test/general/utils/hashmap_test.c index 28e8deb27..c0fd25443 100644 --- a/test/general/utils/hashmap_test.c +++ b/test/general/utils/hashmap_test.c @@ -35,7 +35,7 @@ void test_get(hashmap_object2int_t* h) { if (desired.value != found) { // It is possible that two distinct values were associated with the same key. Search the // "mock" array to check if this is the case. - for (size_t i = mock_size - 1; i >= 0; i--) { + for (int i = (int)mock_size - 1; i >= 0; i--) { if (mock[i].key == desired.key) { if (mock[i].value == found) return; // Everything is OK. diff --git a/test/general/utils/hashset_test.c b/test/general/utils/hashset_test.c index 4b425b848..1fffcf4c1 100644 --- a/test/general/utils/hashset_test.c +++ b/test/general/utils/hashset_test.c @@ -199,7 +199,7 @@ static void test_fill_with_deleted_items() { hashset_destroy(set); } -int main(int argc, char* argv[]) { +int main() { trivial(); test_gaps(); test_exceptions(); @@ -207,8 +207,6 @@ int main(int argc, char* argv[]) { test_iterating(); test_fill_with_deleted_items(); - (void)argc; - (void)argv; printf("Tests passed.\n"); return 0; } diff --git a/test/general/utils/pqueue_test.c b/test/general/utils/pqueue_test.c index da8e2c6b7..665c4e13f 100644 --- a/test/general/utils/pqueue_test.c +++ b/test/general/utils/pqueue_test.c @@ -47,7 +47,7 @@ static void find_from_queue(pqueue_tag_t* q) { } static void insert_if_no_match(pqueue_tag_t* q) { - int size = pqueue_tag_size(q); + size_t size = pqueue_tag_size(q); tag_t t1 = {.time = USEC(3), .microstep = 0}; tag_t t4 = {.time = USEC(1), .microstep = 2}; // Return value is non-zero on failure to insert: @@ -84,7 +84,7 @@ static void remove_from_queue(pqueue_tag_t* q, pqueue_tag_element_t* e1, pqueue_ assert(pqueue_tag_size(q) == 1); } -int main(int argc, char* argv[]) { +int main() { trivial(); // Create an event queue. pqueue_tag_t* q = pqueue_tag_init(2); diff --git a/test/rand_utils.c b/test/rand_utils.c index 49c1b5230..39b0aadab 100644 --- a/test/rand_utils.c +++ b/test/rand_utils.c @@ -12,7 +12,7 @@ */ void perturb(int* src, size_t size, int* out) { out[size - 1] = src[size - 1]; - for (int a = 0; a < size - 1; a += 2) { + for (size_t a = 0; a < size - 1; a += 2) { int min = src[a] < src[a + 1] ? src[a] : src[a + 1]; int diff = rand() % (min * 2) - min; out[a] = src[a] + diff; diff --git a/test/scheduling/scheduling_api_test.c b/test/scheduling/scheduling_api_test.c new file mode 100644 index 000000000..3315fe0e8 --- /dev/null +++ b/test/scheduling/scheduling_api_test.c @@ -0,0 +1,85 @@ +/** + * This tests the real-time scheduling API implementation in Linux. 
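+ * It exercises lf_thread_set_cpu, lf_thread_set_scheduling_policy, and
+ * lf_thread_set_priority, including checks that invalid arguments are rejected.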
+ */
+#include
+#include
+#include "core/utils/util.h"
+#include "low_level_platform.h"
+
+#if !defined PLATFORM_Linux
+#error scheduling_api_test.c should only be compiled on Linux
+#endif
+
+int main() {
+  int res;
+
+  // Set the CPU set of the current thread.
+  res = lf_thread_set_cpu(lf_thread_self(), lf_available_cores() - 1);
+  if (res != 0) {
+    lf_print_error_and_exit("lf_thread_set_cpu failed with %d", res);
+  }
+
+  // Configure SCHED_FIFO
+  {
+    lf_scheduling_policy_t cfg;
+    cfg.policy = LF_SCHED_PRIORITY;
+    cfg.priority = 99;
+    cfg.time_slice = 0;
+    res = lf_thread_set_scheduling_policy(lf_thread_self(), &cfg);
+    if (res != 0) {
+      lf_print_error_and_exit("lf_thread_set_scheduling_policy FIFO failed with %d", res);
+    }
+  }
+
+  // Configure SCHED_RR
+  {
+    lf_scheduling_policy_t cfg;
+    cfg.policy = LF_SCHED_TIMESLICE;
+    cfg.priority = 99;
+    cfg.time_slice = 0;
+    res = lf_thread_set_scheduling_policy(lf_thread_self(), &cfg);
+    if (res != 0) {
+      lf_print_error_and_exit("lf_thread_set_scheduling_policy RR failed with %d", res);
+    }
+  }
+
+  // Try illegal priority
+  {
+    lf_scheduling_policy_t cfg;
+    cfg.policy = LF_SCHED_TIMESLICE;
+    cfg.time_slice = 0;
+    cfg.priority = 10000;
+    res = lf_thread_set_scheduling_policy(lf_thread_self(), &cfg);
+    if (res == 0) {
+      lf_print_error_and_exit("lf_thread_set_scheduling_policy should have failed with illegal priority");
+    }
+  }
+
+  // Set the priority
+  res = lf_thread_set_priority(lf_thread_self(), 50);
+  if (res != 0) {
+    lf_print_error_and_exit("lf_thread_set_priority failed with %d", res);
+  }
+
+  // Try negative priority
+  res = lf_thread_set_priority(lf_thread_self(), -50);
+  if (res == 0) {
+    lf_print_error_and_exit("lf_thread_set_priority should have failed for -50");
+  }
+
+  // Configure back to SCHED_OTHER
+  {
+    lf_scheduling_policy_t cfg;
+    cfg.policy = LF_SCHED_FAIR;
+    res = lf_thread_set_scheduling_policy(lf_thread_self(), &cfg);
+    if (res != 0) {
+      lf_print_error_and_exit("lf_thread_set_scheduling_policy FAIR failed with %d", res);
+    }
+  }
+
+  // Try pinning to non-existent CPU core.
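+  // lf_available_cores() returns the number of cores, so it is one past the largest valid CPU id.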
+ res = lf_thread_set_cpu(lf_thread_self(), lf_available_cores()); + if (res == 0) { + lf_print_error_and_exit("lf_thread_set_cpu should fail for too high CPU id"); + } +} diff --git a/test/src_gen_stub.c b/test/src_gen_stub.c index 1e4630a6e..919d14b43 100644 --- a/test/src_gen_stub.c +++ b/test/src_gen_stub.c @@ -14,8 +14,8 @@ environment_t _env; void _lf_initialize_trigger_objects(void) {} void lf_terminate_execution(void) {} void lf_set_default_command_line_options(void) {} -void _lf_initialize_watchdogs(environment_t** envs) {} -void logical_tag_complete(tag_t tag_to_send) {} +void _lf_initialize_watchdogs(environment_t** envs) { (void)envs; } +void logical_tag_complete(tag_t tag_to_send) { (void)tag_to_send; } int _lf_get_environments(environment_t** envs) { *envs = &_env; return 1; diff --git a/trace/api/CMakeLists.txt b/trace/api/CMakeLists.txt index c639096ea..2c0edc677 100644 --- a/trace/api/CMakeLists.txt +++ b/trace/api/CMakeLists.txt @@ -1,3 +1,5 @@ add_library(lf-trace-api INTERFACE) add_library(lf::trace-api ALIAS lf-trace-api) +include(${CMAKE_CURRENT_LIST_DIR}/types/CMakeLists.txt) +target_link_libraries(lf-trace-api INTERFACE lf::trace-api-types) target_include_directories(lf-trace-api INTERFACE ${CMAKE_CURRENT_LIST_DIR}) diff --git a/trace/api/trace.h b/trace/api/trace.h index 614eda541..e5223c036 100644 --- a/trace/api/trace.h +++ b/trace/api/trace.h @@ -1,6 +1,10 @@ #ifndef TRACE_H #define TRACE_H +#ifdef __cplusplus +extern "C" { +#endif + #include #include @@ -10,7 +14,7 @@ * @brief Return a description of the compile-time properties of the current * plugin. */ -version_t lf_version_tracing(); +const version_t* lf_version_tracing(); /** * Identifier for what is in the object table. @@ -48,14 +52,15 @@ typedef struct { * @brief Initialize the tracing module. Calling other API functions before * calling this procedure is undefined behavior. * - * @param file_name_prefix Prefix to attach to any files that may be produced by - * the tracing module. + * @param process_name The name of the current federate, or a placeholder if this is not a federate. + * @param process_names The names of all federates, separated by commas, or NULL + * if that information is not available. * @param process_id The ID of the current federate, or -1 if this is the RTI. 0 * if unfederated. * @param max_num_local_threads An upper bound on the number of threads created * by this process. */ -void lf_tracing_global_init(char* file_name_prefix, int process_id, int max_num_local_threads); +void lf_tracing_global_init(char* process_name, char* process_names, int process_id, int max_num_local_threads); /** * @brief Register a kind of trace event. This should be called before * tracepoints are reached. 
@@ -81,4 +86,8 @@ void lf_tracing_tracepoint(int worker, trace_record_nodeps_t* tr); */ void lf_tracing_global_shutdown(); +#ifdef __cplusplus +} +#endif + #endif // TRACE_H diff --git a/trace/api/types/CMakeLists.txt b/trace/api/types/CMakeLists.txt new file mode 100644 index 000000000..6576ab87a --- /dev/null +++ b/trace/api/types/CMakeLists.txt @@ -0,0 +1,3 @@ +add_library(lf-trace-api-types INTERFACE) +add_library(lf::trace-api-types ALIAS lf-trace-api-types) +target_include_directories(lf-trace-api-types INTERFACE ${CMAKE_CURRENT_LIST_DIR}) diff --git a/trace/api/types/trace_types.h b/trace/api/types/trace_types.h new file mode 100644 index 000000000..3be4d92b1 --- /dev/null +++ b/trace/api/types/trace_types.h @@ -0,0 +1,146 @@ +/** + * @file trace-types.h + * @author Peter Donovan + * @brief Definitions that are needed by both implementors and callers of the + * trace API regardless of whether tracing is enabled at compile time. + * + * @copyright Copyright (c) 2024 + */ + +#ifndef TRACE_TYPES_H +#define TRACE_TYPES_H + +/** + * Trace event types. If you update this, be sure to update the + * string representation below. Also, create a tracepoint function + * for each event type. + */ +typedef enum { + reaction_starts, + reaction_ends, + reaction_deadline_missed, + schedule_called, + user_event, + user_value, + worker_wait_starts, + worker_wait_ends, + scheduler_advancing_time_starts, + scheduler_advancing_time_ends, + federated, // Everything below this is for tracing federated interactions. + // Sending messages + send_ACK, + send_FAILED, + send_TIMESTAMP, + send_NET, + send_LTC, + send_STOP_REQ, + send_STOP_REQ_REP, + send_STOP_GRN, + send_FED_ID, + send_PTAG, + send_TAG, + send_REJECT, + send_RESIGN, + send_PORT_ABS, + send_CLOSE_RQ, + send_TAGGED_MSG, + send_P2P_TAGGED_MSG, + send_MSG, + send_P2P_MSG, + send_ADR_AD, + send_ADR_QR, + // Receiving messages + receive_ACK, + receive_FAILED, + receive_TIMESTAMP, + receive_NET, + receive_LTC, + receive_STOP_REQ, + receive_STOP_REQ_REP, + receive_STOP_GRN, + receive_FED_ID, + receive_PTAG, + receive_TAG, + receive_REJECT, + receive_RESIGN, + receive_PORT_ABS, + receive_CLOSE_RQ, + receive_TAGGED_MSG, + receive_P2P_TAGGED_MSG, + receive_MSG, + receive_P2P_MSG, + receive_ADR_AD, + receive_ADR_QR, + receive_UNIDENTIFIED, + send_STOP, + receive_STOP, + NUM_EVENT_TYPES +} trace_event_t; + +/** + * String description of event types. 
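+ * The number and order of entries must match trace_event_t above.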
+ */ +static const char* trace_event_names[] = { + "Reaction starts", + "Reaction ends", + "Reaction deadline missed", + "Schedule called", + "User-defined event", + "User-defined valued event", + "Worker wait starts", + "Worker wait ends", + "Scheduler advancing time starts", + "Scheduler advancing time ends", + "Federated marker", + // Sending messages + "Sending ACK", + "Sending FAILED", + "Sending TIMESTAMP", + "Sending NET", + "Sending LTC", + "Sending STOP_REQ", + "Sending STOP_REQ_REP", + "Sending STOP_GRN", + "Sending FED_ID", + "Sending PTAG", + "Sending TAG", + "Sending REJECT", + "Sending RESIGN", + "Sending PORT_ABS", + "Sending CLOSE_RQ", + "Sending TAGGED_MSG", + "Sending P2P_TAGGED_MSG", + "Sending MSG", + "Sending P2P_MSG", + "Sending ADR_AD", + "Sending ADR_QR", + // Receiving messages + "Receiving ACK", + "Receiving FAILED", + "Receiving TIMESTAMP", + "Receiving NET", + "Receiving LTC", + "Receiving STOP_REQ", + "Receiving STOP_REQ_REP", + "Receiving STOP_GRN", + "Receiving FED_ID", + "Receiving PTAG", + "Receiving TAG", + "Receiving REJECT", + "Receiving RESIGN", + "Receiving PORT_ABS", + "Receiving CLOSE_RQ", + "Receiving TAGGED_MSG", + "Receiving P2P_TAGGED_MSG", + "Receiving MSG", + "Receiving P2P_MSG", + "Receiving ADR_AD", + "Receiving ADR_QR", + "Receiving UNIDENTIFIED", + "Sending STOP", + "Receiving STOP", +}; + +static inline void _suppress_unused_variable_warning_for_static_variable() { (void)trace_event_names; } + +#endif diff --git a/trace/impl/src/trace_impl.c b/trace/impl/src/trace_impl.c index 7f79c49a5..895247e87 100644 --- a/trace/impl/src/trace_impl.c +++ b/trace/impl/src/trace_impl.c @@ -24,6 +24,17 @@ static lf_platform_mutex_ptr_t trace_mutex; static trace_t trace; static int process_id; static int64_t start_time; +static version_t version = {.build_config = + { + .single_threaded = TRIBOOL_DOES_NOT_MATTER, +#ifdef NDEBUG + .build_type_is_debug = TRIBOOL_FALSE, +#else + .build_type_is_debug = TRIBOOL_TRUE, +#endif + .log_level = LOG_LEVEL, + }, + .core_version_name = NULL}; // PRIVATE HELPERS *********************************************************** @@ -192,21 +203,7 @@ static void stop_trace(trace_t* trace) { // IMPLEMENTATION OF VERSION API ********************************************* -version_t lf_version_tracing() { - return (version_t){ - .build_config = - (build_config_t){ - .single_threaded = TRIBOOL_DOES_NOT_MATTER, -#ifdef NDEBUG - .build_type_is_debug = TRIBOOL_FALSE, -#else - .build_type_is_debug = TRIBOOL_TRUE, -#endif - .log_level = LOG_LEVEL, - }, - .core_version_name = NULL, - }; -} +const version_t* lf_version_tracing() { return &version; } // IMPLEMENTATION OF TRACE API *********************************************** @@ -253,7 +250,8 @@ void lf_tracing_tracepoint(int worker, trace_record_nodeps_t* tr) { } } -void lf_tracing_global_init(char* file_name_prefix, int fedid, int max_num_local_threads) { +void lf_tracing_global_init(char* process_name, char* process_names, int fedid, int max_num_local_threads) { + (void)process_names; trace_mutex = lf_platform_mutex_new(); if (!trace_mutex) { fprintf(stderr, "WARNING: Failed to initialize trace mutex.\n"); @@ -261,10 +259,10 @@ void lf_tracing_global_init(char* file_name_prefix, int fedid, int max_num_local } process_id = fedid; char filename[100]; - if (strcmp(file_name_prefix, "rti") == 0) { - sprintf(filename, "%s.lft", file_name_prefix); + if (strcmp(process_name, "rti") == 0) { + sprintf(filename, "%s.lft", process_name); } else { - sprintf(filename, "%s%d.lft", file_name_prefix, 
process_id); + sprintf(filename, "%s_%d.lft", process_name, process_id); } trace_new(filename); start_trace(&trace, max_num_local_threads); diff --git a/util/tracing/Makefile b/util/tracing/Makefile index 436087752..0cda03c7d 100644 --- a/util/tracing/Makefile +++ b/util/tracing/Makefile @@ -11,6 +11,7 @@ CFLAGS= -I$(REACTOR_C)/include/core/ \ -I$(REACTOR_C)/low_level_platform/api \ -I$(REACTOR_C)/tag/api \ -I$(REACTOR_C)/trace/api \ + -I$(REACTOR_C)/trace/api/types \ -I$(REACTOR_C)/version/api \ -I$(REACTOR_C)/logging/api \ -I$(REACTOR_C)/trace/impl/include \ diff --git a/util/tracing/codegen/CMakeLists.txt b/util/tracing/codegen/CMakeLists.txt new file mode 100644 index 000000000..5866a2ab3 --- /dev/null +++ b/util/tracing/codegen/CMakeLists.txt @@ -0,0 +1,9 @@ +cmake_minimum_required(VERSION 3.13) +project(TracepointToRs LANGUAGES C) +add_executable(tracepoint-to-rs ${CMAKE_CURRENT_LIST_DIR}/src/tracepoint_to_rs.c) +set(LF_ROOT ${CMAKE_CURRENT_LIST_DIR}/../../..) +include(${LF_ROOT}/trace/api/CMakeLists.txt) +include(${LF_ROOT}/version/api/CMakeLists.txt) +target_link_libraries(tracepoint-to-rs PUBLIC lf::trace-api) +target_link_libraries(tracepoint-to-rs PUBLIC lf::version-api) +target_link_libraries(tracepoint-to-rs PUBLIC lf::trace-api-types) diff --git a/util/tracing/codegen/src/tracepoint_to_rs.c b/util/tracing/codegen/src/tracepoint_to_rs.c new file mode 100644 index 000000000..9505c36df --- /dev/null +++ b/util/tracing/codegen/src/tracepoint_to_rs.c @@ -0,0 +1,105 @@ +#include +#include +#include +#include + +#include "trace.h" +#include "trace_types.h" + +int is_alphanumeric(char c) { return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'); } + +void to_camel_case(char* s) { + int capitalize_next = 1; // Flag to indicate whether the next character should be capitalized + int j = 0; + for (int i = 0; s[i] != '\0'; ++i) { + if (!is_alphanumeric(s[i])) { + capitalize_next = 1; // Treat non-alphanumeric characters as whitespace + } else { + if (capitalize_next) { + s[j] = toupper(s[i]); + capitalize_next = 0; // Reset the flag + } else { + s[j] = tolower(s[i]); // Convert to lowercase if not capitalizing + } + j++; + } + } + s[j] = '\0'; +} + +typedef void (*string_consumer_t)(int, const char*, const char*); + +void print_enum_variant(int idx, const char* camel_case, const char* description) { + printf(" %s = %d,\n", camel_case, idx); +} + +void print_match_case(int idx, const char* camel_case, const char* description) { + printf(" EventType::%s => write!(f, \"%s\"),\n", camel_case, description); +} + +void print_from_int(int idx, const char* camel_case, const char* description) { + printf(" %d => Ok(EventType::%s),\n", idx, camel_case); +} + +void do_for_each_camelcase(string_consumer_t sc) { + for (int i = 0; i < NUM_EVENT_TYPES; i++) { + size_t length = strlen(trace_event_names[i]); + + // Allocate memory for the new string including the null terminator + char* destination = (char*)malloc((length + 1) * sizeof(char)); + + // Check if memory allocation was successful + if (destination == NULL) { + perror("Memory allocation failed"); + exit(1); + } + + // Copy the source string to the newly allocated buffer + strcpy(destination, trace_event_names[i]); + to_camel_case(destination); + sc(i, destination, trace_event_names[i]); + } +} + +void print_display_impl() { + printf("%s\n", "impl std::fmt::Display for EventType {"); + printf("%s\n", " fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {"); + printf("%s\n", " match self {"); + 
do_for_each_camelcase(print_match_case); + printf("%s\n", " }"); + printf("%s\n", " }"); + printf("%s\n", "}"); +} + +void print_rs_enum() { + printf("%s\n", "#[derive(Debug)]"); + printf("%s\n", "pub enum EventType {"); + do_for_each_camelcase(print_enum_variant); + printf("}\n"); +} + +void print_warning() { + printf("%s\n", "/// Do not edit. Code in this file is generated from"); + printf("%s\n", "/// reactor-c/util/tracing/codegen/src/tracepoint_to_rs.c"); +} + +void print_rs_from_int() { + printf("%s\n", "impl EventType {"); + printf("%s\n", " pub fn try_from_int(i: i32) -> Result {"); + printf("%s\n", " match i {"); + do_for_each_camelcase(print_from_int); + printf("%s\n", " _ => Err(\"invalid event type\"),"); + printf("%s\n", " }"); + printf("%s\n", " }"); + printf("%s\n", "}"); +} + +int main() { + print_warning(); + printf("%s", "\n"); + print_rs_enum(); + printf("%s", "\n"); + print_display_impl(); + printf("%s", "\n"); + print_rs_from_int(); +} diff --git a/util/tracing/trace_util.c b/util/tracing/trace_util.c index 5cb698514..ed32c5baa 100644 --- a/util/tracing/trace_util.c +++ b/util/tracing/trace_util.c @@ -62,67 +62,6 @@ typedef struct open_file_t { } open_file_t; open_file_t* _open_files = NULL; -const char* trace_event_names[] = { - "Reaction starts", - "Reaction ends", - "Reaction deadline missed", - "Schedule called", - "User-defined event", - "User-defined valued event", - "Worker wait starts", - "Worker wait ends", - "Scheduler advancing time starts", - "Scheduler advancing time ends", - "Federated marker", - // Sending messages - "Sending ACK", - "Sending FAILED", - "Sending TIMESTAMP", - "Sending NET", - "Sending LTC", - "Sending STOP_REQ", - "Sending STOP_REQ_REP", - "Sending STOP_GRN", - "Sending FED_ID", - "Sending PTAG", - "Sending TAG", - "Sending REJECT", - "Sending RESIGN", - "Sending PORT_ABS", - "Sending CLOSE_RQ", - "Sending TAGGED_MSG", - "Sending P2P_TAGGED_MSG", - "Sending MSG", - "Sending P2P_MSG", - "Sending ADR_AD", - "Sending ADR_QR", - // Receiving messages - "Receiving ACK", - "Receiving FAILED", - "Receiving TIMESTAMP", - "Receiving NET", - "Receiving LTC", - "Receiving STOP_REQ", - "Receiving STOP_REQ_REP", - "Receiving STOP_GRN", - "Receiving FED_ID", - "Receiving PTAG", - "Receiving TAG", - "Receiving REJECT", - "Receiving RESIGN", - "Receiving PORT_ABS", - "Receiving CLOSE_RQ", - "Receiving TAGGED_MSG", - "Receiving P2P_TAGGED_MSG", - "Receiving MSG", - "Receiving P2P_MSG", - "Receiving ADR_AD", - "Receiving ADR_QR", - "Receiving UNIDENTIFIED", - "Sending STOP", - "Receiving STOP", -}; - /** * Function to be invoked upon exiting. */