From e0eae5fe520a94c2cf198c4a904d7f60d7e34a1e Mon Sep 17 00:00:00 2001
From: erling
Date: Sat, 21 Sep 2024 21:58:19 -0700
Subject: [PATCH] Fix compiler warnings in Zephyr and FlexPRET support files (#479)

* Fix compiler warnings in Zephyr and FlexPRET support files

* Fix typo in FlexPRET support

* Change atomics from handling int32 to just int

* More lf_atomic fixes

* Further atomics cleanup

* Update lf-ref

* Apply forgotten int32->int atomics changes

* Do not use vfprintf for Zephyr; it causes strange intermittent memory issues

* Undo cbprintf fix; the real culprit was the number of stacks allocated for threads
---
 core/threaded/reactor_threaded.c                  |  6 +++---
 core/threaded/scheduler_GEDF_NP.c                 |  4 ++--
 core/threaded/scheduler_NP.c                      | 10 +++++-----
 core/threaded/scheduler_adaptive.c                | 14 +++++++-------
 lingua-franca-ref.txt                             |  2 +-
 low_level_platform/api/platform/lf_atomic.h       | 14 +++++++-------
 .../api/platform/lf_zephyr_support.h              |  2 ++
 low_level_platform/impl/src/lf_atomic_gcc_clang.c |  8 ++++----
 low_level_platform/impl/src/lf_atomic_irq.c       | 10 +++++-----
 low_level_platform/impl/src/lf_atomic_windows.c   |  8 ++++----
 low_level_platform/impl/src/lf_flexpret_support.c |  5 +----
 low_level_platform/impl/src/lf_platform_util.c    |  2 +-
 .../impl/src/lf_zephyr_clock_kernel.c             |  1 +
 low_level_platform/impl/src/lf_zephyr_support.c   | 15 +++++++++++++--
 14 files changed, 56 insertions(+), 45 deletions(-)

diff --git a/core/threaded/reactor_threaded.c b/core/threaded/reactor_threaded.c
index 56a53cffa..df0f271b1 100644
--- a/core/threaded/reactor_threaded.c
+++ b/core/threaded/reactor_threaded.c
@@ -176,7 +176,7 @@ void lf_set_present(lf_port_base_t* port) {
     return;
   environment_t* env = port->source_reactor->environment;
   bool* is_present_field = &port->is_present;
-  int ipfas = lf_atomic_fetch_add32(&env->is_present_fields_abbreviated_size, 1);
+  int ipfas = lf_atomic_fetch_add(&env->is_present_fields_abbreviated_size, 1);
   if (ipfas < env->is_present_fields_size) {
     env->is_present_fields_abbreviated[ipfas] = is_present_field;
   }
@@ -184,7 +184,7 @@ void lf_set_present(lf_port_base_t* port) {
   // Support for sparse destination multiports.
   if (port->sparse_record && port->destination_channel >= 0 && port->sparse_record->size >= 0) {
-    size_t next = (size_t)lf_atomic_fetch_add32(&port->sparse_record->size, 1);
+    size_t next = (size_t)lf_atomic_fetch_add(&port->sparse_record->size, 1);
     if (next >= port->sparse_record->capacity) {
       // Buffer is full. Have to revert to the classic iteration.
       port->sparse_record->size = -1;
@@ -1023,11 +1023,11 @@ int lf_reactor_c_main(int argc, const char* argv[]) {
 #endif
   LF_PRINT_DEBUG("Start time: " PRINTF_TIME "ns", start_time);
-  struct timespec physical_time_timespec = {start_time / BILLION, start_time % BILLION};
 #ifdef MINIMAL_STDLIB
   lf_print("---- Start execution ----");
 #else
+  struct timespec physical_time_timespec = {start_time / BILLION, start_time % BILLION};
   lf_print("---- Start execution at time %s---- plus %ld nanoseconds", ctime(&physical_time_timespec.tv_sec),
            physical_time_timespec.tv_nsec);
 #endif // MINIMAL_STDLIB
diff --git a/core/threaded/scheduler_GEDF_NP.c b/core/threaded/scheduler_GEDF_NP.c
index e77257209..84afee379 100644
--- a/core/threaded/scheduler_GEDF_NP.c
+++ b/core/threaded/scheduler_GEDF_NP.c
@@ -228,14 +228,14 @@ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_nu
 void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction) {
   (void)worker_number; // Suppress unused parameter warning.
- if (!lf_atomic_bool_compare_and_swap32((int32_t*)&done_reaction->status, queued, inactive)) { + if (!lf_atomic_bool_compare_and_swap((int*)&done_reaction->status, queued, inactive)) { lf_print_error_and_exit("Unexpected reaction status: %d. Expected %d.", done_reaction->status, queued); } } void lf_scheduler_trigger_reaction(lf_scheduler_t* scheduler, reaction_t* reaction, int worker_number) { (void)worker_number; // Suppress unused parameter warning. - if (reaction == NULL || !lf_atomic_bool_compare_and_swap32((int32_t*)&reaction->status, inactive, queued)) { + if (reaction == NULL || !lf_atomic_bool_compare_and_swap((int*)&reaction->status, inactive, queued)) { return; } LF_PRINT_DEBUG("Scheduler: Enqueueing reaction %s, which has level %lld.", reaction->name, LF_LEVEL(reaction->index)); diff --git a/core/threaded/scheduler_NP.c b/core/threaded/scheduler_NP.c index 54a611ea8..fd0ccfb04 100644 --- a/core/threaded/scheduler_NP.c +++ b/core/threaded/scheduler_NP.c @@ -77,7 +77,7 @@ static inline void _lf_sched_insert_reaction(lf_scheduler_t* scheduler, reaction scheduler->indexes[reaction_level] = 0; } #endif - int reaction_q_level_index = lf_atomic_fetch_add32((int32_t*)&scheduler->indexes[reaction_level], 1); + int reaction_q_level_index = lf_atomic_fetch_add((int*)&scheduler->indexes[reaction_level], 1); assert(reaction_q_level_index >= 0); LF_PRINT_DEBUG("Scheduler: Accessing triggered reactions at the level %zu with index %d.", reaction_level, reaction_q_level_index); @@ -203,7 +203,7 @@ static void _lf_scheduler_try_advance_tag_and_distribute(lf_scheduler_t* schedul static void _lf_sched_wait_for_work(lf_scheduler_t* scheduler, size_t worker_number) { // Increment the number of idle workers by 1 and check if this is the last // worker thread to become idle. - if (lf_atomic_add_fetch32((int32_t*)&scheduler->number_of_idle_workers, 1) == (int)scheduler->number_of_workers) { + if (lf_atomic_add_fetch((int*)&scheduler->number_of_idle_workers, 1) == (int)scheduler->number_of_workers) { // Last thread to go idle LF_PRINT_DEBUG("Scheduler: Worker %zu is the last idle thread.", worker_number); // Call on the scheduler to distribute work or advance tag. @@ -322,7 +322,7 @@ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_nu // the current level (if there is a causality loop) LF_MUTEX_LOCK(&scheduler->custom_data->array_of_mutexes[current_level]); #endif - int current_level_q_index = lf_atomic_add_fetch32((int32_t*)&scheduler->indexes[current_level], -1); + int current_level_q_index = lf_atomic_add_fetch((int*)&scheduler->indexes[current_level], -1); if (current_level_q_index >= 0) { LF_PRINT_DEBUG("Scheduler: Worker %d popping reaction with level %zu, index " "for level: %d.", @@ -361,7 +361,7 @@ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_nu */ void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction) { (void)worker_number; - if (!lf_atomic_bool_compare_and_swap32((int32_t*)&done_reaction->status, queued, inactive)) { + if (!lf_atomic_bool_compare_and_swap((int*)&done_reaction->status, queued, inactive)) { lf_print_error_and_exit("Unexpected reaction status: %d. 
Expected %d.", done_reaction->status, queued); } } @@ -388,7 +388,7 @@ void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction void lf_scheduler_trigger_reaction(lf_scheduler_t* scheduler, reaction_t* reaction, int worker_number) { (void)worker_number; - if (reaction == NULL || !lf_atomic_bool_compare_and_swap32((int32_t*)&reaction->status, inactive, queued)) { + if (reaction == NULL || !lf_atomic_bool_compare_and_swap((int*)&reaction->status, inactive, queued)) { return; } LF_PRINT_DEBUG("Scheduler: Enqueueing reaction %s, which has level %lld.", reaction->name, LF_LEVEL(reaction->index)); diff --git a/core/threaded/scheduler_adaptive.c b/core/threaded/scheduler_adaptive.c index 1f90c90a6..5a926aba6 100644 --- a/core/threaded/scheduler_adaptive.c +++ b/core/threaded/scheduler_adaptive.c @@ -207,7 +207,7 @@ static void worker_assignments_free(lf_scheduler_t* scheduler) { static reaction_t* get_reaction(lf_scheduler_t* scheduler, size_t worker) { worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments; #ifndef FEDERATED - int index = lf_atomic_add_fetch32((int32_t*)(worker_assignments->num_reactions_by_worker + worker), -1); + int index = lf_atomic_add_fetch(worker_assignments->num_reactions_by_worker + worker, -1); if (index >= 0) { return worker_assignments->reactions_by_worker[worker][index]; } @@ -223,9 +223,9 @@ static reaction_t* get_reaction(lf_scheduler_t* scheduler, size_t worker) { old_num_reactions = current_num_reactions; if (old_num_reactions <= 0) return NULL; - } while ((current_num_reactions = lf_atomic_val_compare_and_swap32( - (int32_t*)(worker_assignments->num_reactions_by_worker + worker), old_num_reactions, - (index = old_num_reactions - 1))) != old_num_reactions); + } while ((current_num_reactions = + lf_atomic_val_compare_and_swap(worker_assignments->num_reactions_by_worker + worker, old_num_reactions, + (index = old_num_reactions - 1))) != old_num_reactions); return worker_assignments->reactions_by_worker[worker][index]; #endif } @@ -282,7 +282,7 @@ static void worker_assignments_put(lf_scheduler_t* scheduler, reaction_t* reacti hash = hash ^ (hash >> 31); size_t worker = hash % worker_assignments->num_workers_by_level[level]; size_t num_preceding_reactions = - lf_atomic_fetch_add32((int32_t*)&worker_assignments->num_reactions_by_worker_by_level[level][worker], 1); + lf_atomic_fetch_add(&worker_assignments->num_reactions_by_worker_by_level[level][worker], 1); worker_assignments->reactions_by_worker_by_level[level][worker][num_preceding_reactions] = reaction; } @@ -383,7 +383,7 @@ static bool worker_states_finished_with_level_locked(lf_scheduler_t* scheduler, assert(((int64_t)worker_assignments->num_reactions_by_worker[worker]) <= 0); // Why use an atomic operation when we are supposed to be "as good as locked"? Because I took a // shortcut, and the shortcut was imperfect. 
- size_t ret = lf_atomic_add_fetch32((int32_t*)&worker_states->num_loose_threads, -1); + size_t ret = lf_atomic_add_fetch(&worker_states->num_loose_threads, -1); assert(ret <= worker_assignments->max_num_workers); // Check for underflow return !ret; } @@ -726,7 +726,7 @@ void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction void lf_scheduler_trigger_reaction(lf_scheduler_t* scheduler, reaction_t* reaction, int worker_number) { LF_ASSERT(worker_number >= -1, "Sched: Invalid worker number"); - if (!lf_atomic_bool_compare_and_swap32((int32_t*)&reaction->status, inactive, queued)) + if (!lf_atomic_bool_compare_and_swap((int*)&reaction->status, inactive, queued)) return; worker_assignments_put(scheduler, reaction); } diff --git a/lingua-franca-ref.txt b/lingua-franca-ref.txt index 1f7391f92..bff3ad964 100644 --- a/lingua-franca-ref.txt +++ b/lingua-franca-ref.txt @@ -1 +1 @@ -master +zephyr-tests diff --git a/low_level_platform/api/platform/lf_atomic.h b/low_level_platform/api/platform/lf_atomic.h index 391678293..e40de9b25 100644 --- a/low_level_platform/api/platform/lf_atomic.h +++ b/low_level_platform/api/platform/lf_atomic.h @@ -11,14 +11,14 @@ #include /** - * @brief Atomically fetch a 32bit integer from memory and add a value to it. + * @brief Atomically fetch an integer from memory and add a value to it. * Return the value that was previously in memory. * * @param ptr A pointer to the memory location. * @param val The value to be added. * @return The value previously in memory. */ -int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t val); +int lf_atomic_fetch_add(int* ptr, int val); /** * @brief Atomically fetch 64-bit integer from memory and add a value to it. @@ -31,14 +31,14 @@ int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t val); int64_t lf_atomic_fetch_add64(int64_t* ptr, int64_t val); /** - * @brief Atomically fetch a 32-bit integer from memory and add a value to it. + * @brief Atomically fetch an integer from memory and add a value to it. * Return the new value of the memory. * * @param ptr A pointer to the memory location. * @param val The value to be added. * @return The new value in memory. */ -int32_t lf_atomic_add_fetch32(int32_t* ptr, int32_t val); +int lf_atomic_add_fetch(int* ptr, int val); /** * @brief Atomically fetch a 64-bit integer from memory and add a value to it. @@ -60,7 +60,7 @@ int64_t lf_atomic_add_fetch64(int64_t* ptr, int64_t val); * @param newval The value to swap in. * @return Whether a swap was performed or not. */ -bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval); +bool lf_atomic_bool_compare_and_swap(int* ptr, int oldval, int newval); /** * @brief Atomically perform a compare-and-swap operation on a 64 bit integer in @@ -75,7 +75,7 @@ bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t new bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval); /** - * @brief Atomically perform a compare-and-swap operation on a 32 bit integer in + * @brief Atomically perform a compare-and-swap operation on an integer in * memory. If the value in memory is equal to `oldval` replace it with `newval`. * Return the content of the memory before the potential swap operation is * performed. @@ -85,7 +85,7 @@ bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t new * @param newval The value to swap in. * @return The value in memory prior to the swap. 
*/ -int32_t lf_atomic_val_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval); +int lf_atomic_val_compare_and_swap(int* ptr, int oldval, int newval); /** * @brief Atomically perform a compare-and-swap operation on a 64 bit integer in diff --git a/low_level_platform/api/platform/lf_zephyr_support.h b/low_level_platform/api/platform/lf_zephyr_support.h index 724bbe4e5..44d91bcbd 100644 --- a/low_level_platform/api/platform/lf_zephyr_support.h +++ b/low_level_platform/api/platform/lf_zephyr_support.h @@ -50,6 +50,8 @@ typedef struct { } lf_cond_t; typedef struct k_thread* lf_thread_t; +void _lf_initialize_clock_zephyr_common(); + #endif // !LF_SINGLE_THREADED #endif // LF_ZEPHYR_SUPPORT_H diff --git a/low_level_platform/impl/src/lf_atomic_gcc_clang.c b/low_level_platform/impl/src/lf_atomic_gcc_clang.c index 30d671a8a..bca144459 100644 --- a/low_level_platform/impl/src/lf_atomic_gcc_clang.c +++ b/low_level_platform/impl/src/lf_atomic_gcc_clang.c @@ -11,17 +11,17 @@ #include "platform/lf_atomic.h" #include "low_level_platform.h" -int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t value) { return __sync_fetch_and_add(ptr, value); } +int lf_atomic_fetch_add(int* ptr, int value) { return __sync_fetch_and_add(ptr, value); } int64_t lf_atomic_fetch_add64(int64_t* ptr, int64_t value) { return __sync_fetch_and_add(ptr, value); } -int32_t lf_atomic_add_fetch32(int32_t* ptr, int32_t value) { return __sync_add_and_fetch(ptr, value); } +int lf_atomic_add_fetch(int* ptr, int value) { return __sync_add_and_fetch(ptr, value); } int64_t lf_atomic_add_fetch64(int64_t* ptr, int64_t value) { return __sync_add_and_fetch(ptr, value); } -bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) { +bool lf_atomic_bool_compare_and_swap(int* ptr, int oldval, int newval) { return __sync_bool_compare_and_swap(ptr, oldval, newval); } bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) { return __sync_bool_compare_and_swap(ptr, oldval, newval); } -int32_t lf_atomic_val_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) { +int lf_atomic_val_compare_and_swap(int* ptr, int oldval, int newval) { return __sync_val_compare_and_swap(ptr, oldval, newval); } int64_t lf_atomic_val_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) { diff --git a/low_level_platform/impl/src/lf_atomic_irq.c b/low_level_platform/impl/src/lf_atomic_irq.c index 3a9d72086..7d78ab445 100644 --- a/low_level_platform/impl/src/lf_atomic_irq.c +++ b/low_level_platform/impl/src/lf_atomic_irq.c @@ -17,9 +17,9 @@ int lf_disable_interrupts_nested(); int lf_enable_interrupts_nested(); -int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t value) { +int lf_atomic_fetch_add(int* ptr, int value) { lf_disable_interrupts_nested(); - int32_t res = *ptr; + int res = *ptr; *ptr += value; lf_enable_interrupts_nested(); return res; @@ -33,7 +33,7 @@ int64_t lf_atomic_fetch_add64(int64_t* ptr, int64_t value) { return res; } -int32_t lf_atomic_add_fetch32(int32_t* ptr, int32_t value) { +int lf_atomic_add_fetch(int* ptr, int value) { lf_disable_interrupts_nested(); int res = *ptr + value; *ptr = res; @@ -49,7 +49,7 @@ int64_t lf_atomic_add_fetch64(int64_t* ptr, int64_t value) { return res; } -bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) { +bool lf_atomic_bool_compare_and_swap(int* ptr, int oldval, int newval) { lf_disable_interrupts_nested(); bool res = false; if ((*ptr) == oldval) { @@ -71,7 +71,7 @@ bool 
lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t new return res; } -int32_t lf_atomic_val_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) { +int lf_atomic_val_compare_and_swap(int* ptr, int oldval, int newval) { lf_disable_interrupts_nested(); int res = *ptr; if ((*ptr) == oldval) { diff --git a/low_level_platform/impl/src/lf_atomic_windows.c b/low_level_platform/impl/src/lf_atomic_windows.c index 1db0fa2de..519d225f6 100644 --- a/low_level_platform/impl/src/lf_atomic_windows.c +++ b/low_level_platform/impl/src/lf_atomic_windows.c @@ -10,17 +10,17 @@ #include "platform/lf_atomic.h" #include -int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t value) { return InterlockedExchangeAdd(ptr, value); } +int lf_atomic_fetch_add(int* ptr, int value) { return InterlockedExchangeAdd(ptr, value); } int64_t lf_atomic_fetch_add64(int64_t* ptr, int64_t value) { return InterlockedExchangeAdd64(ptr, value); } -int32_t lf_atomic_add_fetch32(int32_t* ptr, int32_t value) { return InterlockedAdd(ptr, value); } +int lf_atomic_add_fetch(int* ptr, int value) { return InterlockedAdd(ptr, value); } int64_t lf_atomic_add_fetch64(int64_t* ptr, int64_t value) { return InterlockedAdd64(ptr, value); } -bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) { +bool lf_atomic_bool_compare_and_swap(int* ptr, int oldval, int newval) { return (InterlockedCompareExchange(ptr, newval, oldval) == oldval); } bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) { return (InterlockedCompareExchange64(ptr, newval, oldval) == oldval); } -int32_t lf_atomic_val_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) { +int lf_atomic_val_compare_and_swap(int* ptr, int oldval, int newval) { return InterlockedCompareExchange(ptr, newval, oldval); } int64_t lf_atomic_val_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) { diff --git a/low_level_platform/impl/src/lf_flexpret_support.c b/low_level_platform/impl/src/lf_flexpret_support.c index cf37c1b8a..7fb6d2a48 100644 --- a/low_level_platform/impl/src/lf_flexpret_support.c +++ b/low_level_platform/impl/src/lf_flexpret_support.c @@ -178,10 +178,7 @@ int lf_available_cores() { return FP_THREADS - 1; // Return the number of Flexpret HW threads } -lf_thread_t lf_thread_self() { - // Not implemented. 
-  return NULL;
-}
+lf_thread_t lf_thread_self() { return read_hartid(); }
 
 int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) {
   /**
diff --git a/low_level_platform/impl/src/lf_platform_util.c b/low_level_platform/impl/src/lf_platform_util.c
index 0225aa423..212e6ea83 100644
--- a/low_level_platform/impl/src/lf_platform_util.c
+++ b/low_level_platform/impl/src/lf_platform_util.c
@@ -21,6 +21,6 @@ static thread_local int lf_thread_id_var = -1;
 
 int lf_thread_id() { return lf_thread_id_var; }
 
-void initialize_lf_thread_id() { lf_thread_id_var = lf_atomic_fetch_add32(&_lf_worker_thread_count, 1); }
+void initialize_lf_thread_id() { lf_thread_id_var = lf_atomic_fetch_add(&_lf_worker_thread_count, 1); }
 #endif
 #endif
diff --git a/low_level_platform/impl/src/lf_zephyr_clock_kernel.c b/low_level_platform/impl/src/lf_zephyr_clock_kernel.c
index e23332f81..9dc343bc5 100644
--- a/low_level_platform/impl/src/lf_zephyr_clock_kernel.c
+++ b/low_level_platform/impl/src/lf_zephyr_clock_kernel.c
@@ -113,6 +113,7 @@ int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup) {
     return 0;
   } else {
     lf_print_error_and_exit("k_sem_take returned %d", res);
+    return -1;
   }
 }
 
diff --git a/low_level_platform/impl/src/lf_zephyr_support.c b/low_level_platform/impl/src/lf_zephyr_support.c
index 5e5efb82d..74ae9bf90 100644
--- a/low_level_platform/impl/src/lf_zephyr_support.c
+++ b/low_level_platform/impl/src/lf_zephyr_support.c
@@ -36,14 +36,21 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "platform/lf_platform_util.h"
 #include "low_level_platform.h"
 #include "tag.h"
+#include "logging.h"
 
 #include
+#include
 
 // Keep track of nested critical sections
 static uint32_t num_nested_critical_sections = 0;
 // Keep track of IRQ mask when entering critical section so we can enable again after
 static volatile unsigned irq_mask = 0;
 
+// Catch kernel panics from Zephyr
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf* esf) {
+  lf_print_error_and_exit("Zephyr kernel panic reason=%d", reason);
+}
+
 int lf_sleep(interval_t sleep_duration) {
   k_sleep(K_NSEC(sleep_duration));
   return 0;
@@ -81,8 +88,12 @@ int lf_enable_interrupts_nested() {
 // If NUMBER_OF_WORKERS is not specified, or set to 0, then we default to 1.
 #if !defined(NUMBER_OF_WORKERS) || NUMBER_OF_WORKERS == 0
 #undef NUMBER_OF_WORKERS
+#if defined(LF_REACTION_GRAPH_BREADTH)
+#define NUMBER_OF_WORKERS LF_REACTION_GRAPH_BREADTH
+#else
 #define NUMBER_OF_WORKERS 1
 #endif
+#endif
 
 // If USER_THREADS is not specified, then default to 0.
 #if !defined(USER_THREADS)
@@ -149,9 +160,9 @@ int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* argum
 
 int lf_thread_join(lf_thread_t thread, void** thread_return) { return k_thread_join(thread, K_FOREVER); }
 
 void initialize_lf_thread_id() {
-  static int32_t _lf_worker_thread_count = 0;
+  static int _lf_worker_thread_count = 0;
   int* thread_id = (int*)malloc(sizeof(int));
-  *thread_id = lf_atomic_fetch_add32(&_lf_worker_thread_count, 1);
+  *thread_id = lf_atomic_fetch_add(&_lf_worker_thread_count, 1);
   k_thread_custom_data_set(thread_id);
 }
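
For reference, a minimal usage sketch of the renamed atomics API, based on the
declarations this patch puts in low_level_platform/api/platform/lf_atomic.h.
The function and variable names here are hypothetical, for illustration only;
they are not part of reactor-c.

#include <stdbool.h>
#include <stdint.h>
#include "platform/lf_atomic.h"

static int counter = 0;       // e.g., a queue index shared across workers
static int64_t counter64 = 0; // 64-bit state keeps the explicit 64 suffix

void example_counter_update(void) {
  // Returns the value that was in memory *before* the addition.
  int prev = lf_atomic_fetch_add(&counter, 1);
  // Returns the *new* value in memory after the addition.
  int next = lf_atomic_add_fetch(&counter, 1);
  // Boolean compare-and-swap: true iff the swap was performed. This is the
  // idiom the scheduler hunks above use to move a reaction between the
  // `inactive` and `queued` states exactly once under concurrency.
  if (lf_atomic_bool_compare_and_swap(&counter, next, 0)) {
    // The value was `next` and is now 0.
  }
  // Value compare-and-swap: returns the content of memory prior to the
  // potential swap.
  int seen = lf_atomic_val_compare_and_swap(&counter, 0, 42);
  // Only the plain-int variants lost their 32 suffix; the 64-bit variants
  // keep their names and widths unchanged.
  int64_t prev64 = lf_atomic_fetch_add64(&counter64, 1);
  (void)prev;
  (void)seen;
  (void)prev64;
}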
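
A companion sketch for the Zephyr thread-ID change above: initialize_lf_thread_id()
stores a heap-allocated ID with k_thread_custom_data_set(). Reading it back via
k_thread_custom_data_get() is an assumption about the rest of the Zephyr port (the
call itself is standard Zephyr, gated on CONFIG_THREAD_CUSTOM_DATA=y), and the
function name below is hypothetical.

#include <zephyr/kernel.h>

int example_lf_thread_id(void) {
  // Returns the ID stored by initialize_lf_thread_id(), or -1 for threads
  // that never registered one (thread custom data defaults to NULL).
  int* id = (int*)k_thread_custom_data_get();
  return (id == NULL) ? -1 : *id;
}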