Skip to content

Commit

Permalink
Fix compiler warnings in Zephyr and FlexPRET support files (#479)
Browse files Browse the repository at this point in the history
* Fix compiler warnings in Zephyr and FlexPRET support files

* Typo in flexpret support

* Change atomics from handling int32 to just int

* More lf_atomic fixes

* atomics

* Update lf-ref

* Fix forgotten atomics int32->int changes

* Do not use vfprintf for Zephyr; it causes strange intermittent memory issues

* Undo cbprintf fix; the real culprit was the number of stacks allocated for threads
  • Loading branch information
erlingrj authored Sep 22, 2024
1 parent d419c9f commit e0eae5f
Show file tree
Hide file tree
Showing 14 changed files with 56 additions and 45 deletions.
6 changes: 3 additions & 3 deletions core/threaded/reactor_threaded.c
Original file line number Diff line number Diff line change
Expand Up @@ -176,15 +176,15 @@ void lf_set_present(lf_port_base_t* port) {
return;
environment_t* env = port->source_reactor->environment;
bool* is_present_field = &port->is_present;
int ipfas = lf_atomic_fetch_add32(&env->is_present_fields_abbreviated_size, 1);
int ipfas = lf_atomic_fetch_add(&env->is_present_fields_abbreviated_size, 1);
if (ipfas < env->is_present_fields_size) {
env->is_present_fields_abbreviated[ipfas] = is_present_field;
}
*is_present_field = true;

// Support for sparse destination multiports.
if (port->sparse_record && port->destination_channel >= 0 && port->sparse_record->size >= 0) {
size_t next = (size_t)lf_atomic_fetch_add32(&port->sparse_record->size, 1);
size_t next = (size_t)lf_atomic_fetch_add(&port->sparse_record->size, 1);
if (next >= port->sparse_record->capacity) {
// Buffer is full. Have to revert to the classic iteration.
port->sparse_record->size = -1;
Expand Down Expand Up @@ -1023,11 +1023,11 @@ int lf_reactor_c_main(int argc, const char* argv[]) {
#endif

LF_PRINT_DEBUG("Start time: " PRINTF_TIME "ns", start_time);
struct timespec physical_time_timespec = {start_time / BILLION, start_time % BILLION};

#ifdef MINIMAL_STDLIB
lf_print("---- Start execution ----");
#else
struct timespec physical_time_timespec = {start_time / BILLION, start_time % BILLION};
lf_print("---- Start execution at time %s---- plus %ld nanoseconds", ctime(&physical_time_timespec.tv_sec),
physical_time_timespec.tv_nsec);
#endif // MINIMAL_STDLIB
Expand Down
4 changes: 2 additions & 2 deletions core/threaded/scheduler_GEDF_NP.c
Original file line number Diff line number Diff line change
Expand Up @@ -228,14 +228,14 @@ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_nu

void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction) {
(void)worker_number; // Suppress unused parameter warning.
if (!lf_atomic_bool_compare_and_swap32((int32_t*)&done_reaction->status, queued, inactive)) {
if (!lf_atomic_bool_compare_and_swap((int*)&done_reaction->status, queued, inactive)) {
lf_print_error_and_exit("Unexpected reaction status: %d. Expected %d.", done_reaction->status, queued);
}
}

void lf_scheduler_trigger_reaction(lf_scheduler_t* scheduler, reaction_t* reaction, int worker_number) {
(void)worker_number; // Suppress unused parameter warning.
if (reaction == NULL || !lf_atomic_bool_compare_and_swap32((int32_t*)&reaction->status, inactive, queued)) {
if (reaction == NULL || !lf_atomic_bool_compare_and_swap((int*)&reaction->status, inactive, queued)) {
return;
}
LF_PRINT_DEBUG("Scheduler: Enqueueing reaction %s, which has level %lld.", reaction->name, LF_LEVEL(reaction->index));
Expand Down
10 changes: 5 additions & 5 deletions core/threaded/scheduler_NP.c
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ static inline void _lf_sched_insert_reaction(lf_scheduler_t* scheduler, reaction
scheduler->indexes[reaction_level] = 0;
}
#endif
int reaction_q_level_index = lf_atomic_fetch_add32((int32_t*)&scheduler->indexes[reaction_level], 1);
int reaction_q_level_index = lf_atomic_fetch_add((int*)&scheduler->indexes[reaction_level], 1);
assert(reaction_q_level_index >= 0);
LF_PRINT_DEBUG("Scheduler: Accessing triggered reactions at the level %zu with index %d.", reaction_level,
reaction_q_level_index);
Expand Down Expand Up @@ -203,7 +203,7 @@ static void _lf_scheduler_try_advance_tag_and_distribute(lf_scheduler_t* schedul
static void _lf_sched_wait_for_work(lf_scheduler_t* scheduler, size_t worker_number) {
// Increment the number of idle workers by 1 and check if this is the last
// worker thread to become idle.
if (lf_atomic_add_fetch32((int32_t*)&scheduler->number_of_idle_workers, 1) == (int)scheduler->number_of_workers) {
if (lf_atomic_add_fetch((int*)&scheduler->number_of_idle_workers, 1) == (int)scheduler->number_of_workers) {
// Last thread to go idle
LF_PRINT_DEBUG("Scheduler: Worker %zu is the last idle thread.", worker_number);
// Call on the scheduler to distribute work or advance tag.
Expand Down Expand Up @@ -322,7 +322,7 @@ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_nu
// the current level (if there is a causality loop)
LF_MUTEX_LOCK(&scheduler->custom_data->array_of_mutexes[current_level]);
#endif
int current_level_q_index = lf_atomic_add_fetch32((int32_t*)&scheduler->indexes[current_level], -1);
int current_level_q_index = lf_atomic_add_fetch((int*)&scheduler->indexes[current_level], -1);
if (current_level_q_index >= 0) {
LF_PRINT_DEBUG("Scheduler: Worker %d popping reaction with level %zu, index "
"for level: %d.",
Expand Down Expand Up @@ -361,7 +361,7 @@ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_nu
*/
void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction) {
(void)worker_number;
if (!lf_atomic_bool_compare_and_swap32((int32_t*)&done_reaction->status, queued, inactive)) {
if (!lf_atomic_bool_compare_and_swap((int*)&done_reaction->status, queued, inactive)) {
lf_print_error_and_exit("Unexpected reaction status: %d. Expected %d.", done_reaction->status, queued);
}
}
Expand All @@ -388,7 +388,7 @@ void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction
void lf_scheduler_trigger_reaction(lf_scheduler_t* scheduler, reaction_t* reaction, int worker_number) {
(void)worker_number;

if (reaction == NULL || !lf_atomic_bool_compare_and_swap32((int32_t*)&reaction->status, inactive, queued)) {
if (reaction == NULL || !lf_atomic_bool_compare_and_swap((int*)&reaction->status, inactive, queued)) {
return;
}
LF_PRINT_DEBUG("Scheduler: Enqueueing reaction %s, which has level %lld.", reaction->name, LF_LEVEL(reaction->index));
Expand Down
14 changes: 7 additions & 7 deletions core/threaded/scheduler_adaptive.c
Original file line number Diff line number Diff line change
Expand Up @@ -207,7 +207,7 @@ static void worker_assignments_free(lf_scheduler_t* scheduler) {
static reaction_t* get_reaction(lf_scheduler_t* scheduler, size_t worker) {
worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments;
#ifndef FEDERATED
int index = lf_atomic_add_fetch32((int32_t*)(worker_assignments->num_reactions_by_worker + worker), -1);
int index = lf_atomic_add_fetch(worker_assignments->num_reactions_by_worker + worker, -1);
if (index >= 0) {
return worker_assignments->reactions_by_worker[worker][index];
}
Expand All @@ -223,9 +223,9 @@ static reaction_t* get_reaction(lf_scheduler_t* scheduler, size_t worker) {
old_num_reactions = current_num_reactions;
if (old_num_reactions <= 0)
return NULL;
} while ((current_num_reactions = lf_atomic_val_compare_and_swap32(
(int32_t*)(worker_assignments->num_reactions_by_worker + worker), old_num_reactions,
(index = old_num_reactions - 1))) != old_num_reactions);
} while ((current_num_reactions =
lf_atomic_val_compare_and_swap(worker_assignments->num_reactions_by_worker + worker, old_num_reactions,
(index = old_num_reactions - 1))) != old_num_reactions);
return worker_assignments->reactions_by_worker[worker][index];
#endif
}
Expand Down Expand Up @@ -282,7 +282,7 @@ static void worker_assignments_put(lf_scheduler_t* scheduler, reaction_t* reacti
hash = hash ^ (hash >> 31);
size_t worker = hash % worker_assignments->num_workers_by_level[level];
size_t num_preceding_reactions =
lf_atomic_fetch_add32((int32_t*)&worker_assignments->num_reactions_by_worker_by_level[level][worker], 1);
lf_atomic_fetch_add(&worker_assignments->num_reactions_by_worker_by_level[level][worker], 1);
worker_assignments->reactions_by_worker_by_level[level][worker][num_preceding_reactions] = reaction;
}

Expand Down Expand Up @@ -383,7 +383,7 @@ static bool worker_states_finished_with_level_locked(lf_scheduler_t* scheduler,
assert(((int64_t)worker_assignments->num_reactions_by_worker[worker]) <= 0);
// Why use an atomic operation when we are supposed to be "as good as locked"? Because I took a
// shortcut, and the shortcut was imperfect.
size_t ret = lf_atomic_add_fetch32((int32_t*)&worker_states->num_loose_threads, -1);
size_t ret = lf_atomic_add_fetch(&worker_states->num_loose_threads, -1);
assert(ret <= worker_assignments->max_num_workers); // Check for underflow
return !ret;
}
Expand Down Expand Up @@ -726,7 +726,7 @@ void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction

void lf_scheduler_trigger_reaction(lf_scheduler_t* scheduler, reaction_t* reaction, int worker_number) {
LF_ASSERT(worker_number >= -1, "Sched: Invalid worker number");
if (!lf_atomic_bool_compare_and_swap32((int32_t*)&reaction->status, inactive, queued))
if (!lf_atomic_bool_compare_and_swap((int*)&reaction->status, inactive, queued))
return;
worker_assignments_put(scheduler, reaction);
}
Expand Down
2 changes: 1 addition & 1 deletion lingua-franca-ref.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
master
zephyr-tests
14 changes: 7 additions & 7 deletions low_level_platform/api/platform/lf_atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,14 +11,14 @@
#include <stdbool.h>

/**
* @brief Atomically fetch a 32bit integer from memory and add a value to it.
* @brief Atomically fetch an integer from memory and add a value to it.
* Return the value that was previously in memory.
*
* @param ptr A pointer to the memory location.
* @param val The value to be added.
* @return The value previously in memory.
*/
int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t val);
int lf_atomic_fetch_add(int* ptr, int val);

/**
* @brief Atomically fetch 64-bit integer from memory and add a value to it.
Expand All @@ -31,14 +31,14 @@ int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t val);
int64_t lf_atomic_fetch_add64(int64_t* ptr, int64_t val);

/**
* @brief Atomically fetch a 32-bit integer from memory and add a value to it.
* @brief Atomically fetch an integer from memory and add a value to it.
* Return the new value of the memory.
*
* @param ptr A pointer to the memory location.
* @param val The value to be added.
* @return The new value in memory.
*/
int32_t lf_atomic_add_fetch32(int32_t* ptr, int32_t val);
int lf_atomic_add_fetch(int* ptr, int val);

/**
* @brief Atomically fetch a 64-bit integer from memory and add a value to it.
Expand All @@ -60,7 +60,7 @@ int64_t lf_atomic_add_fetch64(int64_t* ptr, int64_t val);
* @param newval The value to swap in.
* @return Whether a swap was performed or not.
*/
bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval);
bool lf_atomic_bool_compare_and_swap(int* ptr, int oldval, int newval);

/**
* @brief Atomically perform a compare-and-swap operation on a 64 bit integer in
Expand All @@ -75,7 +75,7 @@ bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t new
bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval);

/**
* @brief Atomically perform a compare-and-swap operation on a 32 bit integer in
* @brief Atomically perform a compare-and-swap operation on an integer in
* memory. If the value in memory is equal to `oldval` replace it with `newval`.
* Return the content of the memory before the potential swap operation is
* performed.
Expand All @@ -85,7 +85,7 @@ bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t new
* @param newval The value to swap in.
* @return The value in memory prior to the swap.
*/
int32_t lf_atomic_val_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval);
int lf_atomic_val_compare_and_swap(int* ptr, int oldval, int newval);

/**
* @brief Atomically perform a compare-and-swap operation on a 64 bit integer in
Expand Down
2 changes: 2 additions & 0 deletions low_level_platform/api/platform/lf_zephyr_support.h
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,8 @@ typedef struct {
} lf_cond_t;
typedef struct k_thread* lf_thread_t;

void _lf_initialize_clock_zephyr_common();

#endif // !LF_SINGLE_THREADED

#endif // LF_ZEPHYR_SUPPORT_H
8 changes: 4 additions & 4 deletions low_level_platform/impl/src/lf_atomic_gcc_clang.c
Original file line number Diff line number Diff line change
Expand Up @@ -11,17 +11,17 @@
#include "platform/lf_atomic.h"
#include "low_level_platform.h"

int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t value) { return __sync_fetch_and_add(ptr, value); }
/** Atomically add `value` to `*ptr`; return the value held before the add. */
int lf_atomic_fetch_add(int* ptr, int value) {
  int previous = __sync_fetch_and_add(ptr, value);
  return previous;
}
/** Atomically add `value` to the 64-bit `*ptr`; return the pre-add value. */
int64_t lf_atomic_fetch_add64(int64_t* ptr, int64_t value) {
  int64_t before = __sync_fetch_and_add(ptr, value);
  return before;
}
int32_t lf_atomic_add_fetch32(int32_t* ptr, int32_t value) { return __sync_add_and_fetch(ptr, value); }
/** Atomically add `value` to `*ptr`; return the updated (post-add) value. */
int lf_atomic_add_fetch(int* ptr, int value) {
  int updated = __sync_add_and_fetch(ptr, value);
  return updated;
}
/** Atomically add `value` to the 64-bit `*ptr`; return the post-add value. */
int64_t lf_atomic_add_fetch64(int64_t* ptr, int64_t value) {
  int64_t updated = __sync_add_and_fetch(ptr, value);
  return updated;
}
bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
/**
 * Atomically compare `*ptr` with `oldval` and, on a match, store `newval`.
 * @return true when the swap was performed, false otherwise.
 */
bool lf_atomic_bool_compare_and_swap(int* ptr, int oldval, int newval) {
  bool swapped = __sync_bool_compare_and_swap(ptr, oldval, newval);
  return swapped;
}
/**
 * 64-bit atomic compare-and-swap: store `newval` in `*ptr` only when the
 * current contents equal `oldval`.
 * @return true when the swap was performed, false otherwise.
 */
bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) {
  bool swapped = __sync_bool_compare_and_swap(ptr, oldval, newval);
  return swapped;
}
int32_t lf_atomic_val_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
/**
 * Atomic compare-and-swap returning the prior contents of `*ptr`.
 * `*ptr` is replaced by `newval` only when it equaled `oldval`.
 */
int lf_atomic_val_compare_and_swap(int* ptr, int oldval, int newval) {
  int observed = __sync_val_compare_and_swap(ptr, oldval, newval);
  return observed;
}
int64_t lf_atomic_val_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) {
Expand Down
10 changes: 5 additions & 5 deletions low_level_platform/impl/src/lf_atomic_irq.c
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,9 @@
int lf_disable_interrupts_nested();
int lf_enable_interrupts_nested();

int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t value) {
int lf_atomic_fetch_add(int* ptr, int value) {
lf_disable_interrupts_nested();
int32_t res = *ptr;
int res = *ptr;
*ptr += value;
lf_enable_interrupts_nested();
return res;
Expand All @@ -33,7 +33,7 @@ int64_t lf_atomic_fetch_add64(int64_t* ptr, int64_t value) {
return res;
}

int32_t lf_atomic_add_fetch32(int32_t* ptr, int32_t value) {
int lf_atomic_add_fetch(int* ptr, int value) {
lf_disable_interrupts_nested();
int res = *ptr + value;
*ptr = res;
Expand All @@ -49,7 +49,7 @@ int64_t lf_atomic_add_fetch64(int64_t* ptr, int64_t value) {
return res;
}

bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
bool lf_atomic_bool_compare_and_swap(int* ptr, int oldval, int newval) {
lf_disable_interrupts_nested();
bool res = false;
if ((*ptr) == oldval) {
Expand All @@ -71,7 +71,7 @@ bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t new
return res;
}

int32_t lf_atomic_val_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
int lf_atomic_val_compare_and_swap(int* ptr, int oldval, int newval) {
lf_disable_interrupts_nested();
int res = *ptr;
if ((*ptr) == oldval) {
Expand Down
8 changes: 4 additions & 4 deletions low_level_platform/impl/src/lf_atomic_windows.c
Original file line number Diff line number Diff line change
Expand Up @@ -10,17 +10,17 @@
#include "platform/lf_atomic.h"
#include <windows.h>

int32_t lf_atomic_fetch_add32(int32_t* ptr, int32_t value) { return InterlockedExchangeAdd(ptr, value); }
// Atomically add `value` to `*ptr`, returning the value that was stored
// before the add (fetch-then-add semantics).
// NOTE(review): InterlockedExchangeAdd takes a volatile LONG*; passing int*
// relies on LONG and int being layout-compatible on Windows — confirm MSVC
// accepts this without an incompatible-pointer warning.
int lf_atomic_fetch_add(int* ptr, int value) { return InterlockedExchangeAdd(ptr, value); }
int64_t lf_atomic_fetch_add64(int64_t* ptr, int64_t value) { return InterlockedExchangeAdd64(ptr, value); }
int32_t lf_atomic_add_fetch32(int32_t* ptr, int32_t value) { return InterlockedAdd(ptr, value); }
// Atomically add `value` to `*ptr`, returning the updated (post-add) value.
// NOTE(review): InterlockedAdd expects volatile LONG*; passing int* assumes
// LONG/int layout compatibility on Windows — verify no compiler warning.
int lf_atomic_add_fetch(int* ptr, int value) { return InterlockedAdd(ptr, value); }
int64_t lf_atomic_add_fetch64(int64_t* ptr, int64_t value) { return InterlockedAdd64(ptr, value); }
bool lf_atomic_bool_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
// Atomic compare-and-swap: when `*ptr` equals `oldval`, replace it with
// `newval`. Returns true iff the swap happened, i.e. the value
// InterlockedCompareExchange observed equaled `oldval`.
bool lf_atomic_bool_compare_and_swap(int* ptr, int oldval, int newval) {
return (InterlockedCompareExchange(ptr, newval, oldval) == oldval);
}
bool lf_atomic_bool_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) {
return (InterlockedCompareExchange64(ptr, newval, oldval) == oldval);
}
int32_t lf_atomic_val_compare_and_swap32(int32_t* ptr, int32_t oldval, int32_t newval) {
// Atomic compare-and-swap that returns the contents of `*ptr` prior to the
// operation; `*ptr` is replaced by `newval` only when it equaled `oldval`.
int lf_atomic_val_compare_and_swap(int* ptr, int oldval, int newval) {
return InterlockedCompareExchange(ptr, newval, oldval);
}
int64_t lf_atomic_val_compare_and_swap64(int64_t* ptr, int64_t oldval, int64_t newval) {
Expand Down
5 changes: 1 addition & 4 deletions low_level_platform/impl/src/lf_flexpret_support.c
Original file line number Diff line number Diff line change
Expand Up @@ -178,10 +178,7 @@ int lf_available_cores() {
return FP_THREADS - 1; // Return the number of Flexpret HW threads
}

lf_thread_t lf_thread_self() {
// Not implemented.
return NULL;
}
// Returns the calling thread's identifier. On FlexPRET this is the hardware
// thread (hart) id obtained from read_hartid().
// NOTE(review): presumably lf_thread_t is an integer-compatible type on this
// platform so the hart id converts cleanly — confirm against the FlexPRET
// platform header.
lf_thread_t lf_thread_self() { return read_hartid(); }

int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) {
/**
Expand Down
2 changes: 1 addition & 1 deletion low_level_platform/impl/src/lf_platform_util.c
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,6 @@ static thread_local int lf_thread_id_var = -1;

// Returns this thread's id, read from the thread-local lf_thread_id_var
// (-1 until initialize_lf_thread_id() has been called on this thread).
int lf_thread_id() { return lf_thread_id_var; }

void initialize_lf_thread_id() { lf_thread_id_var = lf_atomic_fetch_add32(&_lf_worker_thread_count, 1); }
// Assigns the calling thread the next sequential worker id by atomically
// incrementing the shared _lf_worker_thread_count, and stores it in the
// thread-local variable that lf_thread_id() reads back.
void initialize_lf_thread_id() { lf_thread_id_var = lf_atomic_fetch_add(&_lf_worker_thread_count, 1); }
#endif
#endif
1 change: 1 addition & 0 deletions low_level_platform/impl/src/lf_zephyr_clock_kernel.c
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,7 @@ int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup) {
return 0;
} else {
lf_print_error_and_exit("k_sem_take returned %d", res);
return -1;
}
}

Expand Down
15 changes: 13 additions & 2 deletions low_level_platform/impl/src/lf_zephyr_support.c
Original file line number Diff line number Diff line change
Expand Up @@ -36,14 +36,21 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "platform/lf_platform_util.h"
#include "low_level_platform.h"
#include "tag.h"
#include "logging.h"

#include <zephyr/kernel.h>
#include <zephyr/sys/cbprintf.h>

// Keep track of nested critical sections
static uint32_t num_nested_critical_sections = 0;
// Keep track of IRQ mask when entering critical section so we can enable again after
static volatile unsigned irq_mask = 0;

// Catch kernel panics from Zephyr
// Zephyr fatal-error hook: overrides the kernel's default panic handling and
// terminates the LF program with an error message carrying the panic reason.
// The esf (exception stack frame) argument is unused here.
// NOTE(review): `reason` is unsigned int but printed with %d; %u would match
// the type exactly — confirm lf_print_error_and_exit uses printf-style
// format checking.
void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf* esf) {
lf_print_error_and_exit("Zephyr kernel panic reason=%d", reason);
}

int lf_sleep(interval_t sleep_duration) {
k_sleep(K_NSEC(sleep_duration));
return 0;
Expand Down Expand Up @@ -81,8 +88,12 @@ int lf_enable_interrupts_nested() {
// If NUMBER_OF_WORKERS is not specified, or set to 0, then we default to 1.
#if !defined(NUMBER_OF_WORKERS) || NUMBER_OF_WORKERS == 0
#undef NUMBER_OF_WORKERS
#if defined(LF_REACTION_GRAPH_BREADTH)
#define NUMBER_OF_WORKERS LF_REACTION_GRAPH_BREADTH
#else
#define NUMBER_OF_WORKERS 1
#endif
#endif

// If USER_THREADS is not specified, then default to 0.
#if !defined(USER_THREADS)
Expand Down Expand Up @@ -149,9 +160,9 @@ int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* argum
int lf_thread_join(lf_thread_t thread, void** thread_return) { return k_thread_join(thread, K_FOREVER); }

void initialize_lf_thread_id() {
static int _lf_worker_thread_count = 0;
static int32_t _lf_worker_thread_count = 0;
int* thread_id = (int*)malloc(sizeof(int));
*thread_id = lf_atomic_fetch_add32(&_lf_worker_thread_count, 1);
*thread_id = lf_atomic_fetch_add(&_lf_worker_thread_count, 1);
k_thread_custom_data_set(thread_id);
}

Expand Down

0 comments on commit e0eae5f

Please sign in to comment.