trace: Introduce adaptive rate-limiting/suppression of repeated messages

A small number of recent messages is remembered by the filter, and
repeated messages reported within a short period of time are suppressed.
The solution is compile-time configurable through several Kconfig
options, as documented in Kconfig.
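For illustration, the core of the mechanism is a small per-core window of
recently seen log-entry IDs; a repeat seen within the threshold is counted
instead of emitted. A minimal host-side sketch of that idea follows (names,
sizes and the nonzero-ID assumption are illustrative only; the real
implementation is in the trace core diff below):

#include <stdbool.h>
#include <stdint.h>

#define WINDOW    5    /* cf. CONFIG_TRACE_RECENT_ENTRIES_COUNT */
#define THRESHOLD 1500 /* cf. CONFIG_TRACE_RECENT_TIME_THRESHOLD */

/* One tracked message: its ID and the last time it was seen. */
struct recent { uint32_t id; uint64_t ts; };
static struct recent window[WINDOW]; /* zero-initialized: id 0 = free slot */

/* Return true when the message should be emitted, false when suppressed.
 * IDs are assumed nonzero (in the firmware they are log-entry addresses).
 */
static bool should_emit(uint32_t id, uint64_t now)
{
	struct recent *oldest = &window[0];
	int i;

	for (i = 0; i < WINDOW; i++) {
		/* remember the stalest slot in case we need to reuse it */
		if (window[i].ts < oldest->ts)
			oldest = &window[i];
		/* a repeat inside the threshold is suppressed and refreshed */
		if (window[i].id == id && now - window[i].ts < THRESHOLD) {
			window[i].ts = now;
			return false;
		}
	}

	/* first sighting (or an expired one): track it and let it through */
	oldest->id = id;
	oldest->ts = now;
	return true;
}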

Signed-off-by: Artur Kloniecki <arturx.kloniecki@linux.intel.com>
Author: Artur Kloniecki <arturx.kloniecki@linux.intel.com>
Date:   2020-09-23 18:13:35 +02:00
Committed-by: Liam Girdwood
Parent: b07f2674cc
Commit: fee7d9c660

7 changed files with 210 additions and 36 deletions


@@ -12,3 +12,4 @@ CONFIG_COMP_TDFB=n
 CONFIG_OPTIMIZE_FOR_SIZE=y
 CONFIG_HAVE_AGENT=n
 CONFIG_DEBUG_MEMORY_USAGE_SCAN=n
+CONFIG_TRACE_FILTERING_ADAPTIVE=n


@@ -11,3 +11,4 @@ CONFIG_COMP_ASRC=n
 CONFIG_COMP_TDFB=n
 CONFIG_HAVE_AGENT=n
 CONFIG_DEBUG_MEMORY_USAGE_SCAN=n
+CONFIG_TRACE_FILTERING_ADAPTIVE=n


@@ -13,3 +13,4 @@ CONFIG_COMP_TONE=n
 CONFIG_OPTIMIZE_FOR_SIZE=y
 CONFIG_HAVE_AGENT=n
 CONFIG_DEBUG_MEMORY_USAGE_SCAN=n
+CONFIG_TRACE_FILTERING_ADAPTIVE=n


@@ -11,3 +11,4 @@ CONFIG_COMP_ASRC=n
 CONFIG_COMP_TDFB=n
 CONFIG_HAVE_AGENT=n
 CONFIG_DEBUG_MEMORY_USAGE_SCAN=n
+CONFIG_TRACE_FILTERING_ADAPTIVE=n


@@ -146,12 +146,10 @@ struct sof_ipc_trace_filter_elem *trace_filter_fill(struct sof_ipc_trace_filter_
 int trace_filter_update(const struct trace_filter *elem);
 
 #define _trace_event_with_ids(lvl, class, ctx, id_1, id_2, format, ...) \
-	_log_message(false, lvl, class, ctx, id_1, id_2, \
-		     format, ##__VA_ARGS__)
+	_log_message(trace_log_filtered, false, lvl, class, ctx, id_1, id_2, format, ##__VA_ARGS__)
 
 #define _trace_event_atomic_with_ids(lvl, class, ctx, id_1, id_2, format, ...) \
-	_log_message(true, lvl, class, ctx, id_1, \
-		     id_2, format, ##__VA_ARGS__)
+	_log_message(trace_log_filtered, true, lvl, class, ctx, id_1, id_2, format, ##__VA_ARGS__)
 
 #ifndef CONFIG_LIBRARY
@@ -192,37 +190,33 @@ _thrown_from_macro_BASE_LOG_in_trace_h
 #define STATIC_ASSERT_ARG_SIZE(...) \
 	META_MAP(1, trace_check_size_uint32, __VA_ARGS__)
 
-#define _log_message(atomic, lvl, comp_class, ctx, id_1, id_2, \
-		     format, ...) \
-	do { \
-		_DECLARE_LOG_ENTRY(lvl, format, comp_class, \
-				   META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__)); \
-		STATIC_ASSERT_ARG_SIZE(__VA_ARGS__); \
-		STATIC_ASSERT(_TRACE_EVENT_MAX_ARGUMENT_COUNT >= \
-			      META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__), \
-			      BASE_LOG_ASSERT_FAIL_MSG \
-		); \
-		trace_log_filtered(atomic, &log_entry, ctx, lvl, id_1, id_2, \
-				   META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__), \
-				   ##__VA_ARGS__); \
-	} while (0)
+#define _log_message(log_func, atomic, lvl, comp_class, ctx, id_1, id_2, format, ...) \
+	do { \
+		_DECLARE_LOG_ENTRY(lvl, format, comp_class, \
+				   META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__)); \
+		STATIC_ASSERT_ARG_SIZE(__VA_ARGS__); \
+		STATIC_ASSERT(_TRACE_EVENT_MAX_ARGUMENT_COUNT >= \
+			      META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__), \
+			      BASE_LOG_ASSERT_FAIL_MSG \
+		); \
+		log_func(atomic, &log_entry, ctx, lvl, id_1, id_2, \
+			 META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__), ##__VA_ARGS__); \
+	} while (0)
 
 #else /* CONFIG_LIBRARY */
 
 extern int test_bench_trace;
 char *get_trace_class(uint32_t trace_class);
 
-#define _log_message(atomic, level, comp_class, ctx, id_1, id_2, \
-		     format, ...) \
-	do { \
-		(void)ctx; \
-		(void)id_1; \
-		(void)id_2; \
-		if (test_bench_trace) { \
-			char *msg = "%s " format; \
-			fprintf(stderr, msg, get_trace_class(comp_class), \
-				##__VA_ARGS__); \
-			fprintf(stderr, "\n"); \
-		} \
-	} while (0)
+#define _log_message(log_func, atomic, level, comp_class, ctx, id_1, id_2, format, ...) \
+	do { \
+		(void)ctx; \
+		(void)id_1; \
+		(void)id_2; \
+		if (test_bench_trace) { \
+			char *msg = "%s " format; \
+			fprintf(stderr, msg, get_trace_class(comp_class), ##__VA_ARGS__); \
+			fprintf(stderr, "\n"); \
+		} \
+	} while (0)
 
 #define trace_point(x) do {} while (0)
@@ -274,8 +268,8 @@ static inline int trace_filter_update(const struct trace_filter *filter)
 
 /* error tracing */
 #if CONFIG_TRACEE
 #define _trace_error_with_ids(class, ctx, id_1, id_2, format, ...) \
-	_log_message(true, LOG_LEVEL_CRITICAL, class, ctx, id_1, \
+	_log_message(trace_log_filtered, true, LOG_LEVEL_CRITICAL, class, ctx, id_1, \
 		     id_2, format, ##__VA_ARGS__)
 #define trace_error_with_ids(class, ctx, id_1, id_2, format, ...) \
 	_trace_error_with_ids(class, ctx, id_1, id_2, format, ##__VA_ARGS__)
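The net effect of this hunk: _log_message() now takes the backend log
function as its first argument, so one macro body serves both the filtered
and the unfiltered path. A sketch of the two call styles as they appear in
this diff (class, ctx and the argument values are placeholders):

/* Regular trace macros keep going through the filtering path: */
_log_message(trace_log_filtered, false, LOG_LEVEL_INFO, class, ctx,
	     id_1, id_2, "component %d prepared", comp_id);

/* Trace-internal code can bypass the filters, e.g. the suppression
 * summary emitted in the trace core below, which must not itself be
 * rate-limited:
 */
_log_message(trace_log_unfiltered, false, LOG_LEVEL_INFO, _TRACE_INV_CLASS, &dt_tr,
	     _TRACE_INV_ID, _TRACE_INV_ID, "Suppressed %u similar messages: %pQ",
	     suppress_count, entry_id);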


@@ -31,4 +31,52 @@ config TRACEM
 	help
 	  Sending all traces by mailbox additionally.
 
+config TRACE_FILTERING
+	bool "Trace filtering"
+	depends on TRACE
+	default y
+	help
+	  Filtering of trace messages based on their verbosity level and frequency.
+
+config TRACE_FILTERING_VERBOSITY
+	bool "Filter by verbosity"
+	depends on TRACE_FILTERING
+	default y
+	help
+	  Filtering by log verbosity level, where the maximum verbosity allowed is specified
+	  for each context and may be adjusted at runtime.
+
+config TRACE_FILTERING_ADAPTIVE
+	bool "Adaptive rate limiting"
+	depends on TRACE_FILTERING
+	default y
+	help
+	  Adaptive filtering of trace messages, tracking up to CONFIG_TRACE_RECENT_ENTRIES_COUNT
+	  entries and suppressing all repeated messages for up to
+	  CONFIG_TRACE_RECENT_TIME_THRESHOLD microseconds.
+
+config TRACE_RECENT_ENTRIES_COUNT
+	int "Number of entries considered recent"
+	depends on TRACE_FILTERING_ADAPTIVE
+	default 5
+	help
+	  Recent entries are used to determine whether the currently processed message was
+	  sent in the past. The size of that filtering window affects both effectiveness and
+	  performance: more recent entries filter repetitive messages out more reliably, but
+	  also slightly decrease performance due to the increased number of comparisons
+	  necessary.
+
+config TRACE_RECENT_TIME_THRESHOLD
+	int "Period of time considered recent (microseconds)"
+	depends on TRACE_FILTERING_ADAPTIVE
+	default 1500
+	range 1 TRACE_RECENT_MAX_TIME
+	help
+	  Period of time during which entries are tracked and will be suppressed if reported
+	  again.
+
+config TRACE_RECENT_MAX_TIME
+	int "Maximum period of time that a message can be suppressed (microseconds)"
+	depends on TRACE_FILTERING_ADAPTIVE
+	default 5000000
+	help
+	  Maximum amount of time a message can be suppressed for, due to repeated suppression.
+
 endmenu
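The override files at the top of this diff show one way to use these
options: platforms short on cycles or memory set
CONFIG_TRACE_FILTERING_ADAPTIVE=n. A board could equally keep the feature
and only retune the window; a hypothetical override fragment (values are
illustrative, not recommendations):

# Keep adaptive rate limiting, but widen the window and
# shorten the suppression period.
CONFIG_TRACE_FILTERING_ADAPTIVE=y
CONFIG_TRACE_RECENT_ENTRIES_COUNT=8
CONFIG_TRACE_RECENT_TIME_THRESHOLD=1000
CONFIG_TRACE_RECENT_MAX_TIME=2000000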


@@ -26,10 +26,29 @@
 #include <stdarg.h>
 #include <stdint.h>
 
+extern struct tr_ctx dt_tr;
+
+#if CONFIG_TRACE_FILTERING_ADAPTIVE
+struct recent_log_entry {
+	uint32_t entry_id;
+	uint64_t message_ts;
+	uint64_t first_suppression_ts;
+	uint32_t suppress_count;
+};
+
+struct recent_trace_context {
+	struct recent_log_entry recent_entries[CONFIG_TRACE_RECENT_ENTRIES_COUNT];
+};
+#endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */
+
 struct trace {
 	uintptr_t pos ;	/* trace position */
 	uint32_t enable;
 	spinlock_t lock; /* locking mechanism */
+
+#if CONFIG_TRACE_FILTERING_ADAPTIVE
+	struct recent_trace_context trace_core_context[CONFIG_CORE_COUNT];
+#endif
 };
 
 /* calculates total message size, both header and payload in bytes */
@@ -96,19 +115,111 @@ static inline void mtrace_event(const char *data, uint32_t length)
 	platform_shared_commit(trace, sizeof(*trace));
 }
 
+#if CONFIG_TRACE_FILTERING_VERBOSITY
 /**
- * \brief Runtime trace filtering
+ * \brief Runtime trace filtering based on verbosity level
  * \param lvl log level (LOG_LEVEL_ ERROR, INFO, DEBUG ...)
  * \param uuid uuid address
  * \return false when trace is filtered out, otherwise true
  */
-static inline bool trace_filter_pass(uint32_t lvl,
-				     const struct tr_ctx *ctx)
+static inline bool trace_filter_verbosity(uint32_t lvl, const struct tr_ctx *ctx)
 {
 	STATIC_ASSERT(LOG_LEVEL_CRITICAL < LOG_LEVEL_VERBOSE,
 		      LOG_LEVEL_CRITICAL_MUST_HAVE_LOWEST_VALUE);
 	return lvl <= ctx->level;
 }
+#endif /* CONFIG_TRACE_FILTERING_VERBOSITY */
+
+#if CONFIG_TRACE_FILTERING_ADAPTIVE
+static void emit_recent_entry(struct recent_log_entry *entry)
+{
+	_log_message(trace_log_unfiltered, false, LOG_LEVEL_INFO, _TRACE_INV_CLASS, &dt_tr,
+		     _TRACE_INV_ID, _TRACE_INV_ID, "Suppressed %u similar messages: %pQ",
+		     entry->suppress_count, entry->entry_id);
+
+	memset(entry, 0, sizeof(*entry));
+}
+
+static void emit_recent_entries(uint64_t current_ts)
+{
+	struct trace *trace = trace_get();
+	struct recent_log_entry *recent_entries =
+		trace->trace_core_context[cpu_get_id()].recent_entries;
+	int i;
+
+	/* Check if any tracked entries were dormant long enough to unsuppress them */
+	for (i = 0; i < CONFIG_TRACE_RECENT_ENTRIES_COUNT; i++) {
+		if (recent_entries[i].entry_id) {
+			if (current_ts - recent_entries[i].message_ts >
+			    CONFIG_TRACE_RECENT_TIME_THRESHOLD) {
+				if (recent_entries[i].suppress_count)
+					emit_recent_entry(&recent_entries[i]);
+			}
+		}
+	}
+
+	platform_shared_commit(trace, sizeof(*trace));
+}
+
+/**
+ * \brief Runtime trace flood suppression
+ * \param log_level log verbosity level
+ * \param entry ID of log entry
+ * \param message_ts message timestamp
+ * \return false when trace is filtered out, true otherwise
+ */
+static bool trace_filter_flood(uint32_t log_level, uint32_t entry, uint64_t message_ts)
+{
+	struct trace *trace = trace_get();
+	struct recent_log_entry *recent_entries =
+		trace->trace_core_context[cpu_get_id()].recent_entries;
+	struct recent_log_entry *oldest_entry = recent_entries;
+	int i;
+
+	/* don't attempt to suppress debug messages using this method, it would be ineffective */
+	if (log_level >= LOG_LEVEL_DEBUG)
+		return true;
+
+	/* check if the same log entry was sent recently */
+	for (i = 0; i < CONFIG_TRACE_RECENT_ENTRIES_COUNT; i++) {
+		/* in the meantime identify the oldest or an empty entry, for reuse later */
+		if (oldest_entry->first_suppression_ts > recent_entries[i].first_suppression_ts)
+			oldest_entry = &recent_entries[i];
+
+		if (recent_entries[i].entry_id == entry) {
+			if (message_ts - recent_entries[i].first_suppression_ts <
+			    CONFIG_TRACE_RECENT_MAX_TIME &&
+			    message_ts - recent_entries[i].message_ts <
+			    CONFIG_TRACE_RECENT_TIME_THRESHOLD) {
+				recent_entries[i].suppress_count++;
+				/* refresh suppression timer */
+				recent_entries[i].message_ts = message_ts;
+
+				platform_shared_commit(trace, sizeof(*trace));
+
+				return false;
+			}
+
+			if (recent_entries[i].suppress_count)
+				emit_recent_entry(&recent_entries[i]);
+			else
+				memset(&recent_entries[i], 0, sizeof(recent_entries[i]));
+
+			platform_shared_commit(trace, sizeof(*trace));
+
+			return true;
+		}
+	}
+
+	/* Make room for tracking a new entry, by emitting the oldest one in the filter */
+	if (oldest_entry->entry_id && oldest_entry->suppress_count)
+		emit_recent_entry(oldest_entry);
+
+	oldest_entry->entry_id = entry;
+	oldest_entry->message_ts = message_ts;
+	oldest_entry->first_suppression_ts = message_ts;
+
+	platform_shared_commit(trace, sizeof(*trace));
+
+	return true;
+}
+#endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */
 
 static void vatrace_log(bool send_atomic, uint32_t log_entry, const struct tr_ctx *ctx,
 			uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list vargs)
@@ -170,12 +281,29 @@ void trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr
 {
 	struct trace *trace = trace_get();
 	va_list vl;
+#if CONFIG_TRACE_FILTERING_ADAPTIVE
+	uint64_t current_ts;
+#endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */
 
-	if (!trace->enable || !trace_filter_pass(lvl, ctx)) {
+	if (!trace->enable) {
 		platform_shared_commit(trace, sizeof(*trace));
 		return;
 	}
 
+#if CONFIG_TRACE_FILTERING_VERBOSITY
+	if (!trace_filter_verbosity(lvl, ctx))
+		return;
+#endif /* CONFIG_TRACE_FILTERING_VERBOSITY */
+
+#if CONFIG_TRACE_FILTERING_ADAPTIVE
+	current_ts = platform_timer_get(timer_get());
+
+	emit_recent_entries(current_ts);
+
+	if (!trace_filter_flood(lvl, (uint32_t)log_entry, current_ts))
+		return;
+#endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */
+
 	va_start(vl, arg_count);
 	vatrace_log(send_atomic, (uint32_t)log_entry, ctx, lvl, id_1, id_2, arg_count, vl);
 	va_end(vl);
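Putting the pieces together, one suppression cycle under the defaults would
look roughly like this (timestamps illustrative, in the same units as
CONFIG_TRACE_RECENT_TIME_THRESHOLD = 1500):

t=0     entry A logged        -> emitted; A tracked in recent_entries
t=500   entry A logged again  -> suppressed; suppress_count=1, message_ts refreshed
t=900   entry A logged again  -> suppressed; suppress_count=2, message_ts refreshed
t=3000  any trace arrives     -> emit_recent_entries() finds A dormant for
                                 longer than the threshold, emits
                                 "Suppressed 2 similar messages: ..." and
                                 frees A's slot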