perf: make trace part of perf counting configurable

Clients may provide their own trace macro to be run
when a new cpu peak value is detected.

Signed-off-by: Marcin Maka <marcin.maka@linux.intel.com>
Marcin Maka 2020-03-05 15:58:55 +01:00 committed by Janusz Jankowski
parent 8d350a3eff
commit 1758279623
3 changed files with 38 additions and 7 deletions
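
In short, perf_cnt_stamp() no longer takes a fixed trace class and a log_peak flag; the caller now supplies its own trace macro plus an argument that is forwarded to it when a new cpu peak is detected. A before/after call-site sketch, taken from the sa.c hunk below:

    perf_cnt_stamp(TRACE_CLASS_SA, &sa->pcd, true);   /* old interface */
    perf_cnt_stamp(&sa->pcd, perf_sa_trace, sa);      /* new interface */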

@@ -34,14 +34,39 @@ struct perf_cnt_data {
(pcd)->cpu_delta_last, \
(pcd)->cpu_delta_peak)
/** \brief Clears performance counters data. */
#define perf_cnt_clear(pcd) memset((pcd), 0, sizeof(struct perf_cnt_data))
/** \brief Initializes timestamps with current timer values. */
#define perf_cnt_init(pcd) do { \
(pcd)->plat_ts = platform_timer_get(timer_get()); \
(pcd)->cpu_ts = arch_timer_get_system(cpu_timer_get()); \
} while (0)
-#define perf_cnt_stamp(tclass, pcd, log_peak) do { \
+/* Trace macros that can be used as the trace_m argument of perf_cnt_stamp()
+ * to trace PCD values when the last arch timer reading exceeds the previous
+ * peak value.
+ *
+ * The arg passed to perf_cnt_stamp() is forwarded to the trace_m() macro
+ * as its second argument.
+ */
+/** \brief No trace when a new peak value is detected. */
+#define perf_trace_null(pcd, arg)
+/** \brief Simple trace: all values are printed; arg should be one
+ * of TRACE_CLASS_...
+ */
+#define perf_trace_simple(pcd, arg) perf_cnt_trace(arg, pcd)
+/** \brief Reads the timers and computes the deltas to the previous readings.
+ *
+ * If the current arch delta exceeds the previous peak value, trace_m is run.
+ * \param pcd Performance counters data.
+ * \param trace_m Trace macro trace_m(pcd, arg).
+ * \param arg Argument passed to trace_m as arg.
+ */
+#define perf_cnt_stamp(pcd, trace_m, arg) do { \
uint64_t plat_ts = platform_timer_get(timer_get()); \
uint64_t cpu_ts = arch_timer_get_system(cpu_timer_get()); \
if ((pcd)->plat_ts) { \
@@ -54,16 +79,14 @@ struct perf_cnt_data {
(pcd)->plat_delta_peak = (pcd)->plat_delta_last; \
if ((pcd)->cpu_delta_last > (pcd)->cpu_delta_peak) { \
(pcd)->cpu_delta_peak = (pcd)->cpu_delta_last; \
-if (log_peak) \
-perf_cnt_trace(tclass, pcd); \
+trace_m(pcd, arg); \
} \
} while (0)
#else
#define perf_cnt_trace(tclass, pcd)
#define perf_cnt_clear(pcd)
#define perf_cnt_init(pcd)
-#define perf_cnt_stamp(tclass, pcd, log_peak)
+#define perf_cnt_stamp(pcd, trace_m, arg)
#endif
#endif /* __SOF_LIB_PERF_CNT_H__ */
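
For reference, a minimal sketch of how a client of this header could plug its own trace macro into the new interface. The names my_comp, perf_my_trace, my_comp_start and my_comp_run are hypothetical, trace_event() and TRACE_CLASS_COMP are assumed to be the usual SOF trace helpers, and the pattern simply mirrors the sa.c and ll_schedule changes below:

    #include <sof/lib/perf_cnt.h>
    #include <sof/trace/trace.h>

    struct my_comp {
        struct perf_cnt_data pcd;
        /* ... */
    };

    /* Client-provided trace macro: perf_cnt_stamp() runs it only when a new
     * cpu peak is detected, forwarding its third argument here as the second
     * parameter. trace_event()/TRACE_CLASS_COMP are assumed SOF trace helpers.
     */
    #define perf_my_trace(pcd, comp) \
        trace_event(TRACE_CLASS_COMP, "perf my_comp peak plat %lu cpu %lu", \
                    (pcd)->plat_delta_peak, (pcd)->cpu_delta_peak)

    static void my_comp_start(struct my_comp *c)
    {
        perf_cnt_clear(&c->pcd);
        perf_cnt_init(&c->pcd);
    }

    static void my_comp_run(struct my_comp *c)
    {
        /* ... periodic work ... */

        /* Update the counters; perf_trace_null or perf_trace_simple can be
         * passed instead when no custom trace is wanted.
         */
        perf_cnt_stamp(&c->pcd, perf_my_trace, c);
    }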

@@ -36,6 +36,10 @@
#define trace_sa_error(__e, ...) \
trace_error(TRACE_CLASS_SA, __e, ##__VA_ARGS__)
+#define perf_sa_trace(pcd, sa) \
+trace_sa("perf sys_load peak plat %lu cpu %lu", \
+(pcd)->plat_delta_peak, (pcd)->cpu_delta_peak)
/* c63c4e75-8f61-4420-9319-1395932efa9e */
DECLARE_SOF_UUID("agent-work", agent_work_task_uuid, 0xc63c4e75, 0x8f61, 0x4420,
0x93, 0x19, 0x13, 0x95, 0x93, 0x2e, 0xfa, 0x9e);
@@ -49,7 +53,7 @@ static enum task_state validate(void *data)
current = platform_timer_get(timer_get());
delta = current - sa->last_check;
-perf_cnt_stamp(TRACE_CLASS_SA, &sa->pcd, true);
+perf_cnt_stamp(&sa->pcd, perf_sa_trace, sa);
/* panic timeout */
if (delta > sa->panic_timeout)

@@ -42,6 +42,10 @@ struct ll_schedule_data {
const struct scheduler_ops schedule_ll_ops;
+#define perf_ll_sched_trace(pcd, ll_sched) \
+trace_ll("perf ll_work peak plat %lu cpu %lu", \
+(pcd)->plat_delta_peak, (pcd)->cpu_delta_peak)
static bool schedule_ll_is_pending(struct ll_schedule_data *sch)
{
struct list_item *tlist;
@@ -171,7 +175,7 @@ static void schedule_ll_tasks_run(void *data)
if (schedule_ll_is_pending(sch))
schedule_ll_tasks_execute(sch, last_tick);
-perf_cnt_stamp(TRACE_CLASS_SCHEDULE_LL, &sch->pcd, true);
+perf_cnt_stamp(&sch->pcd, perf_ll_sched_trace, sch);
spin_lock(&sch->domain->lock);