/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2018 Intel Corporation. All rights reserved.
 *
 * Author: Janusz Jankowski <janusz.jankowski@linux.intel.com>
 */

#ifndef __SOF_DRIVERS_INTERRUPT_H__
#define __SOF_DRIVERS_INTERRUPT_H__

#include <platform/drivers/interrupt.h>

#if !defined(__ASSEMBLER__) && !defined(LINKER)

#include <arch/drivers/interrupt.h>
#include <sof/lib/cpu.h>
#include <sof/list.h>
#include <rtos/sof.h>
#include <rtos/spinlock.h>
#include <sof/trace/trace.h>
#include <user/trace.h>
#include <stdbool.h>
#include <stdint.h>

/**
 * \brief child IRQ descriptor for cascading IRQ controllers.
 */
struct irq_child {
	int enable_count[CONFIG_CORE_COUNT];	/**< IRQ enable counter */
	struct list_item list;			/**< head for IRQ descriptors,
						 * sharing this interrupt
						 */
};

/**
 * \brief interrupt client descriptor
 */
struct irq_desc {
	int irq;			/**< virtual IRQ number */
	void (*handler)(void *arg);	/**< interrupt handler function */
	void *handler_arg;		/**< interrupt handler argument */
	uint32_t cpu_mask;		/**< a mask of CPUs on which this
					 * interrupt is enabled
					 */
	struct list_item irq_list;	/**< to link to other irq_desc */
};

/**
 * \brief cascading IRQ controller operations.
 */
struct irq_cascade_ops {
	void (*mask)(struct irq_desc *desc, uint32_t irq,
		     unsigned int cpu);		/**< mask */
	void (*unmask)(struct irq_desc *desc, uint32_t irq,
		       unsigned int cpu);	/**< unmask */
};
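
/*
 * Implementation sketch (illustrative only, not compiled): a cascading
 * controller driver provides per-core mask/unmask callbacks. The register
 * helpers and names below (my_mux_write, MY_MUX_MASK_REG, my_mux_unmask)
 * are hypothetical.
 *
 *	static void my_mux_mask(struct irq_desc *desc, uint32_t irq,
 *				unsigned int cpu)
 *	{
 *		// disable input line "irq" for core "cpu" in the mux hardware
 *		my_mux_write(MY_MUX_MASK_REG(cpu), BIT(irq));
 *	}
 *
 *	static const struct irq_cascade_ops my_mux_ops = {
 *		.mask = my_mux_mask,
 *		.unmask = my_mux_unmask,
 *	};
 */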

/**
 * \brief cascading interrupt controller descriptor.
 */
struct irq_cascade_desc {
	const char *name;		/**< name of the controller */
	int irq_base;			/**< first virtual IRQ number,
					 * assigned to this controller
					 */
	const struct irq_cascade_ops *ops; /**< cascading interrupt controller
					    * driver operations
					    */
	struct irq_desc desc;		/**< the interrupt that this
					 * controller generates
					 */
	struct irq_cascade_desc *next;	/**< link to the global list of
					 * interrupt controllers
					 */
	bool global_mask;		/**< the controller cannot mask input
					 * interrupts per core
					 */
	struct k_spinlock lock;		/**< protects child lists, enable and
					 * child counters
					 */
	int enable_count[CONFIG_CORE_COUNT];	/**< enabled child
						 * interrupt counter
						 */
	unsigned int num_children[CONFIG_CORE_COUNT];	/**< number of
							 * children
							 */
	struct irq_child child[PLATFORM_IRQ_CHILDREN];	/**< array of child
							 * lists - one per
							 * multiplexed IRQ
							 */
};

/* Template descriptor for registering a cascading interrupt controller */
struct irq_cascade_tmpl {
	const char *name;
	const struct irq_cascade_ops *ops;
	int irq;
	void (*handler)(void *arg);
	bool global_mask;
};

/**
 * \brief Cascading interrupt controller root.
 */
struct cascade_root {
	struct k_spinlock lock;		/**< locking mechanism */
	struct irq_cascade_desc *list;	/**< list of child cascade irqs */
	int last_irq;			/**< last registered cascade irq */
};

static inline struct cascade_root *cascade_root_get(void)
{
	return sof_get()->cascade_root;
}

/* For i.MX, when building SOF with Zephyr, use the interrupt_* functions
 * from the second-level interrupt handling and IRQ_STEER.
 */
#if defined(__ZEPHYR__) && (defined(CONFIG_IMX) || defined(CONFIG_AMD))
int mux_interrupt_get_irq(unsigned int irq, const char *cascade);
int mux_interrupt_register(uint32_t irq, void(*handler)(void *arg), void *arg);
void mux_interrupt_unregister(uint32_t irq, const void *arg);
uint32_t mux_interrupt_enable(uint32_t irq, void *arg);
uint32_t mux_interrupt_disable(uint32_t irq, void *arg);
#endif

int interrupt_register(uint32_t irq, void(*handler)(void *arg), void *arg);
void interrupt_unregister(uint32_t irq, const void *arg);
uint32_t interrupt_enable(uint32_t irq, void *arg);
uint32_t interrupt_disable(uint32_t irq, void *arg);
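
/*
 * Usage sketch (illustrative only, not compiled): a driver registers a
 * handler for its virtual IRQ and then enables it. MY_DEV_IRQ, struct
 * my_dev and the handler below are hypothetical names.
 *
 *	static void my_dev_irq_handler(void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *
 *		// acknowledge the hardware and handle the event
 *	}
 *
 *	ret = interrupt_register(MY_DEV_IRQ, my_dev_irq_handler, dev);
 *	if (!ret)
 *		interrupt_enable(MY_DEV_IRQ, dev);
 */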

/* Zephyr compat */
#if !defined(__ZEPHYR__)
#define arch_irq_lock() arch_interrupt_disable_mask(0xffffffff)
#endif

void platform_interrupt_init(void);

void platform_interrupt_set(uint32_t irq);
void platform_interrupt_clear(uint32_t irq, uint32_t mask);
uint32_t platform_interrupt_get_enabled(void);
void interrupt_mask(uint32_t irq, unsigned int cpu);
void interrupt_unmask(uint32_t irq, unsigned int cpu);

/*
 * On platforms that support cascading interrupts, cascaded interrupt
 * numbers are greater than or equal to PLATFORM_IRQ_HW_NUM.
 */
#define interrupt_is_dsp_direct(irq)	(!PLATFORM_IRQ_CHILDREN || \
					 irq < PLATFORM_IRQ_HW_NUM)

void interrupt_init(struct sof *sof);
int interrupt_cascade_register(const struct irq_cascade_tmpl *tmpl);
struct irq_cascade_desc *interrupt_get_parent(uint32_t irq);
int interrupt_get_irq(unsigned int irq, const char *cascade);
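
/*
 * Cascade registration sketch (illustrative only, not compiled): a platform
 * driver fills in a template and registers it; clients then translate a
 * hardware input line on that controller into a virtual IRQ number. The
 * names "my_mux", my_mux_ops, my_mux_handler and MY_MUX_HW_IRQ are
 * hypothetical.
 *
 *	static const struct irq_cascade_tmpl my_mux = {
 *		.name = "my_mux",
 *		.ops = &my_mux_ops,
 *		.irq = MY_MUX_HW_IRQ,
 *		.handler = my_mux_handler,
 *		.global_mask = false,
 *	};
 *
 *	interrupt_cascade_register(&my_mux);
 *	irq = interrupt_get_irq(3, "my_mux");	// 4th input on the controller
 */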

static inline void interrupt_set(int irq)
{
	platform_interrupt_set(irq);
}

static inline void interrupt_clear_mask(int irq, uint32_t mask)
{
	platform_interrupt_clear(irq, mask);
}

static inline void interrupt_clear(int irq)
{
	interrupt_clear_mask(irq, 1);
}

static inline uint32_t interrupt_global_disable(void)
{
	return arch_interrupt_global_disable();
}

static inline void interrupt_global_enable(uint32_t flags)
{
	arch_interrupt_global_enable(flags);
}

#if CONFIG_LIBRARY

/* Temporary fix to remove a build warning in the testbench; to be realigned
 * shortly, when Zephyr native APIs are used.
 */
static inline void __irq_local_disable(unsigned long flags) {}
static inline void __irq_local_enable(unsigned long flags) {}

/* disables all IRQ sources on current core - NO effect on library */
#define irq_local_disable(flags) \
	do { \
		flags = 0; \
		__irq_local_disable(flags); \
	} while (0)

/* re-enables IRQ sources on current core - NO effect on library */
#define irq_local_enable(flags) \
	__irq_local_enable(flags)

#else

/* disables all IRQ sources on current core */
#define irq_local_disable(flags) \
	(flags = interrupt_global_disable())

/* re-enables IRQ sources on current core */
#define irq_local_enable(flags) \
	interrupt_global_enable(flags)

#endif
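
/*
 * Critical-section sketch (illustrative only, not compiled): "flags" saves
 * the previous interrupt state, so disable/enable pairs can nest safely.
 *
 *	uint32_t flags;
 *
 *	irq_local_disable(flags);
 *	// ...access data shared with interrupt handlers on this core...
 *	irq_local_enable(flags);
 */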

#endif

#endif /* __SOF_DRIVERS_INTERRUPT_H__ */