smp: Move for loops to use arch_num_cpus instead of CONFIG_MP_NUM_CPUS

Change for loops of the form:

for (i = 0; i < CONFIG_MP_NUM_CPUS; i++)
   ...

to

unsigned int num_cpus = arch_num_cpus();
for (i = 0; i < num_cpus; i++)
   ...

We make the call outside of the for loop so that it happens only once,
rather than on every iteration.
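
As an illustrative sketch only (notify_cpu() below is a hypothetical stand-in
for the per-CPU work; arch_num_cpus() is the existing arch interface), hoisting
the call avoids re-evaluating the CPU count in the loop condition:

/* Anti-pattern: arch_num_cpus() is re-evaluated on every iteration. */
for (unsigned int i = 0; i < arch_num_cpus(); i++) {
	notify_cpu(i);
}

/* Preferred: query the count once, then loop over it. */
unsigned int num_cpus = arch_num_cpus();

for (unsigned int i = 0; i < num_cpus; i++) {
	notify_cpu(i);
}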

Signed-off-by: Kumar Gala <kumar.gala@intel.com>
commit a1195ae39b (parent 77dcf91f50)
Authored by Kumar Gala on 2022-10-18 09:45:13 -05:00; committed by Carles Cufí.
26 changed files with 121 additions and 44 deletions


@@ -133,7 +133,9 @@ void arch_sched_ipi(void)
 	/* broadcast sched_ipi request to other cores
	 * if the target is current core, hardware will ignore it
	 */
-	for (i = 0U; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (i = 0U; i < num_cpus; i++) {
 		z_arc_connect_ici_generate(i);
 	}
 }


@@ -96,7 +96,9 @@ static void flush_owned_fpu(struct k_thread *thread)
 	int i;
 	/* search all CPUs for the owner we want */
-	for (i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (i = 0; i < num_cpus; i++) {
 		if (_kernel.cpus[i].arch.fpu_owner != thread) {
 			continue;
 		}


@@ -160,7 +160,9 @@ static void broadcast_ipi(unsigned int ipi)
 	/*
	 * Send SGI to all cores except itself
	 */
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int i = 0; i < num_cpus; i++) {
 		uint64_t target_mpidr = cpu_node_list[i];
 		uint8_t aff0 = MPIDR_AFFLVL(target_mpidr, 0);


@@ -67,7 +67,9 @@ void arch_sched_ipi(void)
 	key = arch_irq_lock();
 	id = _current_cpu->id;
-	for (i = 0U; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (i = 0U; i < num_cpus; i++) {
 		if (i != id) {
 			volatile uint32_t *r = (uint32_t *)get_hart_msip(i);
 			*r = 1U;


@@ -67,7 +67,9 @@ FUNC_NORETURN void z_x86_prep_c(void *arg)
 #endif
 #if CONFIG_X86_STACK_PROTECTION
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int i = 0; i < num_cpus; i++) {
 		z_x86_set_stack_guard(z_interrupt_stacks[i]);
 	}
 #endif


@@ -85,7 +85,9 @@ void dw_ace_irq_enable(const struct device *dev, uint32_t irq)
 	ARG_UNUSED(dev);
 	if (is_dw_irq(irq)) {
-		for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+		unsigned int num_cpus = arch_num_cpus();
+
+		for (int i = 0; i < num_cpus; i++) {
 			ACE_INTC[i].irq_inten_l |= BIT(ACE_IRQ_FROM_ZEPHYR(irq));
 			ACE_INTC[i].irq_intmask_l &= ~BIT(ACE_IRQ_FROM_ZEPHYR(irq));
 		}
@@ -99,7 +101,9 @@ void dw_ace_irq_disable(const struct device *dev, uint32_t irq)
 	ARG_UNUSED(dev);
 	if (is_dw_irq(irq)) {
-		for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+		unsigned int num_cpus = arch_num_cpus();
+
+		for (int i = 0; i < num_cpus; i++) {
 			ACE_INTC[i].irq_inten_l &= ~BIT(ACE_IRQ_FROM_ZEPHYR(irq));
 			ACE_INTC[i].irq_intmask_l |= BIT(ACE_IRQ_FROM_ZEPHYR(irq));
 		}


@@ -202,7 +202,9 @@ static struct vector_desc_t *find_desc_for_source(int source, int cpu)
 void esp_intr_initialize(void)
 {
-	for (size_t i = 0; i < (ESP_INTC_INTS_NUM * CONFIG_MP_NUM_CPUS); ++i) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (size_t i = 0; i < (ESP_INTC_INTS_NUM * num_cpus); ++i) {
 		intr_alloc_table[i].handler = default_intr_handler;
 		intr_alloc_table[i].arg = (void *)i;
 	}


@@ -138,9 +138,11 @@ static void gic_dist_init(void)
 	/*
	 * Enable all global interrupts distributing to CPUs listed
-	 * in dts with the count of CONFIG_MP_NUM_CPUS.
+	 * in dts with the count of arch_num_cpus().
	 */
-	for (i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (i = 0; i < num_cpus; i++) {
 		cpu_mask |= BIT(cpu_mpid_list[i]);
 	}
 	reg_val = cpu_mask | (cpu_mask << 8) | (cpu_mask << 16)


@@ -36,7 +36,9 @@ static void cavs_idc_isr(const struct device *dev)
 	bool do_sched_ipi = false;
 #endif
-	for (i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (i = 0; i < num_cpus; i++) {
 		if (i == curr_cpu_id) {
 			/* skip current core */
 			continue;
@@ -94,7 +96,9 @@ static int cavs_idc_send(const struct device *dev, int wait, uint32_t id,
 	/* Check if any core is still busy */
 	busy = false;
-	for (i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (i = 0; i < num_cpus; i++) {
 		if (i == curr_cpu_id) {
 			/* skip current core */
 			continue;
@@ -116,7 +120,7 @@ static int cavs_idc_send(const struct device *dev, int wait, uint32_t id,
 	ext &= IPC_IDCIETC_MSG_MASK;
 	ext |= IPC_IDCIETC_DONE; /* always clear DONE bit */
-	for (i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	for (i = 0; i < num_cpus; i++) {
 		if (i == curr_cpu_id) {
 			/* skip current core */
 			continue;
@@ -173,11 +177,13 @@ static int cavs_idc_set_enabled(const struct device *dev, int enable)
 	}
 #endif
-	for (i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (i = 0; i < num_cpus; i++) {
 		mask = 0;
 		if (enable) {
-			for (j = 0; j < CONFIG_MP_NUM_CPUS; j++) {
+			for (j = 0; j < num_cpus; j++) {
 				if (i == j) {
 					continue;
 				}


@@ -593,7 +593,9 @@ static bool thread_active_elsewhere(struct k_thread *thread)
 #ifdef CONFIG_SMP
 	int currcpu = _current_cpu->id;
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int i = 0; i < num_cpus; i++) {
 		if ((i != currcpu) &&
 		    (_kernel.cpus[i].current == thread)) {
 			return true;
@@ -1291,7 +1293,9 @@ void init_ready_q(struct _ready_q *rq)
 void z_sched_init(void)
 {
 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int i = 0; i < num_cpus; i++) {
 		init_ready_q(&_kernel.cpus[i].ready_q);
 	}
 #else


@@ -112,7 +112,10 @@ void z_smp_start_cpu(int id)
 void z_smp_init(void)
 {
 	(void)atomic_clear(&start_flag);
-	for (int i = 1; i < CONFIG_MP_NUM_CPUS; i++) {
+
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int i = 1; i < num_cpus; i++) {
 		start_cpu(i, &start_flag);
 	}
 	(void)atomic_set(&start_flag, 1);


@@ -1100,7 +1100,9 @@ int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
 	/* Retrieve the usage stats for each core and amalgamate them. */
-	for (uint8_t i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (uint8_t i = 0; i < num_cpus; i++) {
 		z_sched_cpu_usage(i, &tmp_stats);
 		stats->execution_cycles += tmp_stats.execution_cycles;


@@ -296,7 +296,9 @@ void k_sys_runtime_stats_enable(void)
 	/* Enable gathering of runtime stats on each CPU */
-	for (uint8_t i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (uint8_t i = 0; i < num_cpus; i++) {
 		_kernel.cpus[i].usage.track_usage = true;
 #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
 		_kernel.cpus[i].usage.num_windows++;
@@ -328,7 +330,9 @@ void k_sys_runtime_stats_disable(void)
 	uint32_t now = usage_now();
-	for (uint8_t i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (uint8_t i = 0; i < num_cpus; i++) {
 		cpu = &_kernel.cpus[i];
 		if (cpu->usage0 != 0) {
 			sched_cpu_update_usage(cpu, now - cpu->usage0);


@@ -46,7 +46,9 @@ void soc_mp_init(void)
 	irq_enable(ACE_IRQ_TO_ZEPHYR(ACE_INTL_IDCA));
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int i = 0; i < num_cpus; i++) {
 		/* DINT has one bit per IPC, unmask only IPC "Ax" on core "x" */
 		ACE_DINT[i].ie[ACE_INTL_IDCA] = BIT(i);
@@ -111,7 +113,9 @@ void arch_sched_ipi(void)
 	uint32_t curr = arch_proc_id();
 	/* Signal agent B[n] to cause an interrupt from agent A[n] */
-	for (int core = 0; core < CONFIG_MP_NUM_CPUS; core++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int core = 0; core < num_cpus; core++) {
 		if (core != curr && soc_cpus_active[core]) {
 			IDC[core].agents[1].ipc.idr = INTEL_ADSP_IPC_BUSY;
 		}


@@ -29,7 +29,9 @@ __imr void soc_mp_startup(uint32_t cpu)
 	 * spurious IPI when we enter user code). Remember: this
	 * could have come from any core, clear all of them.
	 */
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int i = 0; i < num_cpus; i++) {
 		IDC[cpu].core[i].tfc = BIT(31);
 	}
@@ -108,7 +110,9 @@ void soc_start_core(int cpu_num)
 	 * some platforms will mess it up.
	 */
 	CAVS_INTCTRL[cpu_num].l2.clear = CAVS_L2_IDC;
-	for (int c = 0; c < CONFIG_MP_NUM_CPUS; c++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int c = 0; c < num_cpus; c++) {
 		IDC[c].busy_int |= IDC_ALL_CORES;
 	}
@@ -126,8 +130,9 @@ void soc_start_core(int cpu_num)
 void arch_sched_ipi(void)
 {
 	uint32_t curr = arch_proc_id();
+	unsigned int num_cpus = arch_num_cpus();
 
-	for (int c = 0; c < CONFIG_MP_NUM_CPUS; c++) {
+	for (int c = 0; c < num_cpus; c++) {
 		if (c != curr && soc_cpus_active[c]) {
 			IDC[curr].core[c].itc = BIT(31);
 		}
@@ -148,7 +153,10 @@ void idc_isr(const void *param)
 	 * of the ITC/TFC high bits, INCLUDING the one "from this
	 * CPU".
	 */
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int i = 0; i < num_cpus; i++) {
 		IDC[arch_proc_id()].core[i].tfc = BIT(31);
 	}
 }
@@ -161,7 +169,9 @@ __imr void soc_mp_init(void)
 	 * every other CPU, but not to be back-interrupted when the
	 * target core clears the busy bit.
	 */
-	for (int core = 0; core < CONFIG_MP_NUM_CPUS; core++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int core = 0; core < num_cpus; core++) {
 		IDC[core].busy_int |= IDC_ALL_CORES;
 		IDC[core].done_int &= ~IDC_ALL_CORES;
@@ -172,8 +182,8 @@ __imr void soc_mp_init(void)
 	}
 
 	/* Clear out any existing pending interrupts that might be present */
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
-		for (int j = 0; j < CONFIG_MP_NUM_CPUS; j++) {
+	for (int i = 0; i < num_cpus; i++) {
+		for (int j = 0; j < num_cpus; j++) {
 			IDC[i].core[j].tfc = BIT(31);
 		}
 	}


@@ -66,7 +66,10 @@ int cavs_clock_set_freq(uint32_t freq_idx)
 	k = k_spin_lock(&lock);
 	select_cpu_clock_hw(freq_idx);
-	for (i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (i = 0; i < num_cpus; i++) {
 		platform_clocks[i].current_freq = freq_idx;
 	}
@@ -94,7 +97,9 @@ void cavs_clock_init(void)
 	}
 #endif
-	for (i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (i = 0; i < num_cpus; i++) {
 		platform_clocks[i].default_freq = CAVS_CLOCK_FREQ_DEFAULT;
 		platform_clocks[i].current_freq = CAVS_CLOCK_FREQ_DEFAULT;
 		platform_clocks[i].lowest_freq = platform_lowest_freq_idx;


@@ -169,7 +169,9 @@ bool intel_adsp_ipc_send_message_sync(const struct device *dev,
 #if defined(CONFIG_SOC_SERIES_INTEL_ACE)
 static inline void ace_ipc_intc_unmask(void)
 {
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int i = 0; i < num_cpus; i++) {
 		ACE_DINT[i].ie[ACE_INTL_HIPC] = BIT(0);
 	}
 }


@@ -131,7 +131,9 @@ K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
 static void isr_stacks(void)
 {
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int i = 0; i < num_cpus; i++) {
 		const uint8_t *buf = Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[i]);
 		size_t size = K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[i]);
 		size_t unused;


@@ -35,7 +35,9 @@ static int pm_stats_init(const struct device *dev)
 {
 	ARG_UNUSED(dev);
-	for (uint8_t i = 0U; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (uint8_t i = 0U; i < num_cpus; i++) {
 		for (uint8_t j = 0U; j < PM_STATE_COUNT; j++) {
 			snprintk(names[i][j], PM_STAT_NAME_LEN,
				 "pm_cpu_%03d_state_%1d_stats", i, j);


@@ -211,7 +211,9 @@ static int cmd_kernel_stacks(const struct shell *shell,
 	 * kernel support, including dumping arch-specific exception-related
	 * stack buffers.
	 */
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int i = 0; i < num_cpus; i++) {
 		size_t unused;
 		const uint8_t *buf = Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[i]);
 		size_t size = K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[i]);


@@ -120,8 +120,9 @@ static void control_load(void)
 	uint64_t idle_cycles = 0;
 	k_thread_runtime_stats_t rt_stats_all;
 	int err = 0;
+	unsigned int num_cpus = arch_num_cpus();
 
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	for (int i = 0; i < num_cpus; i++) {
 		k_thread_runtime_stats_t thread_stats;
 		err = k_thread_runtime_stats_get(idle_tid[i], &thread_stats);


@@ -139,7 +139,9 @@ static void core_smoke(void *arg)
 ZTEST(intel_adsp_boot, test_4th_cpu_behavior)
 {
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int i = 0; i < num_cpus; i++) {
 		printk("Per-CPU smoke test %d...\n", i);
 		run_on_cpu(i, core_smoke, (void *)i, true);
 	}
@@ -196,7 +198,9 @@ static void halt_and_restart(int cpu)
 void halt_and_restart_thread(void *p1, void *p2, void *p3)
 {
-	for (int i = 1; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int i = 1; i < num_cpus; i++) {
 		halt_and_restart(i);
 	}
 }


@@ -41,11 +41,13 @@ static void thread_fn(void *a, void *b, void *c)
  */
 ZTEST(intel_adsp_boot, test_1st_smp_boot_delay)
 {
+	unsigned int num_cpus = arch_num_cpus();
+
 	if (CONFIG_MP_NUM_CPUS < 2) {
 		ztest_test_skip();
 	}
 
-	for (int i = 1; i < CONFIG_MP_NUM_CPUS; i++) {
+	for (int i = 1; i < num_cpus; i++) {
 		printk("Launch cpu%d\n", i);
 		mp_flag = false;
 		k_thread_create(&cpu_thr, thr_stack, K_THREAD_STACK_SIZEOF(thr_stack),


@@ -10,8 +10,9 @@
 static void check_clocks(struct cavs_clock_info *clocks, uint32_t freq_idx)
 {
 	int i;
+	unsigned int num_cpus = arch_num_cpus();
 
-	for (i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	for (i = 0; i < num_cpus; i++) {
 		zassert_equal(clocks[i].current_freq, freq_idx, "");
 	}
 }


@@ -391,7 +391,10 @@ ZTEST(userspace_thread_stack, test_stack_buffer)
 		      K_KERNEL_STACK_RESERVED);
 	printk("CONFIG_ISR_STACK_SIZE %zu\n", (size_t)CONFIG_ISR_STACK_SIZE);
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int i = 0; i < num_cpus; i++) {
 		printk("irq stack %d: %p size %zu\n",
		       i, &z_interrupt_stacks[i],
		       sizeof(z_interrupt_stacks[i]));


@@ -180,7 +180,9 @@ ZTEST(lib_p4wq, test_fill_queue)
 	 * we can be sure to run). They should all be made active
	 * when added.
	 */
-	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+	unsigned int num_cpus = arch_num_cpus();
+
+	for (int i = 0; i < num_cpus; i++) {
 		zassert_true(add_new_item(p0), "thread should be active");
 	}
@@ -192,7 +194,7 @@ ZTEST(lib_p4wq, test_fill_queue)
 	 * we run out of threads.
	 */
 	for (int pri = p0 - 1; pri >= p0 - 4; pri++) {
-		for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
+		for (int i = 0; i < num_cpus; i++) {
 			bool active = add_new_item(pri);
 			if (!active) {