add smp_call_function support

Make use of the VCPU_NOTIFY vector to add smp_call_function support.
Add a per_cpu field smp_call_info, ensure each smp_call_function
invocation is not re-entered, and return control to the caller CPU
only when all the target CPUs have completed the call.
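
For illustration, a minimal usage sketch (the dump_context callback and
the mask value below are hypothetical, not part of this change):

    static void dump_context(void *data)
    {
        pr_info("cpu %hu: smp call, data=%p", get_cpu_id(), data);
    }

    /* run dump_context on pcpu 1 and pcpu 2; returns after both finish */
    smp_call_function((1UL << 1U) | (1UL << 2U), dump_context, NULL);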

v4:
- remove global lock
- use the wait_sync_change function to do the sequence sync, as sketched
  below
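
A minimal sketch of what wait_sync_change relies on, assuming it simply
spins until the variable reaches the expected value (the real helper may
use monitor/mwait instead of pause; this shape is an assumption):

    static inline void wait_sync_change(volatile uint64_t *sync,
            uint64_t wake_sync)
    {
        /* busy-wait until another cpu writes the expected value */
        while (*sync != wake_sync)
            asm volatile ("pause" ::: "memory");
    }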

v3:
- remove per_cpu lock in smp_call_info
- use a global lock to ensure smp_call_function sequence
- use pcpu_sync_sleep to wait for IPI completion

v2:
- if an old smp call is still pending when a new one arrives, ignore
  the new one instead of overwriting the old one

Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Jason Chen CJ 2018-08-12 21:02:27 +08:00 committed by lijinxia
parent 8ef072165f
commit 4b03c97a5e
3 changed files with 48 additions and 2 deletions

@@ -8,15 +8,53 @@
static uint32_t notification_irq = IRQ_INVALID;
static volatile uint64_t smp_call_mask = 0UL;
/* run in interrupt context */
static int kick_notification(__unused uint32_t irq, __unused void *data)
{
    /* Notification vector is used to kick target cpu out of non-root
     * mode. It also serves for smp call.
     */
    uint16_t pcpu_id = get_cpu_id();

    if (bitmap_test(pcpu_id, &smp_call_mask)) {
        struct smp_call_info_data *smp_call =
            &per_cpu(smp_call_info, pcpu_id);

        if (smp_call->func)
            smp_call->func(smp_call->data);
        bitmap_clear_nolock(pcpu_id, &smp_call_mask);
    }

    return 0;
}
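
/* Handshake with kick_notification() above:
 * 1. the caller reserves smp_call_mask via cmpxchg, so only one smp call
 *    can be in flight at a time (no re-entry);
 * 2. each target cpu clears its own bit only after running the callback;
 * 3. the caller spins until the mask drops back to 0, so returning from
 *    smp_call_function() means every target has finished the callback.
 */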
void smp_call_function(uint64_t mask, smp_call_func_t func, void *data)
{
    uint16_t pcpu_id;
    struct smp_call_info_data *smp_call;

    /* wait for previous smp call complete, which may run on other cpus */
    while (atomic_cmpxchg64(&smp_call_mask, 0UL, mask & INVALID_BIT_INDEX));

    while ((pcpu_id = ffs64(mask)) != INVALID_BIT_INDEX) {
        bitmap_clear_nolock(pcpu_id, &mask);
        if (bitmap_test(pcpu_id, &pcpu_active_bitmap)) {
            smp_call = &per_cpu(smp_call_info, pcpu_id);
            smp_call->func = func;
            smp_call->data = data;
        } else {
            /* pcpu is not active, print error */
            pr_err("pcpu_id %d not in active!", pcpu_id);
            bitmap_clear_nolock(pcpu_id, &smp_call_mask);
        }
    }
    send_dest_ipi(smp_call_mask, VECTOR_NOTIFY_VCPU,
        INTR_LAPIC_ICR_LOGICAL);
    /* wait for current smp call complete */
    wait_sync_change(&smp_call_mask, 0UL);
}
static int request_notification_irq(irq_action_t func, void *data,
        const char *name)
{

@@ -47,6 +47,13 @@ struct intr_excp_ctx {
uint64_t ss;
};
typedef void (*smp_call_func_t)(void *data);

struct smp_call_info_data {
    smp_call_func_t func;
    void *data;
};

void smp_call_function(uint64_t mask, smp_call_func_t func, void *data);
int handle_level_interrupt_common(struct irq_desc *desc,
__unused void *handler_data);
int common_handler_edge(struct irq_desc *desc, __unused void *handler_data);

@@ -46,6 +46,7 @@ struct per_cpu_region {
    uint8_t stack[CONFIG_STACK_SIZE] __aligned(16);
    char logbuf[LOG_MESSAGE_MAX_SIZE];
    uint8_t lapic_id;
    struct smp_call_info_data smp_call_info;
} __aligned(CPU_PAGE_SIZE); // per_cpu_region size aligned with CPU_PAGE_SIZE
extern struct per_cpu_region *per_cpu_data_base_ptr;
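
For reference, a sketch of how the per_cpu() accessor used above could
resolve against this region; the actual macro lives elsewhere in the
tree, so the definition here is an assumption for illustration:

    /* assumed shape of the accessor, indexing the page-aligned array */
    #define per_cpu(name, pcpu_id) \
        (per_cpu_data_base_ptr[(pcpu_id)].name)

With this, per_cpu(smp_call_info, pcpu_id) picks out the smp_call_info
slot in the given cpu's per_cpu_region.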