/*
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef SPINLOCK_H
#define SPINLOCK_H

#ifndef ASSEMBLER

#include <types.h>
#include <rtl.h>

/** The architecture-dependent spinlock type: a FIFO ticket lock. */
|
|
|
|
typedef struct _spinlock {
|
|
|
|
uint32_t head;
|
|
|
|
uint32_t tail;
|
|
|
|
|
|
|
|
} spinlock_t;

/* Spinlock operations */
static inline void spinlock_init(spinlock_t *lock)
{
	(void)memset(lock, 0U, sizeof(spinlock_t));
}

static inline void spinlock_obtain(spinlock_t *lock)
{
	/* The lock function atomically increments and exchanges the head
	 * counter of the queue. If the old head of the queue is equal to the
	 * tail, we have locked the spinlock. Otherwise we have to wait.
	 */
	asm volatile ("   movl $0x1,%%eax\n"
		      "   lock xaddl %%eax,%[head]\n"
		      "   cmpl %%eax,%[tail]\n"
		      "   jz 1f\n"
		      "2: pause\n"
		      "   cmpl %%eax,%[tail]\n"
		      "   jnz 2b\n"
		      "1:\n"
		      :
		      : [head] "m"(lock->head),
			[tail] "m"(lock->tail)
		      : "cc", "memory", "eax");
}
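
/*
 * For reference, a plain-C sketch of the acquire sequence above, using
 * GCC's __atomic builtins (illustrative only and kept out of the build;
 * the real implementation is the inline asm, so the fetch-and-add is a
 * single lock xaddl):
 */
#if 0	/* example only */
static inline void spinlock_obtain_c(spinlock_t *lock)
{
	/* Take a ticket: atomically fetch-and-increment the head counter. */
	uint32_t ticket = __atomic_fetch_add(&lock->head, 1U, __ATOMIC_ACQUIRE);

	/* Spin until the tail counter reaches our ticket. */
	while (__atomic_load_n(&lock->tail, __ATOMIC_ACQUIRE) != ticket) {
		asm volatile ("pause" : : : "memory");
	}
}
#endif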

static inline void spinlock_release(spinlock_t *lock)
{
	/* Increment tail of queue */
	asm volatile ("   lock incl %[tail]\n"
		      :
		      : [tail] "m" (lock->tail)
		      : "cc", "memory");
}
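
/*
 * Usage sketch (illustrative only and kept out of the build; demo_lock
 * and demo_counter are hypothetical names, not part of this header):
 */
#if 0	/* example only */
static spinlock_t demo_lock;	/* static zero-init matches spinlock_init() */
static uint32_t demo_counter;

static inline void demo_locked_increment(void)
{
	spinlock_obtain(&demo_lock);	/* wait until our ticket is served */
	demo_counter++;			/* critical section */
	spinlock_release(&demo_lock);	/* hand the lock to the next waiter */
}
#endif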

#else /* ASSEMBLER */

/** The offset of the head element. */
#define SYNC_SPINLOCK_HEAD_OFFSET	0

/** The offset of the tail element. */
#define SYNC_SPINLOCK_TAIL_OFFSET	4

.macro spinlock_obtain lock
	movl $1, %eax
	lea \lock, %rbx
	lock xaddl %eax, SYNC_SPINLOCK_HEAD_OFFSET(%rbx)
	cmpl %eax, SYNC_SPINLOCK_TAIL_OFFSET(%rbx)
	jz 1f
2:
	pause
	cmpl %eax, SYNC_SPINLOCK_TAIL_OFFSET(%rbx)
	jnz 2b
1:
.endm

#define spinlock_obtain(x) spinlock_obtain lock = (x)

.macro spinlock_release lock
	lea \lock, %rbx
	lock incl SYNC_SPINLOCK_TAIL_OFFSET(%rbx)
.endm

#define spinlock_release(x) spinlock_release lock = (x)
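
/*
 * Illustrative note: in assembly sources the C-style spelling maps onto
 * the gas macros through the #defines above, e.g. (my_lock is a
 * hypothetical lock symbol, not part of this header):
 *
 *	spinlock_obtain lock = my_lock(%rip)
 *	... critical section ...
 *	spinlock_release lock = my_lock(%rip)
 *
 * Both macros clobber %rbx, and spinlock_obtain also clobbers %eax, so
 * callers must not keep live values in those registers.
 */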

#endif /* ASSEMBLER */

#define spinlock_irqsave_obtain(lock, p_rflags)		\
	do {						\
		CPU_INT_ALL_DISABLE(p_rflags);		\
		spinlock_obtain(lock);			\
	} while (0)

#define spinlock_irqrestore_release(lock, rflags)	\
	do {						\
		spinlock_release(lock);			\
		CPU_INT_ALL_RESTORE(rflags);		\
	} while (0)
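
/*
 * Usage sketch for the irqsave variants (illustrative only and kept out
 * of the build; assumes CPU_INT_ALL_DISABLE takes a pointer to the
 * uint64_t rflags save area, as the p_rflags parameter name suggests):
 */
#if 0	/* example only */
static spinlock_t demo_irq_lock;

static inline void demo_touch_shared_state(void)
{
	uint64_t rflags;

	/* Mask interrupts first so an ISR on this CPU cannot preempt the
	 * holder and deadlock on the same lock, then acquire it. */
	spinlock_irqsave_obtain(&demo_irq_lock, &rflags);
	/* ... critical section ... */
	spinlock_irqrestore_release(&demo_irq_lock, rflags);
}
#endif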
#endif /* SPINLOCK_H */