hv: add max_len for sbuf_put param
sbuf_put() copies sbuf->ele_size bytes of data and puts them into the ring. Currently the function assumes that the caller's data buffer is no smaller than sbuf->ele_size. But since sbuf->ele_size is usually set up by sources outside of the HV (e.g., the service VM), it is not meant to be trusted. The caller should therefore provide the maximum length of its data for safety reasons. sbuf_put() now returns UINT32_MAX if max_len is less than the element size.

Additionally, a helper function sbuf_put_many() is added for putting multiple entries at once.

Tracked-On: #8547
Signed-off-by: Wu Zhou <wu.zhou@intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>
parent 056f4abcfb
commit 925e3d95b4
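Below is a minimal caller-side sketch of the new contract. It assumes only the sbuf_put() prototype added in this commit; the buffer name my_event and its size are illustrative, not part of the change:

	/* Sketch only: pass the true size of the caller's buffer as max_len
	 * and check the result against the documented return values. */
	uint8_t my_event[64];
	uint32_t written;

	written = sbuf_put(sbuf, my_event, sizeof(my_event));
	if (written == UINT32_MAX) {
		/* sizeof(my_event) is smaller than sbuf->ele_size: nothing was copied */
	} else if (written == 0U) {
		/* ring is full and OVERWRITE_EN is not set: nothing was queued */
	} else {
		/* written == sbuf->ele_size: one element was copied into the ring */
	}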
@@ -27,8 +27,13 @@ uint32_t sbuf_next_ptr(uint32_t pos_arg,
 
 /**
  * The high caller should guarantee each time there must have
- * sbuf->ele_size data can be write form data and this function
- * should guarantee execution atomically.
+ * sbuf->ele_size data that can be written from data.
+ *
+ * As sbuf->ele_size is possibly setup by some sources outside of the
+ * HV (e.g. the service VM), it is not meant to be trusted. So the caller
+ * should provide the max length of the data for safety reasons.
+ *
+ * And this function should guarantee execution atomically.
  *
  * flag:
  * If OVERWRITE_EN set, buf can store (ele_num - 1) elements at most.
@@ -40,23 +45,24 @@ uint32_t sbuf_next_ptr(uint32_t pos_arg,
  * return:
  * ele_size: write succeeded.
  * 0: no write, buf is full
- * negative: failed.
+ * UINT32_MAX: failed, sbuf corrupted.
  */
-uint32_t sbuf_put(struct shared_buf *sbuf, uint8_t *data)
+uint32_t sbuf_put(struct shared_buf *sbuf, uint8_t *data, uint32_t max_len)
 {
 	void *to;
 	uint32_t next_tail;
-	uint32_t ele_size;
+	uint32_t ele_size, ret;
 	bool trigger_overwrite = false;
 
 	stac();
-	next_tail = sbuf_next_ptr(sbuf->tail, sbuf->ele_size, sbuf->size);
+	ele_size = sbuf->ele_size;
+	next_tail = sbuf_next_ptr(sbuf->tail, ele_size, sbuf->size);
 
 	if ((next_tail == sbuf->head) && ((sbuf->flags & OVERWRITE_EN) == 0U)) {
 		/* if overrun is not enabled, return 0 directly */
-		ele_size = 0U;
-	} else {
+		ret = 0U;
+	} else if (ele_size <= max_len) {
 		if (next_tail == sbuf->head) {
 			/* accumulate overrun count if necessary */
 			sbuf->overrun_cnt += sbuf->flags & OVERRUN_CNT_EN;
@@ -64,20 +70,23 @@ uint32_t sbuf_put(struct shared_buf *sbuf, uint8_t *data)
 		}
 		to = (void *)sbuf + SBUF_HEAD_SIZE + sbuf->tail;
 
-		(void)memcpy_s(to, sbuf->ele_size, data, sbuf->ele_size);
+		(void)memcpy_s(to, ele_size, data, max_len);
 		/* make sure write data before update head */
 		cpu_write_memory_barrier();
 
 		if (trigger_overwrite) {
 			sbuf->head = sbuf_next_ptr(sbuf->head,
-					sbuf->ele_size, sbuf->size);
+					ele_size, sbuf->size);
 		}
 		sbuf->tail = next_tail;
-		ele_size = sbuf->ele_size;
+		ret = ele_size;
+	} else {
+		/* there must be something wrong */
+		ret = UINT32_MAX;
 	}
 	clac();
 
-	return ele_size;
+	return ret;
 }
 
 int32_t sbuf_setup_common(struct acrn_vm *vm, uint16_t cpu_id, uint32_t sbuf_id, uint64_t *hva)
@@ -104,3 +113,29 @@ int32_t sbuf_setup_common(struct acrn_vm *vm, uint16_t cpu_id, uint32_t sbuf_id,
 
 	return ret;
 }
+
+/* try to put a batch of elements from data to sbuf
+ * data_size should be equal to n*elem_size; data not enough to fill an elem_size will be ignored.
+ *
+ * return:
+ * elem_size * n: bytes put in sbuf
+ * UINT32_MAX: failed, sbuf corrupted.
+ */
+uint32_t sbuf_put_many(struct shared_buf *sbuf, uint32_t elem_size, uint8_t *data, uint32_t data_size)
+{
+	uint32_t ret, sent = 0U;
+	uint32_t i;
+
+	for (i = 0U; i < (data_size / elem_size); i++) {
+		ret = sbuf_put(sbuf, data + i * elem_size, elem_size);
+		if (ret == elem_size) {
+			sent += ret;
+		} else {
+			if (ret == UINT32_MAX) {
+				sent = UINT32_MAX;
+			}
+			break;
+		}
+	}
+	return sent;
+}
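As a hedged illustration of the batching semantics described in the sbuf_put_many() comment above, the following sketch uses made-up sizes and assumes the ring's ele_size matches the elem_size passed in:

	/* Hypothetical sizes, for illustration only; assumes sbuf->ele_size == 32. */
	uint8_t samples[100];
	uint32_t queued;

	/* 100 / 32 == 3, so at most 3 elements (96 bytes) are queued;
	 * the trailing 4 bytes do not fill a whole element and are ignored. */
	queued = sbuf_put_many(sbuf, 32U, samples, sizeof(samples));
	/* queued == 96U on success, a smaller multiple of 32 if the ring filled
	 * up part-way through, or UINT32_MAX if sbuf_put() reported corruption. */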
@@ -37,7 +37,7 @@ int32_t send_vm_event(struct acrn_vm *vm, struct vm_event *event)
 
 	if (sbuf != NULL) {
 		spinlock_obtain(&vm->vm_event_lock);
-		size_sent = sbuf_put(sbuf, (uint8_t *)event);
+		size_sent = sbuf_put(sbuf, (uint8_t *)event, sizeof(*event));
 		spinlock_release(&vm->vm_event_lock);
 		if (size_sent == sizeof(struct vm_event)) {
 			arch_fire_hsm_interrupt();
@@ -91,18 +91,14 @@ void do_logmsg(uint32_t severity, const char *fmt, ...)
 
 	/* Check whether output to memory */
 	if (do_mem_log) {
-		uint32_t i, msg_len;
+		uint32_t msg_len;
 		struct shared_buf *sbuf = per_cpu(sbuf, pcpu_id)[ACRN_HVLOG];
 
 		/* If sbuf is not ready, we just drop the massage */
 		if (sbuf != NULL) {
 			msg_len = strnlen_s(buffer, LOG_MESSAGE_MAX_SIZE);
 
-			for (i = 0U; i < (((msg_len - 1U) / LOG_ENTRY_SIZE) + 1U);
-				i++) {
-				(void)sbuf_put(sbuf, (uint8_t *)buffer +
-					(i * LOG_ENTRY_SIZE));
-			}
+			(void)sbuf_put_many(sbuf, LOG_ENTRY_SIZE, (uint8_t *)buffer,
+				LOG_ENTRY_SIZE * (((msg_len - 1U) / LOG_ENTRY_SIZE) + 1));
 		}
 	}
 }
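The data_size passed to sbuf_put_many() above is just the message length rounded up to a whole number of log entries. A worked example, assuming an illustrative LOG_ENTRY_SIZE of 80 bytes (the actual constant's value is not part of this diff):

	uint32_t msg_len = 150U;
	uint32_t entries = ((msg_len - 1U) / LOG_ENTRY_SIZE) + 1U;	/* ceil(150 / 80) == 2 */
	uint32_t data_size = LOG_ENTRY_SIZE * entries;			/* 160 bytes handed to sbuf_put_many() */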
@@ -303,7 +303,6 @@ static int32_t profiling_sbuf_put_variable(struct shared_buf *sbuf,
  */
 static int32_t profiling_generate_data(int32_t collector, uint32_t type)
 {
-	uint64_t i;
 	uint32_t remaining_space = 0U;
 	int32_t ret = 0;
 	struct data_header pkt_header;
@@ -380,13 +379,8 @@ static int32_t profiling_generate_data(int32_t collector, uint32_t type)
 			return 0;
 		}
 
-		for (i = 0U; i < (((DATA_HEADER_SIZE - 1U) / SEP_BUF_ENTRY_SIZE) + 1U); i++) {
-			(void)sbuf_put(sbuf, (uint8_t *)&pkt_header + i * SEP_BUF_ENTRY_SIZE);
-		}
-
-		for (i = 0U; i < (((payload_size - 1U) / SEP_BUF_ENTRY_SIZE) + 1U); i++) {
-			(void)sbuf_put(sbuf, (uint8_t *)payload + i * SEP_BUF_ENTRY_SIZE);
-		}
+		(void)sbuf_put_many(sbuf, SEP_BUF_ENTRY_SIZE, (uint8_t *)&pkt_header, sizeof(pkt_header));
+		(void)sbuf_put_many(sbuf, SEP_BUF_ENTRY_SIZE, (uint8_t *)payload, payload_size);
 
 		ss->samples_logged++;
 	}
@@ -56,7 +56,7 @@ static inline void trace_put(uint16_t cpu_id, uint32_t evid, uint32_t n_data, st
 	entry->id = evid;
 	entry->n_data = (uint8_t)n_data;
 	entry->cpu = (uint8_t)cpu_id;
-	(void)sbuf_put(sbuf, (uint8_t *)entry);
+	(void)sbuf_put(sbuf, (uint8_t *)entry, sizeof(*entry));
 }
 
 void TRACE_2L(uint32_t evid, uint64_t e, uint64_t f)
@@ -176,7 +176,7 @@ static int acrn_insert_asyncio(struct acrn_vcpu *vcpu, const uint64_t asyncio_fd
 
 	if (sbuf != NULL) {
 		spinlock_obtain(&vm->asyncio_lock);
-		while (sbuf_put(sbuf, (uint8_t *)&asyncio_fd) == 0U) {
+		while (sbuf_put(sbuf, (uint8_t *)&asyncio_fd, sizeof(asyncio_fd)) == 0U) {
 			/* sbuf is full, try later.. */
 			spinlock_release(&vm->asyncio_lock);
 			asm_pause();
@@ -17,7 +17,8 @@
  *@pre sbuf != NULL
  *@pre data != NULL
  */
-uint32_t sbuf_put(struct shared_buf *sbuf, uint8_t *data);
+uint32_t sbuf_put(struct shared_buf *sbuf, uint8_t *data, uint32_t max_len);
+uint32_t sbuf_put_many(struct shared_buf *sbuf, uint32_t elem_size, uint8_t *data, uint32_t data_size);
 int32_t sbuf_share_setup(uint16_t cpu_id, uint32_t sbuf_id, uint64_t *hva);
 void sbuf_reset(void);
 uint32_t sbuf_next_ptr(uint32_t pos, uint32_t span, uint32_t scope);