/* ring_buffer.c: Simple ring buffer API */

/*
 * Copyright (c) 2015 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/sys/ring_buffer.h>
#include <string.h>

/*
 * Implementation note (from the "great simplification" rework): all
 * indexes (put_head/put_tail/put_base and their get_* counterparts) are
 * free-running 32-bit counters that rely on well-defined unsigned
 * wraparound, with no saturation. Only simple add, subtract and compare
 * operations are used; no modulus is needed, even for non-power-of-2
 * buffer sizes.
 */

uint32_t ring_buf_put_claim(struct ring_buf *buf, uint8_t **data, uint32_t size)
{
	uint32_t free_space, wrap_size;
	int32_t base;

	base = buf->put_base;
	wrap_size = buf->put_head - base;
	if (unlikely(wrap_size >= buf->size)) {
		/* put_base is not yet adjusted */
		wrap_size -= buf->size;
		base += buf->size;
	}
	wrap_size = buf->size - wrap_size;

	free_space = ring_buf_space_get(buf);
	size = MIN(size, free_space);
	size = MIN(size, wrap_size);

	*data = &buf->buffer[buf->put_head - base];
	buf->put_head += size;

	return size;
}
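
/*
 * Worked example for the arithmetic above (illustrative only): with
 * buf->size == 10, put_base == 0 and put_head == 8, wrap_size starts at
 * 8, so buf->size - wrap_size == 2: at most 2 contiguous bytes can be
 * claimed before the write position wraps to the start of storage. A
 * second claim would then return a pointer back at offset 0, after
 * put_base has been virtually advanced by buf->size.
 */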

int ring_buf_put_finish(struct ring_buf *buf, uint32_t size)
{
	uint32_t finish_space, wrap_size;

	finish_space = buf->put_head - buf->put_tail;
	if (unlikely(size > finish_space)) {
		return -EINVAL;
	}

	buf->put_tail += size;
	buf->put_head = buf->put_tail;

	wrap_size = buf->put_tail - buf->put_base;
	if (unlikely(wrap_size >= buf->size)) {
		/* we wrapped: adjust put_base */
		buf->put_base += buf->size;
	}

	return 0;
}
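
/*
 * Usage sketch (hypothetical, not part of this file): zero-copy
 * production with the claim/finish pair. "my_buf" and "fill_from_isr"
 * are made-up names.
 *
 *	uint8_t *ptr;
 *	uint32_t claimed = ring_buf_put_claim(&my_buf, &ptr, 64);
 *
 *	fill_from_isr(ptr, claimed);		// write directly in place
 *	ring_buf_put_finish(&my_buf, claimed);	// commit what was written
 *
 * Finishing with fewer bytes than were claimed is fine; finishing with
 * more than was claimed returns -EINVAL, as above.
 */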

uint32_t ring_buf_put(struct ring_buf *buf, const uint8_t *data, uint32_t size)
{
	uint8_t *dst;
	uint32_t partial_size;
	uint32_t total_size = 0U;
	int err;

	do {
		partial_size = ring_buf_put_claim(buf, &dst, size);
		memcpy(dst, data, partial_size);
		total_size += partial_size;
		size -= partial_size;
		data += partial_size;
	} while (size && partial_size);

	err = ring_buf_put_finish(buf, total_size);
	__ASSERT_NO_MSG(err == 0);
	ARG_UNUSED(err);

	return total_size;
}
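
/*
 * Usage sketch (hypothetical names): the copying variant accepts a
 * partial write when the buffer is almost full, so the return value
 * must be checked.
 *
 *	uint32_t written = ring_buf_put(&my_buf, msg, msg_len);
 *
 *	if (written < msg_len) {
 *		// only `written` bytes were stored; the rest was dropped
 *	}
 */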

uint32_t ring_buf_get_claim(struct ring_buf *buf, uint8_t **data, uint32_t size)
{
	uint32_t available_size, wrap_size;
	int32_t base;

	base = buf->get_base;
	wrap_size = buf->get_head - base;
	if (unlikely(wrap_size >= buf->size)) {
		/* get_base is not yet adjusted */
		wrap_size -= buf->size;
		base += buf->size;
	}
	wrap_size = buf->size - wrap_size;

	available_size = ring_buf_size_get(buf);
	size = MIN(size, available_size);
	size = MIN(size, wrap_size);

	*data = &buf->buffer[buf->get_head - base];
	buf->get_head += size;

	return size;
}

int ring_buf_get_finish(struct ring_buf *buf, uint32_t size)
{
	uint32_t finish_space, wrap_size;

	finish_space = buf->get_head - buf->get_tail;
	if (unlikely(size > finish_space)) {
		return -EINVAL;
	}

	buf->get_tail += size;
	buf->get_head = buf->get_tail;

	wrap_size = buf->get_tail - buf->get_base;
	if (unlikely(wrap_size >= buf->size)) {
		/* we wrapped: adjust get_base */
		buf->get_base += buf->size;
	}

	return 0;
}
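
/*
 * Usage sketch (hypothetical names): zero-copy consumption mirrors the
 * put side; data is processed in place, then released.
 *
 *	uint8_t *ptr;
 *	uint32_t claimed = ring_buf_get_claim(&my_buf, &ptr, 64);
 *
 *	process(ptr, claimed);			// read directly from the buffer
 *	ring_buf_get_finish(&my_buf, claimed);	// free the consumed bytes
 */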

uint32_t ring_buf_get(struct ring_buf *buf, uint8_t *data, uint32_t size)
{
	uint8_t *src;
	uint32_t partial_size;
	uint32_t total_size = 0U;
	int err;

	do {
		partial_size = ring_buf_get_claim(buf, &src, size);
		if (data) {
			memcpy(data, src, partial_size);
			data += partial_size;
		}
		total_size += partial_size;
		size -= partial_size;
	} while (size && partial_size);

	err = ring_buf_get_finish(buf, total_size);
	__ASSERT_NO_MSG(err == 0);
	ARG_UNUSED(err);

	return total_size;
}
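
/*
 * Note: as the NULL check above shows, passing data == NULL discards
 * bytes without copying them; e.g. (with a hypothetical buffer)
 * ring_buf_get(&my_buf, NULL, n) drops up to n bytes.
 */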

uint32_t ring_buf_peek(struct ring_buf *buf, uint8_t *data, uint32_t size)
{
	uint8_t *src;
	uint32_t partial_size;
	uint32_t total_size = 0U;
	int err;

	size = MIN(size, ring_buf_size_get(buf));

	do {
		partial_size = ring_buf_get_claim(buf, &src, size);
		__ASSERT_NO_MSG(data != NULL);
		memcpy(data, src, partial_size);
		data += partial_size;
		total_size += partial_size;
		size -= partial_size;
	} while (size && partial_size);

	/* effectively unclaim total_size bytes */
	err = ring_buf_get_finish(buf, 0);
	__ASSERT_NO_MSG(err == 0);
	ARG_UNUSED(err);

	return total_size;
}
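
/*
 * Peek reuses the get-side claim machinery: bytes are claimed and
 * copied out, then ring_buf_get_finish(buf, 0) resets get_head back to
 * get_tail so nothing is actually consumed. A subsequent ring_buf_get()
 * will return the same bytes again.
 */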

/**
 * Internal data structure for a buffer header.
 *
 * We want all of this to fit in a single uint32_t. Every item stored in the
 * ring buffer will be one of these headers plus any extra data supplied.
 */
struct ring_element {
	uint32_t type   :16; /**< Application-specific */
	uint32_t length :8;  /**< length in 32-bit chunks */
	uint32_t value  :8;  /**< Room for small integral values */
};
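
/*
 * The bit-field widths above sum to exactly 32 bits, so the header is a
 * single word. An item with length == N therefore occupies 4 * (N + 1)
 * bytes in the buffer: one header word plus N 32-bit data words.
 */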

int ring_buf_item_put(struct ring_buf *buf, uint16_t type, uint8_t value,
		      uint32_t *data32, uint8_t size32)
{
	uint8_t *dst, *data = (uint8_t *)data32;
	struct ring_element *header;
	uint32_t space, size, partial_size, total_size;
	int err;

	space = ring_buf_space_get(buf);
	size = size32 * 4;
	if (size + sizeof(struct ring_element) > space) {
		return -EMSGSIZE;
	}

	err = ring_buf_put_claim(buf, &dst, sizeof(struct ring_element));
	__ASSERT_NO_MSG(err == sizeof(struct ring_element));

	header = (struct ring_element *)dst;
	header->type = type;
	header->length = size32;
	header->value = value;
	total_size = sizeof(struct ring_element);

	do {
		partial_size = ring_buf_put_claim(buf, &dst, size);
		memcpy(dst, data, partial_size);
		size -= partial_size;
		total_size += partial_size;
		data += partial_size;
	} while (size && partial_size);
	__ASSERT_NO_MSG(size == 0);

	err = ring_buf_put_finish(buf, total_size);
	__ASSERT_NO_MSG(err == 0);
	ARG_UNUSED(err);

	return 0;
}

int ring_buf_item_get(struct ring_buf *buf, uint16_t *type, uint8_t *value,
		      uint32_t *data32, uint8_t *size32)
{
	uint8_t *src, *data = (uint8_t *)data32;
	struct ring_element *header;
	uint32_t size, partial_size, total_size;
	int err;

	if (ring_buf_is_empty(buf)) {
		return -EAGAIN;
	}

	err = ring_buf_get_claim(buf, &src, sizeof(struct ring_element));
	__ASSERT_NO_MSG(err == sizeof(struct ring_element));

	header = (struct ring_element *)src;

	if (data && (header->length > *size32)) {
		*size32 = header->length;
		ring_buf_get_finish(buf, 0);
		return -EMSGSIZE;
	}

	*size32 = header->length;
	*type = header->type;
	*value = header->value;
	total_size = sizeof(struct ring_element);

	size = *size32 * 4;

	do {
		partial_size = ring_buf_get_claim(buf, &src, size);
		if (data) {
			memcpy(data, src, partial_size);
			data += partial_size;
		}
		total_size += partial_size;
		size -= partial_size;
	} while (size && partial_size);

	err = ring_buf_get_finish(buf, total_size);
	__ASSERT_NO_MSG(err == 0);
	ARG_UNUSED(err);

	return 0;
}
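
/*
 * Usage sketch (hypothetical names and values): the item API round
 * trip. When the supplied array is too small, ring_buf_item_get()
 * returns -EMSGSIZE and writes the required length into size32, so the
 * caller can retry with a larger buffer.
 *
 *	uint32_t payload[2] = { 0xaa, 0xbb };
 *	uint32_t out[2];
 *	uint16_t type;
 *	uint8_t value, size32 = ARRAY_SIZE(out);
 *
 *	ring_buf_item_put(&my_buf, 0x1234, 7, payload, 2);
 *	int rc = ring_buf_item_get(&my_buf, &type, &value, out, &size32);
 *	// rc == 0, type == 0x1234, value == 7, size32 == 2 on success
 */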