/* zephyr/samples/testing/unit/main.c */
/*
* Copyright (c) 2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <ztest.h>
#include <net/buf.h>
struct net_buf_pool _net_buf_pool_list[1];
/* Stub: interrupt masking is a no-op in unit-test builds.
 * Always hands back a dummy lock key of 0.
 */
unsigned int irq_lock(void)
{
	unsigned int dummy_key = 0U;

	return dummy_key;
}
/* Stub: counterpart of irq_lock(); discards the key and does nothing. */
void irq_unlock(unsigned int key)
{
	(void)key;
}
#include <net/buf.c>
void k_queue_init(struct k_queue *fifo) {}
void k_queue_append_list(struct k_queue *fifo, void *head, void *tail) {}
/* Stub: unit tests always run in thread context, never in an ISR. */
int k_is_in_isr(void)
{
	int in_isr = 0;

	return in_isr;
}
/* Stub: the queue is always empty in this unit test; never blocks,
 * regardless of the requested timeout.
 */
void *k_queue_get(struct k_queue *fifo, s32_t timeout)
{
	(void)fifo;
	(void)timeout;

	return NULL;
}
/* Stub: appending to a queue is a no-op in this unit test. */
void k_queue_append(struct k_queue *fifo, void *data)
{
	(void)fifo;
	(void)data;
}
/* Stub: prepending to a queue is a no-op in this unit test. */
void k_queue_prepend(struct k_queue *fifo, void *data)
{
	(void)fifo;
	(void)data;
}
/* Pool of a single 74-byte buffer with sizeof(int) user data per buffer. */
#define TEST_BUF_COUNT 1
#define TEST_BUF_SIZE 74
NET_BUF_POOL_DEFINE(bufs_pool, TEST_BUF_COUNT, TEST_BUF_SIZE,
sizeof(int), NULL);
static void test_get_single_buffer(void)
{
struct net_buf *buf;
net: buf: Redesigned pool & buffer allocation API Until now it has been necessary to separately define a k_fifo and an array of buffers when creating net_buf pools. This has been a bit of an inconvenience as well as blurred the line of what exactly constitutes the "pool". This patch removes the NET_BUF_POOL() macro and replaces it with a NET_BUF_POOL_DEFINE() macro that internally expands into the buffer array and new net_buf_pool struct with a given name: NET_BUF_POOL_DEFINE(pool_name, ...); Having a dedicated context struct for the pool has the added benefit that we can start moving there net_buf members that have the same value for all buffers from the same pool. The first such member that gets moved is the destroy callback, thus shrinking net_buf by four bytes. Another potential candidate is the user_data_size, however right not that's left out since it would just leave 2 bytes of padding in net_buf (i.e. not influence its size). Another common value is buf->size, however that one is also used by net_buf_simple and can therefore not be moved. This patch also splits getting buffers from a FIFO and allocating a new buffer from a pool into two separate APIs: net_buf_get and net_buf_alloc, thus simplifying the APIs and their usage. There is no separate 'reserve_head' parameter anymore when allocating, rather the user is expected to call net_buf_reserve() afterwards if something else than 0 headroom is desired. Change-Id: Id91b1e5c2be2deb1274dde47f5edebfe29af383a Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
2016-10-19 04:24:51 +08:00
buf = net_buf_alloc(&bufs_pool, K_NO_WAIT);
zassert_equal(buf->ref, 1, "Invalid refcount");
zassert_equal(buf->len, 0, "Invalid length");
zassert_equal(buf->flags, 0, "Invalid flags");
zassert_equal_ptr(buf->frags, NULL, "Frags not NULL");
}
/* ztest entry point: registers the net_buf suite and runs it. */
void test_main(void)
{
ztest_test_suite(net_buf_test,
ztest_unit_test(test_get_single_buffer)
);
ztest_run_test_suite(net_buf_test);
}