
Removes host_queue, main = k_sleep(K_FOREVER)

This commit is contained in:
Shawn Nock 2020-01-27 14:25:59 -05:00
parent 84c42f5605
commit 3ecf65feb1
1 changed file with 348 additions and 387 deletions
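
The RX/TX work now lives entirely in dedicated threads and main() merely starts them, so the old host_queue pump is gone. A condensed sketch of the resulting entry point (names taken from the hunks below):

void main(void)
{
	gpio_init();                    /* bootloader-request pin */
	bt_enable_raw(&h5.event_queue); /* controller events land in event_queue */
	h5_init();                      /* spawns tx, unproc, event and uart_tx threads */
	k_sleep(K_FOREVER);             /* nothing left for main to do */
}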

@@ -128,7 +128,7 @@ static struct h5 {
//struct net_buf *rx_buf;
struct k_fifo controller_queue;
struct k_fifo host_queue;
//struct k_fifo host_queue;
struct k_fifo unack_queue;
struct k_fifo unprocessed_queue;
struct k_fifo event_queue;
@@ -158,7 +158,7 @@ static const u8_t conf_rsp[] = {0x04, 0x7b, MAX_PACKETS_IN_FLIGHT};
NET_BUF_POOL_DEFINE(h5_pool, SIGNAL_COUNT, SIG_BUF_SIZE, 0, NULL);
/* H5 Packet Buf */
#define MAX_PACKET_LEN 255 // CMD Header + 255 max payload
#define MAX_PACKET_LEN 255
#define PACKET_BUF_SIZE (CONFIG_BT_HCI_RESERVE + MAX_PACKET_LEN)
NET_BUF_POOL_DEFINE(h5_pack_pool, MAX_PACKETS_IN_FLIGHT + 10, PACKET_BUF_SIZE,
0, NULL);
@@ -497,322 +497,6 @@ bt_uart_isr(struct device *unused)
}
}
/*void bt_uart_isr(struct device *unused) {
static u8_t byte;
static struct net_buf *buf = NULL;
static h5_hdr_t hdr = {0};
static enum slip_state {
SYNC,
NOSYNC,
} slip_state = NOSYNC;
ARG_UNUSED(unused);
while (uart_irq_update(hci_uart_dev) &&
uart_irq_is_pending(hci_uart_dev)) {
if (!uart_irq_rx_ready(hci_uart_dev)) {
*//* Only the UART RX path is interrupt-enabled *//*
break;
}
if (!buf) {
buf = net_buf_alloc(&h5_pack_pool, K_NO_WAIT);
REBOOT_IF_NOT(buf);
}
while (uart_fifo_read(hci_uart_dev, &byte, sizeof(byte))) {
if (slip_state == NOSYNC && byte != SLIP_DELIMITER) {
continue;
}
if (byte == SLIP_DELIMITER) {
if (buf->len > 0 && slip_state == SYNC) {
net_buf_put(&h5.unprocessed_queue, buf);
buf = NULL;
}
slip_state = SYNC;
} else {
net_buf_add_u8(buf, byte);
if (unslip_header(buf->data, buf->len, &hdr)) {
// We have the header; check length
if (hdr.len + sizeof(h5_hdr_t) > buf->size) {
// We cannot process a packet this large
net_buf_reset(buf);
slip_state = NOSYNC;
}
}
}
}
}
}*/
static void
unproc_thread(void)
{
struct net_buf *buf;
k_thread_name_set(NULL, "unproc_thread");
while (true) {
buf = net_buf_get(&h5.unprocessed_queue, K_FOREVER);
h5_hdr_t hdr = {0};
int r = pull_header1(buf, &hdr);
if (r < 0) {
LOG_WRN("Bad h5 header: %d, dropping packet", r);
goto next;
}
//LOG_HEXDUMP_DBG((uint8_t*)&hdr, sizeof(hdr), "Header");
if (hdr.is_reliable) {
LOG_DBG("Header: seq %d, ack %d, rel: %s, has_crc16: %s",
hdr.seq, hdr.ack,
hdr.is_reliable ? "true" : "false",
hdr.crc_after_payload ? "true" : "false");
if (hdr.seq != h5.next_expected_seq_from_host) {
LOG_ERR("Seq expected %u got %u. Drop packet",
h5.next_expected_seq_from_host,
hdr.seq);
goto next;
}
k_delayed_work_submit(&ack_work, H5_RX_ACK_TIMEOUT);
h5.next_expected_seq_from_host = next_seq(hdr.seq);
}
if (h5.link_state < ACTIVE
&& hdr.packet_type != HCI_3WIRE_LINK_PKT) {
LOG_WRN("Dropping unexpected packet type (%d) in non-ACTIVE state (%d)",
hdr.packet_type, h5.link_state);
goto next;
}
struct net_buf *rx_buf = NULL;
switch (hdr.packet_type) {
case HCI_ACLDATA_PKT:
rx_buf = net_buf_alloc(&acl_tx_pool, K_NO_WAIT);
REBOOT_IF_NOT(rx_buf);
bt_buf_set_type(rx_buf, BT_BUF_ACL_OUT);
break;
case HCI_COMMAND_PKT:
rx_buf = net_buf_alloc(&cmd_tx_pool, K_NO_WAIT);
REBOOT_IF_NOT(rx_buf);
bt_buf_set_type(rx_buf, BT_BUF_CMD);
break;
case HCI_3WIRE_ACK_PKT:
// h5.rx_ack = hdr.ack;
// We currently don't retransmit, so drop ack packets
goto next;
case HCI_3WIRE_LINK_PKT:
rx_buf = net_buf_alloc(&h5_pool, K_NO_WAIT);
REBOOT_IF_NOT(rx_buf);
break;
default:
LOG_ERR("Wrong packet type from host: %u",
hdr.packet_type);
goto next;
}
int byte;
while ((byte = unslip_next_byte(buf)) >= 0) {
net_buf_add_u8(rx_buf, (u8_t) byte);
}
if (hdr.len != rx_buf->len) {
LOG_WRN("Truncated payload, skipping\n");
net_buf_unref(rx_buf);
goto next;
}
switch (hdr.packet_type) {
case HCI_3WIRE_ACK_PKT:
// No further action required
break;
case HCI_3WIRE_LINK_PKT:
net_buf_put(&h5.host_queue, rx_buf);
break;
case HCI_COMMAND_PKT:
case HCI_ACLDATA_PKT:
//LOG_DBG("Adding to controller queue\n");
net_buf_put(&h5.controller_queue, rx_buf);
break;
default:
LOG_WRN("Unknown packet type %u\n", hdr.packet_type);
break;
}
next:
net_buf_unref(buf);
}
}
static void h5_send_sync(void);
static void h5_send_config(void);
static void
tx_thread(void)
{
k_thread_name_set(NULL, "tx_thread");
while (true) {
struct net_buf *buf;
switch (h5.link_state) {
case UNINIT:
h5_send_sync();
LOG_DBG("Autonomous SYNC");
k_sleep(250);
break;
case INIT:
h5_send_config();
LOG_DBG("Autonomous CONFIG");
k_sleep(250);
break;
case ACTIVE:
while ((buf = net_buf_get(&h5.controller_queue,
K_FOREVER))) {
bt_send(buf);
}
//LOG_DBG("controller_queue empty");
/* buf is dequeued from tx_queue and queued to unack
* queue.
*/
//net_buf_put(&h5.unack_queue, buf);
//unack_queue_len++;
//k_delayed_work_submit(&retx_work, H5_TX_ACK_TIMEOUT);
break;
}
}
}
static void
event_thread(void)
{
static struct net_buf *buf = NULL;
while (true) {
buf = net_buf_get(&h5.event_queue, K_MSEC(1000));
if (!buf) {
//LOG_WRN("event_queue is empty");
continue;
}
net_buf_put(&h5.host_queue, buf);
}
}
static void
uart_tx_thread(void)
{
while (true) {
struct net_buf *buf = net_buf_get(&h5.uart_tx_queue, K_FOREVER);
if (!buf) {
LOG_ERR("uart_tx_queue buf is NULL");
continue;
}
uart_poll_out(hci_uart_dev, SLIP_DELIMITER);
for (size_t i = 0, j = buf->len; i < j; i++) {
h5_slip_byte(net_buf_pull_u8(buf));
}
uart_poll_out(hci_uart_dev, SLIP_DELIMITER);
net_buf_unref(buf);
}
}
static void
h5_init(void)
{
LOG_DBG("h5_init");
h5.link_state = UNINIT;
h5.tx_win = 4;
/* TX thread - handles periodic h5_link messages and forwards commands to the controller */
k_fifo_init(&h5.controller_queue);
k_thread_create(&tx_thread_data, tx_stack,
K_THREAD_STACK_SIZEOF(tx_stack),
(k_thread_entry_t) tx_thread, NULL, NULL, NULL,
K_PRIO_PREEMPT(1),
0, K_NO_WAIT);
k_fifo_init(&h5.host_queue);
/*k_thread_create(&rx_thread_data, rx_stack,
K_THREAD_STACK_SIZEOF(rx_stack),
(k_thread_entry_t)rx_thread, NULL, NULL, NULL,
K_PRIO_COOP(CONFIG_BT_RX_PRIO),
0, K_NO_WAIT);*/
/* Unack queue */
k_fifo_init(&h5.unack_queue);
/* Thread & queue to un-slip and un-h5 incoming packets */
k_fifo_init(&h5.unprocessed_queue);
k_thread_create(&unproc_thread_data, unproc_stack,
K_THREAD_STACK_SIZEOF(unproc_stack),
(k_thread_entry_t) unproc_thread, NULL, NULL, NULL,
K_PRIO_PREEMPT(1),
0, K_NO_WAIT);
/* Thread handles incoming events from BT Controller */
k_fifo_init(&h5.event_queue);
k_thread_create(&event_thread_data, event_stack,
K_THREAD_STACK_SIZEOF(event_stack),
(k_thread_entry_t) event_thread, NULL, NULL, NULL,
K_PRIO_PREEMPT(2),
0, K_NO_WAIT);
/* Thread writes outgoing packets to the UART */
k_fifo_init(&h5.uart_tx_queue);
k_thread_create(&uart_tx_thread_data, uart_tx_stack,
K_THREAD_STACK_SIZEOF(uart_tx_stack),
(k_thread_entry_t) uart_tx_thread, NULL, NULL, NULL,
K_PRIO_PREEMPT(0),
0, K_NO_WAIT);
/* Init delayed work */
k_delayed_work_init(&ack_work, ack_timeout);
//k_delayed_work_init(&retx_work, retx_timeout);
}
static int
h5_open(struct device *unused)
{
LOG_DBG("h5_open");
hci_uart_dev = device_get_binding(CONFIG_BT_CTLR_TO_HOST_UART_DEV_NAME);
if (!hci_uart_dev) {
return -EINVAL;
}
uart_irq_rx_disable(hci_uart_dev);
uart_irq_tx_disable(hci_uart_dev);
/* Enable HFXO so that UART clock is stable for 115200 bitrate */
*(uint32_t *) 0x40000000 = 0x1;
bt_uart_drain(hci_uart_dev);
uart_irq_callback_set(hci_uart_dev, bt_uart_isr);
uart_irq_rx_enable(hci_uart_dev);
return 0;
}
#if defined(CONFIG_BT_CTLR_ASSERT_HANDLER)
void bt_ctlr_assert_handle(char *file, u32_t line)
{
LOG_ERR("BT_CTLR Assert %s:%u", file, line);
sys_reboot(SYS_REBOOT_COLD);
}
#endif /* CONFIG_BT_CTLR_ASSERT_HANDLER */
DEVICE_INIT(hci_uart, "hci_uart", &h5_open, NULL, NULL,
APPLICATION, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);
enum h5_pkt_types {
H5_UNKNOWN = -1,
H5_SYNC,
@@ -893,50 +577,6 @@ h5_send_config_response(void)
_send_link_control(conf_rsp, sizeof(conf_rsp));
}
static struct device *gpio_dev;
static struct gpio_callback gpio_cb;
void
gpio_callback(struct device *port,
struct gpio_callback *cb, u32_t pins)
{
LOG_ERR("Host wants the bootloader, rebooting\n");
k_sleep(250);
sys_reboot(SYS_REBOOT_COLD);
}
#define BOOTLOADER_REQ_GPIO_PIN 8
#define BOOTLOADER_STATUS_GPIO_PIN 7
void
gpio_init()
{
int ret;
gpio_dev = device_get_binding("GPIO_0");
if (!gpio_dev) {
LOG_ERR("Cannot find %s!\n", "GPIO_0");
}
ret = gpio_pin_configure(gpio_dev, BOOTLOADER_REQ_GPIO_PIN,
(GPIO_DIR_IN | GPIO_INT | GPIO_INT_EDGE | GPIO_INT_ACTIVE_LOW
| GPIO_PUD_PULL_UP));
if (ret) {
LOG_ERR("Error configuring GPIO_%d!\n",
BOOTLOADER_REQ_GPIO_PIN);
}
gpio_init_callback(&gpio_cb, gpio_callback,
BIT(BOOTLOADER_REQ_GPIO_PIN));
ret = gpio_add_callback(gpio_dev, &gpio_cb);
if (ret) {
LOG_ERR("Cannot setup callback!\n");
}
ret = gpio_pin_enable_callback(gpio_dev, BOOTLOADER_REQ_GPIO_PIN);
if (ret) {
LOG_ERR("Error enabling callback!\n");
}
}
static h5_linkstate_t
do_uninit(struct net_buf const *buf)
{
@@ -1002,13 +642,8 @@ do_active(struct net_buf *buf)
LOG_DBG("RX'd CONFIG RESPONSE: ACTIVE -> ACTIVE");
return ACTIVE;
default:
// Presumably something from the controller
if (bt_buf_get_type(buf) == BT_BUF_EVT) {
h5_send(buf->data, HCI_EVENT_PKT, buf->len);
} else {
LOG_HEXDUMP_ERR(buf->data, buf->len,
"Unexpected buffer in host_queue");
}
LOG_HEXDUMP_ERR(buf->data, buf->len,
"Unexpected buffer in do_active");
return ACTIVE;
}
}
@@ -1027,6 +662,348 @@ state_change(int old, int new)
}
}
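/* unslip_link_buf() decodes a SLIP-escaped link-control payload in place:
 * unslip_next_byte() consumes escaped bytes from the front of buf and the
 * decoded bytes are appended back, so buf ends up holding the plain payload
 * for the do_uninit/do_init/do_active handlers. */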
static size_t
unslip_link_buf(struct net_buf *buf)
{
uint8_t link_buf[MAX_SIG_LEN];
size_t i = 0;
int maybe_byte;
while ((maybe_byte = unslip_next_byte(buf)) >= 0) {
if (i > MAX_SIG_LEN - 1) {
LOG_ERR("No space in unslip_buf output");
return i;
}
link_buf[i] = (uint8_t)maybe_byte;
i += 1;
}
net_buf_add_mem(buf, link_buf, i);
return i;
}
static void
do_link_packet(struct net_buf *buf)
{
unslip_link_buf(buf);
switch (h5.link_state) {
case UNINIT:
state_change(h5.link_state, do_uninit(buf));
break;
case INIT:
state_change(h5.link_state, do_init(buf));
break;
case ACTIVE:
state_change(h5.link_state, do_active(buf));
break;
}
}
static void
unproc_thread(void)
{
struct net_buf *buf;
k_thread_name_set(NULL, "unproc_thread");
while (true) {
buf = net_buf_get(&h5.unprocessed_queue, K_FOREVER);
h5_hdr_t hdr = {0};
int r = pull_header1(buf, &hdr);
if (r < 0) {
LOG_WRN("Bad h5 header: %d, dropping packet", r);
goto next;
}
//LOG_HEXDUMP_DBG((uint8_t*)&hdr, sizeof(hdr), "Header");
if (hdr.is_reliable) {
LOG_DBG("Header: seq %d, ack %d, rel: %s, has_crc16: %s",
hdr.seq, hdr.ack,
hdr.is_reliable ? "true" : "false",
hdr.crc_after_payload ? "true" : "false");
if (hdr.seq != h5.next_expected_seq_from_host) {
LOG_ERR("Seq expected %u got %u. Drop packet",
h5.next_expected_seq_from_host,
hdr.seq);
goto next;
}
k_delayed_work_submit(&ack_work, H5_RX_ACK_TIMEOUT);
h5.next_expected_seq_from_host = next_seq(hdr.seq);
}
if (h5.link_state < ACTIVE
&& hdr.packet_type != HCI_3WIRE_LINK_PKT) {
LOG_WRN("Dropping unexpected packet type (%d) in non-ACTIVE state (%d)",
hdr.packet_type, h5.link_state);
goto next;
}
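/* Link-control packets are consumed inline below; command and data packets
 * fall through to be copied into a fresh buffer and forwarded. */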
if (hdr.packet_type == HCI_3WIRE_LINK_PKT) {
do_link_packet(buf);
goto next;
}
struct net_buf *rx_buf = NULL;
switch (hdr.packet_type) {
case HCI_ACLDATA_PKT:
rx_buf = net_buf_alloc(&acl_tx_pool, K_NO_WAIT);
REBOOT_IF_NOT(rx_buf);
bt_buf_set_type(rx_buf, BT_BUF_ACL_OUT);
break;
case HCI_COMMAND_PKT:
rx_buf = net_buf_alloc(&cmd_tx_pool, K_NO_WAIT);
REBOOT_IF_NOT(rx_buf);
bt_buf_set_type(rx_buf, BT_BUF_CMD);
break;
case HCI_3WIRE_ACK_PKT:
// h5.rx_ack = hdr.ack;
// We currently don't retransmit, so drop ack packets
goto next;
default:
LOG_ERR("Wrong packet type from host: %u",
hdr.packet_type);
goto next;
}
int byte;
while ((byte = unslip_next_byte(buf)) >= 0) {
net_buf_add_u8(rx_buf, (u8_t) byte);
}
if (hdr.len != rx_buf->len) {
LOG_WRN("Truncated payload, skipping\n");
net_buf_unref(rx_buf);
goto next;
}
switch (hdr.packet_type) {
case HCI_3WIRE_ACK_PKT:
// No further action required
break;
case HCI_COMMAND_PKT:
case HCI_ACLDATA_PKT:
//LOG_DBG("Adding to controller queue\n");
net_buf_put(&h5.controller_queue, rx_buf);
break;
default:
LOG_WRN("Unknown packet type %u\n", hdr.packet_type);
break;
}
next:
net_buf_unref(buf);
}
}
static void
tx_thread(void)
{
k_thread_name_set(NULL, "tx_thread");
while (true) {
struct net_buf *buf;
switch (h5.link_state) {
case UNINIT:
h5_send_sync();
LOG_DBG("Autonomous SYNC");
k_sleep(250);
break;
case INIT:
h5_send_config();
LOG_DBG("Autonomous CONFIG");
k_sleep(250);
break;
case ACTIVE:
while ((buf = net_buf_get(&h5.controller_queue,
K_FOREVER))) {
bt_send(buf);
}
//LOG_DBG("controller_queue empty");
/* buf is dequeued from tx_queue and queued to unack
* queue.
*/
//net_buf_put(&h5.unack_queue, buf);
//unack_queue_len++;
//k_delayed_work_submit(&retx_work, H5_TX_ACK_TIMEOUT);
break;
}
}
}
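/* Controller-to-host path: bt_enable_raw() (see main) delivers controller
 * events into event_queue; event_thread frames them with h5_send(), which is
 * assumed to hand the framed packet to uart_tx_thread via uart_tx_queue. */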
static void
event_thread(void)
{
static struct net_buf *buf = NULL;
while (true) {
buf = net_buf_get(&h5.event_queue, K_FOREVER);
if (!buf) {
LOG_ERR("event_queue is empty");
continue;
}
h5_send(buf->data, HCI_EVENT_PKT, buf->len);
net_buf_unref(buf);
//net_buf_put(&h5.host_queue, buf);
}
}
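/* Sole writer to the UART: every outgoing packet is wrapped in SLIP
 * delimiters and byte-stuffed one byte at a time via h5_slip_byte(). */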
static void
uart_tx_thread(void)
{
while (true) {
struct net_buf *buf = net_buf_get(&h5.uart_tx_queue, K_FOREVER);
if (!buf) {
LOG_ERR("uart_tx_queue buf is NULL");
continue;
}
uart_poll_out(hci_uart_dev, SLIP_DELIMITER);
for (size_t i = 0, j = buf->len; i < j; i++) {
h5_slip_byte(net_buf_pull_u8(buf));
}
uart_poll_out(hci_uart_dev, SLIP_DELIMITER);
net_buf_unref(buf);
}
}
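/* Thread priorities: uart_tx at PREEMPT(0) drains the wire ahead of the
 * PREEMPT(1) producers (tx_thread, unproc_thread) and the PREEMPT(2)
 * event pump. */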
static void
h5_init(void)
{
LOG_DBG("h5_init");
h5.link_state = UNINIT;
h5.tx_win = 4;
/* TX thread - handles periodic h5_link messages and forwards commands to the controller */
k_fifo_init(&h5.controller_queue);
k_thread_create(&tx_thread_data, tx_stack,
K_THREAD_STACK_SIZEOF(tx_stack),
(k_thread_entry_t) tx_thread, NULL, NULL, NULL,
K_PRIO_PREEMPT(1),
0, K_NO_WAIT);
//k_fifo_init(&h5.host_queue);
/*k_thread_create(&rx_thread_data, rx_stack,
K_THREAD_STACK_SIZEOF(rx_stack),
(k_thread_entry_t)rx_thread, NULL, NULL, NULL,
K_PRIO_COOP(CONFIG_BT_RX_PRIO),
0, K_NO_WAIT);*/
/* Unack queue */
k_fifo_init(&h5.unack_queue);
/* Thread & queue to un-slip and un-h5 incoming packets */
k_fifo_init(&h5.unprocessed_queue);
k_thread_create(&unproc_thread_data, unproc_stack,
K_THREAD_STACK_SIZEOF(unproc_stack),
(k_thread_entry_t) unproc_thread, NULL, NULL, NULL,
K_PRIO_PREEMPT(1),
0, K_NO_WAIT);
/* Thread handles incoming events from BT Controller */
k_fifo_init(&h5.event_queue);
k_thread_create(&event_thread_data, event_stack,
K_THREAD_STACK_SIZEOF(event_stack),
(k_thread_entry_t) event_thread, NULL, NULL, NULL,
K_PRIO_PREEMPT(2),
0, K_NO_WAIT);
/* Thread writes outgoing packets to the UART */
k_fifo_init(&h5.uart_tx_queue);
k_thread_create(&uart_tx_thread_data, uart_tx_stack,
K_THREAD_STACK_SIZEOF(uart_tx_stack),
(k_thread_entry_t) uart_tx_thread, NULL, NULL, NULL,
K_PRIO_PREEMPT(0),
0, K_NO_WAIT);
/* Init delayed work */
k_delayed_work_init(&ack_work, ack_timeout);
//k_delayed_work_init(&retx_work, retx_timeout);
}
static int
h5_open(struct device *unused)
{
LOG_DBG("h5_open");
hci_uart_dev = device_get_binding(CONFIG_BT_CTLR_TO_HOST_UART_DEV_NAME);
if (!hci_uart_dev) {
return -EINVAL;
}
uart_irq_rx_disable(hci_uart_dev);
uart_irq_tx_disable(hci_uart_dev);
/* Enable HFXO so that UART clock is stable for 115200 bitrate */
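/* (Assumed to be nRF5x CLOCK->TASKS_HFCLKSTART: register offset 0 of the
 * CLOCK peripheral at base 0x40000000.) */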
*(uint32_t *) 0x40000000 = 0x1;
bt_uart_drain(hci_uart_dev);
uart_irq_callback_set(hci_uart_dev, bt_uart_isr);
uart_irq_rx_enable(hci_uart_dev);
return 0;
}
#if defined(CONFIG_BT_CTLR_ASSERT_HANDLER)
void bt_ctlr_assert_handle(char *file, u32_t line)
{
LOG_ERR("BT_CTLR Assert %s:%u", file, line);
sys_reboot(SYS_REBOOT_COLD);
}
#endif /* CONFIG_BT_CTLR_ASSERT_HANDLER */
DEVICE_INIT(hci_uart, "hci_uart", &h5_open, NULL, NULL,
APPLICATION, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);
static struct device *gpio_dev;
static struct gpio_callback gpio_cb;
void
gpio_callback(struct device *port,
struct gpio_callback *cb, u32_t pins)
{
LOG_ERR("Host wants the bootloader, rebooting\n");
sys_reboot(SYS_REBOOT_COLD);
}
#define BOOTLOADER_REQ_GPIO_PIN 8
#define BOOTLOADER_STATUS_GPIO_PIN 7
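/* BOOTLOADER_REQ is active low with an internal pull-up: the host pulls the
 * pin low, gpio_callback() fires, and the chip cold-reboots into the
 * bootloader. */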
void
gpio_init()
{
int ret;
gpio_dev = device_get_binding("GPIO_0");
if (!gpio_dev) {
LOG_ERR("Cannot find %s!\n", "GPIO_0");
}
ret = gpio_pin_configure(gpio_dev, BOOTLOADER_REQ_GPIO_PIN,
(GPIO_DIR_IN | GPIO_INT | GPIO_INT_EDGE | GPIO_INT_ACTIVE_LOW
| GPIO_PUD_PULL_UP));
if (ret) {
LOG_ERR("Error configuring GPIO_%d!\n",
BOOTLOADER_REQ_GPIO_PIN);
}
gpio_init_callback(&gpio_cb, gpio_callback,
BIT(BOOTLOADER_REQ_GPIO_PIN));
ret = gpio_add_callback(gpio_dev, &gpio_cb);
if (ret) {
LOG_ERR("Cannot setup callback!\n");
}
ret = gpio_pin_enable_callback(gpio_dev, BOOTLOADER_REQ_GPIO_PIN);
if (ret) {
LOG_ERR("Error enabling callback!\n");
}
}
void
main(void)
{
@@ -1034,27 +1011,11 @@ main(void)
gpio_init();
bt_enable_raw(&h5.event_queue);
struct net_buf *buf = NULL;
//struct net_buf *buf = NULL;
h5_init();
while (true) {
buf = net_buf_get(&h5.host_queue, K_MSEC(250));
if (!buf) {
continue;
}
switch (h5.link_state) {
case UNINIT:
state_change(h5.link_state, do_uninit(buf));
break;
case INIT:
state_change(h5.link_state, do_init(buf));
break;
case ACTIVE:
state_change(h5.link_state, do_active(buf));
break;
}
net_buf_unref(buf);
k_sleep(K_FOREVER);
}
}