
Update timeouts to match the core spec and un-SLIP packets in place

Shawn Nock 2019-08-03 10:26:58 -05:00
parent 247baf238a
commit 9bb1c8e77b
1 changed file with 276 additions and 563 deletions


@@ -18,7 +18,6 @@
#include <logging/log.h>
#include <logging/log_ctrl.h>
#include <misc/util.h>
#include <device.h>
@@ -40,25 +39,6 @@
static struct device *hci_uart_dev;
/* HCI command buffers */
#define CMD_BUF_SIZE BT_BUF_RX_SIZE
NET_BUF_POOL_DEFINE(cmd_tx_pool, CONFIG_BT_HCI_CMD_COUNT, CMD_BUF_SIZE,
BT_BUF_USER_DATA_MIN, NULL);
#define BT_L2CAP_MTU (CONFIG_BT_CTLR_TX_BUFFER_SIZE - BT_L2CAP_HDR_SIZE)
/** Data size needed for ACL buffers */
#define BT_BUF_ACL_SIZE BT_L2CAP_BUF_SIZE(BT_L2CAP_MTU)
#if defined(CONFIG_BT_CTLR_TX_BUFFERS)
#define TX_BUF_COUNT CONFIG_BT_CTLR_TX_BUFFERS
#else
#define TX_BUF_COUNT 6
#endif
NET_BUF_POOL_DEFINE(acl_tx_pool, TX_BUF_COUNT, BT_BUF_ACL_SIZE,
BT_BUF_USER_DATA_MIN, NULL);
static K_THREAD_STACK_DEFINE(tx_stack, 4096);
static struct k_thread tx_thread_data;
@@ -69,7 +49,6 @@ static K_THREAD_STACK_DEFINE(event_stack, 4096);
static struct k_thread event_thread_data;
static struct k_delayed_work ack_work;
//static struct k_delayed_work retx_work;
typedef enum {
HCI_3WIRE_ACK_PKT = 0x00,
@@ -81,8 +60,7 @@ typedef enum {
HCI_3WIRE_LINK_PKT = 0x0f,
} h5_pkt_t;
static bool reliable_packet(h5_pkt_t type)
{
static bool reliable_packet(h5_pkt_t type) {
switch (type) {
case HCI_COMMAND_PKT:
case HCI_ACLDATA_PKT:
@@ -93,18 +71,15 @@ static bool reliable_packet(h5_pkt_t type)
}
}
/* FIXME: Correct timeout */
#define H5_RX_ACK_TIMEOUT K_MSEC(250)
#define H5_TX_ACK_TIMEOUT K_MSEC(250)
#define TMAX 18 // 18ms; Max packet size in bits divided by bit rate = 259 * 8 / 115200
#define H5_ACK_TIMEOUT K_MSEC(2 * TMAX)
#define H5_RETRANSMIT_TIMEOUT K_MSEC(3 * TMAX)
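As a quick check of the arithmetic behind TMAX (a standalone sketch, not part of the firmware; it mirrors the 259-byte maximum packet and 115200 bit rate from the comment above):

#include <stdio.h>

int main(void) {
    const double bit_rate = 115200.0;          /* UART bit rate from the comment above */
    const unsigned max_packet_bits = 259 * 8;  /* max packet size in bits */
    double tmax_ms = max_packet_bits / bit_rate * 1000.0;
    /* Prints roughly 18.0, matching TMAX; the ack and retransmit
     * timeouts above are then 2 * TMAX and 3 * TMAX. */
    printf("TMAX ~= %.1f ms\n", tmax_ms);
    return 0;
}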
#define SLIP_DELIMITER 0xc0
#define SLIP_ESC 0xdb
#define SLIP_ESC_DELIM 0xdc
#define SLIP_ESC_ESC 0xdd
#define H5_RX_ESC 1
#define H5_TX_ACK_PEND 2
typedef struct __attribute__((packed)) {
uint8_t seq : 3;
uint8_t ack : 3;
@@ -122,11 +97,8 @@ typedef enum {
} h5_linkstate_t;
static struct h5 {
//struct net_buf *rx_buf;
struct k_fifo controller_queue;
struct k_fifo host_queue;
struct k_fifo unack_queue;
struct k_fifo unprocessed_queue;
struct k_fifo event_queue;
@@ -137,15 +109,13 @@ static struct h5 {
h5_linkstate_t link_state;
} h5;
//static u8_t unack_queue_len;
#define MAX_PACKETS_IN_FLIGHT (0x01)
static const u8_t sync_req[] = { 0x01, 0x7e };
static const u8_t sync_rsp[] = { 0x02, 0x7d };
static const u8_t sync_req[] = {0x01, 0x7e};
static const u8_t sync_rsp[] = {0x02, 0x7d};
/* Third byte may change */
static u8_t conf_req[] = { 0x03, 0xfc };
static const u8_t conf_rsp[] = { 0x04, 0x7b, MAX_PACKETS_IN_FLIGHT };
static u8_t conf_req[] = {0x03, 0xfc};
static const u8_t conf_rsp[] = {0x04, 0x7b, MAX_PACKETS_IN_FLIGHT};
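For reference, the third byte of the CONFIG response carries the sliding-window size in its low three bits, which do_init() later extracts as conf_rsp[2] & 0x7. A minimal standalone check of that decoding (hypothetical test code, not part of this file):

#include <assert.h>

typedef unsigned char u8_t;

int main(void) {
    const u8_t conf_rsp[] = {0x04, 0x7b, 0x01};  /* third byte: MAX_PACKETS_IN_FLIGHT == 1 */
    u8_t tx_win = conf_rsp[2] & 0x7;             /* same mask as in do_init() */
    assert(tx_win == 1);
    return 0;
}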
/* H5 signal buffers pool */
#define MAX_SIG_LEN 3
@@ -158,8 +128,7 @@ NET_BUF_POOL_DEFINE(h5_pool, SIGNAL_COUNT, SIG_BUF_SIZE, 0, NULL);
#define PACKET_BUF_SIZE (CONFIG_BT_HCI_RESERVE + MAX_PACKET_LEN)
NET_BUF_POOL_DEFINE(h5_pack_pool, MAX_PACKETS_IN_FLIGHT + 10, PACKET_BUF_SIZE, 0, NULL);
static inline void bt_uart_drain(struct device *dev)
{
static inline void bt_uart_drain(struct device *dev) {
u8_t c;
while (uart_fifo_read(dev, &c, 1)) {
@@ -177,74 +146,7 @@ void _reboot_if_not(struct net_buf *buf, char const *file, int line) {
}
}
/*
static void process_unack(void)
{
u8_t next_seq = h5.tx_seq;
u8_t number_removed = unack_queue_len;
if (!unack_queue_len) {
return;
}
LOG_DBG("rx_ack %u tx_ack %u tx_seq %u unack_queue_len %u",
h5.rx_ack, h5.tx_ack, h5.tx_seq, unack_queue_len);
while (unack_queue_len > 0) {
if (next_seq == h5.rx_ack) {
// Next sequence number is the same as last received
// ack number
//
break;
}
number_removed--;
// Similar to (n - 1) % 8 with unsigned conversion
next_seq = (next_seq - 1) & 0x07;
}
if (next_seq != h5.rx_ack) {
LOG_ERR("Wrong sequence: rx_ack %u tx_seq %u next_seq %u",
h5.rx_ack, h5.tx_seq, next_seq);
}
LOG_DBG("Need to remove %u packet from the queue", number_removed);
while (number_removed) {
struct net_buf *buf = net_buf_get(&h5.unack_queue, K_NO_WAIT);
if (!buf) {
LOG_ERR("Unack queue is empty");
break;
}
// TODO: print or do something with packet
LOG_DBG("Remove buf from the unack_queue");
//net_buf_unref(buf);
unack_queue_len--;
number_removed--;
}
}
*/
static void h5_print_header(h5_hdr_t *hdr)
{
if (hdr->is_reliable) {
LOG_DBG("REL: seq %d ack %d crc %d type %d len %d",
hdr->seq, hdr->ack,
hdr->checksum, hdr->packet_type,
hdr->len);
} else {
LOG_DBG("UNREL: ack %d crc %d type %d len %d",
hdr->ack, hdr->checksum,
hdr->packet_type, hdr->len);
}
}
static u8_t h5_slip_byte(u8_t byte)
{
static u8_t h5_slip_byte(u8_t byte) {
switch (byte) {
case SLIP_DELIMITER:
uart_poll_out(hci_uart_dev, SLIP_ESC);
@@ -261,7 +163,7 @@ static u8_t h5_slip_byte(u8_t byte)
}
void h5_hdr_update_checksum(h5_hdr_t *hdr) {
uint8_t *u8_hdr = (uint8_t *)hdr;
uint8_t *u8_hdr = (uint8_t *) hdr;
u8_hdr[3] = ~((u8_hdr[0] + u8_hdr[1] + u8_hdr[2]) & 0xff);
}
@@ -269,8 +171,7 @@ static u8_t next_seq(u8_t i) {
return (i + 1) % 8;
}
void h5_send(const u8_t *payload, h5_pkt_t type, int len)
{
void h5_send(const u8_t *payload, h5_pkt_t type, int len) {
h5_hdr_t hdr = {0};
/* Set ACK for outgoing packet and stop delayed work */
@@ -290,7 +191,7 @@ void h5_send(const u8_t *payload, h5_pkt_t type, int len)
uart_poll_out(hci_uart_dev, SLIP_DELIMITER);
uint8_t *u8_hdr = (uint8_t *)&hdr;
uint8_t *u8_hdr = (uint8_t *) &hdr;
for (int i = 0; i < 4; i++) {
h5_slip_byte(u8_hdr[i]);
}
@@ -302,125 +203,17 @@ void h5_send(const u8_t *payload, h5_pkt_t type, int len)
uart_poll_out(hci_uart_dev, SLIP_DELIMITER);
}
/* Delayed work taking care about retransmitting packets */
/*static void retx_timeout(struct k_work *work)
{
ARG_UNUSED(work);
LOG_DBG("unack_queue_len %u", unack_queue_len);
if (unack_queue_len) {
struct k_fifo tmp_queue;
struct net_buf *buf;
k_fifo_init(&tmp_queue);
*//* Queue to temporary queue *//*
while ((buf = net_buf_get(&h5.host_queue, K_NO_WAIT))) {
net_buf_put(&tmp_queue, buf);
}
*//* Queue unack packets to the beginning of the queue *//*
while ((buf = net_buf_get(&h5.unack_queue, K_NO_WAIT))) {
*//* include also packet type *//*
net_buf_put(&h5.host_queue, buf);
h5.tx_seq = next_seq(h5.tx_seq);
unack_queue_len--;
}
*//* Queue saved packets from temp queue *//*
while ((buf = net_buf_get(&tmp_queue, K_NO_WAIT))) {
net_buf_put(&h5.host_queue, buf);
}
}
}*/
static void ack_timeout(struct k_work *work) {
ARG_UNUSED(work);
h5_send(NULL, HCI_3WIRE_ACK_PKT, 0);
}
bool h5_hdr_checksum_good(h5_hdr_t *hdr) {
uint8_t *u8_hdr = (uint8_t *)hdr;
uint8_t *u8_hdr = (uint8_t *) hdr;
return ((u8_hdr[3] + u8_hdr[0] + u8_hdr[1] + u8_hdr[2]) & 0xff) == 0xff;
}
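A standalone sanity check that the two checksum helpers agree (hypothetical test harness; header bytes chosen arbitrarily): after h5_hdr_update_checksum() stores ~(b0 + b1 + b2) in byte 3, the four bytes sum to 0xff modulo 256, which is exactly what h5_hdr_checksum_good() verifies.

#include <assert.h>
#include <stdint.h>

int main(void) {
    uint8_t h[4] = {0x42, 0x9c, 0x07, 0x00};              /* arbitrary header bytes */
    h[3] = ~((h[0] + h[1] + h[2]) & 0xff);                /* h5_hdr_update_checksum */
    assert(((h[0] + h[1] + h[2] + h[3]) & 0xff) == 0xff); /* h5_hdr_checksum_good */
    return 0;
}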
bool unslip_header(uint8_t *buf, size_t len, h5_hdr_t *hdr) {
uint8_t hdr_idx = 0, *u8_hdr = (uint8_t *) hdr; // write into the caller's header (not &hdr, which would clobber the pointer itself)
bool escaped = false;
if (len < sizeof(h5_hdr_t)) {
return false;
}
for (size_t i = 0; i < len && hdr_idx < sizeof(h5_hdr_t); i++) {
if (buf[i] == SLIP_ESC) {
escaped = true;
continue;
}
if (escaped && buf[i] == SLIP_ESC_ESC) {
u8_hdr[hdr_idx++] = SLIP_ESC;
escaped = false;
continue;
}
if (escaped && buf[i] == SLIP_ESC_DELIM) {
u8_hdr[hdr_idx++] = SLIP_DELIMITER;
escaped = false;
continue;
}
u8_hdr[hdr_idx++] = buf[i];
escaped = false;
}
if (hdr_idx < sizeof(h5_hdr_t)) {
// Buffer too small due to slip escape chars
return false;
}
return h5_hdr_checksum_good(hdr);
}
int unslip_next_byte(struct net_buf *buf) {
if (!buf->len) {
// This happens at the end of every buffer, not worth logging
return -1;
}
u8_t next = net_buf_pull_u8(buf);
if (next != SLIP_ESC) {
return next;
}
if (!buf->len) {
LOG_WRN("Incomplete SLIP_ESC pair");
return -1;
}
next = net_buf_pull_u8(buf);
if (next == SLIP_ESC_ESC) {
return SLIP_ESC;
}
if (next == SLIP_ESC_DELIM) {
return SLIP_DELIMITER;
}
LOG_WRN("Bad Escape Seqence: %02X %02X", SLIP_ESC, next);
return -2;
}
int pull_header1(struct net_buf *buf, h5_hdr_t *hdr) {
uint8_t *u8_hdr = (uint8_t *)hdr;
if (buf->len < sizeof(h5_hdr_t)) {
return -1;
}
for (u8_t i = 0; i < sizeof(h5_hdr_t); i++) {
int byte = unslip_next_byte(buf);
if (byte < 0) {
// Packet too short due to escaped bytes
return -1;
}
u8_hdr[i] = byte;
}
if (!h5_hdr_checksum_good(hdr)) {
return -2;
}
return 0;
}
void bt_uart_isr(struct device *unused)
{
void bt_uart_isr(struct device *unused) {
static u8_t byte;
static struct net_buf *buf = NULL;
static enum slip_states {
@@ -471,57 +264,29 @@ void bt_uart_isr(struct device *unused)
}
}
/*void bt_uart_isr(struct device *unused) {
static u8_t byte;
static struct net_buf *buf = NULL;
static h5_hdr_t hdr = {0};
static enum slip_state {
SYNC,
NOSYNC,
} slip_state = NOSYNC;
ARG_UNUSED(unused);
while (uart_irq_update(hci_uart_dev) &&
uart_irq_is_pending(hci_uart_dev)) {
if (!uart_irq_rx_ready(hci_uart_dev)) {
*//* Only the UART RX path is interrupt-enabled *//*
static void unslip_buffer(struct net_buf *buf) {
if (buf->frags) {
LOG_WRN("Inplace un-SLIP not designed for fragmented net_buf, likely will truncate packet");
}
bool escaped = false;
size_t out_idx = 0;
for (size_t in_idx = 0; in_idx < buf->len; in_idx++) {
switch (buf->data[in_idx]) {
case SLIP_ESC:
escaped = true;
break;
}
if (!buf) {
buf = net_buf_alloc(&h5_pack_pool, K_NO_WAIT);
REBOOT_IF_NOT(buf);
}
while (uart_fifo_read(hci_uart_dev, &byte, sizeof(byte))) {
if (slip_state == NOSYNC && byte != SLIP_DELIMITER) {
continue;
}
if (byte == SLIP_DELIMITER) {
if (buf->len > 0 && slip_state == SYNC) {
net_buf_put(&h5.unprocessed_queue, buf);
buf = NULL;
}
slip_state = SYNC;
} else {
net_buf_add_u8(buf, byte);
if (unslip_header(buf->data, buf->len, &hdr)) {
// We have the header; check length
if (hdr.len + sizeof(h5_hdr_t) > buf->size) {
// We cannot process a packet this large
net_buf_reset(buf);
slip_state = NOSYNC;
case SLIP_ESC_DELIM:
buf->data[out_idx++] = escaped ? SLIP_DELIMITER : SLIP_ESC_DELIM;
escaped = false; // an escape applies to one byte only
break;
case SLIP_ESC_ESC:
buf->data[out_idx++] = escaped ? SLIP_ESC : SLIP_ESC_ESC;
escaped = false;
break;
default:
buf->data[out_idx++] = buf->data[in_idx];
escaped = false;
}
}
}
}
}
}*/
buf->len = out_idx;
}
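To make the in-place decode above concrete, here is a minimal standalone illustration on a plain byte array (hypothetical test code using the same SLIP constants; not part of the firmware):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define SLIP_DELIMITER 0xc0
#define SLIP_ESC       0xdb
#define SLIP_ESC_DELIM 0xdc
#define SLIP_ESC_ESC   0xdd

/* Same idea as unslip_buffer(), on a bare array: collapse each
 * two-byte escape sequence in place and return the shortened length. */
static size_t unslip_in_place(uint8_t *data, size_t len) {
    size_t out = 0;
    int escaped = 0;
    for (size_t in = 0; in < len; in++) {
        if (!escaped && data[in] == SLIP_ESC) {
            escaped = 1;
            continue;
        }
        if (escaped && data[in] == SLIP_ESC_DELIM) {
            data[out++] = SLIP_DELIMITER;
        } else if (escaped && data[in] == SLIP_ESC_ESC) {
            data[out++] = SLIP_ESC;
        } else {
            data[out++] = data[in];
        }
        escaped = 0;
    }
    return out;
}

int main(void) {
    uint8_t pkt[] = {0x01, SLIP_ESC, SLIP_ESC_DELIM, 0x02, SLIP_ESC, SLIP_ESC_ESC};
    size_t n = unslip_in_place(pkt, sizeof(pkt));
    assert(n == 4);
    assert(pkt[1] == SLIP_DELIMITER && pkt[3] == SLIP_ESC);
    return 0;
}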
static void unproc_thread(void) {
struct net_buf *buf;
@@ -529,86 +294,62 @@ static void unproc_thread(void) {
while (true) {
buf = net_buf_get(&h5.unprocessed_queue, K_FOREVER);
unslip_buffer(buf);
h5_hdr_t hdr = {0};
int r = pull_header1(buf, &hdr);
if (r < 0) {
LOG_WRN("Bad h5 header: %d, dropping packet", r);
if (buf->len < sizeof(h5_hdr_t)) {
LOG_ERR("Packet too short for valid H5 header, dropping");
goto next;
}
//LOG_HEXDUMP_DBG((uint8_t*)&hdr, sizeof(hdr), "Header");
if (hdr.is_reliable) {
LOG_DBG("Header: seq %d, ack %d, rel: %s, has_crc16: %s", hdr.seq, hdr.ack,
hdr.is_reliable ? "true" : "false",
hdr.crc_after_payload ? "true" : "false");
if (hdr.seq != h5.next_expected_seq_from_host) {
LOG_ERR("Seq expected %u got %u. Drop packet", h5.next_expected_seq_from_host, hdr.seq);
goto next;
}
k_delayed_work_submit(&ack_work, H5_RX_ACK_TIMEOUT);
h5.next_expected_seq_from_host = next_seq(hdr.seq);
}
if (h5.link_state < ACTIVE && hdr.packet_type != HCI_3WIRE_LINK_PKT) {
LOG_WRN("Dropping unexpected packet type (%d) in non-ACTIVE state (%d)",
hdr.packet_type, h5.link_state);
h5_hdr_t *hdr = net_buf_pull_mem(buf, sizeof(h5_hdr_t));
if (!h5_hdr_checksum_good(hdr)) {
LOG_ERR("Invalid header checksum, dropping packet");
goto next;
}
struct net_buf *rx_buf = NULL;
switch (hdr.packet_type) {
case HCI_ACLDATA_PKT:
rx_buf = net_buf_alloc(&acl_tx_pool, K_NO_WAIT);
REBOOT_IF_NOT(rx_buf);
bt_buf_set_type(rx_buf, BT_BUF_ACL_OUT);
break;
case HCI_COMMAND_PKT:
rx_buf = net_buf_alloc(&cmd_tx_pool, K_NO_WAIT);
REBOOT_IF_NOT(rx_buf);
bt_buf_set_type(rx_buf, BT_BUF_CMD);
break;
case HCI_3WIRE_ACK_PKT:
// h5.rx_ack = hdr.ack;
// We currently don't retransmit, so drop ack packets
goto next;
case HCI_3WIRE_LINK_PKT:
rx_buf = net_buf_alloc(&h5_pool, K_NO_WAIT);
REBOOT_IF_NOT(rx_buf);
break;
default:
LOG_ERR("Wrong packet type from host: %u", hdr.packet_type);
goto next;
}
int byte;
while ((byte = unslip_next_byte(buf)) >= 0) {
net_buf_add_u8(rx_buf, (u8_t) byte);
}
if (hdr.len != rx_buf->len) {
if (hdr->len != buf->len) {
LOG_WRN("Truncated payload, skipping\n");
net_buf_unref(rx_buf);
goto next;
}
switch (hdr.packet_type) {
if (h5.link_state < ACTIVE && hdr->packet_type != HCI_3WIRE_LINK_PKT) {
LOG_WRN("Dropping unexpected packet type (%d) in non-ACTIVE state (%d)",
hdr->packet_type, h5.link_state);
goto next;
}
// If we've made it this far, the packet is good enough to ACK
if (hdr->is_reliable) {
LOG_DBG("Header: seq %d, ack %d, rel: %s, has_crc16: %s", hdr->seq, hdr->ack,
hdr->is_reliable ? "true" : "false",
hdr->crc_after_payload ? "true" : "false");
if (hdr->seq != h5.next_expected_seq_from_host) {
LOG_ERR("Seq expected %u got %u. Drop packet", h5.next_expected_seq_from_host, hdr->seq);
goto next;
}
k_delayed_work_submit(&ack_work, H5_ACK_TIMEOUT);
h5.next_expected_seq_from_host = next_seq(hdr->seq);
}
switch (hdr->packet_type) {
case HCI_3WIRE_ACK_PKT:
// No further action required
break;
case HCI_3WIRE_LINK_PKT:
net_buf_put(&h5.host_queue, rx_buf);
net_buf_put(&h5.host_queue, buf);
break;
case HCI_COMMAND_PKT:
case HCI_ACLDATA_PKT:
case HCI_SCODATA_PKT:
//LOG_DBG("Adding to controller queue\n");
net_buf_put(&h5.controller_queue, rx_buf);
net_buf_put(&h5.controller_queue, buf);
break;
default:
LOG_WRN("Unknown packet type %u\n", hdr.packet_type);
LOG_WRN("Unknown packet type %u\n", hdr->packet_type);
break;
}
next:
next:
net_buf_unref(buf);
}
}
@@ -616,10 +357,7 @@ next:
static void h5_send_sync(void);
static void h5_send_config(void);
static void tx_thread(void)
{
static void tx_thread(void) {
k_thread_name_set(NULL, "tx_thread");
while (true) {
@@ -640,17 +378,6 @@ static void tx_thread(void)
while ((buf = net_buf_get(&h5.controller_queue, K_FOREVER))) {
bt_send(buf);
}
//LOG_DBG("controller_queue empty");
/* buf is dequeued from tx_queue and queued to unack
* queue.
*/
//net_buf_put(&h5.unack_queue, buf);
//unack_queue_len++;
//k_delayed_work_submit(&retx_work, H5_TX_ACK_TIMEOUT);
break;
}
}
@@ -668,37 +395,27 @@ static void event_thread(void) {
}
}
static void h5_init(void)
{
static void h5_init(void) {
LOG_DBG("h5_init");
h5.link_state = UNINIT;
h5.rx_state = START;
h5.tx_win = 4;
/* TX thread - handles periodic h5_link messages and forwards commands to the controller */
k_fifo_init(&h5.controller_queue);
k_thread_create(&tx_thread_data, tx_stack,
K_THREAD_STACK_SIZEOF(tx_stack),
(k_thread_entry_t)tx_thread, NULL, NULL, NULL,
(k_thread_entry_t) tx_thread, NULL, NULL, NULL,
K_PRIO_PREEMPT(1),
0, K_NO_WAIT);
k_fifo_init(&h5.host_queue);
/*k_thread_create(&rx_thread_data, rx_stack,
K_THREAD_STACK_SIZEOF(rx_stack),
(k_thread_entry_t)rx_thread, NULL, NULL, NULL,
K_PRIO_COOP(CONFIG_BT_RX_PRIO),
0, K_NO_WAIT);*/
/* Unack queue */
k_fifo_init(&h5.unack_queue);
/* Thread & queue to un-slip and un-h5 incoming packets */
k_fifo_init(&h5.unprocessed_queue);
k_thread_create(&unproc_thread_data, unproc_stack,
K_THREAD_STACK_SIZEOF(unproc_stack),
(k_thread_entry_t)unproc_thread, NULL, NULL, NULL,
(k_thread_entry_t) unproc_thread, NULL, NULL, NULL,
K_PRIO_PREEMPT(1),
0, K_NO_WAIT);
@@ -706,18 +423,16 @@ static void h5_init(void)
k_fifo_init(&h5.event_queue);
k_thread_create(&event_thread_data, event_stack,
K_THREAD_STACK_SIZEOF(event_stack),
(k_thread_entry_t)event_thread, NULL, NULL, NULL,
(k_thread_entry_t) event_thread, NULL, NULL, NULL,
K_PRIO_PREEMPT(2),
0, K_NO_WAIT);
/* Init delayed work */
k_delayed_work_init(&ack_work, ack_timeout);
//k_delayed_work_init(&retx_work, retx_timeout);
}
static int h5_open(struct device *unused)
{
static int h5_open(struct device *unused) {
LOG_DBG("h5_open");
hci_uart_dev = device_get_binding(CONFIG_BT_CTLR_TO_HOST_UART_DEV_NAME);
@@ -729,7 +444,7 @@ static int h5_open(struct device *unused)
uart_irq_tx_disable(hci_uart_dev);
/* Enable HFXO so that UART clock is stable for 115200 bitrate */
*(uint32_t *)0x40000000 = 0x1;
*(uint32_t *) 0x40000000 = 0x1;
bt_uart_drain(hci_uart_dev);
@@ -759,7 +474,7 @@ enum h5_pkt_types {
H5_CONFIG_RESPONSE,
};
bool _link_ctrl_memcmp(struct net_buf const * buf, u8_t const * const ref) {
bool _link_ctrl_memcmp(struct net_buf const *buf, u8_t const *const ref) {
return !memcmp(buf->data, ref, 2);
}
@@ -779,12 +494,12 @@ static bool packet_is_config_response(struct net_buf const *buf) {
return _link_ctrl_memcmp(buf, conf_rsp);
}
static void _send_link_control(u8_t const * const buf, u8_t len) {
static void _send_link_control(u8_t const *const buf, u8_t len) {
h5_send(buf, HCI_3WIRE_LINK_PKT, len);
}
int packet_get_type(struct net_buf const * buf) {
if (packet_is_sync(buf)){
int packet_get_type(struct net_buf const *buf) {
if (packet_is_sync(buf)) {
return H5_SYNC;
} else if (packet_is_sync_response(buf)) {
return H5_SYNC_RESPONSE;
@@ -846,7 +561,7 @@ void gpio_init() {
}
static linkstate_t do_uninit(struct net_buf const *buf) {
static h5_linkstate_t do_uninit(struct net_buf const *buf) {
switch (packet_get_type(buf)) {
case H5_SYNC:
LOG_DBG("RX'd SYNC: UNINIT -> UNINIT");
@@ -864,7 +579,7 @@ static linkstate_t do_uninit(struct net_buf const *buf) {
}
}
static linkstate_t do_init(struct net_buf const *buf) {
static h5_linkstate_t do_init(struct net_buf const *buf) {
switch (packet_get_type(buf)) {
case H5_SYNC:
LOG_DBG("RX'd SYNC: INIT -> INIT");
@@ -878,7 +593,6 @@ static linkstate_t do_init(struct net_buf const *buf) {
h5.tx_win = conf_rsp[2] & 0x7;
h5.next_expected_seq_from_host = 0;
h5.local_seq = 0;
h5.initializing = true;
LOG_DBG("Finished H5 configuration, tx_win %u: INIT -> ACTIVE", h5.tx_win);
return ACTIVE;
default:
@@ -887,7 +601,7 @@ static linkstate_t do_init(struct net_buf const *buf) {
}
}
static linkstate_t do_active(struct net_buf *buf) {
static h5_linkstate_t do_active(struct net_buf *buf) {
switch (packet_get_type(buf)) {
case H5_SYNC:
LOG_WRN("SYNC in ACTIVE state, peer reset: ACTIVE -> UNINIT");
@@ -927,8 +641,7 @@ void state_change(int old, int new) {
}
}
void main(void)
{
void main(void) {
LOG_DBG("Start");
gpio_init();