
Reworks seq/ack system and adds additional RX buffers

Also adds logging for too-large packets and removes the non-functional retransmit functions
Shawn Nock 2019-08-02 15:10:57 -04:00
parent 52edd92f00
commit 2366c8bcef
2 changed files with 35 additions and 41 deletions
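
For context: the H5 (3-wire UART) transport carries a 3-bit sequence number and a 3-bit acknowledgement number in every packet header, so both counters wrap modulo 8. A minimal, self-contained sketch of that wraparound arithmetic (the next_seq() helper mirrors the one added in this commit; the standalone demo around it is illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    /* 3-bit sequence space: values 0..7, wrapping back to 0 after 7. */
    static uint8_t next_seq(uint8_t i) {
        return (i + 1) % 8;
    }

    int main(void) {
        uint8_t s = 0;
        for (int n = 0; n < 9; n++) {   /* walk one full window plus one step */
            printf("%u ", s);
            s = next_seq(s);
        }
        printf("\n");                   /* prints: 0 1 2 3 4 5 6 7 0 */
        return 0;
    }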

@@ -9,7 +9,7 @@ CONFIG_BT_MAX_CONN=16
 CONFIG_BT_TINYCRYPT_ECC=n
 CONFIG_BT_CTLR_DTM_HCI=y
 CONFIG_BT_CTLR_ASSERT_HANDLER=y
-CONFIG_BT_RX_BUF_COUNT=10
+CONFIG_BT_RX_BUF_COUNT=20
 CONFIG_INIT_STACKS=y
 CONFIG_THREAD_STACK_INFO=y

@@ -69,7 +69,7 @@ static K_THREAD_STACK_DEFINE(event_stack, 4096);
 static struct k_thread event_thread_data;
 static struct k_delayed_work ack_work;
-static struct k_delayed_work retx_work;
+//static struct k_delayed_work retx_work;
 #define HCI_3WIRE_ACK_PKT 0x00
 #define HCI_COMMAND_PKT 0x01
@@ -85,7 +85,7 @@ static bool reliable_packet(u8_t type)
     case HCI_COMMAND_PKT:
     case HCI_ACLDATA_PKT:
     case HCI_EVENT_PKT:
         return true;
     default:
         return false;
     }
@@ -122,17 +122,15 @@ typedef enum {
 static struct h5 {
     //struct net_buf *rx_buf;
     struct k_fifo controller_queue;
     struct k_fifo host_queue;
     struct k_fifo unack_queue;
     struct k_fifo unprocessed_queue;
     struct k_fifo event_queue;
     u8_t tx_win;
-    u8_t tx_ack;
-    u8_t tx_seq;
-    u8_t rx_ack;
+    u8_t last_seen_seq_from_host;
+    u8_t local_seq;
     linkstate_t link_state;
@@ -144,7 +142,7 @@ static struct h5 {
     } rx_state;
 } h5;
-static u8_t unack_queue_len;
+//static u8_t unack_queue_len;
 #define MAX_PACKETS_IN_FLIGHT (0x01)
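
The renames above make the direction of each counter explicit; a hedged reading of the two new fields (the comments below are mine, not part of the commit):

    /* Illustration only; the real fields live in the h5 struct above. */
    u8_t last_seen_seq_from_host; /* seq of the last reliable packet accepted
                                   * from the host; echoed in the ack field of
                                   * every outgoing header (replaces tx_ack) */
    u8_t local_seq;               /* seq stamped on the next reliable packet we
                                   * transmit, advanced with next_seq() after
                                   * each send (replaces tx_seq) */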
@@ -272,18 +270,22 @@ void h5_hdr_update_checksum(h5_hdr_t *hdr) {
     u8_hdr[3] = ~((u8_hdr[0] + u8_hdr[1] + u8_hdr[2]) & 0xff);
 }
+static u8_t next_seq(u8_t i) {
+    return (i + 1) % 8;
+}
 void h5_send(const u8_t *payload, u8_t type, int len)
 {
     h5_hdr_t hdr = {0};
     /* Set ACK for outgoing packet and stop delayed work */
-    hdr.ack = h5.tx_ack;
+    hdr.ack = h5.last_seen_seq_from_host;
     k_delayed_work_cancel(&ack_work);
     if (reliable_packet(type)) {
         hdr.is_reliable = true;
-        hdr.seq = h5.tx_seq;
-        h5.tx_seq = (h5.tx_seq + 1) % 8;
+        hdr.seq = h5.local_seq;
+        h5.local_seq = next_seq(h5.local_seq);
     }
     hdr.packet_type = type;
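
A worked example of what the reworked h5_send() now puts in a reliable header, as a self-contained model (the struct names, model_send_reliable(), and the concrete numbers are assumptions for illustration; only the seq/ack bookkeeping mirrors the hunk above):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Minimal stand-ins for the driver state and packet header. */
    struct model { uint8_t last_seen_seq_from_host, local_seq; };
    struct hdr   { uint8_t seq, ack; bool is_reliable; };

    static uint8_t next_seq(uint8_t i) { return (i + 1) % 8; }

    /* Mirrors the seq/ack handling h5_send() applies to a reliable packet. */
    static struct hdr model_send_reliable(struct model *m) {
        struct hdr h = { .ack = m->last_seen_seq_from_host, .is_reliable = true };
        h.seq = m->local_seq;                  /* stamp our current sequence number */
        m->local_seq = next_seq(m->local_seq); /* and advance it for the next one   */
        return h;
    }

    int main(void) {
        /* Assume the host's last reliable packet carried seq 4 and we have
         * already sent two reliable packets of our own. */
        struct model m = { .last_seen_seq_from_host = 4, .local_seq = 2 };
        struct hdr h = model_send_reliable(&m);
        assert(h.ack == 4);       /* acknowledges the host's packet 4     */
        assert(h.seq == 2);       /* our sequence number for this packet  */
        assert(m.local_seq == 3); /* ready for the next reliable packet   */
        return 0;
    }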
@@ -306,7 +308,7 @@ void h5_send(const u8_t *payload, u8_t type, int len)
 }
 /* Delayed work taking care about retransmitting packets */
-static void retx_timeout(struct k_work *work)
+/*static void retx_timeout(struct k_work *work)
 {
     ARG_UNUSED(work);
@@ -318,37 +320,29 @@ static void retx_timeout(struct k_work *work)
         k_fifo_init(&tmp_queue);
-        /* Queue to temperary queue */
+        *//* Queue to temperary queue *//*
         while ((buf = net_buf_get(&h5.host_queue, K_NO_WAIT))) {
             net_buf_put(&tmp_queue, buf);
         }
-        /* Queue unack packets to the beginning of the queue */
+        *//* Queue unack packets to the beginning of the queue *//*
         while ((buf = net_buf_get(&h5.unack_queue, K_NO_WAIT))) {
-            /* include also packet type */
+            *//* include also packet type *//*
             net_buf_put(&h5.host_queue, buf);
-            h5.tx_seq = (h5.tx_seq - 1) & 0x07;
+            h5.tx_ = next_seq(h5.tx_seq);
             unack_queue_len--;
         }
-        /* Queue saved packets from temp queue */
+        *//* Queue saved packets from temp queue *//*
         while ((buf = net_buf_get(&tmp_queue, K_NO_WAIT))) {
             net_buf_put(&h5.host_queue, buf);
         }
     }
-}
+}*/
-static void ack_timeout(struct k_work *work)
-{
+static void ack_timeout(struct k_work *work) {
     ARG_UNUSED(work);
-    LOG_DBG("");
     h5_send(NULL, HCI_3WIRE_ACK_PKT, 0);
-    /* Analyze stacks */
-    //STACK_ANALYZE("tx_stack", tx_stack);
-    //STACK_ANALYZE("rx_stack", rx_stack);
 }
 bool h5_hdr_checksum_good(h5_hdr_t *hdr) {
@@ -460,6 +454,7 @@ void bt_uart_isr(struct device *unused)
      * unsync'ing SLIP will drop the rest of the packet and the net_buf_reset
      * prepares the buffer for reuse without unref/alloc cycle.
      */
+    LOG_WRN("H5 packet exceeds buffer size, dropping");
     net_buf_reset(buf);
     slip_state = SLIP_NOSYNC;
 }
@@ -571,7 +566,8 @@ static void unproc_thread(void) {
         bt_buf_set_type(rx_buf, BT_BUF_CMD);
         break;
     case HCI_3WIRE_ACK_PKT:
-        h5.rx_ack = hdr.ack;
+        // h5.rx_ack = hdr.ack;
+        // We currently don't retransmit, so drop ack packets
         goto next;
     case HCI_3WIRE_LINK_PKT:
         rx_buf = net_buf_alloc(&h5_pool, K_NO_WAIT);
@@ -592,16 +588,14 @@ static void unproc_thread(void) {
         goto next;
     }
-    if (hdr.is_reliable && (hdr.seq != h5.tx_ack)) {
-        LOG_ERR("Seq expected %u got %u. Drop packet", h5.tx_ack, hdr.seq);
+    u8_t expected_seq = next_seq(h5.last_seen_seq_from_host);
+    if (hdr.is_reliable && hdr.seq != expected_seq) {
+        LOG_ERR("Seq expected %u got %u. Drop packet", expected_seq, hdr.seq);
         goto next;
     }
-    h5.rx_ack = hdr.ack;
+    h5.last_seen_seq_from_host = hdr.seq;
     if (hdr.is_reliable) {
-        /* For reliable packet increment next transmit ack number */
-        h5.tx_ack = (h5.tx_ack + 1) % 8;
         /* Submit delayed work to ack the packet */
         k_delayed_work_submit(&ack_work, H5_RX_ACK_TIMEOUT);
     }
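
The receive path no longer maintains a separate tx_ack counter; it derives the expected sequence number from the last one it accepted. A self-contained sketch of that check (the accept_reliable() wrapper is hypothetical; the logic follows the hunk above):

    #include <stdbool.h>
    #include <stdint.h>

    static uint8_t next_seq(uint8_t i) { return (i + 1) % 8; }

    /* Decide whether a reliable packet carrying `seq` is the one we expect,
     * updating `*last_seen` the way unproc_thread() now does. */
    static bool accept_reliable(uint8_t *last_seen, uint8_t seq) {
        uint8_t expected = next_seq(*last_seen);
        if (seq != expected) {
            return false;      /* out of order: the driver logs and drops it */
        }
        *last_seen = seq;      /* remember it so the next outgoing ack echoes it */
        /* The driver then submits ack_work so an ACK packet goes out later. */
        return true;
    }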
@@ -675,7 +669,7 @@ static void event_thread(void) {
     while (true) {
         buf = net_buf_get(&h5.event_queue, K_MSEC(1000));
         if (!buf) {
-            LOG_WRN("host_queue is empty");
+            LOG_WRN("event_queue is empty");
            continue;
         }
         net_buf_put(&h5.host_queue, buf);
@@ -727,7 +721,7 @@ static void h5_init(void)
     /* Init delayed work */
     k_delayed_work_init(&ack_work, ack_timeout);
-    k_delayed_work_init(&retx_work, retx_timeout);
+    //k_delayed_work_init(&retx_work, retx_timeout);
 }
 static int h5_open(struct device *unused)
@@ -890,8 +884,8 @@ static linkstate_t do_init(struct net_buf const *buf) {
         return INIT;
     case H5_CONFIG_RESPONSE:
         h5.tx_win = conf_rsp[2] & 0x7;
-        h5.tx_seq = 0;
-        h5.tx_ack = 0;
+        h5.last_seen_seq_from_host = 7;
+        h5.local_seq = 0;
         LOG_DBG("Finished H5 configuration, tx_win %u: INIT -> ACTIVE", h5.tx_win);
         return ACTIVE;
     default:
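
Seeding last_seen_seq_from_host with 7 (instead of resetting a tx_ack of 0) makes the first expected sequence number come out as 0, which is what the host's first reliable packet after configuration carries; a one-line check of that assumption:

    #include <assert.h>

    static unsigned next_seq(unsigned i) { return (i + 1) % 8; }

    int main(void) {
        /* After do_init(): last_seen_seq_from_host == 7 and local_seq == 0,
         * so the first reliable packet from the host (seq 0) passes the
         * expected-sequence check, because next_seq(7) == 0. */
        assert(next_seq(7) == 0);
        return 0;
    }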