author    jaseg <git@jaseg.net>    2020-01-27 21:58:22 +0100
committer jaseg <git@jaseg.net>    2020-01-27 21:58:22 +0100
commit    f4a6ea896f711f68e898e69e21d74118a44465f2 (patch)
tree      48a8b14c2c452333cbc8fb9595d20ddad5c5348f /gm_platform/fw/serial.c
parent    966f104597275b29f41c06c4574d1bbe9ecde401 (diff)
serial basically working
Diffstat (limited to 'gm_platform/fw/serial.c')
-rw-r--r--    gm_platform/fw/serial.c    181
1 file changed, 152 insertions(+), 29 deletions(-)
diff --git a/gm_platform/fw/serial.c b/gm_platform/fw/serial.c
index 468c523..178d6f9 100644
--- a/gm_platform/fw/serial.c
+++ b/gm_platform/fw/serial.c
@@ -31,14 +31,27 @@
volatile struct dma_tx_buf usart_tx_buf;
+static uint32_t tx_overruns=0, rx_overruns=0;
+static uint32_t rx_framing_errors=0, rx_protocol_errors=0;
+
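+/* State of the incremental COBS decoder used to reassemble frames received on USART1, fed one byte per RX interrupt */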
+static struct cobs_decode_state cobs_state;
+
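+/* Buffer holding one decoded control packet received from the host */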
+static volatile uint8_t rx_buf[32];
+
+
static void usart_schedule_dma(void);
-int usart_putc_nonblocking(char c);
-int usart_putc(char c);
+static int usart_putc_nonblocking(uint8_t c);
+static int usart_retransmit_packet(uint8_t idx);
+
void usart_dma_init() {
- usart_tx_buf.xfr_start = -1,
- usart_tx_buf.xfr_end = 0,
- usart_tx_buf.wr_pos = 0,
+ usart_tx_buf.xfr_start = -1;
+ usart_tx_buf.xfr_end = 0;
+ usart_tx_buf.wr_pos = 0;
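+ /* -1 marks a retransmit slot as free; otherwise a slot holds the fifo start index of a not-yet-acknowledged packet */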
+ for (size_t i=0; i<ARRAY_LEN(usart_tx_buf.packet_start); i++)
+ usart_tx_buf.packet_start[i] = -1;
+
+ cobs_decode_incremental_initialize(&cobs_state);
/* Configure DMA 1 Channel 2 to handle uart transmission */
DMA1_Channel2->CPAR = (uint32_t)&(USART1->TDR);
@@ -74,7 +87,7 @@ void usart_dma_init() {
//USART1->BRR = 417;
//USART1->BRR = 48; /* 1MBd */
- USART1->BRR = 96; /* 500kBd */
+ //USART1->BRR = 96; /* 500kBd */
USART1->BRR = 192; /* 250kBd */
//USART1->BRR = 208; /* 230400 */
@@ -83,19 +96,72 @@ void usart_dma_init() {
USART1->CR3 |= USART_CR3_DMAT; /* TX DMA enable */
/* Enable receive interrupt */
- //NVIC_EnableIRQ(USART1_IRQn);
- //NVIC_SetPriority(USART1_IRQn, 1);
+ NVIC_EnableIRQ(USART1_IRQn);
+ NVIC_SetPriority(USART1_IRQn, 3<<5);
/* And... go! */
USART1->CR1 |= USART_CR1_UE;
}
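+/* RX interrupt: feed each received byte into the incremental COBS decoder and, once a frame is complete, handle the host's control packet (reset, ack or retransmit request) */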
+void USART1_IRQHandler() {
+ uint32_t isr = USART1->ISR;
+
+ if (isr & USART_ISR_ORE) {
+ USART1->ICR = USART_ICR_ORECF;
+ rx_overruns++;
+ return;
+ }
+
+ if (isr & USART_ISR_RXNE) {
+ uint8_t c = USART1->RDR;
+
+ int rc = cobs_decode_incremental(&cobs_state, (char *)rx_buf, sizeof(rx_buf), c);
+ if (rc == 0) /* packet still incomplete */
+ return;
+
+ if (rc < 0) {
+ rx_framing_errors++;
+ return;
+ }
+
+ /* A complete frame has been received; a valid control packet is exactly two bytes long */
+ if (rc != 2) {
+ rx_protocol_errors++;
+ return;
+ }
+
+ volatile struct ctrl_pkt *pkt = (volatile struct ctrl_pkt *)rx_buf;
+
+ switch (pkt->type) {
+ case CTRL_PKT_RESET:
+ for (size_t i=0; i<ARRAY_LEN(usart_tx_buf.packet_start); i++)
+ usart_tx_buf.packet_start[i] = -1;
+ break;
+
+ case CTRL_PKT_ACK:
+ if (usart_ack_packet(pkt->orig_id))
+ rx_protocol_errors++;
+ break;
+
+ case CTRL_PKT_RETRANSMIT:
+ if (usart_retransmit_packet(pkt->orig_id))
+ rx_protocol_errors++;
+ break;
+
+ default:
+ rx_protocol_errors++;
+ }
+ return;
+ }
+}
+
+
void usart_schedule_dma() {
/* This function is only called when the DMA channel is disabled. This means we don't have to guard it in IRQ
* disables. */
volatile struct dma_tx_buf *buf = &usart_tx_buf;
- size_t xfr_len, xfr_start = buf->xfr_end;
+ ssize_t xfr_len, xfr_start = buf->xfr_end;
if (buf->wr_pos > xfr_start) /* no wraparound */
xfr_len = buf->wr_pos - xfr_start;
else /* wraparound */
@@ -110,15 +176,28 @@ void usart_schedule_dma() {
DMA1_Channel2->CCR |= DMA_CCR_EN;
}
-int usart_dma_fifo_push(volatile struct dma_tx_buf *buf, char c) {
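+/* Mark the packet in the given retransmit slot as acknowledged so the fifo space it occupies may be overwritten */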
+int usart_ack_packet(uint8_t idx) {
+ if (idx >= ARRAY_LEN(usart_tx_buf.packet_start))
+ return -EINVAL;
+
+ usart_tx_buf.packet_start[idx] = -1;
+ return 0;
+}
+
+int usart_dma_fifo_push(volatile struct dma_tx_buf *buf, uint8_t c) {
/* This function must be guarded by IRQ disable since the IRQ may schedule a new transfer and change pos/start. */
NVIC_DisableIRQ(DMA1_Channel2_3_IRQn);
- if (buf->wr_pos == buf->xfr_start) {
- NVIC_EnableIRQ(DMA1_Channel2_3_IRQn);
- return -EBUSY;
+ /* If the write pointer has hit the start position of any unacknowledged packet we must not advance it.
+ * Packet start positions are unordered, so we have to scan all slots here. */
+ for (size_t i=0; i<ARRAY_LEN(buf->packet_start); i++) {
+ if (buf->wr_pos == buf->packet_start[i]) {
+ NVIC_EnableIRQ(DMA1_Channel2_3_IRQn);
+ return -EBUSY;
+ }
}
+ /* write byte, then increment to avoid racing the DMA ISR reading wr_pos */
buf->data[buf->wr_pos] = c;
buf->wr_pos = (buf->wr_pos + 1) % sizeof(buf->data);
@@ -126,7 +205,7 @@ int usart_dma_fifo_push(volatile struct dma_tx_buf *buf, char c) {
return 0;
}
-int usart_putc(char c) {
+int usart_putc(uint8_t c) {
/* push char to fifo, busy-loop if stalled to wait for USART to empty fifo via DMA */
while (usart_dma_fifo_push(&usart_tx_buf, c) == -EBUSY) {
/* idle */
@@ -134,7 +213,7 @@ int usart_putc(char c) {
return 0;
}
-int usart_putc_nonblocking(char c) {
+int usart_putc_nonblocking(uint8_t c) {
return usart_dma_fifo_push(&usart_tx_buf, c);
}
@@ -148,26 +227,70 @@ void DMA1_Channel2_3_IRQHandler(void) {
usart_schedule_dma();
}
-void usart_send_packet(const uint8_t *data, size_t len) {
- /* ignore return value as putf is blocking and always succeeds */
- (void)cobs_encode_usart(usart_putc, (char *)data, len);
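+/* Re-send the still-buffered packet stored in the given retransmit slot by copying it to the write end of the fifo */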
+int usart_retransmit_packet(uint8_t idx) {
+ /* Disable the ADC DMA IRQ to prevent it from racing our writes to the TX fifo */
+ NVIC_DisableIRQ(DMA1_Channel1_IRQn);
- /* If the DMA stream is idle right now, schedule a transfer */
- if (!(DMA1_Channel2->CCR & DMA_CCR_EN))
- usart_schedule_dma();
+ if (idx >= ARRAY_LEN(usart_tx_buf.packet_start) || usart_tx_buf.packet_start[idx] < 0) {
+ NVIC_EnableIRQ(DMA1_Channel1_IRQn);
+ return -EINVAL;
+ }
+
+ ssize_t i = usart_tx_buf.packet_start[idx];
+ ssize_t start = i;
+
+ /* Copy the COBS-encoded packet to the write end of the fifo, wrapping around the ring buffer */
+ uint8_t c;
+ while ((c = usart_tx_buf.data[i])) {
+ i = (i + 1) % sizeof(usart_tx_buf.data);
+ if (usart_putc_nonblocking(c)) {
+ tx_overruns++;
+ NVIC_EnableIRQ(DMA1_Channel1_IRQn);
+ return -EBUSY;
+ }
+ }
+
+ /* Terminating null byte */
+ if (usart_putc_nonblocking(0)) {
+ tx_overruns++;
+ NVIC_EnableIRQ(DMA1_Channel1_IRQn);
+ return -EBUSY;
+ }
+
+ /* Keep the start index so the packet stays protected until it is acknowledged */
+ usart_tx_buf.packet_start[idx] = start;
+
+ NVIC_EnableIRQ(DMA1_Channel1_IRQn);
+ return 0;
 }
-int usart_send_packet_nonblocking(const uint8_t *data, size_t len) {
- /* ignore return value as putf is blocking and always succeeds */
- /* FIXME DEBUG */
- //int rc = cobs_encode_usart(usart_putc_nonblocking, (char *)data, len);
- //if (rc)
- // return rc;
- /* END */
+/* pkt_len is the total length of the packet in bytes, including the ll_pkt header */
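+/* Allocate a free retransmit slot, stamp the packet id, fill in the CRC-32, COBS-encode the frame into the TX fifo and remember its start index until the host acknowledges it */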
+int usart_send_packet_nonblocking(struct ll_pkt *pkt, size_t pkt_len) {
+
+ ssize_t start = usart_tx_buf.wr_pos;
+ /* Find a free slot for this packet */
+ size_t packet_idx = 0;
+ do {
+ if (usart_tx_buf.packet_start[packet_idx] == -1)
+ goto success;
+ } while (++packet_idx < ARRAY_LEN(usart_tx_buf.packet_start));
+
+ tx_overruns++;
+ return -EBUSY;
+
+success:
+ pkt->pid = packet_idx;
+ pkt->_pad = 0;
+
+ /* make the value this wonky-ass CRC implementation produces match zlib etc. */
+ CRC->CR = CRC_CR_REV_OUT | (1<<CRC_CR_REV_IN_Pos) | CRC_CR_RESET;
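+ /* REV_IN (per byte) and REV_OUT make the peripheral bit-reflect its input and output; the inversion of DR below adds the final 0xFFFFFFFF XOR of standard CRC-32 */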
+ for (size_t i=offsetof(struct ll_pkt, pid); i<pkt_len; i++)
+ CRC->DR = ((uint8_t *)pkt)[i];
+
+ pkt->crc32 = ~CRC->DR;
+ int rc = cobs_encode_usart((int (*)(char))usart_putc_nonblocking, (char *)pkt, pkt_len);
+ if (rc)
+ return rc;
+
+ /* Checkpoint packet start index to prevent overwriting before ack */
+ usart_tx_buf.packet_start[packet_idx] = start;
+ /* FIXME debug code
static uint8_t x = 0;
-
for (size_t i=0; i<351; i++)
usart_putc_nonblocking(x++);
+ */
/* If the DMA stream is idle right now, schedule a transfer */
if (!(DMA1_Channel2->CCR & DMA_CCR_EN))