Diffstat (limited to 'gm_platform')
-rw-r--r--  gm_platform/fw/serial.c  136
-rw-r--r--  gm_platform/fw/serial.h   10
2 files changed, 64 insertions, 82 deletions
diff --git a/gm_platform/fw/serial.c b/gm_platform/fw/serial.c
index 5a6009c..25b86fc 100644
--- a/gm_platform/fw/serial.c
+++ b/gm_platform/fw/serial.c
@@ -43,18 +43,23 @@ static void usart_schedule_dma(void);
static int usart_putc_nonblocking(uint8_t c);
-void usart_dma_init() {
+void usart_dma_reset() {
usart_tx_buf.xfr_start = -1;
usart_tx_buf.xfr_end = 0;
usart_tx_buf.wr_pos = 0;
usart_tx_buf.wr_idx = 0;
- usart_tx_buf.cur_packet = -1;
- usart_tx_buf.retransmit_rq = 0;
- usart_tx_buf.wraparound = 0;
+ usart_tx_buf.xfr_next = 0;
+ usart_tx_buf.wraparound = false;
+ usart_tx_buf.ack = false;
+
for (size_t i=0; i<ARRAY_LEN(usart_tx_buf.packet_end); i++)
usart_tx_buf.packet_end[i] = -1;
cobs_decode_incremental_initialize(&cobs_state);
+}
+
+void usart_dma_init() {
+ usart_dma_reset();
/* Configure DMA 1 Channel 2 to handle uart transmission */
DMA1_Channel2->CPAR = (uint32_t)&(USART1->TDR);
@@ -74,7 +79,7 @@ void usart_dma_init() {
/* triggered on transfer completion. We use this to process the ADC data */
NVIC_EnableIRQ(DMA1_Channel2_3_IRQn);
- NVIC_SetPriority(DMA1_Channel2_3_IRQn, 1<<5);
+ NVIC_SetPriority(DMA1_Channel2_3_IRQn, 2<<5);
USART1->CR1 = /* 8-bit -> M1, M0 clear */
/* OVER8 clear. Use default 16x oversampling */
@@ -100,7 +105,7 @@ void usart_dma_init() {
/* Enable receive interrupt */
NVIC_EnableIRQ(USART1_IRQn);
- NVIC_SetPriority(USART1_IRQn, 3<<5);
+ NVIC_SetPriority(USART1_IRQn, 1<<5);
/* And... go! */
USART1->CR1 |= USART_CR1_UE;
@@ -137,17 +142,11 @@ void USART1_IRQHandler() {
switch (pkt->type) {
case CTRL_PKT_RESET:
- for (size_t i=0; i<ARRAY_LEN(usart_tx_buf.packet_end); i++)
- usart_tx_buf.packet_end[i] = -1;
+ usart_dma_reset();
break;
case CTRL_PKT_ACK:
- if (usart_ack_packet(pkt->orig_id))
- rx_protocol_errors++;
- break;
-
- case CTRL_PKT_RETRANSMIT:
- usart_tx_buf.retransmit_rq = 1;
+ usart_tx_buf.ack = true;
if (!(DMA1_Channel2->CCR & DMA_CCR_EN))
usart_schedule_dma();
break;
@@ -161,98 +160,81 @@ void USART1_IRQHandler() {
void usart_schedule_dma() {
- /* This function is only called when the DMA channel is disabled. This means we don't have to guard it in IRQ
- * disables. */
volatile struct dma_tx_buf *buf = &usart_tx_buf;
- ssize_t next_start, next_idx;
+ ssize_t xfr_start, xfr_end, xfr_len;
if (buf->wraparound) {
- buf->wraparound = 0;
- next_idx = buf->cur_packet;
- next_start = 0;
+ buf->wraparound = false;
+ xfr_start = 0;
+ xfr_len = buf->xfr_end;
+ xfr_end = buf->xfr_end;
- } else if (buf->retransmit_rq) {
- buf->retransmit_rq = 0;
- next_idx = buf->cur_packet;
- next_start = buf->xfr_start;
+ } else if (buf->ack) {
+ if (buf->packet_end[buf->xfr_next] == -1)
+ return; /* Nothing to transmit */
- } else {
- next_idx = (buf->cur_packet + 1) % ARRAY_LEN(usart_tx_buf.packet_end);
- next_start = buf->xfr_end;
- }
+ buf->ack = false;
- ssize_t next_end = buf->packet_end[next_idx];
+ xfr_start = buf->xfr_end;
+ xfr_end = buf->packet_end[buf->xfr_next];
+ buf->packet_end[buf->xfr_next] = -1;
+ buf->xfr_next = (buf->xfr_next + 1) % ARRAY_LEN(buf->packet_end);
- /* Nothing to trasnmit */
- if (next_end == -1)
- return;
+ if (xfr_end > xfr_start) { /* no wraparound */
+ xfr_len = xfr_end - xfr_start;
- ssize_t xfr_len;
- if (next_end > next_start) /* no wraparound */
- xfr_len = next_end - next_start;
- else /* wraparound */
- xfr_len = sizeof(buf->data) - next_start; /* schedule transfer until end of buffer */
+ } else { /* wraparound */
+ if (xfr_end != 0)
+ buf->wraparound = true;
+ xfr_len = sizeof(buf->data) - xfr_start;
+ }
- buf->xfr_start = next_start;
- buf->xfr_end = (next_start + xfr_len) % sizeof(buf->data); /* handle wraparound */
- buf->cur_packet = next_idx;
+ } else {
+ /* retransmit */
+ /* First, send a zero to delimit any garbage from the following good packet */
+ USART1->TDR = 0x00;
- /* initiate transmission of new buffer */
- DMA1_Channel2->CMAR = (uint32_t)(buf->data + next_start);
- DMA1_Channel2->CNDTR = xfr_len;
- DMA1_Channel2->CCR |= DMA_CCR_EN;
-}
+ xfr_start = buf->xfr_start;
+ xfr_end = buf->xfr_end;
-int usart_ack_packet(uint8_t idx) {
- if (idx > ARRAY_LEN(usart_tx_buf.packet_end))
- return -EINVAL;
+ if (xfr_end > xfr_start) { /* no wraparound */
+ xfr_len = xfr_end - xfr_start;
- if (idx != usart_tx_buf.cur_packet)
- return -EINVAL;
+ } else { /* wraparound */
+ if (xfr_end != 0)
+ buf->wraparound = true;
+ xfr_len = sizeof(buf->data) - xfr_start;
+ }
- usart_tx_buf.packet_end[idx] = -1;
+ }
- /* If the DMA stream is idle right now, schedule the next transfer */
- if (!(DMA1_Channel2->CCR & DMA_CCR_EN))
- usart_schedule_dma();
- return 0;
+ buf->xfr_start = xfr_start;
+ buf->xfr_end = xfr_end;
+
+ /* initiate transmission of new buffer */
+ DMA1_Channel2->CMAR = (uint32_t)(buf->data + xfr_start);
+ DMA1_Channel2->CNDTR = xfr_len;
+ DMA1_Channel2->CCR |= DMA_CCR_EN;
}
-int usart_dma_fifo_push(volatile struct dma_tx_buf *buf, uint8_t c) {
- /* This function must be guarded by IRQ disable since the IRQ may schedule a new transfer and charge pos/start. */
- NVIC_DisableIRQ(DMA1_Channel2_3_IRQn);
+int usart_putc_nonblocking(uint8_t c) {
+ volatile struct dma_tx_buf *buf = &usart_tx_buf;
- if (buf->wr_pos == buf->xfr_start) {
- NVIC_EnableIRQ(DMA1_Channel2_3_IRQn);
+ if (buf->wr_pos == buf->xfr_start)
return -EBUSY;
- }
buf->data[buf->wr_pos] = c;
buf->wr_pos = (buf->wr_pos + 1) % sizeof(buf->data);
-
- NVIC_EnableIRQ(DMA1_Channel2_3_IRQn);
return 0;
}
-int usart_putc(uint8_t c) {
- /* push char to fifo, busy-loop if stalled to wait for USART to empty fifo via DMA */
- while (usart_dma_fifo_push(&usart_tx_buf, c) == -EBUSY) {
- /* idle */
- }
- return 0;
-}
-
-int usart_putc_nonblocking(uint8_t c) {
- return usart_dma_fifo_push(&usart_tx_buf, c);
-}
-
void DMA1_Channel2_3_IRQHandler(void) {
/* Transfer complete */
DMA1->IFCR |= DMA_IFCR_CTCIF2;
DMA1_Channel2->CCR &= ~DMA_CCR_EN;
- if (usart_tx_buf.retransmit_rq || usart_tx_buf.wraparound)
+ if (usart_tx_buf.wraparound)
usart_schedule_dma();
}
@@ -266,7 +248,7 @@ int usart_send_packet_nonblocking(struct ll_pkt *pkt, size_t pkt_len) {
}
pkt->pid = usart_tx_buf.wr_idx;
- pkt->_pad = 0;
+ pkt->_pad = usart_tx_buf.xfr_next;
/* make the value this wonky-ass CRC implementation produces match zlib etc. */
CRC->CR = CRC_CR_REV_OUT | (1<<CRC_CR_REV_IN_Pos) | CRC_CR_RESET;
diff --git a/gm_platform/fw/serial.h b/gm_platform/fw/serial.h
index 61ac4eb..8cec089 100644
--- a/gm_platform/fw/serial.h
+++ b/gm_platform/fw/serial.h
@@ -26,6 +26,7 @@
#include <stdint.h>
#include <stdarg.h>
#include <errno.h>
+#include <stdbool.h>
#include "global.h"
@@ -33,9 +34,10 @@ struct dma_tx_buf {
/* The following fields are accessed only from DMA ISR */
ssize_t xfr_start; /* Start index of running DMA transfer */
ssize_t xfr_end; /* End index of running DMA transfer plus one */
- ssize_t cur_packet;
- int retransmit_rq;
- int wraparound;
+ bool wraparound;
+ ssize_t xfr_next;
+ bool ack;
+
/* The following fields are written only from non-interrupt code */
ssize_t wr_pos; /* Next index to be written */
@@ -57,7 +59,6 @@ struct __attribute__((__packed__)) ll_pkt {
enum ctrl_pkt_type {
CTRL_PKT_RESET = 1,
CTRL_PKT_ACK = 2,
- CTRL_PKT_RETRANSMIT = 3,
};
struct __attribute__((__packed__)) ctrl_pkt {
@@ -68,7 +69,6 @@ struct __attribute__((__packed__)) ctrl_pkt {
extern volatile struct dma_tx_buf usart_tx_buf;
void usart_dma_init(void);
-int usart_dma_fifo_push(volatile struct dma_tx_buf *buf, uint8_t c);
int usart_send_packet_nonblocking(struct ll_pkt *pkt, size_t pkt_len);
int usart_ack_packet(uint8_t idx);
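
Note: for reference, below is a minimal, self-contained sketch of the contiguous-chunk computation that the reworked usart_schedule_dma() performs on the circular TX buffer. BUF_SIZE and next_chunk_len() are illustrative names introduced here, not part of the firmware; the real code operates on sizeof(buf->data) and the xfr_start/xfr_end/wraparound fields shown in the diff above.

#include <stdbool.h>
#include <stddef.h>

#define BUF_SIZE 256  /* illustrative; the firmware uses sizeof(buf->data) */

/* Length of the next contiguous DMA chunk for the region [start, end) of a
 * circular buffer. When the region wraps past the end of the buffer, only
 * the tail up to BUF_SIZE is returned and *wraparound is set, so the
 * remainder (starting at index 0) can be scheduled from the DMA
 * transfer-complete interrupt, as DMA1_Channel2_3_IRQHandler() does above. */
size_t next_chunk_len(size_t start, size_t end, bool *wraparound)
{
    if (end > start) {               /* region is contiguous */
        *wraparound = false;
        return end - start;
    }
    *wraparound = (end != 0);        /* wraps: schedule the tail first */
    return BUF_SIZE - start;
}

For example, with start = 240 and end = 16 this yields a first chunk of 16 bytes (indices 240..255) and sets the wraparound flag; the interrupt handler then schedules the remaining 16 bytes starting at index 0.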