proper RX DMA

2025-11-08 21:14:56 +06:00
parent e814012f09
commit 06012c613a

@@ -20,6 +20,7 @@
 struct ethernetif {
     ETH_DMADESCTypeDef* DMARxDescToGet;
+    ETH_DMADESCTypeDef* DMARxDescToRead;
     ETH_DMADESCTypeDef* DMATxDescToSet;
 };
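Both pointers walk the same statically allocated descriptor ring (DMARxDscrTab below): DMARxDescToGet is advanced by the interrupt handler, DMARxDescToRead by low_level_input(). For reference, a sketch of the descriptor fields this file touches, assuming the stock ST/WCH-style layout; the ControlBufferSize member is an assumption taken from that layout and is not used anywhere in this diff.

/* Sketch of the assumed vendor descriptor layout; only Status, Buffer1Addr
 * and Buffer2NextDescAddr are actually referenced in this file. */
typedef struct {
    volatile uint32_t Status;        /* OWN, FS/LS flags, frame length in the upper bits */
    uint32_t ControlBufferSize;      /* assumed: buffer size / control bits, unused here */
    uint32_t Buffer1Addr;            /* points into MACRxBuf / MACTxBuf */
    uint32_t Buffer2NextDescAddr;    /* link to the next descriptor in the ring */
} ETH_DMADESCTypeDef;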
@@ -29,7 +30,6 @@ __attribute__((aligned(4))) uint8_t MACRxBuf[ETH_RXBUFNB * ETH_RX_BUF_SZE];
 __attribute__((aligned(4))) uint8_t MACTxBuf[ETH_TXBUFNB * ETH_TX_BUF_SZE];
 static volatile bool g_link_interrupt_flag = false;
-static volatile bool g_packet_received_flag = false;
 static struct ethernetif eth_state;
 static void low_level_init(struct netif* netif);
@@ -114,8 +114,9 @@ static void low_level_init(struct netif* netif) {
     // init RX descriptors
     ethernetif->DMARxDescToGet = DMARxDscrTab;
+    ethernetif->DMARxDescToRead = DMARxDscrTab;
     for (int i = 0; i < ETH_RXBUFNB; i++) {
-        DMARxDscrTab[i].Status = 0;
+        DMARxDscrTab[i].Status = ETH_DMARxDesc_OWN;
         DMARxDscrTab[i].Buffer1Addr = (uint32_t)&MACRxBuf[i * ETH_RX_BUF_SZE];
         DMARxDscrTab[i].Buffer2NextDescAddr =
             (uint32_t)&DMARxDscrTab[(i + 1) % ETH_RXBUFNB];
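With every descriptor pre-marked OWN and the last one linked back to the first, the ring starts out fully available to the receive side. A small debugging sketch, not part of this commit and using only names already present in the file, that reports how many descriptors are still free; immediately after low_level_init() it should return ETH_RXBUFNB.

/* Debugging sketch: number of RX descriptors still owned by the receive
 * side (OWN set), i.e. still empty and available for incoming frames. */
static int rx_ring_free_count(void) {
    int free_descs = 0;
    for (int i = 0; i < ETH_RXBUFNB; i++) {
        if (DMARxDscrTab[i].Status & ETH_DMARxDesc_OWN) {
            free_descs++;
        }
    }
    return free_descs;
}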
@@ -167,55 +168,42 @@ static err_t low_level_output(struct netif* netif, struct pbuf* p) {
 static struct pbuf* low_level_input(struct netif* netif) {
     struct ethernetif* ethernetif = netif->state;
-    uint16_t len = ETH10M->ERXLN;
-    if (len < MIN_ETH_FRAME_SIZE || len > ETH_MAX_PACKET_SIZE) {
-        LINK_STATS_INC(link.lenerr);
-        ETH10M->ECON1 |= RB_ETH_ECON1_RXEN;
+    struct pbuf* p = NULL;
+    // if OWN bit is set, it's still owned by DMA and no packet rdy
+    if (ethernetif->DMARxDescToRead->Status & ETH_DMARxDesc_OWN) {
         return NULL;
     }
-    uint8_t* current_rx_buffer_ptr =
-        (uint8_t*)ethernetif->DMARxDescToGet->Buffer1Addr;
-    struct pbuf* p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);
+    // packet ready
+    uint32_t len = (ethernetif->DMARxDescToRead->Status & ETH_DMARxDesc_FL) >> 16;
+    p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);
     if (p != NULL) {
+        uint8_t* buffer = (uint8_t*)ethernetif->DMARxDescToRead->Buffer1Addr;
         uint32_t offset = 0;
         for (struct pbuf* q = p; q != NULL; q = q->next) {
-            memcpy(q->payload, current_rx_buffer_ptr + offset, q->len);
+            memcpy(q->payload, buffer + offset, q->len);
             offset += q->len;
         }
         LINK_STATS_INC(link.recv);
+        MIB2_STATS_NETIF_ADD(netif, ifinoctets, len);
     } else {
         LINK_STATS_INC(link.memerr);
         LINK_STATS_INC(link.drop);
+        MIB2_STATS_NETIF_INC(netif, ifindiscards);
     }
-    // move to next descriptor
-    ethernetif->DMARxDescToGet =
-        (ETH_DMADESCTypeDef*)ethernetif->DMARxDescToGet->Buffer2NextDescAddr;
-    ETH10M->ERXST = (uint32_t)ethernetif->DMARxDescToGet->Buffer1Addr;
-    ETH10M->ECON1 |= RB_ETH_ECON1_RXEN;
+    // give buffer back to DMA
+    ethernetif->DMARxDescToRead->Status = ETH_DMARxDesc_OWN;
+    // advance read pointer to the next descriptor in the ring
+    ethernetif->DMARxDescToRead =
+        (ETH_DMADESCTypeDef*)ethernetif->DMARxDescToRead->Buffer2NextDescAddr;
     return p;
 }
 void ethernetif_input(struct netif* netif) {
-    if (!g_packet_received_flag) {
-        return;
-    }
-    NVIC_DisableIRQ(ETH_IRQn);
-    if (g_packet_received_flag) {
-        g_packet_received_flag = false;
-    }
-    NVIC_EnableIRQ(ETH_IRQn);
-    struct pbuf* p = low_level_input(netif);
-    if (p != NULL) {
+    struct pbuf* p;
+    while ((p = low_level_input(netif)) != NULL) {
         if (netif->input(p, netif) != ERR_OK) {
             pbuf_free(p);
         }
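Since ethernetif_input() now loops until low_level_input() returns NULL, a single call drains every frame the interrupt handler has handed over, so the old packet-received flag and the NVIC enable/disable dance are no longer needed. A minimal sketch of how this is typically driven from a bare-metal (NO_SYS) main loop; the helper name, the netif variable and the sys_check_timeouts() call are assumptions about the surrounding application, not part of this diff.

/* Assumed application code (sketch), e.g. called from main(). */
extern struct netif netif;               /* assumed: created elsewhere via netif_add() */

void app_poll_forever(void) {            /* hypothetical helper name */
    for (;;) {
        ethernetif_link_poll(&netif);    /* link up/down handling from this file */
        ethernetif_input(&netif);        /* deliver all pending RX frames to lwIP */
        sys_check_timeouts();            /* lwIP timer processing (NO_SYS builds) */
    }
}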
@@ -245,10 +233,31 @@ void ethernetif_link_poll(struct netif* netif) {
 void ETH_IRQHandler(void) __attribute__((interrupt)) __attribute__((used));
 void ETH_IRQHandler(void) {
     uint32_t flags = ETH10M->EIR;
+    struct ethernetif* ethernetif = &eth_state;
     if (flags & RB_ETH_EIR_RXIF) {
-        g_packet_received_flag = true;
         ETH10M->EIR = RB_ETH_EIR_RXIF;
+        // descriptor should be owned by DMA
+        if (ethernetif->DMARxDescToGet->Status & ETH_DMARxDesc_OWN) {
+            ETH_DMADESCTypeDef* next_desc =
+                (ETH_DMADESCTypeDef*)ethernetif->DMARxDescToGet->Buffer2NextDescAddr;
+            // if next descriptor OWN bit is 0, ring is full and we must drop
+            if (!(next_desc->Status & ETH_DMARxDesc_OWN)) {
+                LINK_STATS_INC(link.drop);
+            } else {
+                // process and re-arm
+                ethernetif->DMARxDescToGet->Status &= ~ETH_DMARxDesc_OWN;
+                // write packet len into status field for CPU
+                ethernetif->DMARxDescToGet->Status |=
+                    (ETH_DMARxDesc_FS | ETH_DMARxDesc_LS | (ETH10M->ERXLN << 16));
+                // advance descriptor ptr
+                ethernetif->DMARxDescToGet = next_desc;
+                // re-arm receiver with new empty buf
+                ETH10M->ERXST = (uint32_t)ethernetif->DMARxDescToGet->Buffer1Addr;
+            }
+        }
     }
     if (flags & RB_ETH_EIR_TXIF) {
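Read together with low_level_input() above, the handler implements a single-producer/single-consumer handshake over the OWN bit. A comment-form summary of the protocol, as inferred from the code in this commit:

/*
 * RX ring handshake (as inferred from this commit):
 *
 *   OWN = 1 : descriptor is empty, available to the ISR ("DMA" side)
 *   OWN = 0 : descriptor holds a received frame, waiting for the stack
 *
 * ISR (producer), on RXIF:
 *   - if the next descriptor is still un-OWNed, the ring is full: drop
 *   - otherwise clear OWN on DMARxDescToGet and store FS | LS plus the
 *     frame length (ERXLN) in the upper status bits
 *   - advance DMARxDescToGet and point ERXST at the next empty buffer
 *
 * Stack (consumer), in low_level_input() / ethernetif_input():
 *   - while OWN is clear on DMARxDescToRead, copy the frame into a pbuf
 *   - set OWN again to hand the buffer back, then advance DMARxDescToRead
 */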