rewrite ethernetif to use ch32v208_eth.h

2025-11-13 02:16:21 +06:00
parent fe6cc2ebb9
commit 537e7bfe10
5 changed files with 152 additions and 341 deletions

62
.vscode/settings.json vendored

@@ -47,6 +47,64 @@
"limits": "c",
"tuple": "c",
"init.h": "c",
"hw_i2c.h": "c"
}
"hw_i2c.h": "c",
"chrono": "c",
"stop_token": "c",
"__locale": "c",
"stdint.h": "c",
"ch32v208_eth.h": "c",
"bit": "c",
"any": "c",
"array": "c",
"hash_map": "c",
"strstream": "c",
"charconv": "c",
"cmath": "c",
"codecvt": "c",
"complex": "c",
"concepts": "c",
"condition_variable": "c",
"coroutine": "c",
"cstddef": "c",
"unordered_map": "c",
"unordered_set": "c",
"exception": "c",
"memory": "c",
"numeric": "c",
"optional": "c",
"ratio": "c",
"string_view": "c",
"system_error": "c",
"type_traits": "c",
"algorithm": "c",
"iomanip": "c",
"mutex": "c",
"ostream": "c",
"semaphore": "c",
"shared_mutex": "c",
"span": "c",
"stacktrace": "c",
"text_encoding": "c",
"thread": "c",
"typeindex": "c",
"typeinfo": "c",
"utility": "c",
"valarray": "c",
"__assert": "c",
"__split_buffer": "c",
"ios": "c",
"map": "c",
"new": "c",
"queue": "c",
"set": "c",
"stack": "c",
"stdexcept": "c",
"__node_handle": "c",
"execution": "c",
"numbers": "c",
"print": "c",
"ha_mqtt.h": "c",
"ethernetif.h": "c"
},
"cmake.sourceDirectory": "/home/mira/src/embedded/ch32v208_sens/lwip"
}

Submodule ch32fun updated: 08885a5ea4...d38b104838

2
main.c

@@ -20,7 +20,7 @@
#define HSE_STARTUP_TIMEOUT 10000
#define PLL_LOCK_TIMEOUT 10000
#define LED_TOGGLE_INTERVAL_MS 500
#define LINK_POLL_INTERVAL_MS 500
#define LINK_POLL_INTERVAL_MS 100
#define RCC_PREDIV1_OFFSET 0
#define HSE_CLOCK_MHZ 32

ethernetif.c

@@ -8,66 +8,24 @@
#include "ch32v20xhw.h"
#include "lwip/etharp.h"
#include "lwip/snmp.h"
#include "systick.h"
#define CH32V208_ETH_IMPLEMENTATION
#define ETH_RX_BUF_COUNT 4
#define ETH_TX_BUF_COUNT 2
#include "ch32v208_eth.h"
#define IFNAME0 'e'
#define IFNAME1 'n'
typedef struct {
volatile uint32_t head; // producer idx: next free slot to write to
volatile uint32_t tail; // consumer idx: next slot to be txed
volatile bool is_full; // distinguishes full from empty when head == tail
} tx_queue_t;
static volatile bool g_link_changed = false;
struct ethernetif {
ETH_DMADESCTypeDef* rx_desc_head; // next desc to be filled by DMA
ETH_DMADESCTypeDef* rx_desc_tail; // next desc to be read by CPU
tx_queue_t tx_q;
};
__attribute__((aligned(4))) ETH_DMADESCTypeDef g_dma_rx_descs[ETH_RX_BUF_COUNT];
__attribute__((aligned(4))) ETH_DMADESCTypeDef g_dma_tx_descs[ETH_TX_BUF_COUNT];
__attribute__((
aligned(4))) uint8_t g_mac_rx_bufs[ETH_RX_BUF_COUNT * ETH_RX_BUF_SIZE];
__attribute__((
aligned(4))) uint8_t g_mac_tx_bufs[ETH_TX_BUF_COUNT * ETH_TX_BUF_SIZE];
static struct ethernetif g_eth_state;
static volatile bool g_link_irq_flag = false;
static inline void tx_queue_init(tx_queue_t* q) {
q->head = 0;
q->tail = 0;
q->is_full = false;
}
static inline bool tx_queue_is_empty(const tx_queue_t* q) {
return !q->is_full && (q->head == q->tail);
}
static inline bool tx_queue_is_full(const tx_queue_t* q) { return q->is_full; }
static inline void tx_queue_produce(tx_queue_t* q) {
q->head = (q->head + 1) % ETH_TX_BUF_COUNT;
if (q->head == q->tail) {
q->is_full = true;
}
}
static inline void tx_queue_consume(tx_queue_t* q) {
q->tail = (q->tail + 1) % ETH_TX_BUF_COUNT;
q->is_full = false;
}
static void low_level_init(struct netif* netif);
static void eth_link_callback(bool link_up);
static err_t low_level_output(struct netif* netif, struct pbuf* p);
static struct pbuf* low_level_input(struct netif* netif);
void phy_write_reg(uint8_t reg_add, uint16_t reg_val);
uint16_t phy_read_reg(uint8_t reg_add);
static void eth_get_mac_addr(uint8_t* mac) {
// The MAC is stored in reverse byte order.
const uint8_t* macaddr_src = (const uint8_t*)(ROM_CFG_USERADR_ID + 5);
for (int i = 0; i < 6; i++) {
mac[i] = *(macaddr_src--);
}
static void eth_link_callback(bool link_up) {
(void)link_up;
g_link_changed = true;
}
err_t ethernetif_init(struct netif* netif) {
@@ -83,274 +41,110 @@ err_t ethernetif_init(struct netif* netif) {
netif->linkoutput = low_level_output;
MIB2_INIT_NETIF(netif, snmp_ifType_ethernet_csmacd, 10000000); // 10Mbps
netif->mtu = 1500;
netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP;
eth_config_t eth_cfg = {.mac_addr = NULL, // we'll use part uuid MAC
.rx_callback = NULL, // no cb, polling API
.link_callback = eth_link_callback,
.promiscuous_mode = false,
.broadcast_filter = true,
.multicast_filter = true};
if (eth_init(&eth_cfg) != 0) {
printf("ERROR: Ethernet initialization failed\n");
return ERR_IF;
}
// get MAC from driver
netif->hwaddr_len = ETH_HWADDR_LEN;
eth_get_mac_addr(netif->hwaddr);
eth_get_mac_address(netif->hwaddr);
printf("MAC Address: %02X:%02X:%02X:%02X:%02X:%02X\n", netif->hwaddr[0],
netif->hwaddr[1], netif->hwaddr[2], netif->hwaddr[3], netif->hwaddr[4],
netif->hwaddr[5]);
netif->mtu = 1500;
netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP;
low_level_init(netif);
return ERR_OK;
}
static void low_level_init(struct netif* netif) {
struct ethernetif* ethernetif = netif->state;
// clocks
RCC->APB2PCENR |= RCC_APB2Periph_AFIO;
RCC->CFGR0 |= RCC_ETHPRE; // div 2
EXTEN->EXTEN_CTR |= EXTEN_ETH_10M_EN;
// reset mac rx and tx
ETH10M->ECON1 = RB_ETH_ECON1_TXRST | RB_ETH_ECON1_RXRST;
ETH10M->ECON1 = 0;
// mac regs
ETH10M->ERXFCON = RB_ETH_ERXFCON_BCEN | RB_ETH_ERXFCON_MCEN;
ETH10M->MACON1 = RB_ETH_MACON1_MARXEN;
ETH10M->MACON2 = PADCFG_AUTO_3 | RB_ETH_MACON2_TXCRCEN;
ETH10M->MAMXFL = ETH_MAX_PACKET_SIZE;
R8_ETH_MAADRL1 = netif->hwaddr[5];
R8_ETH_MAADRL2 = netif->hwaddr[4];
R8_ETH_MAADRL3 = netif->hwaddr[3];
R8_ETH_MAADRL4 = netif->hwaddr[2];
R8_ETH_MAADRL5 = netif->hwaddr[1];
R8_ETH_MAADRL6 = netif->hwaddr[0];
// PHY analog block
ETH10M->ECON2 = RB_ETH_ECON2_DEFAULT;
// init TX descriptors
tx_queue_init(&ethernetif->tx_q);
for (int i = 0; i < ETH_TX_BUF_COUNT; i++) {
g_dma_tx_descs[i].Status = 0;
g_dma_tx_descs[i].Buffer1Addr =
(uint32_t)&g_mac_tx_bufs[i * ETH_TX_BUF_SIZE];
g_dma_tx_descs[i].Buffer2NextDescAddr =
(uint32_t)&g_dma_tx_descs[(i + 1) % ETH_TX_BUF_COUNT];
}
// init RX descriptors
ethernetif->rx_desc_head = g_dma_rx_descs;
ethernetif->rx_desc_tail = g_dma_rx_descs;
for (int i = 0; i < ETH_RX_BUF_COUNT; i++) {
g_dma_rx_descs[i].Status = ETH_DMARxDesc_OWN;
g_dma_rx_descs[i].Buffer1Addr =
(uint32_t)&g_mac_rx_bufs[i * ETH_RX_BUF_SIZE];
g_dma_rx_descs[i].Buffer2NextDescAddr =
(uint32_t)&g_dma_rx_descs[(i + 1) % ETH_RX_BUF_COUNT];
}
// set RX buffer start and enable receiver
ETH10M->ERXST = ethernetif->rx_desc_head->Buffer1Addr;
ETH10M->ECON1 = RB_ETH_ECON1_RXEN;
phy_write_reg(PHY_BMCR, PHY_BMCR_RESET);
Delay_Ms(200);
phy_write_reg(PHY_BMCR, PHY_BMCR_FULL_DUPLEX);
ETH10M->EIR = 0xFF; // clear all interrupt flags
ETH10M->EIE = RB_ETH_EIE_INTIE | RB_ETH_EIE_RXIE | RB_ETH_EIE_TXIE |
RB_ETH_EIE_LINKIE | RB_ETH_EIE_TXERIE | RB_ETH_EIE_RXERIE |
RB_ETH_EIE_R_EN50;
NVIC_EnableIRQ(ETH_IRQn);
}
static void tx_start_if_possible(void) {
// if TXRTS bit is set, MAC is busy sending a packet
if (ETH10M->ECON1 & RB_ETH_ECON1_TXRTS) {
return;
}
struct ethernetif* ethernetif = &g_eth_state;
if (tx_queue_is_empty(&ethernetif->tx_q)) {
return;
}
// get descriptor for the next packet to send
uint32_t idx = ethernetif->tx_q.tail;
ETH_DMADESCTypeDef* dma_desc = &g_dma_tx_descs[idx];
uint16_t len = dma_desc->Status;
// tell MAC which buffer to send
ETH10M->ETXLN = len;
ETH10M->ETXST = dma_desc->Buffer1Addr;
// start tx
ETH10M->ECON1 |= RB_ETH_ECON1_TXRTS;
}
static err_t low_level_output(struct netif* netif, struct pbuf* p) {
struct ethernetif* ethernetif = netif->state;
err_t errval = ERR_OK;
(void)netif;
if (tx_queue_is_full(&ethernetif->tx_q)) {
// should this be ERR_BUF or ERR_MEM? does ERR_MEM re-queue the packet?
// queue full, drop pkt
errval = ERR_BUF;
// errval = ERR_MEM;
} else {
uint32_t current_idx = ethernetif->tx_q.head;
uint8_t* tx_buf_ptr = (uint8_t*)g_dma_tx_descs[current_idx].Buffer1Addr;
uint32_t len = 0;
static uint8_t tx_buffer[ETH_TX_BUF_SIZE];
uint32_t total_len = 0;
for (struct pbuf* q = p; q != NULL; q = q->next) {
memcpy(&tx_buf_ptr[len], q->payload, q->len);
len += q->len;
for (struct pbuf* q = p; q != NULL; q = q->next) {
if (total_len + q->len > ETH_TX_BUF_SIZE) {
LINK_STATS_INC(link.err);
return ERR_BUF;
}
g_dma_tx_descs[current_idx].Status = len;
tx_queue_produce(&ethernetif->tx_q);
memcpy(&tx_buffer[total_len], q->payload, q->len);
total_len += q->len;
}
tx_start_if_possible();
return errval;
}
// send packet via driver
int result = eth_send_packet(tx_buffer, total_len);
static struct pbuf* low_level_input(struct netif* netif) {
struct ethernetif* ethernetif = netif->state;
struct pbuf* p = NULL;
// if the OWN bit is set, the descriptor still belongs to DMA and no packet is ready
if (ethernetif->rx_desc_tail->Status & ETH_DMARxDesc_OWN) {
return NULL;
}
// packet ready
uint32_t len = (ethernetif->rx_desc_tail->Status & ETH_DMARxDesc_FL) >> 16;
p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);
if (p != NULL) {
uint8_t* buffer = (uint8_t*)ethernetif->rx_desc_tail->Buffer1Addr;
uint32_t offset = 0;
for (struct pbuf* q = p; q != NULL; q = q->next) {
memcpy(q->payload, buffer + offset, q->len);
offset += q->len;
}
LINK_STATS_INC(link.recv);
} else {
LINK_STATS_INC(link.memerr);
if (result == -1) {
// tx queue full
LINK_STATS_INC(link.drop);
return ERR_BUF;
} else if (result == -2) {
// invalid length
LINK_STATS_INC(link.err);
return ERR_ARG;
}
// give buffer back to DMA
ethernetif->rx_desc_tail->Status = ETH_DMARxDesc_OWN;
// advance read pointer to the next descriptor in the ring
ethernetif->rx_desc_tail =
(ETH_DMADESCTypeDef*)ethernetif->rx_desc_tail->Buffer2NextDescAddr;
return p;
LINK_STATS_INC(link.xmit);
return ERR_OK;
}
void ethernetif_input(struct netif* netif) {
struct pbuf* p;
while ((p = low_level_input(netif)) != NULL) {
if (netif->input(p, netif) != ERR_OK) {
pbuf_free(p);
uint16_t length;
const uint8_t* packet;
// process all pending packets using polling API
while ((packet = eth_get_rx_packet(&length)) != NULL) {
struct pbuf* p = pbuf_alloc(PBUF_RAW, length, PBUF_POOL);
if (p != NULL) {
// copy packet into pbuf chain
uint32_t offset = 0;
for (struct pbuf* q = p; q != NULL; q = q->next) {
memcpy(q->payload, packet + offset, q->len);
offset += q->len;
}
LINK_STATS_INC(link.recv);
// pass to lwIP
if (netif->input(p, netif) != ERR_OK) {
pbuf_free(p);
}
} else {
// oom
LINK_STATS_INC(link.memerr);
LINK_STATS_INC(link.drop);
}
// release packet back to driver
eth_release_rx_packet();
}
}
void ethernetif_link_poll(struct netif* netif) {
if (!g_link_irq_flag) return;
g_link_irq_flag = false;
// driver does PHY polling and autoneg
eth_poll_link();
// BMSR link status is latching-low: the first read clears the latch, the second returns the current state
(void)phy_read_reg(PHY_BMSR);
uint16_t bmsr = phy_read_reg(PHY_BMSR);
if (g_link_changed) {
g_link_changed = false;
if (bmsr & PHY_BMSR_LINK_STATUS) {
if (!netif_is_link_up(netif)) {
ETH10M->MACON2 |= RB_ETH_MACON2_FULDPX;
bool link_up = eth_is_link_up();
if (link_up && !netif_is_link_up(netif)) {
netif_set_link_up(netif);
}
} else {
if (netif_is_link_up(netif)) {
} else if (!link_up && netif_is_link_up(netif)) {
netif_set_link_down(netif);
}
}
}
void ETH_IRQHandler(void) __attribute__((interrupt)) __attribute__((used));
void ETH_IRQHandler(void) {
uint32_t flags = ETH10M->EIR;
struct ethernetif* ethernetif = &g_eth_state;
if (flags & RB_ETH_EIR_RXIF) {
ETH10M->EIR = RB_ETH_EIR_RXIF;
// descriptor should be owned by DMA
if (ethernetif->rx_desc_head->Status & ETH_DMARxDesc_OWN) {
ETH_DMADESCTypeDef* next_desc =
(ETH_DMADESCTypeDef*)ethernetif->rx_desc_head->Buffer2NextDescAddr;
// if next descriptor OWN bit is 0, ring is full and we must drop
if (!(next_desc->Status & ETH_DMARxDesc_OWN)) {
LINK_STATS_INC(link.drop);
} else {
// process and re-arm
ethernetif->rx_desc_head->Status &= ~ETH_DMARxDesc_OWN;
// write packet len into status field for CPU
ethernetif->rx_desc_head->Status |=
(ETH_DMARxDesc_FS | ETH_DMARxDesc_LS |
(ETH10M->ERXLN << ETH_DMARxDesc_FrameLengthShift));
// advance descriptor pointer
ethernetif->rx_desc_head = next_desc;
// re-arm receiver with the next empty buffer
ETH10M->ERXST = (uint32_t)ethernetif->rx_desc_head->Buffer1Addr;
}
}
}
if (flags & RB_ETH_EIR_TXIF) {
ETH10M->EIR = RB_ETH_EIR_TXIF;
if (!tx_queue_is_empty(&ethernetif->tx_q)) {
LINK_STATS_INC(link.xmit);
tx_queue_consume(&ethernetif->tx_q);
}
tx_start_if_possible();
}
if (flags & RB_ETH_EIR_TXERIF) {
ETH10M->EIR = RB_ETH_EIR_TXERIF;
LINK_STATS_INC(link.err);
if (!tx_queue_is_empty(&ethernetif->tx_q)) {
tx_queue_consume(&ethernetif->tx_q);
}
tx_start_if_possible();
}
if (flags & RB_ETH_EIR_RXERIF) {
ETH10M->EIR = RB_ETH_EIR_RXERIF;
ETH10M->ECON1 |= RB_ETH_ECON1_RXEN; // re-enable receiver
LINK_STATS_INC(link.err);
}
if (flags & RB_ETH_EIR_LINKIF) {
g_link_irq_flag = true;
ETH10M->EIR = RB_ETH_EIR_LINKIF;
}
}
void phy_write_reg(uint8_t reg_add, uint16_t reg_val) {
R32_ETH_MIWR = (reg_add & RB_ETH_MIREGADR_MASK) | RB_ETH_MIWR_MIIWR |
(reg_val << RB_ETH_MIWR_DATA_SHIFT);
}
uint16_t phy_read_reg(uint8_t reg_add) {
ETH10M->MIERGADR = reg_add;
return ETH10M->MIRD;
}
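For context, the polling driver API that the rewritten ethernetif.c above relies on can be summarized as follows. This is a sketch inferred from the call sites in this diff (eth_init, eth_send_packet, eth_get_rx_packet, and friends), not copied from ch32v208_eth.h itself, so the exact parameter types, field layout, and return codes are assumptions.
#include <stdbool.h>
#include <stdint.h>
/* Configuration macros are defined before the include, as at the top of ethernetif.c. */
#define ETH_RX_BUF_COUNT 4
#define ETH_TX_BUF_COUNT 2
#define CH32V208_ETH_IMPLEMENTATION /* emit the implementation in this translation unit */
typedef struct {
  const uint8_t* mac_addr;                                /* NULL: derive MAC from the chip's unique ID */
  void (*rx_callback)(const uint8_t* data, uint16_t len); /* signature assumed; NULL selects the polling API */
  void (*link_callback)(bool link_up);                    /* invoked from eth_poll_link() on link changes */
  bool promiscuous_mode;
  bool broadcast_filter;
  bool multicast_filter;
} eth_config_t;
int eth_init(const eth_config_t* cfg);                  /* 0 on success */
void eth_get_mac_address(uint8_t mac[6]);
int eth_send_packet(const uint8_t* data, uint32_t len); /* 0 ok, -1 TX queue full, -2 invalid length */
const uint8_t* eth_get_rx_packet(uint16_t* len);        /* NULL when no frame is pending */
void eth_release_rx_packet(void);                       /* return the RX buffer to the driver */
void eth_poll_link(void);                               /* drives PHY polling and autonegotiation */
bool eth_is_link_up(void);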

ethernetif.h

@@ -4,38 +4,9 @@
#include "lwip/err.h"
#include "lwip/netif.h"
/* Unique device ID */
#define ROM_CFG_USERADR_ID 0x1FFFF7E8
/* Ethernet Frame Size Definitions */
#define ETH_HEADER \
14 /* 6 byte Dest addr, 6 byte Src addr, 2 byte length/type */
#define ETH_CRC 4 /* Ethernet CRC */
#define ETH_EXTRA 2 /* Extra bytes in some cases */
#define VLAN_TAG 4 /* optional 802.1q VLAN Tag */
#define MIN_ETH_PAYLOAD 46 /* Minimum Ethernet payload size */
#define MAX_ETH_PAYLOAD 1500 /* Maximum Ethernet payload size */
#define ETH_MAX_PACKET_SIZE \
1536 /* ETH_HEADER + VLAN_TAG + MAX_ETH_PAYLOAD + ETH_CRC */
#define MIN_ETH_FRAME_SIZE (ETH_HEADER + MIN_ETH_PAYLOAD) /* 60 bytes */
/* Buffer Configuration */
#define ETH_RX_BUF_COUNT 4
#define ETH_TX_BUF_COUNT 2
#define ETH_RX_BUF_SIZE ETH_MAX_PACKET_SIZE
#define ETH_TX_BUF_SIZE ETH_MAX_PACKET_SIZE
/* DMA descriptor definitions */
#define ETH_DMARxDesc_FrameLengthShift 16
typedef struct {
uint32_t volatile Status; /* Status */
uint32_t ControlBufferSize; /* Control and Buffer1, Buffer2 lengths */
uint32_t Buffer1Addr; /* Buffer1 address pointer */
uint32_t Buffer2NextDescAddr; /* Buffer2 or next descriptor address pointer */
} ETH_DMADESCTypeDef;
#ifdef __cplusplus
extern "C" {
#endif
/**
* Should be called at the beginning of the program to set up the
@@ -70,20 +41,8 @@ void ethernetif_input(struct netif* netif);
*/
void ethernetif_link_poll(struct netif* netif);
/**
* Write a value to PHY register.
*
* @param reg_add PHY register address.
* @param reg_val Value to write.
*/
void phy_write_reg(uint8_t reg_add, uint16_t reg_val);
#ifdef __cplusplus
}
#endif
/**
* Read a value from PHY register.
*
* @param reg_add PHY register address.
* @return Register value.
*/
uint16_t phy_read_reg(uint8_t reg_add);
#endif /* __ETHERNETIF_H */
#endif /* __ETHERNETIF_H */
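As a usage note, main.c is only partially shown in this diff (the LINK_POLL_INTERVAL_MS change from 500 to 100), so a minimal bare-metal (NO_SYS) loop driving the glue declared above might look like the sketch below. ethernetif_init, ethernetif_input, ethernetif_link_poll, and LINK_POLL_INTERVAL_MS come from this commit; millis(), the address setup, and the overall loop structure are placeholders and assumptions, not the project's actual main.c.
#include <stdint.h>
#include "lwip/init.h"
#include "lwip/netif.h"
#include "lwip/timeouts.h"
#include "ethernetif.h"
#define LINK_POLL_INTERVAL_MS 100 /* matches the new value in main.c */
/* Assumed millisecond tick source (e.g. from the project's systick code). */
extern uint32_t millis(void);
static struct netif g_netif;
void net_task(void) {
  lwip_init();
  /* Address configuration (DHCP or static) is omitted; ANY addresses are placeholders. */
  if (netif_add(&g_netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4, NULL,
                ethernetif_init, netif_input) == NULL) {
    return; /* eth_init() failed inside ethernetif_init() */
  }
  netif_set_default(&g_netif);
  netif_set_up(&g_netif);
  uint32_t last_poll = 0;
  for (;;) {
    ethernetif_input(&g_netif); /* drain any pending RX frames via the polling API */
    if (millis() - last_poll >= LINK_POLL_INTERVAL_MS) {
      last_poll = millis();
      ethernetif_link_poll(&g_netif); /* lets eth_poll_link() handle the PHY and link state */
    }
    sys_check_timeouts(); /* lwIP timer housekeeping in NO_SYS builds */
  }
}
Tightening the poll interval from 500 ms to 100 ms fits this structure: link changes are now detected by eth_poll_link() rather than by the ETH link interrupt, so the loop has to call it often enough to notice transitions promptly.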