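/*
 * ethernetif.c — lwIP network interface driver for the CH32V20x built-in
 * 10M Ethernet MAC/PHY (ch32fun). RX and TX use statically allocated
 * buffers tracked by software-managed descriptor rings.
 */
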
#include "ethernetif.h"
|
|
|
|
#include <stdbool.h>
|
|
#include <stdio.h>
|
|
#include <string.h>
|
|
|
|
#include "ch32fun.h"
|
|
#include "ch32v20xhw.h"
|
|
#include "lwip/etharp.h"
|
|
#include "lwip/snmp.h"
|
|
#include "systick.h"
|
|
|
|
#define IFNAME0 'e'
|
|
#define IFNAME1 'n'
|
|
|
|
#define ETH_RX_BUF_COUNT 4
|
|
#define ETH_TX_BUF_COUNT 2
|
|
/* buf size should be at least ETH_MAX_PACKET_SIZE */
|
|
#define ETH_RX_BUF_SIZE ETH_MAX_PACKET_SIZE
|
|
#define ETH_TX_BUF_SIZE ETH_MAX_PACKET_SIZE
|
|
|
|
typedef struct {
|
|
volatile uint32_t head; // producer idx: next free slot to write to
|
|
volatile uint32_t tail; // consumer idx: next slot to be txed
|
|
volatile bool is_full; // for N=1 size
|
|
} tx_queue_t;
|
|
|
|
struct ethernetif {
|
|
ETH_DMADESCTypeDef* rx_desc_head; // next desc to be filled by DMA
|
|
ETH_DMADESCTypeDef* rx_desc_tail; // next desc to be read by CPU
|
|
tx_queue_t tx_q;
|
|
};
|
|
|
|
__attribute__((aligned(4))) ETH_DMADESCTypeDef g_dma_rx_descs[ETH_RX_BUF_COUNT];
|
|
__attribute__((aligned(4))) ETH_DMADESCTypeDef g_dma_tx_descs[ETH_TX_BUF_COUNT];
|
|
__attribute__((
|
|
aligned(4))) uint8_t g_mac_rx_bufs[ETH_RX_BUF_COUNT * ETH_RX_BUF_SIZE];
|
|
__attribute__((
|
|
aligned(4))) uint8_t g_mac_tx_bufs[ETH_TX_BUF_COUNT * ETH_TX_BUF_SIZE];
|
|
|
|
static struct ethernetif g_eth_state;
|
|
static volatile bool g_link_irq_flag = false;
|
|
|
|
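/*
 * TX ring bookkeeping: low_level_output() produces into the slot at `head`,
 * and the ETH interrupt consumes completed slots from `tail`. Slot contents
 * live in g_dma_tx_descs / g_mac_tx_bufs; this only tracks the indices.
 */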
static inline void tx_queue_init(tx_queue_t* q) {
  q->head = 0;
  q->tail = 0;
  q->is_full = false;
}

static inline bool tx_queue_is_empty(const tx_queue_t* q) {
  return !q->is_full && (q->head == q->tail);
}
static inline bool tx_queue_is_full(const tx_queue_t* q) { return q->is_full; }
static inline void tx_queue_produce(tx_queue_t* q) {
  q->head = (q->head + 1) % ETH_TX_BUF_COUNT;
  if (q->head == q->tail) {
    q->is_full = true;
  }
}
static inline void tx_queue_consume(tx_queue_t* q) {
  q->tail = (q->tail + 1) % ETH_TX_BUF_COUNT;
  q->is_full = false;
}

static void low_level_init(struct netif* netif);
static err_t low_level_output(struct netif* netif, struct pbuf* p);
static struct pbuf* low_level_input(struct netif* netif);
void phy_write_reg(uint8_t reg_add, uint16_t reg_val);
uint16_t phy_read_reg(uint8_t reg_add);

static void eth_get_mac_addr(uint8_t* mac) {
  // The factory MAC is stored in reverse byte order, so read it backwards
  // starting from its last byte.
  const uint8_t* macaddr_src = (const uint8_t*)(ROM_CFG_USERADR_ID + 5);
  for (int i = 0; i < 6; i++) {
    mac[i] = *(macaddr_src--);
  }
}

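/*
 * lwIP initialization callback (passed to netif_add): fills in the netif,
 * reads the factory MAC address, and brings up the hardware.
 */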
err_t ethernetif_init(struct netif* netif) {
#if LWIP_NETIF_HOSTNAME
  netif->hostname = "lwip-ch32";
#endif

  netif->state = &g_eth_state;
  netif->name[0] = IFNAME0;
  netif->name[1] = IFNAME1;

  netif->output = etharp_output;
  netif->linkoutput = low_level_output;

  MIB2_INIT_NETIF(netif, snmp_ifType_ethernet_csmacd, 10000000);  // 10 Mbit/s

  netif->hwaddr_len = ETH_HWADDR_LEN;
  eth_get_mac_addr(netif->hwaddr);

  printf("MAC Address: %02X:%02X:%02X:%02X:%02X:%02X\n", netif->hwaddr[0],
         netif->hwaddr[1], netif->hwaddr[2], netif->hwaddr[3], netif->hwaddr[4],
         netif->hwaddr[5]);

  netif->mtu = 1500;
  netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP;

  low_level_init(netif);

  return ERR_OK;
}

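/*
 * Hardware bring-up: enable clocks for the 10M Ethernet block, reset the
 * MAC, program receive filtering and the station MAC address, build the
 * software TX/RX descriptor rings, reset the PHY, and unmask interrupts.
 */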
static void low_level_init(struct netif* netif) {
  struct ethernetif* ethernetif = netif->state;

  // clocks
  RCC->APB2PCENR |= RCC_APB2Periph_AFIO;
  RCC->CFGR0 |= RCC_ETHPRE;  // div 2
  EXTEN->EXTEN_CTR |= EXTEN_ETH_10M_EN;

  // reset mac rx and tx
  ETH10M->ECON1 = RB_ETH_ECON1_TXRST | RB_ETH_ECON1_RXRST;
  ETH10M->ECON1 = 0;

  // mac regs
  ETH10M->ERXFCON = RB_ETH_ERXFCON_BCEN | RB_ETH_ERXFCON_MCEN;
  ETH10M->MACON1 = RB_ETH_MACON1_MARXEN;
  ETH10M->MACON2 = PADCFG_AUTO_3 | RB_ETH_MACON2_TXCRCEN;
  ETH10M->MAMXFL = ETH_MAX_PACKET_SIZE;

  R8_ETH_MAADRL1 = netif->hwaddr[5];
  R8_ETH_MAADRL2 = netif->hwaddr[4];
  R8_ETH_MAADRL3 = netif->hwaddr[3];
  R8_ETH_MAADRL4 = netif->hwaddr[2];
  R8_ETH_MAADRL5 = netif->hwaddr[1];
  R8_ETH_MAADRL6 = netif->hwaddr[0];

  // PHY analog block
  ETH10M->ECON2 = RB_ETH_ECON2_DEFAULT;

  // init TX descriptors
  tx_queue_init(&ethernetif->tx_q);
  for (int i = 0; i < ETH_TX_BUF_COUNT; i++) {
    g_dma_tx_descs[i].Status = 0;
    g_dma_tx_descs[i].Buffer1Addr =
        (uint32_t)&g_mac_tx_bufs[i * ETH_TX_BUF_SIZE];
    g_dma_tx_descs[i].Buffer2NextDescAddr =
        (uint32_t)&g_dma_tx_descs[(i + 1) % ETH_TX_BUF_COUNT];
  }

  // init RX descriptors
  ethernetif->rx_desc_head = g_dma_rx_descs;
  ethernetif->rx_desc_tail = g_dma_rx_descs;
  for (int i = 0; i < ETH_RX_BUF_COUNT; i++) {
    g_dma_rx_descs[i].Status = ETH_DMARxDesc_OWN;
    g_dma_rx_descs[i].Buffer1Addr =
        (uint32_t)&g_mac_rx_bufs[i * ETH_RX_BUF_SIZE];
    g_dma_rx_descs[i].Buffer2NextDescAddr =
        (uint32_t)&g_dma_rx_descs[(i + 1) % ETH_RX_BUF_COUNT];
  }

  // set RX buffer start and enable receiver
  ETH10M->ERXST = ethernetif->rx_desc_head->Buffer1Addr;
  ETH10M->ECON1 = RB_ETH_ECON1_RXEN;

  phy_write_reg(PHY_BMCR, PHY_BMCR_RESET);
  Delay_Ms(200);

  phy_write_reg(PHY_BMCR, PHY_BMCR_FULL_DUPLEX);

  ETH10M->EIR = 0xFF;  // clear all interrupt flags
  ETH10M->EIE = RB_ETH_EIE_INTIE | RB_ETH_EIE_RXIE | RB_ETH_EIE_TXIE |
                RB_ETH_EIE_LINKIE | RB_ETH_EIE_TXERIE | RB_ETH_EIE_RXERIE |
                RB_ETH_EIE_R_EN50;

  NVIC_EnableIRQ(ETH_IRQn);
}

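/*
 * Kick the transmitter if it is idle and a frame is queued. The MAC sends
 * one frame at a time: point ETXST at the buffer, write the length to
 * ETXLN, and set TXRTS. Hardware clears TXRTS and raises TXIF when the
 * frame is out; the ISR then consumes the slot and calls this again to
 * chain the next frame.
 */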
static void tx_start_if_possible(void) {
  // if TXRTS bit is set, MAC is busy sending a packet
  if (ETH10M->ECON1 & RB_ETH_ECON1_TXRTS) {
    return;
  }

  struct ethernetif* ethernetif = &g_eth_state;

  if (tx_queue_is_empty(&ethernetif->tx_q)) {
    return;
  }

  // get descriptor for the next packet to send
  uint32_t idx = ethernetif->tx_q.tail;
  ETH_DMADESCTypeDef* dma_desc = &g_dma_tx_descs[idx];

  // frame length was stashed in the Status field by low_level_output()
  uint16_t len = dma_desc->Status;

  // tell MAC which buffer to send
  ETH10M->ETXLN = len;
  ETH10M->ETXST = dma_desc->Buffer1Addr;
  // start tx
  ETH10M->ECON1 |= RB_ETH_ECON1_TXRTS;
}

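/*
 * lwIP linkoutput callback: flatten the pbuf chain into the next free TX
 * buffer and queue it for transmission.
 */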
static err_t low_level_output(struct netif* netif, struct pbuf* p) {
  struct ethernetif* ethernetif = netif->state;
  err_t errval = ERR_OK;

  if (tx_queue_is_full(&ethernetif->tx_q)) {
    // Queue full: drop the packet. lwIP does not re-queue the pbuf on
    // either ERR_BUF or ERR_MEM; TCP retransmits on its own timer anyway.
    errval = ERR_BUF;
  } else {
    uint32_t current_idx = ethernetif->tx_q.head;
    uint8_t* tx_buf_ptr = (uint8_t*)g_dma_tx_descs[current_idx].Buffer1Addr;
    uint32_t len = 0;

    for (struct pbuf* q = p; q != NULL; q = q->next) {
      memcpy(&tx_buf_ptr[len], q->payload, q->len);
      len += q->len;
    }

    // stash the frame length in the Status field for tx_start_if_possible()
    g_dma_tx_descs[current_idx].Status = len;

    tx_queue_produce(&ethernetif->tx_q);
  }

  tx_start_if_possible();

  return errval;
}

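/*
 * Pop one received frame off the software RX ring, copy it into a freshly
 * allocated pbuf chain, and hand the buffer back to the receiver. Returns
 * NULL when no frame is pending (or allocation fails, in which case the
 * frame is dropped).
 */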
static struct pbuf* low_level_input(struct netif* netif) {
  struct ethernetif* ethernetif = netif->state;
  struct pbuf* p = NULL;

  // if OWN bit is set, it's still owned by DMA and no packet is ready
  if (ethernetif->rx_desc_tail->Status & ETH_DMARxDesc_OWN) {
    return NULL;
  }

  // packet ready: the frame length lives in bits 16+ of the status word
  // (the ISR put it there, mirroring the ETH_DMARxDesc_FL layout)
  uint32_t len = (ethernetif->rx_desc_tail->Status & ETH_DMARxDesc_FL) >> 16;

  p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);
  if (p != NULL) {
    uint8_t* buffer = (uint8_t*)ethernetif->rx_desc_tail->Buffer1Addr;
    uint32_t offset = 0;
    for (struct pbuf* q = p; q != NULL; q = q->next) {
      memcpy(q->payload, buffer + offset, q->len);
      offset += q->len;
    }
    LINK_STATS_INC(link.recv);
  } else {
    LINK_STATS_INC(link.memerr);
    LINK_STATS_INC(link.drop);
  }

  // give buffer back to DMA
  ethernetif->rx_desc_tail->Status = ETH_DMARxDesc_OWN;
  // advance read pointer to the next descriptor in the ring
  ethernetif->rx_desc_tail =
      (ETH_DMADESCTypeDef*)ethernetif->rx_desc_tail->Buffer2NextDescAddr;

  return p;
}

void ethernetif_input(struct netif* netif) {
  struct pbuf* p;
  while ((p = low_level_input(netif)) != NULL) {
    if (netif->input(p, netif) != ERR_OK) {
      pbuf_free(p);
    }
  }
}

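/*
 * Runs from the main loop rather than the ISR, reacting to link-change
 * interrupts recorded in g_link_irq_flag.
 */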
void ethernetif_link_poll(struct netif* netif) {
  if (!g_link_irq_flag) return;
  g_link_irq_flag = false;

  // BMSR link status is latched-low: the first read returns (and clears)
  // the latched value, the second read returns the current state
  (void)phy_read_reg(PHY_BMSR);
  uint16_t bmsr = phy_read_reg(PHY_BMSR);

  if (bmsr & PHY_BMSR_LINK_STATUS) {
    if (!netif_is_link_up(netif)) {
      ETH10M->MACON2 |= RB_ETH_MACON2_FULDPX;
      netif_set_link_up(netif);
    }
  } else {
    if (netif_is_link_up(netif)) {
      netif_set_link_down(netif);
    }
  }
}

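/*
 * ETH interrupt. The MAC receives into a single buffer (ERXST) rather than
 * walking a descriptor ring, so the RX ring is emulated in software: on
 * RXIF the filled buffer is marked CPU-owned, its length (ERXLN) is
 * recorded in the status word, and the receiver is re-armed with the next
 * free buffer. TXIF/TXERIF retire the in-flight TX slot and chain the next
 * queued frame.
 */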
void ETH_IRQHandler(void) __attribute__((interrupt)) __attribute__((used));
void ETH_IRQHandler(void) {
  uint32_t flags = ETH10M->EIR;
  struct ethernetif* ethernetif = &g_eth_state;

  if (flags & RB_ETH_EIR_RXIF) {
    ETH10M->EIR = RB_ETH_EIR_RXIF;

    // descriptor should be owned by DMA
    if (ethernetif->rx_desc_head->Status & ETH_DMARxDesc_OWN) {
      ETH_DMADESCTypeDef* next_desc =
          (ETH_DMADESCTypeDef*)ethernetif->rx_desc_head->Buffer2NextDescAddr;

      // if the next descriptor's OWN bit is 0, the ring is full and we
      // must drop
      if (!(next_desc->Status & ETH_DMARxDesc_OWN)) {
        LINK_STATS_INC(link.drop);
      } else {
        // process and re-arm
        ethernetif->rx_desc_head->Status &= ~ETH_DMARxDesc_OWN;
        // write packet len into status field for CPU
        ethernetif->rx_desc_head->Status |=
            (ETH_DMARxDesc_FS | ETH_DMARxDesc_LS | (ETH10M->ERXLN << 16));
        // advance descriptor ptr
        ethernetif->rx_desc_head = next_desc;
        // re-arm receiver with new empty buf
        ETH10M->ERXST = (uint32_t)ethernetif->rx_desc_head->Buffer1Addr;
      }
    }
  }

  if (flags & RB_ETH_EIR_TXIF) {
    ETH10M->EIR = RB_ETH_EIR_TXIF;

    if (!tx_queue_is_empty(&ethernetif->tx_q)) {
      LINK_STATS_INC(link.xmit);
      tx_queue_consume(&ethernetif->tx_q);
    }

    tx_start_if_possible();
  }

  if (flags & RB_ETH_EIR_TXERIF) {
    ETH10M->EIR = RB_ETH_EIR_TXERIF;
    LINK_STATS_INC(link.err);

    // drop the failed frame and move on to the next one
    if (!tx_queue_is_empty(&ethernetif->tx_q)) {
      tx_queue_consume(&ethernetif->tx_q);
    }
    tx_start_if_possible();
  }

  if (flags & RB_ETH_EIR_RXERIF) {
    ETH10M->EIR = RB_ETH_EIR_RXERIF;
    ETH10M->ECON1 |= RB_ETH_ECON1_RXEN;  // re-enable receiver
    LINK_STATS_INC(link.err);
  }

  if (flags & RB_ETH_EIR_LINKIF) {
    g_link_irq_flag = true;
    ETH10M->EIR = RB_ETH_EIR_LINKIF;
  }
}

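/*
 * MII management access to the internal PHY. Note that neither accessor
 * busy-waits on a MIIM "done" flag; the code assumes the access completes
 * immediately on this part.
 */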
void phy_write_reg(uint8_t reg_add, uint16_t reg_val) {
  R32_ETH_MIWR = (reg_add & RB_ETH_MIREGADR_MASK) | RB_ETH_MIWR_MIIWR |
                 (reg_val << RB_ETH_MIWR_DATA_SHIFT);
}

uint16_t phy_read_reg(uint8_t reg_add) {
  ETH10M->MIERGADR = reg_add;
  return ETH10M->MIRD;
}