mediatek: revert mtk_eth_soc & hnat driver to mtk-openwrt-feeds 'afff566'

This commit is contained in:
hanwckf
2022-12-05 01:32:33 +08:00
parent c6ad08117d
commit c1f27cf3ed
17 changed files with 913 additions and 3207 deletions

View File

@@ -20,16 +20,6 @@ config MEDIATEK_NETSYS_V2
---help---
This options enable MTK Ethernet NETSYS V2 support
config MEDIATEK_NETSYS_V3
tristate "MediaTek Ethernet NETSYS V3 support"
depends on ARCH_MEDIATEK && NET_MEDIATEK_SOC
---help---
This options enable MTK Ethernet NETSYS V3 support for
XGMAC and USXGMII.
If you have a network system belong to this class, say Y.
If unsure, say N.
config NET_MEDIATEK_HNAT
tristate "MediaTek HW NAT support"
depends on NET_MEDIATEK_SOC && NF_CONNTRACK && IP_NF_NAT

View File

@@ -4,5 +4,5 @@
#
obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_usxgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o
mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o
obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/

View File

@@ -24,7 +24,6 @@
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/of_mdio.h>
#include <linux/of_address.h>
#include "mtk_eth_soc.h"
#include "mtk_eth_dbg.h"
@@ -44,55 +43,20 @@ static struct proc_dir_entry *proc_hw_lro_stats, *proc_hw_lro_auto_tlb;
typedef int (*mtk_lro_dbg_func) (int par);
struct mtk_eth_debug {
struct dentry *root;
void __iomem *base;
int direct_access;
struct dentry *root;
};
struct mtk_eth *g_eth;
struct mtk_eth_debug eth_debug;
/*
 * Map the embedded switch ("switch0") register space for direct MMIO
 * access from the debug interface.
 *
 * Looks up the "switch0" device-tree node; when present, ioremaps its
 * first register region into eth_debug.base and sets
 * eth_debug.direct_access so the mt7530 debug register accessors can
 * bypass the indirect MDIO path.  When the node is absent this is a
 * no-op and 0 is returned (callers then stay on indirect MDIO access).
 *
 * Returns 0 on success or when no "switch0" node exists, -ENOMEM if
 * of_iomap() fails.
 */
int mt798x_iomap(void)
{
	struct device_node *np = NULL;

	/* NOTE(review): matched by node name only — assumes a single
	 * "switch0" node in the device tree; verify on boards with more
	 * than one switch. */
	np = of_find_node_by_name(NULL, "switch0");

	if (np) {
		eth_debug.base = of_iomap(np, 0);
		if (!eth_debug.base) {
			pr_err("of_iomap failed\n");
			of_node_put(np);
			return -ENOMEM;
		}

		of_node_put(np);
		/* Only enabled after a successful mapping. */
		eth_debug.direct_access = 1;
	}

	return 0;
}
/*
 * Undo mt798x_iomap(): disable direct register access and drop the
 * mapping.  The flag is cleared before iounmap() so code that checks
 * eth_debug.direct_access falls back to indirect MDIO access first.
 *
 * NOTE(review): there is no locking here, so a concurrent reader could
 * still race with the unmap; eth_debug.base is also left dangling (not
 * set to NULL), so calling this twice would iounmap() a stale pointer —
 * confirm callers only pair this once with mt798x_iomap().
 *
 * Always returns 0.
 */
int mt798x_iounmap(void)
{
	eth_debug.direct_access = 0;

	if (eth_debug.base)
		iounmap(eth_debug.base);

	return 0;
}
void mt7530_mdio_w32(struct mtk_eth *eth, u16 reg, u32 val)
{
mutex_lock(&eth->mii_bus->mdio_lock);
if (eth_debug.direct_access)
__raw_writel(val, eth_debug.base + reg);
else {
_mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
_mtk_mdio_write(eth, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
_mtk_mdio_write(eth, 0x1f, 0x10, val >> 16);
}
_mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
_mtk_mdio_write(eth, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
_mtk_mdio_write(eth, 0x1f, 0x10, val >> 16);
mutex_unlock(&eth->mii_bus->mdio_lock);
}
@@ -100,15 +64,9 @@ void mt7530_mdio_w32(struct mtk_eth *eth, u16 reg, u32 val)
u32 mt7530_mdio_r32(struct mtk_eth *eth, u32 reg)
{
u16 high, low;
u32 ret;
mutex_lock(&eth->mii_bus->mdio_lock);
if (eth_debug.direct_access) {
ret = __raw_readl(eth_debug.base + reg);
mutex_unlock(&eth->mii_bus->mdio_lock);
return ret;
}
_mtk_mdio_write(eth, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
low = _mtk_mdio_read(eth, 0x1f, (reg >> 2) & 0xf);
high = _mtk_mdio_read(eth, 0x1f, 0x10);
@@ -160,7 +118,6 @@ static int mtketh_debug_open(struct inode *inode, struct file *file)
}
static const struct file_operations mtketh_debug_fops = {
.owner = THIS_MODULE,
.open = mtketh_debug_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -252,7 +209,6 @@ static int mtketh_debug_mt7530sw_open(struct inode *inode, struct file *file)
}
static const struct file_operations mtketh_debug_mt7530sw_fops = {
.owner = THIS_MODULE,
.open = mtketh_debug_mt7530sw_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -363,42 +319,10 @@ static ssize_t mtketh_debugfs_reset(struct file *file, const char __user *ptr,
size_t len, loff_t *off)
{
struct mtk_eth *eth = file->private_data;
char buf[8] = "";
int count = len;
unsigned long dbg_level = 0;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, ptr, len))
return -EFAULT;
buf[len] = '\0';
if (kstrtoul(buf, 0, &dbg_level))
return -EINVAL;
switch(dbg_level)
{
case 0:
if (atomic_read(&reset_lock) == 0)
atomic_inc(&reset_lock);
break;
case 1:
if (atomic_read(&force) == 0)
atomic_inc(&force);
schedule_work(&eth->pending_work);
break;
case 2:
if (atomic_read(&reset_lock) == 1)
atomic_dec(&reset_lock);
break;
default:
pr_info("Usage: echo [level] > /sys/kernel/debug/mtketh/reset\n");
pr_info("Commands: [level] \n");
pr_info(" 0 disable reset \n");
pr_info(" 1 force reset \n");
pr_info(" 2 enable reset\n");
break;
}
return count;
atomic_inc(&force);
schedule_work(&eth->pending_work);
return len;
}
static const struct file_operations fops_reg_w = {
@@ -461,7 +385,7 @@ void mii_mgr_read_combine(struct mtk_eth *eth, u32 phy_addr, u32 phy_register,
*read_data = mt7530_mdio_r32(eth, phy_register);
else
*read_data = mdiobus_read(eth->mii_bus, phy_addr, phy_register);
*read_data = _mtk_mdio_read(eth, phy_addr, phy_register);
}
void mii_mgr_write_combine(struct mtk_eth *eth, u16 phy_addr, u16 phy_register,
@@ -471,17 +395,17 @@ void mii_mgr_write_combine(struct mtk_eth *eth, u16 phy_addr, u16 phy_register,
mt7530_mdio_w32(eth, phy_register, write_data);
else
mdiobus_write(eth->mii_bus, phy_addr, phy_register, write_data);
_mtk_mdio_write(eth, phy_addr, phy_register, write_data);
}
static void mii_mgr_read_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 *data)
{
*data = mdiobus_read(eth->mii_bus, port, mdiobus_c45_addr(devad, reg));
*data = _mtk_mdio_read(eth, port, mdiobus_c45_addr(devad, reg));
}
static void mii_mgr_write_cl45(struct mtk_eth *eth, u16 port, u16 devad, u16 reg, u16 data)
{
mdiobus_write(eth->mii_bus, port, mdiobus_c45_addr(devad, reg), data);
_mtk_mdio_write(eth, port, mdiobus_c45_addr(devad, reg), data);
}
int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -563,112 +487,75 @@ err_copy:
return -EFAULT;
}
/*
 * Dump one GDMA port's hardware MIB counters using the NETSYS v3
 * register layout.
 *
 * @eth:      driver state, used only for mtk_r32() register reads
 * @gdm_id:   1-based GDMA port number, used only in the printed labels
 * @mib_base: base offset of this port's MIB counter block
 *
 * In the v3 layout the RX counters live at mib_base + 0x00..0x2C and
 * the TX counters at mib_base + 0x40..0x64 (contrast gdm_reg_dump_v2,
 * where TX starts at +0x28).  Output goes to the kernel log.
 * NOTE(review): each mtk_r32() is a live register read; whether these
 * counters are clear-on-read is not visible here — confirm against the
 * NETSYS v3 datasheet before relying on repeated dumps.
 */
static void gdm_reg_dump_v3(struct mtk_eth *eth, u32 gdm_id, u32 mib_base)
{
	/* RX side: good bytes/pkts first, then the error/drop breakdown. */
	pr_info("| GDMA%d_RX_GBCNT : %010u (Rx Good Bytes) |\n",
		gdm_id, mtk_r32(eth, mib_base));
	pr_info("| GDMA%d_RX_GPCNT : %010u (Rx Good Pkts) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x08));
	pr_info("| GDMA%d_RX_OERCNT : %010u (overflow error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x10));
	pr_info("| GDMA%d_RX_FERCNT : %010u (FCS error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x14));
	pr_info("| GDMA%d_RX_SERCNT : %010u (too short) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x18));
	pr_info("| GDMA%d_RX_LERCNT : %010u (too long) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x1C));
	pr_info("| GDMA%d_RX_CERCNT : %010u (checksum error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x20));
	pr_info("| GDMA%d_RX_FCCNT : %010u (flow control) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x24));
	pr_info("| GDMA%d_RX_VDPCNT : %010u (VID drop) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x28));
	pr_info("| GDMA%d_RX_PFCCNT : %010u (priority flow control)\n",
		gdm_id, mtk_r32(eth, mib_base + 0x2C));
	/* TX side starts at +0x40 in the v3 layout. */
	pr_info("| GDMA%d_TX_GBCNT : %010u (Tx Good Bytes) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x40));
	pr_info("| GDMA%d_TX_GPCNT : %010u (Tx Good Pkts) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x48));
	pr_info("| GDMA%d_TX_SKIPCNT: %010u (abort count) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x50));
	pr_info("| GDMA%d_TX_COLCNT : %010u (collision count)|\n",
		gdm_id, mtk_r32(eth, mib_base + 0x54));
	pr_info("| GDMA%d_TX_OERCNT : %010u (overflow error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x58));
	pr_info("| GDMA%d_TX_FCCNT : %010u (flow control) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x60));
	pr_info("| GDMA%d_TX_PFCCNT : %010u (priority flow control)\n",
		gdm_id, mtk_r32(eth, mib_base + 0x64));
	/* Trailing separator row of the ASCII table. */
	pr_info("| |\n");
}
/*
 * Dump one GDMA port's hardware MIB counters using the NETSYS v1/v2
 * register layout.
 *
 * @eth:      driver state, used only for mtk_r32() register reads
 * @gdm_id:   1-based GDMA port number, used only in the printed labels
 * @mib_base: base offset of this port's MIB counter block
 *
 * In this layout RX counters live at mib_base + 0x00..0x24 and TX
 * counters immediately follow at +0x28..0x38 (the v3 layout moves TX
 * to +0x40, see gdm_reg_dump_v3).  Output goes to the kernel log.
 */
static void gdm_reg_dump_v2(struct mtk_eth *eth, u32 gdm_id, u32 mib_base)
{
	/* RX side: good bytes/pkts first, then the error/drop breakdown. */
	pr_info("| GDMA%d_RX_GBCNT : %010u (Rx Good Bytes) |\n",
		gdm_id, mtk_r32(eth, mib_base));
	pr_info("| GDMA%d_RX_GPCNT : %010u (Rx Good Pkts) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x08));
	pr_info("| GDMA%d_RX_OERCNT : %010u (overflow error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x10));
	pr_info("| GDMA%d_RX_FERCNT : %010u (FCS error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x14));
	pr_info("| GDMA%d_RX_SERCNT : %010u (too short) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x18));
	pr_info("| GDMA%d_RX_LERCNT : %010u (too long) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x1C));
	pr_info("| GDMA%d_RX_CERCNT : %010u (checksum error) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x20));
	pr_info("| GDMA%d_RX_FCCNT : %010u (flow control) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x24));
	/* TX side starts at +0x28 in the v1/v2 layout. */
	pr_info("| GDMA%d_TX_SKIPCNT: %010u (abort count) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x28));
	pr_info("| GDMA%d_TX_COLCNT : %010u (collision count) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x2C));
	pr_info("| GDMA%d_TX_GBCNT : %010u (Tx Good Bytes) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x30));
	pr_info("| GDMA%d_TX_GPCNT : %010u (Tx Good Pkts) |\n",
		gdm_id, mtk_r32(eth, mib_base + 0x38));
	/* Trailing separator row of the ASCII table. */
	pr_info("| |\n");
}
/*
 * Print a banner sketching the CPU -> PSE -> GDMA datapath and dump the
 * MIB counters of every MAC to the kernel log.
 *
 * Selects the v3 or v2 counter layout from the SoC capability bits.
 * Ports are labelled 1-based (i + 1) and each port's counter block is
 * MTK_STAT_OFFSET bytes after the previous one, starting at
 * MTK_GDM1_TX_GBCNT.
 */
static void gdm_cnt_read(struct mtk_eth *eth)
{
	u32 i, mib_base;

	/* ASCII sketch of the forwarding path the counters belong to. */
	pr_info("\n <<CPU>>\n");
	pr_info(" |\n");
	pr_info("+-----------------------------------------------+\n");
	pr_info("| <<PSE>> |\n");
	pr_info("+-----------------------------------------------+\n");
	pr_info(" |\n");
	pr_info("+-----------------------------------------------+\n");
	pr_info("| <<GDMA>> |\n");

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		mib_base = MTK_GDM1_TX_GBCNT + MTK_STAT_OFFSET * i;

		/* Register map differs between NETSYS generations. */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
			gdm_reg_dump_v3(eth, i + 1, mib_base);
		else
			gdm_reg_dump_v2(eth, i + 1, mib_base);
	}

	pr_info("+-----------------------------------------------+\n");
}
int esw_cnt_read(struct seq_file *seq, void *v)
{
unsigned int pkt_cnt = 0;
int i = 0;
struct mtk_eth *eth = g_eth;
unsigned int mib_base = MTK_GDM1_TX_GBCNT;
gdm_cnt_read(eth);
seq_puts(seq, "\n <<CPU>>\n");
seq_puts(seq, " |\n");
seq_puts(seq, "+-----------------------------------------------+\n");
seq_puts(seq, "| <<PSE>> |\n");
seq_puts(seq, "+-----------------------------------------------+\n");
seq_puts(seq, " |\n");
seq_puts(seq, "+-----------------------------------------------+\n");
seq_puts(seq, "| <<GDMA>> |\n");
seq_printf(seq, "| GDMA1_RX_GBCNT : %010u (Rx Good Bytes) |\n",
mtk_r32(eth, mib_base));
seq_printf(seq, "| GDMA1_RX_GPCNT : %010u (Rx Good Pkts) |\n",
mtk_r32(eth, mib_base+0x08));
seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow error) |\n",
mtk_r32(eth, mib_base+0x10));
seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error) |\n",
mtk_r32(eth, mib_base+0x14));
seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short) |\n",
mtk_r32(eth, mib_base+0x18));
seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long) |\n",
mtk_r32(eth, mib_base+0x1C));
seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (checksum error) |\n",
mtk_r32(eth, mib_base+0x20));
seq_printf(seq, "| GDMA1_RX_FCCNT : %010u (flow control) |\n",
mtk_r32(eth, mib_base+0x24));
seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (about count) |\n",
mtk_r32(eth, mib_base+0x28));
seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision count) |\n",
mtk_r32(eth, mib_base+0x2C));
seq_printf(seq, "| GDMA1_TX_GBCNT : %010u (Tx Good Bytes) |\n",
mtk_r32(eth, mib_base+0x30));
seq_printf(seq, "| GDMA1_TX_GPCNT : %010u (Tx Good Pkts) |\n",
mtk_r32(eth, mib_base+0x38));
seq_puts(seq, "| |\n");
seq_printf(seq, "| GDMA2_RX_GBCNT : %010u (Rx Good Bytes) |\n",
mtk_r32(eth, mib_base+0x40));
seq_printf(seq, "| GDMA2_RX_GPCNT : %010u (Rx Good Pkts) |\n",
mtk_r32(eth, mib_base+0x48));
seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow error) |\n",
mtk_r32(eth, mib_base+0x50));
seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error) |\n",
mtk_r32(eth, mib_base+0x54));
seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short) |\n",
mtk_r32(eth, mib_base+0x58));
seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long) |\n",
mtk_r32(eth, mib_base+0x5C));
seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (checksum error) |\n",
mtk_r32(eth, mib_base+0x60));
seq_printf(seq, "| GDMA2_RX_FCCNT : %010u (flow control) |\n",
mtk_r32(eth, mib_base+0x64));
seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip) |\n",
mtk_r32(eth, mib_base+0x68));
seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision) |\n",
mtk_r32(eth, mib_base+0x6C));
seq_printf(seq, "| GDMA2_TX_GBCNT : %010u (Tx Good Bytes) |\n",
mtk_r32(eth, mib_base+0x70));
seq_printf(seq, "| GDMA2_TX_GPCNT : %010u (Tx Good Pkts) |\n",
mtk_r32(eth, mib_base+0x78));
seq_puts(seq, "+-----------------------------------------------+\n");
if (!mt7530_exist(eth))
return 0;
mt798x_iomap();
#define DUMP_EACH_PORT(base) \
do { \
for (i = 0; i < 7; i++) { \
@@ -724,8 +611,6 @@ int esw_cnt_read(struct seq_file *seq, void *v)
seq_puts(seq, "\n");
mt798x_iounmap();
return 0;
}
@@ -746,33 +631,38 @@ static struct proc_dir_entry *proc_tx_ring, *proc_hwtx_ring, *proc_rx_ring;
int tx_ring_read(struct seq_file *seq, void *v)
{
struct mtk_eth *eth = g_eth;
struct mtk_tx_ring *ring = &g_eth->tx_ring;
struct mtk_tx_dma_v2 *tx_ring;
struct mtk_tx_dma *tx_ring;
int i = 0;
tx_ring =
kmalloc(sizeof(struct mtk_tx_dma) * MTK_DMA_SIZE, GFP_KERNEL);
if (!tx_ring) {
seq_puts(seq, " allocate temp tx_ring fail.\n");
return 0;
}
for (i = 0; i < MTK_DMA_SIZE; i++)
tx_ring[i] = ring->dma[i];
seq_printf(seq, "free count = %d\n", (int)atomic_read(&ring->free_count));
seq_printf(seq, "cpu next free: %d\n", (int)(ring->next_free - ring->dma));
seq_printf(seq, "cpu last free: %d\n", (int)(ring->last_free - ring->dma));
for (i = 0; i < MTK_DMA_SIZE; i++) {
dma_addr_t tmp = ring->phys + i * eth->soc->txrx.txd_size;
tx_ring = ring->dma + i * eth->soc->txrx.txd_size;
dma_addr_t tmp = ring->phys + i * sizeof(*tx_ring);
seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &tmp,
tx_ring->txd1, tx_ring->txd2,
tx_ring->txd3, tx_ring->txd4);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
seq_printf(seq, " %08x %08x %08x %08x",
tx_ring->txd5, tx_ring->txd6,
tx_ring->txd7, tx_ring->txd8);
}
*(int *)&tx_ring[i].txd1, *(int *)&tx_ring[i].txd2,
*(int *)&tx_ring[i].txd3, *(int *)&tx_ring[i].txd4);
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
seq_printf(seq, " %08x %08x %08x %08x",
*(int *)&tx_ring[i].txd5, *(int *)&tx_ring[i].txd6,
*(int *)&tx_ring[i].txd7, *(int *)&tx_ring[i].txd8);
#endif
seq_printf(seq, "\n");
}
kfree(tx_ring);
return 0;
}
@@ -792,28 +682,34 @@ static const struct file_operations tx_ring_fops = {
int hwtx_ring_read(struct seq_file *seq, void *v)
{
struct mtk_eth *eth = g_eth;
struct mtk_tx_dma_v2 *hwtx_ring;
struct mtk_tx_dma *hwtx_ring;
int i = 0;
for (i = 0; i < MTK_DMA_SIZE; i++) {
dma_addr_t addr = eth->phy_scratch_ring + i * eth->soc->txrx.txd_size;
hwtx_ring =
kmalloc(sizeof(struct mtk_tx_dma) * MTK_DMA_SIZE, GFP_KERNEL);
if (!hwtx_ring) {
seq_puts(seq, " allocate temp hwtx_ring fail.\n");
return 0;
}
hwtx_ring = eth->scratch_ring + i * eth->soc->txrx.txd_size;
for (i = 0; i < MTK_DMA_SIZE; i++)
hwtx_ring[i] = eth->scratch_ring[i];
for (i = 0; i < MTK_DMA_SIZE; i++) {
dma_addr_t addr = eth->phy_scratch_ring + i * sizeof(*hwtx_ring);
seq_printf(seq, "%d (%pad): %08x %08x %08x %08x", i, &addr,
hwtx_ring->txd1, hwtx_ring->txd2,
hwtx_ring->txd3, hwtx_ring->txd4);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
seq_printf(seq, " %08x %08x %08x %08x",
hwtx_ring->txd5, hwtx_ring->txd6,
hwtx_ring->txd7, hwtx_ring->txd8);
}
*(int *)&hwtx_ring[i].txd1, *(int *)&hwtx_ring[i].txd2,
*(int *)&hwtx_ring[i].txd3, *(int *)&hwtx_ring[i].txd4);
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
seq_printf(seq, " %08x %08x %08x %08x",
*(int *)&hwtx_ring[i].txd5, *(int *)&hwtx_ring[i].txd6,
*(int *)&hwtx_ring[i].txd7, *(int *)&hwtx_ring[i].txd8);
#endif
seq_printf(seq, "\n");
}
kfree(hwtx_ring);
return 0;
}
@@ -832,30 +728,36 @@ static const struct file_operations hwtx_ring_fops = {
int rx_ring_read(struct seq_file *seq, void *v)
{
struct mtk_eth *eth = g_eth;
struct mtk_rx_ring *ring = &g_eth->rx_ring[0];
struct mtk_rx_dma_v2 *rx_ring;
struct mtk_rx_dma *rx_ring;
int i = 0;
rx_ring =
kmalloc(sizeof(struct mtk_rx_dma) * MTK_DMA_SIZE, GFP_KERNEL);
if (!rx_ring) {
seq_puts(seq, " allocate temp rx_ring fail.\n");
return 0;
}
for (i = 0; i < MTK_DMA_SIZE; i++)
rx_ring[i] = ring->dma[i];
seq_printf(seq, "next to read: %d\n",
NEXT_DESP_IDX(ring->calc_idx, MTK_DMA_SIZE));
for (i = 0; i < MTK_DMA_SIZE; i++) {
rx_ring = ring->dma + i * eth->soc->txrx.rxd_size;
seq_printf(seq, "%d: %08x %08x %08x %08x", i,
rx_ring->rxd1, rx_ring->rxd2,
rx_ring->rxd3, rx_ring->rxd4);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
seq_printf(seq, " %08x %08x %08x %08x",
rx_ring->rxd5, rx_ring->rxd6,
rx_ring->rxd7, rx_ring->rxd8);
}
*(int *)&rx_ring[i].rxd1, *(int *)&rx_ring[i].rxd2,
*(int *)&rx_ring[i].rxd3, *(int *)&rx_ring[i].rxd4);
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
seq_printf(seq, " %08x %08x %08x %08x",
*(int *)&rx_ring[i].rxd5, *(int *)&rx_ring[i].rxd6,
*(int *)&rx_ring[i].rxd7, *(int *)&rx_ring[i].rxd8);
#endif
seq_printf(seq, "\n");
}
kfree(rx_ring);
return 0;
}
@@ -892,8 +794,7 @@ int dbg_regs_read(struct seq_file *seq, void *v)
seq_printf(seq, "| FE_INT_STA : %08x |\n",
mtk_r32(eth, MTK_FE_INT_STATUS));
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
seq_printf(seq, "| FE_INT_STA2 : %08x |\n",
mtk_r32(eth, MTK_FE_INT_STATUS2));
@@ -904,20 +805,13 @@ int dbg_regs_read(struct seq_file *seq, void *v)
seq_printf(seq, "| PSE_IQ_STA2 : %08x |\n",
mtk_r32(eth, MTK_PSE_IQ_STA(1)));
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
seq_printf(seq, "| PSE_IQ_STA3 : %08x |\n",
mtk_r32(eth, MTK_PSE_IQ_STA(2)));
seq_printf(seq, "| PSE_IQ_STA4 : %08x |\n",
mtk_r32(eth, MTK_PSE_IQ_STA(3)));
seq_printf(seq, "| PSE_IQ_STA5 : %08x |\n",
mtk_r32(eth, MTK_PSE_IQ_STA(4)));
seq_printf(seq, "| PSE_IQ_STA6 : %08x |\n",
mtk_r32(eth, MTK_PSE_IQ_STA(5)));
seq_printf(seq, "| PSE_IQ_STA7 : %08x |\n",
mtk_r32(eth, MTK_PSE_IQ_STA(6)));
seq_printf(seq, "| PSE_IQ_STA8 : %08x |\n",
mtk_r32(eth, MTK_PSE_IQ_STA(7)));
}
seq_printf(seq, "| PSE_OQ_STA1 : %08x |\n",
@@ -925,20 +819,13 @@ int dbg_regs_read(struct seq_file *seq, void *v)
seq_printf(seq, "| PSE_OQ_STA2 : %08x |\n",
mtk_r32(eth, MTK_PSE_OQ_STA(1)));
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
seq_printf(seq, "| PSE_OQ_STA3 : %08x |\n",
mtk_r32(eth, MTK_PSE_OQ_STA(2)));
seq_printf(seq, "| PSE_OQ_STA4 : %08x |\n",
mtk_r32(eth, MTK_PSE_OQ_STA(3)));
seq_printf(seq, "| PSE_OQ_STA5 : %08x |\n",
mtk_r32(eth, MTK_PSE_OQ_STA(4)));
seq_printf(seq, "| PSE_OQ_STA6 : %08x |\n",
mtk_r32(eth, MTK_PSE_OQ_STA(5)));
seq_printf(seq, "| PSE_OQ_STA7 : %08x |\n",
mtk_r32(eth, MTK_PSE_OQ_STA(6)));
seq_printf(seq, "| PSE_OQ_STA8 : %08x |\n",
mtk_r32(eth, MTK_PSE_OQ_STA(7)));
}
seq_printf(seq, "| PDMA_CRX_IDX : %08x |\n",
@@ -951,10 +838,6 @@ int dbg_regs_read(struct seq_file *seq, void *v)
mtk_r32(eth, MTK_QTX_DTX_PTR));
seq_printf(seq, "| QDMA_FQ_CNT : %08x |\n",
mtk_r32(eth, MTK_QDMA_FQ_CNT));
seq_printf(seq, "| QDMA_FWD_CNT : %08x |\n",
mtk_r32(eth, MTK_QDMA_FWD_CNT));
seq_printf(seq, "| QDMA_FSM : %08x |\n",
mtk_r32(eth, MTK_QDMA_FSM));
seq_printf(seq, "| FE_PSE_FREE : %08x |\n",
mtk_r32(eth, MTK_FE_PSE_FREE));
seq_printf(seq, "| FE_DROP_FQ : %08x |\n",
@@ -967,29 +850,16 @@ int dbg_regs_read(struct seq_file *seq, void *v)
mtk_r32(eth, MTK_GDMA_FWD_CFG(0)));
seq_printf(seq, "| GDM2_IG_CTRL : %08x |\n",
mtk_r32(eth, MTK_GDMA_FWD_CFG(1)));
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
seq_printf(seq, "| GDM3_IG_CTRL : %08x |\n",
mtk_r32(eth, MTK_GDMA_FWD_CFG(2)));
}
seq_printf(seq, "| MAC_P1_MCR : %08x |\n",
mtk_r32(eth, MTK_MAC_MCR(0)));
seq_printf(seq, "| MAC_P2_MCR : %08x |\n",
mtk_r32(eth, MTK_MAC_MCR(1)));
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
seq_printf(seq, "| MAC_P3_MCR : %08x |\n",
mtk_r32(eth, MTK_MAC_MCR(2)));
}
seq_printf(seq, "| MAC_P1_FSM : %08x |\n",
mtk_r32(eth, MTK_MAC_FSM(0)));
seq_printf(seq, "| MAC_P2_FSM : %08x |\n",
mtk_r32(eth, MTK_MAC_FSM(1)));
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
seq_printf(seq, "| MAC_P3_FSM : %08x |\n",
mtk_r32(eth, MTK_MAC_FSM(2)));
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
seq_printf(seq, "| FE_CDM1_FSM : %08x |\n",
mtk_r32(eth, MTK_FE_CDM1_FSM));
seq_printf(seq, "| FE_CDM2_FSM : %08x |\n",
@@ -998,10 +868,6 @@ int dbg_regs_read(struct seq_file *seq, void *v)
mtk_r32(eth, MTK_FE_CDM3_FSM));
seq_printf(seq, "| FE_CDM4_FSM : %08x |\n",
mtk_r32(eth, MTK_FE_CDM4_FSM));
seq_printf(seq, "| FE_CDM5_FSM : %08x |\n",
mtk_r32(eth, MTK_FE_CDM5_FSM));
seq_printf(seq, "| FE_CDM6_FSM : %08x |\n",
mtk_r32(eth, MTK_FE_CDM6_FSM));
seq_printf(seq, "| FE_GDM1_FSM : %08x |\n",
mtk_r32(eth, MTK_FE_GDM1_FSM));
seq_printf(seq, "| FE_GDM2_FSM : %08x |\n",
@@ -1017,8 +883,7 @@ int dbg_regs_read(struct seq_file *seq, void *v)
}
mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
mtk_w32(eth, 0xffffffff, MTK_FE_INT_STATUS2);
return 0;
@@ -1037,19 +902,17 @@ static const struct file_operations dbg_regs_fops = {
.release = single_release
};
void hw_lro_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd)
void hw_lro_stats_update(u32 ring_no, struct mtk_rx_dma *rxd)
{
struct mtk_eth *eth = g_eth;
u32 idx, agg_cnt, agg_size;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
idx = ring_no - 4;
agg_cnt = RX_DMA_GET_AGG_CNT_V2(rxd->rxd6);
} else {
idx = ring_no - 1;
agg_cnt = RX_DMA_GET_AGG_CNT(rxd->rxd2);
}
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
idx = ring_no - 4;
agg_cnt = RX_DMA_GET_AGG_CNT_V2(rxd->rxd6);
#else
idx = ring_no - 1;
agg_cnt = RX_DMA_GET_AGG_CNT(rxd->rxd2);
#endif
agg_size = RX_DMA_GET_PLEN0(rxd->rxd2);
@@ -1059,19 +922,17 @@ void hw_lro_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd)
hw_lro_tot_agg_cnt[idx] += agg_cnt;
}
void hw_lro_flush_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd)
void hw_lro_flush_stats_update(u32 ring_no, struct mtk_rx_dma *rxd)
{
struct mtk_eth *eth = g_eth;
u32 idx, flush_reason;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
idx = ring_no - 4;
flush_reason = RX_DMA_GET_FLUSH_RSN_V2(rxd->rxd6);
} else {
idx = ring_no - 1;
flush_reason = RX_DMA_GET_REV(rxd->rxd2);
}
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
idx = ring_no - 4;
flush_reason = RX_DMA_GET_FLUSH_RSN_V2(rxd->rxd6);
#else
idx = ring_no - 1;
flush_reason = RX_DMA_GET_REV(rxd->rxd2);
#endif
if ((flush_reason & 0x7) == MTK_HW_LRO_AGG_FLUSH)
hw_lro_agg_flush_cnt[idx]++;
@@ -1312,8 +1173,7 @@ int hw_lro_stats_read_wrapper(struct seq_file *seq, void *v)
{
struct mtk_eth *eth = g_eth;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
hw_lro_stats_read_v2(seq, v);
else
hw_lro_stats_read_v1(seq, v);
@@ -1584,8 +1444,7 @@ int hw_lro_auto_tlb_read(struct seq_file *seq, void *v)
seq_puts(seq, "[4] = hwlro_ring_enable_ctrl\n");
seq_puts(seq, "[5] = hwlro_stats_enable_ctrl\n\n");
if (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V2) ||
MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V3)) {
if (MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V2)) {
for (i = 1; i <= 8; i++)
hw_lro_auto_tlb_dump_v2(seq, i);
} else {
@@ -1621,7 +1480,7 @@ int hw_lro_auto_tlb_read(struct seq_file *seq, void *v)
((reg_op1 >> MTK_LRO_RING_AGE_TIME_L_OFFSET) & 0x3ff);
seq_printf(seq,
"Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n",
(MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V1)) ? i : i+3,
(MTK_HAS_CAPS(g_eth->soc->caps, MTK_NETSYS_V2))? i+3 : i,
agg_cnt, agg_time, age_time, reg_op4);
}

View File

@@ -24,8 +24,6 @@
#define MTK_FE_CDM2_FSM 0x224
#define MTK_FE_CDM3_FSM 0x238
#define MTK_FE_CDM4_FSM 0x298
#define MTK_FE_CDM5_FSM 0x318
#define MTK_FE_CDM6_FSM 0x328
#define MTK_FE_GDM1_FSM 0x228
#define MTK_FE_GDM2_FSM 0x22C
#define MTK_FE_PSE_FREE 0x240
@@ -37,7 +35,7 @@
#define MTK_SGMII_EFUSE 0x11D008C8
#define MTK_WED_RTQM_GLO_CFG 0x15010B00
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define MTK_PSE_IQ_STA(x) (0x180 + (x) * 0x4)
#define MTK_PSE_OQ_STA(x) (0x1A0 + (x) * 0x4)
#else
@@ -281,7 +279,7 @@ void debug_proc_exit(void);
int mtketh_debugfs_init(struct mtk_eth *eth);
void mtketh_debugfs_exit(struct mtk_eth *eth);
int mtk_do_priv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
void hw_lro_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd);
void hw_lro_flush_stats_update(u32 ring_no, struct mtk_rx_dma_v2 *rxd);
void hw_lro_stats_update(u32 ring_no, struct mtk_rx_dma *rxd);
void hw_lro_flush_stats_update(u32 ring_no, struct mtk_rx_dma *rxd);
#endif /* MTK_ETH_DBG_H */

View File

@@ -14,11 +14,11 @@
struct mtk_eth_muxc {
const char *name;
u64 cap_bit;
int (*set_path)(struct mtk_eth *eth, u64 path);
int cap_bit;
int (*set_path)(struct mtk_eth *eth, int path);
};
static const char *mtk_eth_path_name(u64 path)
static const char *mtk_eth_path_name(int path)
{
switch (path) {
case MTK_ETH_PATH_GMAC1_RGMII:
@@ -33,22 +33,14 @@ static const char *mtk_eth_path_name(u64 path)
return "gmac2_sgmii";
case MTK_ETH_PATH_GMAC2_GEPHY:
return "gmac2_gephy";
case MTK_ETH_PATH_GMAC3_SGMII:
return "gmac3_sgmii";
case MTK_ETH_PATH_GDM1_ESW:
return "gdm1_esw";
case MTK_ETH_PATH_GMAC1_USXGMII:
return "gmac1_usxgmii";
case MTK_ETH_PATH_GMAC2_USXGMII:
return "gmac2_usxgmii";
case MTK_ETH_PATH_GMAC3_USXGMII:
return "gmac3_usxgmii";
default:
return "unknown path";
}
}
static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, u64 path)
static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
{
bool updated = true;
u32 val, mask, set;
@@ -79,7 +71,7 @@ static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, u64 path)
return 0;
}
static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, u64 path)
static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
{
unsigned int val = 0;
bool updated = true;
@@ -102,7 +94,7 @@ static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, u64 path)
return 0;
}
static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, u64 path)
static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
{
unsigned int val = 0,mask=0,reg=0;
bool updated = true;
@@ -133,7 +125,7 @@ static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, u64 path)
return 0;
}
static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, u64 path)
static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
{
unsigned int val = 0;
bool updated = true;
@@ -175,70 +167,7 @@ static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, u64 path)
return 0;
}
/*
 * Route one of GMAC1/2/3 to the USXGMII block.
 *
 * Steps (order matters):
 *  1. Clear the selected GMAC's SGMII select bit in ETHSYS_SYSCFG0 so
 *     the SGMII path is released before USXGMII takes over.
 *  2. On NETSYS v3, GMAC2 needs extra PCS mux handling: if the
 *     MTK_USXGMII_INT_2500 flag is set for its XGMII instance
 *     (presumably an internal 2.5G PHY, per the flag name — TODO
 *     confirm), the XGMAC link is forced up via MTK_XGMAC_STS;
 *     otherwise the top-level PCS mux is switched to G2_USXGMII.
 *  3. Enable the XGDM egress path for the chosen MAC through
 *     MTK_GDMA_EG_CTRL.
 *
 * Returns 0 even when @path is not a USXGMII path (updated == false);
 * in that case only step 3 runs, and it runs with mac_id still 0 —
 * NOTE(review): confirm the unconditional XGDM enable on MAC 0 is
 * intended for non-USXGMII paths.
 */
static int set_mux_gmac123_to_usxgmii(struct mtk_eth *eth, u64 path)
{
	unsigned int val = 0;
	bool updated = true;
	int mac_id = 0, id = 0;

	dev_dbg(eth->dev, "path %s in %s updated = %d\n",
		mtk_eth_path_name(path), __func__, updated);

	/* Disable SYSCFG1 SGMII */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

	switch (path) {
	case MTK_ETH_PATH_GMAC1_USXGMII:
		val &= ~(u32)SYSCFG0_SGMII_GMAC1_V2;
		mac_id = MTK_GMAC1_ID;
		break;
	case MTK_ETH_PATH_GMAC2_USXGMII:
		val &= ~(u32)SYSCFG0_SGMII_GMAC2_V2;
		mac_id = MTK_GMAC2_ID;
		break;
	case MTK_ETH_PATH_GMAC3_USXGMII:
		val &= ~(u32)SYSCFG0_SGMII_GMAC3_V2;
		mac_id = MTK_GMAC3_ID;
		break;
	default:
		/* Not a USXGMII path: leave SYSCFG0 untouched. */
		updated = false;
	}; /* NOTE(review): stray ';' after switch — harmless null statement */

	if (updated) {
		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, val);

		/* GMAC2-specific PCS mux handling on NETSYS v3. */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3) &&
		    mac_id == MTK_GMAC2_ID) {
			id = mtk_mac2xgmii_id(eth, mac_id);
			if (MTK_HAS_FLAGS(eth->xgmii->flags[id],
					  MTK_USXGMII_INT_2500)) {
				/* Force the XGMAC link state up. */
				val = mtk_r32(eth, MTK_XGMAC_STS(mac_id));
				mtk_w32(eth,
					val | (MTK_XGMAC_FORCE_LINK << 16),
					MTK_XGMAC_STS(mac_id));
			} else {
				regmap_update_bits(eth->infra,
						   TOP_MISC_NETSYS_PCS_MUX,
						   NETSYS_PCS_MUX_MASK,
						   MUX_G2_USXGMII_SEL);
			}
		}
	}

	/* Enable XGDM Path */
	val = mtk_r32(eth, MTK_GDMA_EG_CTRL(mac_id));
	val |= MTK_GDMA_XGDM_SEL;
	mtk_w32(eth, val, MTK_GDMA_EG_CTRL(mac_id));

	dev_dbg(eth->dev, "path %s in %s updated = %d\n",
		mtk_eth_path_name(path), __func__, updated);

	return 0;
}
static int set_mux_gmac123_to_gephy_sgmii(struct mtk_eth *eth, u64 path)
static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path)
{
unsigned int val = 0;
bool updated = true;
@@ -257,9 +186,6 @@ static int set_mux_gmac123_to_gephy_sgmii(struct mtk_eth *eth, u64 path)
case MTK_ETH_PATH_GMAC2_SGMII:
val |= SYSCFG0_SGMII_GMAC2_V2;
break;
case MTK_ETH_PATH_GMAC3_SGMII:
val |= SYSCFG0_SGMII_GMAC3_V2;
break;
default:
updated = false;
};
@@ -296,19 +222,11 @@ static const struct mtk_eth_muxc mtk_eth_muxc[] = {
}, {
.name = "mux_gmac12_to_gephy_sgmii",
.cap_bit = MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII,
.set_path = set_mux_gmac123_to_gephy_sgmii,
}, {
.name = "mux_gmac123_to_gephy_sgmii",
.cap_bit = MTK_ETH_MUX_GMAC123_TO_GEPHY_SGMII,
.set_path = set_mux_gmac123_to_gephy_sgmii,
}, {
.name = "mux_gmac123_to_usxgmii",
.cap_bit = MTK_ETH_MUX_GMAC123_TO_USXGMII,
.set_path = set_mux_gmac123_to_usxgmii,
.set_path = set_mux_gmac12_to_gephy_sgmii,
},
};
static int mtk_eth_mux_setup(struct mtk_eth *eth, u64 path)
static int mtk_eth_mux_setup(struct mtk_eth *eth, int path)
{
int i, err = 0;
@@ -337,34 +255,12 @@ out:
return err;
}
/*
 * Select the USXGMII path for @mac_id and program all muxes along it.
 *
 * Maps mac_id -> MTK_ETH_PATH_GMACx_USXGMII (any id other than GMAC1/2
 * falls through to the GMAC3 path) and delegates to mtk_eth_mux_setup().
 * Returns 0 on success or the error from the mux setup.
 *
 * NOTE(review): the entry trace uses dev_err() rather than
 * dev_dbg()/dev_info() — looks like leftover debugging; consider
 * lowering the severity.
 */
int mtk_gmac_usxgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
	int err;
	u64 path;

	path = (mac_id == MTK_GMAC1_ID) ? MTK_ETH_PATH_GMAC1_USXGMII :
	       (mac_id == MTK_GMAC2_ID) ? MTK_ETH_PATH_GMAC2_USXGMII :
					  MTK_ETH_PATH_GMAC3_USXGMII;

	dev_err(eth->dev, "%s path %s in\n", __func__,
		mtk_eth_path_name(path));

	/* Setup proper MUXes along the path */
	err = mtk_eth_mux_setup(eth, path);
	if (err)
		return err;

	return 0;
}
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
int err;
u64 path;
int err, path;
path = (mac_id == MTK_GMAC1_ID) ? MTK_ETH_PATH_GMAC1_SGMII :
(mac_id == MTK_GMAC2_ID) ? MTK_ETH_PATH_GMAC2_SGMII :
MTK_ETH_PATH_GMAC3_SGMII;
path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII :
MTK_ETH_PATH_GMAC2_SGMII;
/* Setup proper MUXes along the path */
err = mtk_eth_mux_setup(eth, path);
@@ -376,8 +272,7 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
{
int err;
u64 path = 0;
int err, path = 0;
if (mac_id == 1)
path = MTK_ETH_PATH_GMAC2_GEPHY;
@@ -395,8 +290,7 @@ int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
int err;
u64 path;
int err, path;
path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_RGMII :
MTK_ETH_PATH_GMAC2_RGMII;

View File

@@ -33,8 +33,7 @@ void mtk_reset_event_update(struct mtk_eth *eth, u32 id)
int mtk_eth_cold_reset(struct mtk_eth *eth)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
@@ -42,8 +41,7 @@ int mtk_eth_cold_reset(struct mtk_eth *eth)
else
ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | RSTCTRL_PPE0);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0x3ffffff);
return 0;
@@ -179,17 +177,10 @@ static void mtk_dump_reg(void *_eth, char *name, u32 offset, u32 range)
void mtk_dump_netsys_info(void *_eth)
{
struct mtk_eth *eth = _eth;
u32 id = 0;
mtk_dump_reg(eth, "FE", 0x0, 0x500);
mtk_dump_reg(eth, "ADMA", PDMA_BASE, 0x300);
for (id = 0; id < MTK_QDMA_PAGE_NUM; id++){
mtk_w32(eth, id, MTK_QDMA_PAGE);
pr_info("\nQDMA PAGE:%x ",mtk_r32(eth, MTK_QDMA_PAGE));
mtk_dump_reg(eth, "QDMA", QDMA_BASE, 0x100);
mtk_w32(eth, 0, MTK_QDMA_PAGE);
}
mtk_dump_reg(eth, "QDMA", MTK_QRX_BASE_PTR0, 0x300);
mtk_dump_reg(eth, "QDMA", QDMA_BASE, 0x400);
mtk_dump_reg(eth, "WDMA", WDMA_BASE(0), 0x600);
mtk_dump_reg(eth, "PPE", 0x2200, 0x200);
mtk_dump_reg(eth, "GMAC", 0x10000, 0x300);
@@ -201,9 +192,6 @@ void mtk_dma_monitor(struct timer_list *t)
static u32 timestamp = 0;
static u32 err_cnt1 = 0, err_cnt2 = 0, err_cnt3 = 0;
static u32 prev_wdidx = 0;
unsigned int mib_base = MTK_GDM1_TX_GBCNT;
/*wdma tx path*/
u32 cur_wdidx = mtk_r32(eth, MTK_WDMA_DTX_PTR(0));
u32 is_wtx_busy = mtk_r32(eth, MTK_WDMA_GLO_CFG(0)) & MTK_TX_DMA_BUSY;
u32 is_oq_free = ((mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x01FF0000) == 0) &&
@@ -211,28 +199,21 @@ void mtk_dma_monitor(struct timer_list *t)
((mtk_r32(eth, MTK_PSE_OQ_STA(4)) & 0x01FF0000) == 0);
u32 is_cdm_full =
!(mtk_r32(eth, MTK_WDMA_TX_DBG_MON0(0)) & MTK_CDM_TXFIFO_RDY);
/*qdma tx path*/
u32 is_qfsm_hang = mtk_r32(eth, MTK_QDMA_FSM) != 0;
u32 is_qfwd_hang = mtk_r32(eth, MTK_QDMA_FWD_CNT) == 0;
u32 is_qfq_hang = mtk_r32(eth, MTK_QDMA_FQ_CNT) !=
((MTK_DMA_SIZE << 16) | MTK_DMA_SIZE);
u32 is_gdm1_tx = (mtk_r32(eth, MTK_FE_GDM1_FSM) & 0xFFFF0000) > 0;
u32 is_gdm2_tx = (mtk_r32(eth, MTK_FE_GDM2_FSM) & 0xFFFF0000) > 0;
u32 is_gmac1_tx = (mtk_r32(eth, MTK_MAC_FSM(0)) & 0xFF000000) != 0x1000000;
u32 is_gmac2_tx = (mtk_r32(eth, MTK_MAC_FSM(1)) & 0xFF000000) != 0x1000000;
u32 gdm1_fc = mtk_r32(eth, mib_base+0x24);
u32 gdm2_fc = mtk_r32(eth, mib_base+0x64);
/*adma rx path*/
u32 is_oq0_stuck = (mtk_r32(eth, MTK_PSE_OQ_STA(0)) & 0x1FF) != 0;
u32 is_cdm1_busy = (mtk_r32(eth, MTK_FE_CDM1_FSM) & 0xFFFF0000) != 0;
u32 is_adma_busy = ((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x1F) == 0) &&
((mtk_r32(eth, MTK_ADMA_RX_DBG1) & 0x3F0000) == 0) &&
((mtk_r32(eth, MTK_ADMA_RX_DBG0) & 0x40) == 0);
if (cur_wdidx == prev_wdidx && is_wtx_busy &&
is_oq_free && is_cdm_full) {
err_cnt1++;
if (err_cnt1 >= 3) {
pr_info("WDMA CDM Info\n");
if (err_cnt1 == 3) {
pr_info("WDMA CDM Hang !\n");
pr_info("============== Time: %d ================\n",
timestamp);
pr_info("err_cnt1 = %d", err_cnt1);
@@ -262,11 +243,10 @@ void mtk_dma_monitor(struct timer_list *t)
schedule_work(&eth->pending_work);
}
}
} else if (is_qfsm_hang && is_qfwd_hang &&
((is_gdm1_tx && is_gmac1_tx && (gdm1_fc < 1)) || (is_gdm2_tx && is_gmac2_tx && (gdm2_fc < 1)))) {
} else if (is_qfsm_hang && is_qfwd_hang) {
err_cnt2++;
if (err_cnt2 >= 3) {
pr_info("QDMA Tx Info\n");
if (err_cnt2 == 3) {
pr_info("QDMA Tx Hang !\n");
pr_info("============== Time: %d ================\n",
timestamp);
pr_info("err_cnt2 = %d", err_cnt2);
@@ -280,8 +260,6 @@ void mtk_dma_monitor(struct timer_list *t)
mtk_r32(eth, MTK_QDMA_FWD_CNT));
pr_info("MTK_QDMA_FQ_CNT = 0x%x\n",
mtk_r32(eth, MTK_QDMA_FQ_CNT));
pr_info("GDM1 FC = 0x%x\n",gdm1_fc);
pr_info("GDM2 FC = 0x%x\n",gdm2_fc);
pr_info("==============================\n");
if ((atomic_read(&reset_lock) == 0) &&
@@ -292,8 +270,8 @@ void mtk_dma_monitor(struct timer_list *t)
}
} else if (is_oq0_stuck && is_cdm1_busy && is_adma_busy) {
err_cnt3++;
if (err_cnt3 >= 3) {
pr_info("ADMA Rx Info\n");
if (err_cnt3 == 3) {
pr_info("ADMA Rx Hang !\n");
pr_info("============== Time: %d ================\n",
timestamp);
pr_info("err_cnt3 = %d", err_cnt3);
@@ -314,7 +292,7 @@ void mtk_dma_monitor(struct timer_list *t)
schedule_work(&eth->pending_work);
}
}
}else {
} else {
err_cnt1 = 0;
err_cnt2 = 0;
err_cnt3 = 0;
@@ -346,12 +324,12 @@ void mtk_prepare_reset_fe(struct mtk_eth *eth)
/* Power down sgmii */
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->xgmii->regmap_sgmii[i])
if (!eth->sgmii->regmap[i])
continue;
regmap_read(eth->xgmii->regmap_sgmii[i], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
regmap_read(eth->sgmii->regmap[i], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
val |= SGMII_PHYA_PWD;
regmap_write(eth->xgmii->regmap_sgmii[i], SGMSYS_QPHY_PWR_STATE_CTRL, val);
regmap_write(eth->sgmii->regmap[i], SGMSYS_QPHY_PWR_STATE_CTRL, val);
}
/* Force link down GMAC */

File diff suppressed because it is too large Load Diff

View File

@@ -20,13 +20,7 @@
#define MTK_MAX_RX_LENGTH 1536
#define MTK_DMA_SIZE 2048
#define MTK_NAPI_WEIGHT 256
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
#define MTK_MAC_COUNT 3
#else
#define MTK_MAC_COUNT 2
#endif
#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC 0xffffffff
@@ -106,36 +100,23 @@
/* CDMP Exgress Control Register */
#define MTK_CDMP_EG_CTRL 0x404
/* GDM Ingress Control Register */
#define MTK_GDMA_FWD_CFG(x) ((x == MTK_GMAC3_ID) ? \
0x540 : 0x500 + (x * 0x1000))
/* GDM Exgress Control Register */
#define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000))
#define MTK_GDMA_SPECIAL_TAG BIT(24)
#define MTK_GDMA_ICS_EN BIT(22)
#define MTK_GDMA_TCS_EN BIT(21)
#define MTK_GDMA_UCS_EN BIT(20)
#define MTK_GDMA_STRP_CRC BIT(16)
#define MTK_GDMA_TO_PDMA 0x0
#define MTK_GDMA_DROP_ALL 0x7777
/* GDM Egress Control Register */
#define MTK_GDMA_EG_CTRL(x) ((x == MTK_GMAC3_ID) ? \
0x544 : 0x504 + (x * 0x1000))
#define MTK_GDMA_XGDM_SEL BIT(31)
/* Unicast Filter MAC Address Register - Low */
#define MTK_GDMA_MAC_ADRL(x) ((x == MTK_GMAC3_ID) ? \
0x548 : 0x508 + (x * 0x1000))
#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000))
/* Unicast Filter MAC Address Register - High */
#define MTK_GDMA_MAC_ADRH(x) ((x == MTK_GMAC3_ID) ? \
0x54C : 0x50C + (x * 0x1000))
#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
/* Internal SRAM offset */
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
#define MTK_ETH_SRAM_OFFSET 0x300000
#else
#define MTK_ETH_SRAM_OFFSET 0x40000
#endif
/* FE global misc reg*/
#define MTK_FE_GLO_MISC 0x124
@@ -143,13 +124,7 @@
/* PSE Free Queue Flow Control */
#define PSE_FQFC_CFG1 0x100
#define PSE_FQFC_CFG2 0x104
#define PSE_NO_DROP_CFG 0x108
#define PSE_PPE0_DROP 0x110
/* PSE Last FreeQ Page Request Control */
#define PSE_DUMY_REQ 0x10C
#define PSE_DUMMY_WORK_GDM(x) BIT(16 + (x))
#define DUMMY_PAGE_THR 0x151
#define PSE_DROP_CFG 0x108
/* PSE Input Queue Reservation Register*/
#define PSE_IQ_REV(x) (0x140 + ((x - 1) * 0x4))
@@ -167,12 +142,7 @@
#define MTK_PDMA_V2 BIT(4)
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
#define PDMA_BASE 0x6800
#define QDMA_BASE 0x4400
#define WDMA_BASE(x) (0x4800 + ((x) * 0x400))
#define PPE_BASE(x) ((x == 2) ? 0x2C00 : 0x2200 + ((x) * 0x400))
#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define PDMA_BASE 0x6000
#define QDMA_BASE 0x4400
#define WDMA_BASE(x) (0x4800 + ((x) * 0x400))
@@ -201,7 +171,7 @@
/* PDMA HW LRO Control Registers */
#define BITS(m, n) (~(BIT(m) - 1) & ((BIT(n) - 1) | BIT(n)))
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define MTK_MAX_RX_RING_NUM (8)
#define MTK_HW_LRO_RING_NUM (4)
#define IS_HW_LRO_RING(ring_no) (((ring_no) > 3) && ((ring_no) < 8))
@@ -245,7 +215,7 @@
#define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
/* PDMA RSS Control Registers */
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define MTK_PDMA_RSS_GLO_CFG (PDMA_BASE + 0x800)
#define MTK_RX_NAPI_NUM (2)
#define MTK_MAX_IRQ_NUM (4)
@@ -287,7 +257,6 @@
/* PDMA Delay Interrupt Register */
#define MTK_PDMA_DELAY_INT (PDMA_BASE + 0x20c)
#define MTK_PDMA_RSS_DELAY_INT (PDMA_BASE + 0x2c0)
#define MTK_PDMA_DELAY_RX_EN BIT(15)
#define MTK_PDMA_DELAY_RX_PINT 4
#define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
@@ -305,7 +274,7 @@
/* PDMA Interrupt grouping registers */
#define MTK_PDMA_INT_GRP1 (PDMA_BASE + 0x250)
#define MTK_PDMA_INT_GRP2 (PDMA_BASE + 0x254)
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define MTK_PDMA_INT_GRP3 (PDMA_BASE + 0x258)
#else
#define MTK_PDMA_INT_GRP3 (PDMA_BASE + 0x22c)
@@ -314,7 +283,7 @@
#define MTK_MAX_DELAY_INT 0x8f0f8f0f
/* PDMA HW LRO IP Setting Registers */
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define MTK_LRO_RX_RING0_DIP_DW0 (PDMA_BASE + 0x414)
#else
#define MTK_LRO_RX_RING0_DIP_DW0 (PDMA_BASE + 0x304)
@@ -393,9 +362,6 @@
/* QDMA RX DMA Pointer Register */
#define MTK_QRX_DRX_IDX0 (QDMA_BASE + 0x10c)
/* QDMA Page Configuration Register */
#define MTK_QDMA_PAGE (QDMA_BASE + 0x1f0)
/* QDMA Global Configuration Register */
#define MTK_QDMA_GLO_CFG (QDMA_BASE + 0x204)
#define MTK_RX_2B_OFFSET BIT(31)
@@ -433,10 +399,9 @@
/* QDMA Interrupt Status Register */
#define MTK_QDMA_INT_STATUS (QDMA_BASE + 0x218)
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#define MTK_RX_DONE_INT(ring_no) \
(MTK_HAS_CAPS(eth->soc->caps, MTK_RSS) ? (BIT(24 + (ring_no))) : \
((ring_no) ? BIT(16 + (ring_no)) : BIT(14)))
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define MTK_RX_DONE_INT(ring_no) \
((ring_no)? BIT(16 + (ring_no)) : BIT(14))
#else
#define MTK_RX_DONE_INT(ring_no) \
((ring_no)? BIT(24 + (ring_no)) : BIT(30))
@@ -500,21 +465,15 @@
#define MTK_CDM_TXFIFO_RDY BIT(7)
/* GMA1 Received Good Byte Count Register */
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define MTK_GDM1_TX_GBCNT 0x1C00
#else
#define MTK_GDM1_TX_GBCNT 0x2400
#endif
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
#define MTK_STAT_OFFSET 0x80
#else
#define MTK_STAT_OFFSET 0x40
#endif
/* QDMA TX NUM */
#define MTK_QDMA_TX_NUM 16
#define MTK_QDMA_PAGE_NUM 8
#define MTK_QDMA_TX_MASK ((MTK_QDMA_TX_NUM) - 1)
#define QID_LOW_BITS(x) ((x) & 0xf)
#define QID_HIGH_BITS(x) ((((x) >> 4) & 0x3) << 20)
@@ -528,22 +487,31 @@
/* QDMA V2 descriptor txd5 */
#define TX_DMA_CHKSUM_V2 (0x7 << 28)
#define TX_DMA_TSO_V2 BIT(31)
#define TX_DMA_SPTAG_V3 BIT(27)
/* QDMA V2 descriptor txd4 */
#define TX_DMA_FPORT_SHIFT_V2 8
#define TX_DMA_FPORT_MASK_V2 0xf
#define TX_DMA_SWC_V2 BIT(30)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define MTK_TX_DMA_BUF_LEN 0xffff
#define MTK_TX_DMA_BUF_SHIFT 8
#else
#define MTK_TX_DMA_BUF_LEN 0x3fff
#define MTK_TX_DMA_BUF_LEN_V2 0xffff
#define MTK_TX_DMA_BUF_SHIFT 16
#define MTK_TX_DMA_BUF_SHIFT_V2 8
#endif
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define MTK_RX_DMA_BUF_LEN 0xffff
#define MTK_RX_DMA_BUF_SHIFT 8
#define RX_DMA_SPORT_SHIFT 26
#define RX_DMA_SPORT_MASK 0xf
#else
#define MTK_RX_DMA_BUF_LEN 0x3fff
#define MTK_RX_DMA_BUF_SHIFT 16
#define RX_DMA_SPORT_SHIFT 19
#define RX_DMA_SPORT_SHIFT_V2 26
#define RX_DMA_SPORT_MASK 0x7
#define RX_DMA_SPORT_MASK_V2 0xf
#endif
/* QDMA descriptor txd4 */
#define TX_DMA_CHKSUM (0x7 << 29)
@@ -555,10 +523,10 @@
/* QDMA descriptor txd3 */
#define TX_DMA_OWNER_CPU BIT(31)
#define TX_DMA_LS0 BIT(30)
#define TX_DMA_PLEN0(_x) (((_x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
#define TX_DMA_PLEN1(_x) ((_x) & eth->soc->txrx.dma_max_len)
#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << MTK_TX_DMA_BUF_SHIFT)
#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN)
#define TX_DMA_SWC BIT(14)
#define TX_DMA_SDP1(_x) ((((u64)(_x)) >> 32) & 0xf)
#define TX_DMA_SDL(_x) (TX_DMA_PLEN0(_x))
/* PDMA on MT7628 */
#define TX_DMA_DONE BIT(31)
@@ -568,12 +536,11 @@
/* QDMA descriptor rxd2 */
#define RX_DMA_DONE BIT(31)
#define RX_DMA_LSO BIT(30)
#define RX_DMA_PLEN0(_x) (((_x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
#define RX_DMA_GET_PLEN0(_x) (((_x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
#define RX_DMA_PLEN0(_x) (((_x) & MTK_RX_DMA_BUF_LEN) << MTK_RX_DMA_BUF_SHIFT)
#define RX_DMA_GET_PLEN0(_x) (((_x) >> MTK_RX_DMA_BUF_SHIFT) & MTK_RX_DMA_BUF_LEN)
#define RX_DMA_GET_AGG_CNT(_x) (((_x) >> 2) & 0xff)
#define RX_DMA_GET_REV(_x) (((_x) >> 10) & 0x1f)
#define RX_DMA_VTAG BIT(15)
#define RX_DMA_SDP1(_x) ((((u64)(_x)) >> 32) & 0xf)
/* QDMA descriptor rxd3 */
#define RX_DMA_VID(_x) ((_x) & VLAN_VID_MASK)
@@ -586,7 +553,6 @@
#define RX_DMA_SPECIAL_TAG BIT(22) /* switch header in packet */
#define RX_DMA_GET_SPORT(_x) (((_x) >> RX_DMA_SPORT_SHIFT) & RX_DMA_SPORT_MASK)
#define RX_DMA_GET_SPORT_V2(_x) (((_x) >> RX_DMA_SPORT_SHIFT_V2) & RX_DMA_SPORT_MASK_V2)
/* PDMA V2 descriptor rxd3 */
#define RX_DMA_VTAG_V2 BIT(0)
@@ -600,7 +566,6 @@
/* PDMA V2 descriptor rxd6 */
#define RX_DMA_GET_FLUSH_RSN_V2(_x) ((_x) & 0x7)
#define RX_DMA_GET_AGG_CNT_V2(_x) (((_x) >> 16) & 0xff)
#define RX_DMA_GET_TOPS_CRSN(_x) (((_x) >> 24) & 0xff)
/* PHY Indirect Access Control registers */
#define MTK_PHY_IAC 0x10004
@@ -618,27 +583,10 @@
#define MTK_MAC_MISC 0x1000c
#define MTK_MUX_TO_ESW BIT(0)
/* XMAC status registers */
#define MTK_XGMAC_STS(x) ((x == MTK_GMAC3_ID) ? 0x1001C : 0x1000C)
#define MTK_XGMAC_FORCE_LINK BIT(15)
#define MTK_USXGMII_PCS_LINK BIT(8)
#define MTK_XGMAC_RX_FC BIT(5)
#define MTK_XGMAC_TX_FC BIT(4)
#define MTK_USXGMII_PCS_MODE GENMASK(3, 1)
#define MTK_XGMAC_LINK_STS BIT(0)
/* GSW bridge registers */
#define MTK_GSW_CFG (0x10080)
#define GSWTX_IPG_MASK GENMASK(19, 16)
#define GSWTX_IPG_SHIFT 16
#define GSWRX_IPG_MASK GENMASK(3, 0)
#define GSWRX_IPG_SHIFT 0
#define GSW_IPG_11 11
/* Mac control registers */
#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100))
#define MAC_MCR_MAX_RX_1536 BIT(24)
#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16) | BIT(12))
#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
#define MAC_MCR_FORCE_MODE BIT(15)
#define MAC_MCR_TX_EN BIT(14)
#define MAC_MCR_RX_EN BIT(13)
@@ -652,12 +600,6 @@
#define MAC_MCR_FORCE_LINK BIT(0)
#define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE)
/* XFI Mac control registers */
#define MTK_XMAC_MCR(x) (0x12000 + ((x - 1) * 0x1000))
#define XMAC_MCR_TRX_DISABLE 0xf
#define XMAC_MCR_FORCE_TX_FC BIT(5)
#define XMAC_MCR_FORCE_RX_FC BIT(4)
/* Mac status registers */
#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100))
#define MAC_MSR_EEE1G BIT(7)
@@ -722,12 +664,11 @@
#define ETHSYS_SYSCFG0 0x14
#define SYSCFG0_GE_MASK 0x3
#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
#define SYSCFG0_SGMII_MASK GENMASK(9, 7)
#define SYSCFG0_SGMII_MASK GENMASK(9, 8)
#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & SYSCFG0_SGMII_MASK)
#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK)
#define SYSCFG0_SGMII_GMAC1_V2 BIT(9)
#define SYSCFG0_SGMII_GMAC2_V2 BIT(8)
#define SYSCFG0_SGMII_GMAC3_V2 BIT(7)
/* ethernet subsystem clock register */
@@ -765,15 +706,6 @@
#define SGMII_PCS_FAULT BIT(23)
#define SGMII_AN_EXPANSION_CLR BIT(30)
/* Register to set SGMII speed */
#define SGMII_PCS_SPEED_ABILITY 0x08
#define SGMII_PCS_SPEED_MASK GENMASK(11, 10)
#define SGMII_PCS_SPEED_10 0
#define SGMII_PCS_SPEED_100 1
#define SGMII_PCS_SPEED_1000 2
#define SGMII_PCS_SPEED_DUPLEX BIT(12)
#define SGMII_PCS_SPEED_LINK BIT(15)
/* Register to programmable link timer, the unit in 2 * 8ns */
#define SGMSYS_PCS_LINK_TIMER 0x18
#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & GENMASK(19, 0))
@@ -782,7 +714,6 @@
#define SGMSYS_SGMII_MODE 0x20
#define SGMII_IF_MODE_BIT0 BIT(0)
#define SGMII_SPEED_DUPLEX_AN BIT(1)
#define SGMII_SPEED_MASK GENMASK(3, 2)
#define SGMII_SPEED_10 0x0
#define SGMII_SPEED_100 BIT(2)
#define SGMII_SPEED_1000 BIT(3)
@@ -794,10 +725,6 @@
#define SGMII_SEND_AN_ERROR_EN BIT(11)
#define SGMII_IF_MODE_MASK GENMASK(5, 1)
/* Register to reset SGMII design */
#define SGMII_RESERVED_0 0x34
#define SGMII_SW_RESET BIT(0)
/* Register to set SGMII speed, ANA RG_ Control Signals III*/
#define SGMSYS_ANA_RG_CS3 0x2028
#define RG_PHY_SPEED_MASK (BIT(2) | BIT(3))
@@ -813,49 +740,12 @@
#define SGMII_PN_SWAP_MASK GENMASK(1, 0)
#define SGMII_PN_SWAP_TX_RX (BIT(0) | BIT(1))
/* USXGMII subsystem config registers */
/* Register to control speed */
#define RG_PHY_TOP_SPEED_CTRL1 0x80C
#define RG_USXGMII_RATE_UPDATE_MODE BIT(31)
#define RG_MAC_CK_GATED BIT(29)
#define RG_IF_FORCE_EN BIT(28)
#define RG_RATE_ADAPT_MODE GENMASK(10, 8)
#define RG_RATE_ADAPT_MODE_X1 0
#define RG_RATE_ADAPT_MODE_X2 1
#define RG_RATE_ADAPT_MODE_X4 2
#define RG_RATE_ADAPT_MODE_X10 3
#define RG_RATE_ADAPT_MODE_X100 4
#define RG_RATE_ADAPT_MODE_X5 5
#define RG_RATE_ADAPT_MODE_X50 6
#define RG_XFI_RX_MODE GENMASK(6, 4)
#define RG_XFI_RX_MODE_10G 0
#define RG_XFI_RX_MODE_5G 1
#define RG_XFI_TX_MODE GENMASK(2, 0)
#define RG_XFI_TX_MODE_10G 0
#define RG_XFI_TX_MODE_5G 1
/* Register to control PCS AN */
#define RG_PCS_AN_CTRL0 0x810
#define RG_AN_ENABLE BIT(0)
/* Register to control USXGMII XFI PLL digital */
#define XFI_PLL_DIG_GLB8 0x08
#define RG_XFI_PLL_EN BIT(31)
/* Register to control USXGMII XFI PLL analog */
#define XFI_PLL_ANA_GLB8 0x108
#define RG_XFI_PLL_ANA_SWWA 0x02283248
/* Infrasys subsystem config registers */
#define INFRA_MISC2 0x70c
#define CO_QPHY_SEL BIT(0)
#define GEPHY_MAC_SEL BIT(1)
/* Top misc registers */
#define TOP_MISC_NETSYS_PCS_MUX 0x84
#define NETSYS_PCS_MUX_MASK GENMASK(1, 0)
#define MUX_G2_USXGMII_SEL BIT(1)
#define MUX_HSGMII1_G1_SEL BIT(0)
#define USB_PHY_SWITCH_REG 0x218
#define QPHY_SEL_MASK GENMASK(1, 0)
#define SGMII_QPHY_SEL 0x2
@@ -883,17 +773,12 @@ struct mtk_rx_dma {
unsigned int rxd2;
unsigned int rxd3;
unsigned int rxd4;
} __packed __aligned(4);
struct mtk_rx_dma_v2 {
unsigned int rxd1;
unsigned int rxd2;
unsigned int rxd3;
unsigned int rxd4;
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
unsigned int rxd5;
unsigned int rxd6;
unsigned int rxd7;
unsigned int rxd8;
#endif
} __packed __aligned(4);
struct mtk_tx_dma {
@@ -901,17 +786,12 @@ struct mtk_tx_dma {
unsigned int txd2;
unsigned int txd3;
unsigned int txd4;
} __packed __aligned(4);
struct mtk_tx_dma_v2 {
unsigned int txd1;
unsigned int txd2;
unsigned int txd3;
unsigned int txd4;
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
unsigned int txd5;
unsigned int txd6;
unsigned int txd7;
unsigned int txd8;
#endif
} __packed __aligned(4);
struct mtk_eth;
@@ -957,7 +837,6 @@ enum mtk_tx_flags {
*/
MTK_TX_FLAGS_FPORT0 = 0x04,
MTK_TX_FLAGS_FPORT1 = 0x08,
MTK_TX_FLAGS_FPORT2 = 0x10,
};
/* This enum allows us to identify how the clock is defined on the array of the
@@ -1037,59 +916,11 @@ enum mtk_clks_map {
BIT(MTK_CLK_SGMII2_RX_250M) | \
BIT(MTK_CLK_SGMII2_CDR_REF) | \
BIT(MTK_CLK_SGMII2_CDR_FB))
#define MT7988_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \
BIT(MTK_CLK_SGMII_TX_250M) | \
BIT(MTK_CLK_SGMII_RX_250M) | \
BIT(MTK_CLK_SGMII_CDR_REF) | \
BIT(MTK_CLK_SGMII_CDR_FB) | \
BIT(MTK_CLK_SGMII2_TX_250M) | \
BIT(MTK_CLK_SGMII2_RX_250M) | \
BIT(MTK_CLK_SGMII2_CDR_REF) | \
BIT(MTK_CLK_SGMII2_CDR_FB))
enum mtk_dev_state {
MTK_HW_INIT,
MTK_RESETTING
};
/* PSE Port Definition */
enum mtk_pse_port {
PSE_ADMA_PORT = 0,
PSE_GDM1_PORT,
PSE_GDM2_PORT,
PSE_PPE0_PORT,
PSE_PPE1_PORT,
PSE_QDMA_TX_PORT,
PSE_QDMA_RX_PORT,
PSE_DROP_PORT,
PSE_WDMA0_PORT,
PSE_WDMA1_PORT,
PSE_TDMA_PORT,
PSE_NONE_PORT,
PSE_PPE2_PORT,
PSE_WDMA2_PORT,
PSE_EIP197_PORT,
PSE_GDM3_PORT,
PSE_PORT_MAX
};
/* GMAC Identifier */
enum mtk_gmac_id {
MTK_GMAC1_ID = 0,
MTK_GMAC2_ID,
MTK_GMAC3_ID,
MTK_GMAC_ID_MAX
};
/* GDM Type */
enum mtk_gdm_type {
MTK_GDM_TYPE = 0,
MTK_XGDM_TYPE,
MTK_GDM_TYPE_MAX
};
/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
* by the TX descriptor s
* @skb: The SKB pointer of the packet being sent
@@ -1119,16 +950,16 @@ struct mtk_tx_buf {
* are present
*/
struct mtk_tx_ring {
void *dma;
struct mtk_tx_dma *dma;
struct mtk_tx_buf *buf;
dma_addr_t phys;
void *next_free;
void *last_free;
struct mtk_tx_dma *next_free;
struct mtk_tx_dma *last_free;
u32 last_free_ptr;
u16 thresh;
atomic_t free_count;
int dma_size;
void *dma_pdma; /* For MT7628/88 PDMA handling */
struct mtk_tx_dma *dma_pdma; /* For MT7628/88 PDMA handling */
dma_addr_t phys_pdma;
int cpu_idx;
};
@@ -1150,7 +981,7 @@ enum mtk_rx_flags {
* @ring_no: The index of ring
*/
struct mtk_rx_ring {
void *dma;
struct mtk_rx_dma *dma;
u8 **data;
dma_addr_t phys;
u16 frag_size;
@@ -1180,7 +1011,6 @@ enum mkt_eth_capabilities {
MTK_RGMII_BIT = 0,
MTK_TRGMII_BIT,
MTK_SGMII_BIT,
MTK_USXGMII_BIT,
MTK_ESW_BIT,
MTK_GEPHY_BIT,
MTK_MUX_BIT,
@@ -1191,13 +1021,10 @@ enum mkt_eth_capabilities {
MTK_SHARED_INT_BIT,
MTK_TRGMII_MT7621_CLK_BIT,
MTK_QDMA_BIT,
MTK_NETSYS_V1_BIT,
MTK_NETSYS_V2_BIT,
MTK_NETSYS_V3_BIT,
MTK_SOC_MT7628_BIT,
MTK_RSTCTRL_PPE1_BIT,
MTK_U3_COPHY_V2_BIT,
MTK_8GB_ADDRESSING_BIT,
/* MUX BITS*/
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
@@ -1205,8 +1032,6 @@ enum mkt_eth_capabilities {
MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,
MTK_ETH_MUX_GMAC123_TO_GEPHY_SGMII_BIT,
MTK_ETH_MUX_GMAC123_TO_USXGMII_BIT,
/* PATH BITS */
MTK_ETH_PATH_GMAC1_RGMII_BIT,
@@ -1215,63 +1040,47 @@ enum mkt_eth_capabilities {
MTK_ETH_PATH_GMAC2_RGMII_BIT,
MTK_ETH_PATH_GMAC2_SGMII_BIT,
MTK_ETH_PATH_GMAC2_GEPHY_BIT,
MTK_ETH_PATH_GMAC3_SGMII_BIT,
MTK_ETH_PATH_GDM1_ESW_BIT,
MTK_ETH_PATH_GMAC1_USXGMII_BIT,
MTK_ETH_PATH_GMAC2_USXGMII_BIT,
MTK_ETH_PATH_GMAC3_USXGMII_BIT,
};
/* Supported hardware group on SoCs */
#define MTK_RGMII BIT_ULL(MTK_RGMII_BIT)
#define MTK_TRGMII BIT_ULL(MTK_TRGMII_BIT)
#define MTK_SGMII BIT_ULL(MTK_SGMII_BIT)
#define MTK_USXGMII BIT_ULL(MTK_USXGMII_BIT)
#define MTK_ESW BIT_ULL(MTK_ESW_BIT)
#define MTK_GEPHY BIT_ULL(MTK_GEPHY_BIT)
#define MTK_MUX BIT_ULL(MTK_MUX_BIT)
#define MTK_INFRA BIT_ULL(MTK_INFRA_BIT)
#define MTK_SHARED_SGMII BIT_ULL(MTK_SHARED_SGMII_BIT)
#define MTK_HWLRO BIT_ULL(MTK_HWLRO_BIT)
#define MTK_RSS BIT_ULL(MTK_RSS_BIT)
#define MTK_SHARED_INT BIT_ULL(MTK_SHARED_INT_BIT)
#define MTK_TRGMII_MT7621_CLK BIT_ULL(MTK_TRGMII_MT7621_CLK_BIT)
#define MTK_QDMA BIT_ULL(MTK_QDMA_BIT)
#define MTK_NETSYS_V1 BIT_ULL(MTK_NETSYS_V1_BIT)
#define MTK_NETSYS_V2 BIT_ULL(MTK_NETSYS_V2_BIT)
#define MTK_NETSYS_V3 BIT_ULL(MTK_NETSYS_V3_BIT)
#define MTK_SOC_MT7628 BIT_ULL(MTK_SOC_MT7628_BIT)
#define MTK_RSTCTRL_PPE1 BIT_ULL(MTK_RSTCTRL_PPE1_BIT)
#define MTK_U3_COPHY_V2 BIT_ULL(MTK_U3_COPHY_V2_BIT)
#define MTK_8GB_ADDRESSING BIT_ULL(MTK_8GB_ADDRESSING_BIT)
#define MTK_RGMII BIT(MTK_RGMII_BIT)
#define MTK_TRGMII BIT(MTK_TRGMII_BIT)
#define MTK_SGMII BIT(MTK_SGMII_BIT)
#define MTK_ESW BIT(MTK_ESW_BIT)
#define MTK_GEPHY BIT(MTK_GEPHY_BIT)
#define MTK_MUX BIT(MTK_MUX_BIT)
#define MTK_INFRA BIT(MTK_INFRA_BIT)
#define MTK_SHARED_SGMII BIT(MTK_SHARED_SGMII_BIT)
#define MTK_HWLRO BIT(MTK_HWLRO_BIT)
#define MTK_RSS BIT(MTK_RSS_BIT)
#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
#define MTK_QDMA BIT(MTK_QDMA_BIT)
#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT)
#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT)
#define MTK_U3_COPHY_V2 BIT(MTK_U3_COPHY_V2_BIT)
#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
BIT_ULL(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
#define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY \
BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \
BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
BIT_ULL(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \
BIT_ULL(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)
#define MTK_ETH_MUX_GMAC123_TO_GEPHY_SGMII \
BIT_ULL(MTK_ETH_MUX_GMAC123_TO_GEPHY_SGMII_BIT)
#define MTK_ETH_MUX_GMAC123_TO_USXGMII \
BIT_ULL(MTK_ETH_MUX_GMAC123_TO_USXGMII_BIT)
BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)
/* Supported path present on SoCs */
#define MTK_ETH_PATH_GMAC1_RGMII BIT_ULL(MTK_ETH_PATH_GMAC1_RGMII_BIT)
#define MTK_ETH_PATH_GMAC1_TRGMII BIT_ULL(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
#define MTK_ETH_PATH_GMAC1_SGMII BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_RGMII BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT)
#define MTK_ETH_PATH_GMAC2_SGMII BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_GEPHY BIT_ULL(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
#define MTK_ETH_PATH_GMAC3_SGMII BIT_ULL(MTK_ETH_PATH_GMAC3_SGMII_BIT)
#define MTK_ETH_PATH_GDM1_ESW BIT_ULL(MTK_ETH_PATH_GDM1_ESW_BIT)
#define MTK_ETH_PATH_GMAC1_USXGMII BIT_ULL(MTK_ETH_PATH_GMAC1_USXGMII_BIT)
#define MTK_ETH_PATH_GMAC2_USXGMII BIT_ULL(MTK_ETH_PATH_GMAC2_USXGMII_BIT)
#define MTK_ETH_PATH_GMAC3_USXGMII BIT_ULL(MTK_ETH_PATH_GMAC3_USXGMII_BIT)
#define MTK_ETH_PATH_GMAC1_RGMII BIT(MTK_ETH_PATH_GMAC1_RGMII_BIT)
#define MTK_ETH_PATH_GMAC1_TRGMII BIT(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
#define MTK_ETH_PATH_GMAC1_SGMII BIT(MTK_ETH_PATH_GMAC1_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_RGMII BIT(MTK_ETH_PATH_GMAC2_RGMII_BIT)
#define MTK_ETH_PATH_GMAC2_SGMII BIT(MTK_ETH_PATH_GMAC2_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_GEPHY BIT(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
#define MTK_ETH_PATH_GDM1_ESW BIT(MTK_ETH_PATH_GDM1_ESW_BIT)
#define MTK_GMAC1_RGMII (MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII)
#define MTK_GMAC1_TRGMII (MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII)
@@ -1279,11 +1088,7 @@ enum mkt_eth_capabilities {
#define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
#define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
#define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
#define MTK_GMAC3_SGMII (MTK_ETH_PATH_GMAC3_SGMII | MTK_SGMII)
#define MTK_GDM1_ESW (MTK_ETH_PATH_GDM1_ESW | MTK_ESW)
#define MTK_GMAC1_USXGMII (MTK_ETH_PATH_GMAC1_USXGMII | MTK_USXGMII)
#define MTK_GMAC2_USXGMII (MTK_ETH_PATH_GMAC2_USXGMII | MTK_USXGMII)
#define MTK_GMAC3_USXGMII (MTK_ETH_PATH_GMAC3_USXGMII | MTK_USXGMII)
/* MUXes present on SoCs */
/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */
@@ -1306,32 +1111,26 @@ enum mkt_eth_capabilities {
#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)
#define MTK_MUX_GMAC123_TO_GEPHY_SGMII \
(MTK_ETH_MUX_GMAC123_TO_GEPHY_SGMII | MTK_MUX)
#define MTK_MUX_GMAC123_TO_USXGMII \
(MTK_ETH_MUX_GMAC123_TO_USXGMII | MTK_MUX | MTK_INFRA)
#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
#define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
MTK_GMAC2_RGMII | MTK_SHARED_INT | \
MTK_TRGMII_MT7621_CLK | MTK_QDMA | MTK_NETSYS_V1)
MTK_TRGMII_MT7621_CLK | MTK_QDMA)
#define MT7622_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
MTK_MUX_GDM1_TO_GMAC1_ESW | MTK_NETSYS_V1 | \
MTK_MUX_GDM1_TO_GMAC1_ESW | \
MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA)
#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \
MTK_QDMA | MTK_NETSYS_V1)
MTK_QDMA)
#define MT7628_CAPS (MTK_SHARED_INT | MTK_SOC_MT7628 | MTK_NETSYS_V1)
#define MT7628_CAPS (MTK_SHARED_INT | MTK_SOC_MT7628)
#define MT7629_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
MTK_MUX_U3_GMAC2_TO_QPHY | MTK_NETSYS_V1 | \
MTK_MUX_U3_GMAC2_TO_QPHY | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
@@ -1343,24 +1142,6 @@ enum mkt_eth_capabilities {
MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \
MTK_NETSYS_V2)
#define MT7988_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC3_SGMII | \
MTK_MUX_GMAC123_TO_GEPHY_SGMII | MTK_QDMA | \
MTK_NETSYS_V3 | MTK_RSTCTRL_PPE1 | \
MTK_GMAC1_USXGMII | MTK_GMAC2_USXGMII | \
MTK_GMAC3_USXGMII | MTK_MUX_GMAC123_TO_USXGMII | MTK_RSS)
struct mtk_tx_dma_desc_info {
dma_addr_t addr;
u32 size;
u16 vlan_tci;
u16 qid;
u8 gso:1;
u8 csum:1;
u8 vlan:1;
u8 first:1;
u8 last:1;
};
/* struct mtk_eth_data - This is the structure holding all differences
* among various plaforms
* @ana_rgc3: The offset for register ANA_RGC3 related to
@@ -1371,57 +1152,36 @@ struct mtk_tx_dma_desc_info {
* the target SoC
* @required_pctl A bool value to show whether the SoC requires
* the extra setup for those pins used by GMAC.
* @txd_size Tx DMA descriptor size.
* @rxd_size Rx DMA descriptor size.
* @dma_max_len Max DMA tx/rx buffer length.
* @dma_len_offset Tx/Rx DMA length field offset.
*/
struct mtk_soc_data {
u32 ana_rgc3;
u64 caps;
u32 caps;
u32 required_clks;
bool required_pctl;
netdev_features_t hw_features;
bool has_sram;
struct {
u32 txd_size;
u32 rxd_size;
u32 dma_max_len;
u32 dma_len_offset;
} txrx;
};
/* currently no SoC has more than 3 macs */
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
#define MTK_MAX_DEVS 3
#else
#define MTK_MAX_DEVS 2
#endif
/* currently no SoC has more than 2 macs */
#define MTK_MAX_DEVS 2
#define MTK_SGMII_PHYSPEED_AN BIT(31)
#define MTK_SGMII_PHYSPEED_MASK GENMASK(2, 0)
#define MTK_SGMII_PHYSPEED_1000 BIT(0)
#define MTK_SGMII_PHYSPEED_2500 BIT(1)
#define MTK_SGMII_PHYSPEED_5000 BIT(2)
#define MTK_SGMII_PHYSPEED_10000 BIT(3)
#define MTK_SGMII_PN_SWAP BIT(16)
#define MTK_USXGMII_INT_2500 BIT(17)
#define MTK_HAS_FLAGS(flags, _x) (((flags) & (_x)) == (_x))
/* struct mtk_xgmii - This is the structure holding sgmii/usxgmii regmap and
* its characteristics
/* struct mtk_sgmii - This is the structure holding sgmii regmap and its
* characteristics
* @regmap: The register map pointing at the range used to setup
* SGMII/USXGMII modes
* SGMII modes
* @flags: The enum refers to which mode the sgmii wants to run on
* @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
*/
struct mtk_xgmii {
struct mtk_eth *eth;
struct regmap *regmap_sgmii[MTK_MAX_DEVS];
struct regmap *regmap_usxgmii[MTK_MAX_DEVS];
struct regmap *regmap_pextp[MTK_MAX_DEVS];
struct regmap *regmap_pll;
struct mtk_sgmii {
struct regmap *regmap[MTK_MAX_DEVS];
u32 flags[MTK_MAX_DEVS];
u32 ana_rgc3;
};
@@ -1435,21 +1195,6 @@ struct mtk_reset_event {
u32 count[32];
};
/* struct mtk_phylink_priv - This is the structure holding private data for phylink
* @desc: Pointer to the memory holding info about the phylink gpio
* @id: The element is used to record the phy index of phylink
* @phyaddr: The element is used to record the phy address of phylink
* @link: The element is used to record the phy link status of phylink
*/
struct mtk_phylink_priv {
struct net_device *dev;
struct gpio_desc *desc;
char label[16];
int id;
int phyaddr;
int link;
};
/* struct mtk_eth - This is the main datasructure for holding the state
* of the driver
* @dev: The device pointer
@@ -1488,7 +1233,6 @@ struct mtk_phylink_priv {
struct mtk_eth {
struct device *dev;
void __iomem *base;
void __iomem *sram_base;
spinlock_t page_lock;
spinlock_t tx_irq_lock;
spinlock_t rx_irq_lock;
@@ -1500,8 +1244,7 @@ struct mtk_eth {
unsigned long sysclk;
struct regmap *ethsys;
struct regmap *infra;
struct regmap *toprgu;
struct mtk_xgmii *xgmii;
struct mtk_sgmii *sgmii;
struct regmap *pctl;
bool hwlro;
refcount_t dma_refcnt;
@@ -1510,7 +1253,7 @@ struct mtk_eth {
struct mtk_rx_ring rx_ring_qdma;
struct napi_struct tx_napi;
struct mtk_napi rx_napi[MTK_RX_NAPI_NUM];
void *scratch_ring;
struct mtk_tx_dma *scratch_ring;
struct mtk_reset_event reset_event;
dma_addr_t phy_scratch_ring;
void *scratch_head;
@@ -1542,12 +1285,10 @@ struct mtk_mac {
unsigned int id;
phy_interface_t interface;
unsigned int mode;
unsigned int type;
int speed;
struct device_node *of_node;
struct phylink *phylink;
struct phylink_config phylink_config;
struct mtk_phylink_priv phylink_priv;
struct mtk_eth *hw;
struct mtk_hw_stats *hw_stats;
__be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
@@ -1565,31 +1306,17 @@ void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg);
int mtk_sgmii_init(struct mtk_xgmii *ss, struct device_node *np,
int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np,
u32 ana_rgc3);
int mtk_sgmii_setup_mode_an(struct mtk_xgmii *ss, unsigned int mac_id);
int mtk_sgmii_setup_mode_force(struct mtk_xgmii *ss, unsigned int mac_id,
int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, unsigned int id);
int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, unsigned int id,
const struct phylink_link_state *state);
void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id);
void mtk_sgmii_setup_phya_gen2(struct mtk_xgmii *ss, int mac_id);
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_usxgmii_path_setup(struct mtk_eth *eth, int mac_id);
void mtk_gdm_config(struct mtk_eth *eth, u32 config);
void ethsys_reset(struct mtk_eth *eth, u32 reset_bits);
int mtk_mac2xgmii_id(struct mtk_eth *eth, int mac_id);
int mtk_usxgmii_init(struct mtk_xgmii *ss, struct device_node *r);
int mtk_xfi_pextp_init(struct mtk_xgmii *ss, struct device_node *r);
int mtk_xfi_pll_init(struct mtk_xgmii *ss, struct device_node *r);
int mtk_toprgu_init(struct mtk_eth *eth, struct device_node *r);
int mtk_xfi_pll_enable(struct mtk_xgmii *ss);
int mtk_usxgmii_setup_mode_an(struct mtk_xgmii *ss, int mac_id,
int max_speed);
int mtk_usxgmii_setup_mode_force(struct mtk_xgmii *ss, int mac_id,
int max_speed);
void mtk_usxgmii_setup_phya_an_10000(struct mtk_xgmii *ss, int mac_id);
void mtk_usxgmii_reset(struct mtk_xgmii *ss, int mac_id);
#endif /* MTK_ETH_H */

View File

@@ -139,27 +139,17 @@ void set_gmac_ppe_fwd(int id, int enable)
void __iomem *reg;
u32 val;
reg = hnat_priv->fe_base +
((id == NR_GMAC1_PORT) ? GDMA1_FWD_CFG :
(id == NR_GMAC2_PORT) ? GDMA2_FWD_CFG : GDMA3_FWD_CFG);
reg = hnat_priv->fe_base + (id ? GDMA2_FWD_CFG : GDMA1_FWD_CFG);
if (enable) {
if (CFG_PPE_NUM == 3 && id == NR_GMAC3_PORT)
cr_set_bits(reg, BITS_GDM_ALL_FRC_P_PPE2);
else if (CFG_PPE_NUM == 3 && id == NR_GMAC2_PORT)
cr_set_bits(reg, BITS_GDM_ALL_FRC_P_PPE1);
else
cr_set_bits(reg, BITS_GDM_ALL_FRC_P_PPE);
cr_set_bits(reg, BITS_GDM_ALL_FRC_P_PPE);
return;
}
/*disabled */
val = readl(reg);
if ((val & GDM_ALL_FRC_MASK) == BITS_GDM_ALL_FRC_P_PPE ||
(CFG_PPE_NUM == 3 &&
((val & GDM_ALL_FRC_MASK) == BITS_GDM_ALL_FRC_P_PPE1 ||
(val & GDM_ALL_FRC_MASK) == BITS_GDM_ALL_FRC_P_PPE2)))
if ((val & GDM_ALL_FRC_MASK) == BITS_GDM_ALL_FRC_P_PPE)
cr_set_field(reg, GDM_ALL_FRC_MASK,
BITS_GDM_ALL_FRC_P_CPU_PDMA);
}
@@ -307,9 +297,7 @@ static int hnat_hw_init(u32 ppe_id)
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, HASH_MODE, HASH_MODE_1);
writel(HASH_SEED_KEY, hnat_priv->ppe_base[ppe_id] + PPE_HASH_SEED);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, XMODE, 0);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TB_ENTRY_SIZE,
(hnat_priv->data->version == MTK_HNAT_V5) ? ENTRY_128B :
(hnat_priv->data->version == MTK_HNAT_V4) ? ENTRY_96B : ENTRY_80B);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TB_ENTRY_SIZE, ENTRY_80B);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, SMA, SMA_FWD_CPU_BUILD_ENTRY);
/* set ip proto */
@@ -325,8 +313,7 @@ static int hnat_hw_init(u32 ppe_id)
BIT_IPV4_DSL_EN | BIT_IPV6_6RD_EN |
BIT_IPV6_3T_ROUTE_EN | BIT_IPV6_5T_ROUTE_EN);
if (hnat_priv->data->version == MTK_HNAT_V4 ||
hnat_priv->data->version == MTK_HNAT_V5)
if (hnat_priv->data->version == MTK_HNAT_V4)
cr_set_bits(hnat_priv->ppe_base[ppe_id] + PPE_FLOW_CFG,
BIT_IPV4_MAPE_EN | BIT_IPV4_MAPT_EN);
@@ -373,17 +360,11 @@ static int hnat_hw_init(u32 ppe_id)
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, TTL0_DRP, 0);
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, MCAST_TB_EN, 1);
if (hnat_priv->data->version == MTK_HNAT_V4 ||
hnat_priv->data->version == MTK_HNAT_V5) {
if (hnat_priv->data->version == MTK_HNAT_V4) {
writel(0xcb777, hnat_priv->ppe_base[ppe_id] + PPE_DFT_CPORT1);
writel(0x7f, hnat_priv->ppe_base[ppe_id] + PPE_SBW_CTRL);
}
if (hnat_priv->data->version == MTK_HNAT_V5) {
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_SB_FIFO_DBG,
SB_MED_FULL_DRP_EN, 1);
}
/*enable ppe mib counter*/
if (hnat_priv->data->per_flow_accounting) {
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MIB_CFG, MIB_EN, 1);
@@ -403,7 +384,7 @@ static int hnat_start(u32 ppe_id)
{
u32 foe_table_sz;
u32 foe_mib_tb_sz;
int etry_num_cfg;
u32 etry_num_cfg;
if (ppe_id >= CFG_PPE_NUM)
return -EINVAL;
@@ -487,9 +468,8 @@ static void hnat_stop(u32 ppe_id)
return;
/* send all traffic back to the DMA engine */
set_gmac_ppe_fwd(NR_GMAC1_PORT, 0);
set_gmac_ppe_fwd(NR_GMAC2_PORT, 0);
set_gmac_ppe_fwd(NR_GMAC3_PORT, 0);
set_gmac_ppe_fwd(0, 0);
set_gmac_ppe_fwd(1, 0);
dev_info(hnat_priv->dev, "hwnat stop\n");
@@ -520,8 +500,7 @@ static void hnat_stop(u32 ppe_id)
BIT_IPV6_6RD_EN | BIT_IPV6_3T_ROUTE_EN |
BIT_IPV6_5T_ROUTE_EN | BIT_FUC_FOE | BIT_FMC_FOE);
if (hnat_priv->data->version == MTK_HNAT_V4 ||
hnat_priv->data->version == MTK_HNAT_V5)
if (hnat_priv->data->version == MTK_HNAT_V4)
cr_clr_bits(hnat_priv->ppe_base[ppe_id] + PPE_FLOW_CFG,
BIT_IPV4_MAPE_EN | BIT_IPV4_MAPT_EN);
@@ -586,8 +565,7 @@ int hnat_enable_hook(void)
*/
if (hnat_priv->data->whnat) {
ra_sw_nat_hook_rx =
(hnat_priv->data->version == MTK_HNAT_V4 ||
hnat_priv->data->version == MTK_HNAT_V5) ?
(hnat_priv->data->version == MTK_HNAT_V4) ?
mtk_sw_nat_hook_rx : NULL;
ra_sw_nat_hook_tx = mtk_sw_nat_hook_tx;
ppe_dev_register_hook = mtk_ppe_dev_register_hook;
@@ -718,13 +696,6 @@ static int hnat_probe(struct platform_device *pdev)
strncpy(hnat_priv->lan, (char *)name, IFNAMSIZ - 1);
dev_info(&pdev->dev, "lan = %s\n", hnat_priv->lan);
err = of_property_read_string(np, "mtketh-lan2", &name);
if (err < 0)
strncpy(hnat_priv->lan2, "eth2", IFNAMSIZ);
else
strncpy(hnat_priv->lan2, (char *)name, IFNAMSIZ - 1);
dev_info(&pdev->dev, "lan2 = %s\n", hnat_priv->lan2);
err = of_property_read_string(np, "mtketh-ppd", &name);
if (err < 0)
strncpy(hnat_priv->ppd, "eth0", IFNAMSIZ);
@@ -769,14 +740,11 @@ static int hnat_probe(struct platform_device *pdev)
if (!hnat_priv->fe_base)
return -EADDRNOTAVAIL;
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
hnat_priv->ppe_base[0] = hnat_priv->fe_base + 0x2200;
if (CFG_PPE_NUM > 1)
hnat_priv->ppe_base[1] = hnat_priv->fe_base + 0x2600;
if (CFG_PPE_NUM > 2)
hnat_priv->ppe_base[2] = hnat_priv->fe_base + 0x2e00;
#else
hnat_priv->ppe_base[0] = hnat_priv->fe_base + 0xe00;
#endif
@@ -917,21 +885,12 @@ static const struct mtk_hnat_data hnat_data_v4 = {
.version = MTK_HNAT_V4,
};
static const struct mtk_hnat_data hnat_data_v5 = {
.num_of_sch = 4,
.whnat = true,
.per_flow_accounting = true,
.mcast = false,
.version = MTK_HNAT_V5,
};
const struct of_device_id of_hnat_match[] = {
{ .compatible = "mediatek,mtk-hnat", .data = &hnat_data_v3 },
{ .compatible = "mediatek,mtk-hnat_v1", .data = &hnat_data_v1 },
{ .compatible = "mediatek,mtk-hnat_v2", .data = &hnat_data_v2 },
{ .compatible = "mediatek,mtk-hnat_v3", .data = &hnat_data_v3 },
{ .compatible = "mediatek,mtk-hnat_v4", .data = &hnat_data_v4 },
{ .compatible = "mediatek,mtk-hnat_v5", .data = &hnat_data_v5 },
{},
};
MODULE_DEVICE_TABLE(of, of_hnat_match);

View File

@@ -94,18 +94,15 @@
#define PPE_MIB_SER_R0 0X140
#define PPE_MIB_SER_R1 0X144
#define PPE_MIB_SER_R2 0X148
#define PPE_MIB_SER_R3 0X14C
#define PPE_MIB_CAH_CTRL 0X150
#define PPE_MIB_CAH_TAG_SRH 0X154
#define PPE_MIB_CAH_LINE_RW 0X158
#define PPE_MIB_CAH_WDATA 0X15C
#define PPE_MIB_CAH_RDATA 0X160
#define PPE_SB_FIFO_DBG 0x170
#define PPE_SBW_CTRL 0x174
#define GDMA1_FWD_CFG 0x500
#define GDMA2_FWD_CFG 0x1500
#define GDMA3_FWD_CFG 0x540
/* QDMA Tx queue configuration */
#define QTX_CFG(x) (QDMA_BASE + ((x) * 0x10))
@@ -196,10 +193,10 @@
#define MIB_CAH_EN (0X1 << 0) /* RW */
/*GDMA_FWD_CFG mask */
#define GDM_UFRC_MASK (0xF << 12) /* RW */
#define GDM_BFRC_MASK (0xF << 8) /*RW*/
#define GDM_MFRC_MASK (0xF << 4) /*RW*/
#define GDM_OFRC_MASK (0xF << 0) /*RW*/
#define GDM_UFRC_MASK (0x7 << 12) /* RW */
#define GDM_BFRC_MASK (0x7 << 8) /*RW*/
#define GDM_MFRC_MASK (0x7 << 4) /*RW*/
#define GDM_OFRC_MASK (0x7 << 0) /*RW*/
#define GDM_ALL_FRC_MASK \
(GDM_UFRC_MASK | GDM_BFRC_MASK | GDM_MFRC_MASK | GDM_OFRC_MASK)
@@ -210,13 +207,10 @@
#define MIB_ON_QTX_CFG (0x1 << 31) /* RW */
#define VQTX_MIB_EN (0x1 << 28) /* RW */
/* PPE Side Band FIFO Debug Mask */
#define SB_MED_FULL_DRP_EN (0x1 << 11)
/*--------------------------------------------------------------------------*/
/* Descriptor Structure */
/*--------------------------------------------------------------------------*/
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
struct hnat_unbind_info_blk {
u32 time_stamp : 8;
u32 sp : 4;
@@ -263,29 +257,10 @@ struct hnat_info_blk2 {
u32 dscp : 8; /* DSCP value */
} __packed;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
struct hnat_winfo {
u32 wcid : 16; /* WiFi wtable Idx */
u32 bssid : 8; /* WiFi Bssidx */
u32 resv : 8;
} __packed;
struct hnat_winfo_pao {
u32 usr_info : 16;
u32 tid : 4;
u32 is_fixedrate : 1;
u32 is_prior : 1;
u32 is_sp : 1;
u32 hf : 1;
u32 amsdu : 1;
u32 resv : 7;
} __packed;
#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
struct hnat_winfo {
u32 bssid : 6; /* WiFi Bssidx */
u32 wcid : 10; /* WiFi wtable Idx */
} __packed;
#endif
#else
struct hnat_unbind_info_blk {
@@ -383,7 +358,7 @@ struct hnat_ipv4_hnapt {
u16 etype;
u32 dmac_hi;
union {
#if !defined(CONFIG_MEDIATEK_NETSYS_V2) && !defined(CONFIG_MEDIATEK_NETSYS_V3)
#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
struct hnat_winfo winfo;
#endif
u16 vlan2;
@@ -392,17 +367,7 @@ struct hnat_ipv4_hnapt {
u32 smac_hi;
u16 pppoe_id;
u16 smac_lo;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
u16 minfo;
u16 resv4;
struct hnat_winfo winfo;
struct hnat_winfo_pao winfo_pao;
u32 cdrt_id : 8;
u32 tops_entry : 6;
u32 resv5 : 2;
u32 tport_id : 4;
u32 resv6 : 12;
#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
u16 minfo;
struct hnat_winfo winfo;
#endif
@@ -445,7 +410,7 @@ struct hnat_ipv4_dslite {
u16 etype;
u32 dmac_hi;
union {
#if !defined(CONFIG_MEDIATEK_NETSYS_V2) && !defined(CONFIG_MEDIATEK_NETSYS_V3)
#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
struct hnat_winfo winfo;
#endif
u16 vlan2;
@@ -454,89 +419,13 @@ struct hnat_ipv4_dslite {
u32 smac_hi;
u16 pppoe_id;
u16 smac_lo;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
u16 minfo;
u16 resv3;
struct hnat_winfo winfo;
struct hnat_winfo_pao winfo_pao;
u32 cdrt_id : 8;
u32 tops_entry : 6;
u32 resv4 : 2;
u32 tport_id : 4;
u32 resv5 : 12;
#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
u16 minfo;
struct hnat_winfo winfo;
#endif
} __packed;
struct hnat_ipv4_mape {
union {
struct hnat_bind_info_blk bfib1;
struct hnat_unbind_info_blk udib1;
u32 info_blk1;
};
u32 sip;
u32 dip;
u16 dport;
u16 sport;
u32 tunnel_sipv6_0;
u32 tunnel_sipv6_1;
u32 tunnel_sipv6_2;
u32 tunnel_sipv6_3;
u32 tunnel_dipv6_0;
u32 tunnel_dipv6_1;
u32 tunnel_dipv6_2;
u32 tunnel_dipv6_3;
u8 flow_lbl[3]; /* in order to consist with Linux kernel (should be 20bits) */
u8 priority; /* in order to consist with Linux kernel (should be 8bits) */
u32 hop_limit : 8;
u32 resv2 : 18;
u32 act_dp : 6; /* UDF */
union {
struct hnat_info_blk2 iblk2;
struct hnat_info_blk2_whnat iblk2w;
u32 info_blk2;
};
u16 vlan1;
u16 etype;
u32 dmac_hi;
union {
#if !defined(CONFIG_MEDIATEK_NETSYS_V2) && !defined(CONFIG_MEDIATEK_NETSYS_V3)
struct hnat_winfo winfo;
#endif
u16 vlan2;
};
u16 dmac_lo;
u32 smac_hi;
u16 pppoe_id;
u16 smac_lo;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
u16 minfo;
u16 resv3;
u32 new_sip;
u32 new_dip;
u16 new_dport;
u16 new_sport;
struct hnat_winfo winfo;
struct hnat_winfo_pao winfo_pao;
u32 cdrt_id : 8;
u32 tops_entry : 6;
u32 resv4 : 2;
u32 tport_id : 4;
u32 resv5 : 12;
#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
u16 minfo;
struct hnat_winfo winfo;
u32 new_sip;
u32 new_dip;
u16 new_dport;
u16 new_sport;
u32 new_dip;
u16 new_dport;
u16 new_sport;
#endif
} __packed;
@@ -572,7 +461,7 @@ struct hnat_ipv6_3t_route {
u16 etype;
u32 dmac_hi;
union {
#if !defined(CONFIG_MEDIATEK_NETSYS_V2) && !defined(CONFIG_MEDIATEK_NETSYS_V3)
#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
struct hnat_winfo winfo;
#endif
u16 vlan2;
@@ -581,17 +470,7 @@ struct hnat_ipv6_3t_route {
u32 smac_hi;
u16 pppoe_id;
u16 smac_lo;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
u16 minfo;
u16 resv5;
struct hnat_winfo winfo;
struct hnat_winfo_pao winfo_pao;
u32 cdrt_id : 8;
u32 tops_entry : 6;
u32 resv6 : 2;
u32 tport_id : 4;
u32 resv7 : 12;
#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
u16 minfo;
struct hnat_winfo winfo;
#endif
@@ -630,7 +509,7 @@ struct hnat_ipv6_5t_route {
u16 etype;
u32 dmac_hi;
union {
#if !defined(CONFIG_MEDIATEK_NETSYS_V2) && !defined(CONFIG_MEDIATEK_NETSYS_V3)
#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
struct hnat_winfo winfo;
#endif
u16 vlan2;
@@ -639,17 +518,7 @@ struct hnat_ipv6_5t_route {
u32 smac_hi;
u16 pppoe_id;
u16 smac_lo;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
u16 minfo;
u16 resv5;
struct hnat_winfo winfo;
struct hnat_winfo_pao winfo_pao;
u32 cdrt_id : 8;
u32 tops_entry : 6;
u32 resv6 : 2;
u32 tport_id : 4;
u32 resv7 : 12;
#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
u16 minfo;
struct hnat_winfo winfo;
#endif
@@ -693,7 +562,7 @@ struct hnat_ipv6_6rd {
u16 etype;
u32 dmac_hi;
union {
#if !defined(CONFIG_MEDIATEK_NETSYS_V2) && !defined(CONFIG_MEDIATEK_NETSYS_V3)
#if !defined(CONFIG_MEDIATEK_NETSYS_V2)
struct hnat_winfo winfo;
#endif
u16 vlan2;
@@ -702,31 +571,13 @@ struct hnat_ipv6_6rd {
u32 smac_hi;
u16 pppoe_id;
u16 smac_lo;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
u16 minfo;
u16 resv3;
struct hnat_winfo winfo;
struct hnat_winfo_pao winfo_pao;
u32 cdrt_id : 8;
u32 tops_entry : 6;
u32 resv4 : 2;
u32 tport_id : 4;
u32 resv5 : 12;
u32 resv6;
u32 resv7;
u32 resv8;
u32 resv9;
u32 resv10;
u32 resv11;
u32 resv12;
u32 resv13;
#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
u16 minfo;
struct hnat_winfo winfo;
u32 resv3;
u32 resv4;
u16 new_dport;
u16 new_sport;
u32 resv4;
u16 new_dport;
u16 new_sport;
#endif
} __packed;
@@ -736,7 +587,6 @@ struct foe_entry {
struct hnat_bind_info_blk bfib1;
struct hnat_ipv4_hnapt ipv4_hnapt;
struct hnat_ipv4_dslite ipv4_dslite;
struct hnat_ipv4_mape ipv4_mape;
struct hnat_ipv6_3t_route ipv6_3t_route;
struct hnat_ipv6_5t_route ipv6_5t_route;
struct hnat_ipv6_6rd ipv6_6rd;
@@ -755,9 +605,7 @@ struct foe_entry {
#define MAX_EXT_DEVS (0x3fU)
#define MAX_IF_NUM 64
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
#define MAX_PPE_NUM 3
#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define MAX_PPE_NUM 2
#else
#define MAX_PPE_NUM 1
@@ -779,11 +627,10 @@ struct hnat_accounting {
};
enum mtk_hnat_version {
MTK_HNAT_V1 = 1, /* version 1: mt7621, mt7623 */
MTK_HNAT_V2, /* version 2: mt7622 */
MTK_HNAT_V3, /* version 3: mt7629 */
MTK_HNAT_V4, /* version 4: mt7981, mt7986 */
MTK_HNAT_V5, /* version 5: mt7988 */
MTK_HNAT_V1 = 1, /* version 1: mt7621, mt7623 */
MTK_HNAT_V2, /* version 2: mt7622 */
MTK_HNAT_V3, /* version 3: mt7629 */
MTK_HNAT_V4, /* version 4: mt7986 */
};
struct mtk_hnat_data {
@@ -813,7 +660,6 @@ struct mtk_hnat {
/*devices we plays for*/
char wan[IFNAMSIZ];
char lan[IFNAMSIZ];
char lan2[IFNAMSIZ];
char ppd[IFNAMSIZ];
u16 lvid;
u16 wvid;
@@ -856,7 +702,7 @@ enum FoeIpAct {
IPV6_3T_ROUTE = 4,
IPV6_5T_ROUTE = 5,
IPV6_6RD = 7,
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
IPV4_MAP_T = 8,
IPV4_MAP_E = 9,
#else
@@ -873,9 +719,8 @@ enum FoeIpAct {
#define HASH_SEED_KEY 0x12345678
/*PPE_TB_CFG value*/
#define ENTRY_128B 0
#define ENTRY_96B 1
#define ENTRY_80B 1
#define ENTRY_64B 0
#define TABLE_1K 0
#define TABLE_2K 1
#define TABLE_4K 2
@@ -918,22 +763,6 @@ enum FoeIpAct {
(BITS_GDM_UFRC_P_PPE | BITS_GDM_BFRC_P_PPE | BITS_GDM_MFRC_P_PPE | \
BITS_GDM_OFRC_P_PPE)
#define BITS_GDM_UFRC_P_PPE1 (NR_PPE1_PORT << 12)
#define BITS_GDM_BFRC_P_PPE1 (NR_PPE1_PORT << 8)
#define BITS_GDM_MFRC_P_PPE1 (NR_PPE1_PORT << 4)
#define BITS_GDM_OFRC_P_PPE1 (NR_PPE1_PORT << 0)
#define BITS_GDM_ALL_FRC_P_PPE1 \
(BITS_GDM_UFRC_P_PPE1 | BITS_GDM_BFRC_P_PPE1 | \
BITS_GDM_MFRC_P_PPE1 | BITS_GDM_OFRC_P_PPE1)
#define BITS_GDM_UFRC_P_PPE2 (NR_PPE2_PORT << 12)
#define BITS_GDM_BFRC_P_PPE2 (NR_PPE2_PORT << 8)
#define BITS_GDM_MFRC_P_PPE2 (NR_PPE2_PORT << 4)
#define BITS_GDM_OFRC_P_PPE2 (NR_PPE2_PORT << 0)
#define BITS_GDM_ALL_FRC_P_PPE2 \
(BITS_GDM_UFRC_P_PPE2 | BITS_GDM_BFRC_P_PPE2 | \
BITS_GDM_MFRC_P_PPE2 | BITS_GDM_OFRC_P_PPE2)
#define BITS_GDM_UFRC_P_CPU_PDMA (NR_PDMA_PORT << 12)
#define BITS_GDM_BFRC_P_CPU_PDMA (NR_PDMA_PORT << 8)
#define BITS_GDM_MFRC_P_CPU_PDMA (NR_PDMA_PORT << 4)
@@ -970,9 +799,7 @@ enum FoeIpAct {
#define skb_hnat_is_hashed(skb) \
(skb_hnat_entry(skb) != 0x3fff && skb_hnat_entry(skb) < hnat_priv->foe_etry_num)
#define FROM_GE_LAN_GRP(skb) (FROM_GE_LAN(skb) | FROM_GE_LAN2(skb))
#define FROM_GE_LAN(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_LAN)
#define FROM_GE_LAN2(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_LAN2)
#define FROM_GE_WAN(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_WAN)
#define FROM_GE_PPD(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_PPD)
#define FROM_GE_VIRTUAL(skb) (skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL)
@@ -984,10 +811,8 @@ enum FoeIpAct {
#define FOE_MAGIC_EXT 0x3
#define FOE_MAGIC_GE_VIRTUAL 0x4
#define FOE_MAGIC_GE_PPD 0x5
#define FOE_MAGIC_GE_LAN2 0x6
#define FOE_MAGIC_WED0 0x78
#define FOE_MAGIC_WED1 0x79
#define FOE_MAGIC_WED2 0x7A
#define FOE_INVALID 0xf
#define index6b(i) (0x3fU - i)
@@ -1001,11 +826,10 @@ enum FoeIpAct {
#define NR_PDMA_PORT 0
#define NR_GMAC1_PORT 1
#define NR_GMAC2_PORT 2
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
#define NR_WHNAT_WDMA_PORT EINVAL
#define NR_PPE0_PORT 3
#define NR_PPE1_PORT 4
#define NR_PPE2_PORT 0xC
#else
#define NR_WHNAT_WDMA_PORT 3
#define NR_PPE0_PORT 4
@@ -1014,15 +838,10 @@ enum FoeIpAct {
#define NR_DISCARD 7
#define NR_WDMA0_PORT 8
#define NR_WDMA1_PORT 9
#define NR_GMAC3_PORT 15
#define LAN_DEV_NAME hnat_priv->lan
#define LAN2_DEV_NAME hnat_priv->lan2
#define IS_WAN(dev) \
(!strncmp((dev)->name, hnat_priv->wan, strlen(hnat_priv->wan)))
#define IS_LAN_GRP(dev) (IS_LAN(dev) | IS_LAN2(dev))
#define IS_LAN(dev) (!strncmp(dev->name, LAN_DEV_NAME, strlen(LAN_DEV_NAME)))
#define IS_LAN2(dev) (!strncmp(dev->name, LAN2_DEV_NAME, \
strlen(LAN2_DEV_NAME)))
#define IS_BR(dev) (!strncmp(dev->name, "br", 2))
#define IS_WHNAT(dev) \
((hnat_priv->data->whnat && \
@@ -1104,7 +923,6 @@ static inline u32 hnat_dsa_fill_stag(const struct net_device *netdev,
struct flow_offload_hw_path *hw_path,
u16 eth_proto, int mape)
{
return 0;
}
static inline bool hnat_dsa_is_enable(struct mtk_hnat *priv)

View File

@@ -41,7 +41,6 @@ static const char * const packet_type[] = {
static uint8_t *show_cpu_reason(struct sk_buff *skb)
{
static u8 buf[32];
int ret;
switch (skb_hnat_reason(skb)) {
case TTL_0:
@@ -90,12 +89,8 @@ static uint8_t *show_cpu_reason(struct sk_buff *skb)
return "Pre bind\n";
}
ret = snprintf(buf, sizeof(buf), "CPU Reason Error - %X\n",
skb_hnat_entry(skb));
if (ret == strlen(buf))
return buf;
else
return "CPU Reason Error\n";
sprintf(buf, "CPU Reason Error - %X\n", skb_hnat_entry(skb));
return buf;
}
uint32_t foe_dump_pkt(struct sk_buff *skb)
@@ -407,10 +402,10 @@ int entry_detail(u32 ppe_id, int index)
entry->ipv4_dslite.tunnel_dipv6_1,
entry->ipv4_dslite.tunnel_dipv6_2,
entry->ipv4_dslite.tunnel_dipv6_3);
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
} else if (IS_IPV4_MAPE(entry)) {
nsaddr = htonl(entry->ipv4_mape.new_sip);
ndaddr = htonl(entry->ipv4_mape.new_dip);
nsaddr = htonl(entry->ipv4_dslite.new_sip);
ndaddr = htonl(entry->ipv4_dslite.new_dip);
pr_info("Information Block 2: %08X\n",
entry->ipv4_dslite.info_blk2);
@@ -419,8 +414,8 @@ int entry_detail(u32 ppe_id, int index)
&saddr, entry->ipv4_dslite.sport,
&daddr, entry->ipv4_dslite.dport);
pr_info("IPv4 MAP-E New IP/Port: %pI4:%d->%pI4:%d\n",
&nsaddr, entry->ipv4_mape.new_sport,
&ndaddr, entry->ipv4_mape.new_dport);
&nsaddr, entry->ipv4_dslite.new_sport,
&ndaddr, entry->ipv4_dslite.new_dport);
pr_info("EG DIPv6: %08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
entry->ipv4_dslite.tunnel_sipv6_0,
entry->ipv4_dslite.tunnel_sipv6_1,
@@ -492,12 +487,6 @@ int entry_detail(u32 ppe_id, int index)
entry->ipv4_hnapt.bfib1.udp == 0 ?
"TCP" : entry->ipv4_hnapt.bfib1.udp == 1 ?
"UDP" : "Unknown");
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
pr_info("tport_id = %d, tops_entry = %d, cdrt_id = %d\n",
entry->ipv4_hnapt.tport_id,
entry->ipv4_hnapt.tops_entry,
entry->ipv4_hnapt.cdrt_id);
#endif
pr_info("=========================================\n\n");
} else {
*((u32 *)h_source) = swab32(entry->ipv6_5t_route.smac_hi);
@@ -521,12 +510,6 @@ int entry_detail(u32 ppe_id, int index)
entry->ipv6_5t_route.bfib1.udp == 0 ?
"TCP" : entry->ipv6_5t_route.bfib1.udp == 1 ?
"UDP" : "Unknown");
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
pr_info("tport_id = %d, tops_entry = %d, cdrt_id = %d\n",
entry->ipv6_5t_route.tport_id,
entry->ipv6_5t_route.tops_entry,
entry->ipv6_5t_route.cdrt_id);
#endif
pr_info("=========================================\n\n");
}
return 0;
@@ -724,7 +707,7 @@ int read_mib(struct mtk_hnat *h, u32 ppe_id,
u32 index, u64 *bytes, u64 *packets)
{
int ret;
u32 val, cnt_r0, cnt_r1, cnt_r2, cnt_r3;
u32 val, cnt_r0, cnt_r1, cnt_r2;
if (ppe_id >= CFG_PPE_NUM)
return -EINVAL;
@@ -740,16 +723,8 @@ int read_mib(struct mtk_hnat *h, u32 ppe_id,
cnt_r0 = readl(h->ppe_base[ppe_id] + PPE_MIB_SER_R0);
cnt_r1 = readl(h->ppe_base[ppe_id] + PPE_MIB_SER_R1);
cnt_r2 = readl(h->ppe_base[ppe_id] + PPE_MIB_SER_R2);
if (hnat_priv->data->version == MTK_HNAT_V5) {
cnt_r3 = readl(h->ppe_base[ppe_id] + PPE_MIB_SER_R3);
*bytes = cnt_r0 + ((u64)cnt_r1 << 32);
*packets = cnt_r2 + ((u64)cnt_r3 << 32);
} else {
*bytes = cnt_r0 + ((u64)(cnt_r1 & 0xffff) << 32);
*packets = ((cnt_r1 & 0xffff0000) >> 16) +
((u64)(cnt_r2 & 0xffffff) << 16);
}
*bytes = cnt_r0 + ((u64)(cnt_r1 & 0xffff) << 32);
*packets = ((cnt_r1 & 0xffff0000) >> 16) + ((cnt_r2 & 0xffffff) << 16);
return 0;
@@ -764,9 +739,6 @@ struct hnat_accounting *hnat_get_count(struct mtk_hnat *h, u32 ppe_id,
if (ppe_id >= CFG_PPE_NUM)
return NULL;
if (index >= hnat_priv->foe_etry_num)
return NULL;
if (!hnat_priv->data->per_flow_accounting)
return NULL;
@@ -775,7 +747,7 @@ struct hnat_accounting *hnat_get_count(struct mtk_hnat *h, u32 ppe_id,
h->acct[ppe_id][index].bytes += bytes;
h->acct[ppe_id][index].packets += packets;
if (diff) {
diff->bytes = bytes;
diff->packets = packets;
@@ -974,12 +946,12 @@ static int __hnat_debug_show(struct seq_file *m, void *private, u32 ppe_id)
ntohs(entry->ipv6_5t_route.etype),
entry->ipv6_5t_route.info_blk1,
entry->ipv6_5t_route.info_blk2);
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
} else if (IS_IPV4_MAPE(entry)) {
__be32 saddr = htonl(entry->ipv4_dslite.sip);
__be32 daddr = htonl(entry->ipv4_dslite.dip);
__be32 nsaddr = htonl(entry->ipv4_mape.new_sip);
__be32 ndaddr = htonl(entry->ipv4_mape.new_dip);
__be32 nsaddr = htonl(entry->ipv4_dslite.new_sip);
__be32 ndaddr = htonl(entry->ipv4_dslite.new_dip);
u32 ipv6_tsip0 = entry->ipv4_dslite.tunnel_sipv6_0;
u32 ipv6_tsip1 = entry->ipv4_dslite.tunnel_sipv6_1;
u32 ipv6_tsip2 = entry->ipv4_dslite.tunnel_sipv6_2;
@@ -1002,8 +974,8 @@ static int __hnat_debug_show(struct seq_file *m, void *private, u32 ppe_id)
es(entry), pt(entry),
&saddr, entry->ipv4_dslite.sport,
&daddr, entry->ipv4_dslite.dport,
&nsaddr, entry->ipv4_mape.new_sport,
&ndaddr, entry->ipv4_mape.new_dport,
&nsaddr, entry->ipv4_dslite.new_sport,
&ndaddr, entry->ipv4_dslite.new_dport,
ipv6_tsip0, ipv6_tsip1, ipv6_tsip2,
ipv6_tsip3, ipv6_tdip0, ipv6_tdip1,
ipv6_tdip2, ipv6_tdip3, h_source, h_dest,
@@ -1259,17 +1231,17 @@ void dbg_dump_entry(struct seq_file *m, struct foe_entry *entry,
entry->ipv4_dslite.tunnel_dipv6_1,
entry->ipv4_dslite.tunnel_dipv6_2,
entry->ipv4_dslite.tunnel_dipv6_3);
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
} else if (IS_IPV4_MAPE(entry)) {
nsaddr = htonl(entry->ipv4_mape.new_sip);
ndaddr = htonl(entry->ipv4_mape.new_dip);
nsaddr = htonl(entry->ipv4_dslite.new_sip);
ndaddr = htonl(entry->ipv4_dslite.new_dip);
seq_printf(m,
"IPv4 MAP-E(%d): %pI4:%d->%pI4:%d => %pI4:%d->%pI4:%d | Tunnel=%08X:%08X:%08X:%08X->%08X:%08X:%08X:%08X\n",
index, &saddr, entry->ipv4_dslite.sport,
&daddr, entry->ipv4_dslite.dport,
&nsaddr, entry->ipv4_mape.new_sport,
&ndaddr, entry->ipv4_mape.new_dport,
&nsaddr, entry->ipv4_dslite.new_sport,
&ndaddr, entry->ipv4_dslite.new_dport,
entry->ipv4_dslite.tunnel_sipv6_0,
entry->ipv4_dslite.tunnel_sipv6_1,
entry->ipv4_dslite.tunnel_sipv6_2,
@@ -1859,15 +1831,6 @@ static ssize_t hnat_queue_write(struct file *file, const char __user *buf,
line[length] = '\0';
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
if (max_rate > 100000000 || max_rate < 0 ||
min_rate > 100000000 || min_rate < 0)
#else
if (max_rate > 10000000 || max_rate < 0 ||
min_rate > 10000000 || min_rate < 0)
#endif
return -EINVAL;
while (max_rate > 127) {
max_rate /= 10;
max_exp++;
@@ -2157,7 +2120,7 @@ static void hnat_qos_pppq_enable(void)
static ssize_t hnat_qos_toggle_write(struct file *file, const char __user *buffer,
size_t count, loff_t *data)
{
char buf[8] = {0};
char buf[8];
int len = count;
if ((len > 8) || copy_from_user(buf, buffer, len))
@@ -2207,210 +2170,6 @@ static const struct file_operations hnat_version_fops = {
.release = single_release,
};
static u32 hnat_get_ppe_hash(u32 sip, u32 dip, u32 sport, u32 dport)
{
u32 hv1 = sport << 16 | dport;
u32 hv2 = dip;
u32 hv3 = sip;
u32 hash;
hash = (hv1 & hv2) | ((~hv1) & hv3);
hash = (hash >> 24) | ((hash & 0xffffff) << 8);
hash ^= hv1 ^ hv2 ^ hv3;
hash ^= hash >> 16;
hash <<= 2;
hash &= hnat_priv->foe_etry_num - 1;
return hash;
}
static u32 hnat_char2hex(const char c)
{
switch (c) {
case '0'...'9':
return 0x0 + (c - '0');
case 'a'...'f':
return 0xa + (c - 'a');
case 'A'...'F':
return 0xa + (c - 'A');
default:
pr_info("MAC format error\n");
return 0;
}
}
static void hnat_parse_mac(char *str, char *mac)
{
int i;
for (i = 0; i < ETH_ALEN; i++) {
mac[i] = (hnat_char2hex(str[i * 3]) << 4) +
(hnat_char2hex(str[i * 3 + 1]));
}
}
static void hnat_static_entry_help(void)
{
pr_info("-------------------- Usage --------------------\n");
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
pr_info("echo $0 $1 $2 ... $15 > /sys/kernel/debug/hnat/static_entry\n\n");
#else
pr_info("echo $0 $1 $2 ... $12 > /sys/kernel/debug/hnat/static_entry\n\n");
#endif
pr_info("-------------------- Parameters --------------------\n");
pr_info("$0: HASH OCT\n");
pr_info("$1: INFO1 HEX\n");
pr_info("$2: ING SIPv4 HEX\n");
pr_info("$3: ING DIPv4 HEX\n");
pr_info("$4: ING SP HEX\n");
pr_info("$5: ING DP HEX\n");
pr_info("$6: INFO2 HEX\n");
pr_info("$7: EG SIPv4 HEX\n");
pr_info("$8: EG DIPv4 HEX\n");
pr_info("$9: EG SP HEX\n");
pr_info("$10: EG DP HEX\n");
pr_info("$11: DMAC STR (00:11:22:33:44:55)\n");
pr_info("$12: SMAC STR (00:11:22:33:44:55)\n");
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
pr_info("$13: TPORT IDX HEX\n");
pr_info("$14: TOPS ENTRY HEX\n");
pr_info("$15: CDRT IDX HEX\n");
#endif
}
static int hnat_static_entry_read(struct seq_file *m, void *private)
{
hnat_static_entry_help();
return 0;
}
static int hnat_static_entry_open(struct inode *inode, struct file *file)
{
return single_open(file, hnat_static_entry_read, file->private_data);
}
static ssize_t hnat_static_entry_write(struct file *file,
const char __user *buffer,
size_t count, loff_t *data)
{
struct foe_entry *foe, entry = { 0 };
char buf[256], dmac_str[18], smac_str[18], dmac[6], smac[6];
int len = count, hash, coll = 0;
u32 ppe_id = 0;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
u32 tport_id, tops_entry, cdrt_id;
#endif
if (len >= sizeof(buf) || copy_from_user(buf, buffer, len)) {
pr_info("Input handling fail!\n");
len = sizeof(buf) - 1;
return -EFAULT;
}
buf[len] = '\0';
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
if (sscanf(buf, "%d %x %x %x %hx %hx %x %x %x %hx %hx %s %s %x %x %x",
&hash,
&entry.ipv4_hnapt.info_blk1,
&entry.ipv4_hnapt.sip,
&entry.ipv4_hnapt.dip,
&entry.ipv4_hnapt.sport,
&entry.ipv4_hnapt.dport,
&entry.ipv4_hnapt.info_blk2,
&entry.ipv4_hnapt.new_sip,
&entry.ipv4_hnapt.new_dip,
&entry.ipv4_hnapt.new_sport,
&entry.ipv4_hnapt.new_dport,
dmac_str, smac_str, &tport_id, &tops_entry, &cdrt_id) != 16)
return -EFAULT;
entry.ipv4_hnapt.tport_id = tport_id;
entry.ipv4_hnapt.tops_entry = tops_entry;
entry.ipv4_hnapt.cdrt_id = cdrt_id;
if ((hash >= hnat_priv->foe_etry_num) || (hash < -1) ||
(tport_id > 16) || (tport_id < 0) ||
(tops_entry > 64) || (tops_entry < 0) ||
(cdrt_id > 255) || (cdrt_id < 0) ||
(entry.ipv4_hnapt.sport > 65535) ||
(entry.ipv4_hnapt.sport < 0) ||
(entry.ipv4_hnapt.dport > 65535) ||
(entry.ipv4_hnapt.dport < 0) ||
(entry.ipv4_hnapt.new_sport > 65535) ||
(entry.ipv4_hnapt.new_sport < 0) ||
(entry.ipv4_hnapt.new_dport > 65535) ||
(entry.ipv4_hnapt.new_dport < 0)) {
hnat_static_entry_help();
return -EFAULT;
}
#else
if (sscanf(buf, "%d %x %x %x %hx %hx %x %x %x %hx %hx %s %s",
&hash,
&entry.ipv4_hnapt.info_blk1,
&entry.ipv4_hnapt.sip,
&entry.ipv4_hnapt.dip,
&entry.ipv4_hnapt.sport,
&entry.ipv4_hnapt.dport,
&entry.ipv4_hnapt.info_blk2,
&entry.ipv4_hnapt.new_sip,
&entry.ipv4_hnapt.new_dip,
&entry.ipv4_hnapt.new_sport,
&entry.ipv4_hnapt.new_dport,
dmac_str, smac_str) != 13)
return -EFAULT;
if ((hash >= hnat_priv->foe_etry_num) || (hash < -1) ||
(entry.ipv4_hnapt.sport > 65535) ||
(entry.ipv4_hnapt.sport < 0) ||
(entry.ipv4_hnapt.dport > 65535) ||
(entry.ipv4_hnapt.dport < 0) ||
(entry.ipv4_hnapt.new_sport > 65535) ||
(entry.ipv4_hnapt.new_sport < 0) ||
(entry.ipv4_hnapt.new_dport > 65535) ||
(entry.ipv4_hnapt.new_dport < 0)) {
hnat_static_entry_help();
return -EFAULT;
}
#endif
hnat_parse_mac(smac_str, smac);
hnat_parse_mac(dmac_str, dmac);
entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)dmac));
entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)&dmac[4]));
entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)smac));
entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&smac[4]));
if (hash == -1) {
hash = hnat_get_ppe_hash(entry.ipv4_hnapt.sip,
entry.ipv4_hnapt.dip,
entry.ipv4_hnapt.sport,
entry.ipv4_hnapt.dport);
}
foe = &hnat_priv->foe_table_cpu[ppe_id][hash];
while ((foe->ipv4_hnapt.bfib1.state == BIND) && (coll < 4)) {
hash++;
coll++;
foe = &hnat_priv->foe_table_cpu[ppe_id][hash];
};
memcpy(foe, &entry, sizeof(entry));
debug_level = 7;
entry_detail(ppe_id, hash);
return len;
}
static const struct file_operations hnat_static_fops = {
.open = hnat_static_entry_open,
.read = seq_read,
.llseek = seq_lseek,
.write = hnat_static_entry_write,
.release = single_release,
};
int get_ppe_mib(u32 ppe_id, int index, u64 *pkt_cnt, u64 *byte_cnt)
{
struct mtk_hnat *h = hnat_priv;
@@ -2527,12 +2286,8 @@ int hnat_init_debugfs(struct mtk_hnat *h)
h->regset[i]->nregs = ARRAY_SIZE(hnat_regs);
h->regset[i]->base = h->ppe_base[i];
ret = snprintf(name, sizeof(name), "regdump%ld", i);
if (ret != strlen(name)) {
ret = -ENOMEM;
goto err1;
}
file = debugfs_create_regset32(name, 0444,
snprintf(name, sizeof(name), "regdump%ld", i);
file = debugfs_create_regset32(name, S_IRUGO,
root, h->regset[i]);
if (!file) {
dev_notice(h->dev, "%s:err at %d\n", __func__, __LINE__);
@@ -2541,49 +2296,39 @@ int hnat_init_debugfs(struct mtk_hnat *h)
}
}
debugfs_create_file("all_entry", 0444, root, h, &hnat_debug_fops);
debugfs_create_file("external_interface", 0444, root, h,
debugfs_create_file("all_entry", S_IRUGO, root, h, &hnat_debug_fops);
debugfs_create_file("external_interface", S_IRUGO, root, h,
&hnat_ext_fops);
debugfs_create_file("whnat_interface", 0444, root, h,
debugfs_create_file("whnat_interface", S_IRUGO, root, h,
&hnat_whnat_fops);
debugfs_create_file("cpu_reason", 0444, root, h,
debugfs_create_file("cpu_reason", S_IFREG | S_IRUGO, root, h,
&cpu_reason_fops);
debugfs_create_file("hnat_entry", 0444, root, h,
debugfs_create_file("hnat_entry", S_IRUGO | S_IRUGO, root, h,
&hnat_entry_fops);
debugfs_create_file("hnat_setting", 0444, root, h,
debugfs_create_file("hnat_setting", S_IRUGO | S_IRUGO, root, h,
&hnat_setting_fops);
debugfs_create_file("mcast_table", 0444, root, h,
debugfs_create_file("mcast_table", S_IRUGO | S_IRUGO, root, h,
&hnat_mcast_fops);
debugfs_create_file("hook_toggle", 0444, root, h,
debugfs_create_file("hook_toggle", S_IRUGO | S_IRUGO, root, h,
&hnat_hook_toggle_fops);
debugfs_create_file("mape_toggle", 0444, root, h,
debugfs_create_file("mape_toggle", S_IRUGO | S_IRUGO, root, h,
&hnat_mape_toggle_fops);
debugfs_create_file("qos_toggle", 0444, root, h,
debugfs_create_file("qos_toggle", S_IRUGO | S_IRUGO, root, h,
&hnat_qos_toggle_fops);
debugfs_create_file("hnat_version", 0444, root, h,
debugfs_create_file("hnat_version", S_IRUGO | S_IRUGO, root, h,
&hnat_version_fops);
debugfs_create_file("hnat_ppd_if", 0444, root, h,
debugfs_create_file("hnat_ppd_if", S_IRUGO | S_IRUGO, root, h,
&hnat_ppd_if_fops);
debugfs_create_file("static_entry", 0444, root, h,
&hnat_static_fops);
for (i = 0; i < hnat_priv->data->num_of_sch; i++) {
ret = snprintf(name, sizeof(name), "qdma_sch%ld", i);
if (ret != strlen(name)) {
ret = -ENOMEM;
goto err1;
}
debugfs_create_file(name, 0444, root, (void *)i,
snprintf(name, sizeof(name), "qdma_sch%ld", i);
debugfs_create_file(name, S_IRUGO, root, (void *)i,
&hnat_sched_fops);
}
for (i = 0; i < MTK_QDMA_TX_NUM; i++) {
ret = snprintf(name, sizeof(name), "qdma_txq%ld", i);
if (ret != strlen(name)) {
ret = -ENOMEM;
goto err1;
}
debugfs_create_file(name, 0444, root, (void *)i,
snprintf(name, sizeof(name), "qdma_txq%ld", i);
debugfs_create_file(name, S_IRUGO, root, (void *)i,
&hnat_queue_fops);
}

View File

@@ -297,11 +297,11 @@ int hnat_mcast_enable(u32 ppe_id)
INIT_WORK(&pmcast->work, hnat_mcast_nlmsg_handler);
pmcast->queue = create_singlethread_workqueue("ppe_mcast");
if (!pmcast->queue)
goto err1;
goto err;
pmcast->msock = hnat_mcast_netlink_open(&init_net);
if (!pmcast->msock)
goto err2;
goto err;
hnat_priv->pmcast = pmcast;
@@ -325,10 +325,11 @@ int hnat_mcast_enable(u32 ppe_id)
cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MCAST_PPSE, MC_P3_PPSE, 5);
return 0;
err2:
err:
if (pmcast->queue)
destroy_workqueue(pmcast->queue);
err1:
if (pmcast->msock)
sock_release(pmcast->msock);
kfree(pmcast);
return -1;

View File

@@ -33,7 +33,7 @@
#include "../mtk_eth_reset.h"
#define do_ge2ext_fast(dev, skb) \
((IS_LAN_GRP(dev) || IS_WAN(dev) || IS_PPD(dev)) && \
((IS_LAN(dev) || IS_WAN(dev) || IS_PPD(dev)) && \
skb_hnat_is_hashed(skb) && \
skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU)
#define do_ext2ge_fast_learn(dev, skb) \
@@ -189,7 +189,7 @@ void foe_clear_all_bind_entries(struct net_device *dev)
int i, hash_index;
struct foe_entry *entry;
if (!IS_LAN_GRP(dev) && !IS_WAN(dev) &&
if (!IS_LAN(dev) && !IS_WAN(dev) &&
!find_extif_from_devname(dev->name) &&
!dev->netdev_ops->ndo_flow_offload_check)
return;
@@ -217,11 +217,9 @@ void foe_clear_all_bind_entries(struct net_device *dev)
static void gmac_ppe_fwd_enable(struct net_device *dev)
{
if (IS_LAN(dev) || IS_GMAC1_MODE)
set_gmac_ppe_fwd(NR_GMAC1_PORT, 1);
set_gmac_ppe_fwd(0, 1);
else if (IS_WAN(dev))
set_gmac_ppe_fwd(NR_GMAC2_PORT, 1);
else if (IS_LAN2(dev))
set_gmac_ppe_fwd(NR_GMAC3_PORT, 1);
set_gmac_ppe_fwd(1, 1);
}
int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
@@ -421,10 +419,6 @@ unsigned int do_hnat_ext_to_ge2(struct sk_buff *skb, const char *func)
trace_printk("%s: vlan_prot=0x%x, vlan_tci=%x\n", __func__,
ntohs(skb->vlan_proto), skb->vlan_tci);
if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
skb_hnat_ppe(skb) >= CFG_PPE_NUM)
return -1;
dev = get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK);
if (dev) {
@@ -440,11 +434,9 @@ unsigned int do_hnat_ext_to_ge2(struct sk_buff *skb, const char *func)
}
if (IS_BOND_MODE &&
(((hnat_priv->data->version == MTK_HNAT_V4 ||
hnat_priv->data->version == MTK_HNAT_V5) &&
(((hnat_priv->data->version == MTK_HNAT_V4) &&
(skb_hnat_entry(skb) != 0x7fff)) ||
((hnat_priv->data->version != MTK_HNAT_V4 &&
hnat_priv->data->version != MTK_HNAT_V5) &&
((hnat_priv->data->version != MTK_HNAT_V4) &&
(skb_hnat_entry(skb) != 0x3fff))))
skb_set_hash(skb, skb_hnat_entry(skb) >> 1, PKT_HASH_TYPE_L4);
@@ -483,10 +475,6 @@ unsigned int do_hnat_ge_to_ext(struct sk_buff *skb, const char *func)
struct foe_entry *entry;
struct net_device *dev;
if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
skb_hnat_ppe(skb) >= CFG_PPE_NUM)
return -1;
entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
if (IS_IPV4_GRP(entry))
@@ -581,8 +569,6 @@ static inline void hnat_set_iif(const struct nf_hook_state *state,
return;
} else if (IS_LAN(state->in)) {
skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN;
} else if (IS_LAN2(state->in)) {
skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN2;
} else if (IS_PPD(state->in)) {
skb_hnat_iface(skb) = FOE_MAGIC_GE_PPD;
} else if (IS_EXT(state->in)) {
@@ -681,7 +667,74 @@ unsigned int do_hnat_mape_w2l_fast(struct sk_buff *skb, const struct net_device
return -1;
}
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
unsigned int do_hnat_mape_w2l(struct sk_buff *skb, const struct net_device *in,
const char *func)
{
struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct iphdr _iphdr;
struct iphdr *iph;
struct foe_entry *entry;
struct tcpudphdr _ports;
const struct tcpudphdr *pptr;
int udp = 0;
/* WAN -> LAN/WLAN MapE learn info(include innner IPv4 header info). */
if (ip6h->nexthdr == NEXTHDR_IPIP) {
entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
entry->ipv4_dslite.tunnel_sipv6_0 =
ntohl(ip6h->saddr.s6_addr32[0]);
entry->ipv4_dslite.tunnel_sipv6_1 =
ntohl(ip6h->saddr.s6_addr32[1]);
entry->ipv4_dslite.tunnel_sipv6_2 =
ntohl(ip6h->saddr.s6_addr32[2]);
entry->ipv4_dslite.tunnel_sipv6_3 =
ntohl(ip6h->saddr.s6_addr32[3]);
entry->ipv4_dslite.tunnel_dipv6_0 =
ntohl(ip6h->daddr.s6_addr32[0]);
entry->ipv4_dslite.tunnel_dipv6_1 =
ntohl(ip6h->daddr.s6_addr32[1]);
entry->ipv4_dslite.tunnel_dipv6_2 =
ntohl(ip6h->daddr.s6_addr32[2]);
entry->ipv4_dslite.tunnel_dipv6_3 =
ntohl(ip6h->daddr.s6_addr32[3]);
ppe_fill_flow_lbl(entry, ip6h);
iph = skb_header_pointer(skb, IPV6_HDR_LEN,
sizeof(_iphdr), &_iphdr);
if (unlikely(!iph))
return NF_ACCEPT;
switch (iph->protocol) {
case IPPROTO_UDP:
udp = 1;
case IPPROTO_TCP:
break;
default:
return NF_ACCEPT;
}
pptr = skb_header_pointer(skb, IPV6_HDR_LEN + iph->ihl * 4,
sizeof(_ports), &_ports);
if (unlikely(!pptr))
return NF_ACCEPT;
entry->bfib1.udp = udp;
entry->ipv4_dslite.new_sip = ntohl(iph->saddr);
entry->ipv4_dslite.new_dip = ntohl(iph->daddr);
entry->ipv4_dslite.new_sport = ntohs(pptr->src);
entry->ipv4_dslite.new_dport = ntohs(pptr->dst);
return 0;
}
return -1;
}
#endif
static unsigned int is_ppe_support_type(struct sk_buff *skb)
{
@@ -738,9 +791,6 @@ static unsigned int
mtk_hnat_ipv6_nf_pre_routing(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
if (!skb)
goto drop;
if (!is_ppe_support_type(skb)) {
hnat_set_head_frags(state, skb, 1, hnat_set_alg);
return NF_ACCEPT;
@@ -754,6 +804,8 @@ mtk_hnat_ipv6_nf_pre_routing(void *priv, struct sk_buff *skb,
if (do_ext2ge_fast_try(state->in, skb)) {
if (!do_hnat_ext_to_ge(skb, state->in, __func__))
return NF_STOLEN;
if (!skb)
goto drop;
return NF_ACCEPT;
}
@@ -766,29 +818,30 @@ mtk_hnat_ipv6_nf_pre_routing(void *priv, struct sk_buff *skb,
goto drop;
}
#if !(defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3))
/* MapE need remove ipv6 header and pingpong. */
if (do_mape_w2l_fast(state->in, skb)) {
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
if (mape_toggle && do_hnat_mape_w2l(skb, state->in, __func__))
return NF_ACCEPT;
#else
if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
return NF_STOLEN;
else
return NF_ACCEPT;
#endif
}
if (is_from_mape(skb))
clr_from_extge(skb);
#endif
return NF_ACCEPT;
drop:
if (skb)
printk_ratelimited(KERN_WARNING
"%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
"sport=0x%x, reason=0x%x, alg=0x%x)\n",
__func__, state->in->name, skb_hnat_iface(skb),
HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
skb_hnat_sport(skb), skb_hnat_reason(skb),
skb_hnat_alg(skb));
printk_ratelimited(KERN_WARNING
"%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
__func__, state->in->name, skb_hnat_iface(skb),
HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
skb_hnat_sport(skb), skb_hnat_reason(skb),
skb_hnat_alg(skb));
return NF_DROP;
}
@@ -797,9 +850,6 @@ static unsigned int
mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
if (!skb)
goto drop;
if (!is_ppe_support_type(skb)) {
hnat_set_head_frags(state, skb, 1, hnat_set_alg);
return NF_ACCEPT;
@@ -813,6 +863,8 @@ mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
if (do_ext2ge_fast_try(state->in, skb)) {
if (!do_hnat_ext_to_ge(skb, state->in, __func__))
return NF_STOLEN;
if (!skb)
goto drop;
return NF_ACCEPT;
}
@@ -827,14 +879,12 @@ mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
return NF_ACCEPT;
drop:
if (skb)
printk_ratelimited(KERN_WARNING
"%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
"sport=0x%x, reason=0x%x, alg=0x%x)\n",
__func__, state->in->name, skb_hnat_iface(skb),
HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
skb_hnat_sport(skb), skb_hnat_reason(skb),
skb_hnat_alg(skb));
printk_ratelimited(KERN_WARNING
"%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
__func__, state->in->name, skb_hnat_iface(skb),
HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
skb_hnat_sport(skb), skb_hnat_reason(skb),
skb_hnat_alg(skb));
return NF_DROP;
}
@@ -845,9 +895,6 @@ mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
{
struct vlan_ethhdr *veth;
if (!skb)
goto drop;
if (IS_HQOS_MODE && hnat_priv->data->whnat) {
veth = (struct vlan_ethhdr *)skb_mac_header(skb);
@@ -880,6 +927,8 @@ mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
if (!do_hnat_ext_to_ge(skb, state->in, __func__))
return NF_STOLEN;
if (!skb)
goto drop;
return NF_ACCEPT;
}
@@ -903,7 +952,6 @@ mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
}
}
#if !(defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3))
/* MapE need remove ipv6 header and pingpong. (bridge mode) */
if (do_mape_w2l_fast(state->in, skb)) {
if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
@@ -911,17 +959,15 @@ mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
else
return NF_ACCEPT;
}
#endif
return NF_ACCEPT;
drop:
if (skb)
printk_ratelimited(KERN_WARNING
"%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
"sport=0x%x, reason=0x%x, alg=0x%x)\n",
__func__, state->in->name, skb_hnat_iface(skb),
HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
skb_hnat_sport(skb), skb_hnat_reason(skb),
skb_hnat_alg(skb));
printk_ratelimited(KERN_WARNING
"%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
__func__, state->in->name, skb_hnat_iface(skb),
HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
skb_hnat_sport(skb), skb_hnat_reason(skb),
skb_hnat_alg(skb));
return NF_DROP;
}
@@ -941,9 +987,6 @@ static unsigned int hnat_ipv6_get_nexthop(struct sk_buff *skb,
return 0;
}
if (!dst)
return -1;
rcu_read_lock_bh();
ipv6_nexthop =
rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
@@ -1072,8 +1115,7 @@ struct foe_entry ppe_fill_info_blk(struct ethhdr *eth, struct foe_entry entry,
entry.bfib1.vlan_layer += (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN) ? 1 : 0;
entry.bfib1.vpm = (entry.bfib1.vlan_layer) ? 1 : 0;
entry.bfib1.cah = 1;
entry.bfib1.time_stamp = (hnat_priv->data->version == MTK_HNAT_V4 ||
hnat_priv->data->version == MTK_HNAT_V5) ?
entry.bfib1.time_stamp = (hnat_priv->data->version == MTK_HNAT_V4) ?
readl(hnat_priv->fe_base + 0x0010) & (0xFF) :
readl(hnat_priv->fe_base + 0x0010) & (0x7FFF);
@@ -1092,8 +1134,7 @@ struct foe_entry ppe_fill_info_blk(struct ethhdr *eth, struct foe_entry entry,
}
entry.ipv4_hnapt.iblk2.port_ag =
(hnat_priv->data->version == MTK_HNAT_V4 ||
hnat_priv->data->version == MTK_HNAT_V5) ? 0xf : 0x3f;
(hnat_priv->data->version == MTK_HNAT_V4) ? 0xf : 0x3f;
break;
case IPV4_DSLITE:
case IPV4_MAP_E:
@@ -1112,8 +1153,7 @@ struct foe_entry ppe_fill_info_blk(struct ethhdr *eth, struct foe_entry entry,
}
entry.ipv6_5t_route.iblk2.port_ag =
(hnat_priv->data->version == MTK_HNAT_V4 ||
hnat_priv->data->version == MTK_HNAT_V5) ? 0xf : 0x3f;
(hnat_priv->data->version == MTK_HNAT_V4) ? 0xf : 0x3f;
break;
}
return entry;
@@ -1150,7 +1190,7 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
entry.bfib1.state = foe->udib1.state;
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
entry.bfib1.sp = foe->udib1.sp;
#endif
@@ -1174,7 +1214,7 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
entry.ipv4_dslite.dport =
foe->ipv4_dslite.dport;
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
if (entry.bfib1.pkt_type == IPV4_MAP_E) {
pptr = skb_header_pointer(skb,
iph->ihl * 4,
@@ -1183,13 +1223,13 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
if (unlikely(!pptr))
return -1;
entry.ipv4_mape.new_sip =
entry.ipv4_dslite.new_sip =
ntohl(iph->saddr);
entry.ipv4_mape.new_dip =
entry.ipv4_dslite.new_dip =
ntohl(iph->daddr);
entry.ipv4_mape.new_sport =
entry.ipv4_dslite.new_sport =
ntohs(pptr->src);
entry.ipv4_mape.new_dport =
entry.ipv4_dslite.new_dport =
ntohs(pptr->dst);
}
#endif
@@ -1225,8 +1265,7 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;
if (skb->vlan_tci && FROM_GE_WAN(skb) &&
IS_LAN_GRP(dev)) {
if (skb->vlan_tci && FROM_GE_WAN(skb) && IS_LAN(dev)) {
entry.bfib1.vlan_layer += 1;
if (entry.ipv4_hnapt.vlan1)
@@ -1278,8 +1317,7 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
entry.ipv6_5t_route.vlan1 = hw_path->vlan_id;
if (skb->vlan_tci && FROM_GE_WAN(skb) &&
IS_LAN_GRP(dev)) {
if (skb->vlan_tci && FROM_GE_WAN(skb) && IS_LAN(dev)) {
entry.bfib1.vlan_layer += 1;
if (entry.ipv6_5t_route.vlan1)
@@ -1380,10 +1418,10 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
if (mape_toggle) {
entry.ipv4_dslite.iblk2.dscp = foe->ipv4_dslite.iblk2.dscp;
entry.ipv4_mape.new_sip = foe->ipv4_mape.new_sip;
entry.ipv4_mape.new_dip = foe->ipv4_mape.new_dip;
entry.ipv4_mape.new_sport = foe->ipv4_mape.new_sport;
entry.ipv4_mape.new_dport = foe->ipv4_mape.new_dport;
entry.ipv4_dslite.new_sip = foe->ipv4_dslite.new_sip;
entry.ipv4_dslite.new_dip = foe->ipv4_dslite.new_dip;
entry.ipv4_dslite.new_sport = foe->ipv4_dslite.new_sport;
entry.ipv4_dslite.new_dport = foe->ipv4_dslite.new_dport;
}
#endif
} else if (mape_toggle &&
@@ -1413,14 +1451,9 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
if (IS_HQOS_MODE) {
entry.ipv4_hnapt.iblk2.qid =
(hnat_priv->data->version == MTK_HNAT_V4 ||
hnat_priv->data->version == MTK_HNAT_V5) ?
(hnat_priv->data->version == MTK_HNAT_V4) ?
skb->mark & 0x7f : skb->mark & 0xf;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
entry.ipv4_hnapt.tport_id = 1;
#else
entry.ipv4_hnapt.iblk2.fqos = 1;
#endif
}
entry.ipv4_hnapt.bfib1.udp =
@@ -1494,8 +1527,6 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
NR_GMAC2_PORT : NR_GMAC1_PORT;
else
gmac = NR_GMAC1_PORT;
} else if (IS_LAN2(dev)) {
gmac = NR_GMAC3_PORT;
} else if (IS_WAN(dev)) {
if (IS_DSA_WAN(dev))
port_id = hnat_dsa_fill_stag(dev,&entry, hw_path,
@@ -1508,7 +1539,7 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
} else {
gmac = (IS_GMAC1_MODE) ? NR_GMAC1_PORT : NR_GMAC2_PORT;
}
} else if (IS_EXT(dev) && (FROM_GE_PPD(skb) || FROM_GE_LAN_GRP(skb) ||
} else if (IS_EXT(dev) && (FROM_GE_PPD(skb) || FROM_GE_LAN(skb) ||
FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb) || FROM_WED(skb))) {
if (!hnat_priv->data->whnat && IS_GMAC1_MODE) {
entry.bfib1.vpm = 1;
@@ -1540,8 +1571,7 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
if (IS_HQOS_MODE || skb->mark >= MAX_PPPQ_PORT_NUM)
qid = skb->mark & (MTK_QDMA_TX_MASK);
else if (IS_PPPQ_MODE && (IS_DSA_1G_LAN(dev) || IS_DSA_WAN(dev) ||
(FROM_WED(skb) && IS_DSA_LAN(dev))))
else if (IS_PPPQ_MODE && (IS_DSA_1G_LAN(dev) || IS_DSA_WAN(dev)))
qid = port_id & MTK_QDMA_TX_MASK;
else
qid = 0;
@@ -1552,8 +1582,7 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
(hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0;
if (qos_toggle) {
if (hnat_priv->data->version == MTK_HNAT_V4 ||
hnat_priv->data->version == MTK_HNAT_V5) {
if (hnat_priv->data->version == MTK_HNAT_V4) {
entry.ipv4_hnapt.iblk2.qid = qid & 0x7f;
} else {
/* qid[5:0]= port_mg[1:0]+ qid[3:0] */
@@ -1562,7 +1591,7 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
entry.ipv4_hnapt.iblk2.port_mg |=
((qid >> 4) & 0x3);
if (((IS_EXT(dev) && (FROM_GE_LAN_GRP(skb) ||
if (((IS_EXT(dev) && (FROM_GE_LAN(skb) ||
FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) ||
((mape_toggle && mape == 1) && !FROM_EXT(skb))) &&
(!whnat)) {
@@ -1576,14 +1605,9 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
(IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev)))
entry.ipv4_hnapt.iblk2.fqos = 0;
else
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
entry.ipv4_hnapt.tport_id = 1;
#else
entry.ipv4_hnapt.iblk2.fqos =
(!IS_PPPQ_MODE || (IS_PPPQ_MODE &&
(IS_DSA_1G_LAN(dev) || IS_DSA_WAN(dev) ||
(FROM_WED(skb) && IS_DSA_LAN(dev)))));
#endif
(IS_DSA_1G_LAN(dev) || IS_DSA_WAN(dev))));
} else {
entry.ipv4_hnapt.iblk2.fqos = 0;
}
@@ -1593,8 +1617,7 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
(hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0;
if (qos_toggle) {
if (hnat_priv->data->version == MTK_HNAT_V4 ||
hnat_priv->data->version == MTK_HNAT_V5) {
if (hnat_priv->data->version == MTK_HNAT_V4) {
entry.ipv6_5t_route.iblk2.qid = qid & 0x7f;
} else {
/* qid[5:0]= port_mg[1:0]+ qid[3:0] */
@@ -1603,7 +1626,7 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
entry.ipv6_5t_route.iblk2.port_mg |=
((qid >> 4) & 0x3);
if (IS_EXT(dev) && (FROM_GE_LAN_GRP(skb) ||
if (IS_EXT(dev) && (FROM_GE_LAN(skb) ||
FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) &&
(!whnat)) {
entry.ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
@@ -1616,14 +1639,9 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
(IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev)))
entry.ipv6_5t_route.iblk2.fqos = 0;
else
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
entry.ipv6_5t_route.tport_id = 1;
#else
entry.ipv6_5t_route.iblk2.fqos =
(!IS_PPPQ_MODE || (IS_PPPQ_MODE &&
(IS_DSA_1G_LAN(dev) || IS_DSA_WAN(dev) ||
(FROM_WED(skb) && IS_DSA_LAN(dev)))));
#endif
(IS_DSA_1G_LAN(dev) || IS_DSA_WAN(dev))));
} else {
entry.ipv6_5t_route.iblk2.fqos = 0;
}
@@ -1641,9 +1659,7 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
wmb();
memcpy(foe, &entry, sizeof(entry));
/*reset statistic for this entry*/
if (hnat_priv->data->per_flow_accounting &&
skb_hnat_entry(skb) < hnat_priv->foe_etry_num &&
skb_hnat_ppe(skb) < CFG_PPE_NUM)
if (hnat_priv->data->per_flow_accounting)
memset(&hnat_priv->acct[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
0, sizeof(struct mib_entry));
@@ -1739,26 +1755,11 @@ int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
entry->ipv4_hnapt.iblk2.fqos = 0;
if ((hnat_priv->data->version == MTK_HNAT_V2 &&
gmac_no == NR_WHNAT_WDMA_PORT) ||
((hnat_priv->data->version == MTK_HNAT_V4 ||
hnat_priv->data->version == MTK_HNAT_V5) &&
(hnat_priv->data->version == MTK_HNAT_V4 &&
(gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
entry->ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
entry->ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
entry->ipv4_hnapt.tport_id = (IS_HQOS_MODE) ? 1 : 0;
entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
entry->ipv4_hnapt.iblk2.winfoi = 1;
entry->ipv4_hnapt.winfo_pao.usr_info =
skb_hnat_usr_info(skb);
entry->ipv4_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
entry->ipv4_hnapt.winfo_pao.is_fixedrate =
skb_hnat_is_fixedrate(skb);
entry->ipv4_hnapt.winfo_pao.is_prior =
skb_hnat_is_prior(skb);
entry->ipv4_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
entry->ipv4_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
entry->ipv4_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
entry->ipv4_hnapt.iblk2.winfoi = 1;
#else
@@ -1771,14 +1772,14 @@ int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
bfib1_tx.vpm = 1;
bfib1_tx.vlan_layer = 1;
if (FROM_GE_LAN_GRP(skb))
if (FROM_GE_LAN(skb))
entry->ipv4_hnapt.vlan1 = 1;
else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
entry->ipv4_hnapt.vlan1 = 2;
}
if (IS_HQOS_MODE &&
(FROM_GE_LAN_GRP(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
(FROM_GE_LAN(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
bfib1_tx.vpm = 0;
bfib1_tx.vlan_layer = 1;
entry->ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
@@ -1791,30 +1792,11 @@ int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
entry->ipv6_5t_route.iblk2.fqos = 0;
if ((hnat_priv->data->version == MTK_HNAT_V2 &&
gmac_no == NR_WHNAT_WDMA_PORT) ||
((hnat_priv->data->version == MTK_HNAT_V4 ||
hnat_priv->data->version == MTK_HNAT_V5) &&
(hnat_priv->data->version == MTK_HNAT_V4 &&
(gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
entry->ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb);
entry->ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb);
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
entry->ipv6_5t_route.tport_id = (IS_HQOS_MODE) ? 1 : 0;
entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
entry->ipv6_5t_route.iblk2.winfoi = 1;
entry->ipv6_5t_route.winfo_pao.usr_info =
skb_hnat_usr_info(skb);
entry->ipv6_5t_route.winfo_pao.tid =
skb_hnat_tid(skb);
entry->ipv6_5t_route.winfo_pao.is_fixedrate =
skb_hnat_is_fixedrate(skb);
entry->ipv6_5t_route.winfo_pao.is_prior =
skb_hnat_is_prior(skb);
entry->ipv6_5t_route.winfo_pao.is_sp =
skb_hnat_is_sp(skb);
entry->ipv6_5t_route.winfo_pao.hf =
skb_hnat_hf(skb);
entry->ipv6_5t_route.winfo_pao.amsdu =
skb_hnat_amsdu(skb);
#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
entry->ipv6_5t_route.iblk2.winfoi = 1;
#else
@@ -1827,14 +1809,14 @@ int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
bfib1_tx.vpm = 1;
bfib1_tx.vlan_layer = 1;
if (FROM_GE_LAN_GRP(skb))
if (FROM_GE_LAN(skb))
entry->ipv6_5t_route.vlan1 = 1;
else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
entry->ipv6_5t_route.vlan1 = 2;
}
if (IS_HQOS_MODE &&
(FROM_GE_LAN_GRP(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
(FROM_GE_LAN(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
bfib1_tx.vpm = 0;
bfib1_tx.vlan_layer = 1;
entry->ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
@@ -2036,16 +2018,12 @@ static unsigned int mtk_hnat_nf_post_routing(
out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
}
if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
if (!IS_LAN(out) && !IS_WAN(out) && !IS_EXT(out))
return 0;
trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
skb_hnat_iface(skb), out->name, skb_hnat_reason(skb));
if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
skb_hnat_ppe(skb) >= CFG_PPE_NUM)
return -1;
entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
switch (skb_hnat_reason(skb)) {
@@ -2086,7 +2064,7 @@ static unsigned int mtk_hnat_nf_post_routing(
case HIT_BIND_MULTICAST_TO_CPU:
case HIT_BIND_MULTICAST_TO_GMAC_CPU:
/*do not forward to gdma again,if ppe already done it*/
if (IS_LAN_GRP(out) || IS_WAN(out))
if (IS_LAN(out) || IS_WAN(out))
return -1;
break;
}
@@ -2109,10 +2087,6 @@ mtk_hnat_ipv6_nf_local_out(void *priv, struct sk_buff *skb,
if (unlikely(!skb_hnat_is_hashed(skb)))
return NF_ACCEPT;
if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
skb_hnat_ppe(skb) >= CFG_PPE_NUM)
return NF_ACCEPT;
entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
if (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH) {
ip6h = ipv6_hdr(skb);
@@ -2142,13 +2116,13 @@ mtk_hnat_ipv6_nf_local_out(void *priv, struct sk_buff *skb,
entry->bfib1.udp = udp;
/* Map-E LAN->WAN record inner IPv4 header info. */
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
entry->bfib1.pkt_type = IPV4_MAP_E;
entry->ipv4_dslite.iblk2.dscp = iph->tos;
entry->ipv4_mape.new_sip = ntohl(iph->saddr);
entry->ipv4_mape.new_dip = ntohl(iph->daddr);
entry->ipv4_mape.new_sport = ntohs(pptr->src);
entry->ipv4_mape.new_dport = ntohs(pptr->dst);
entry->ipv4_dslite.new_sip = ntohl(iph->saddr);
entry->ipv4_dslite.new_dip = ntohl(iph->daddr);
entry->ipv4_dslite.new_sport = ntohs(pptr->src);
entry->ipv4_dslite.new_dport = ntohs(pptr->dst);
#else
entry->ipv4_hnapt.iblk2.dscp = iph->tos;
entry->ipv4_hnapt.new_sip = ntohl(iph->saddr);
@@ -2168,24 +2142,17 @@ static unsigned int
mtk_hnat_ipv6_nf_post_routing(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
if (!skb)
goto drop;
post_routing_print(skb, state->in, state->out, __func__);
if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv6_get_nexthop,
__func__))
return NF_ACCEPT;
drop:
if (skb)
trace_printk(
"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
"sport=0x%x, reason=0x%x, alg=0x%x)\n",
__func__, skb_hnat_iface(skb), state->out->name,
HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
skb_hnat_sport(skb), skb_hnat_reason(skb),
skb_hnat_alg(skb));
trace_printk(
"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
__func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
skb_hnat_alg(skb));
return NF_DROP;
}
@@ -2194,24 +2161,17 @@ static unsigned int
mtk_hnat_ipv4_nf_post_routing(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
if (!skb)
goto drop;
post_routing_print(skb, state->in, state->out, __func__);
if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv4_get_nexthop,
__func__))
return NF_ACCEPT;
drop:
if (skb)
trace_printk(
"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
"sport=0x%x, reason=0x%x, alg=0x%x)\n",
__func__, skb_hnat_iface(skb), state->out->name,
HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
skb_hnat_sport(skb), skb_hnat_reason(skb),
skb_hnat_alg(skb));
trace_printk(
"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
__func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
skb_hnat_alg(skb));
return NF_DROP;
}
@@ -2246,16 +2206,13 @@ mtk_pong_hqos_handler(void *priv, struct sk_buff *skb,
}
return NF_ACCEPT;
drop:
if (skb)
printk_ratelimited(KERN_WARNING
"%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
"sport=0x%x, reason=0x%x, alg=0x%x)\n",
__func__, state->in->name, skb_hnat_iface(skb),
HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
skb_hnat_sport(skb), skb_hnat_reason(skb),
skb_hnat_alg(skb));
printk_ratelimited(KERN_WARNING
"%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
__func__, state->in->name, skb_hnat_iface(skb),
HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
skb_hnat_sport(skb), skb_hnat_reason(skb),
skb_hnat_alg(skb));
return NF_DROP;
}
@@ -2264,23 +2221,16 @@ static unsigned int
mtk_hnat_br_nf_local_out(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
if (!skb)
goto drop;
post_routing_print(skb, state->in, state->out, __func__);
if (!mtk_hnat_nf_post_routing(skb, state->out, 0, __func__))
return NF_ACCEPT;
drop:
if (skb)
trace_printk(
"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
"sport=0x%x, reason=0x%x, alg=0x%x)\n",
__func__, skb_hnat_iface(skb), state->out->name,
HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
skb_hnat_sport(skb), skb_hnat_reason(skb),
skb_hnat_alg(skb));
trace_printk(
"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
__func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
skb_hnat_alg(skb));
return NF_DROP;
}
@@ -2296,10 +2246,6 @@ mtk_hnat_ipv4_nf_local_out(void *priv, struct sk_buff *skb,
if (!skb_hnat_is_hashed(skb))
return NF_ACCEPT;
if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
skb_hnat_ppe(skb) >= CFG_PPE_NUM)
return NF_ACCEPT;
entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
if (unlikely(skb_headroom(skb) < FOE_INFO_LEN)) {

View File

@@ -23,31 +23,7 @@ struct hnat_skb_cb2 {
__u32 magic;
};
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
struct hnat_desc {
u32 entry : 15;
u32 filled : 3;
u32 crsn : 5;
u32 resv1 : 3;
u32 sport : 4;
u32 resv2 : 1;
u32 alg : 1;
u32 iface : 8;
u32 wdmaid : 2;
u32 rxid : 2;
u32 wcid : 16;
u32 bssid : 8;
u32 usr_info : 16;
u32 tid : 4;
u32 is_fixedrate : 1;
u32 is_prior : 1;
u32 is_sp : 1;
u32 hf : 1;
u32 amsdu : 1;
u32 resv3 : 19;
u32 magic_tag_protect : 16;
} __packed;
#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
struct hnat_desc {
u32 entry : 15;
u32 filled : 3;
@@ -86,7 +62,7 @@ struct hnat_desc {
#define HNAT_MAGIC_TAG 0x6789
#define HNAT_INFO_FILLED 0x7
#define WIFI_INFO_LEN 6
#define WIFI_INFO_LEN 3
#define FOE_INFO_LEN (10 + WIFI_INFO_LEN)
#define IS_SPACE_AVAILABLE_HEAD(skb) \
((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0)))
@@ -104,22 +80,8 @@ struct hnat_desc {
#define skb_hnat_rx_id(skb) (((struct hnat_desc *)((skb)->head))->rxid)
#define skb_hnat_wc_id(skb) (((struct hnat_desc *)((skb)->head))->wcid)
#define skb_hnat_bss_id(skb) (((struct hnat_desc *)((skb)->head))->bssid)
#define skb_hnat_usr_info(skb) (((struct hnat_desc *)((skb)->head))->usr_info)
#define skb_hnat_tid(skb) (((struct hnat_desc *)((skb)->head))->tid)
#define skb_hnat_is_fixedrate(skb) \
(((struct hnat_desc *)((skb)->head))->is_fixedrate)
#define skb_hnat_is_prior(skb) (((struct hnat_desc *)((skb)->head))->is_prior)
#define skb_hnat_is_sp(skb) (((struct hnat_desc *)((skb)->head))->is_sp)
#define skb_hnat_hf(skb) (((struct hnat_desc *)((skb)->head))->hf)
#define skb_hnat_amsdu(skb) (((struct hnat_desc *)((skb)->head))->amsdu)
#define skb_hnat_ppe2(skb) \
((skb_hnat_iface(skb) == FOE_MAGIC_GE_LAN2 || \
skb_hnat_iface(skb) == FOE_MAGIC_WED2) && CFG_PPE_NUM == 3)
#define skb_hnat_ppe1(skb) \
((skb_hnat_iface(skb) == FOE_MAGIC_GE_WAN && CFG_PPE_NUM == 3) || \
(skb_hnat_iface(skb) == FOE_MAGIC_WED1 && CFG_PPE_NUM > 1))
#define skb_hnat_ppe(skb) \
(skb_hnat_ppe2(skb) ? 2 : (skb_hnat_ppe1(skb) ? 1 : 0))
#define skb_hnat_ppe(skb) \
((skb_hnat_iface(skb) == FOE_MAGIC_WED1 && CFG_PPE_NUM > 1) ? 1 : 0)
#define do_ext2ge_fast_try(dev, skb) \
((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb))
#define set_from_extge(skb) (HNAT_SKB_CB2(skb)->magic = 0x78786688)

View File

@@ -13,7 +13,7 @@
#include "mtk_eth_soc.h"
int mtk_sgmii_init(struct mtk_xgmii *ss, struct device_node *r, u32 ana_rgc3)
int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
{
struct device_node *np;
int i;
@@ -25,9 +25,9 @@ int mtk_sgmii_init(struct mtk_xgmii *ss, struct device_node *r, u32 ana_rgc3)
if (!np)
break;
ss->regmap_sgmii[i] = syscon_node_to_regmap(np);
if (IS_ERR(ss->regmap_sgmii[i]))
return PTR_ERR(ss->regmap_sgmii[i]);
ss->regmap[i] = syscon_node_to_regmap(np);
if (IS_ERR(ss->regmap[i]))
return PTR_ERR(ss->regmap[i]);
ss->flags[i] &= ~(MTK_SGMII_PN_SWAP);
if (of_property_read_bool(np, "pn_swap"))
@@ -37,215 +37,57 @@ int mtk_sgmii_init(struct mtk_xgmii *ss, struct device_node *r, u32 ana_rgc3)
return 0;
}
void mtk_sgmii_setup_phya_gen1(struct mtk_xgmii *ss, int mac_id)
int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, unsigned int id)
{
u32 id = mtk_mac2xgmii_id(ss->eth, mac_id);
unsigned int val;
if (id >= MTK_MAX_DEVS ||
!ss->regmap_sgmii[id] || !ss->regmap_pextp[id])
return;
regmap_update_bits(ss->regmap_pextp[id], 0x9024, GENMASK(31, 0), 0x00D9071C);
regmap_update_bits(ss->regmap_pextp[id], 0x2020, GENMASK(31, 0), 0xAA8585AA);
regmap_update_bits(ss->regmap_pextp[id], 0x2030, GENMASK(31, 0), 0x0C020207);
regmap_update_bits(ss->regmap_pextp[id], 0x2034, GENMASK(31, 0), 0x0E05050F);
regmap_update_bits(ss->regmap_pextp[id], 0x2040, GENMASK(31, 0), 0x00200032);
regmap_update_bits(ss->regmap_pextp[id], 0x50F0, GENMASK(31, 0), 0x00C014BA);
regmap_update_bits(ss->regmap_pextp[id], 0x50E0, GENMASK(31, 0), 0x3777C12B);
regmap_update_bits(ss->regmap_pextp[id], 0x506C, GENMASK(31, 0), 0x005F9CFF);
regmap_update_bits(ss->regmap_pextp[id], 0x5070, GENMASK(31, 0), 0x9D9DFAFA);
regmap_update_bits(ss->regmap_pextp[id], 0x5074, GENMASK(31, 0), 0x27273F3F);
regmap_update_bits(ss->regmap_pextp[id], 0x5078, GENMASK(31, 0), 0xA7883C68);
regmap_update_bits(ss->regmap_pextp[id], 0x507C, GENMASK(31, 0), 0x11661166);
regmap_update_bits(ss->regmap_pextp[id], 0x5080, GENMASK(31, 0), 0x0E000EAF);
regmap_update_bits(ss->regmap_pextp[id], 0x5084, GENMASK(31, 0), 0x08080E0D);
regmap_update_bits(ss->regmap_pextp[id], 0x5088, GENMASK(31, 0), 0x02030B09);
regmap_update_bits(ss->regmap_pextp[id], 0x50E4, GENMASK(31, 0), 0x0C0C0000);
regmap_update_bits(ss->regmap_pextp[id], 0x50E8, GENMASK(31, 0), 0x04040000);
regmap_update_bits(ss->regmap_pextp[id], 0x50EC, GENMASK(31, 0), 0x0F0F0606);
regmap_update_bits(ss->regmap_pextp[id], 0x50A8, GENMASK(31, 0), 0x506E8C8C);
regmap_update_bits(ss->regmap_pextp[id], 0x6004, GENMASK(31, 0), 0x18190000);
regmap_update_bits(ss->regmap_pextp[id], 0x00F8, GENMASK(31, 0), 0x00FA32FA);
regmap_update_bits(ss->regmap_pextp[id], 0x00F4, GENMASK(31, 0), 0x80201F21);
regmap_update_bits(ss->regmap_pextp[id], 0x0030, GENMASK(31, 0), 0x00050C00);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x02002800);
ndelay(1020);
regmap_update_bits(ss->regmap_pextp[id], 0x30B0, GENMASK(31, 0), 0x00000020);
regmap_update_bits(ss->regmap_pextp[id], 0x3028, GENMASK(31, 0), 0x00008A01);
regmap_update_bits(ss->regmap_pextp[id], 0x302C, GENMASK(31, 0), 0x0000A884);
regmap_update_bits(ss->regmap_pextp[id], 0x3024, GENMASK(31, 0), 0x00083002);
regmap_update_bits(ss->regmap_pextp[id], 0x3010, GENMASK(31, 0), 0x00011110);
regmap_update_bits(ss->regmap_pextp[id], 0x3048, GENMASK(31, 0), 0x40704000);
regmap_update_bits(ss->regmap_pextp[id], 0x3064, GENMASK(31, 0), 0x0000C000);
regmap_update_bits(ss->regmap_pextp[id], 0x3050, GENMASK(31, 0), 0xA8000000);
regmap_update_bits(ss->regmap_pextp[id], 0x3054, GENMASK(31, 0), 0x000000AA);
regmap_update_bits(ss->regmap_pextp[id], 0x306C, GENMASK(31, 0), 0x20200F00);
regmap_update_bits(ss->regmap_pextp[id], 0xA060, GENMASK(31, 0), 0x00050000);
regmap_update_bits(ss->regmap_pextp[id], 0x90D0, GENMASK(31, 0), 0x00000007);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0200E800);
udelay(150);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0200C111);
ndelay(1020);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0200C101);
udelay(15);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0201C111);
ndelay(1020);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0201C101);
udelay(100);
regmap_update_bits(ss->regmap_pextp[id], 0x30B0, GENMASK(31, 0), 0x00000030);
regmap_update_bits(ss->regmap_pextp[id], 0x00F4, GENMASK(31, 0), 0x80201F01);
regmap_update_bits(ss->regmap_pextp[id], 0x3040, GENMASK(31, 0), 0x30000000);
udelay(400);
}
void mtk_sgmii_setup_phya_gen2(struct mtk_xgmii *ss, int mac_id)
{
u32 id = mtk_mac2xgmii_id(ss->eth, mac_id);
if (id < 0 || id >= MTK_MAX_DEVS ||
!ss->regmap_sgmii[id] || !ss->regmap_pextp[id])
return;
regmap_update_bits(ss->regmap_pextp[id], 0x9024, GENMASK(31, 0), 0x00D9071C);
regmap_update_bits(ss->regmap_pextp[id], 0x2020, GENMASK(31, 0), 0xAA8585AA);
regmap_update_bits(ss->regmap_pextp[id], 0x2030, GENMASK(31, 0), 0x0C020707);
regmap_update_bits(ss->regmap_pextp[id], 0x2034, GENMASK(31, 0), 0x0E050F0F);
regmap_update_bits(ss->regmap_pextp[id], 0x2040, GENMASK(31, 0), 0x00140032);
regmap_update_bits(ss->regmap_pextp[id], 0x50F0, GENMASK(31, 0), 0x00C014AA);
regmap_update_bits(ss->regmap_pextp[id], 0x50E0, GENMASK(31, 0), 0x3777C12B);
regmap_update_bits(ss->regmap_pextp[id], 0x506C, GENMASK(31, 0), 0x005F9CFF);
regmap_update_bits(ss->regmap_pextp[id], 0x5070, GENMASK(31, 0), 0x9D9DFAFA);
regmap_update_bits(ss->regmap_pextp[id], 0x5074, GENMASK(31, 0), 0x27273F3F);
regmap_update_bits(ss->regmap_pextp[id], 0x5078, GENMASK(31, 0), 0xA7883C68);
regmap_update_bits(ss->regmap_pextp[id], 0x507C, GENMASK(31, 0), 0x11661166);
regmap_update_bits(ss->regmap_pextp[id], 0x5080, GENMASK(31, 0), 0x0E000AAF);
regmap_update_bits(ss->regmap_pextp[id], 0x5084, GENMASK(31, 0), 0x08080D0D);
regmap_update_bits(ss->regmap_pextp[id], 0x5088, GENMASK(31, 0), 0x02030909);
regmap_update_bits(ss->regmap_pextp[id], 0x50E4, GENMASK(31, 0), 0x0C0C0000);
regmap_update_bits(ss->regmap_pextp[id], 0x50E8, GENMASK(31, 0), 0x04040000);
regmap_update_bits(ss->regmap_pextp[id], 0x50EC, GENMASK(31, 0), 0x0F0F0C06);
regmap_update_bits(ss->regmap_pextp[id], 0x50A8, GENMASK(31, 0), 0x506E8C8C);
regmap_update_bits(ss->regmap_pextp[id], 0x6004, GENMASK(31, 0), 0x18190000);
regmap_update_bits(ss->regmap_pextp[id], 0x00F8, GENMASK(31, 0), 0x009C329C);
regmap_update_bits(ss->regmap_pextp[id], 0x00F4, GENMASK(31, 0), 0x80201F21);
regmap_update_bits(ss->regmap_pextp[id], 0x0030, GENMASK(31, 0), 0x00050C00);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x02002800);
ndelay(1020);
regmap_update_bits(ss->regmap_pextp[id], 0x30B0, GENMASK(31, 0), 0x00000020);
regmap_update_bits(ss->regmap_pextp[id], 0x3028, GENMASK(31, 0), 0x00008A01);
regmap_update_bits(ss->regmap_pextp[id], 0x302C, GENMASK(31, 0), 0x0000A884);
regmap_update_bits(ss->regmap_pextp[id], 0x3024, GENMASK(31, 0), 0x00083002);
regmap_update_bits(ss->regmap_pextp[id], 0x3010, GENMASK(31, 0), 0x00011110);
regmap_update_bits(ss->regmap_pextp[id], 0x3048, GENMASK(31, 0), 0x40704000);
regmap_update_bits(ss->regmap_pextp[id], 0x3050, GENMASK(31, 0), 0xA8000000);
regmap_update_bits(ss->regmap_pextp[id], 0x3054, GENMASK(31, 0), 0x000000AA);
regmap_update_bits(ss->regmap_pextp[id], 0x306C, GENMASK(31, 0), 0x22000F00);
regmap_update_bits(ss->regmap_pextp[id], 0xA060, GENMASK(31, 0), 0x00050000);
regmap_update_bits(ss->regmap_pextp[id], 0x90D0, GENMASK(31, 0), 0x00000005);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0200E800);
udelay(150);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0200C111);
ndelay(1020);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0200C101);
udelay(15);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0201C111);
ndelay(1020);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0201C101);
udelay(100);
regmap_update_bits(ss->regmap_pextp[id], 0x30B0, GENMASK(31, 0), 0x00000030);
regmap_update_bits(ss->regmap_pextp[id], 0x00F4, GENMASK(31, 0), 0x80201F01);
regmap_update_bits(ss->regmap_pextp[id], 0x3040, GENMASK(31, 0), 0x30000000);
udelay(400);
}
int mtk_sgmii_setup_mode_an(struct mtk_xgmii *ss, unsigned int mac_id)
{
struct mtk_eth *eth = ss->eth;
unsigned int val = 0;
u32 id = mtk_mac2xgmii_id(ss->eth, mac_id);
if (!ss->regmap_sgmii[id])
if (!ss->regmap[id])
return -EINVAL;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
mtk_xfi_pll_enable(ss);
/* Assert PHYA power down state */
regmap_write(ss->regmap_sgmii[id], SGMSYS_QPHY_PWR_STATE_CTRL, SGMII_PHYA_PWD);
/* Reset SGMII PCS state */
regmap_write(ss->regmap_sgmii[id], SGMII_RESERVED_0, SGMII_SW_RESET);
regmap_read(ss->regmap_sgmii[id], ss->ana_rgc3, &val);
val &= ~RG_PHY_SPEED_3_125G;
regmap_write(ss->regmap_sgmii[id], ss->ana_rgc3, val);
/* Setup the link timer and QPHY power up inside SGMIISYS */
regmap_write(ss->regmap_sgmii[id], SGMSYS_PCS_LINK_TIMER,
regmap_write(ss->regmap[id], SGMSYS_PCS_LINK_TIMER,
SGMII_LINK_TIMER_DEFAULT);
regmap_read(ss->regmap_sgmii[id], SGMSYS_SGMII_MODE, &val);
regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
val |= SGMII_REMOTE_FAULT_DIS;
regmap_write(ss->regmap_sgmii[id], SGMSYS_SGMII_MODE, val);
regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
/* SGMII AN mode setting */
regmap_read(ss->regmap_sgmii[id], SGMSYS_SGMII_MODE, &val);
val &= ~SGMII_IF_MODE_MASK;
val |= SGMII_SPEED_DUPLEX_AN;
regmap_write(ss->regmap_sgmii[id], SGMSYS_SGMII_MODE, val);
/* Enable SGMII AN */
regmap_read(ss->regmap_sgmii[id], SGMSYS_PCS_CONTROL_1, &val);
val |= SGMII_AN_ENABLE;
regmap_write(ss->regmap_sgmii[id], SGMSYS_PCS_CONTROL_1, val);
regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
val |= SGMII_AN_RESTART;
regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
if(MTK_HAS_FLAGS(ss->flags[id],MTK_SGMII_PN_SWAP))
regmap_update_bits(ss->regmap_sgmii[id], SGMSYS_QPHY_WRAP_CTRL,
regmap_update_bits(ss->regmap[id], SGMSYS_QPHY_WRAP_CTRL,
SGMII_PN_SWAP_MASK, SGMII_PN_SWAP_TX_RX);
/* Release PHYA power down state */
regmap_write(ss->regmap_sgmii[id], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
mtk_sgmii_setup_phya_gen1(ss, mac_id);
regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
return 0;
}
int mtk_sgmii_setup_mode_force(struct mtk_xgmii *ss, unsigned int mac_id,
int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, unsigned int id,
const struct phylink_link_state *state)
{
struct mtk_eth *eth = ss->eth;
unsigned int val = 0;
u32 id = mtk_mac2xgmii_id(eth, mac_id);
unsigned int val;
if (!ss->regmap_sgmii[id])
if (!ss->regmap[id])
return -EINVAL;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
mtk_xfi_pll_enable(ss);
/* Assert PHYA power down state */
regmap_write(ss->regmap_sgmii[id], SGMSYS_QPHY_PWR_STATE_CTRL, SGMII_PHYA_PWD);
/* Reset SGMII PCS state */
regmap_write(ss->regmap_sgmii[id], SGMII_RESERVED_0, SGMII_SW_RESET);
regmap_read(ss->regmap_sgmii[id], ss->ana_rgc3, &val);
regmap_read(ss->regmap[id], ss->ana_rgc3, &val);
val &= ~RG_PHY_SPEED_MASK;
if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
val |= RG_PHY_SPEED_3_125G;
regmap_write(ss->regmap_sgmii[id], ss->ana_rgc3, val);
regmap_write(ss->regmap[id], ss->ana_rgc3, val);
/* Disable SGMII AN */
regmap_read(ss->regmap_sgmii[id], SGMSYS_PCS_CONTROL_1, &val);
regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
val &= ~SGMII_AN_ENABLE;
regmap_write(ss->regmap_sgmii[id], SGMSYS_PCS_CONTROL_1, val);
regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
/* SGMII force mode setting */
regmap_read(ss->regmap_sgmii[id], SGMSYS_SGMII_MODE, &val);
regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
val &= ~SGMII_IF_MODE_MASK;
val &= ~SGMII_REMOTE_FAULT_DIS;
switch (state->speed) {
case SPEED_10:
@@ -256,45 +98,38 @@ int mtk_sgmii_setup_mode_force(struct mtk_xgmii *ss, unsigned int mac_id,
break;
case SPEED_2500:
case SPEED_1000:
default:
val |= SGMII_SPEED_1000;
break;
};
/* SGMII 1G and 2.5G force mode can only work in full duplex
* mode, no matter SGMII_FORCE_HALF_DUPLEX is set or not.
*/
if (state->duplex != DUPLEX_FULL)
if (state->duplex == DUPLEX_FULL)
val |= SGMII_DUPLEX_FULL;
regmap_write(ss->regmap_sgmii[id], SGMSYS_SGMII_MODE, val);
regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
if(MTK_HAS_FLAGS(ss->flags[id],MTK_SGMII_PN_SWAP))
regmap_update_bits(ss->regmap_sgmii[id], SGMSYS_QPHY_WRAP_CTRL,
regmap_update_bits(ss->regmap[id], SGMSYS_QPHY_WRAP_CTRL,
SGMII_PN_SWAP_MASK, SGMII_PN_SWAP_TX_RX);
/* Release PHYA power down state */
regmap_write(ss->regmap_sgmii[id], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3))
mtk_sgmii_setup_phya_gen2(ss, mac_id);
regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
return 0;
}
void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id)
{
struct mtk_xgmii *ss = eth->xgmii;
unsigned int val = 0, sid = mtk_mac2xgmii_id(eth, mac_id);
struct mtk_sgmii *ss = eth->sgmii;
unsigned int val, sid;
/* Decide how GMAC and SGMIISYS be mapped */
sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
0 : sid;
0 : mac_id;
if (!ss->regmap_sgmii[sid])
if (!ss->regmap[sid])
return;
regmap_read(ss->regmap_sgmii[sid], SGMSYS_PCS_CONTROL_1, &val);
regmap_read(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, &val);
val |= SGMII_AN_RESTART;
regmap_write(ss->regmap_sgmii[sid], SGMSYS_PCS_CONTROL_1, val);
regmap_write(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, val);
}

View File

@@ -1,350 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (c) 2022 MediaTek Inc.
* Author: Henry Yen <henry.yen@mediatek.com>
*/
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include "mtk_eth_soc.h"
int mtk_usxgmii_init(struct mtk_xgmii *ss, struct device_node *r)
{
struct device_node *np;
int i;
for (i = 0; i < MTK_MAX_DEVS; i++) {
np = of_parse_phandle(r, "mediatek,usxgmiisys", i);
if (!np)
break;
ss->regmap_usxgmii[i] = syscon_node_to_regmap(np);
if (IS_ERR(ss->regmap_usxgmii[i]))
return PTR_ERR(ss->regmap_usxgmii[i]);
ss->flags[i] &= ~(MTK_USXGMII_INT_2500);
if (of_property_read_bool(np, "internal_2500"))
ss->flags[i] |= MTK_USXGMII_INT_2500;
}
return 0;
}
int mtk_xfi_pextp_init(struct mtk_xgmii *ss, struct device_node *r)
{
struct device_node *np;
int i;
for (i = 0; i < MTK_MAX_DEVS; i++) {
np = of_parse_phandle(r, "mediatek,xfi_pextp", i);
if (!np)
break;
ss->regmap_pextp[i] = syscon_node_to_regmap(np);
if (IS_ERR(ss->regmap_pextp[i]))
return PTR_ERR(ss->regmap_pextp[i]);
}
return 0;
}
int mtk_xfi_pll_init(struct mtk_xgmii *ss, struct device_node *r)
{
struct device_node *np;
np = of_parse_phandle(r, "mediatek,xfi_pll", 0);
if (!np)
return -1;
ss->regmap_pll = syscon_node_to_regmap(np);
if (IS_ERR(ss->regmap_pll))
return PTR_ERR(ss->regmap_pll);
return 0;
}
int mtk_toprgu_init(struct mtk_eth *eth, struct device_node *r)
{
struct device_node *np;
np = of_parse_phandle(r, "mediatek,toprgu", 0);
if (!np)
return -1;
eth->toprgu = syscon_node_to_regmap(np);
if (IS_ERR(eth->toprgu))
return PTR_ERR(eth->toprgu);
return 0;
}
int mtk_xfi_pll_enable(struct mtk_xgmii *ss)
{
u32 val = 0;
if (!ss->regmap_pll)
return -EINVAL;
/* Add software workaround for USXGMII PLL TCL issue */
regmap_write(ss->regmap_pll, XFI_PLL_ANA_GLB8, RG_XFI_PLL_ANA_SWWA);
regmap_read(ss->regmap_pll, XFI_PLL_DIG_GLB8, &val);
val |= RG_XFI_PLL_EN;
regmap_write(ss->regmap_pll, XFI_PLL_DIG_GLB8, val);
return 0;
}
int mtk_mac2xgmii_id(struct mtk_eth *eth, int mac_id)
{
u32 xgmii_id = mac_id;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
switch (mac_id) {
case MTK_GMAC1_ID:
case MTK_GMAC2_ID:
xgmii_id = 1;
break;
case MTK_GMAC3_ID:
xgmii_id = 0;
break;
default:
pr_info("[%s] Warning: get illegal mac_id=%d !=!!!\n",
__func__, mac_id);
}
}
return xgmii_id;
}
void mtk_usxgmii_setup_phya_an_10000(struct mtk_xgmii *ss, int mac_id)
{
u32 id = mtk_mac2xgmii_id(ss->eth, mac_id);
if (id < 0 || id >= MTK_MAX_DEVS ||
!ss->regmap_usxgmii[id] || !ss->regmap_pextp[id])
return;
regmap_update_bits(ss->regmap_usxgmii[id], 0x810, GENMASK(31, 0), 0x000FFE6D);
regmap_update_bits(ss->regmap_usxgmii[id], 0x818, GENMASK(31, 0), 0x07B1EC7B);
regmap_update_bits(ss->regmap_usxgmii[id], 0x80C, GENMASK(31, 0), 0x30000000);
ndelay(1020);
regmap_update_bits(ss->regmap_usxgmii[id], 0x80C, GENMASK(31, 0), 0x10000000);
ndelay(1020);
regmap_update_bits(ss->regmap_usxgmii[id], 0x80C, GENMASK(31, 0), 0x00000000);
regmap_update_bits(ss->regmap_pextp[id], 0x9024, GENMASK(31, 0), 0x00C9071C);
regmap_update_bits(ss->regmap_pextp[id], 0x2020, GENMASK(31, 0), 0xAA8585AA);
regmap_update_bits(ss->regmap_pextp[id], 0x2030, GENMASK(31, 0), 0x0C020707);
regmap_update_bits(ss->regmap_pextp[id], 0x2034, GENMASK(31, 0), 0x0E050F0F);
regmap_update_bits(ss->regmap_pextp[id], 0x2040, GENMASK(31, 0), 0x00140032);
regmap_update_bits(ss->regmap_pextp[id], 0x50F0, GENMASK(31, 0), 0x00C014AA);
regmap_update_bits(ss->regmap_pextp[id], 0x50E0, GENMASK(31, 0), 0x3777C12B);
regmap_update_bits(ss->regmap_pextp[id], 0x506C, GENMASK(31, 0), 0x005F9CFF);
regmap_update_bits(ss->regmap_pextp[id], 0x5070, GENMASK(31, 0), 0x9D9DFAFA);
regmap_update_bits(ss->regmap_pextp[id], 0x5074, GENMASK(31, 0), 0x27273F3F);
regmap_update_bits(ss->regmap_pextp[id], 0x5078, GENMASK(31, 0), 0xA7883C68);
regmap_update_bits(ss->regmap_pextp[id], 0x507C, GENMASK(31, 0), 0x11661166);
regmap_update_bits(ss->regmap_pextp[id], 0x5080, GENMASK(31, 0), 0x0E000AAF);
regmap_update_bits(ss->regmap_pextp[id], 0x5084, GENMASK(31, 0), 0x08080D0D);
regmap_update_bits(ss->regmap_pextp[id], 0x5088, GENMASK(31, 0), 0x02030909);
regmap_update_bits(ss->regmap_pextp[id], 0x50E4, GENMASK(31, 0), 0x0C0C0000);
regmap_update_bits(ss->regmap_pextp[id], 0x50E8, GENMASK(31, 0), 0x04040000);
regmap_update_bits(ss->regmap_pextp[id], 0x50EC, GENMASK(31, 0), 0x0F0F0C06);
regmap_update_bits(ss->regmap_pextp[id], 0x50A8, GENMASK(31, 0), 0x506E8C8C);
regmap_update_bits(ss->regmap_pextp[id], 0x6004, GENMASK(31, 0), 0x18190000);
regmap_update_bits(ss->regmap_pextp[id], 0x00F8, GENMASK(31, 0), 0x01423342);
regmap_update_bits(ss->regmap_pextp[id], 0x00F4, GENMASK(31, 0), 0x80201F20);
regmap_update_bits(ss->regmap_pextp[id], 0x0030, GENMASK(31, 0), 0x00050C00);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x02002800);
ndelay(1020);
regmap_update_bits(ss->regmap_pextp[id], 0x30B0, GENMASK(31, 0), 0x00000020);
regmap_update_bits(ss->regmap_pextp[id], 0x3028, GENMASK(31, 0), 0x00008A01);
regmap_update_bits(ss->regmap_pextp[id], 0x302C, GENMASK(31, 0), 0x0000A884);
regmap_update_bits(ss->regmap_pextp[id], 0x3024, GENMASK(31, 0), 0x00083002);
regmap_update_bits(ss->regmap_pextp[id], 0x3010, GENMASK(31, 0), 0x00022220);
regmap_update_bits(ss->regmap_pextp[id], 0x5064, GENMASK(31, 0), 0x0F020A01);
regmap_update_bits(ss->regmap_pextp[id], 0x50B4, GENMASK(31, 0), 0x06100600);
regmap_update_bits(ss->regmap_pextp[id], 0x3048, GENMASK(31, 0), 0x40704000);
regmap_update_bits(ss->regmap_pextp[id], 0x3050, GENMASK(31, 0), 0xA8000000);
regmap_update_bits(ss->regmap_pextp[id], 0x3054, GENMASK(31, 0), 0x000000AA);
regmap_update_bits(ss->regmap_pextp[id], 0x306C, GENMASK(31, 0), 0x00000F00);
regmap_update_bits(ss->regmap_pextp[id], 0xA060, GENMASK(31, 0), 0x00040000);
regmap_update_bits(ss->regmap_pextp[id], 0x90D0, GENMASK(31, 0), 0x00000001);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0200E800);
udelay(150);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0200C111);
ndelay(1020);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0200C101);
udelay(15);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0202C111);
ndelay(1020);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0202C101);
udelay(100);
regmap_update_bits(ss->regmap_pextp[id], 0x30B0, GENMASK(31, 0), 0x00000030);
regmap_update_bits(ss->regmap_pextp[id], 0x00F4, GENMASK(31, 0), 0x80201F00);
regmap_update_bits(ss->regmap_pextp[id], 0x3040, GENMASK(31, 0), 0x30000000);
udelay(400);
}
void mtk_usxgmii_setup_phya_force(struct mtk_xgmii *ss, int mac_id, int max_speed)
{
unsigned int val;
u32 id = mtk_mac2xgmii_id(ss->eth, mac_id);
if (id < 0 || id >= MTK_MAX_DEVS ||
!ss->regmap_usxgmii[id] || !ss->regmap_pextp[id])
return;
/* Decide USXGMII speed */
switch (max_speed) {
case SPEED_5000:
val = FIELD_PREP(RG_XFI_RX_MODE, RG_XFI_RX_MODE_5G) |
FIELD_PREP(RG_XFI_TX_MODE, RG_XFI_TX_MODE_5G);
break;
case SPEED_10000:
default:
val = FIELD_PREP(RG_XFI_RX_MODE, RG_XFI_RX_MODE_10G) |
FIELD_PREP(RG_XFI_TX_MODE, RG_XFI_TX_MODE_10G);
break;
};
regmap_write(ss->regmap_usxgmii[id], RG_PHY_TOP_SPEED_CTRL1, val);
/* Disable USXGMII AN mode */
regmap_read(ss->regmap_usxgmii[id], RG_PCS_AN_CTRL0, &val);
val &= ~RG_AN_ENABLE;
regmap_write(ss->regmap_usxgmii[id], RG_PCS_AN_CTRL0, val);
/* Gated USXGMII */
regmap_read(ss->regmap_usxgmii[id], RG_PHY_TOP_SPEED_CTRL1, &val);
val |= RG_MAC_CK_GATED;
regmap_write(ss->regmap_usxgmii[id], RG_PHY_TOP_SPEED_CTRL1, val);
ndelay(1020);
/* USXGMII force mode setting */
regmap_read(ss->regmap_usxgmii[id], RG_PHY_TOP_SPEED_CTRL1, &val);
val |= RG_USXGMII_RATE_UPDATE_MODE;
val |= RG_IF_FORCE_EN;
val |= FIELD_PREP(RG_RATE_ADAPT_MODE, RG_RATE_ADAPT_MODE_X1);
regmap_write(ss->regmap_usxgmii[id], RG_PHY_TOP_SPEED_CTRL1, val);
/* Un-gated USXGMII */
regmap_read(ss->regmap_usxgmii[id], RG_PHY_TOP_SPEED_CTRL1, &val);
val &= ~RG_MAC_CK_GATED;
regmap_write(ss->regmap_usxgmii[id], RG_PHY_TOP_SPEED_CTRL1, val);
ndelay(1020);
regmap_update_bits(ss->regmap_pextp[id], 0x9024, GENMASK(31, 0), 0x00C9071C);
regmap_update_bits(ss->regmap_pextp[id], 0x2020, GENMASK(31, 0), 0xAA8585AA);
regmap_update_bits(ss->regmap_pextp[id], 0x2030, GENMASK(31, 0), 0x0C020707);
regmap_update_bits(ss->regmap_pextp[id], 0x2034, GENMASK(31, 0), 0x0E050F0F);
regmap_update_bits(ss->regmap_pextp[id], 0x2040, GENMASK(31, 0), 0x00140032);
regmap_update_bits(ss->regmap_pextp[id], 0x50F0, GENMASK(31, 0), 0x00C014AA);
regmap_update_bits(ss->regmap_pextp[id], 0x50E0, GENMASK(31, 0), 0x3777C12B);
regmap_update_bits(ss->regmap_pextp[id], 0x506C, GENMASK(31, 0), 0x005F9CFF);
regmap_update_bits(ss->regmap_pextp[id], 0x5070, GENMASK(31, 0), 0x9D9DFAFA);
regmap_update_bits(ss->regmap_pextp[id], 0x5074, GENMASK(31, 0), 0x27273F3F);
regmap_update_bits(ss->regmap_pextp[id], 0x5078, GENMASK(31, 0), 0xA7883C68);
regmap_update_bits(ss->regmap_pextp[id], 0x507C, GENMASK(31, 0), 0x11661166);
regmap_update_bits(ss->regmap_pextp[id], 0x5080, GENMASK(31, 0), 0x0E000AAF);
regmap_update_bits(ss->regmap_pextp[id], 0x5084, GENMASK(31, 0), 0x08080D0D);
regmap_update_bits(ss->regmap_pextp[id], 0x5088, GENMASK(31, 0), 0x02030909);
regmap_update_bits(ss->regmap_pextp[id], 0x50E4, GENMASK(31, 0), 0x0C0C0000);
regmap_update_bits(ss->regmap_pextp[id], 0x50E8, GENMASK(31, 0), 0x04040000);
regmap_update_bits(ss->regmap_pextp[id], 0x50EC, GENMASK(31, 0), 0x0F0F0C06);
regmap_update_bits(ss->regmap_pextp[id], 0x50A8, GENMASK(31, 0), 0x506E8C8C);
regmap_update_bits(ss->regmap_pextp[id], 0x6004, GENMASK(31, 0), 0x18190000);
regmap_update_bits(ss->regmap_pextp[id], 0x00F8, GENMASK(31, 0), 0x01423342);
regmap_update_bits(ss->regmap_pextp[id], 0x00F4, GENMASK(31, 0), 0x80201F20);
regmap_update_bits(ss->regmap_pextp[id], 0x0030, GENMASK(31, 0), 0x00050C00);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x02002800);
ndelay(1020);
regmap_update_bits(ss->regmap_pextp[id], 0x30B0, GENMASK(31, 0), 0x00000020);
regmap_update_bits(ss->regmap_pextp[id], 0x3028, GENMASK(31, 0), 0x00008A01);
regmap_update_bits(ss->regmap_pextp[id], 0x302C, GENMASK(31, 0), 0x0000A884);
regmap_update_bits(ss->regmap_pextp[id], 0x3024, GENMASK(31, 0), 0x00083002);
regmap_update_bits(ss->regmap_pextp[id], 0x3010, GENMASK(31, 0), 0x00022220);
regmap_update_bits(ss->regmap_pextp[id], 0x5064, GENMASK(31, 0), 0x0F020A01);
regmap_update_bits(ss->regmap_pextp[id], 0x50B4, GENMASK(31, 0), 0x06100600);
regmap_update_bits(ss->regmap_pextp[id], 0x3048, GENMASK(31, 0), 0x49664100);
regmap_update_bits(ss->regmap_pextp[id], 0x3050, GENMASK(31, 0), 0x00000000);
regmap_update_bits(ss->regmap_pextp[id], 0x3054, GENMASK(31, 0), 0x00000000);
regmap_update_bits(ss->regmap_pextp[id], 0x306C, GENMASK(31, 0), 0x00000F00);
regmap_update_bits(ss->regmap_pextp[id], 0xA060, GENMASK(31, 0), 0x00040000);
regmap_update_bits(ss->regmap_pextp[id], 0x90D0, GENMASK(31, 0), 0x00000001);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0200E800);
udelay(150);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0200C111);
ndelay(1020);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0200C101);
udelay(15);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0202C111);
ndelay(1020);
regmap_update_bits(ss->regmap_pextp[id], 0x0070, GENMASK(31, 0), 0x0202C101);
udelay(100);
regmap_update_bits(ss->regmap_pextp[id], 0x30B0, GENMASK(31, 0), 0x00000030);
regmap_update_bits(ss->regmap_pextp[id], 0x00F4, GENMASK(31, 0), 0x80201F00);
regmap_update_bits(ss->regmap_pextp[id], 0x3040, GENMASK(31, 0), 0x30000000);
udelay(400);
}
void mtk_usxgmii_reset(struct mtk_xgmii *ss, int mac_id)
{
struct mtk_eth *eth = ss->eth;
u32 id = mtk_mac2xgmii_id(eth, mac_id);
if (id < 0 || id >= MTK_MAX_DEVS || !eth->toprgu)
return;
switch (mac_id) {
case MTK_GMAC2_ID:
regmap_update_bits(eth->toprgu, 0xFC, GENMASK(31, 0), 0x0000A004);
regmap_update_bits(eth->toprgu, 0x18, GENMASK(31, 0), 0x88F0A004);
regmap_update_bits(eth->toprgu, 0xFC, GENMASK(31, 0), 0x00000000);
regmap_update_bits(eth->toprgu, 0x18, GENMASK(31, 0), 0x88F00000);
regmap_update_bits(eth->toprgu, 0x18, GENMASK(31, 0), 0x00F00000);
break;
case MTK_GMAC3_ID:
regmap_update_bits(eth->toprgu, 0xFC, GENMASK(31, 0), 0x00005002);
regmap_update_bits(eth->toprgu, 0x18, GENMASK(31, 0), 0x88F05002);
regmap_update_bits(eth->toprgu, 0xFC, GENMASK(31, 0), 0x00000000);
regmap_update_bits(eth->toprgu, 0x18, GENMASK(31, 0), 0x88F00000);
regmap_update_bits(eth->toprgu, 0x18, GENMASK(31, 0), 0x00F00000);
break;
}
udelay(100);
}
int mtk_usxgmii_setup_mode_an(struct mtk_xgmii *ss, int mac_id, int max_speed)
{
if (mac_id < 0 || mac_id >= MTK_MAX_DEVS)
return -EINVAL;
if ((max_speed != SPEED_10000) && (max_speed != SPEED_5000))
return -EINVAL;
mtk_xfi_pll_enable(ss);
mtk_usxgmii_reset(ss, mac_id);
mtk_usxgmii_setup_phya_an_10000(ss, mac_id);
return 0;
}
int mtk_usxgmii_setup_mode_force(struct mtk_xgmii *ss, int mac_id, int max_speed)
{
if (mac_id < 0 || mac_id >= MTK_MAX_DEVS)
return -EINVAL;
if ((max_speed != SPEED_10000) && (max_speed != SPEED_5000))
return -EINVAL;
mtk_xfi_pll_enable(ss);
mtk_usxgmii_reset(ss, mac_id);
mtk_usxgmii_setup_phya_force(ss, mac_id, max_speed);
return 0;
}

View File

@@ -1,52 +0,0 @@
Index: linux-5.4.154/drivers/net/ethernet/mediatek/mtk_eth_soc.c
===================================================================
--- linux-5.4.154.orig/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ linux-5.4.154/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1403,7 +1403,7 @@ static int mtk_poll_rx(struct napi_struc
goto release_desc;
/* alloc new buffer */
- new_data = napi_alloc_frag(ring->frag_size);
+ new_data = kmalloc(ring->frag_size, GFP_ATOMIC);
if (unlikely(!new_data)) {
netdev->stats.rx_dropped++;
goto release_desc;
@@ -1414,7 +1414,7 @@ static int mtk_poll_rx(struct napi_struc
ring->buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
- skb_free_frag(new_data);
+ kfree(new_data);
netdev->stats.rx_dropped++;
goto release_desc;
}
@@ -1423,9 +1423,9 @@ static int mtk_poll_rx(struct napi_struc
ring->buf_size, DMA_FROM_DEVICE);
/* receive data */
- skb = build_skb(data, ring->frag_size);
+ skb = build_skb(data, 0);
if (unlikely(!skb)) {
- skb_free_frag(data);
+ kfree(data);
netdev->stats.rx_dropped++;
goto skip_rx;
}
@@ -1866,7 +1866,7 @@ static int mtk_rx_alloc(struct mtk_eth *
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
- ring->data[i] = netdev_alloc_frag(ring->frag_size);
+ ring->data[i] = kmalloc(ring->frag_size, GFP_ATOMIC);
if (!ring->data[i])
return -ENOMEM;
}
@@ -1953,7 +1953,7 @@ static void mtk_rx_clean(struct mtk_eth
ring->dma[i].rxd1,
ring->buf_size,
DMA_FROM_DEVICE);
- skb_free_frag(ring->data[i]);
+ kfree(ring->data[i]);
}
kfree(ring->data);
ring->data = NULL;