1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
9 #include <linux/of_device.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/regmap.h>
15 #include <linux/clk.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/if_vlan.h>
18 #include <linux/reset.h>
19 #include <linux/tcp.h>
20 #include <linux/interrupt.h>
21 #include <linux/pinctrl/devinfo.h>
22 #include <linux/phylink.h>
23 #include <linux/jhash.h>
24 #include <linux/bitfield.h>
26 #include <net/dst_metadata.h>
28 #include "mtk_eth_soc.h"
31 static int mtk_msg_level = -1;
32 module_param_named(msg_level, mtk_msg_level, int, 0);
33 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
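/* The two helper macros below describe one ethtool counter each: the string
 * is the counter name and the second field is its u64-sized offset within
 * struct mtk_hw_stats (the XDP counters live in the nested xdp_stats struct).
 */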
35 #define MTK_ETHTOOL_STAT(x) { #x, \
36 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
38 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
39 offsetof(struct mtk_hw_stats, xdp_stats.x) / \
42 static const struct mtk_reg_map mtk_reg_map = {
43 .tx_irq_mask = 0x1a1c,
44 .tx_irq_status = 0x1a18,
54 .adma_rx_dbg0 = 0x0a38,
67 .tx_sch_rate = 0x1a14,
80 .gdma_to_ppe = 0x4444,
90 static const struct mtk_reg_map mt7628_reg_map = {
91 .tx_irq_mask = 0x0a28,
92 .tx_irq_status = 0x0a20,
100 .irq_status = 0x0a20,
106 static const struct mtk_reg_map mt7986_reg_map = {
107 .tx_irq_mask = 0x461c,
108 .tx_irq_status = 0x4618,
111 .rx_cnt_cfg = 0x6104,
116 .irq_status = 0x6220,
118 .adma_rx_dbg0 = 0x6238,
125 .rx_cnt_cfg = 0x4504,
141 .tx_sch_rate = 0x4798,
144 .gdma_to_ppe = 0x3333,
150 .pse_iq_sta = 0x0180,
151 .pse_oq_sta = 0x01a0,
154 /* strings used by ethtool */
155 static const struct mtk_ethtool_stats {
156 char str[ETH_GSTRING_LEN];
158 } mtk_ethtool_stats[] = {
159 MTK_ETHTOOL_STAT(tx_bytes),
160 MTK_ETHTOOL_STAT(tx_packets),
161 MTK_ETHTOOL_STAT(tx_skip),
162 MTK_ETHTOOL_STAT(tx_collisions),
163 MTK_ETHTOOL_STAT(rx_bytes),
164 MTK_ETHTOOL_STAT(rx_packets),
165 MTK_ETHTOOL_STAT(rx_overflow),
166 MTK_ETHTOOL_STAT(rx_fcs_errors),
167 MTK_ETHTOOL_STAT(rx_short_errors),
168 MTK_ETHTOOL_STAT(rx_long_errors),
169 MTK_ETHTOOL_STAT(rx_checksum_errors),
170 MTK_ETHTOOL_STAT(rx_flow_control_packets),
171 MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
172 MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
173 MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
174 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
175 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
176 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
177 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
180 static const char * const mtk_clks_source_name[] = {
181 "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
182 "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
183 "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
184 "sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
187 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
189 __raw_writel(val, eth->base + reg);
192 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
194 return __raw_readl(eth->base + reg);
197 static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
201 val = mtk_r32(eth, reg);
204 mtk_w32(eth, val, reg);
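/* mtk_m32() is the read-modify-write helper: clear the bits in @mask, set
 * the bits in @set, write the result back. Usage sketch (taken from
 * mtk_mac_config() further below):
 *
 *   mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL); // release RXC reset
 */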
208 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
210 unsigned long t_start = jiffies;
213 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
215 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
220 dev_err(eth->dev, "mdio: MDIO timeout\n");
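/* Every indirect MDIO command below is bracketed by mtk_mdio_busy_wait(),
 * which polls PHY_IAC_ACCESS until the controller is idle. Clause-22
 * accesses need a single MTK_PHY_IAC command carrying register, PHY address
 * and (for writes) the data; Clause-45 accesses first latch the 16-bit
 * register address with PHY_IAC_CMD_C45_ADDR and then issue a second
 * command to move the data.
 */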
224 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
229 ret = mtk_mdio_busy_wait(eth);
233 mtk_w32(eth, PHY_IAC_ACCESS |
236 PHY_IAC_REG(phy_reg) |
237 PHY_IAC_ADDR(phy_addr) |
238 PHY_IAC_DATA(write_data),
241 ret = mtk_mdio_busy_wait(eth);
248 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
249 u32 devad, u32 phy_reg, u32 write_data)
253 ret = mtk_mdio_busy_wait(eth);
257 mtk_w32(eth, PHY_IAC_ACCESS |
259 PHY_IAC_CMD_C45_ADDR |
261 PHY_IAC_ADDR(phy_addr) |
262 PHY_IAC_DATA(phy_reg),
265 ret = mtk_mdio_busy_wait(eth);
269 mtk_w32(eth, PHY_IAC_ACCESS |
273 PHY_IAC_ADDR(phy_addr) |
274 PHY_IAC_DATA(write_data),
277 ret = mtk_mdio_busy_wait(eth);
284 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
288 ret = mtk_mdio_busy_wait(eth);
292 mtk_w32(eth, PHY_IAC_ACCESS |
294 PHY_IAC_CMD_C22_READ |
295 PHY_IAC_REG(phy_reg) |
296 PHY_IAC_ADDR(phy_addr),
299 ret = mtk_mdio_busy_wait(eth);
303 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
306 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
307 u32 devad, u32 phy_reg)
311 ret = mtk_mdio_busy_wait(eth);
315 mtk_w32(eth, PHY_IAC_ACCESS |
317 PHY_IAC_CMD_C45_ADDR |
319 PHY_IAC_ADDR(phy_addr) |
320 PHY_IAC_DATA(phy_reg),
323 ret = mtk_mdio_busy_wait(eth);
327 mtk_w32(eth, PHY_IAC_ACCESS |
329 PHY_IAC_CMD_C45_READ |
331 PHY_IAC_ADDR(phy_addr),
334 ret = mtk_mdio_busy_wait(eth);
338 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
341 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
342 int phy_reg, u16 val)
344 struct mtk_eth *eth = bus->priv;
346 return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
349 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
350 int devad, int phy_reg, u16 val)
352 struct mtk_eth *eth = bus->priv;
354 return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
357 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
359 struct mtk_eth *eth = bus->priv;
361 return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
364 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
367 struct mtk_eth *eth = bus->priv;
369 return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
372 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
373 phy_interface_t interface)
377 /* Check DDR memory type.
378 * Currently TRGMII mode with DDR2 memory is not supported.
380 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
381 if (interface == PHY_INTERFACE_MODE_TRGMII &&
382 val & SYSCFG_DRAM_TYPE_DDR2) {
384 "TRGMII mode with DDR2 memory is not supported!\n");
388 val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
389 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
391 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
392 ETHSYS_TRGMII_MT7621_MASK, val);
397 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
398 phy_interface_t interface, int speed)
403 if (interface == PHY_INTERFACE_MODE_TRGMII) {
404 mtk_w32(eth, TRGMII_MODE, INTF_MODE);
406 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
408 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
412 val = (speed == SPEED_1000) ?
413 INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
414 mtk_w32(eth, val, INTF_MODE);
416 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
417 ETHSYS_TRGMII_CLK_SEL362_5,
418 ETHSYS_TRGMII_CLK_SEL362_5);
420 val = (speed == SPEED_1000) ? 250000000 : 500000000;
421 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
423 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
425 val = (speed == SPEED_1000) ?
426 RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
427 mtk_w32(eth, val, TRGMII_RCK_CTRL);
429 val = (speed == SPEED_1000) ?
430 TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
431 mtk_w32(eth, val, TRGMII_TCK_CTRL);
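/* For TRGMII the MAC-side interface clock comes from the dedicated TRGPLL;
 * in plain RGMII mode the code above instead selects the 362.5 MHz
 * reference and scales the PLL (500 MHz for 10/100, 250 MHz for 1000)
 * before programming the RX/TX clock controls.
 */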
434 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
435 phy_interface_t interface)
437 struct mtk_mac *mac = container_of(config, struct mtk_mac,
439 struct mtk_eth *eth = mac->hw;
442 if (interface == PHY_INTERFACE_MODE_SGMII ||
443 phy_interface_mode_is_8023z(interface)) {
444 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
447 return mtk_sgmii_select_pcs(eth->sgmii, sid);
453 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
454 const struct phylink_link_state *state)
456 struct mtk_mac *mac = container_of(config, struct mtk_mac,
458 struct mtk_eth *eth = mac->hw;
459 int val, ge_mode, err = 0;
462 /* MT76x8 has no hardware settings for the MAC */
463 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
464 mac->interface != state->interface) {
465 /* Setup soc pin functions */
466 switch (state->interface) {
467 case PHY_INTERFACE_MODE_TRGMII:
470 if (!MTK_HAS_CAPS(mac->hw->soc->caps,
474 case PHY_INTERFACE_MODE_RGMII_TXID:
475 case PHY_INTERFACE_MODE_RGMII_RXID:
476 case PHY_INTERFACE_MODE_RGMII_ID:
477 case PHY_INTERFACE_MODE_RGMII:
478 case PHY_INTERFACE_MODE_MII:
479 case PHY_INTERFACE_MODE_REVMII:
480 case PHY_INTERFACE_MODE_RMII:
481 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
482 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
487 case PHY_INTERFACE_MODE_1000BASEX:
488 case PHY_INTERFACE_MODE_2500BASEX:
489 case PHY_INTERFACE_MODE_SGMII:
490 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
491 err = mtk_gmac_sgmii_path_setup(eth, mac->id);
496 case PHY_INTERFACE_MODE_GMII:
497 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
498 err = mtk_gmac_gephy_path_setup(eth, mac->id);
507 /* Setup clock for 1st gmac */
508 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
509 !phy_interface_mode_is_8023z(state->interface) &&
510 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
511 if (MTK_HAS_CAPS(mac->hw->soc->caps,
512 MTK_TRGMII_MT7621_CLK)) {
513 if (mt7621_gmac0_rgmii_adjust(mac->hw,
517 /* FIXME: this is incorrect. Not only does it
518 * use state->speed (which is not guaranteed
519 * to be correct) but it also makes use of it
520 * in a code path that will only be reachable
521 * when the PHY interface mode changes, not
522 * when the speed changes. Consequently, RGMII
523 * is probably broken.
525 mtk_gmac0_rgmii_adjust(mac->hw,
529 /* mt7623_pad_clk_setup */
530 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
532 TD_DM_DRVP(8) | TD_DM_DRVN(8),
535 /* Assert/release MT7623 RXC reset */
536 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
538 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
543 switch (state->interface) {
544 case PHY_INTERFACE_MODE_MII:
545 case PHY_INTERFACE_MODE_GMII:
548 case PHY_INTERFACE_MODE_REVMII:
551 case PHY_INTERFACE_MODE_RMII:
560 /* put the gmac into the right mode */
561 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
562 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
563 val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
564 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
566 mac->interface = state->interface;
570 if (state->interface == PHY_INTERFACE_MODE_SGMII ||
571 phy_interface_mode_is_8023z(state->interface)) {
572 /* The path GMAC to SGMII will be enabled once the SGMIISYS is
575 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
577 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
579 ~(u32)SYSCFG0_SGMII_MASK);
581 /* Save the syscfg0 value for mac_finish */
583 } else if (phylink_autoneg_inband(mode)) {
585 "In-band mode not supported in non SGMII mode!\n");
592 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
593 mac->id, phy_modes(state->interface));
597 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
598 mac->id, phy_modes(state->interface), err);
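/* mtk_mac_finish() runs after phylink has configured the PCS: it restores
 * the SGMII bits of ETHSYS_SYSCFG0 saved in mac->syscfg0 by
 * mtk_mac_config() and re-applies the fixed MCR bits, touching MTK_MAC_MCR
 * only when the value actually changed.
 */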
601 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
602 phy_interface_t interface)
604 struct mtk_mac *mac = container_of(config, struct mtk_mac,
606 struct mtk_eth *eth = mac->hw;
607 u32 mcr_cur, mcr_new;
610 if (interface == PHY_INTERFACE_MODE_SGMII ||
611 phy_interface_mode_is_8023z(interface))
612 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
613 SYSCFG0_SGMII_MASK, mac->syscfg0);
616 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
618 mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
619 MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
620 MAC_MCR_RX_FIFO_CLR_DIS;
622 /* Only update control register when needed! */
623 if (mcr_new != mcr_cur)
624 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
629 static void mtk_mac_pcs_get_state(struct phylink_config *config,
630 struct phylink_link_state *state)
632 struct mtk_mac *mac = container_of(config, struct mtk_mac,
634 u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
636 state->link = (pmsr & MAC_MSR_LINK);
637 state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
639 switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
641 state->speed = SPEED_10;
643 case MAC_MSR_SPEED_100:
644 state->speed = SPEED_100;
646 case MAC_MSR_SPEED_1000:
647 state->speed = SPEED_1000;
650 state->speed = SPEED_UNKNOWN;
654 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
655 if (pmsr & MAC_MSR_RX_FC)
656 state->pause |= MLO_PAUSE_RX;
657 if (pmsr & MAC_MSR_TX_FC)
658 state->pause |= MLO_PAUSE_TX;
661 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
662 phy_interface_t interface)
664 struct mtk_mac *mac = container_of(config, struct mtk_mac,
666 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
668 mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
669 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
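/* Editor's sketch of the QDMA shaper encoding, inferred from the constants
 * below (not confirmed against a datasheet): the MIN/MAX rate appears to be
 * rate_kbps = MAN * 10^EXP, so MAN=1/EXP=4 is the 10 Mbit/s minimum noted
 * below and MAN=105/EXP=4 is 1.05 Gbit/s for a 1G link.
 */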
672 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
675 const struct mtk_soc_data *soc = eth->soc;
678 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
681 val = MTK_QTX_SCH_MIN_RATE_EN |
682 /* minimum: 10 Mbps */
683 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
684 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
685 MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
686 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
687 val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
689 if (IS_ENABLED(CONFIG_SOC_MT7621)) {
692 val |= MTK_QTX_SCH_MAX_RATE_EN |
693 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
694 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
695 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
698 val |= MTK_QTX_SCH_MAX_RATE_EN |
699 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
700 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
701 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
704 val |= MTK_QTX_SCH_MAX_RATE_EN |
705 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
706 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
707 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
715 val |= MTK_QTX_SCH_MAX_RATE_EN |
716 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
717 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
718 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
721 val |= MTK_QTX_SCH_MAX_RATE_EN |
722 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
723 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
724 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
727 val |= MTK_QTX_SCH_MAX_RATE_EN |
728 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
729 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
730 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
737 ofs = MTK_QTX_OFFSET * idx;
738 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
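/* Link-up handler: clear the previously forced speed/duplex/pause bits,
 * force the newly negotiated values, and finally enable the MAC transmitter
 * and receiver in a single MCR write.
 */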
741 static void mtk_mac_link_up(struct phylink_config *config,
742 struct phy_device *phy,
743 unsigned int mode, phy_interface_t interface,
744 int speed, int duplex, bool tx_pause, bool rx_pause)
746 struct mtk_mac *mac = container_of(config, struct mtk_mac,
750 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
751 mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
752 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
753 MAC_MCR_FORCE_RX_FC);
755 /* Configure speed */
760 mcr |= MAC_MCR_SPEED_1000;
763 mcr |= MAC_MCR_SPEED_100;
767 /* Configure duplex */
768 if (duplex == DUPLEX_FULL)
769 mcr |= MAC_MCR_FORCE_DPX;
771 /* Configure pause modes - phylink will avoid these for half duplex */
773 mcr |= MAC_MCR_FORCE_TX_FC;
775 mcr |= MAC_MCR_FORCE_RX_FC;
777 mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
778 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
781 static const struct phylink_mac_ops mtk_phylink_ops = {
782 .mac_select_pcs = mtk_mac_select_pcs,
783 .mac_pcs_get_state = mtk_mac_pcs_get_state,
784 .mac_config = mtk_mac_config,
785 .mac_finish = mtk_mac_finish,
786 .mac_link_down = mtk_mac_link_down,
787 .mac_link_up = mtk_mac_link_up,
790 static int mtk_mdio_init(struct mtk_eth *eth)
792 struct device_node *mii_np;
795 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
797 dev_err(eth->dev, "no %s child node found", "mdio-bus");
801 if (!of_device_is_available(mii_np)) {
806 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
812 eth->mii_bus->name = "mdio";
813 eth->mii_bus->read = mtk_mdio_read_c22;
814 eth->mii_bus->write = mtk_mdio_write_c22;
815 eth->mii_bus->read_c45 = mtk_mdio_read_c45;
816 eth->mii_bus->write_c45 = mtk_mdio_write_c45;
817 eth->mii_bus->priv = eth;
818 eth->mii_bus->parent = eth->dev;
820 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
821 ret = of_mdiobus_register(eth->mii_bus, mii_np);
828 static void mtk_mdio_cleanup(struct mtk_eth *eth)
833 mdiobus_unregister(eth->mii_bus);
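/* The TX and RX interrupt mask registers are shared read-modify-write state
 * between the IRQ handlers and NAPI, so the helpers below serialize updates
 * with tx_irq_lock/rx_irq_lock held IRQ-safe.
 */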
836 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
841 spin_lock_irqsave(&eth->tx_irq_lock, flags);
842 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
843 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
844 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
847 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
852 spin_lock_irqsave(&eth->tx_irq_lock, flags);
853 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
854 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
855 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
858 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
863 spin_lock_irqsave(&eth->rx_irq_lock, flags);
864 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
865 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
866 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
869 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
874 spin_lock_irqsave(&eth->rx_irq_lock, flags);
875 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
876 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
877 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
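/* The station address is split across two registers: ADRH carries the two
 * high bytes and ADRL the remaining four, both for the MT7628 SDM block and
 * for the per-GMAC GDMA variant used below.
 */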
880 static int mtk_set_mac_address(struct net_device *dev, void *p)
882 int ret = eth_mac_addr(dev, p);
883 struct mtk_mac *mac = netdev_priv(dev);
884 struct mtk_eth *eth = mac->hw;
885 const char *macaddr = dev->dev_addr;
890 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
893 spin_lock_bh(&mac->hw->page_lock);
894 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
895 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
896 MT7628_SDM_MAC_ADRH);
897 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
898 (macaddr[4] << 8) | macaddr[5],
899 MT7628_SDM_MAC_ADRL);
901 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
902 MTK_GDMA_MAC_ADRH(mac->id));
903 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
904 (macaddr[4] << 8) | macaddr[5],
905 MTK_GDMA_MAC_ADRL(mac->id));
907 spin_unlock_bh(&mac->hw->page_lock);
912 void mtk_stats_update_mac(struct mtk_mac *mac)
914 struct mtk_hw_stats *hw_stats = mac->hw_stats;
915 struct mtk_eth *eth = mac->hw;
917 u64_stats_update_begin(&hw_stats->syncp);
919 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
920 hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
921 hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
922 hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
923 hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
924 hw_stats->rx_checksum_errors +=
925 mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
927 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
928 unsigned int offs = hw_stats->reg_offset;
931 hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
932 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
934 hw_stats->rx_bytes += (stats << 32);
935 hw_stats->rx_packets +=
936 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
937 hw_stats->rx_overflow +=
938 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
939 hw_stats->rx_fcs_errors +=
940 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
941 hw_stats->rx_short_errors +=
942 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
943 hw_stats->rx_long_errors +=
944 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
945 hw_stats->rx_checksum_errors +=
946 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
947 hw_stats->rx_flow_control_packets +=
948 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
950 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
951 hw_stats->tx_collisions +=
952 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
953 hw_stats->tx_bytes +=
954 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
955 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
957 hw_stats->tx_bytes += (stats << 32);
958 hw_stats->tx_packets +=
959 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
962 u64_stats_update_end(&hw_stats->syncp);
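/* mtk_stats_update() deliberately uses spin_trylock(): if the lock is
 * contended, another context is already folding the hardware MIB counters
 * into hw_stats, so this round can safely be skipped.
 */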
965 static void mtk_stats_update(struct mtk_eth *eth)
969 for (i = 0; i < MTK_MAC_COUNT; i++) {
970 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
972 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
973 mtk_stats_update_mac(eth->mac[i]);
974 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
979 static void mtk_get_stats64(struct net_device *dev,
980 struct rtnl_link_stats64 *storage)
982 struct mtk_mac *mac = netdev_priv(dev);
983 struct mtk_hw_stats *hw_stats = mac->hw_stats;
986 if (netif_running(dev) && netif_device_present(dev)) {
987 if (spin_trylock_bh(&hw_stats->stats_lock)) {
988 mtk_stats_update_mac(mac);
989 spin_unlock_bh(&hw_stats->stats_lock);
994 start = u64_stats_fetch_begin(&hw_stats->syncp);
995 storage->rx_packets = hw_stats->rx_packets;
996 storage->tx_packets = hw_stats->tx_packets;
997 storage->rx_bytes = hw_stats->rx_bytes;
998 storage->tx_bytes = hw_stats->tx_bytes;
999 storage->collisions = hw_stats->tx_collisions;
1000 storage->rx_length_errors = hw_stats->rx_short_errors +
1001 hw_stats->rx_long_errors;
1002 storage->rx_over_errors = hw_stats->rx_overflow;
1003 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
1004 storage->rx_errors = hw_stats->rx_checksum_errors;
1005 storage->tx_aborted_errors = hw_stats->tx_skip;
1006 } while (u64_stats_fetch_retry(&hw_stats->syncp, start));
1008 storage->tx_errors = dev->stats.tx_errors;
1009 storage->rx_dropped = dev->stats.rx_dropped;
1010 storage->tx_dropped = dev->stats.tx_dropped;
1013 static inline int mtk_max_frag_size(int mtu)
1015 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
1016 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
1017 mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
1019 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1020 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1023 static inline int mtk_max_buf_size(int frag_size)
1025 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1026 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1028 WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
1033 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1034 struct mtk_rx_dma_v2 *dma_rxd)
1036 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1037 if (!(rxd->rxd2 & RX_DMA_DONE))
1040 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1041 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1042 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1043 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1044 rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1045 rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1051 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1053 unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1056 data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1059 return (void *)data;
1062 /* the qdma core needs scratch memory to be set up */
1063 static int mtk_init_fq_dma(struct mtk_eth *eth)
1065 const struct mtk_soc_data *soc = eth->soc;
1066 dma_addr_t phy_ring_tail;
1067 int cnt = MTK_QDMA_RING_SIZE;
1068 dma_addr_t dma_addr;
1071 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
1072 cnt * soc->txrx.txd_size,
1073 &eth->phy_scratch_ring,
1075 if (unlikely(!eth->scratch_ring))
1078 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1079 if (unlikely(!eth->scratch_head))
1082 dma_addr = dma_map_single(eth->dma_dev,
1083 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
1085 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1088 phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
1090 for (i = 0; i < cnt; i++) {
1091 struct mtk_tx_dma_v2 *txd;
1093 txd = eth->scratch_ring + i * soc->txrx.txd_size;
1094 txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1096 txd->txd2 = eth->phy_scratch_ring +
1097 (i + 1) * soc->txrx.txd_size;
1099 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1101 if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
1109 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1110 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1111 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1112 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
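/* The loop above chains cnt forward-linked descriptors (txd2 points at the
 * next descriptor, txd1 at its scratch page); the
 * fq_head/fq_tail/fq_count/fq_blen writes publish the ring to the QDMA
 * free-queue engine.
 */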
1117 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1119 return ring->dma + (desc - ring->phys);
1122 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1123 void *txd, u32 txd_size)
1125 int idx = (txd - ring->dma) / txd_size;
1127 return &ring->buf[idx];
1130 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1131 struct mtk_tx_dma *dma)
1133 return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1136 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1138 return (dma - ring->dma) / txd_size;
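/* The helpers above translate between descriptor virtual addresses, ring
 * DMA addresses and ring indexes with plain pointer arithmetic;
 * ring->dma_pdma is a one-to-one PDMA shadow of the QDMA ring.
 */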
1141 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1142 struct xdp_frame_bulk *bq, bool napi)
1144 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1145 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1146 dma_unmap_single(eth->dma_dev,
1147 dma_unmap_addr(tx_buf, dma_addr0),
1148 dma_unmap_len(tx_buf, dma_len0),
1150 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1151 dma_unmap_page(eth->dma_dev,
1152 dma_unmap_addr(tx_buf, dma_addr0),
1153 dma_unmap_len(tx_buf, dma_len0),
1157 if (dma_unmap_len(tx_buf, dma_len0)) {
1158 dma_unmap_page(eth->dma_dev,
1159 dma_unmap_addr(tx_buf, dma_addr0),
1160 dma_unmap_len(tx_buf, dma_len0),
1164 if (dma_unmap_len(tx_buf, dma_len1)) {
1165 dma_unmap_page(eth->dma_dev,
1166 dma_unmap_addr(tx_buf, dma_addr1),
1167 dma_unmap_len(tx_buf, dma_len1),
1172 if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1173 if (tx_buf->type == MTK_TYPE_SKB) {
1174 struct sk_buff *skb = tx_buf->data;
1177 napi_consume_skb(skb, napi);
1179 dev_kfree_skb_any(skb);
1181 struct xdp_frame *xdpf = tx_buf->data;
1183 if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1184 xdp_return_frame_rx_napi(xdpf);
1186 xdp_return_frame_bulk(xdpf, bq);
1188 xdp_return_frame(xdpf);
1192 tx_buf->data = NULL;
1195 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1196 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1197 size_t size, int idx)
1199 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1200 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1201 dma_unmap_len_set(tx_buf, dma_len0, size);
1204 txd->txd3 = mapped_addr;
1205 txd->txd2 |= TX_DMA_PLEN1(size);
1206 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1207 dma_unmap_len_set(tx_buf, dma_len1, size);
1209 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1210 txd->txd1 = mapped_addr;
1211 txd->txd2 = TX_DMA_PLEN0(size);
1212 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1213 dma_unmap_len_set(tx_buf, dma_len0, size);
1218 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1219 struct mtk_tx_dma_desc_info *info)
1221 struct mtk_mac *mac = netdev_priv(dev);
1222 struct mtk_eth *eth = mac->hw;
1223 struct mtk_tx_dma *desc = txd;
1226 WRITE_ONCE(desc->txd1, info->addr);
1228 data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1229 FIELD_PREP(TX_DMA_PQID, info->qid);
1232 WRITE_ONCE(desc->txd3, data);
1234 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1238 /* tx checksum offload */
1240 data |= TX_DMA_CHKSUM;
1241 /* vlan header offload */
1243 data |= TX_DMA_INS_VLAN | info->vlan_tci;
1245 WRITE_ONCE(desc->txd4, data);
1248 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1249 struct mtk_tx_dma_desc_info *info)
1251 struct mtk_mac *mac = netdev_priv(dev);
1252 struct mtk_tx_dma_v2 *desc = txd;
1253 struct mtk_eth *eth = mac->hw;
1256 WRITE_ONCE(desc->txd1, info->addr);
1258 data = TX_DMA_PLEN0(info->size);
1261 WRITE_ONCE(desc->txd3, data);
1263 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
1264 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1265 WRITE_ONCE(desc->txd4, data);
1270 data |= TX_DMA_TSO_V2;
1271 /* tx checksum offload */
1273 data |= TX_DMA_CHKSUM_V2;
1275 WRITE_ONCE(desc->txd5, data);
1278 if (info->first && info->vlan)
1279 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1280 WRITE_ONCE(desc->txd6, data);
1282 WRITE_ONCE(desc->txd7, 0);
1283 WRITE_ONCE(desc->txd8, 0);
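/* v1 descriptors pack forward port, checksum and VLAN flags into txd4; the
 * v2 layout above moves the forward port and SWC to txd4, TSO/checksum
 * offload to txd5 and VLAN insertion to txd6, clearing the unused
 * txd7/txd8 words.
 */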
1286 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1287 struct mtk_tx_dma_desc_info *info)
1289 struct mtk_mac *mac = netdev_priv(dev);
1290 struct mtk_eth *eth = mac->hw;
1292 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1293 mtk_tx_set_dma_desc_v2(dev, txd, info);
1295 mtk_tx_set_dma_desc_v1(dev, txd, info);
1298 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1299 int tx_num, struct mtk_tx_ring *ring, bool gso)
1301 struct mtk_tx_dma_desc_info txd_info = {
1302 .size = skb_headlen(skb),
1304 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1305 .vlan = skb_vlan_tag_present(skb),
1306 .qid = skb_get_queue_mapping(skb),
1307 .vlan_tci = skb_vlan_tag_get(skb),
1309 .last = !skb_is_nonlinear(skb),
1311 struct netdev_queue *txq;
1312 struct mtk_mac *mac = netdev_priv(dev);
1313 struct mtk_eth *eth = mac->hw;
1314 const struct mtk_soc_data *soc = eth->soc;
1315 struct mtk_tx_dma *itxd, *txd;
1316 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1317 struct mtk_tx_buf *itx_buf, *tx_buf;
1319 int queue = skb_get_queue_mapping(skb);
1322 txq = netdev_get_tx_queue(dev, queue);
1323 itxd = ring->next_free;
1324 itxd_pdma = qdma_to_pdma(ring, itxd);
1325 if (itxd == ring->last_free)
1328 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1329 memset(itx_buf, 0, sizeof(*itx_buf));
1331 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1333 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1336 mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1338 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1339 itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1340 MTK_TX_FLAGS_FPORT1;
1341 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1346 txd_pdma = qdma_to_pdma(ring, txd);
1348 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1349 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1350 unsigned int offset = 0;
1351 int frag_size = skb_frag_size(frag);
1354 bool new_desc = true;
1356 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1358 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1359 txd_pdma = qdma_to_pdma(ring, txd);
1360 if (txd == ring->last_free)
1368 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1369 txd_info.size = min_t(unsigned int, frag_size,
1370 soc->txrx.dma_max_len);
1371 txd_info.qid = queue;
1372 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1373 !(frag_size - txd_info.size);
1374 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1375 offset, txd_info.size,
1377 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1380 mtk_tx_set_dma_desc(dev, txd, &txd_info);
1382 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1383 soc->txrx.txd_size);
1385 memset(tx_buf, 0, sizeof(*tx_buf));
1386 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1387 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1388 tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1389 MTK_TX_FLAGS_FPORT1;
1391 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1392 txd_info.size, k++);
1394 frag_size -= txd_info.size;
1395 offset += txd_info.size;
1399 /* store skb to cleanup */
1400 itx_buf->type = MTK_TYPE_SKB;
1401 itx_buf->data = skb;
1403 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1405 txd_pdma->txd2 |= TX_DMA_LS0;
1407 txd_pdma->txd2 |= TX_DMA_LS1;
1410 netdev_tx_sent_queue(txq, skb->len);
1411 skb_tx_timestamp(skb);
1413 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1414 atomic_sub(n_desc, &ring->free_count);
1416 /* make sure that all changes to the dma ring are flushed before we
1421 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1422 if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1423 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1427 next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
1429 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1436 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1439 mtk_tx_unmap(eth, tx_buf, NULL, false);
1441 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1442 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1443 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1445 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1446 itxd_pdma = qdma_to_pdma(ring, itxd);
1447 } while (itxd != txd);
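/* Worst-case descriptor count for an skb: with GSO every fragment may need
 * several descriptors, since one descriptor maps at most
 * soc->txrx.dma_max_len bytes.
 */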
1452 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1457 if (skb_is_gso(skb)) {
1458 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1459 frag = &skb_shinfo(skb)->frags[i];
1460 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1461 eth->soc->txrx.dma_max_len);
1464 nfrags += skb_shinfo(skb)->nr_frags;
1470 static int mtk_queue_stopped(struct mtk_eth *eth)
1474 for (i = 0; i < MTK_MAC_COUNT; i++) {
1475 if (!eth->netdev[i])
1477 if (netif_queue_stopped(eth->netdev[i]))
1484 static void mtk_wake_queue(struct mtk_eth *eth)
1488 for (i = 0; i < MTK_MAC_COUNT; i++) {
1489 if (!eth->netdev[i])
1491 netif_tx_wake_all_queues(eth->netdev[i]);
1495 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1497 struct mtk_mac *mac = netdev_priv(dev);
1498 struct mtk_eth *eth = mac->hw;
1499 struct mtk_tx_ring *ring = &eth->tx_ring;
1500 struct net_device_stats *stats = &dev->stats;
1504 /* normally we can rely on the stack not calling this more than once,
1505 * however we have 2 queues running on the same ring so we need to lock
1508 spin_lock(&eth->page_lock);
1510 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1513 tx_num = mtk_cal_txd_req(eth, skb);
1514 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1515 netif_tx_stop_all_queues(dev);
1516 netif_err(eth, tx_queued, dev,
1517 "Tx Ring full when queue awake!\n");
1518 spin_unlock(&eth->page_lock);
1519 return NETDEV_TX_BUSY;
1522 /* TSO: fill MSS info in tcp checksum field */
1523 if (skb_is_gso(skb)) {
1524 if (skb_cow_head(skb, 0)) {
1525 netif_warn(eth, tx_err, dev,
1526 "GSO expand head fail.\n");
1530 if (skb_shinfo(skb)->gso_type &
1531 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1533 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1537 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1540 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1541 netif_tx_stop_all_queues(dev);
1543 spin_unlock(&eth->page_lock);
1545 return NETDEV_TX_OK;
1548 spin_unlock(&eth->page_lock);
1549 stats->tx_dropped++;
1550 dev_kfree_skb_any(skb);
1551 return NETDEV_TX_OK;
1554 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1557 struct mtk_rx_ring *ring;
1561 return &eth->rx_ring[0];
1563 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1564 struct mtk_rx_dma *rxd;
1566 ring = &eth->rx_ring[i];
1567 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1568 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1569 if (rxd->rxd2 & RX_DMA_DONE) {
1570 ring->calc_idx_update = true;
1578 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1580 struct mtk_rx_ring *ring;
1584 ring = &eth->rx_ring[0];
1585 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1587 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1588 ring = &eth->rx_ring[i];
1589 if (ring->calc_idx_update) {
1590 ring->calc_idx_update = false;
1591 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1597 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1599 return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
1602 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1603 struct xdp_rxq_info *xdp_q,
1606 struct page_pool_params pp_params = {
1608 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1610 .nid = NUMA_NO_NODE,
1611 .dev = eth->dma_dev,
1612 .offset = MTK_PP_HEADROOM,
1613 .max_len = MTK_PP_MAX_BUF_SIZE,
1615 struct page_pool *pp;
1618 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1620 pp = page_pool_create(&pp_params);
1624 err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
1625 eth->rx_napi.napi_id, PAGE_SIZE);
1629 err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1631 goto err_unregister_rxq;
1636 xdp_rxq_info_unreg(xdp_q);
1638 page_pool_destroy(pp);
1640 return ERR_PTR(err);
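/* The pool's DMA direction is fixed at creation time: with an XDP program
 * attached the buffers must be DMA_BIDIRECTIONAL so XDP_TX can transmit
 * directly out of an RX page (presumably DMA_FROM_DEVICE otherwise; the
 * else branch is not shown here).
 */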
1643 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1648 page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1652 *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1653 return page_address(page);
1656 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1658 if (ring->page_pool)
1659 page_pool_put_full_page(ring->page_pool,
1660 virt_to_head_page(data), napi);
1662 skb_free_frag(data);
1665 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1666 struct mtk_tx_dma_desc_info *txd_info,
1667 struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1668 void *data, u16 headroom, int index, bool dma_map)
1670 struct mtk_tx_ring *ring = &eth->tx_ring;
1671 struct mtk_mac *mac = netdev_priv(dev);
1672 struct mtk_tx_dma *txd_pdma;
1674 if (dma_map) { /* ndo_xdp_xmit */
1675 txd_info->addr = dma_map_single(eth->dma_dev, data,
1676 txd_info->size, DMA_TO_DEVICE);
1677 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1680 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1682 struct page *page = virt_to_head_page(data);
1684 txd_info->addr = page_pool_get_dma_addr(page) +
1685 sizeof(struct xdp_frame) + headroom;
1686 dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1687 txd_info->size, DMA_BIDIRECTIONAL);
1689 mtk_tx_set_dma_desc(dev, txd, txd_info);
1691 tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
1692 tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1693 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1695 txd_pdma = qdma_to_pdma(ring, txd);
1696 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1702 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1703 struct net_device *dev, bool dma_map)
1705 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1706 const struct mtk_soc_data *soc = eth->soc;
1707 struct mtk_tx_ring *ring = &eth->tx_ring;
1708 struct mtk_mac *mac = netdev_priv(dev);
1709 struct mtk_tx_dma_desc_info txd_info = {
1712 .last = !xdp_frame_has_frags(xdpf),
1715 int err, index = 0, n_desc = 1, nr_frags;
1716 struct mtk_tx_buf *htx_buf, *tx_buf;
1717 struct mtk_tx_dma *htxd, *txd;
1718 void *data = xdpf->data;
1720 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1723 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1724 if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1727 spin_lock(&eth->page_lock);
1729 txd = ring->next_free;
1730 if (txd == ring->last_free) {
1731 spin_unlock(&eth->page_lock);
1736 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
1737 memset(tx_buf, 0, sizeof(*tx_buf));
1741 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1742 data, xdpf->headroom, index, dma_map);
1749 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1750 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1751 if (txd == ring->last_free)
1754 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1755 soc->txrx.txd_size);
1756 memset(tx_buf, 0, sizeof(*tx_buf));
1760 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1761 txd_info.size = skb_frag_size(&sinfo->frags[index]);
1762 txd_info.last = index + 1 == nr_frags;
1763 txd_info.qid = mac->id;
1764 data = skb_frag_address(&sinfo->frags[index]);
1768 /* store xdpf for cleanup */
1769 htx_buf->data = xdpf;
1771 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1772 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
1775 txd_pdma->txd2 |= TX_DMA_LS0;
1777 txd_pdma->txd2 |= TX_DMA_LS1;
1780 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1781 atomic_sub(n_desc, &ring->free_count);
1783 /* make sure that all changes to the dma ring are flushed before we
1788 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1789 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1793 idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
1794 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
1795 MT7628_TX_CTX_IDX0);
1798 spin_unlock(&eth->page_lock);
1803 while (htxd != txd) {
1804 tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
1805 mtk_tx_unmap(eth, tx_buf, NULL, false);
1807 htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1808 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1809 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
1811 txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1814 htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1817 spin_unlock(&eth->page_lock);
1822 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
1823 struct xdp_frame **frames, u32 flags)
1825 struct mtk_mac *mac = netdev_priv(dev);
1826 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1827 struct mtk_eth *eth = mac->hw;
1830 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1833 for (i = 0; i < num_frame; i++) {
1834 if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
1839 u64_stats_update_begin(&hw_stats->syncp);
1840 hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
1841 hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
1842 u64_stats_update_end(&hw_stats->syncp);
1847 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
1848 struct xdp_buff *xdp, struct net_device *dev)
1850 struct mtk_mac *mac = netdev_priv(dev);
1851 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1852 u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
1853 struct bpf_prog *prog;
1858 prog = rcu_dereference(eth->prog);
1862 act = bpf_prog_run_xdp(prog, xdp);
1865 count = &hw_stats->xdp_stats.rx_xdp_pass;
1868 if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1873 count = &hw_stats->xdp_stats.rx_xdp_redirect;
1876 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1878 if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
1879 count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
1884 count = &hw_stats->xdp_stats.rx_xdp_tx;
1888 bpf_warn_invalid_xdp_action(dev, prog, act);
1891 trace_xdp_exception(dev, prog, act);
1897 page_pool_put_full_page(ring->page_pool,
1898 virt_to_head_page(xdp->data), true);
1901 u64_stats_update_begin(&hw_stats->syncp);
1902 *count = *count + 1;
1903 u64_stats_update_end(&hw_stats->syncp);
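/* mtk_xdp_run() bumps the xdp_stats counter matching the verdict under the
 * u64_stats seqlock; drop/abort paths return the page to the pool
 * immediately, while XDP_PASS leaves it to mtk_poll_rx() to build an skb
 * around the buffer.
 */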
1910 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1911 struct mtk_eth *eth)
1913 struct dim_sample dim_sample = {};
1914 struct mtk_rx_ring *ring;
1915 bool xdp_flush = false;
1917 struct sk_buff *skb;
1918 u8 *data, *new_data;
1919 struct mtk_rx_dma_v2 *rxd, trxd;
1920 int done = 0, bytes = 0;
1922 while (done < budget) {
1923 unsigned int pktlen, *rxdcsum;
1924 struct net_device *netdev;
1925 dma_addr_t dma_addr;
1929 ring = mtk_get_rx_ring(eth);
1930 if (unlikely(!ring))
1933 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1934 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1935 data = ring->data[idx];
1937 if (!mtk_rx_get_desc(eth, &trxd, rxd))
1940 /* find out which mac the packet comes from. values start at 1 */
1941 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1942 mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
1943 else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
1944 !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
1945 mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
1947 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1951 netdev = eth->netdev[mac];
1953 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1956 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1958 /* alloc new buffer */
1959 if (ring->page_pool) {
1960 struct page *page = virt_to_head_page(data);
1961 struct xdp_buff xdp;
1964 new_data = mtk_page_pool_get_buff(ring->page_pool,
1967 if (unlikely(!new_data)) {
1968 netdev->stats.rx_dropped++;
1972 dma_sync_single_for_cpu(eth->dma_dev,
1973 page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
1974 pktlen, page_pool_get_dma_dir(ring->page_pool));
1976 xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
1977 xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
1979 xdp_buff_clear_frags_flag(&xdp);
1981 ret = mtk_xdp_run(eth, ring, &xdp, netdev);
1982 if (ret == XDP_REDIRECT)
1985 if (ret != XDP_PASS)
1988 skb = build_skb(data, PAGE_SIZE);
1989 if (unlikely(!skb)) {
1990 page_pool_put_full_page(ring->page_pool,
1992 netdev->stats.rx_dropped++;
1996 skb_reserve(skb, xdp.data - xdp.data_hard_start);
1997 skb_put(skb, xdp.data_end - xdp.data);
1998 skb_mark_for_recycle(skb);
2000 if (ring->frag_size <= PAGE_SIZE)
2001 new_data = napi_alloc_frag(ring->frag_size);
2003 new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
2005 if (unlikely(!new_data)) {
2006 netdev->stats.rx_dropped++;
2010 dma_addr = dma_map_single(eth->dma_dev,
2011 new_data + NET_SKB_PAD + eth->ip_align,
2012 ring->buf_size, DMA_FROM_DEVICE);
2013 if (unlikely(dma_mapping_error(eth->dma_dev,
2015 skb_free_frag(new_data);
2016 netdev->stats.rx_dropped++;
2020 dma_unmap_single(eth->dma_dev, trxd.rxd1,
2021 ring->buf_size, DMA_FROM_DEVICE);
2023 skb = build_skb(data, ring->frag_size);
2024 if (unlikely(!skb)) {
2025 netdev->stats.rx_dropped++;
2026 skb_free_frag(data);
2030 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2031 skb_put(skb, pktlen);
2037 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2038 reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2039 hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2040 if (hash != MTK_RXD5_FOE_ENTRY)
2041 skb_set_hash(skb, jhash_1word(hash, 0),
2043 rxdcsum = &trxd.rxd3;
2045 reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2046 hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2047 if (hash != MTK_RXD4_FOE_ENTRY)
2048 skb_set_hash(skb, jhash_1word(hash, 0),
2050 rxdcsum = &trxd.rxd4;
2053 if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
2054 skb->ip_summed = CHECKSUM_UNNECESSARY;
2056 skb_checksum_none_assert(skb);
2057 skb->protocol = eth_type_trans(skb, netdev);
2059 /* When using VLAN untagging in combination with DSA, the
2060 * hardware treats the MTK special tag as a VLAN and untags it.
2062 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
2063 (trxd.rxd2 & RX_DMA_VTAG) && netdev_uses_dsa(netdev)) {
2064 unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
2066 if (port < ARRAY_SIZE(eth->dsa_meta) &&
2067 eth->dsa_meta[port])
2068 skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2071 if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2072 mtk_ppe_check_skb(eth->ppe[0], skb, hash);
2074 skb_record_rx_queue(skb, 0);
2075 napi_gro_receive(napi, skb);
2078 ring->data[idx] = new_data;
2079 rxd->rxd1 = (unsigned int)dma_addr;
2081 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2082 rxd->rxd2 = RX_DMA_LSO;
2084 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2086 ring->calc_idx = idx;
2092 /* make sure that all changes to the dma ring are flushed before
2096 mtk_update_rx_cpu_idx(eth);
2099 eth->rx_packets += done;
2100 eth->rx_bytes += bytes;
2101 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2103 net_dim(&eth->rx_dim, dim_sample);
2111 struct mtk_poll_state {
2112 struct netdev_queue *txq;
2119 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2120 struct sk_buff *skb)
2122 struct netdev_queue *txq;
2123 struct net_device *dev;
2124 unsigned int bytes = skb->len;
2128 eth->tx_bytes += bytes;
2130 dev = eth->netdev[mac];
2134 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2135 if (state->txq == txq) {
2137 state->bytes += bytes;
2142 netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2146 state->bytes = bytes;
2149 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2150 struct mtk_poll_state *state)
2152 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2153 struct mtk_tx_ring *ring = &eth->tx_ring;
2154 struct mtk_tx_buf *tx_buf;
2155 struct xdp_frame_bulk bq;
2156 struct mtk_tx_dma *desc;
2159 cpu = ring->last_free_ptr;
2160 dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2162 desc = mtk_qdma_phys_to_virt(ring, cpu);
2163 xdp_frame_bulk_init(&bq);
2165 while ((cpu != dma) && budget) {
2166 u32 next_cpu = desc->txd2;
2169 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2170 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2173 tx_buf = mtk_desc_to_tx_buf(ring, desc,
2174 eth->soc->txrx.txd_size);
2175 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
2181 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2182 if (tx_buf->type == MTK_TYPE_SKB)
2183 mtk_poll_tx_done(eth, state, mac, tx_buf->data);
2187 mtk_tx_unmap(eth, tx_buf, &bq, true);
2189 ring->last_free = desc;
2190 atomic_inc(&ring->free_count);
2194 xdp_flush_frame_bulk(&bq);
2196 ring->last_free_ptr = cpu;
2197 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2202 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2203 struct mtk_poll_state *state)
2205 struct mtk_tx_ring *ring = &eth->tx_ring;
2206 struct mtk_tx_buf *tx_buf;
2207 struct xdp_frame_bulk bq;
2208 struct mtk_tx_dma *desc;
2211 cpu = ring->cpu_idx;
2212 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2213 xdp_frame_bulk_init(&bq);
2215 while ((cpu != dma) && budget) {
2216 tx_buf = &ring->buf[cpu];
2220 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2221 if (tx_buf->type == MTK_TYPE_SKB)
2222 mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2225 mtk_tx_unmap(eth, tx_buf, &bq, true);
2227 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
2228 ring->last_free = desc;
2229 atomic_inc(&ring->free_count);
2231 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2233 xdp_flush_frame_bulk(&bq);
2235 ring->cpu_idx = cpu;
2240 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2242 struct mtk_tx_ring *ring = &eth->tx_ring;
2243 struct dim_sample dim_sample = {};
2244 struct mtk_poll_state state = {};
2246 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2247 budget = mtk_poll_tx_qdma(eth, budget, &state);
2249 budget = mtk_poll_tx_pdma(eth, budget, &state);
2252 netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2254 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2256 net_dim(&eth->tx_dim, dim_sample);
2258 if (mtk_queue_stopped(eth) &&
2259 (atomic_read(&ring->free_count) > ring->thresh))
2260 mtk_wake_queue(eth);
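/* Queues stopped because the ring filled up are woken again once the
 * reclaim above pushes free_count back over ring->thresh.
 */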
2265 static void mtk_handle_status_irq(struct mtk_eth *eth)
2267 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2269 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2270 mtk_stats_update(eth);
2271 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2276 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2278 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2279 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2282 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2283 mtk_handle_status_irq(eth);
2284 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2285 tx_done = mtk_poll_tx(eth, budget);
2287 if (unlikely(netif_msg_intr(eth))) {
2289 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2290 mtk_r32(eth, reg_map->tx_irq_status),
2291 mtk_r32(eth, reg_map->tx_irq_mask));
2294 if (tx_done == budget)
2297 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2300 if (napi_complete_done(napi, tx_done))
2301 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2306 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2308 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2309 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2310 int rx_done_total = 0;
2312 mtk_handle_status_irq(eth);
2317 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
2318 reg_map->pdma.irq_status);
2319 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2320 rx_done_total += rx_done;
2322 if (unlikely(netif_msg_intr(eth))) {
2324 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2325 mtk_r32(eth, reg_map->pdma.irq_status),
2326 mtk_r32(eth, reg_map->pdma.irq_mask));
2329 if (rx_done_total == budget)
2332 } while (mtk_r32(eth, reg_map->pdma.irq_status) &
2333 eth->soc->txrx.rx_irq_done_mask);
2335 if (napi_complete_done(napi, rx_done_total))
2336 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2338 return rx_done_total;
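/* TX ring setup: the ring is sized per DMA engine (MTK_QDMA_RING_SIZE for
 * QDMA, MTK_DMA_SIZE for PDMA-only SoCs) and descriptors are linked into a
 * circle through txd2 before the base/cpu/dma pointers are programmed.
 */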
2341 static int mtk_tx_alloc(struct mtk_eth *eth)
2343 const struct mtk_soc_data *soc = eth->soc;
2344 struct mtk_tx_ring *ring = &eth->tx_ring;
2345 int i, sz = soc->txrx.txd_size;
2346 struct mtk_tx_dma_v2 *txd;
2350 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2351 ring_size = MTK_QDMA_RING_SIZE;
2353 ring_size = MTK_DMA_SIZE;
2355 ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2360 ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2361 &ring->phys, GFP_KERNEL);
2365 for (i = 0; i < ring_size; i++) {
2366 int next = (i + 1) % ring_size;
2367 u32 next_ptr = ring->phys + next * sz;
2369 txd = ring->dma + i * sz;
2370 txd->txd2 = next_ptr;
2371 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2373 if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
2381 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
2382 * only as the framework. The real HW descriptors are the PDMA
2383 * descriptors in ring->dma_pdma.
2385 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2386 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2387 &ring->phys_pdma, GFP_KERNEL);
2388 if (!ring->dma_pdma)
2391 for (i = 0; i < ring_size; i++) {
2392 ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2393 ring->dma_pdma[i].txd4 = 0;
2397 ring->dma_size = ring_size;
2398 atomic_set(&ring->free_count, ring_size - 2);
2399 ring->next_free = ring->dma;
2400 ring->last_free = (void *)txd;
2401 ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2402 ring->thresh = MAX_SKB_FRAGS;
2404 /* make sure that all changes to the dma ring are flushed before we
2409 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2410 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2411 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2413 ring->phys + ((ring_size - 1) * sz),
2414 soc->reg_map->qdma.crx_ptr);
2415 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2417 for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2418 val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2419 mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2421 val = MTK_QTX_SCH_MIN_RATE_EN |
2422 /* minimum: 10 Mbps */
2423 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2424 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2425 MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2426 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2427 val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2428 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2429 ofs += MTK_QTX_OFFSET;
2431 val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2432 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2433 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2434 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2436 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2437 mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2438 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2439 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2448 static void mtk_tx_clean(struct mtk_eth *eth)
2450 const struct mtk_soc_data *soc = eth->soc;
2451 struct mtk_tx_ring *ring = &eth->tx_ring;
2455 for (i = 0; i < ring->dma_size; i++)
2456 mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2462 dma_free_coherent(eth->dma_dev,
2463 ring->dma_size * soc->txrx.txd_size,
2464 ring->dma, ring->phys);
2468 if (ring->dma_pdma) {
2469 dma_free_coherent(eth->dma_dev,
2470 ring->dma_size * soc->txrx.txd_size,
2471 ring->dma_pdma, ring->phys_pdma);
2472 ring->dma_pdma = NULL;
2476 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2478 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2479 struct mtk_rx_ring *ring;
2480 int rx_data_len, rx_dma_size;
2483 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2486 ring = &eth->rx_ring_qdma;
2488 ring = &eth->rx_ring[ring_no];
2491 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2492 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2493 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2495 rx_data_len = ETH_DATA_LEN;
2496 rx_dma_size = MTK_DMA_SIZE;
2499 ring->frag_size = mtk_max_frag_size(rx_data_len);
2500 ring->buf_size = mtk_max_buf_size(ring->frag_size);
2501 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2506 if (mtk_page_pool_enabled(eth)) {
2507 struct page_pool *pp;
2509 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2514 ring->page_pool = pp;
2517 ring->dma = dma_alloc_coherent(eth->dma_dev,
2518 rx_dma_size * eth->soc->txrx.rxd_size,
2519 &ring->phys, GFP_KERNEL);
2523 for (i = 0; i < rx_dma_size; i++) {
2524 struct mtk_rx_dma_v2 *rxd;
2525 dma_addr_t dma_addr;
2528 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2529 if (ring->page_pool) {
2530 data = mtk_page_pool_get_buff(ring->page_pool,
2531 &dma_addr, GFP_KERNEL);
2535 if (ring->frag_size <= PAGE_SIZE)
2536 data = netdev_alloc_frag(ring->frag_size);
2538 data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2543 dma_addr = dma_map_single(eth->dma_dev,
2544 data + NET_SKB_PAD + eth->ip_align,
2545 ring->buf_size, DMA_FROM_DEVICE);
2546 if (unlikely(dma_mapping_error(eth->dma_dev,
2548 skb_free_frag(data);
2552 rxd->rxd1 = (unsigned int)dma_addr;
2553 ring->data[i] = data;
2555 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2556 rxd->rxd2 = RX_DMA_LSO;
2558 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2562 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2570 ring->dma_size = rx_dma_size;
2571 ring->calc_idx_update = false;
2572 ring->calc_idx = rx_dma_size - 1;
2573 if (rx_flag == MTK_RX_FLAGS_QDMA)
2574 ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2575 ring_no * MTK_QRX_OFFSET;
2577 ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2578 ring_no * MTK_QRX_OFFSET;
2579 /* make sure that all changes to the dma ring are flushed before we continue */
2584 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2585 mtk_w32(eth, ring->phys,
2586 reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2587 mtk_w32(eth, rx_dma_size,
2588 reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2589 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2590 reg_map->qdma.rst_idx);
2592 mtk_w32(eth, ring->phys,
2593 reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2594 mtk_w32(eth, rx_dma_size,
2595 reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2596 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2597 reg_map->pdma.rst_idx);
2599 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2604 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
2608 if (ring->data && ring->dma) {
2609 for (i = 0; i < ring->dma_size; i++) {
2610 struct mtk_rx_dma *rxd;
2615 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2619 dma_unmap_single(eth->dma_dev, rxd->rxd1,
2620 ring->buf_size, DMA_FROM_DEVICE);
2621 mtk_rx_put_buff(ring, ring->data[i], false);
2628 dma_free_coherent(eth->dma_dev,
2629 ring->dma_size * eth->soc->txrx.rxd_size,
2630 ring->dma, ring->phys);
2634 if (ring->page_pool) {
2635 if (xdp_rxq_info_is_reg(&ring->xdp_q))
2636 xdp_rxq_info_unreg(&ring->xdp_q);
2637 page_pool_destroy(ring->page_pool);
2638 ring->page_pool = NULL;
2642 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2645 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2646 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2648 /* set LRO rings to auto-learn mode */
2649 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2651 /* validate LRO ring */
2652 ring_ctrl_dw2 |= MTK_RING_VLD;
2654 /* set AGE timer (unit: 20us) */
2655 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2656 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2658 /* set max AGG timer (unit: 20us) */
2659 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2661 /* set max LRO AGG count */
2662 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2663 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2665 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2666 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2667 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2668 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2671 /* IPv4 checksum update enable */
2672 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2674 /* switch priority comparison to packet count mode */
2675 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2677 /* bandwidth threshold setting */
2678 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2680 /* auto-learn score delta setting */
2681 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2683 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2684 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2685 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2687 /* set HW LRO mode & the max aggregation count for rx packets */
2688 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2690 /* the minimum remaining room of SDL0 in RXD required for lro aggregation */
2691 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2694 lro_ctrl_dw0 |= MTK_LRO_EN;
2696 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2697 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2702 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2707 /* relinquish lro rings, flush aggregated packets */
2708 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2710 /* wait for the relinquish requests to complete */
2711 for (i = 0; i < 10; i++) {
2712 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2713 if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2720 /* invalidate lro rings */
2721 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2722 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2724 /* disable HW LRO */
2725 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
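/* Update a HW LRO destination IP: the ring is marked invalid while the
 * DIP register is rewritten so the hardware never matches against a
 * half-updated address.
 */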
2728 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2732 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2734 /* invalidate the IP setting */
2735 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2737 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2739 /* validate the IP setting */
2740 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2743 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2747 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2749 /* invalidate the IP setting */
2750 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2752 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2755 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2760 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2761 if (mac->hwlro_ip[i])
2768 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2769 struct ethtool_rxnfc *cmd)
2771 struct ethtool_rx_flow_spec *fsp =
2772 (struct ethtool_rx_flow_spec *)&cmd->fs;
2773 struct mtk_mac *mac = netdev_priv(dev);
2774 struct mtk_eth *eth = mac->hw;
2777 if ((fsp->flow_type != TCP_V4_FLOW) ||
2778 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2779 (fsp->location > 1))
2782 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2783 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2785 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2787 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2792 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2793 struct ethtool_rxnfc *cmd)
2795 struct ethtool_rx_flow_spec *fsp =
2796 (struct ethtool_rx_flow_spec *)&cmd->fs;
2797 struct mtk_mac *mac = netdev_priv(dev);
2798 struct mtk_eth *eth = mac->hw;
2801 if (fsp->location > 1)
2804 mac->hwlro_ip[fsp->location] = 0;
2805 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2807 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2809 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2814 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2816 struct mtk_mac *mac = netdev_priv(dev);
2817 struct mtk_eth *eth = mac->hw;
2820 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2821 mac->hwlro_ip[i] = 0;
2822 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2824 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2827 mac->hwlro_ip_cnt = 0;
2830 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2831 struct ethtool_rxnfc *cmd)
2833 struct mtk_mac *mac = netdev_priv(dev);
2834 struct ethtool_rx_flow_spec *fsp =
2835 (struct ethtool_rx_flow_spec *)&cmd->fs;
2837 if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2840 /* only the TCP destination IPv4 address is meaningful; all other fields are don't-care */
2841 fsp->flow_type = TCP_V4_FLOW;
2842 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2843 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2845 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2846 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2847 fsp->h_u.tcp_ip4_spec.psrc = 0;
2848 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2849 fsp->h_u.tcp_ip4_spec.pdst = 0;
2850 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2851 fsp->h_u.tcp_ip4_spec.tos = 0;
2852 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2857 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2858 struct ethtool_rxnfc *cmd,
2861 struct mtk_mac *mac = netdev_priv(dev);
2865 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2866 if (mac->hwlro_ip[i]) {
2872 cmd->rule_cnt = cnt;
2877 static netdev_features_t mtk_fix_features(struct net_device *dev,
2878 netdev_features_t features)
2880 if (!(features & NETIF_F_LRO)) {
2881 struct mtk_mac *mac = netdev_priv(dev);
2882 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2885 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
2887 features |= NETIF_F_LRO;
2894 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2896 netdev_features_t diff = dev->features ^ features;
2898 if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
2899 mtk_hwlro_netdev_disable(dev);
2904 /* wait for DMA to finish whatever it is doing before we start using it again */
2905 static int mtk_dma_busy_wait(struct mtk_eth *eth)
2911 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2912 reg = eth->soc->reg_map->qdma.glo_cfg;
2914 reg = eth->soc->reg_map->pdma.glo_cfg;
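/* Poll the global config register every 5us until both the TX and RX
 * DMA busy flags clear, or give up after MTK_DMA_BUSY_TIMEOUT_US.
 */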
2916 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
2917 !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
2918 5, MTK_DMA_BUSY_TIMEOUT_US);
2920 dev_err(eth->dev, "DMA init timeout\n");
2925 static int mtk_dma_init(struct mtk_eth *eth)
2930 if (mtk_dma_busy_wait(eth))
2933 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2934 /* QDMA needs scratch memory for internal reordering of the tx packets */
2937 err = mtk_init_fq_dma(eth);
2942 err = mtk_tx_alloc(eth);
2946 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2947 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2952 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2957 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2958 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2962 err = mtk_hwlro_rx_init(eth);
2967 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2968 /* Enable random early drop and set drop threshold automatically */
2971 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2972 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
2973 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
2979 static void mtk_dma_free(struct mtk_eth *eth)
2981 const struct mtk_soc_data *soc = eth->soc;
2984 for (i = 0; i < MTK_MAC_COUNT; i++)
2986 netdev_reset_queue(eth->netdev[i]);
2987 if (eth->scratch_ring) {
2988 dma_free_coherent(eth->dma_dev,
2989 MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
2990 eth->scratch_ring, eth->phy_scratch_ring);
2991 eth->scratch_ring = NULL;
2992 eth->phy_scratch_ring = 0;
2995 mtk_rx_clean(eth, &eth->rx_ring[0]);
2996 mtk_rx_clean(eth, &eth->rx_ring_qdma);
2999 mtk_hwlro_rx_uninit(eth);
3000 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3001 mtk_rx_clean(eth, &eth->rx_ring[i]);
3004 kfree(eth->scratch_head);
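/* Check whether the frame engine has flagged a fatal condition
 * (scratch FQ empty, RX FIFO under-/overflow, or a TSO error) that
 * warrants scheduling a full reset.
 */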
3007 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3009 u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3011 return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3012 (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3013 (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3016 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3018 struct mtk_mac *mac = netdev_priv(dev);
3019 struct mtk_eth *eth = mac->hw;
3021 if (test_bit(MTK_RESETTING, &eth->state))
3024 if (!mtk_hw_reset_check(eth))
3027 eth->netdev[mac->id]->stats.tx_errors++;
3028 netif_err(eth, tx_err, dev, "transmit timed out\n");
3030 schedule_work(&eth->pending_work);
3033 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3035 struct mtk_eth *eth = _eth;
3038 if (likely(napi_schedule_prep(&eth->rx_napi))) {
3039 __napi_schedule(&eth->rx_napi);
3040 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3046 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3048 struct mtk_eth *eth = _eth;
3051 if (likely(napi_schedule_prep(&eth->tx_napi))) {
3052 __napi_schedule(&eth->tx_napi);
3053 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
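/* Some SoCs expose a single shared interrupt line for the whole frame
 * engine; mtk_handle_irq() below demultiplexes it by checking the
 * per-direction mask and status registers before dispatching to the
 * TX/RX handlers above.
 */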
3059 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3061 struct mtk_eth *eth = _eth;
3062 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3064 if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3065 eth->soc->txrx.rx_irq_done_mask) {
3066 if (mtk_r32(eth, reg_map->pdma.irq_status) &
3067 eth->soc->txrx.rx_irq_done_mask)
3068 mtk_handle_irq_rx(irq, _eth);
3070 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3071 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3072 mtk_handle_irq_tx(irq, _eth);
3078 #ifdef CONFIG_NET_POLL_CONTROLLER
3079 static void mtk_poll_controller(struct net_device *dev)
3081 struct mtk_mac *mac = netdev_priv(dev);
3082 struct mtk_eth *eth = mac->hw;
3084 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3085 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3086 mtk_handle_irq_rx(eth->irq[2], dev);
3087 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3088 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
3092 static int mtk_start_dma(struct mtk_eth *eth)
3094 u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3095 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3098 err = mtk_dma_init(eth);
3104 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3105 val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3106 val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3107 MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3108 MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3110 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3111 val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3112 MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3113 MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
3115 val |= MTK_RX_BT_32DWORDS;
3116 mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3119 MTK_RX_DMA_EN | rx_2b_offset |
3120 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3121 reg_map->pdma.glo_cfg);
3123 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3124 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3125 reg_map->pdma.glo_cfg);
3131 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
3135 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3138 for (i = 0; i < MTK_MAC_COUNT; i++) {
3139 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
3141 /* by default, set up the forward port to send frames to the PDMA */
3144 /* Enable RX checksum */
3145 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3149 if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
3150 val |= MTK_GDMA_SPECIAL_TAG;
3152 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
3154 /* Reset and enable PSE */
3155 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
3156 mtk_w32(eth, 0, MTK_RST_GL);
3160 static bool mtk_uses_dsa(struct net_device *dev)
3162 #if IS_ENABLED(CONFIG_NET_DSA)
3163 return netdev_uses_dsa(dev) &&
3164 dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
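/* When the link state of a DSA user port changes, propagate the
 * negotiated speed to the hardware TX queue backing that port
 * (port index + 3, mirroring mtk_select_queue()), so the per-queue
 * shaper matches the port speed.
 */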
3170 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3172 struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3173 struct mtk_eth *eth = mac->hw;
3174 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3175 struct ethtool_link_ksettings s;
3176 struct net_device *ldev;
3177 struct list_head *iter;
3178 struct dsa_port *dp;
3180 if (event != NETDEV_CHANGE)
3183 netdev_for_each_lower_dev(dev, ldev, iter) {
3184 if (netdev_priv(ldev) == mac)
3191 if (!dsa_slave_dev_check(dev))
3194 if (__ethtool_get_link_ksettings(dev, &s))
3197 if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3200 dp = dsa_port_from_netdev(dev);
3201 if (dp->index >= MTK_QDMA_NUM_QUEUES)
3204 if (mac->speed > 0 && mac->speed <= s.base.speed)
3207 mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3212 static int mtk_open(struct net_device *dev)
3214 struct mtk_mac *mac = netdev_priv(dev);
3215 struct mtk_eth *eth = mac->hw;
3218 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3220 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3225 /* we run 2 netdevs on the same dma ring so we only bring it up once */
3226 if (!refcount_read(&eth->dma_refcnt)) {
3227 const struct mtk_soc_data *soc = eth->soc;
3231 err = mtk_start_dma(eth);
3233 phylink_disconnect_phy(mac->phylink);
3237 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3238 mtk_ppe_start(eth->ppe[i]);
3240 gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
3242 mtk_gdm_config(eth, gdm_config);
3244 napi_enable(&eth->tx_napi);
3245 napi_enable(&eth->rx_napi);
3246 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3247 mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
3248 refcount_set(&eth->dma_refcnt, 1);
3251 refcount_inc(&eth->dma_refcnt);
3253 phylink_start(mac->phylink);
3254 netif_tx_start_all_queues(dev);
3256 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3259 if (mtk_uses_dsa(dev) && !eth->prog) {
3260 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3261 struct metadata_dst *md_dst = eth->dsa_meta[i];
3266 md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3271 md_dst->u.port_info.port_id = i;
3272 eth->dsa_meta[i] = md_dst;
3275 /* Hardware DSA untagging and VLAN RX offloading need to be
3276 * disabled if at least one MAC does not use DSA. */
3278 u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3280 val &= ~MTK_CDMP_STAG_EN;
3281 mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3283 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
3289 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3294 /* stop the dma engine */
3295 spin_lock_bh(&eth->page_lock);
3296 val = mtk_r32(eth, glo_cfg);
3297 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3299 spin_unlock_bh(&eth->page_lock);
3301 /* wait for dma stop */
3302 for (i = 0; i < 10; i++) {
3303 val = mtk_r32(eth, glo_cfg);
3304 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3312 static int mtk_stop(struct net_device *dev)
3314 struct mtk_mac *mac = netdev_priv(dev);
3315 struct mtk_eth *eth = mac->hw;
3318 phylink_stop(mac->phylink);
3320 netif_tx_disable(dev);
3322 phylink_disconnect_phy(mac->phylink);
3324 /* only shutdown DMA if this is the last user */
3325 if (!refcount_dec_and_test(&eth->dma_refcnt))
3328 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
3330 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3331 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3332 napi_disable(&eth->tx_napi);
3333 napi_disable(&eth->rx_napi);
3335 cancel_work_sync(&eth->rx_dim.work);
3336 cancel_work_sync(&eth->tx_dim.work);
3338 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3339 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3340 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3344 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3345 mtk_ppe_stop(eth->ppe[i]);
3350 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3351 struct netlink_ext_ack *extack)
3353 struct mtk_mac *mac = netdev_priv(dev);
3354 struct mtk_eth *eth = mac->hw;
3355 struct bpf_prog *old_prog;
3359 NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3363 if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3364 NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3368 need_update = !!eth->prog != !!prog;
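/* A full down/up cycle is only required when XDP is being enabled or
 * disabled entirely; replacing one program with another can be done
 * with the interface running.
 */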
3369 if (netif_running(dev) && need_update)
3372 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3374 bpf_prog_put(old_prog);
3376 if (netif_running(dev) && need_update)
3377 return mtk_open(dev);
3382 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3384 switch (xdp->command) {
3385 case XDP_SETUP_PROG:
3386 return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3392 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3394 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3398 usleep_range(1000, 1100);
3399 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3405 static void mtk_clk_disable(struct mtk_eth *eth)
3409 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3410 clk_disable_unprepare(eth->clks[clk]);
3413 static int mtk_clk_enable(struct mtk_eth *eth)
3417 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3418 ret = clk_prepare_enable(eth->clks[clk]);
3420 goto err_disable_clks;
3427 clk_disable_unprepare(eth->clks[clk]);
3432 static void mtk_dim_rx(struct work_struct *work)
3434 struct dim *dim = container_of(work, struct dim, work);
3435 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3436 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3437 struct dim_cq_moder cur_profile;
3440 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3442 spin_lock_bh(&eth->dim_lock);
3444 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3445 val &= MTK_PDMA_DELAY_TX_MASK;
3446 val |= MTK_PDMA_DELAY_RX_EN;
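/* Translate the DIM profile into hardware units: the delay fields
 * count in ticks of 20us, so round the requested usecs up and clamp
 * both the time and packet-count values to their field widths.
 */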
3448 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3449 val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3451 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3452 val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3454 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3455 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3456 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3458 spin_unlock_bh(&eth->dim_lock);
3460 dim->state = DIM_START_MEASURE;
3463 static void mtk_dim_tx(struct work_struct *work)
3465 struct dim *dim = container_of(work, struct dim, work);
3466 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3467 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3468 struct dim_cq_moder cur_profile;
3471 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3473 spin_lock_bh(&eth->dim_lock);
3475 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3476 val &= MTK_PDMA_DELAY_RX_MASK;
3477 val |= MTK_PDMA_DELAY_TX_EN;
3479 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3480 val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3482 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3483 val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3485 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3486 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3487 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3489 spin_unlock_bh(&eth->dim_lock);
3491 dim->state = DIM_START_MEASURE;
3494 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3496 struct mtk_eth *eth = mac->hw;
3497 u32 mcr_cur, mcr_new;
3499 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3502 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3503 mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3506 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3507 else if (val <= 1536)
3508 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3509 else if (val <= 1552)
3510 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3512 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3514 if (mcr_new != mcr_cur)
3515 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3518 static void mtk_hw_reset(struct mtk_eth *eth)
3522 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3523 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3524 val = RSTCTRL_PPE0_V2;
3529 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3530 val |= RSTCTRL_PPE1;
3532 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3534 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3535 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3539 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3543 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3547 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3551 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3553 if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3554 val & RSTCTRL_FE, 1, 1000)) {
3555 dev_err(eth->dev, "warm reset failed\n");
3560 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3561 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3563 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
3565 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3566 rst_mask |= RSTCTRL_PPE1;
3568 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3571 val = mtk_hw_reset_read(eth);
3572 if (!(val & rst_mask))
3573 dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3576 rst_mask |= RSTCTRL_FE;
3577 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3580 val = mtk_hw_reset_read(eth);
3582 dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
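/* Heuristic DMA-hang detection: sample the WDMA, QDMA and ADMA state
 * and only report a hang once the same subsystem has looked stuck on
 * more than two consecutive polls (the *_hang_count checks below).
 */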
3586 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
3588 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3589 bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
3590 bool oq_hang, cdm1_busy, adma_busy;
3591 bool wtx_busy, cdm_full, oq_free;
3592 u32 wdidx, val, gdm1_fc, gdm2_fc;
3593 bool qfsm_hang, qfwd_hang;
3596 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3599 /* WDMA sanity checks */
3600 wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
3602 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
3603 wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
3605 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
3606 cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
3608 oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
3609 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
3610 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
3612 if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
3613 if (++eth->reset.wdma_hang_count > 2) {
3614 eth->reset.wdma_hang_count = 0;
3620 /* QDMA sanity checks */
3621 qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
3622 qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
3624 gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
3625 gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
3626 gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
3627 gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
3628 gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
3629 gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
3631 if (qfsm_hang && qfwd_hang &&
3632 ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
3633 (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
3634 if (++eth->reset.qdma_hang_count > 2) {
3635 eth->reset.qdma_hang_count = 0;
3641 /* ADMA sanity checks */
3642 oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
3643 cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
3644 adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
3645 !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
3647 if (oq_hang && cdm1_busy && adma_busy) {
3648 if (++eth->reset.adma_hang_count > 2) {
3649 eth->reset.adma_hang_count = 0;
3655 eth->reset.wdma_hang_count = 0;
3656 eth->reset.qdma_hang_count = 0;
3657 eth->reset.adma_hang_count = 0;
3659 eth->reset.wdidx = wdidx;
3664 static void mtk_hw_reset_monitor_work(struct work_struct *work)
3666 struct delayed_work *del_work = to_delayed_work(work);
3667 struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
3668 reset.monitor_work);
3670 if (test_bit(MTK_RESETTING, &eth->state))
3673 /* DMA stuck checks */
3674 if (mtk_hw_check_dma_hang(eth))
3675 schedule_work(&eth->pending_work);
3678 schedule_delayed_work(&eth->reset.monitor_work,
3679 MTK_DMA_MONITOR_TIMEOUT);
3682 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
3684 u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3685 ETHSYS_DMA_AG_MAP_PPE;
3686 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3689 if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
3693 pm_runtime_enable(eth->dev);
3694 pm_runtime_get_sync(eth->dev);
3696 ret = mtk_clk_enable(eth);
3698 goto err_disable_pm;
3702 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3703 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3705 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3706 ret = device_reset(eth->dev);
3708 dev_err(eth->dev, "MAC reset failed!\n");
3709 goto err_disable_pm;
3712 /* set interrupt delays based on current Net DIM sample */
3713 mtk_dim_rx(&eth->rx_dim.work);
3714 mtk_dim_tx(&eth->tx_dim.work);
3716 /* disable delay and normal interrupt */
3717 mtk_tx_irq_disable(eth, ~0);
3718 mtk_rx_irq_disable(eth, ~0);
3726 mtk_hw_warm_reset(eth);
3730 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3731 /* Set FE to PDMAv2 if necessary */
3732 val = mtk_r32(eth, MTK_FE_GLO_MISC);
3733 mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
3737 /* Set GE2 driving and slew rate */
3738 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3741 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3744 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3747 /* Set linkdown as the default for each GMAC. Its own MCR would be set
3748 * up with the more appropriate value when the mtk_mac_config call is being invoked. */
3751 for (i = 0; i < MTK_MAC_COUNT; i++) {
3752 struct net_device *dev = eth->netdev[i];
3754 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3756 struct mtk_mac *mac = netdev_priv(dev);
3758 mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN);
3762 /* Tell the CDM to parse the MTK special tag on packets coming from
3763 * the CPU; this also works for untagged packets. */
3765 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3766 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3767 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3768 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3769 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3771 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3774 /* set interrupt delays based on current Net DIM sample */
3775 mtk_dim_rx(&eth->rx_dim.work);
3776 mtk_dim_tx(&eth->tx_dim.work);
3778 /* disable delay and normal interrupt */
3779 mtk_tx_irq_disable(eth, ~0);
3780 mtk_rx_irq_disable(eth, ~0);
3782 /* FE int grouping */
3783 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3784 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
3785 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3786 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
3787 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3789 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3790 /* PSE should not drop port8 and port9 packets from WDMA Tx */
3791 mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
3793 /* PSE should drop packets to port 8/9 on WDMA Rx ring full */
3794 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
3796 /* PSE Free Queue Flow Control */
3797 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3799 /* PSE config input queue threshold */
3800 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3801 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3802 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3803 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3804 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3805 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3806 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
3807 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
3809 /* PSE config output queue threshold */
3810 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3811 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3812 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3813 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3814 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3815 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3816 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3817 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
3819 /* GDM and CDM Threshold */
3820 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3821 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3822 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3823 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3824 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3825 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
3832 pm_runtime_put_sync(eth->dev);
3833 pm_runtime_disable(eth->dev);
3839 static int mtk_hw_deinit(struct mtk_eth *eth)
3841 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
3844 mtk_clk_disable(eth);
3846 pm_runtime_put_sync(eth->dev);
3847 pm_runtime_disable(eth->dev);
3852 static int __init mtk_init(struct net_device *dev)
3854 struct mtk_mac *mac = netdev_priv(dev);
3855 struct mtk_eth *eth = mac->hw;
3858 ret = of_get_ethdev_address(mac->of_node, dev);
3860 /* If the MAC address is invalid, use a random MAC address */
3861 eth_hw_addr_random(dev);
3862 dev_err(eth->dev, "generated random MAC address %pM\n",
3869 static void mtk_uninit(struct net_device *dev)
3871 struct mtk_mac *mac = netdev_priv(dev);
3872 struct mtk_eth *eth = mac->hw;
3874 phylink_disconnect_phy(mac->phylink);
3875 mtk_tx_irq_disable(eth, ~0);
3876 mtk_rx_irq_disable(eth, ~0);
3879 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
3881 int length = new_mtu + MTK_RX_ETH_HLEN;
3882 struct mtk_mac *mac = netdev_priv(dev);
3883 struct mtk_eth *eth = mac->hw;
3885 if (rcu_access_pointer(eth->prog) &&
3886 length > MTK_PP_MAX_BUF_SIZE) {
3887 netdev_err(dev, "Invalid MTU for XDP mode\n");
3891 mtk_set_mcr_max_rx(mac, length);
3897 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3899 struct mtk_mac *mac = netdev_priv(dev);
3905 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
3913 static void mtk_prepare_for_reset(struct mtk_eth *eth)
3918 /* disable FE P3 and P4 */
3919 val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3;
3920 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3921 val |= MTK_FE_LINK_DOWN_P4;
3922 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3924 /* adjust PPE configurations to prepare for reset */
3925 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3926 mtk_ppe_prepare_reset(eth->ppe[i]);
3928 /* disable NETSYS interrupts */
3929 mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
3931 /* force link down on all GMACs */
3932 for (i = 0; i < 2; i++) {
3933 val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
3934 mtk_w32(eth, val, MTK_MAC_MCR(i));
3938 static void mtk_pending_work(struct work_struct *work)
3940 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
3941 unsigned long restart = 0;
3946 set_bit(MTK_RESETTING, &eth->state);
3948 mtk_prepare_for_reset(eth);
3950 /* Run the reset preliminary configuration again in order to avoid any
3951 * possible race during the FE reset, since it can run after releasing the RTNL lock. */
3953 mtk_prepare_for_reset(eth);
3955 /* stop all devices to make sure that dma is properly shut down */
3956 for (i = 0; i < MTK_MAC_COUNT; i++) {
3957 if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
3960 mtk_stop(eth->netdev[i]);
3961 __set_bit(i, &restart);
3964 usleep_range(15000, 16000);
3967 pinctrl_select_state(eth->dev->pins->p,
3968 eth->dev->pins->default_state);
3969 mtk_hw_init(eth, true);
3971 /* restart DMA and enable IRQs */
3972 for (i = 0; i < MTK_MAC_COUNT; i++) {
3973 if (!test_bit(i, &restart))
3976 if (mtk_open(eth->netdev[i])) {
3977 netif_alert(eth, ifup, eth->netdev[i],
3978 "Driver up/down cycle failed\n");
3979 dev_close(eth->netdev[i]);
3983 /* enable FE P3 and P4 */
3984 val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3;
3985 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3986 val &= ~MTK_FE_LINK_DOWN_P4;
3987 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3989 clear_bit(MTK_RESETTING, &eth->state);
3991 mtk_wed_fe_reset_complete();
3996 static int mtk_free_dev(struct mtk_eth *eth)
4000 for (i = 0; i < MTK_MAC_COUNT; i++) {
4001 if (!eth->netdev[i])
4003 free_netdev(eth->netdev[i]);
4006 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4007 if (!eth->dsa_meta[i])
4009 metadata_dst_free(eth->dsa_meta[i]);
4015 static int mtk_unreg_dev(struct mtk_eth *eth)
4019 for (i = 0; i < MTK_MAC_COUNT; i++) {
4020 struct mtk_mac *mac;
4021 if (!eth->netdev[i])
4023 mac = netdev_priv(eth->netdev[i]);
4024 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4025 unregister_netdevice_notifier(&mac->device_notifier);
4026 unregister_netdev(eth->netdev[i]);
4032 static int mtk_cleanup(struct mtk_eth *eth)
4036 cancel_work_sync(&eth->pending_work);
4037 cancel_delayed_work_sync(&eth->reset.monitor_work);
4042 static int mtk_get_link_ksettings(struct net_device *ndev,
4043 struct ethtool_link_ksettings *cmd)
4045 struct mtk_mac *mac = netdev_priv(ndev);
4047 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4050 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4053 static int mtk_set_link_ksettings(struct net_device *ndev,
4054 const struct ethtool_link_ksettings *cmd)
4056 struct mtk_mac *mac = netdev_priv(ndev);
4058 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4061 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4064 static void mtk_get_drvinfo(struct net_device *dev,
4065 struct ethtool_drvinfo *info)
4067 struct mtk_mac *mac = netdev_priv(dev);
4069 strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4070 strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4071 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4074 static u32 mtk_get_msglevel(struct net_device *dev)
4076 struct mtk_mac *mac = netdev_priv(dev);
4078 return mac->hw->msg_enable;
4081 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4083 struct mtk_mac *mac = netdev_priv(dev);
4085 mac->hw->msg_enable = value;
4088 static int mtk_nway_reset(struct net_device *dev)
4090 struct mtk_mac *mac = netdev_priv(dev);
4092 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4098 return phylink_ethtool_nway_reset(mac->phylink);
4101 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4105 switch (stringset) {
4106 case ETH_SS_STATS: {
4107 struct mtk_mac *mac = netdev_priv(dev);
4109 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
4110 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
4111 data += ETH_GSTRING_LEN;
4113 if (mtk_page_pool_enabled(mac->hw))
4114 page_pool_ethtool_stats_get_strings(data);
4122 static int mtk_get_sset_count(struct net_device *dev, int sset)
4125 case ETH_SS_STATS: {
4126 int count = ARRAY_SIZE(mtk_ethtool_stats);
4127 struct mtk_mac *mac = netdev_priv(dev);
4129 if (mtk_page_pool_enabled(mac->hw))
4130 count += page_pool_ethtool_stats_get_count();
4138 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4140 struct page_pool_stats stats = {};
4143 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
4144 struct mtk_rx_ring *ring = &eth->rx_ring[i];
4146 if (!ring->page_pool)
4149 page_pool_get_stats(ring->page_pool, &stats);
4151 page_pool_ethtool_stats_get(data, &stats);
4154 static void mtk_get_ethtool_stats(struct net_device *dev,
4155 struct ethtool_stats *stats, u64 *data)
4157 struct mtk_mac *mac = netdev_priv(dev);
4158 struct mtk_hw_stats *hwstats = mac->hw_stats;
4159 u64 *data_src, *data_dst;
4163 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4166 if (netif_running(dev) && netif_device_present(dev)) {
4167 if (spin_trylock_bh(&hwstats->stats_lock)) {
4168 mtk_stats_update_mac(mac);
4169 spin_unlock_bh(&hwstats->stats_lock);
4173 data_src = (u64 *)hwstats;
4177 start = u64_stats_fetch_begin(&hwstats->syncp);
4179 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4180 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4181 if (mtk_page_pool_enabled(mac->hw))
4182 mtk_ethtool_pp_stats(mac->hw, data_dst);
4183 } while (u64_stats_fetch_retry(&hwstats->syncp, start));
4186 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4189 int ret = -EOPNOTSUPP;
4192 case ETHTOOL_GRXRINGS:
4193 if (dev->hw_features & NETIF_F_LRO) {
4194 cmd->data = MTK_MAX_RX_RING_NUM;
4198 case ETHTOOL_GRXCLSRLCNT:
4199 if (dev->hw_features & NETIF_F_LRO) {
4200 struct mtk_mac *mac = netdev_priv(dev);
4202 cmd->rule_cnt = mac->hwlro_ip_cnt;
4206 case ETHTOOL_GRXCLSRULE:
4207 if (dev->hw_features & NETIF_F_LRO)
4208 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4210 case ETHTOOL_GRXCLSRLALL:
4211 if (dev->hw_features & NETIF_F_LRO)
4212 ret = mtk_hwlro_get_fdir_all(dev, cmd,
4222 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4224 int ret = -EOPNOTSUPP;
4227 case ETHTOOL_SRXCLSRLINS:
4228 if (dev->hw_features & NETIF_F_LRO)
4229 ret = mtk_hwlro_add_ipaddr(dev, cmd);
4231 case ETHTOOL_SRXCLSRLDEL:
4232 if (dev->hw_features & NETIF_F_LRO)
4233 ret = mtk_hwlro_del_ipaddr(dev, cmd);
4242 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4243 struct net_device *sb_dev)
4245 struct mtk_mac *mac = netdev_priv(dev);
4246 unsigned int queue = 0;
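/* DSA-tagged frames carry the switch port number in their queue
 * mapping; offset it by 3 so each user port lands on its own hardware
 * TX queue, while queue 0 stays the default for non-DSA traffic.
 */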
4248 if (netdev_uses_dsa(dev))
4249 queue = skb_get_queue_mapping(skb) + 3;
4253 if (queue >= dev->num_tx_queues)
4259 static const struct ethtool_ops mtk_ethtool_ops = {
4260 .get_link_ksettings = mtk_get_link_ksettings,
4261 .set_link_ksettings = mtk_set_link_ksettings,
4262 .get_drvinfo = mtk_get_drvinfo,
4263 .get_msglevel = mtk_get_msglevel,
4264 .set_msglevel = mtk_set_msglevel,
4265 .nway_reset = mtk_nway_reset,
4266 .get_link = ethtool_op_get_link,
4267 .get_strings = mtk_get_strings,
4268 .get_sset_count = mtk_get_sset_count,
4269 .get_ethtool_stats = mtk_get_ethtool_stats,
4270 .get_rxnfc = mtk_get_rxnfc,
4271 .set_rxnfc = mtk_set_rxnfc,
4274 static const struct net_device_ops mtk_netdev_ops = {
4275 .ndo_init = mtk_init,
4276 .ndo_uninit = mtk_uninit,
4277 .ndo_open = mtk_open,
4278 .ndo_stop = mtk_stop,
4279 .ndo_start_xmit = mtk_start_xmit,
4280 .ndo_set_mac_address = mtk_set_mac_address,
4281 .ndo_validate_addr = eth_validate_addr,
4282 .ndo_eth_ioctl = mtk_do_ioctl,
4283 .ndo_change_mtu = mtk_change_mtu,
4284 .ndo_tx_timeout = mtk_tx_timeout,
4285 .ndo_get_stats64 = mtk_get_stats64,
4286 .ndo_fix_features = mtk_fix_features,
4287 .ndo_set_features = mtk_set_features,
4288 #ifdef CONFIG_NET_POLL_CONTROLLER
4289 .ndo_poll_controller = mtk_poll_controller,
4291 .ndo_setup_tc = mtk_eth_setup_tc,
4293 .ndo_xdp_xmit = mtk_xdp_xmit,
4294 .ndo_select_queue = mtk_select_queue,
4297 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4299 const __be32 *_id = of_get_property(np, "reg", NULL);
4300 phy_interface_t phy_mode;
4301 struct phylink *phylink;
4302 struct mtk_mac *mac;
4307 dev_err(eth->dev, "missing mac id\n");
4311 id = be32_to_cpup(_id);
4312 if (id >= MTK_MAC_COUNT) {
4313 dev_err(eth->dev, "%d is not a valid mac id\n", id);
4317 if (eth->netdev[id]) {
4318 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4322 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4323 txqs = MTK_QDMA_NUM_QUEUES;
4325 eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4326 if (!eth->netdev[id]) {
4327 dev_err(eth->dev, "alloc_etherdev failed\n");
4330 mac = netdev_priv(eth->netdev[id]);
4336 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4337 mac->hwlro_ip_cnt = 0;
4339 mac->hw_stats = devm_kzalloc(eth->dev,
4340 sizeof(*mac->hw_stats),
4342 if (!mac->hw_stats) {
4343 dev_err(eth->dev, "failed to allocate counter memory\n");
4347 spin_lock_init(&mac->hw_stats->stats_lock);
4348 u64_stats_init(&mac->hw_stats->syncp);
4349 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
4351 /* phylink create */
4352 err = of_get_phy_mode(np, &phy_mode);
4354 dev_err(eth->dev, "incorrect phy-mode\n");
4358 /* mac config is not set */
4359 mac->interface = PHY_INTERFACE_MODE_NA;
4360 mac->speed = SPEED_UNKNOWN;
4362 mac->phylink_config.dev = &eth->netdev[id]->dev;
4363 mac->phylink_config.type = PHYLINK_NETDEV;
4364 /* This driver makes use of state->speed in mac_config */
4365 mac->phylink_config.legacy_pre_march2020 = true;
4366 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4367 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4369 __set_bit(PHY_INTERFACE_MODE_MII,
4370 mac->phylink_config.supported_interfaces);
4371 __set_bit(PHY_INTERFACE_MODE_GMII,
4372 mac->phylink_config.supported_interfaces);
4374 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4375 phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4377 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4378 __set_bit(PHY_INTERFACE_MODE_TRGMII,
4379 mac->phylink_config.supported_interfaces);
4381 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4382 __set_bit(PHY_INTERFACE_MODE_SGMII,
4383 mac->phylink_config.supported_interfaces);
4384 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
4385 mac->phylink_config.supported_interfaces);
4386 __set_bit(PHY_INTERFACE_MODE_2500BASEX,
4387 mac->phylink_config.supported_interfaces);
4390 phylink = phylink_create(&mac->phylink_config,
4391 of_fwnode_handle(mac->of_node),
4392 phy_mode, &mtk_phylink_ops);
4393 if (IS_ERR(phylink)) {
4394 err = PTR_ERR(phylink);
4398 mac->phylink = phylink;
4400 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4401 eth->netdev[id]->watchdog_timeo = 5 * HZ;
4402 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4403 eth->netdev[id]->base_addr = (unsigned long)eth->base;
4405 eth->netdev[id]->hw_features = eth->soc->hw_features;
4407 eth->netdev[id]->hw_features |= NETIF_F_LRO;
4409 eth->netdev[id]->vlan_features = eth->soc->hw_features &
4410 ~NETIF_F_HW_VLAN_CTAG_TX;
4411 eth->netdev[id]->features |= eth->soc->hw_features;
4412 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4414 eth->netdev[id]->irq = eth->irq[0];
4415 eth->netdev[id]->dev.of_node = np;
4417 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4418 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4420 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4422 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4423 mac->device_notifier.notifier_call = mtk_device_event;
4424 register_netdevice_notifier(&mac->device_notifier);
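/* XDP is only advertised when the RX rings are backed by a page pool;
 * HWLRO and XDP are mutually exclusive (see mtk_xdp_setup()).
 */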
4427 if (mtk_page_pool_enabled(eth))
4428 eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
4429 NETDEV_XDP_ACT_REDIRECT |
4430 NETDEV_XDP_ACT_NDO_XMIT |
4431 NETDEV_XDP_ACT_NDO_XMIT_SG;
4436 free_netdev(eth->netdev[id]);
4440 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4442 struct net_device *dev, *tmp;
4443 LIST_HEAD(dev_list);
4448 for (i = 0; i < MTK_MAC_COUNT; i++) {
4449 dev = eth->netdev[i];
4451 if (!dev || !(dev->flags & IFF_UP))
4454 list_add_tail(&dev->close_list, &dev_list);
4457 dev_close_many(&dev_list, false);
4459 eth->dma_dev = dma_dev;
4461 list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4462 list_del_init(&dev->close_list);
4463 dev_open(dev, NULL);
4469 static int mtk_probe(struct platform_device *pdev)
4471 struct resource *res = NULL;
4472 struct device_node *mac_np;
4473 struct mtk_eth *eth;
4476 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4480 eth->soc = of_device_get_match_data(&pdev->dev);
4482 eth->dev = &pdev->dev;
4483 eth->dma_dev = &pdev->dev;
4484 eth->base = devm_platform_ioremap_resource(pdev, 0);
4485 if (IS_ERR(eth->base))
4486 return PTR_ERR(eth->base);
4488 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4489 eth->ip_align = NET_IP_ALIGN;
4491 spin_lock_init(&eth->page_lock);
4492 spin_lock_init(&eth->tx_irq_lock);
4493 spin_lock_init(&eth->rx_irq_lock);
4494 spin_lock_init(&eth->dim_lock);
4496 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4497 INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
4498 INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
4500 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4501 INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
4503 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4504 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4506 if (IS_ERR(eth->ethsys)) {
4507 dev_err(&pdev->dev, "no ethsys regmap found\n");
4508 return PTR_ERR(eth->ethsys);
4512 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4513 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4514 "mediatek,infracfg");
4515 if (IS_ERR(eth->infra)) {
4516 dev_err(&pdev->dev, "no infracfg regmap found\n");
4517 return PTR_ERR(eth->infra);
4521 if (of_dma_is_coherent(pdev->dev.of_node)) {
4524 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4525 "cci-control-port");
4526 /* enable CPU/bus coherency */
4528 regmap_write(cci, 0, 3);
4531 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
4532 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
4537 err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
4538 eth->soc->ana_rgc3);
4544 if (eth->soc->required_pctl) {
4545 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4547 if (IS_ERR(eth->pctl)) {
4548 dev_err(&pdev->dev, "no pctl regmap found\n");
4549 return PTR_ERR(eth->pctl);
4553 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
4554 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4559 if (eth->soc->offload_version) {
4561 struct device_node *np;
4562 phys_addr_t wdma_phy;
4565 if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
4568 np = of_parse_phandle(pdev->dev.of_node,
4573 wdma_base = eth->soc->reg_map->wdma_base[i];
4574 wdma_phy = res ? res->start + wdma_base : 0;
4575 mtk_wed_add_hw(np, eth, eth->base + wdma_base,
4580 for (i = 0; i < 3; i++) {
4581 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4582 eth->irq[i] = eth->irq[0];
4584 eth->irq[i] = platform_get_irq(pdev, i);
4585 if (eth->irq[i] < 0) {
4586 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4591 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4592 eth->clks[i] = devm_clk_get(eth->dev,
4593 mtk_clks_source_name[i]);
4594 if (IS_ERR(eth->clks[i])) {
4595 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
4596 err = -EPROBE_DEFER;
4599 if (eth->soc->required_clks & BIT(i)) {
4600 dev_err(&pdev->dev, "clock %s not found\n",
4601 mtk_clks_source_name[i]);
4605 eth->clks[i] = NULL;
4609 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4610 INIT_WORK(&eth->pending_work, mtk_pending_work);
4612 err = mtk_hw_init(eth, false);
4616 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4618 for_each_child_of_node(pdev->dev.of_node, mac_np) {
4619 if (!of_device_is_compatible(mac_np,
4620 "mediatek,eth-mac"))
4623 if (!of_device_is_available(mac_np))
4626 err = mtk_add_mac(eth, mac_np);
4628 of_node_put(mac_np);
4633 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4634 err = devm_request_irq(eth->dev, eth->irq[0],
4636 dev_name(eth->dev), eth);
4638 err = devm_request_irq(eth->dev, eth->irq[1],
4639 mtk_handle_irq_tx, 0,
4640 dev_name(eth->dev), eth);
4644 err = devm_request_irq(eth->dev, eth->irq[2],
4645 mtk_handle_irq_rx, 0,
4646 dev_name(eth->dev), eth);
4651 /* No MT7628/88 support yet */
4652 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4653 err = mtk_mdio_init(eth);
4658 if (eth->soc->offload_version) {
4661 num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
4662 num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
4663 for (i = 0; i < num_ppe; i++) {
4664 u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
4666 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
4667 eth->soc->offload_version, i);
4670 goto err_deinit_ppe;
4674 err = mtk_eth_offload_init(eth);
4676 goto err_deinit_ppe;
4679 for (i = 0; i < MTK_MAX_DEVS; i++) {
4680 if (!eth->netdev[i])
4683 err = register_netdev(eth->netdev[i]);
4685 dev_err(eth->dev, "error bringing up device\n");
4686 goto err_deinit_ppe;
4688 netif_info(eth, probe, eth->netdev[i],
4689 "mediatek frame engine at 0x%08lx, irq %d\n",
4690 eth->netdev[i]->base_addr, eth->irq[0]);
4693 /* we run 2 devices on the same DMA ring so we need a dummy device for NAPI to work */
4696 init_dummy_netdev(&eth->dummy_dev);
4697 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
4698 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
4700 platform_set_drvdata(pdev, eth);
4701 schedule_delayed_work(&eth->reset.monitor_work,
4702 MTK_DMA_MONITOR_TIMEOUT);
4707 mtk_ppe_deinit(eth);
4708 mtk_mdio_cleanup(eth);
4719 static int mtk_remove(struct platform_device *pdev)
4721 struct mtk_eth *eth = platform_get_drvdata(pdev);
4722 struct mtk_mac *mac;
4725 /* stop all devices to make sure that dma is properly shut down */
4726 for (i = 0; i < MTK_MAC_COUNT; i++) {
4727 if (!eth->netdev[i])
4729 mtk_stop(eth->netdev[i]);
4730 mac = netdev_priv(eth->netdev[i]);
4731 phylink_disconnect_phy(mac->phylink);
4737 netif_napi_del(&eth->tx_napi);
4738 netif_napi_del(&eth->rx_napi);
4740 mtk_mdio_cleanup(eth);
4745 static const struct mtk_soc_data mt2701_data = {
4746 .reg_map = &mtk_reg_map,
4747 .caps = MT7623_CAPS | MTK_HWLRO,
4748 .hw_features = MTK_HW_FEATURES,
4749 .required_clks = MT7623_CLKS_BITMAP,
4750 .required_pctl = true,
4752 .txd_size = sizeof(struct mtk_tx_dma),
4753 .rxd_size = sizeof(struct mtk_rx_dma),
4754 .rx_irq_done_mask = MTK_RX_DONE_INT,
4755 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4756 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4757 .dma_len_offset = 16,
4761 static const struct mtk_soc_data mt7621_data = {
4762 .reg_map = &mtk_reg_map,
4763 .caps = MT7621_CAPS,
4764 .hw_features = MTK_HW_FEATURES,
4765 .required_clks = MT7621_CLKS_BITMAP,
4766 .required_pctl = false,
4767 .offload_version = 1,
4769 .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4771 .txd_size = sizeof(struct mtk_tx_dma),
4772 .rxd_size = sizeof(struct mtk_rx_dma),
4773 .rx_irq_done_mask = MTK_RX_DONE_INT,
4774 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4775 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4776 .dma_len_offset = 16,
4780 static const struct mtk_soc_data mt7622_data = {
4781 .reg_map = &mtk_reg_map,
4783 .caps = MT7622_CAPS | MTK_HWLRO,
4784 .hw_features = MTK_HW_FEATURES,
4785 .required_clks = MT7622_CLKS_BITMAP,
4786 .required_pctl = false,
4787 .offload_version = 2,
4789 .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4791 .txd_size = sizeof(struct mtk_tx_dma),
4792 .rxd_size = sizeof(struct mtk_rx_dma),
4793 .rx_irq_done_mask = MTK_RX_DONE_INT,
4794 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4795 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4796 .dma_len_offset = 16,
4800 static const struct mtk_soc_data mt7623_data = {
4801 .reg_map = &mtk_reg_map,
4802 .caps = MT7623_CAPS | MTK_HWLRO,
4803 .hw_features = MTK_HW_FEATURES,
4804 .required_clks = MT7623_CLKS_BITMAP,
4805 .required_pctl = true,
4806 .offload_version = 1,
4808 .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4810 .txd_size = sizeof(struct mtk_tx_dma),
4811 .rxd_size = sizeof(struct mtk_rx_dma),
4812 .rx_irq_done_mask = MTK_RX_DONE_INT,
4813 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4814 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4815 .dma_len_offset = 16,
4819 static const struct mtk_soc_data mt7629_data = {
4820 .reg_map = &mtk_reg_map,
4822 .caps = MT7629_CAPS | MTK_HWLRO,
4823 .hw_features = MTK_HW_FEATURES,
4824 .required_clks = MT7629_CLKS_BITMAP,
4825 .required_pctl = false,
4827 .txd_size = sizeof(struct mtk_tx_dma),
4828 .rxd_size = sizeof(struct mtk_rx_dma),
4829 .rx_irq_done_mask = MTK_RX_DONE_INT,
4830 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4831 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4832 .dma_len_offset = 16,
4836 static const struct mtk_soc_data mt7986_data = {
4837 .reg_map = &mt7986_reg_map,
4839 .caps = MT7986_CAPS,
4840 .hw_features = MTK_HW_FEATURES,
4841 .required_clks = MT7986_CLKS_BITMAP,
4842 .required_pctl = false,
4843 .offload_version = 2,
4845 .foe_entry_size = sizeof(struct mtk_foe_entry),
4847 .txd_size = sizeof(struct mtk_tx_dma_v2),
4848 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4849 .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
4850 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
4851 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4852 .dma_len_offset = 8,
4856 static const struct mtk_soc_data rt5350_data = {
4857 .reg_map = &mt7628_reg_map,
4858 .caps = MT7628_CAPS,
4859 .hw_features = MTK_HW_FEATURES_MT7628,
4860 .required_clks = MT7628_CLKS_BITMAP,
4861 .required_pctl = false,
4863 .txd_size = sizeof(struct mtk_tx_dma),
4864 .rxd_size = sizeof(struct mtk_rx_dma),
4865 .rx_irq_done_mask = MTK_RX_DONE_INT,
4866 .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
4867 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4868 .dma_len_offset = 16,
4872 const struct of_device_id of_mtk_match[] = {
4873 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
4874 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
4875 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
4876 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
4877 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
4878 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
4879 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
4882 MODULE_DEVICE_TABLE(of, of_mtk_match);
4884 static struct platform_driver mtk_driver = {
4886 .remove = mtk_remove,
4888 .name = "mtk_soc_eth",
4889 .of_match_table = of_mtk_match,
4893 module_platform_driver(mtk_driver);
4895 MODULE_LICENSE("GPL");
4896 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
4897 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");