Commit f0a5f24217

- two upstreamed patches removed
- compile tested all targets using 4.1
- run tested ar71xx

Signed-off-by: Roman Yeryomin <roman@advem.lv>
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
SVN-Revision: 47694
From e3aece79d5003b6879298b05551e113117d5cdd8 Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Sat, 27 Jun 2015 13:13:36 +0200
Subject: [PATCH 63/76] arm: mediatek: add SDK ethernet

Signed-off-by: John Crispin <blogic@openwrt.org>
---
 drivers/net/ethernet/Kconfig | 1 +
 drivers/net/ethernet/Makefile | 1 +
 drivers/net/ethernet/raeth/Kconfig | 415 ++
 drivers/net/ethernet/raeth/Makefile | 67 +
 drivers/net/ethernet/raeth/Makefile.release | 60 +
 drivers/net/ethernet/raeth/csr_netlink.h | 27 +
 drivers/net/ethernet/raeth/dvt/pkt_gen.c | 88 +
 drivers/net/ethernet/raeth/dvt/pkt_gen_tcp_frag.c | 138 +
 drivers/net/ethernet/raeth/dvt/pkt_gen_udp_frag.c | 191 +
 drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.c | 1527 +++++
 drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.h | 75 +
 drivers/net/ethernet/raeth/ethtool_readme.txt | 44 +
 drivers/net/ethernet/raeth/mcast.c | 187 +
 drivers/net/ethernet/raeth/mii_mgr.c | 603 ++
 drivers/net/ethernet/raeth/ra2882ethreg.h | 1985 +++++++
 drivers/net/ethernet/raeth/ra_ethtool.c | 515 ++
 drivers/net/ethernet/raeth/ra_ethtool.h | 13 +
 drivers/net/ethernet/raeth/ra_ioctl.h | 102 +
 drivers/net/ethernet/raeth/ra_mac.c | 2645 +++++++++
 drivers/net/ethernet/raeth/ra_mac.h | 57 +
 drivers/net/ethernet/raeth/ra_netlink.c | 142 +
 drivers/net/ethernet/raeth/ra_netlink.h | 10 +
 drivers/net/ethernet/raeth/ra_qos.c | 655 +++
 drivers/net/ethernet/raeth/ra_qos.h | 18 +
 drivers/net/ethernet/raeth/ra_rfrw.c | 66 +
 drivers/net/ethernet/raeth/ra_rfrw.h | 6 +
 drivers/net/ethernet/raeth/raether.c | 6401 +++++++++++++++++++++
 drivers/net/ethernet/raeth/raether.h | 126 +
 drivers/net/ethernet/raeth/raether_hwlro.c | 347 ++
 drivers/net/ethernet/raeth/raether_pdma.c | 1121 ++++
 drivers/net/ethernet/raeth/raether_qdma.c | 1407 +++++
 drivers/net/ethernet/raeth/raether_qdma_mt7623.c | 1020 ++++
 drivers/net/ethernet/raeth/smb_hook.c | 17 +
 drivers/net/ethernet/raeth/smb_nf.c | 177 +
 drivers/net/ethernet/raeth/sync_write.h | 103 +
 35 files changed, 20357 insertions(+)
 create mode 100644 drivers/net/ethernet/raeth/Kconfig
 create mode 100644 drivers/net/ethernet/raeth/Makefile
 create mode 100644 drivers/net/ethernet/raeth/Makefile.release
 create mode 100644 drivers/net/ethernet/raeth/csr_netlink.h
 create mode 100755 drivers/net/ethernet/raeth/dvt/pkt_gen.c
 create mode 100755 drivers/net/ethernet/raeth/dvt/pkt_gen_tcp_frag.c
 create mode 100755 drivers/net/ethernet/raeth/dvt/pkt_gen_udp_frag.c
 create mode 100755 drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.c
 create mode 100755 drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.h
 create mode 100644 drivers/net/ethernet/raeth/ethtool_readme.txt
 create mode 100644 drivers/net/ethernet/raeth/mcast.c
 create mode 100644 drivers/net/ethernet/raeth/mii_mgr.c
 create mode 100644 drivers/net/ethernet/raeth/ra2882ethreg.h
 create mode 100644 drivers/net/ethernet/raeth/ra_ethtool.c
 create mode 100644 drivers/net/ethernet/raeth/ra_ethtool.h
 create mode 100644 drivers/net/ethernet/raeth/ra_ioctl.h
 create mode 100644 drivers/net/ethernet/raeth/ra_mac.c
 create mode 100644 drivers/net/ethernet/raeth/ra_mac.h
 create mode 100644 drivers/net/ethernet/raeth/ra_netlink.c
 create mode 100644 drivers/net/ethernet/raeth/ra_netlink.h
 create mode 100644 drivers/net/ethernet/raeth/ra_qos.c
 create mode 100644 drivers/net/ethernet/raeth/ra_qos.h
 create mode 100644 drivers/net/ethernet/raeth/ra_rfrw.c
 create mode 100644 drivers/net/ethernet/raeth/ra_rfrw.h
 create mode 100644 drivers/net/ethernet/raeth/raether.c
 create mode 100644 drivers/net/ethernet/raeth/raether.h
 create mode 100755 drivers/net/ethernet/raeth/raether_hwlro.c
 create mode 100755 drivers/net/ethernet/raeth/raether_pdma.c
 create mode 100644 drivers/net/ethernet/raeth/raether_qdma.c
 create mode 100644 drivers/net/ethernet/raeth/raether_qdma_mt7623.c
 create mode 100644 drivers/net/ethernet/raeth/smb_hook.c
 create mode 100644 drivers/net/ethernet/raeth/smb_nf.c
 create mode 100644 drivers/net/ethernet/raeth/sync_write.h

--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -17,6 +17,7 @@ config MDIO
 config SUNGEM_PHY
 	tristate
 
+source "drivers/net/ethernet/raeth/Kconfig"
 source "drivers/net/ethernet/3com/Kconfig"
 source "drivers/net/ethernet/adaptec/Kconfig"
 source "drivers/net/ethernet/aeroflex/Kconfig"
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -84,3 +84,4 @@ obj-$(CONFIG_NET_VENDOR_VIA) += via/
 obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
 obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
 obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
+obj-$(CONFIG_RAETH) += raeth/
--- /dev/null
+++ b/drivers/net/ethernet/raeth/Kconfig
@@ -0,0 +1,415 @@
+config ARCH_MT7623
+	bool
+	default y
+
+config RA_NAT_NONE
+	bool
+	default y
+
+config RAETH
+	tristate "Ralink GMAC"
+	---help---
+	  This driver supports Ralink gigabit ethernet family of
+	  adapters.
+
+config PDMA_NEW
+	bool
+	default y if (RALINK_MT7620 || RALINK_MT7621 || ARCH_MT7623)
+	depends on RAETH
+
+config RAETH_SCATTER_GATHER_RX_DMA
+	bool
+	default y if (RALINK_MT7620 || RALINK_MT7621 || ARCH_MT7623)
+	depends on RAETH
+
+
+choice
+	prompt "Network BottomHalves"
+	depends on RAETH
+	default RA_NETWORK_WORKQUEUE_BH
+
+	config RA_NETWORK_TASKLET_BH
+		bool "Tasklet"
+
+	config RA_NETWORK_WORKQUEUE_BH
+		bool "Work Queue"
+
+	config RAETH_NAPI
+		bool "NAPI"
+
+endchoice
+
+#config TASKLET_WORKQUEUE_SW
+#	bool "Tasklet and Workqueue switch"
+#	depends on RA_NETWORK_TASKLET_BH
+
+config RAETH_SKB_RECYCLE_2K
+	bool "SKB Recycling"
+	depends on RAETH
+
+config RAETH_SPECIAL_TAG
+	bool "Ralink Special Tag (0x810x)"
+	depends on RAETH && RT_3052_ESW
+
+#config RAETH_JUMBOFRAME
+#	bool "Jumbo Frame up to 4K bytes"
+#	depends on RAETH && !(RALINK_RT3052 || RALINK_RT3352 || RALINK_RT5350 || RALINK_MT7628)
+
+config RAETH_CHECKSUM_OFFLOAD
+	bool "TCP/UDP/IP checksum offload"
+	default y
+	depends on RAETH && !RALINK_RT2880
+
+#config RAETH_SW_FC
+#	bool "When TX ring is full, inform kernel stop transmit and stop RX handler"
+#	default n
+#	depends on RAETH
+
+#config RAETH_8023AZ_EEE
+#	bool "Enable Embeded Switch EEE"
+#	default n
+#	depends on RAETH && (RALINK_MT7620 || RALINK_MT7621 || RALINK_MT7628)
+
+
+
+config 32B_DESC
+	bool "32bytes TX/RX description"
+	default n
+	depends on RAETH && (RALINK_MT7620 || RALINK_MT7621)
+	---help---
+	  At this moment, you cannot enable 32B description with Multiple RX ring at the same time.
+
+config RAETH_LRO
+	bool "LRO (Large Receive Offload )"
+	select INET_LRO
+	depends on RAETH && (RALINK_RT6855A || RALINK_MT7620 || RALINK_MT7621 || ARCH_MT7623)
+
+config RAETH_HW_LRO
+	bool "HW LRO (Large Receive Offload)"
+	default n
+	depends on RAETH
+
+config RAETH_HW_LRO_DBG
+	bool "HW LRO Debug"
+	default n
+	depends on RAETH_HW_LRO
+
+config RAETH_HW_LRO_AUTO_ADJ_DBG
+	bool "HW LRO Auto Adjustment Debug"
+	default y
+	depends on RAETH_HW_LRO
+
+config RAETH_HW_LRO_REASON_DBG
+	bool "HW LRO Flush Reason Debug"
+	default n
+	depends on RAETH_HW_LRO
+
+config RAETH_HW_VLAN_TX
+	bool "Transmit VLAN HW (DoubleVLAN is not supported)"
+	depends on RAETH && !(RALINK_RT5350 || RALINK_MT7628)
+	---help---
+	  Please disable HW_VLAN_TX if you need double vlan
+
+config RAETH_HW_VLAN_RX
+	bool "Receive VLAN HW (DoubleVLAN is not supported)"
+	depends on RAETH && RALINK_MT7621
+	---help---
+	  Please disable HW_VLAN_RX if you need double vlan
+
+config RAETH_TSO
+	bool "TSOV4 (Tcp Segmentaton Offload)"
+	depends on (RAETH_HW_VLAN_TX && (RALINK_RT6855 || RALINK_RT6855A || RALINK_MT7620))||((RALINK_MT7621 || ARCH_MT7623) &&(RAETH_HW_VLAN_TX || RAETH_GMAC2 ))
+
+config RAETH_TSOV6
+	bool "TSOV6 (Tcp Segmentaton Offload)"
+	depends on RAETH_TSO
+
+config RAETH_RW_PDMAPTR_FROM_VAR
+	bool
+	default y if RALINK_RT6855A || RALINK_MT7620
+	depends on RAETH
+
+config MTK_SMB_HOOK
+	bool "Samba Speedup Module"
+	depends on RAETH
+
+config SPLICE_NET_SUPPORT
+	default y if MTK_SMB_HOOK
+	depends on MTK_SMB_HOOK
+	bool
+
+
+config RAETH_DVT
+	bool "RAETH DVT"
+	depends on RAETH && (RALINK_MT7621 || ARCH_MT7623)
+
+config RAETH_PDMA_DVT
+	bool "PDMA DVT"
+	depends on RAETH_DVT
+
+config RAETH_PDMA_LEGACY_MODE
+	bool "PDMA legacy mode"
+	depends on RAETH_PDMA_DVT
+
+#config RAETH_QOS
+#	bool "QoS Feature"
+#	depends on RAETH && !RALINK_RT2880 && !RALINK_MT7620 && !RALINK_MT7621 && !RAETH_TSO
+
+choice
+	prompt "QoS Type"
+	depends on RAETH_QOS
+	default DSCP_QOS_DSCP
+
+config RAETH_QOS_DSCP_BASED
+	bool "DSCP-based"
+	depends on RAETH_QOS
+
+config RAETH_QOS_VPRI_BASED
+	bool "VPRI-based"
+	depends on RAETH_QOS
+
+endchoice
+
+config RAETH_QDMA
+	bool "Choose QDMA instead PDMA"
+	default n
+	depends on RAETH && (RALINK_MT7621 || ARCH_MT7623)
+
+config RAETH_QDMATX_QDMARX
+	bool "Choose QDMA RX instead PDMA RX"
+	default n
+	depends on RAETH_QDMA && !RALINK_MT7621
+
+
+
+choice
+	prompt "GMAC is connected to"
+	depends on RAETH
+	default GE1_RGMII_FORCE_1000
+
+config GE1_MII_FORCE_100
+	bool "MII_FORCE_100 (10/100M Switch)"
+	depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621)
+
+config GE1_MII_AN
+	bool "MII_AN (100Phy)"
+	depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621)
+
+config GE1_RVMII_FORCE_100
+	bool "RvMII_FORCE_100 (CPU)"
+	depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621)
+
+config GE1_RGMII_FORCE_1000
+	bool "RGMII_FORCE_1000 (GigaSW, CPU)"
+	depends on (RALINK_RT2880 || RALINK_RT3883)
+	select RALINK_SPI
+
+config GE1_RGMII_FORCE_1000
+	bool "RGMII_FORCE_1000 (GigaSW, CPU)"
+	depends on (RALINK_MT7621 || ARCH_MT7623)
+	select RT_3052_ESW
+
+config GE1_TRGMII_FORCE_1200
+	bool "TRGMII_FORCE_1200 (GigaSW, CPU)"
+	depends on (RALINK_MT7621)
+	select RT_3052_ESW
+
+config GE1_TRGMII_FORCE_2000
+	bool "TRGMII_FORCE_2000 (GigaSW, CPU, for MT7623 and MT7683)"
+	depends on (ARCH_MT7623)
+	select RT_3052_ESW
+
+config GE1_TRGMII_FORCE_2600
+	bool "TRGMII_FORCE_2600 (GigaSW, CPU, MT7623 only)"
+	depends on (ARCH_MT7623)
+	select RT_3052_ESW
+
+config GE1_RGMII_AN
+	bool "RGMII_AN (GigaPhy)"
+	depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621 || ARCH_MT7623)
+
+config GE1_RGMII_NONE
+	bool "NONE (NO CONNECT)"
+	depends on (RALINK_MT7621 || ARCH_MT7623)
+
+endchoice
+
+config HW_SFQ
+	bool "HW_SFQ"
+	default n
+	depends on RAETH_QDMA && (ARCH_MT7623)
+
+
+config RT_3052_ESW
+	bool "Ralink Embedded Switch"
+	default y
+	depends on RAETH && (RALINK_RT3052 || RALINK_RT3352 || RALINK_RT5350 || RALINK_RT6855 || RALINK_RT6855A || RALINK_MT7620 || RALINK_MT7621 || RALINK_MT7628 || ARCH_MT7623)
+
+config LAN_WAN_SUPPORT
+	bool "LAN/WAN Partition"
+	depends on RAETH && (RAETH_ROUTER || RT_3052_ESW)
+
+config ETH_MEMORY_OPTIMIZATION
+	bool "Ethernet memory optimization"
+	depends on RALINK_MT7628
+
+config ETH_ONE_PORT_ONLY
+	bool "One Port Only"
+	depends on RALINK_MT7628
+
+choice
+	prompt "Switch Board Layout Type"
+	depends on LAN_WAN_SUPPORT || P5_RGMII_TO_MAC_MODE || GE1_RGMII_FORCE_1000 || GE1_TRGMII_FORCE_1200 || GE2_RGMII_FORCE_1000
+	default WAN_AT_P0
+
+	config WAN_AT_P4
+		bool "LLLL/W"
+
+	config WAN_AT_P0
+		bool "W/LLLL"
+endchoice
+
+config RALINK_VISTA_BASIC
+	bool 'Vista Basic Logo for IC+ 175C'
+	depends on LAN_WAN_SUPPORT && (RALINK_RT2880 || RALINK_RT3883)
+
+config ESW_DOUBLE_VLAN_TAG
+	bool
+	default y if RT_3052_ESW
+
+config RAETH_HAS_PORT4
+	bool "Port 4 Support"
+	depends on RAETH && RALINK_MT7620
+choice
+	prompt "Target Mode"
+	depends on RAETH_HAS_PORT4
+	default P4_RGMII_TO_MAC_MODE
+
+	config P4_MAC_TO_PHY_MODE
+		bool "Giga_Phy (RGMII)"
+	config GE_RGMII_MT7530_P0_AN
+		bool "GE_RGMII_MT7530_P0_AN (MT7530 Internal GigaPhy)"
+	config GE_RGMII_MT7530_P4_AN
+		bool "GE_RGMII_MT7530_P4_AN (MT7530 Internal GigaPhy)"
+	config P4_RGMII_TO_MAC_MODE
+		bool "Giga_SW/iNIC (RGMII)"
+	config P4_MII_TO_MAC_MODE
+		bool "External_CPU (MII_RvMII)"
+	config P4_RMII_TO_MAC_MODE
+		bool "External_CPU (RvMII_MII)"
+endchoice
+
+config MAC_TO_GIGAPHY_MODE_ADDR2
+	hex "Port4 Phy Address"
+	default 0x4
+	depends on P4_MAC_TO_PHY_MODE
+
+config RAETH_HAS_PORT5
+	bool "Port 5 Support"
+	depends on RAETH && (RALINK_RT3052 || RALINK_RT3352 || RALINK_RT6855 || RALINK_RT6855A || RALINK_MT7620)
+choice
+	prompt "Target Mode"
+	depends on RAETH_HAS_PORT5
+	default P5_RGMII_TO_MAC_MODE
+
+	config P5_MAC_TO_PHY_MODE
+		bool "Giga_Phy (RGMII)"
+	config P5_RGMII_TO_MAC_MODE
+		bool "Giga_SW/iNIC (RGMII)"
+	config P5_RGMII_TO_MT7530_MODE
+		bool "MT7530 Giga_SW (RGMII)"
+		depends on RALINK_MT7620
+	config P5_MII_TO_MAC_MODE
+		bool "External_CPU (MII_RvMII)"
+	config P5_RMII_TO_MAC_MODE
+		bool "External_CPU (RvMII_MII)"
+endchoice
+
+config MAC_TO_GIGAPHY_MODE_ADDR
+	hex "GE1 Phy Address"
+	default 0x1F
+	depends on GE1_MII_AN || GE1_RGMII_AN
+
+config MAC_TO_GIGAPHY_MODE_ADDR
+	hex "Port5 Phy Address"
+	default 0x5
+	depends on P5_MAC_TO_PHY_MODE
+
+config RAETH_GMAC2
+	bool "GMAC2 Support"
+	depends on RAETH && (RALINK_RT3883 || RALINK_MT7621 || ARCH_MT7623)
+
+choice
+	prompt "GMAC2 is connected to"
+	depends on RAETH_GMAC2
+	default GE2_RGMII_AN
+
+config GE2_MII_FORCE_100
+	bool "MII_FORCE_100 (10/100M Switch)"
+	depends on RAETH_GMAC2
+
+config GE2_MII_AN
+	bool "MII_AN (100Phy)"
+	depends on RAETH_GMAC2
+
+config GE2_RVMII_FORCE_100
+	bool "RvMII_FORCE_100 (CPU)"
+	depends on RAETH_GMAC2
+
+config GE2_RGMII_FORCE_1000
+	bool "RGMII_FORCE_1000 (GigaSW, CPU)"
+	depends on RAETH_GMAC2
+	select RALINK_SPI
+
+config GE2_RGMII_AN
+	bool "RGMII_AN (External GigaPhy)"
+	depends on RAETH_GMAC2
+
+config GE2_INTERNAL_GPHY
+	bool "RGMII_AN (Internal GigaPny)"
+	depends on RAETH_GMAC2
+	select LAN_WAN_SUPPORT
+
+endchoice
+
+config GE_RGMII_INTERNAL_P0_AN
+	bool
+	depends on GE2_INTERNAL_GPHY
+	default y if WAN_AT_P0
+
+config GE_RGMII_INTERNAL_P4_AN
+	bool
+	depends on GE2_INTERNAL_GPHY
+	default y if WAN_AT_P4
+
+config MAC_TO_GIGAPHY_MODE_ADDR2
+	hex
+	default 0 if GE_RGMII_INTERNAL_P0_AN
+	default 4 if GE_RGMII_INTERNAL_P4_AN
+	depends on GE_RGMII_INTERNAL_P0_AN || GE_RGMII_INTERNAL_P4_AN
+
+config MAC_TO_GIGAPHY_MODE_ADDR2
+	hex "GE2 Phy Address"
+	default 0x1E
+	depends on GE2_MII_AN || GE2_RGMII_AN
+
+#force 100M
+config RAETH_ROUTER
+bool
+default y if GE1_MII_FORCE_100 || GE2_MII_FORCE_100 || GE1_RVMII_FORCE_100 || GE2_RVMII_FORCE_100
+
+#force 1000M
+config MAC_TO_MAC_MODE
+bool
+default y if GE1_RGMII_FORCE_1000 || GE2_RGMII_FORCE_1000
+depends on (RALINK_RT2880 || RALINK_RT3883)
+
+#AN
+config GIGAPHY
+bool
+default y if GE1_RGMII_AN || GE2_RGMII_AN
+
+#AN
+config 100PHY
+bool
+default y if GE1_MII_AN || GE2_MII_AN
--- /dev/null
+++ b/drivers/net/ethernet/raeth/Makefile
@@ -0,0 +1,67 @@
+obj-$(CONFIG_RAETH) += raeth.o
+raeth-objs := ra_mac.o mii_mgr.o ra_rfrw.o
+
+ifeq ($(CONFIG_MTK_SMB_HOOK),y)
+obj-y += smb_hook.o
+obj-m += smb.o
+smb-objs := smb_nf.o
+endif
+
+#EXTRA_CFLAGS += -DCONFIG_RAETH_MULTIPLE_RX_RING
+
+ifeq ($(CONFIG_RAETH_QOS),y)
+raeth-objs += ra_qos.o
+endif
+
+ifeq ($(CONFIG_RAETH_QDMA),y)
+raeth-objs += raether_qdma.o
+endif
+
+ifneq ($(CONFIG_RAETH_QDMA),y)
+raeth-objs += raether_pdma.o
+endif
+
+raeth-objs += raether.o
+
+ifeq ($(CONFIG_ETHTOOL),y)
+raeth-objs += ra_ethtool.o
+endif
+
+ifeq ($(CONFIG_RALINK_RT3052_MP2),y)
+raeth-objs += mcast.o
+endif
+
+ifeq ($(CONFIG_RAETH_NETLINK),y)
+raeth-objs += ra_netlink.o
+endif
+
+ifeq ($(CONFIG_RAETH_PDMA_DVT),y)
+raeth-objs += dvt/raether_pdma_dvt.o
+obj-m += dvt/pkt_gen.o
+obj-m += dvt/pkt_gen_udp_frag.o
+obj-m += dvt/pkt_gen_tcp_frag.o
+endif
+
+ifeq ($(CONFIG_RAETH_HW_LRO),y)
+raeth-objs += raether_hwlro.o
+endif
+
+ifeq ($(CONFIG_RAETH_GMAC2),y)
+EXTRA_CFLAGS += -DCONFIG_PSEUDO_SUPPORT
+endif
+
+ifeq ($(CONFIG_ETH_MEMORY_OPTIMIZATION),y)
+EXTRA_CFLAGS += -DMEMORY_OPTIMIZATION
+endif
+
+ifeq ($(CONFIG_RT2860V2_AP_MEMORY_OPTIMIZATION),y)
+EXTRA_CFLAGS += -DMEMORY_OPTIMIZATION
+endif
+
+ifeq ($(CONFIG_RA_NETWORK_WORKQUEUE_BH),y)
+EXTRA_CFLAGS += -DWORKQUEUE_BH
+endif
+
+ifeq ($(CONFIG_TASKLET_WORKQUEUE_SW),y)
+EXTRA_CFLAGS += -DTASKLET_WORKQUEUE_SW
+endif
--- /dev/null
+++ b/drivers/net/ethernet/raeth/Makefile.release
@@ -0,0 +1,60 @@
+obj-$(CONFIG_RAETH) += raeth.o
+raeth-objs := ra_mac.o mii_mgr.o ra_rfrw.o
+
+ifeq ($(CONFIG_MTK_SMB_HOOK),y)
+obj-y += smb_hook.o
+obj-m += smb.o
+smb-objs := smb_nf.o
+endif
+
+#EXTRA_CFLAGS += -DCONFIG_RAETH_MULTIPLE_RX_RING
+
+ifeq ($(CONFIG_RAETH_QOS),y)
+raeth-objs += ra_qos.o
+endif
+
+ifeq ($(CONFIG_RAETH_QDMA),y)
+raeth-objs += raether_qdma.o
+endif
+
+ifneq ($(CONFIG_RAETH_QDMA),y)
+raeth-objs += raether_pdma.o
+endif
+
+raeth-objs += raether.o
+
+ifeq ($(CONFIG_ETHTOOL),y)
+raeth-objs += ra_ethtool.o
+endif
+
+ifeq ($(CONFIG_RALINK_RT3052_MP2),y)
+raeth-objs += mcast.o
+endif
+
+ifeq ($(CONFIG_RAETH_NETLINK),y)
+raeth-objs += ra_netlink.o
+endif
+
+ifeq ($(CONFIG_RAETH_HW_LRO),y)
+raeth-objs += raether_hwlro.o
+endif
+
+ifeq ($(CONFIG_RAETH_GMAC2),y)
+EXTRA_CFLAGS += -DCONFIG_PSEUDO_SUPPORT
+endif
+
+ifeq ($(CONFIG_ETH_MEMORY_OPTIMIZATION),y)
+EXTRA_CFLAGS += -DMEMORY_OPTIMIZATION
+endif
+
+ifeq ($(CONFIG_RT2860V2_AP_MEMORY_OPTIMIZATION),y)
+EXTRA_CFLAGS += -DMEMORY_OPTIMIZATION
+endif
+
+ifeq ($(CONFIG_RA_NETWORK_WORKQUEUE_BH),y)
+EXTRA_CFLAGS += -DWORKQUEUE_BH
+endif
+
+ifeq ($(CONFIG_TASKLET_WORKQUEUE_SW),y)
+EXTRA_CFLAGS += -DTASKLET_WORKQUEUE_SW
+endif
--- /dev/null
+++ b/drivers/net/ethernet/raeth/csr_netlink.h
@@ -0,0 +1,27 @@
+#ifndef CSR_NETLINK_H
+#define CSR_NETLINK_H
+
+#define CSR_NETLINK 30
+#define CSR_READ 0
+#define CSR_WRITE 1
+#define CSR_TEST 2
+
+#define RALINK_CSR_GROUP 2882
+
+typedef struct rt2880_csr_msg {
+	int enable;
+	char reg_name[32];
+	unsigned long address;
+	unsigned long default_value;
+	unsigned long reserved_bits; /* 1 : not reserved, 0 : reserved */
+	unsigned long write_mask;
+	unsigned long write_value;
+	int status;
+} CSR_MSG;
+
+int csr_msg_send(CSR_MSG* msg);
+int csr_msg_recv(void);
+
+// static CSR_MSG input_csr_msg;
+
+#endif
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/dvt/pkt_gen.c
|
|
@@ -0,0 +1,88 @@
|
|
+//#include <linux/config.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/netfilter.h>
|
|
+#include <linux/netdevice.h>
|
|
+#include <linux/types.h>
|
|
+#include <asm/uaccess.h>
|
|
+#include <linux/moduleparam.h>
|
|
+
|
|
+char *ifname="eth3";
|
|
+
|
|
+static int32_t PktGenInitMod(void)
|
|
+{
|
|
+
|
|
+ struct net_dev *dev;
|
|
+ struct sk_buff *skb;
|
|
+ int i=0;
|
|
+
|
|
+ unsigned char pkt[]={
|
|
+ //0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // dest bcast mac
|
|
+ 0x00, 0x21, 0x86, 0xee, 0xe3, 0x95, // dest macA
|
|
+ //0x00, 0x30, 0xdb, 0x02, 0x02, 0x01, // dest macB
|
|
+ 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
|
|
+ 0x81, 0x00, // vlan tag
|
|
+ //0x81, 0x10, // vlan tag
|
|
+ //0x87, 0x39, // do not learn
|
|
+ //0xc1, 0x03, // vlan tag SA=0, VID=2, LV=1
|
|
+ 0x00, 0x03, // pri=0, vlan=3
|
|
+ 0x08, 0x00, // eth type=ip
|
|
+ 0x45, 0x00, 0x00, 0x30, 0x12, 0x34, 0x40, 0x00, 0xff, 0x06,
|
|
+ 0x40, 0x74, 0x0a, 0x0a, 0x1e, 0x0a, 0x0a, 0x0a, 0x1e, 0x0b,
|
|
+ 0x00, 0x1e, 0x00, 0x28, 0x00, 0x1c, 0x81, 0x06, 0x00, 0x00,
|
|
+ 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
|
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
|
|
+
|
|
+ skb = alloc_skb(256, GFP_ATOMIC);
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ if((dev=dev_get_by_name(&init_net,ifname))){
|
|
+#else
|
|
+ if((dev=dev_get_by_name(ifname))){
|
|
+#endif
|
|
+
|
|
+
|
|
+
|
|
+ skb->dev=dev;
|
|
+ skb_put(skb,sizeof(pkt));
|
|
+ memcpy(skb->data, pkt, sizeof(pkt));
|
|
+
|
|
+ printk("send pkt(len=%d) to %s\n", skb->len, skb->dev->name);
|
|
+
|
|
+
|
|
+ for(i=0;i<sizeof(pkt);i++){
|
|
+ if(i%16==0) {
|
|
+ printk("\n");
|
|
+ }
|
|
+ printk("%02X-",skb->data[i]);
|
|
+ }
|
|
+
|
|
+ dev_queue_xmit(skb);
|
|
+ }else{
|
|
+ printk("interface %s not found\n",ifname);
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void PktGenCleanupMod(void)
|
|
+{
|
|
+}
|
|
+
|
|
+module_init(PktGenInitMod);
|
|
+module_exit(PktGenCleanupMod);
|
|
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)
|
|
+MODULE_PARM (ifname, "s");
|
|
+#else
|
|
+module_param (ifname, charp, 0);
|
|
+#endif
|
|
+
|
|
+MODULE_DESCRIPTION("Ralink PktGen Module");
|
|
+MODULE_AUTHOR("Steven Liu");
|
|
+MODULE_LICENSE("Proprietary");
|
|
+MODULE_PARM_DESC (ifname, "interface name");
|
|
+
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/dvt/pkt_gen_tcp_frag.c
|
|
@@ -0,0 +1,138 @@
|
|
+//#include <linux/config.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/netfilter.h>
|
|
+#include <linux/netdevice.h>
|
|
+#include <linux/types.h>
|
|
+#include <asm/uaccess.h>
|
|
+#include <linux/moduleparam.h>
|
|
+
|
|
+char *ifname="eth3";
|
|
+
|
|
+
|
|
+static int32_t PktGenInitMod(void)
|
|
+{
|
|
+ unsigned char pkt_1[]={
|
|
+ 0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
|
|
+ 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
|
|
+ 0x08, 0x00, // type: ip
|
|
+ 0x45, 0x00, 0x00, 0x34, // ip: ..., total len (0x034 = 52)
|
|
+ 0xa1, 0x78, 0x20, 0x00, // ip: id, frag, frag offset
|
|
+ 0x80, 0x06, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
|
|
+ 0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
|
|
+ 0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
|
|
+ 0x0d, 0xd5, //tcp src port
|
|
+ 0x13, 0x89, //tcp dst port
|
|
+ 0x40, 0xf5, 0x15, 0x04, //tcp sequence number
|
|
+ 0xf6, 0x4f, 0x1e, 0x31, //tcp ack number
|
|
+ 0x50, 0x10, 0xfc, 0x00, //tcp flags, win size
|
|
+ 0xf1, 0xfe, 0x00, 0x00, //tcp checksum (0xf1fe)
|
|
+ 0x01, 0x02, 0x03, 0x04, 0x05, //payload (12 bytes)
|
|
+ 0x06, 0x07, 0x08, 0x09, 0x0a,
|
|
+ 0x0b, 0x0c
|
|
+ };
|
|
+
|
|
+ unsigned char pkt_2[]={
|
|
+ 0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
|
|
+ 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
|
|
+ 0x08, 0x00, // type: ip
|
|
+ 0x45, 0x00, 0x00, 0x20, // ip: ..., total len (0x020 = 32)
|
|
+ 0xa1, 0x78, 0x00, 0x04, // ip: id, frag, frag offset (32)
|
|
+ 0x40, 0x11, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
|
|
+ 0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
|
|
+ 0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
|
|
+ 0x11, 0x12, 0x13, 0x14, 0x15, //payload (12 bytes)
|
|
+ 0x16, 0x17, 0x18, 0x19, 0x1a,
|
|
+ 0x1b, 0x1c
|
|
+ };
|
|
+
|
|
+ struct net_dev *dev;
|
|
+ struct sk_buff *skb_1;
|
|
+ struct sk_buff *skb_2;
|
|
+ int i=0;
|
|
+
|
|
+ skb_1 = alloc_skb(256, GFP_ATOMIC);
|
|
+ skb_2 = alloc_skb(256, GFP_ATOMIC);
|
|
+
|
|
+
|
|
+#if 1
|
|
+/* send packet 1 */
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ if((dev=dev_get_by_name(&init_net,ifname))){
|
|
+#else
|
|
+ if((dev=dev_get_by_name(ifname))){
|
|
+#endif
|
|
+
|
|
+ skb_1->dev=dev;
|
|
+ skb_put(skb_1,sizeof(pkt_1));
|
|
+ memcpy(skb_1->data, pkt_1, sizeof(pkt_1));
|
|
+
|
|
+ printk("send pkt(len=%d) to %s\n", skb_1->len, skb_1->dev->name);
|
|
+
|
|
+
|
|
+ for(i=0;i<sizeof(pkt_1);i++){
|
|
+ if(i%16==0) {
|
|
+ printk("\n");
|
|
+ }
|
|
+ printk("%02X-",skb_1->data[i]);
|
|
+ }
|
|
+
|
|
+ dev_queue_xmit(skb_1);
|
|
+ }else{
|
|
+ printk("interface %s not found\n",ifname);
|
|
+ return 1;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if 1
|
|
+/* send packet 2 */
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ if((dev=dev_get_by_name(&init_net,ifname))){
|
|
+#else
|
|
+ if((dev=dev_get_by_name(ifname))){
|
|
+#endif
|
|
+
|
|
+ skb_2->dev=dev;
|
|
+ skb_put(skb_2,sizeof(pkt_2));
|
|
+ memcpy(skb_2->data, pkt_2, sizeof(pkt_2));
|
|
+
|
|
+ printk("send pkt(len=%d) to %s\n", skb_2->len, skb_2->dev->name);
|
|
+
|
|
+
|
|
+ for(i=0;i<sizeof(pkt_2);i++){
|
|
+ if(i%16==0) {
|
|
+ printk("\n");
|
|
+ }
|
|
+ printk("%02X-",skb_2->data[i]);
|
|
+ }
|
|
+
|
|
+ dev_queue_xmit(skb_2);
|
|
+ }else{
|
|
+ printk("interface %s not found\n",ifname);
|
|
+ return 1;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void PktGenCleanupMod(void)
|
|
+{
|
|
+}
|
|
+
|
|
+module_init(PktGenInitMod);
|
|
+module_exit(PktGenCleanupMod);
|
|
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)
|
|
+MODULE_PARM (ifname, "s");
|
|
+#else
|
|
+module_param (ifname, charp, 0);
|
|
+#endif
|
|
+
|
|
+MODULE_DESCRIPTION("Ralink PktGen Module");
|
|
+MODULE_AUTHOR("Steven Liu");
|
|
+MODULE_LICENSE("Proprietary");
|
|
+MODULE_PARM_DESC (ifname, "interface name");
|
|
+
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/dvt/pkt_gen_udp_frag.c
|
|
@@ -0,0 +1,191 @@
|
|
+//#include <linux/config.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/netfilter.h>
|
|
+#include <linux/netdevice.h>
|
|
+#include <linux/types.h>
|
|
+#include <asm/uaccess.h>
|
|
+#include <linux/moduleparam.h>
|
|
+
|
|
+char *ifname="eth3";
|
|
+
|
|
+
|
|
+static int32_t PktGenInitMod(void)
|
|
+{
|
|
+#if 0
|
|
+ unsigned char pkt_0[]={
|
|
+// 0x00, 0x21, 0x86, 0xee, 0xe3, 0x95, // dest mac
|
|
+ 0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
|
|
+ 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
|
|
+ 0x08, 0x00, // type: ip
|
|
+ 0x45, 0x00, 0x00, 0x26, // ip: ..., total len (0x026 = 38)
|
|
+// 0xa1, 0x78, 0x20, 0x00, // ip: id, frag, frag offset
|
|
+ 0xa1, 0x78, 0x40, 0x00, // ip: id, frag, frag offset
|
|
+ 0x40, 0x11, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
|
|
+ 0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
|
|
+// 0x0a, 0x0a, 0x1e, 0x03, // dst ip (10.10.30.3)
|
|
+ 0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
|
|
+ 0xca, 0x7b, //udp src port
|
|
+ 0x13, 0x89, //udp dst port
|
|
+ 0x00, 0x12, //udp len (0x01c = 18)
|
|
+ 0x2f, 0x96, //udp checksum (0x2f96)
|
|
+ 0x01, 0x02, 0x03, 0x04, 0x05, //payload (10 bytes)
|
|
+ 0x06, 0x07, 0x08, 0x09, 0x0a
|
|
+ };
|
|
+#endif
|
|
+
|
|
+ unsigned char pkt_1[]={
|
|
+// 0x00, 0x21, 0x86, 0xee, 0xe3, 0x95, // dest mac
|
|
+ 0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
|
|
+ 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
|
|
+ 0x08, 0x00, // type: ip
|
|
+ 0x45, 0x00, 0x00, 0x24, // ip: ..., total len (0x024 = 36)
|
|
+ 0xa1, 0x78, 0x20, 0x00, // ip: id, frag, frag offset
|
|
+// 0xa1, 0x78, 0x40, 0x00, // ip: id, frag, frag offset
|
|
+ 0x40, 0x11, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
|
|
+ 0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
|
|
+// 0x0a, 0x0a, 0x1e, 0x03, // dst ip (10.10.30.3)
|
|
+ 0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
|
|
+ 0xca, 0x7b, //udp src port
|
|
+ 0x13, 0x89, //udp dst port
|
|
+ 0x00, 0x1a, //udp len (0x01a = 26)
|
|
+ 0x2f, 0x96, //udp checksum (0x2f96)
|
|
+ 0x01, 0x02, 0x03, 0x04, 0x05, //payload (8 bytes)
|
|
+ 0x06, 0x07, 0x08
|
|
+ };
|
|
+
|
|
+ unsigned char pkt_2[]={
|
|
+// 0x00, 0x21, 0x86, 0xee, 0xe3, 0x95, // dest mac
|
|
+ 0x00, 0x21, 0x86, 0xee, 0xe3, 0x90, // dest mac
|
|
+ 0x00, 0x0c, 0x43, 0x28, 0x80, 0x33, // src mac
|
|
+ 0x08, 0x00, // type: ip
|
|
+ 0x45, 0x00, 0x00, 0x1e, // ip: ..., total len (0x01e = 30)
|
|
+ 0xa1, 0x78, 0x00, 0x02, // ip: id, frag, frag offset (16)
|
|
+ 0x40, 0x11, 0x63, 0x07, // ip: ttl, protocol, hdr checksum (0x6307)
|
|
+ 0x0a, 0x0a, 0x1e, 0x7b, // src ip (10.10.30.123)
|
|
+// 0x0a, 0x0a, 0x1e, 0x03, // dst ip (10.10.30.3)
|
|
+ 0x0a, 0x0a, 0x1e, 0x05, // dst ip (10.10.30.5)
|
|
+ 0x11, 0x12, 0x13, 0x14, 0x15, //payload (10 bytes)
|
|
+ 0x16, 0x17, 0x18, 0x19, 0x1a
|
|
+ };
|
|
+
|
|
+ struct net_dev *dev;
|
|
+// struct sk_buff *skb_0;
|
|
+ struct sk_buff *skb_1;
|
|
+ struct sk_buff *skb_2;
|
|
+ int i=0;
|
|
+
|
|
+// skb_0 = alloc_skb(256, GFP_ATOMIC);
|
|
+ skb_1 = alloc_skb(256, GFP_ATOMIC);
|
|
+ skb_2 = alloc_skb(256, GFP_ATOMIC);
|
|
+
|
|
+#if 0
|
|
+/* send packet 0 */
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ if((dev=dev_get_by_name(&init_net,ifname))){
|
|
+#else
|
|
+ if((dev=dev_get_by_name(ifname))){
|
|
+#endif
|
|
+
|
|
+ skb_0->dev=dev;
|
|
+ skb_put(skb_0,sizeof(pkt_0));
|
|
+ memcpy(skb_0->data, pkt_0, sizeof(pkt_0));
|
|
+
|
|
+ printk("send pkt(len=%d) to %s\n", skb_0->len, skb_0->dev->name);
|
|
+
|
|
+
|
|
+ for(i=0;i<sizeof(pkt_0);i++){
|
|
+ if(i%16==0) {
|
|
+ printk("\n");
|
|
+ }
|
|
+ printk("%02X-",skb_0->data[i]);
|
|
+ }
|
|
+
|
|
+ dev_queue_xmit(skb_0);
|
|
+ }else{
|
|
+ printk("interface %s not found\n",ifname);
|
|
+ return 1;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if 1
|
|
+/* send packet 1 */
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ if((dev=dev_get_by_name(&init_net,ifname))){
|
|
+#else
|
|
+ if((dev=dev_get_by_name(ifname))){
|
|
+#endif
|
|
+
|
|
+ skb_1->dev=dev;
|
|
+ skb_put(skb_1,sizeof(pkt_1));
|
|
+ memcpy(skb_1->data, pkt_1, sizeof(pkt_1));
|
|
+
|
|
+ printk("send pkt(len=%d) to %s\n", skb_1->len, skb_1->dev->name);
|
|
+
|
|
+
|
|
+ for(i=0;i<sizeof(pkt_1);i++){
|
|
+ if(i%16==0) {
|
|
+ printk("\n");
|
|
+ }
|
|
+ printk("%02X-",skb_1->data[i]);
|
|
+ }
|
|
+
|
|
+ dev_queue_xmit(skb_1);
|
|
+ }else{
|
|
+ printk("interface %s not found\n",ifname);
|
|
+ return 1;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if 1
|
|
+/* send packet 2 */
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ if((dev=dev_get_by_name(&init_net,ifname))){
|
|
+#else
|
|
+ if((dev=dev_get_by_name(ifname))){
|
|
+#endif
|
|
+
|
|
+ skb_2->dev=dev;
|
|
+ skb_put(skb_2,sizeof(pkt_2));
|
|
+ memcpy(skb_2->data, pkt_2, sizeof(pkt_2));
|
|
+
|
|
+ printk("send pkt(len=%d) to %s\n", skb_2->len, skb_2->dev->name);
|
|
+
|
|
+
|
|
+ for(i=0;i<sizeof(pkt_2);i++){
|
|
+ if(i%16==0) {
|
|
+ printk("\n");
|
|
+ }
|
|
+ printk("%02X-",skb_2->data[i]);
|
|
+ }
|
|
+
|
|
+ dev_queue_xmit(skb_2);
|
|
+ }else{
|
|
+ printk("interface %s not found\n",ifname);
|
|
+ return 1;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void PktGenCleanupMod(void)
|
|
+{
|
|
+}
|
|
+
|
|
+module_init(PktGenInitMod);
|
|
+module_exit(PktGenCleanupMod);
|
|
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)
|
|
+MODULE_PARM (ifname, "s");
|
|
+#else
|
|
+module_param (ifname, charp, 0);
|
|
+#endif
|
|
+
|
|
+MODULE_DESCRIPTION("Ralink PktGen Module");
|
|
+MODULE_AUTHOR("Steven Liu");
|
|
+MODULE_LICENSE("Proprietary");
|
|
+MODULE_PARM_DESC (ifname, "interface name");
|
|
+
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.c
|
|
@@ -0,0 +1,1527 @@
|
|
+#include <linux/module.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/pci.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/if_vlan.h>
|
|
+#include <linux/if_ether.h>
|
|
+#include <linux/fs.h>
|
|
+#include <asm/uaccess.h>
|
|
+#include <asm/rt2880/surfboardint.h>
|
|
+#if defined(CONFIG_RAETH_TSO)
|
|
+#include <linux/tcp.h>
|
|
+#include <net/ipv6.h>
|
|
+#include <linux/ip.h>
|
|
+#include <net/ip.h>
|
|
+#include <net/tcp.h>
|
|
+#include <linux/in.h>
|
|
+#include <linux/ppp_defs.h>
|
|
+#include <linux/if_pppox.h>
|
|
+#endif
|
|
+#if defined(CONFIG_RAETH_LRO)
|
|
+#include <linux/inet_lro.h>
|
|
+#endif
|
|
+#include <linux/delay.h>
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
|
|
+#include <linux/sched.h>
|
|
+#endif
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 0)
|
|
+#include <asm/rt2880/rt_mmap.h>
|
|
+#else
|
|
+#include <linux/libata-compat.h>
|
|
+#endif
|
|
+
|
|
+#include "../ra2882ethreg.h"
|
|
+#include "../raether.h"
|
|
+#include "../ra_mac.h"
|
|
+#include "../ra_ioctl.h"
|
|
+#include "../ra_rfrw.h"
|
|
+#ifdef CONFIG_RAETH_NETLINK
|
|
+#include "../ra_netlink.h"
|
|
+#endif
|
|
+#if defined(CONFIG_RAETH_QOS)
|
|
+#include "../ra_qos.h"
|
|
+#endif
|
|
+#include "raether_pdma_dvt.h"
|
|
+
|
|
+/* Global variables */
|
|
+static unsigned int g_pdma_dvt_show_config;
|
|
+static unsigned int g_pdma_dvt_rx_test_config;
|
|
+static unsigned int g_pdma_dvt_tx_test_config;
|
|
+static unsigned int g_pdma_dvt_debug_test_config;
|
|
+static unsigned int g_pdma_dvt_lro_test_config;
|
|
+
|
|
+unsigned int g_pdma_dev_lanport = 0;
|
|
+unsigned int g_pdma_dev_wanport = 0;
|
|
+
|
|
+void skb_dump(struct sk_buff *sk)
|
|
+{
|
|
+ unsigned int i;
|
|
+
|
|
+ printk("skb_dump: from %s with len %d (%d) headroom=%d tailroom=%d\n",
|
|
+ sk->dev ? sk->dev->name : "ip stack", sk->len, sk->truesize,
|
|
+ skb_headroom(sk), skb_tailroom(sk));
|
|
+
|
|
+ /* for(i=(unsigned int)sk->head;i<=(unsigned int)sk->tail;i++) { */
|
|
+ /* for(i=(unsigned int)sk->head;i<=(unsigned int)sk->data+20;i++) { */
|
|
+ for (i = (unsigned int)sk->head; i <= (unsigned int)sk->data + 60; i++) {
|
|
+ if ((i % 20) == 0)
|
|
+ printk("\n");
|
|
+ if (i == (unsigned int)sk->data)
|
|
+ printk("{");
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 21)
|
|
+ if (i == (unsigned int)sk->transport_header)
|
|
+ printk("#");
|
|
+ if (i == (unsigned int)sk->network_header)
|
|
+ printk("|");
|
|
+ if (i == (unsigned int)sk->mac_header)
|
|
+ printk("*");
|
|
+#else
|
|
+ if (i == (unsigned int)sk->h.raw)
|
|
+ printk("#");
|
|
+ if (i == (unsigned int)sk->nh.raw)
|
|
+ printk("|");
|
|
+ if (i == (unsigned int)sk->mac.raw)
|
|
+ printk("*");
|
|
+#endif
|
|
+ printk("%02X-", *((unsigned char *)i));
|
|
+ if (i == (unsigned int)sk->tail)
|
|
+ printk("}");
|
|
+ }
|
|
+ printk("\n");
|
|
+}
|
|
+
|
|
+#if defined(CONFIG_RAETH_HW_LRO)
|
|
+/* PDMA LRO test functions start */
|
|
+int pdma_lro_disable_dvt(void)
|
|
+{
|
|
+ unsigned int regVal = 0;
|
|
+
|
|
+ printk("pdma_lro_disable_dvt()\n");
|
|
+
|
|
+ /* 1. Invalid LRO ring1~3 */
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 0);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 0);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 0);
|
|
+
|
|
+ /* 2 Polling relinguish */
|
|
+ while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
|
|
+ }
|
|
+
|
|
+ /* 3. Disable LRO */
|
|
+ regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
|
|
+ regVal &= ~(PDMA_LRO_EN);
|
|
+ sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
|
|
+
|
|
+#if 0
|
|
+ /* 4. Disable non-lro multiple rx */
|
|
+ SET_PDMA_NON_LRO_MULTI_EN(0);
|
|
+
|
|
+ /* 5.1. Set GDM1 to ring0 */
|
|
+ SET_GDM_PID1_RXID_SEL(0);
|
|
+ /* 5.2. Set GDM2 to ring0 */
|
|
+ SET_GDM_PID2_RXID_SEL(0);
|
|
+#endif
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pdma_lro_force_aggre_dvt(void)
|
|
+{
|
|
+ unsigned int regVal = 0;
|
|
+ unsigned int ip;
|
|
+
|
|
+ printk("pdma_lro_force_aggre_dvt()\n");
|
|
+
|
|
+/* pdma rx ring1 */
|
|
+ /* 1. Set RX ring mode to force port */
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_FORCE_PORT);
|
|
+
|
|
+ /* 2. Configure lro ring */
|
|
+ /* 2.1 set src/destination TCP ports */
|
|
+ SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING1, 3423);
|
|
+ SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING1, 2301);
|
|
+ /* 2.2 set src/destination IPs */
|
|
+ str_to_ip(&ip, "10.10.10.3");
|
|
+ sysRegWrite(LRO_RX_RING1_SIP_DW0, ip);
|
|
+ str_to_ip(&ip, "10.10.10.100");
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
|
|
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
|
|
+
|
|
+ /* 2.3 Valid LRO ring */
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
|
|
+
|
|
+ /* 2.4 Set AGE timer */
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, 0);
|
|
+
|
|
+ /* 2.5 Set max AGG timer */
|
|
+ SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, 0);
|
|
+
|
|
+ /* 2.6 Set max LRO agg count */
|
|
+ SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, HW_LRO_MAX_AGG_CNT);
|
|
+
|
|
+ /* 3. IPv4 checksum update enable */
|
|
+ SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
|
|
+
|
|
+ /* 4. Polling relinguish */
|
|
+ while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
|
|
+ }
|
|
+
|
|
+ /* 5. Enable LRO */
|
|
+ regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
|
|
+ regVal |= PDMA_LRO_EN;
|
|
+ sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pdma_lro_auto_aggre_dvt(void)
|
|
+{
|
|
+ unsigned int regVal = 0;
|
|
+ unsigned int ip;
|
|
+
|
|
+ printk("pdma_lro_auto_aggre_dvt()\n");
|
|
+
|
|
+ /* 1.1 Set my IP_1 */
|
|
+ str_to_ip(&ip, "10.10.10.254");
|
|
+ sysRegWrite(LRO_RX_RING0_DIP_DW0, ip);
|
|
+ sysRegWrite(LRO_RX_RING0_DIP_DW1, 0);
|
|
+ sysRegWrite(LRO_RX_RING0_DIP_DW2, 0);
|
|
+ sysRegWrite(LRO_RX_RING0_DIP_DW3, 0);
|
|
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING0, 1);
|
|
+
|
|
+ /* 1.2 Set my IP_2 */
|
|
+ str_to_ip(&ip, "10.10.20.254");
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW1, 0);
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW2, 0);
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW3, 0);
|
|
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
|
|
+
|
|
+ /* 1.3 Set my IP_3 */
|
|
+ sysRegWrite(LRO_RX_RING2_DIP_DW3, 0x20010238);
|
|
+ sysRegWrite(LRO_RX_RING2_DIP_DW2, 0x08000000);
|
|
+ sysRegWrite(LRO_RX_RING2_DIP_DW1, 0x00000000);
|
|
+ sysRegWrite(LRO_RX_RING2_DIP_DW0, 0x00000254);
|
|
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING2, 1);
|
|
+
|
|
+ /* 1.4 Set my IP_4 */
|
|
+ sysRegWrite(LRO_RX_RING3_DIP_DW3, 0x20010238);
|
|
+ sysRegWrite(LRO_RX_RING3_DIP_DW2, 0x08010000);
|
|
+ sysRegWrite(LRO_RX_RING3_DIP_DW1, 0x00000000);
|
|
+ sysRegWrite(LRO_RX_RING3_DIP_DW0, 0x00000254);
|
|
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING3, 1);
|
|
+
|
|
+ /* 2.1 Set RX ring1~3 to auto-learn modes */
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
|
|
+
|
|
+ /* 2.2 Valid LRO ring */
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
|
|
+
|
|
+ /* 2.3 Set AGE timer */
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, 0);
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, 0);
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, 0);
|
|
+
|
|
+ /* 2.4 Set max AGG timer */
|
|
+ SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, 0);
|
|
+ SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING2, 0);
|
|
+ SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING3, 0);
|
|
+
|
|
+ /* 2.5 Set max LRO agg count */
|
|
+ SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, HW_LRO_MAX_AGG_CNT);
|
|
+ SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING2, HW_LRO_MAX_AGG_CNT);
|
|
+ SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING3, HW_LRO_MAX_AGG_CNT);
|
|
+
|
|
+ /* 3.0 IPv6 LRO enable */
|
|
+ SET_PDMA_LRO_IPV6_EN(1);
|
|
+
|
|
+ /* 3.1 IPv4 checksum update disable */
|
|
+ SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
|
|
+
|
|
+ /* 3.2 switch priority comparision to byte count mode */
|
|
+ SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE);
|
|
+
|
|
+ /* 3.3 bandwidth threshold setting */
|
|
+ SET_PDMA_LRO_BW_THRESHOLD(0);
|
|
+
|
|
+ /* 3.4 auto-learn score delta setting */
|
|
+ sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
|
|
+
|
|
+ /* 3.5 Set ALT timer to 20us: (unit: 20us) */
|
|
+ SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(HW_LRO_TIMER_UNIT);
|
|
+ /* 3.6 Set ALT refresh timer to 1 sec. (unit: 20us) */
|
|
+ SET_PDMA_LRO_ALT_REFRESH_TIMER(HW_LRO_REFRESH_TIME);
|
|
+
|
|
+ /* 4. Polling relinguish */
|
|
+ while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
|
|
+ }
|
|
+
|
|
+ /* 5. Enable LRO */
|
|
+ regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
|
|
+ regVal |= PDMA_LRO_EN;
|
|
+ sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pdma_lro_auto_ipv6_dvt(void)
|
|
+{
|
|
+ unsigned int regVal = 0;
|
|
+
|
|
+ printk("pdma_lro_auto_ipv6_dvt()\n");
|
|
+
|
|
+ /* 1. Set my IP */
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW3, 0x20010238);
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW2, 0x08000000);
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW1, 0x00000000);
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW0, 0x00000254);
|
|
+
|
|
+ /* 2.1 Set RX ring1~3 to auto-learn modes */
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
|
|
+
|
|
+ /* 2.2 Valid LRO ring */
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
|
|
+
|
|
+ /* 2.3 Set AGE timer */
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
|
|
+
|
|
+ /* 3.0 IPv6 LRO enable */
|
|
+ SET_PDMA_LRO_IPV6_EN(1);
|
|
+
|
|
+ /* 3.1 IPv4 checksum update disable */
|
|
+ SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
|
|
+
|
|
+ /* 3.2 switch priority comparision to byte count mode */
|
|
+ SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE);
|
|
+
|
|
+ /* 3.3 bandwidth threshold setting */
|
|
+ SET_PDMA_LRO_BW_THRESHOLD(0);
|
|
+
|
|
+ /* 3.4 auto-learn score delta setting */
|
|
+ sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
|
|
+
|
|
+ /* 3.5 Set ALT timer to 500us: (unit: 20us) */
|
|
+ SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(25);
|
|
+ /* 3.6 Set ALT refresh timer to 1 sec. (unit: 500us) */
|
|
+ SET_PDMA_LRO_ALT_REFRESH_TIMER(2000);
|
|
+
|
|
+ /* 3.7 Set max AGG timer: 10 msec. */
|
|
+ SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
|
|
+
|
|
+ /* 4. Polling relinguish */
|
|
+ while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
|
|
+ }
|
|
+
|
|
+ /* 5. Enable LRO */
|
|
+ regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
|
|
+ regVal |= PDMA_LRO_EN;
|
|
+ sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pdma_lro_auto_myIP_dvt(void)
|
|
+{
|
|
+ unsigned int regVal = 0;
|
|
+ unsigned int ip;
|
|
+
|
|
+ printk("pdma_lro_auto_myIP_dvt()\n");
|
|
+
|
|
+ /* 1.1 Set my IP_1 */
|
|
+ str_to_ip(&ip, "10.10.10.254");
|
|
+ sysRegWrite(LRO_RX_RING0_DIP_DW0, ip);
|
|
+ sysRegWrite(LRO_RX_RING0_DIP_DW1, 0);
|
|
+ sysRegWrite(LRO_RX_RING0_DIP_DW2, 0);
|
|
+ sysRegWrite(LRO_RX_RING0_DIP_DW3, 0);
|
|
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING0, 1);
|
|
+ /* 1.2 Set my IP_2 */
|
|
+ str_to_ip(&ip, "10.10.20.254");
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW1, 0);
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW2, 0);
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW3, 0);
|
|
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
|
|
+ /* 1.3 Set my IP_3 */
|
|
+ sysRegWrite(LRO_RX_RING2_DIP_DW3, 0x20010238);
|
|
+ sysRegWrite(LRO_RX_RING2_DIP_DW2, 0x08000000);
|
|
+ sysRegWrite(LRO_RX_RING2_DIP_DW1, 0x00000000);
|
|
+ sysRegWrite(LRO_RX_RING2_DIP_DW0, 0x00000254);
|
|
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING2, 1);
|
|
+ /* 1.4 Set my IP_4 */
|
|
+ sysRegWrite(LRO_RX_RING3_DIP_DW3, 0x20010238);
|
|
+ sysRegWrite(LRO_RX_RING3_DIP_DW2, 0x08010000);
|
|
+ sysRegWrite(LRO_RX_RING3_DIP_DW1, 0x00000000);
|
|
+ sysRegWrite(LRO_RX_RING3_DIP_DW0, 0x00000254);
|
|
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING3, 1);
|
|
+
|
|
+ /* 2.1 Set RX ring1~3 to auto-learn modes */
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
|
|
+
|
|
+ /* 2.2 Valid LRO ring */
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
|
|
+
|
|
+ /* 2.3 Set AGE timer */
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
|
|
+
|
|
+ /* 3.0 IPv6 LRO enable */
|
|
+ SET_PDMA_LRO_IPV6_EN(1);
|
|
+
|
|
+ /* 3.1 IPv4 checksum update disable */
|
|
+ SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
|
|
+
|
|
+ /* 3.2 switch priority comparision to byte count mode */
|
|
+ SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE);
|
|
+
|
|
+ /* 3.3 bandwidth threshold setting */
|
|
+ SET_PDMA_LRO_BW_THRESHOLD(0);
|
|
+
|
|
+ /* 3.4 auto-learn score delta setting */
|
|
+ sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
|
|
+
|
|
+ /* 3.5 Set ALT timer to 500us: (unit: 20us) */
|
|
+ SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(25);
|
|
+ /* 3.6 Set ALT refresh timer to 1 sec. (unit: 500us) */
|
|
+ SET_PDMA_LRO_ALT_REFRESH_TIMER(2000);
|
|
+
|
|
+ /* 3.7 Set max AGG timer: 10 msec. */
|
|
+ SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
|
|
+
|
|
+ /* 4. Polling relinguish */
|
|
+ while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
|
|
+ }
|
|
+
|
|
+ /* 5. Enable LRO */
|
|
+ regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
|
|
+ regVal |= PDMA_LRO_EN;
|
|
+ sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pdma_lro_dly_int_dvt(int index)
|
|
+{
|
|
+ unsigned int regVal = 0;
|
|
+ unsigned int ip;
|
|
+
|
|
+ printk("pdma_lro_dly_int_dvt(%d)\n", index);
|
|
+
|
|
+#if 0
|
|
+ /* 1.1 Set my IP_1 */
|
|
+ /* str_to_ip( &ip, "10.10.10.254" ); */
|
|
+ str_to_ip(&ip, "10.10.10.100");
|
|
+ sysRegWrite(LRO_RX_RING0_DIP_DW0, ip);
|
|
+ sysRegWrite(LRO_RX_RING0_DIP_DW1, 0);
|
|
+ sysRegWrite(LRO_RX_RING0_DIP_DW2, 0);
|
|
+ sysRegWrite(LRO_RX_RING0_DIP_DW3, 0);
|
|
+#else
|
|
+ /* 1.1 set src/destination TCP ports */
|
|
+ SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING1, 3423);
|
|
+ SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING1, 2301);
|
|
+ SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING2, 3423);
|
|
+ SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING2, 2301);
|
|
+ SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING3, 3423);
|
|
+ SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING3, 2301);
|
|
+ /* 1.2 set src/destination IPs */
|
|
+ str_to_ip(&ip, "10.10.10.3");
|
|
+ sysRegWrite(LRO_RX_RING1_SIP_DW0, ip);
|
|
+ str_to_ip(&ip, "10.10.10.100");
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
|
|
+ str_to_ip(&ip, "10.10.10.3");
|
|
+ sysRegWrite(LRO_RX_RING2_SIP_DW0, ip);
|
|
+ str_to_ip(&ip, "10.10.10.100");
|
|
+ sysRegWrite(LRO_RX_RING2_DIP_DW0, ip);
|
|
+ str_to_ip(&ip, "10.10.10.3");
|
|
+ sysRegWrite(LRO_RX_RING3_SIP_DW0, ip);
|
|
+ str_to_ip(&ip, "10.10.10.100");
|
|
+ sysRegWrite(LRO_RX_RING3_DIP_DW0, ip);
|
|
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
|
|
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING2, 1);
|
|
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING3, 1);
|
|
+#endif
|
|
+
|
|
+ if (index == 0) {
|
|
+ /* 1.2 Disable DLY_INT for lro ring */
|
|
+ SET_PDMA_LRO_DLY_INT_EN(0);
|
|
+ } else {
|
|
+ /* 1.2 Enable DLY_INT for lro ring */
|
|
+ SET_PDMA_LRO_DLY_INT_EN(1);
|
|
+ }
|
|
+
|
|
+ /* 1.3 LRO ring DLY_INT setting */
|
|
+ if (index == 1) {
|
|
+ sysRegWrite(LRO_RX1_DLY_INT, DELAY_INT_INIT);
|
|
+ } else if (index == 2) {
|
|
+ sysRegWrite(LRO_RX2_DLY_INT, DELAY_INT_INIT);
|
|
+ } else if (index == 3) {
|
|
+ sysRegWrite(LRO_RX3_DLY_INT, DELAY_INT_INIT);
|
|
+ }
|
|
+#if 0
|
|
+ /* 2.1 Set RX rings to auto-learn modes */
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
|
|
+#else
|
|
+ /* 2.0 set rx ring mode */
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_FORCE_PORT);
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_FORCE_PORT);
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_FORCE_PORT);
|
|
+
|
|
+ /* 2.1 IPv4 force port mode */
|
|
+ SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING1, 1);
|
|
+ SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING2, 1);
|
|
+ SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING3, 1);
|
|
+#endif
|
|
+
|
|
+ /* 2.2 Valid LRO ring */
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
|
|
+ if ((index == 0) || (index == 1)) {
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 0);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 0);
|
|
+ } else if (index == 2) {
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 0);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 0);
|
|
+ } else {
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 0);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 0);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
|
|
+ }
|
|
+
|
|
+ /* 2.3 Set AGE timer */
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
|
|
+
|
|
+ /* 3.1 IPv4 checksum update enable */
|
|
+ SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
|
|
+
|
|
+ /* 3.2 switch priority comparision to byte count mode */
|
|
+ SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE);
|
|
+
|
|
+ /* 3.3 bandwidth threshold setting */
|
|
+ SET_PDMA_LRO_BW_THRESHOLD(0);
|
|
+
|
|
+ /* 3.4 auto-learn score delta setting */
|
|
+ sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
|
|
+
|
|
+ /* 3.5 Set ALT timer to 500us: (unit: 20us) */
|
|
+ SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(25);
|
|
+ /* 3.6 Set ALT refresh timer to 1 sec. (unit: 500us) */
|
|
+ SET_PDMA_LRO_ALT_REFRESH_TIMER(2000);
|
|
+
|
|
+ /* 3.7 Set max AGG timer */
|
|
+ SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
|
|
+
|
|
+ /* 4. Polling relinguish */
|
|
+ while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH) {;
|
|
+ }
|
|
+
|
|
+ /* 5. Enable LRO */
|
|
+ regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
|
|
+ regVal |= PDMA_LRO_EN;
|
|
+ sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pdma_lro_dly_int0_dvt(void)
|
|
+{
|
|
+ return pdma_lro_dly_int_dvt(0);
|
|
+}
|
|
+
|
|
+int pdma_lro_dly_int1_dvt(void)
|
|
+{
|
|
+ return pdma_lro_dly_int_dvt(1);
|
|
+}
|
|
+
|
|
+int pdma_lro_dly_int2_dvt(void)
|
|
+{
|
|
+ return pdma_lro_dly_int_dvt(2);
|
|
+}
|
|
+
|
|
+int pdma_lro_dly_int3_dvt(void)
|
|
+{
|
|
+ return pdma_lro_dly_int_dvt(3);
|
|
+}
|
|
+
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+
|
|
+#if defined(CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+int pdma_gdm_rxid_config(void)
|
|
+{
|
|
+ unsigned int regVal = 0;
|
|
+
|
|
+ printk("pdma_gdm_rxid_config()\n");
|
|
+
|
|
+ /* 1. Set RX ring1~3 to pse modes */
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_PSE_MODE);
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_PSE_MODE);
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_PSE_MODE);
|
|
+
|
|
+ /* 2. Enable non-lro multiple rx */
|
|
+ SET_PDMA_NON_LRO_MULTI_EN(1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pdma_non_lro_portid_dvt(void)
|
|
+{
|
|
+ unsigned int regVal = 0;
|
|
+
|
|
+ printk("pdma_non_lro_portid_dvt()\n");
|
|
+
|
|
+ /* 1. Set GDM1 to ring3 */
|
|
+ SET_GDM_PID1_RXID_SEL(3);
|
|
+#if 0
|
|
+ /* 2. Set GDM2 to ring1 */
|
|
+ SET_GDM_PID2_RXID_SEL(1);
|
|
+#endif
|
|
+
|
|
+ /* 3. Set priority rule: pid */
|
|
+ SET_GDM_RXID_PRI_SEL(GDM_PRI_PID);
|
|
+
|
|
+ /* PDMA multi-rx enable */
|
|
+ pdma_gdm_rxid_config();
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pdma_non_lro_stag_dvt(void)
|
|
+{
|
|
+ unsigned int regVal = 0;
|
|
+
|
|
+ printk("pdma_non_lro_stag_dvt()\n");
|
|
+
|
|
+ /* 1. Set STAG4 to ring0 */
|
|
+ GDM_STAG_RXID_SEL(4, 0);
|
|
+ /* 2. Set STAG3 to ring1 */
|
|
+ GDM_STAG_RXID_SEL(3, 1);
|
|
+ /* 3. Set STAG2 to ring2 */
|
|
+ GDM_STAG_RXID_SEL(2, 2);
|
|
+ /* 4. Set STAG1 to ring3 */
|
|
+ GDM_STAG_RXID_SEL(1, 3);
|
|
+
|
|
+ /* 5. Set priority rule: stag/pid */
|
|
+ SET_GDM_RXID_PRI_SEL(GDM_PRI_PID);
|
|
+
|
|
+ /* PDMA multi-rx enable */
|
|
+ pdma_gdm_rxid_config();
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pdma_non_lro_vlan_dvt(void)
|
|
+{
|
|
+ unsigned int regVal = 0;
|
|
+
|
|
+ printk("pdma_non_lro_vlan_dvt()\n");
|
|
+
|
|
+ /* 1. Set vlan priority=3 to ring1 */
|
|
+ SET_GDM_VLAN_PRI_RXID_SEL(3, 1);
|
|
+ /* 2. Set vlan priority=2 to ring2 */
|
|
+ SET_GDM_VLAN_PRI_RXID_SEL(2, 2);
|
|
+ /* 3. Set vlan priority=1 to ring3 */
|
|
+ SET_GDM_VLAN_PRI_RXID_SEL(1, 3);
|
|
+ /* 4. Set vlan priority=0 to ring3 */
|
|
+ SET_GDM_VLAN_PRI_RXID_SEL(0, 3);
|
|
+
|
|
+ /* 1. Set vlan priority=4 to ring1 */
|
|
+ SET_GDM_VLAN_PRI_RXID_SEL(4, 1);
|
|
+ /* 2. Set vlan priority=5 to ring2 */
|
|
+ SET_GDM_VLAN_PRI_RXID_SEL(5, 2);
|
|
+ /* 3. Set vlan priority=6 to ring3 */
|
|
+ SET_GDM_VLAN_PRI_RXID_SEL(6, 3);
|
|
+ /* 4. Set vlan priority=7 to ring3 */
|
|
+ SET_GDM_VLAN_PRI_RXID_SEL(7, 3);
|
|
+
|
|
+ /* 4. Set priority rule: vlan > pid */
|
|
+ SET_GDM_RXID_PRI_SEL(GDM_PRI_VLAN_PID);
|
|
+
|
|
+ /* PDMA multi-rx enable */
|
|
+ pdma_gdm_rxid_config();
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pdma_non_lro_tcpack_dvt(void)
|
|
+{
|
|
+ unsigned int regVal = 0;
|
|
+
|
|
+ printk("pdma_non_lro_tcpack_dvt()\n");
|
|
+
|
|
+ /* 1. Enable TCP ACK with zero payload check */
|
|
+ SET_GDM_TCP_ACK_WZPC(1);
|
|
+ /* 2. Set TCP ACK to ring3 */
|
|
+ SET_GDM_TCP_ACK_RXID_SEL(3);
|
|
+
|
|
+ /* 3. Set priority rule: ack > pid */
|
|
+ SET_GDM_RXID_PRI_SEL(GDM_PRI_ACK_PID);
|
|
+
|
|
+ /* PDMA multi-rx enable */
|
|
+ pdma_gdm_rxid_config();
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pdma_non_lro_pri1_dvt(void)
|
|
+{
|
|
+ unsigned int regVal = 0;
|
|
+
|
|
+ printk("pdma_non_lro_pri1_dvt()\n");
|
|
+
|
|
+ /* 1. Set GDM1 to ring0 */
|
|
+ SET_GDM_PID1_RXID_SEL(0);
|
|
+
|
|
+ /* 2.1 Disable TCP ACK with zero payload check */
|
|
+ SET_GDM_TCP_ACK_WZPC(0);
|
|
+ /* 2.2 Set TCP ACK to ring1 */
|
|
+ SET_GDM_TCP_ACK_RXID_SEL(1);
|
|
+
|
|
+ /* 3. Set vlan priority=1 to ring2 */
|
|
+ SET_GDM_VLAN_PRI_RXID_SEL(1, 2);
|
|
+
|
|
+ /* 4. Set priority rule: vlan > ack > pid */
|
|
+ SET_GDM_RXID_PRI_SEL(GDM_PRI_VLAN_ACK_PID);
|
|
+
|
|
+ /* PDMA multi-rx enable */
|
|
+ pdma_gdm_rxid_config();
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pdma_non_lro_pri2_dvt(void)
|
|
+{
|
|
+ unsigned int regVal = 0;
|
|
+
|
|
+ printk("pdma_non_lro_pri2_dvt()\n");
|
|
+
|
|
+ /* 1. Set GDM1 to ring0 */
|
|
+ SET_GDM_PID1_RXID_SEL(0);
|
|
+
|
|
+ /* 2.1 Disable TCP ACK with zero payload check */
|
|
+ SET_GDM_TCP_ACK_WZPC(0);
|
|
+ /* 2.2 Set TCP ACK to ring1 */
|
|
+ SET_GDM_TCP_ACK_RXID_SEL(1);
|
|
+
|
|
+ /* 3. Set vlan priority=1 to ring2 */
|
|
+ SET_GDM_VLAN_PRI_RXID_SEL(1, 2);
|
|
+
|
|
+ /* 4. Set priority rule: ack > vlan > pid */
|
|
+ SET_GDM_RXID_PRI_SEL(GDM_PRI_ACK_VLAN_PID);
|
|
+
|
|
+ /* PDMA multi-rx enable */
|
|
+ pdma_gdm_rxid_config();
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+#endif /* CONFIG_RAETH_MULTIPLE_RX_RING */
|
|
+const static PDMA_LRO_DVT_FUNC pdma_dvt_lro_func[] = {
|
|
+#if defined(CONFIG_RAETH_HW_LRO)
|
|
+ [0] = pdma_lro_disable_dvt, /* PDMA_TEST_LRO_DISABLE */
|
|
+ [1] = pdma_lro_force_aggre_dvt, /* PDMA_TEST_LRO_FORCE_PORT */
|
|
+ [2] = pdma_lro_auto_aggre_dvt, /* PDMA_TEST_LRO_AUTO_LEARN */
|
|
+ [3] = pdma_lro_auto_ipv6_dvt, /* PDMA_TEST_LRO_AUTO_IPV6 */
|
|
+ [4] = pdma_lro_auto_myIP_dvt, /* PDMA_TEST_LRO_AUTO_MYIP */
|
|
+ [5] = pdma_lro_force_aggre_dvt, /* PDMA_TEST_LRO_FORCE_AGGREGATE */
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+#if defined(CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+ [6] = pdma_non_lro_portid_dvt, /* PDMA_TEST_NON_LRO_PORT_ID */
|
|
+ [7] = pdma_non_lro_stag_dvt, /* PDMA_TEST_NON_LRO_STAG */
|
|
+ [8] = pdma_non_lro_vlan_dvt, /* PDMA_TEST_NON_LRO_VLAN */
|
|
+ [9] = pdma_non_lro_tcpack_dvt, /* PDMA_TEST_NON_LRO_TCP_ACK */
|
|
+ [10] = pdma_non_lro_pri1_dvt, /* PDMA_TEST_NON_LRO_PRI1 */
|
|
+ [11] = pdma_non_lro_pri2_dvt, /* PDMA_TEST_NON_LRO_PRI2 */
|
|
+#endif /* CONFIG_RAETH_MULTIPLE_RX_RING */
|
|
+#if defined(CONFIG_RAETH_HW_LRO)
|
|
+ [12] = pdma_lro_dly_int0_dvt, /* PDMA_TEST_LRO_DLY_INT0 */
|
|
+ [13] = pdma_lro_dly_int1_dvt, /* PDMA_TEST_LRO_DLY_INT1 */
|
|
+ [14] = pdma_lro_dly_int2_dvt, /* PDMA_TEST_LRO_DLY_INT2 */
|
|
+ [15] = pdma_lro_dly_int3_dvt, /* PDMA_TEST_LRO_DLY_INT3 */
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+};
|
|
+
|
|
+/* PDMA LRO test functions end */
|
|
+
|
|
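The table above maps the PDMA_TEST_* indices (defined in raether_pdma_dvt.h) straight to their setup routines, and pdma_dvt_test_lro_ctrl() further down dereferences pdma_dvt_lro_func[par2] without checking the index against the table size. A bounds-checked wrapper, shown here only as a sketch (the wrapper name is made up; ARRAY_SIZE and EINVAL come from the usual kernel headers), could look like this:

static int pdma_dvt_run_lro_test(unsigned int idx)
{
	/* reject indices past the table and gaps left by disabled configs */
	if (idx >= ARRAY_SIZE(pdma_dvt_lro_func) || !pdma_dvt_lro_func[idx])
		return -EINVAL;

	return (*pdma_dvt_lro_func[idx])();
}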
+#if defined(CONFIG_RAETH_HW_LRO) || defined(CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+void raeth_pdma_lro_dvt(int rx_ring_no, END_DEVICE *ei_local,
|
|
+ int rx_dma_owner_idx0)
|
|
+{
|
|
+ if (pdma_dvt_get_show_config() & PDMA_SHOW_RX_DESC) {
|
|
+ if (rx_ring_no == 1) {
|
|
+ printk("------- rt2880_eth_recv (ring1) --------\n");
|
|
+ printk("rx_info1=0x%x\n",
|
|
+ *(unsigned int *)
|
|
+ &ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info1);
|
|
+ printk("rx_info2=0x%x\n",
|
|
+ *(unsigned int *)
|
|
+ &ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info2);
|
|
+ printk("rx_info3=0x%x\n",
|
|
+ *(unsigned int *)
|
|
+ &ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info3);
|
|
+ printk("rx_info4=0x%x\n",
|
|
+ *(unsigned int *)
|
|
+ &ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info4);
|
|
+ printk("-------------------------------\n");
|
|
+ } else if (rx_ring_no == 2) {
|
|
+ printk("------- rt2880_eth_recv (ring2) --------\n");
|
|
+ printk("rx_info1=0x%x\n",
|
|
+ *(unsigned int *)
|
|
+ &ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info1);
|
|
+ printk("rx_info2=0x%x\n",
|
|
+ *(unsigned int *)
|
|
+ &ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info2);
|
|
+ printk("rx_info3=0x%x\n",
|
|
+ *(unsigned int *)
|
|
+ &ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info3);
|
|
+ printk("rx_info4=0x%x\n",
|
|
+ *(unsigned int *)
|
|
+ &ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info4);
|
|
+ printk("-------------------------------\n");
|
|
+ } else if (rx_ring_no == 3) {
|
|
+ printk("------- rt2880_eth_recv (ring3) --------\n");
|
|
+ printk("rx_info1=0x%x\n",
|
|
+ *(unsigned int *)
|
|
+ &ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info1);
|
|
+ printk("rx_info2=0x%x\n",
|
|
+ *(unsigned int *)
|
|
+ &ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info2);
|
|
+ printk("rx_info3=0x%x\n",
|
|
+ *(unsigned int *)
|
|
+ &ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info3);
|
|
+ printk("rx_info4=0x%x\n",
|
|
+ *(unsigned int *)
|
|
+ &ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info4);
|
|
+ printk("-------------------------------\n");
|
|
+ }
|
|
+#if 0
|
|
+ else {
|
|
+ printk("------- rt2880_eth_recv (ring0) --------\n");
|
|
+ printk("rx_info1=0x%x\n",
|
|
+ *(unsigned int *)
|
|
+ &ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info1);
|
|
+ printk("rx_info2=0x%x\n",
|
|
+ *(unsigned int *)
|
|
+ &ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info2);
|
|
+ printk("rx_info3=0x%x\n",
|
|
+ *(unsigned int *)
|
|
+ &ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info3);
|
|
+ printk("rx_info4=0x%x\n",
|
|
+ *(unsigned int *)
|
|
+ &ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info4);
|
|
+ printk("-------------------------------\n");
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+ if ((pdma_dvt_get_show_config() & PDMA_SHOW_DETAIL_RX_DESC) ||
|
|
+ (pdma_dvt_get_lro_test_config()==PDMA_TEST_LRO_FORCE_PORT)) {
|
|
+ if (rx_ring_no == 1) {
|
|
+ printk("------- rt2880_eth_recv (ring1) --------\n");
|
|
+ printk("rx_info1.PDP0=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info1.PDP0);
|
|
+ printk("rx_info2.DDONE_bit=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info2.DDONE_bit);
|
|
+ printk("rx_info2.LS0=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info2.LS0);
|
|
+ printk("rx_info2.PLEN0=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info2.PLEN0);
|
|
+ printk("rx_info2.TAG=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info2.TAG);
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+ printk("rx_info2.LRO_AGG_CNT=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info2.LRO_AGG_CNT);
|
|
+ printk("rx_info2.REV=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info2.REV);
|
|
+#else
|
|
+ printk("rx_info2.LS1=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info2.LS1);
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+ printk("rx_info2.PLEN1=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info2.PLEN1);
|
|
+ printk("rx_info3.TPID=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info3.TPID);
|
|
+ printk("rx_info3.VID=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info3.VID);
|
|
+ printk("rx_info4.IP6=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info4.IP6);
|
|
+ printk("rx_info4.IP4=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info4.IP4);
|
|
+ printk("rx_info4.IP4F=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info4.IP4F);
|
|
+ printk("rx_info4.TACK=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info4.TACK);
|
|
+ printk("rx_info4.L4VLD=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info4.L4VLD);
|
|
+ printk("rx_info4.L4F=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info4.L4F);
|
|
+ printk("rx_info4.SPORT=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info4.SP);
|
|
+ printk("rx_info4.CRSN=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info4.CRSN);
|
|
+ printk("rx_info4.FOE_Entry=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info4.FOE_Entry);
|
|
+ printk("-------------------------------\n");
|
|
+ } else if (rx_ring_no == 2) {
|
|
+ printk("------- rt2880_eth_recv (ring2) --------\n");
|
|
+ printk("rx_info1.PDP0=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info1.PDP0);
|
|
+ printk("rx_info2.DDONE_bit=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info2.DDONE_bit);
|
|
+ printk("rx_info2.LS0=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info2.LS0);
|
|
+ printk("rx_info2.PLEN0=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info2.PLEN0);
|
|
+ printk("rx_info2.TAG=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info2.TAG);
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+ printk("rx_info2.LRO_AGG_CNT=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info2.LRO_AGG_CNT);
|
|
+ printk("rx_info2.REV=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info2.REV);
|
|
+#else
|
|
+ printk("rx_info2.LS1=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info2.LS1);
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+ printk("rx_info2.PLEN1=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info2.PLEN1);
|
|
+ printk("rx_info3.TPID=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info3.TPID);
|
|
+ printk("rx_info3.VID=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info3.VID);
|
|
+ printk("rx_info4.IP6=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info4.IP6);
|
|
+ printk("rx_info4.IP4=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info4.IP4);
|
|
+ printk("rx_info4.IP4F=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info4.IP4F);
|
|
+ printk("rx_info4.TACK=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info4.TACK);
|
|
+ printk("rx_info4.L4VLD=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info4.L4VLD);
|
|
+ printk("rx_info4.L4F=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info4.L4F);
|
|
+ printk("rx_info4.SPORT=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info4.SP);
|
|
+ printk("rx_info4.CRSN=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info4.CRSN);
|
|
+ printk("rx_info4.FOE_Entry=0x%x\n",
|
|
+ ei_local->rx_ring2[rx_dma_owner_idx0].
|
|
+ rxd_info4.FOE_Entry);
|
|
+ printk("-------------------------------\n");
|
|
+ } else if (rx_ring_no == 3) {
|
|
+ printk("------- rt2880_eth_recv (ring3) --------\n");
|
|
+ printk("rx_info1.PDP0=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info1.PDP0);
|
|
+ printk("rx_info2.DDONE_bit=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info2.DDONE_bit);
|
|
+ printk("rx_info2.LS0=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info2.LS0);
|
|
+ printk("rx_info2.PLEN0=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info2.PLEN0);
|
|
+ printk("rx_info2.TAG=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info2.TAG);
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+ printk("rx_info2.LRO_AGG_CNT=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info2.LRO_AGG_CNT);
|
|
+ printk("rx_info2.REV=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info2.REV);
|
|
+#else
|
|
+ printk("rx_info2.LS1=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info2.LS1);
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+ printk("rx_info2.PLEN1=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info2.PLEN1);
|
|
+ printk("rx_info3.TPID=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info3.TPID);
|
|
+ printk("rx_info3.VID=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info3.VID);
|
|
+ printk("rx_info4.IP6=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info4.IP6);
|
|
+ printk("rx_info4.IP4=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info4.IP4);
|
|
+ printk("rx_info4.IP4F=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info4.IP4F);
|
|
+ printk("rx_info4.TACK=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info4.TACK);
|
|
+ printk("rx_info4.L4VLD=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info4.L4VLD);
|
|
+ printk("rx_info4.L4F=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info4.L4F);
|
|
+ printk("rx_info4.SPORT=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info4.SP);
|
|
+ printk("rx_info4.CRSN=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info4.CRSN);
|
|
+ printk("rx_info4.FOE_Entry=0x%x\n",
|
|
+ ei_local->rx_ring3[rx_dma_owner_idx0].
|
|
+ rxd_info4.FOE_Entry);
|
|
+ printk("-------------------------------\n");
|
|
+ }
|
|
+#if 0
|
|
+ else {
|
|
+ printk("------- rt2880_eth_recv (ring0) --------\n");
|
|
+ printk("rx_info1.PDP0=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info1.PDP0);
|
|
+ printk("rx_info2.DDONE_bit=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info2.DDONE_bit);
|
|
+ printk("rx_info2.LS0=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info2.LS0);
|
|
+ printk("rx_info2.PLEN0=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info2.PLEN0);
|
|
+ printk("rx_info2.TAG=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info2.TAG);
|
|
+ printk("rx_info2.LS1=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info2.LS1);
|
|
+ printk("rx_info2.PLEN1=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info2.PLEN1);
|
|
+ printk("rx_info3.TPID=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info3.TPID);
|
|
+ printk("rx_info3.VID=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info3.VID);
|
|
+ printk("rx_info4.IP6=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info4.IP6);
|
|
+ printk("rx_info4.IP4=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info4.IP4);
|
|
+ printk("rx_info4.IP4F=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info4.IP4F);
|
|
+ printk("rx_info4.TACK=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info4.TACK);
|
|
+ printk("rx_info4.L4VLD=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info4.L4VLD);
|
|
+ printk("rx_info4.L4F=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info4.L4F);
|
|
+ printk("rx_info4.SPORT=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info4.SP);
|
|
+ printk("rx_info4.CRSN=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info4.CRSN);
|
|
+ printk("rx_info4.FOE_Entry=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info4.FOE_Entry);
|
|
+ printk("-------------------------------\n");
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+ if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_FORCE_AGGREGATE) {
|
|
+ if (rx_ring_no == 1) {
|
|
+ printk("PASS!!! => RING1: rxd_info1.PDP0=0x%x\n",
|
|
+ ei_local->rx_ring1[rx_dma_owner_idx0].
|
|
+ rxd_info1.PDP0);
|
|
+ skb_dump(ei_local->netrx1_skbuf[rx_dma_owner_idx0]);
|
|
+ pdma_dvt_reset_config();
|
|
+ }
|
|
+ }
|
|
+}
|
|
+#endif
|
|
+
|
|
+int pdma_dvt_show_ctrl(int par1, int par2)
|
|
+{
|
|
+ if (par2 == 0)
|
|
+ g_pdma_dvt_show_config = 0;
|
|
+ else
|
|
+ g_pdma_dvt_show_config |= (1 << par2);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pdma_dvt_test_rx_ctrl(int par1, int par2)
|
|
+{
|
|
+ if (par2 == 0)
|
|
+ g_pdma_dvt_rx_test_config = 0;
|
|
+ else
|
|
+ g_pdma_dvt_rx_test_config |= (1 << par2);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pdma_dvt_test_tx_ctrl(int par1, int par2)
|
|
+{
|
|
+ if (par2 == 0)
|
|
+ g_pdma_dvt_tx_test_config = 0;
|
|
+ else
|
|
+ g_pdma_dvt_tx_test_config |= (1 << par2);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pdma_dvt_test_debug_ctrl(int par1, int par2)
|
|
+{
|
|
+ if (par2 == 0)
|
|
+ g_pdma_dvt_debug_test_config = 0;
|
|
+ else
|
|
+ g_pdma_dvt_debug_test_config |= (1 << par2);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int pdma_dvt_test_lro_ctrl(int par1, int par2)
|
|
+{
|
|
+ g_pdma_dvt_lro_test_config = par2;
|
|
+
|
|
+#if defined(CONFIG_RAETH_HW_LRO) || defined(CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+ if (pdma_dvt_lro_func[par2])
|
|
+ (*pdma_dvt_lro_func[par2]) ();
|
|
+#endif /* #if defined (CONFIG_RAETH_HW_LRO) */
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+unsigned int pdma_dvt_get_show_config()
|
|
+{
|
|
+ return g_pdma_dvt_show_config;
|
|
+}
|
|
+
|
|
+unsigned int pdma_dvt_get_rx_test_config()
|
|
+{
|
|
+ return g_pdma_dvt_rx_test_config;
|
|
+}
|
|
+
|
|
+unsigned int pdma_dvt_get_tx_test_config()
|
|
+{
|
|
+ return g_pdma_dvt_tx_test_config;
|
|
+}
|
|
+
|
|
+unsigned int pdma_dvt_get_debug_test_config()
|
|
+{
|
|
+ return g_pdma_dvt_debug_test_config;
|
|
+}
|
|
+
|
|
+unsigned int pdma_dvt_get_lro_test_config()
|
|
+{
|
|
+ return g_pdma_dvt_lro_test_config;
|
|
+}
|
|
+
|
|
+void pdma_dvt_reset_config()
|
|
+{
|
|
+ g_pdma_dvt_show_config = 0;
|
|
+ g_pdma_dvt_rx_test_config = 0;
|
|
+ g_pdma_dvt_tx_test_config = 0;
|
|
+ g_pdma_dvt_lro_test_config = 0;
|
|
+}
|
|
+
|
|
+void raeth_pdma_rx_desc_dvt(END_DEVICE *ei_local, int rx_dma_owner_idx0)
|
|
+{
|
|
+#if 0
|
|
+ unsigned int udf = 0;
|
|
+#endif
|
|
+
|
|
+ if (pdma_dvt_get_show_config() & PDMA_SHOW_RX_DESC) {
|
|
+ printk("------- rt2880_eth_recv --------\n");
|
|
+ printk("rx_info1=0x%x\n",
|
|
+ *(unsigned int *)&ei_local->
|
|
+ rx_ring0[rx_dma_owner_idx0].rxd_info1);
|
|
+ printk("rx_info2=0x%x\n",
|
|
+ *(unsigned int *)&ei_local->
|
|
+ rx_ring0[rx_dma_owner_idx0].rxd_info2);
|
|
+ printk("rx_info3=0x%x\n",
|
|
+ *(unsigned int *)&ei_local->
|
|
+ rx_ring0[rx_dma_owner_idx0].rxd_info3);
|
|
+ printk("rx_info4=0x%x\n",
|
|
+ *(unsigned int *)&ei_local->
|
|
+ rx_ring0[rx_dma_owner_idx0].rxd_info4);
|
|
+ printk("-------------------------------\n");
|
|
+ }
|
|
+ if ((pdma_dvt_get_show_config() & PDMA_SHOW_DETAIL_RX_DESC) ||
|
|
+ pdma_dvt_get_rx_test_config()) {
|
|
+#if 0
|
|
+ udf = ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP6 << 5 |
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4 << 4 |
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4F << 3 |
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.TACK << 2 |
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4VLD << 1 |
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4F;
|
|
+#endif
|
|
+ printk("------- rt2880_eth_recv --------\n");
|
|
+ printk("rx_info1.PDP0=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info1.PDP0);
|
|
+ printk("rx_info2.DDONE_bit=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info2.DDONE_bit);
|
|
+ printk("rx_info2.LS0=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.LS0);
|
|
+ printk("rx_info2.PLEN0=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.PLEN0);
|
|
+ printk("rx_info2.TAG=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.TAG);
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+ printk("rx_info2.LRO_AGG_CNT=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info2.LRO_AGG_CNT);
|
|
+#else
|
|
+ printk("rx_info2.LS1=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.LS1);
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+ printk("rx_info2.PLEN1=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.PLEN1);
|
|
+ printk("rx_info3.TPID=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info3.TPID);
|
|
+ printk("rx_info3.VID=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info3.VID);
|
|
+#if 0
|
|
+ printk("rx_info4.UDF=0x%x\n", udf);
|
|
+#endif
|
|
+ printk("rx_info4.IP6=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP6);
|
|
+ printk("rx_info4.IP4=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4);
|
|
+ printk("rx_info4.IP4F=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4F);
|
|
+ printk("rx_info4.TACK=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.TACK);
|
|
+ printk("rx_info4.L4VLD=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4VLD);
|
|
+ printk("rx_info4.L4F=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4F);
|
|
+ printk("rx_info4.SPORT=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.SP);
|
|
+ printk("rx_info4.CRSN=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.CRSN);
|
|
+ printk("rx_info4.FOE_Entry=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info4.FOE_Entry);
|
|
+ printk("-------------------------------\n");
|
|
+ }
|
|
+ if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_IPV6)) {
|
|
+ if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP6) {
|
|
+ printk("PASS!!! => rx_info4.IP6=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info4.IP6);
|
|
+ pdma_dvt_reset_config();
|
|
+ }
|
|
+ } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_IPV4)) {
|
|
+ if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4) {
|
|
+ printk("PASS!!! => rx_info4.IP4=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info4.IP4);
|
|
+ pdma_dvt_reset_config();
|
|
+ }
|
|
+ } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_IPV4F)) {
|
|
+ if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.IP4F) {
|
|
+ printk("PASS!!! => rx_info4.IP4F=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info4.IP4F);
|
|
+ pdma_dvt_reset_config();
|
|
+ }
|
|
+ } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_L4VLD)) {
|
|
+ if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4VLD) {
|
|
+ printk("PASS!!! => rx_info4.L4VLD=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info4.L4VLD);
|
|
+ pdma_dvt_reset_config();
|
|
+ }
|
|
+ } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_L4F)) {
|
|
+ if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.L4F) {
|
|
+ printk("PASS!!! => rx_info4.L4F=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info4.L4F);
|
|
+ pdma_dvt_reset_config();
|
|
+ }
|
|
+ } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_SPORT)) {
|
|
+ if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.SP == 1) {
|
|
+ g_pdma_dev_lanport++;
|
|
+ } else if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info4.SP ==
|
|
+ 2) {
|
|
+ g_pdma_dev_wanport++;
|
|
+ }
|
|
+ if (g_pdma_dev_lanport && g_pdma_dev_wanport) {
|
|
+ printk
|
|
+ ("PASS!!! => g_pdma_dev_lanport=0x%x, g_pdma_dev_wanport=0x%x",
|
|
+ g_pdma_dev_lanport, g_pdma_dev_wanport);
|
|
+
|
|
+ g_pdma_dev_lanport = 0;
|
|
+ g_pdma_dev_wanport = 0;
|
|
+ pdma_dvt_reset_config();
|
|
+ }
|
|
+ } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_VID_OFF)) {
|
|
+ if (!ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info3.VID) {
|
|
+ printk("PASS!!! => rxd_info3.VID=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info3.VID);
|
|
+ pdma_dvt_reset_config();
|
|
+ }
|
|
+ } else if ((pdma_dvt_get_rx_test_config() & PDMA_TEST_RX_VID_ON)) {
|
|
+ printk("RX data: (PDP0=%x)\n",
|
|
+ (unsigned int)ei_local->
|
|
+ netrx0_skbuf[rx_dma_owner_idx0]->data);
|
|
+
|
|
+ skb_dump(ei_local->netrx0_skbuf[rx_dma_owner_idx0]);
|
|
+
|
|
+ if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info3.VID &&
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.TAG) {
|
|
+ printk("PASS!!! => rxd_info2.TAG=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info2.TAG);
|
|
+ printk("PASS!!! => rxd_info3.VID=0x%x\n",
|
|
+ ei_local->rx_ring0[rx_dma_owner_idx0].
|
|
+ rxd_info3.VID);
|
|
+ pdma_dvt_reset_config();
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+void raeth_pdma_tx_vlan_dvt(END_DEVICE *ei_local,
|
|
+ unsigned long tx_cpu_owner_idx0)
|
|
+{
|
|
+ if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_ON)) {
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | 0xE007; /* VLAN_TAG = 0x1E007 */
|
|
+ } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_ZERO)) {
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | 0x0000; /* VLAN_TAG = 0x10000 */
|
|
+ } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_MAX)) {
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | 0xFFFF; /* VLAN_TAG = 0x1FFFF */
|
|
+ }
|
|
+}
|
|
+
|
|
+void raeth_pdma_tx_desc_dvt(END_DEVICE *ei_local,
|
|
+ unsigned long tx_cpu_owner_idx0)
|
|
+{
|
|
+ if (PDMA_TEST_RX_UDF == pdma_dvt_get_rx_test_config()) {
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 4; /* PPE */
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.UDF = 0x2F;
|
|
+ }
|
|
+ if (pdma_dvt_get_show_config() & PDMA_SHOW_TX_DESC) {
|
|
+ printk("------- rt2880_eth_send --------\n");
|
|
+ printk("tx_info1=%x\n",
|
|
+ *(unsigned int *)&ei_local->
|
|
+ tx_ring0[tx_cpu_owner_idx0].txd_info1);
|
|
+ printk("tx_info2=%x\n",
|
|
+ *(unsigned int *)&ei_local->
|
|
+ tx_ring0[tx_cpu_owner_idx0].txd_info2);
|
|
+ printk("tx_info3=%x\n",
|
|
+ *(unsigned int *)&ei_local->
|
|
+ tx_ring0[tx_cpu_owner_idx0].txd_info3);
|
|
+ printk("tx_info4=%x\n",
|
|
+ *(unsigned int *)&ei_local->
|
|
+ tx_ring0[tx_cpu_owner_idx0].txd_info4);
|
|
+ printk("--------------------------------\n");
|
|
+ }
|
|
+ if ((pdma_dvt_get_show_config() & PDMA_SHOW_DETAIL_TX_DESC) ||
|
|
+ pdma_dvt_get_tx_test_config()) {
|
|
+ printk("------- rt2880_eth_send --------\n");
|
|
+ printk("tx_info1.SDP0=%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0);
|
|
+ printk("tx_info2.DDONE_bit=%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].
|
|
+ txd_info2.DDONE_bit);
|
|
+ printk("tx_info2.LS0_bit=%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS0_bit);
|
|
+ printk("tx_info2.SDL0=%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0);
|
|
+ printk("tx_info2.BURST_bit=%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].
|
|
+ txd_info2.BURST_bit);
|
|
+ printk("tx_info2.LS1_bit=%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS1_bit);
|
|
+ printk("tx_info2.SDL1=%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL1);
|
|
+ printk("tx_info3.SDP1=%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info3.SDP1);
|
|
+ printk("tx_info4.TUI_CO=%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO);
|
|
+ printk("tx_info4.TSO=%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TSO);
|
|
+ printk("tx_info4.FPORT=%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT);
|
|
+ printk("tx_info4.UDF=%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.UDF);
|
|
+ printk("tx_info4.RESV=%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.RESV);
|
|
+ printk("tx_info4.VLAN_TAG=%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].
|
|
+ txd_info4.VLAN_TAG);
|
|
+ printk("--------------------------------\n");
|
|
+ }
|
|
+ if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_LAN_SPORT)) {
|
|
+ if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT == 1) {
|
|
+ printk("PASS!!! => txd_info4.FPORT=0x%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].
|
|
+ txd_info4.FPORT);
|
|
+ pdma_dvt_reset_config();
|
|
+ }
|
|
+ } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_WAN_SPORT)) {
|
|
+ if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT == 2) {
|
|
+ printk("PASS!!! => txd_info4.FPORT=0x%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].
|
|
+ txd_info4.FPORT);
|
|
+ pdma_dvt_reset_config();
|
|
+ }
|
|
+ } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_ON)) {
|
|
+ if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG) {
|
|
+ printk("PASS!!! => txd_info4.VLAN_TAG=0x%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].
|
|
+ txd_info4.VLAN_TAG);
|
|
+ /* pdma_dvt_reset_config(); */
|
|
+ }
|
|
+ } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_OFF)) {
|
|
+ if (!ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG) {
|
|
+ printk("PASS!!! => txd_info4.VLAN_TAG=0x%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].
|
|
+ txd_info4.VLAN_TAG);
|
|
+ pdma_dvt_reset_config();
|
|
+ }
|
|
+ } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_ZERO)) {
|
|
+ if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG) {
|
|
+ printk("PASS!!! => txd_info4.VLAN_TAG=0x%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].
|
|
+ txd_info4.VLAN_TAG);
|
|
+ /* pdma_dvt_reset_config(); */
|
|
+ }
|
|
+ } else if ((pdma_dvt_get_tx_test_config() & PDMA_TEST_TX_VLAN_MAX)) {
|
|
+ if (ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG) {
|
|
+ printk("PASS!!! => txd_info4.VLAN_TAG=0x%x\n",
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].
|
|
+ txd_info4.VLAN_TAG);
|
|
+ /* pdma_dvt_reset_config(); */
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+void raeth_pdma_lro_dly_int_dvt(void)
|
|
+{
|
|
+ unsigned int reg_int_val;
|
|
+
|
|
+ reg_int_val = sysRegRead(RAETH_FE_INT_STATUS);
|
|
+
|
|
+ if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_DLY_INT0) {
|
|
+ if ((reg_int_val & RX_DLY_INT)) {
|
|
+ printk("PASS!!! => reg_int_val=0x%x\n", reg_int_val);
|
|
+ pdma_dvt_reset_config();
|
|
+ }
|
|
+ } else if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_DLY_INT1) {
|
|
+ if ((reg_int_val & RING1_RX_DLY_INT)) {
|
|
+ printk("PASS!!! => reg_int_val=0x%x\n", reg_int_val);
|
|
+ pdma_dvt_reset_config();
|
|
+ }
|
|
+ } else if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_DLY_INT2) {
|
|
+ if ((reg_int_val & RING2_RX_DLY_INT)) {
|
|
+ printk("PASS!!! => reg_int_val=0x%x\n", reg_int_val);
|
|
+ pdma_dvt_reset_config();
|
|
+ }
|
|
+ } else if (pdma_dvt_get_lro_test_config() == PDMA_TEST_LRO_DLY_INT3) {
|
|
+ if ((reg_int_val & RING3_RX_DLY_INT)) {
|
|
+ printk("PASS!!! => reg_int_val=0x%x\n", reg_int_val);
|
|
+ pdma_dvt_reset_config();
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+void pdma_dvt_set_dma_mode(void)
|
|
+{
|
|
+#if defined(CONFIG_RAETH_PDMA_LEGACY_MODE)
|
|
+ unsigned int regVal;
|
|
+ regVal = sysRegRead(ADMA_LRO_CTRL_DW3);
|
|
+ regVal &= ~(BIT(15));
|
|
+ sysRegWrite(ADMA_LRO_CTRL_DW3, regVal);
|
|
+#endif /* CONFIG_RAETH_PDMA_LEGACY_MODE */
|
|
+}
|
|
+
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/dvt/raether_pdma_dvt.h
|
|
@@ -0,0 +1,75 @@
|
|
+/* Show controls */
|
|
+#define PDMA_SHOW_RX_DESC (1 << 1)
|
|
+#define PDMA_SHOW_TX_DESC (1 << 2)
|
|
+#define PDMA_SHOW_DETAIL_RX_DESC (1 << 3)
|
|
+#define PDMA_SHOW_DETAIL_TX_DESC (1 << 4)
|
|
+
|
|
+/* Rx test controls */
|
|
+#define PDMA_TEST_RX_UDF (1 << 1)
|
|
+#define PDMA_TEST_RX_IPV6 (1 << 2)
|
|
+#define PDMA_TEST_RX_IPV4 (1 << 3)
|
|
+#define PDMA_TEST_RX_IPV4F (1 << 4)
|
|
+#define PDMA_TEST_RX_L4VLD (1 << 5)
|
|
+#define PDMA_TEST_RX_L4F (1 << 6)
|
|
+#define PDMA_TEST_RX_SPORT (1 << 7)
|
|
+#define PDMA_TEST_RX_VID_ON (1 << 8)
|
|
+#define PDMA_TEST_RX_VID_OFF (1 << 9)
|
|
+
|
|
+/* Tx test controls */
|
|
+#define PDMA_TEST_TX_LAN_SPORT (1 << 1)
|
|
+#define PDMA_TEST_TX_WAN_SPORT (1 << 2)
|
|
+#define PDMA_TEST_TX_VLAN_ON (1 << 3)
|
|
+#define PDMA_TEST_TX_VLAN_OFF (1 << 4)
|
|
+#define PDMA_TEST_TX_VLAN_ZERO (1 << 5)
|
|
+#define PDMA_TEST_TX_VLAN_MAX (1 << 6)
|
|
+#define PDMA_TEST_TX_PDMA_LPK (1 << 31)
|
|
+
|
|
+/* Debug controls */
|
|
+#define PDMA_TEST_TSO_DEBUG (1 << 1)
|
|
+
|
|
+/* LRO test controls */
|
|
+typedef int (*PDMA_LRO_DVT_FUNC) (void);
|
|
+
|
|
+#define PDMA_TEST_LRO_DISABLE (0)
|
|
+#define PDMA_TEST_LRO_FORCE_PORT (1)
|
|
+#define PDMA_TEST_LRO_AUTO_LEARN (2)
|
|
+#define PDMA_TEST_LRO_AUTO_IPV6 (3)
|
|
+#define PDMA_TEST_LRO_AUTO_MYIP (4)
|
|
+#define PDMA_TEST_LRO_FORCE_AGGREGATE (5)
|
|
+#define PDMA_TEST_NON_LRO_PORT_ID (6)
|
|
+#define PDMA_TEST_NON_LRO_STAG (7)
|
|
+#define PDMA_TEST_NON_LRO_VLAN (8)
|
|
+#define PDMA_TEST_NON_LRO_TCP_ACK (9)
|
|
+#define PDMA_TEST_NON_LRO_PRI1 (10)
|
|
+#define PDMA_TEST_NON_LRO_PRI2 (11)
|
|
+#define PDMA_TEST_LRO_DLY_INT0 (12)
|
|
+#define PDMA_TEST_LRO_DLY_INT1 (13)
|
|
+#define PDMA_TEST_LRO_DLY_INT2 (14)
|
|
+#define PDMA_TEST_LRO_DLY_INT3 (15)
|
|
+
|
|
+void skb_dump(struct sk_buff *sk);
|
|
+
|
|
+int pdma_dvt_show_ctrl(int par1, int par2);
|
|
+int pdma_dvt_test_rx_ctrl(int par1, int par2);
|
|
+int pdma_dvt_test_tx_ctrl(int par1, int par2);
|
|
+int pdma_dvt_test_debug_ctrl(int par1, int par2);
|
|
+int pdma_dvt_test_lro_ctrl(int par1, int par2);
|
|
+
|
|
+unsigned int pdma_dvt_get_show_config(void);
|
|
+unsigned int pdma_dvt_get_rx_test_config(void);
|
|
+unsigned int pdma_dvt_get_tx_test_config(void);
|
|
+unsigned int pdma_dvt_get_debug_test_config(void);
|
|
+unsigned int pdma_dvt_get_lro_test_config(void);
|
|
+void pdma_dvt_reset_config(void);
|
|
+
|
|
+void raeth_pdma_rx_desc_dvt(END_DEVICE *ei_local, int rx_dma_owner_idx0);
|
|
+void raeth_pdma_tx_vlan_dvt(END_DEVICE *ei_local,
|
|
+ unsigned long tx_cpu_owner_idx0);
|
|
+void raeth_pdma_tx_desc_dvt(END_DEVICE *ei_local,
|
|
+ unsigned long tx_cpu_owner_idx0);
|
|
+
|
|
+void raeth_pdma_lro_dvt(int rx_ring_no, END_DEVICE *ei_local,
|
|
+ int rx_dma_owner_idx0);
|
|
+void raeth_pdma_lro_dly_int_dvt(void);
|
|
+void pdma_dvt_set_dma_mode(void);
|
|
+
|
|
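A sketch of how a proc command handler might drive these controls follows (the handler itself lives elsewhere in the driver and is not shown here; the wrapper name below is made up). Note the asymmetry: the show/rx/tx/debug controls take a bit position and OR in (1 << par2), with par2 == 0 clearing the whole mask, while the LRO control takes one of the PDMA_TEST_* values directly.

static void pdma_dvt_example(void)
{
	pdma_dvt_show_ctrl(0, 3);	/* bit 3 == PDMA_SHOW_DETAIL_RX_DESC */
	pdma_dvt_test_rx_ctrl(0, 2);	/* bit 2 == PDMA_TEST_RX_IPV6 */
	pdma_dvt_test_lro_ctrl(0, PDMA_TEST_LRO_FORCE_AGGREGATE);

	/* ...run traffic and watch the printk output... */

	pdma_dvt_reset_config();	/* clear all show/test state when done */
}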
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/ethtool_readme.txt
|
|
@@ -0,0 +1,44 @@
|
|
+
|
|
+Ethtool readme for selecting a different PHY address.
|
|
+
|
|
+Before running any ethtool command, make sure the current PHY
|
|
+address is the one you expect. The default PHY address is 1 (port 1).
|
|
+
|
|
+You can change the current PHY address to X (0~4) with the following command:
|
|
+# echo X > /proc/rt2880/gmac
|
|
+
|
|
+The ethtool command also shows the current PHY address, as in the output below.
|
|
+
|
|
+# ethtool eth2
|
|
+Settings for eth2:
|
|
+ Supported ports: [ TP MII ]
|
|
+ Supported link modes: 10baseT/Half 10baseT/Full
|
|
+ 100baseT/Half 100baseT/Full
|
|
+ Supports auto-negotiation: Yes
|
|
+ Advertised link modes: 10baseT/Half 10baseT/Full
|
|
+ 100baseT/Half 100baseT/Full
|
|
+ Advertised auto-negotiation: No
|
|
+ Speed: 10Mb/s
|
|
+ Duplex: Full
|
|
+ Port: MII
|
|
+ PHYAD: 1
|
|
+ Transceiver: internal
|
|
+ Auto-negotiation: off
|
|
+ Current message level: 0x00000000 (0)
|
|
+ Link detected: no
|
|
+
|
|
+
|
|
+The "PHYAD" field shows the current PHY address.
|
|
+
|
|
+
|
|
+
|
|
+Usage example
|
|
+1) show port1 info
|
|
+# echo 1 > /proc/rt2880/gmac # change phy address to 1
|
|
+# ethtool eth2
|
|
+
|
|
+2) show port0 info
|
|
+# echo 0 > /proc/rt2880/gmac # change phy address to 0
|
|
+# ethtool eth2
|
|
+
|
|
+
|
|
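For completeness, the same information can usually be read from user space through the standard MII ioctls instead of the ethtool binary. The sketch below is an assumption, not something this readme documents: it presumes the driver wires up SIOCGMIIPHY/SIOCGMIIREG (as ra_ethtool.c normally does) and that the interface is named eth2.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth2", IFNAMSIZ - 1);

	ioctl(fd, SIOCGMIIPHY, &ifr);		/* driver fills in the current PHY address */
	printf("PHYAD: %u\n", mii->phy_id);

	mii->reg_num = MII_BMSR;		/* basic mode status register */
	ioctl(fd, SIOCGMIIREG, &ifr);
	printf("link %s\n", (mii->val_out & BMSR_LSTATUS) ? "up" : "down");

	close(fd);
	return 0;
}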
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/mcast.c
|
|
@@ -0,0 +1,187 @@
|
|
+#include <linux/config.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/netdevice.h>
|
|
+#include <linux/if_vlan.h>
|
|
+
|
|
+
|
|
+#define MAX_MCAST_ENTRY 16
|
|
+#define AGEING_TIME 5 //Unit: Sec
|
|
+#define MAC_ARG(x) ((u8*)(x))[0],((u8*)(x))[1],((u8*)(x))[2], \
|
|
+ ((u8*)(x))[3],((u8*)(x))[4],((u8*)(x))[5]
|
|
+
|
|
+//#define MCAST_DEBUG
|
|
+#ifdef MCAST_DEBUG
|
|
+#define MCAST_PRINT(fmt, args...) printk(KERN_INFO fmt, ## args)
|
|
+#else
|
|
+#define MCAST_PRINT(fmt, args...) { }
|
|
+#endif
|
|
+
|
|
+typedef struct {
|
|
+ uint8_t src_mac[6];
|
|
+ uint8_t dst_mac[6];
|
|
+ uint16_t vlan_id;
|
|
+ uint32_t valid;
|
|
+ uint32_t use_count;
|
|
+ unsigned long ageout;
|
|
+} mcast_entry;
|
|
+
|
|
+mcast_entry mcast_tbl[MAX_MCAST_ENTRY];
|
|
+atomic_t mcast_entry_num=ATOMIC_INIT(0);
|
|
+DECLARE_MUTEX(mtbl_lock);
|
|
+
|
|
+uint32_t inline is_multicast_pkt(uint8_t *mac)
|
|
+{
|
|
+ if(mac[0]==0x01 && mac[1]==0x00 && mac[2]==0x5E) {
|
|
+ return 1;
|
|
+ }else{
|
|
+ return 0;
|
|
+ }
|
|
+}
|
|
+
|
|
+int32_t inline mcast_entry_get(uint16_t vlan_id, uint8_t *src_mac, uint8_t *dst_mac)
|
|
+{
|
|
+ int i=0;
|
|
+
|
|
+ for(i=0;i<MAX_MCAST_ENTRY;i++) {
|
|
+ if( (mcast_tbl[i].vlan_id == vlan_id) &&
|
|
+ memcmp(mcast_tbl[i].src_mac,src_mac, 6)==0 &&
|
|
+ memcmp(mcast_tbl[i].dst_mac, dst_mac, 6)==0 &&
|
|
+ mcast_tbl[i].valid == 1) {
|
|
+ return i;
|
|
+ }
|
|
+ }
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+int inline __add_mcast_entry(uint16_t vlan_id, uint8_t *src_mac, uint8_t *dst_mac)
|
|
+{
|
|
+ int i=0;
|
|
+
|
|
+ // use empty or ageout entry
|
|
+ for(i=0;i<MAX_MCAST_ENTRY;i++) {
|
|
+ if( mcast_tbl[i].valid==0 ||
|
|
+ time_after(jiffies, mcast_tbl[i].ageout)) {
|
|
+
|
|
+ if(mcast_tbl[i].valid==0) {
|
|
+ atomic_inc(&mcast_entry_num);
|
|
+ }
|
|
+ mcast_tbl[i].vlan_id = vlan_id;
|
|
+ memcpy(mcast_tbl[i].src_mac, src_mac, 6);
|
|
+ memcpy(mcast_tbl[i].dst_mac, dst_mac, 6);
|
|
+ mcast_tbl[i].valid=1;
|
|
+ mcast_tbl[i].use_count=1;
|
|
+ mcast_tbl[i].ageout=jiffies + AGEING_TIME * HZ;
|
|
+
|
|
+ return 1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ MCAST_PRINT("RAETH: Multicast Table is FULL!!\n");
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int inline mcast_entry_ins(uint16_t vlan_id, uint8_t *src_mac, uint8_t *dst_mac)
|
|
+{
|
|
+ int entry_num=0, ret=0;
|
|
+
|
|
+ down(&mtbl_lock);
|
|
+ if((entry_num = mcast_entry_get(vlan_id, src_mac, dst_mac)) >=0) {
|
|
+ mcast_tbl[entry_num].use_count++;
|
|
+ mcast_tbl[entry_num].ageout=jiffies + AGEING_TIME * HZ;
|
|
+ MCAST_PRINT("%s: Update %0X:%0X:%0X:%0X:%0X:%0X's use_count=%d\n" \
|
|
+ ,__FUNCTION__, MAC_ARG(dst_mac), mcast_tbl[entry_num].use_count);
|
|
+ ret = 1;
|
|
+ }else { //if entry not found, create new entry.
|
|
+ MCAST_PRINT("%s: Create new entry %0X:%0X:%0X:%0X:%0X:%0X\n", \
|
|
+ __FUNCTION__, MAC_ARG(dst_mac));
|
|
+ ret = __add_mcast_entry(vlan_id, src_mac,dst_mac);
|
|
+ }
|
|
+
|
|
+ up(&mtbl_lock);
|
|
+ return ret;
|
|
+
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Return:
|
|
+ * 0: entry found
|
|
+ * 1: entry not found
|
|
+ */
|
|
+int inline mcast_entry_del(uint16_t vlan_id, uint8_t *src_mac, uint8_t *dst_mac)
|
|
+{
|
|
+ int entry_num;
|
|
+
|
|
+ down(&mtbl_lock);
|
|
+ if((entry_num = mcast_entry_get(vlan_id, src_mac, dst_mac)) >=0) {
|
|
+ if((--mcast_tbl[entry_num].use_count)==0) {
|
|
+ MCAST_PRINT("%s: %0X:%0X:%0X:%0X:%0X:%0X (entry_num=%d)\n", \
|
|
+ __FUNCTION__, MAC_ARG(dst_mac), entry_num);
|
|
+ mcast_tbl[entry_num].valid=0;
|
|
+ atomic_dec(&mcast_entry_num);
|
|
+ }
|
|
+ up(&mtbl_lock);
|
|
+ return 0;
|
|
+ }else {
|
|
+ /* this multicast packet was not sent by us, just ignore it */
|
|
+ up(&mtbl_lock);
|
|
+ return 1;
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Return
|
|
+ * 0: drop packet
|
|
+ * 1: continue
|
|
+ */
|
|
+int32_t mcast_rx(struct sk_buff * skb)
|
|
+{
|
|
+ struct vlan_ethhdr *eth = (struct vlan_ethhdr *)(skb->data-ETH_HLEN);
|
|
+
|
|
+ /* if we do not send multicast packet before,
|
|
+ * we don't need to check re-inject multicast packet.
|
|
+ */
|
|
+ if (atomic_read(&mcast_entry_num)==0) {
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+
|
|
+ if(is_multicast_pkt(eth->h_dest)) {
|
|
+ MCAST_PRINT("%s: %0X:%0X:%0X:%0X:%0X:%0X\n", __FUNCTION__, \
|
|
+ MAC_ARG(eth->h_dest));
|
|
+
|
|
+ if(ntohs(eth->h_vlan_proto)==0x8100) {
|
|
+ return mcast_entry_del(eth->h_vlan_TCI, eth->h_source, eth->h_dest);
|
|
+ } else {
|
|
+ return mcast_entry_del(0, eth->h_source, eth->h_dest);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+
|
|
+int32_t mcast_tx(struct sk_buff *skb)
|
|
+{
|
|
+ struct vlan_ethhdr *eth = (struct vlan_ethhdr *)(skb->data);
|
|
+
|
|
+
|
|
+ if(is_multicast_pkt(eth->h_dest)) {
|
|
+ MCAST_PRINT("%s: %0X:%0X:%0X:%0X:%0X:%0X\n", __FUNCTION__,\
|
|
+ MAC_ARG(eth->h_dest));
|
|
+
|
|
+ if(ntohs(eth->h_vlan_proto)==0x8100) {
|
|
+ mcast_entry_ins(eth->h_vlan_TCI, eth->h_source, eth->h_dest);
|
|
+ } else {
|
|
+ mcast_entry_ins(0, eth->h_source, eth->h_dest);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
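mcast_tx() records the (vlan, source, destination) tuple of every multicast frame the host sends, and mcast_rx() returns 0 when an incoming frame matches such an entry, i.e. when the switch has looped our own multicast back. The real call sites live in raether.c; the fragment below only illustrates the intended hook-up, with made-up wrapper names:

#include <linux/skbuff.h>

extern int32_t mcast_tx(struct sk_buff *skb);
extern int32_t mcast_rx(struct sk_buff *skb);

/* on the transmit path, just before handing the frame to the DMA ring */
static inline void raeth_mcast_tx_hook(struct sk_buff *skb)
{
	mcast_tx(skb);
}

/* on the receive path, before netif_rx(); returns 1 if the skb was dropped */
static inline int raeth_mcast_rx_hook(struct sk_buff *skb)
{
	if (mcast_rx(skb) == 0) {	/* our own multicast echoed back */
		dev_kfree_skb(skb);
		return 1;
	}
	return 0;
}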
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/mii_mgr.c
|
|
@@ -0,0 +1,603 @@
|
|
+#include <linux/module.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/netdevice.h>
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/sched.h>
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
|
|
+#include <asm/rt2880/rt_mmap.h>
|
|
+#endif
|
|
+
|
|
+#include "ra2882ethreg.h"
|
|
+#include "raether.h"
|
|
+
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+#define PHY_CONTROL_0 0xC0
|
|
+#define PHY_CONTROL_1 0xC4
|
|
+#define MDIO_PHY_CONTROL_0 (RALINK_ETH_SW_BASE + PHY_CONTROL_0)
|
|
+#define MDIO_PHY_CONTROL_1 (RALINK_ETH_SW_BASE + PHY_CONTROL_1)
|
|
+
|
|
+#define GPIO_MDIO_BIT (1<<7)
|
|
+#define GPIO_PURPOSE_SELECT 0x60
|
|
+#define GPIO_PRUPOSE (RALINK_SYSCTL_BASE + GPIO_PURPOSE_SELECT)
|
|
+
|
|
+#elif defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A)
|
|
+
|
|
+#define PHY_CONTROL_0 0x7004
|
|
+#define MDIO_PHY_CONTROL_0 (RALINK_ETH_SW_BASE + PHY_CONTROL_0)
|
|
+#define enable_mdio(x)
|
|
+
|
|
+#elif defined (CONFIG_RALINK_MT7620)
|
|
+
|
|
+#define PHY_CONTROL_0 0x7004
|
|
+#define MDIO_PHY_CONTROL_0 (RALINK_ETH_SW_BASE + PHY_CONTROL_0)
|
|
+#define enable_mdio(x)
|
|
+
|
|
+#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+
|
|
+#define PHY_CONTROL_0 0x0004
|
|
+#define MDIO_PHY_CONTROL_0 (RALINK_ETH_SW_BASE + PHY_CONTROL_0)
|
|
+#define enable_mdio(x)
|
|
+
|
|
+#else
|
|
+#define PHY_CONTROL_0 0x00
|
|
+#define PHY_CONTROL_1 0x04
|
|
+#define MDIO_PHY_CONTROL_0 (RALINK_FRAME_ENGINE_BASE + PHY_CONTROL_0)
|
|
+#define MDIO_PHY_CONTROL_1 (RALINK_FRAME_ENGINE_BASE + PHY_CONTROL_1)
|
|
+#define enable_mdio(x)
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+void enable_mdio(int enable)
|
|
+{
|
|
+#if !defined (CONFIG_P5_MAC_TO_PHY_MODE) && !defined(CONFIG_GE1_RGMII_AN) && !defined(CONFIG_GE2_RGMII_AN) && \
|
|
+ !defined (CONFIG_GE1_MII_AN) && !defined (CONFIG_GE2_MII_AN) && !defined (CONFIG_RALINK_MT7628)
|
|
+ u32 data = sysRegRead(GPIO_PRUPOSE);
|
|
+ if (enable)
|
|
+ data &= ~GPIO_MDIO_BIT;
|
|
+ else
|
|
+ data |= GPIO_MDIO_BIT;
|
|
+ sysRegWrite(GPIO_PRUPOSE, data);
|
|
+#endif
|
|
+}
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A)
|
|
+
|
|
+u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
|
|
+{
|
|
+ u32 volatile status = 0;
|
|
+ u32 rc = 0;
|
|
+ unsigned long volatile t_start = jiffies;
|
|
+ u32 volatile data = 0;
|
|
+
|
|
+ /* We enable mdio gpio purpose register, and disable it when exit. */
|
|
+ enable_mdio(1);
|
|
+
|
|
+ // make sure previous read operation is complete
|
|
+ while (1) {
|
|
+ // 0 : Read/write operation complete
|
|
+ if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
|
|
+ {
|
|
+ break;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start + 5*HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Read operation is ongoing !!\n");
|
|
+ return rc;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ data = (0x01 << 16) | (0x02 << 18) | (phy_addr << 20) | (phy_register << 25);
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data);
|
|
+ data |= (1<<31);
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data);
|
|
+ //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
|
|
+
|
|
+
|
|
+ // make sure read operation is complete
|
|
+ t_start = jiffies;
|
|
+ while (1) {
|
|
+ if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
|
|
+ status = sysRegRead(MDIO_PHY_CONTROL_0);
|
|
+ *read_data = (u32)(status & 0x0000FFFF);
|
|
+
|
|
+ enable_mdio(0);
|
|
+ return 1;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start+5*HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Read operation is ongoing and Time Out!!\n");
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
|
|
+{
|
|
+ unsigned long volatile t_start=jiffies;
|
|
+ u32 volatile data;
|
|
+
|
|
+ enable_mdio(1);
|
|
+
|
|
+ // make sure previous write operation is complete
|
|
+ while(1) {
|
|
+ if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
|
|
+ {
|
|
+ break;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start + 5 * HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Write operation ongoing\n");
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ data = (0x01 << 16)| (1<<18) | (phy_addr << 20) | (phy_register << 25) | write_data;
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data);
|
|
+ data |= (1<<31);
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data); //start operation
|
|
+ //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
|
|
+
|
|
+ t_start = jiffies;
|
|
+
|
|
+ // make sure write operation is complete
|
|
+ while (1) {
|
|
+ if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete
|
|
+ {
|
|
+ enable_mdio(0);
|
|
+ return 1;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start + 5 * HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Write operation Time Out\n");
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_ARCH_MT7623)
|
|
+
|
|
+u32 __mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
|
|
+{
|
|
+ u32 volatile status = 0;
|
|
+ u32 rc = 0;
|
|
+ unsigned long volatile t_start = jiffies;
|
|
+ u32 volatile data = 0;
|
|
+
|
|
+ /* We enable mdio gpio purpose register, and disable it when exit. */
|
|
+ enable_mdio(1);
|
|
+
|
|
+ // make sure previous read operation is complete
|
|
+ while (1) {
|
|
+ // 0 : Read/write operation complete
|
|
+ if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
|
|
+ {
|
|
+ break;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start + 5*HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Read operation is ongoing !!\n");
|
|
+ return rc;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ data = (0x01 << 16) | (0x02 << 18) | (phy_addr << 20) | (phy_register << 25);
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data);
|
|
+ data |= (1<<31);
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data);
|
|
+ //printk("\n Set Command [0x%08X] = [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0, data);
|
|
+
|
|
+
|
|
+ // make sure read operation is complete
|
|
+ t_start = jiffies;
|
|
+ while (1) {
|
|
+ if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
|
|
+ status = sysRegRead(MDIO_PHY_CONTROL_0);
|
|
+ *read_data = (u32)(status & 0x0000FFFF);
|
|
+
|
|
+ enable_mdio(0);
|
|
+ return 1;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start+5*HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Read operation is ongoing and Time Out!!\n");
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+u32 __mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
|
|
+{
|
|
+ unsigned long volatile t_start=jiffies;
|
|
+ u32 volatile data;
|
|
+
|
|
+ enable_mdio(1);
|
|
+
|
|
+ // make sure previous write operation is complete
|
|
+ while(1) {
|
|
+ if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
|
|
+ {
|
|
+ break;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start + 5 * HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Write operation ongoing\n");
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ data = (0x01 << 16)| (1<<18) | (phy_addr << 20) | (phy_register << 25) | write_data;
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data);
|
|
+ data |= (1<<31);
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data); //start operation
|
|
+ //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
|
|
+
|
|
+ t_start = jiffies;
|
|
+
|
|
+ // make sure write operation is complete
|
|
+ while (1) {
|
|
+ if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete
|
|
+ {
|
|
+ enable_mdio(0);
|
|
+ return 1;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start + 5 * HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Write operation Time Out\n");
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
|
|
+{
|
|
+#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
|
|
+ u32 low_word;
|
|
+ u32 high_word;
|
|
+ u32 an_status = 0;
|
|
+
|
|
+ if(phy_addr==31)
|
|
+ {
|
|
+ an_status = (*(unsigned long *)(ESW_PHY_POLLING) & (1<<31));
|
|
+ if(an_status){
|
|
+ *(unsigned long *)(ESW_PHY_POLLING) &= ~(1<<31);//(AN polling off)
|
|
+ }
|
|
+ //phase1: write page address phase
|
|
+ if(__mii_mgr_write(phy_addr, 0x1f, ((phy_register >> 6) & 0x3FF))) {
|
|
+ //phase2: write address & read low word phase
|
|
+ if(__mii_mgr_read(phy_addr, (phy_register >> 2) & 0xF, &low_word)) {
|
|
+ //phase3: write address & read high word phase
|
|
+ if(__mii_mgr_read(phy_addr, (0x1 << 4), &high_word)) {
|
|
+ *read_data = (high_word << 16) | (low_word & 0xFFFF);
|
|
+ if(an_status){
|
|
+ *(unsigned long *)(ESW_PHY_POLLING) |= (1<<31);//(AN polling on)
|
|
+ }
|
|
+ return 1;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if(an_status){
|
|
+ *(unsigned long *)(ESW_PHY_POLLING) |= (1<<31);//(AN polling on)
|
|
+ }
|
|
+ } else
|
|
+#endif
|
|
+ {
|
|
+ if(__mii_mgr_read(phy_addr, phy_register, read_data)) {
|
|
+ return 1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
|
|
+{
|
|
+#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
|
|
+ u32 an_status = 0;
|
|
+
|
|
+ if(phy_addr == 31)
|
|
+ {
|
|
+ an_status = (*(unsigned long *)(ESW_PHY_POLLING) & (1<<31));
|
|
+ if(an_status){
|
|
+ *(unsigned long *)(ESW_PHY_POLLING) &= ~(1<<31);//(AN polling off)
|
|
+ }
|
|
+ //phase1: write page address phase
|
|
+ if(__mii_mgr_write(phy_addr, 0x1f, (phy_register >> 6) & 0x3FF)) {
|
|
+ //phase2: write address & read low word phase
|
|
+ if(__mii_mgr_write(phy_addr, ((phy_register >> 2) & 0xF), write_data & 0xFFFF)) {
|
|
+ //phase3: write address & read high word phase
|
|
+ if(__mii_mgr_write(phy_addr, (0x1 << 4), write_data >> 16)) {
|
|
+ if(an_status){
|
|
+ *(unsigned long *)(ESW_PHY_POLLING) |= (1<<31);//(AN polling on)
|
|
+ }
|
|
+ return 1;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if(an_status){
|
|
+ *(unsigned long *)(ESW_PHY_POLLING) |= (1<<31);//(AN polling on)
|
|
+ }
|
|
+ } else
|
|
+#endif
|
|
+ {
|
|
+ if(__mii_mgr_write(phy_addr, phy_register, write_data)) {
|
|
+ return 1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+u32 mii_mgr_cl45_set_address(u32 port_num, u32 dev_addr, u32 reg_addr)
|
|
+{
|
|
+ u32 rc = 0;
|
|
+ unsigned long volatile t_start = jiffies;
|
|
+ u32 volatile data = 0;
|
|
+
|
|
+ enable_mdio(1);
|
|
+
|
|
+ while (1) {
|
|
+ if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
|
|
+ {
|
|
+ break;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start + 5*HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Read operation is ongoing !!\n");
|
|
+ return rc;
|
|
+ }
|
|
+ }
|
|
+ data = (dev_addr << 25) | (port_num << 20) | (0x00 << 18) | (0x00 << 16) | reg_addr;
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data);
|
|
+ data |= (1<<31);
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data);
|
|
+
|
|
+ t_start = jiffies;
|
|
+ while (1) {
|
|
+ if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete
|
|
+ {
|
|
+ enable_mdio(0);
|
|
+ return 1;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start + 5 * HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Write operation Time Out\n");
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+}
|
|
+
|
|
+
|
|
+u32 mii_mgr_read_cl45(u32 port_num, u32 dev_addr, u32 reg_addr, u32 *read_data)
|
|
+{
|
|
+ u32 volatile status = 0;
|
|
+ u32 rc = 0;
|
|
+ unsigned long volatile t_start = jiffies;
|
|
+ u32 volatile data = 0;
|
|
+
|
|
+ // set address first
|
|
+ mii_mgr_cl45_set_address(port_num, dev_addr, reg_addr);
|
|
+ //udelay(10);
|
|
+
|
|
+ enable_mdio(1);
|
|
+
|
|
+ while (1) {
|
|
+ if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
|
|
+ {
|
|
+ break;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start + 5*HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Read operation is ongoing !!\n");
|
|
+ return rc;
|
|
+ }
|
|
+ }
|
|
+ data = (dev_addr << 25) | (port_num << 20) | (0x03 << 18) | (0x00 << 16) | reg_addr;
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data);
|
|
+ data |= (1<<31);
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data);
|
|
+ t_start = jiffies;
|
|
+ while (1) {
|
|
+ if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
|
|
+ *read_data = (sysRegRead(MDIO_PHY_CONTROL_0) & 0x0000FFFF);
|
|
+ enable_mdio(0);
|
|
+ return 1;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start+5*HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n Set Operation: MDIO Read operation is ongoing and Time Out!!\n");
|
|
+ return 0;
|
|
+ }
|
|
+ status = sysRegRead(MDIO_PHY_CONTROL_0);
|
|
+ }
|
|
+
|
|
+}
|
|
+
|
|
+u32 mii_mgr_write_cl45 (u32 port_num, u32 dev_addr, u32 reg_addr, u32 write_data)
|
|
+{
|
|
+ u32 rc = 0;
|
|
+ unsigned long volatile t_start = jiffies;
|
|
+ u32 volatile data = 0;
|
|
+
|
|
+ // set address first
|
|
+ mii_mgr_cl45_set_address(port_num, dev_addr, reg_addr);
|
|
+ //udelay(10);
|
|
+
|
|
+ enable_mdio(1);
|
|
+ while (1) {
|
|
+ if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
|
|
+ {
|
|
+ break;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start + 5*HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Read operation is ongoing !!\n");
|
|
+ return rc;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ data = (dev_addr << 25) | (port_num << 20) | (0x01 << 18) | (0x00 << 16) | write_data;
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data);
|
|
+ data |= (1<<31);
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data);
|
|
+
|
|
+ t_start = jiffies;
|
|
+
|
|
+ while (1) {
|
|
+ if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
|
|
+ {
|
|
+ enable_mdio(0);
|
|
+ return 1;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start + 5 * HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Write operation Time Out\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ }
|
|
+}
|
|
+
|
|
+#else // not rt6855
|
|
+
|
|
+u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data)
|
|
+{
|
|
+ u32 volatile status = 0;
|
|
+ u32 rc = 0;
|
|
+ unsigned long volatile t_start = jiffies;
|
|
+#if !defined (CONFIG_RALINK_RT3052) && !defined (CONFIG_RALINK_RT3352) && !defined (CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
|
|
+ u32 volatile data = 0;
|
|
+#endif
|
|
+
|
|
+ /* We enable mdio gpio purpose register, and disable it when exit. */
|
|
+ enable_mdio(1);
|
|
+
|
|
+ // make sure previous read operation is complete
|
|
+ while (1) {
|
|
+#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+ // rd_rdy: read operation is complete
|
|
+ if(!( sysRegRead(MDIO_PHY_CONTROL_1) & (0x1 << 1)))
|
|
+#else
|
|
+ // 0 : Read/write operation complete
|
|
+ if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
|
|
+#endif
|
|
+ {
|
|
+ break;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start + 5*HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Read operation is ongoing !!\n");
|
|
+ return rc;
|
|
+ }
|
|
+ }
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0 , (1<<14) | (phy_register << 8) | (phy_addr));
|
|
+#else
|
|
+ data = (phy_addr << 24) | (phy_register << 16);
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data);
|
|
+ data |= (1<<31);
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data);
|
|
+#endif
|
|
+ //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
|
|
+
|
|
+
|
|
+ // make sure read operation is complete
|
|
+ t_start = jiffies;
|
|
+ while (1) {
|
|
+#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+ if (sysRegRead(MDIO_PHY_CONTROL_1) & (0x1 << 1)) {
|
|
+ status = sysRegRead(MDIO_PHY_CONTROL_1);
|
|
+ *read_data = (u32)(status >>16);
|
|
+
|
|
+ enable_mdio(0);
|
|
+ return 1;
|
|
+ }
|
|
+#else
|
|
+ if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) {
|
|
+ status = sysRegRead(MDIO_PHY_CONTROL_0);
|
|
+ *read_data = (u32)(status & 0x0000FFFF);
|
|
+
|
|
+ enable_mdio(0);
|
|
+ return 1;
|
|
+ }
|
|
+#endif
|
|
+ else if (time_after(jiffies, t_start+5*HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Read operation is ongoing and Time Out!!\n");
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data)
|
|
+{
|
|
+ unsigned long volatile t_start=jiffies;
|
|
+ u32 volatile data;
|
|
+
|
|
+ enable_mdio(1);
|
|
+
|
|
+ // make sure previous write operation is complete
|
|
+ while(1) {
|
|
+#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+ if (!(sysRegRead(MDIO_PHY_CONTROL_1) & (0x1 << 0)))
|
|
+#else
|
|
+ if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31)))
|
|
+#endif
|
|
+ {
|
|
+ break;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start + 5 * HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Write operation ongoing\n");
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+ data = ((write_data & 0xFFFF) << 16);
|
|
+ data |= (phy_register << 8) | (phy_addr);
|
|
+ data |= (1<<13);
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data);
|
|
+#else
|
|
+ data = (1<<30) | (phy_addr << 24) | (phy_register << 16) | write_data;
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data);
|
|
+ data |= (1<<31);
|
|
+ sysRegWrite(MDIO_PHY_CONTROL_0, data); //start operation
|
|
+#endif
|
|
+ //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0);
|
|
+
|
|
+ t_start = jiffies;
|
|
+
|
|
+ // make sure write operation is complete
|
|
+ while (1) {
|
|
+#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+ if (sysRegRead(MDIO_PHY_CONTROL_1) & (0x1 << 0)) //wt_done ?= 1
|
|
+#else
|
|
+ if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete
|
|
+#endif
|
|
+ {
|
|
+ enable_mdio(0);
|
|
+ return 1;
|
|
+ }
|
|
+ else if (time_after(jiffies, t_start + 5 * HZ)) {
|
|
+ enable_mdio(0);
|
|
+ printk("\n MDIO Write operation Time Out\n");
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+#endif
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+EXPORT_SYMBOL(mii_mgr_write);
|
|
+EXPORT_SYMBOL(mii_mgr_read);
|
|
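Editorial note (not part of the patch): a minimal sketch of how the two exported helpers above are typically called from elsewhere in the driver, assuming a hypothetical PHY at MDIO address 0 and relying only on the return convention visible above (1 on success, 0 on timeout).

    /* illustrative fragment only -- it is not added by this patch */
    u32 id1 = 0, id2 = 0;

    /* IEEE 802.3 clause 22: registers 2 and 3 hold the PHY identifier */
    if (mii_mgr_read(0, 2, &id1) && mii_mgr_read(0, 3, &id2))
        printk("PHY 0 ID: 0x%04x 0x%04x\n", id1, id2);

    /* BMCR (register 0): enable and restart auto-negotiation */
    if (!mii_mgr_write(0, 0, 0x1200))
        printk("MDIO write timed out\n");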
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/ra2882ethreg.h
|
|
@@ -0,0 +1,1985 @@
|
|
+#ifndef RA2882ETHREG_H
|
|
+#define RA2882ETHREG_H
|
|
+
|
|
+#include <linux/mii.h> // for struct mii_if_info in ra2882ethreg.h
|
|
+#include <linux/version.h> /* check linux version for 2.4 and 2.6 compatibility */
|
|
+#include <linux/interrupt.h> /* for "struct tasklet_struct" in linux-3.10.14 */
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+#include <linux/ip.h>
|
|
+#include <linux/ipv6.h>
|
|
+#endif
|
|
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
|
|
+#include <asm/rt2880/rt_mmap.h>
|
|
+#endif
|
|
+#include "raether.h"
|
|
+
|
|
+#ifdef WORKQUEUE_BH
|
|
+#include <linux/workqueue.h>
|
|
+#endif // WORKQUEUE_BH //
|
|
+#ifdef CONFIG_RAETH_LRO
|
|
+#include <linux/inet_lro.h>
|
|
+#endif
|
|
+
|
|
+#define MAX_PACKET_SIZE 1514
|
|
+#define MIN_PACKET_SIZE 60
|
|
+#define MAX_TXD_LEN 0x3fff
|
|
+
|
|
+#if defined (CONFIG_ARCH_MT7623)
|
|
+#define phys_to_bus(a) (a)
|
|
+#else
|
|
+#define phys_to_bus(a) ((a) & 0x1FFFFFFF)
|
|
+#endif
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+#define BIT(x) (1 << (x))
|
|
+#endif
|
|
+/* bits range: for example BITS(16,23) = 0xFF0000
|
|
+ * ==> (BIT(m)-1) = 0x0000FFFF ~(BIT(m)-1) => 0xFFFF0000
|
|
+ * ==> (BIT(n+1)-1) = 0x00FFFFFF
|
|
+ */
|
|
+#define BITS(m,n) (~(BIT(m)-1) & ((BIT(n) - 1) | BIT(n)))
|
|
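Editorial note (not part of the patch): a worked expansion of the helper above, spelling out the arithmetic the comment sketches for BITS(16,23).

    /*
     * BITS(16,23) = ~(BIT(16)-1) & ((BIT(23)-1) | BIT(23))
     *             = ~0x0000FFFF  &  0x00FFFFFF
     *             = 0xFFFF0000   &  0x00FFFFFF
     *             = 0x00FF0000              -> bits 16..23 set
     */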
+
|
|
+#define ETHER_ADDR_LEN 6
|
|
+
|
|
+/* Phy Vender ID list */
|
|
+
|
|
+#define EV_ICPLUS_PHY_ID0 0x0243
|
|
+#define EV_ICPLUS_PHY_ID1 0x0D90
|
|
+#define EV_MARVELL_PHY_ID0 0x0141
|
|
+#define EV_MARVELL_PHY_ID1 0x0CC2
|
|
+#define EV_VTSS_PHY_ID0 0x0007
|
|
+#define EV_VTSS_PHY_ID1 0x0421
|
|
+
|
|
+/*
|
|
+ FE_INT_STATUS
|
|
+*/
|
|
+#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628) || \
|
|
+ defined (CONFIG_ARCH_MT7623)
|
|
+
|
|
+#define RX_COHERENT BIT(31)
|
|
+#define RX_DLY_INT BIT(30)
|
|
+#define TX_COHERENT BIT(29)
|
|
+#define TX_DLY_INT BIT(28)
|
|
+#define RING3_RX_DLY_INT BIT(27)
|
|
+#define RING2_RX_DLY_INT BIT(26)
|
|
+#define RING1_RX_DLY_INT BIT(25)
|
|
+
|
|
+#define ALT_RPLC_INT3 BIT(23)
|
|
+#define ALT_RPLC_INT2 BIT(22)
|
|
+#define ALT_RPLC_INT1 BIT(21)
|
|
+
|
|
+#define RX_DONE_INT3 BIT(19)
|
|
+#define RX_DONE_INT2 BIT(18)
|
|
+#define RX_DONE_INT1 BIT(17)
|
|
+#define RX_DONE_INT0 BIT(16)
|
|
+
|
|
+#define TX_DONE_INT3 BIT(3)
|
|
+#define TX_DONE_INT2 BIT(2)
|
|
+#define TX_DONE_INT1 BIT(1)
|
|
+#define TX_DONE_INT0 BIT(0)
|
|
+
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+#define RLS_COHERENT BIT(29)
|
|
+#define RLS_DLY_INT BIT(28)
|
|
+#define RLS_DONE_INT BIT(0)
|
|
+#endif
|
|
+
|
|
+#else
|
|
+//#define CNT_PPE_AF BIT(31)
|
|
+//#define CNT_GDM_AF BIT(29)
|
|
+#define PSE_P2_FC BIT(26)
|
|
+#define GDM_CRC_DROP BIT(25)
|
|
+#define PSE_BUF_DROP BIT(24)
|
|
+#define GDM_OTHER_DROP BIT(23)
|
|
+#define PSE_P1_FC BIT(22)
|
|
+#define PSE_P0_FC BIT(21)
|
|
+#define PSE_FQ_EMPTY BIT(20)
|
|
+#define GE1_STA_CHG BIT(18)
|
|
+#define TX_COHERENT BIT(17)
|
|
+#define RX_COHERENT BIT(16)
|
|
+
|
|
+#define TX_DONE_INT3 BIT(11)
|
|
+#define TX_DONE_INT2 BIT(10)
|
|
+#define TX_DONE_INT1 BIT(9)
|
|
+#define TX_DONE_INT0 BIT(8)
|
|
+#define RX_DONE_INT1 RX_DONE_INT0
|
|
+#define RX_DONE_INT0 BIT(2)
|
|
+#define TX_DLY_INT BIT(1)
|
|
+#define RX_DLY_INT BIT(0)
|
|
+#endif
|
|
+
|
|
+#define FE_INT_ALL (TX_DONE_INT3 | TX_DONE_INT2 | \
|
|
+ TX_DONE_INT1 | TX_DONE_INT0 | \
|
|
+ RX_DONE_INT0 | RX_DONE_INT1 | \
|
|
+ RX_DONE_INT2 | RX_DONE_INT3)
|
|
+
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+#define QFE_INT_ALL (RLS_DONE_INT | RX_DONE_INT0 | RX_DONE_INT1)
|
|
+#define QFE_INT_DLY_INIT (RLS_DLY_INT | RX_DLY_INT)
|
|
+
|
|
+#define NUM_QDMA_PAGE 512
|
|
+#define QDMA_PAGE_SIZE 2048
|
|
+#endif
|
|
+/*
|
|
+ * SW_INT_STATUS
|
|
+ */
|
|
+#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+#define PORT0_QUEUE_FULL BIT(14) //port0 queue full
|
|
+#define PORT1_QUEUE_FULL BIT(15) //port1 queue full
|
|
+#define PORT2_QUEUE_FULL BIT(16) //port2 queue full
|
|
+#define PORT3_QUEUE_FULL BIT(17) //port3 queue full
|
|
+#define PORT4_QUEUE_FULL BIT(18) //port4 queue full
|
|
+#define PORT5_QUEUE_FULL BIT(19) //port5 queue full
|
|
+#define PORT6_QUEUE_FULL BIT(20) //port6 queue full
|
|
+#define SHARED_QUEUE_FULL BIT(23) //shared queue full
|
|
+#define QUEUE_EXHAUSTED BIT(24) //global queue is used up and all packets are dropped
|
|
+#define BC_STROM BIT(25) //the device is undergoing a broadcast storm
|
|
+#define PORT_ST_CHG BIT(26) //Port status change
|
|
+#define UNSECURED_ALERT BIT(27) //Intruder alert
|
|
+#define ABNORMAL_ALERT BIT(28) //Abnormal
|
|
+
|
|
+#define ESW_ISR (RALINK_ETH_SW_BASE + 0x00)
|
|
+#define ESW_IMR (RALINK_ETH_SW_BASE + 0x04)
|
|
+#define ESW_INT_ALL (PORT_ST_CHG)
|
|
+
|
|
+#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined (CONFIG_RALINK_MT7620)
|
|
+#define MIB_INT BIT(25)
|
|
+#define ACL_INT BIT(24)
|
|
+#define P5_LINK_CH BIT(5)
|
|
+#define P4_LINK_CH BIT(4)
|
|
+#define P3_LINK_CH BIT(3)
|
|
+#define P2_LINK_CH BIT(2)
|
|
+#define P1_LINK_CH BIT(1)
|
|
+#define P0_LINK_CH BIT(0)
|
|
+
|
|
+#define RX_GOCT_CNT BIT(4)
|
|
+#define RX_GOOD_CNT BIT(6)
|
|
+#define TX_GOCT_CNT BIT(17)
|
|
+#define TX_GOOD_CNT BIT(19)
|
|
+
|
|
+#define MSK_RX_GOCT_CNT BIT(4)
|
|
+#define MSK_RX_GOOD_CNT BIT(6)
|
|
+#define MSK_TX_GOCT_CNT BIT(17)
|
|
+#define MSK_TX_GOOD_CNT BIT(19)
|
|
+#define MSK_CNT_INT_ALL (MSK_RX_GOCT_CNT | MSK_RX_GOOD_CNT | MSK_TX_GOCT_CNT | MSK_TX_GOOD_CNT)
|
|
+//#define MSK_CNT_INT_ALL (MSK_RX_GOOD_CNT | MSK_TX_GOOD_CNT)
|
|
+
|
|
+
|
|
+#define ESW_IMR (RALINK_ETH_SW_BASE + 0x7000 + 0x8)
|
|
+#define ESW_ISR (RALINK_ETH_SW_BASE + 0x7000 + 0xC)
|
|
+#define ESW_INT_ALL (P0_LINK_CH | P1_LINK_CH | P2_LINK_CH | P3_LINK_CH | P4_LINK_CH | P5_LINK_CH | ACL_INT | MIB_INT)
|
|
+#define ESW_AISR (RALINK_ETH_SW_BASE + 0x8)
|
|
+#define ESW_P0_IntSn (RALINK_ETH_SW_BASE + 0x4004)
|
|
+#define ESW_P1_IntSn (RALINK_ETH_SW_BASE + 0x4104)
|
|
+#define ESW_P2_IntSn (RALINK_ETH_SW_BASE + 0x4204)
|
|
+#define ESW_P3_IntSn (RALINK_ETH_SW_BASE + 0x4304)
|
|
+#define ESW_P4_IntSn (RALINK_ETH_SW_BASE + 0x4404)
|
|
+#define ESW_P5_IntSn (RALINK_ETH_SW_BASE + 0x4504)
|
|
+#define ESW_P6_IntSn (RALINK_ETH_SW_BASE + 0x4604)
|
|
+#define ESW_P0_IntMn (RALINK_ETH_SW_BASE + 0x4008)
|
|
+#define ESW_P1_IntMn (RALINK_ETH_SW_BASE + 0x4108)
|
|
+#define ESW_P2_IntMn (RALINK_ETH_SW_BASE + 0x4208)
|
|
+#define ESW_P3_IntMn (RALINK_ETH_SW_BASE + 0x4308)
|
|
+#define ESW_P4_IntMn (RALINK_ETH_SW_BASE + 0x4408)
|
|
+#define ESW_P5_IntMn (RALINK_ETH_SW_BASE + 0x4508)
|
|
+#define ESW_P6_IntMn (RALINK_ETH_SW_BASE + 0x4608)
|
|
+
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+#define ESW_P7_IntSn (RALINK_ETH_SW_BASE + 0x4704)
|
|
+#define ESW_P7_IntMn (RALINK_ETH_SW_BASE + 0x4708)
|
|
+#endif
|
|
+
|
|
+
|
|
+#define ESW_PHY_POLLING (RALINK_ETH_SW_BASE + 0x7000)
|
|
+
|
|
+#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+
|
|
+#define ESW_PHY_POLLING (RALINK_ETH_SW_BASE + 0x0000)
|
|
+
|
|
+#define P5_LINK_CH BIT(5)
|
|
+#define P4_LINK_CH BIT(4)
|
|
+#define P3_LINK_CH BIT(3)
|
|
+#define P2_LINK_CH BIT(2)
|
|
+#define P1_LINK_CH BIT(1)
|
|
+#define P0_LINK_CH BIT(0)
|
|
+
|
|
+
|
|
+#endif // CONFIG_RALINK_RT3052 || CONFIG_RALINK_RT3352 || CONFIG_RALINK_RT5350 || CONFIG_RALINK_MT7628
|
|
+
|
|
+#define RX_BUF_ALLOC_SIZE 2000
|
|
+#define FASTPATH_HEADROOM 64
|
|
+
|
|
+#define ETHER_BUFFER_ALIGN 32 // align on a cache line (32 bytes)
|
|
+
|
|
+#define ETHER_ALIGNED_RX_SKB_ADDR(addr) \
|
|
+ ((((unsigned long)(addr) + ETHER_BUFFER_ALIGN - 1) & \
|
|
+ ~(ETHER_BUFFER_ALIGN - 1)) - (unsigned long)(addr))
|
|
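Editorial note (not part of the patch): a worked example of the alignment helper above with ETHER_BUFFER_ALIGN = 32 and an assumed buffer address, showing that the macro returns the headroom needed to reach the next cache-line boundary.

    /*
     * addr = 0x1004
     * (0x1004 + 31) & ~31 = 0x1020      next 32-byte boundary
     * 0x1020 - 0x1004     = 0x1C (28)   bytes to skip so the RX buffer starts aligned
     */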
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+typedef struct _PSEUDO_ADAPTER {
|
|
+ struct net_device *RaethDev;
|
|
+ struct net_device *PseudoDev;
|
|
+ struct net_device_stats stat;
|
|
+#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
|
|
+ struct mii_if_info mii_info;
|
|
+#endif
|
|
+
|
|
+} PSEUDO_ADAPTER, PPSEUDO_ADAPTER;
|
|
+
|
|
+#define MAX_PSEUDO_ENTRY 1
|
|
+#endif
|
|
+
|
|
+
|
|
+
|
|
+/* Register Categories Definition */
|
|
+#define RAFRAMEENGINE_OFFSET 0x0000
|
|
+#define RAGDMA_OFFSET 0x0020
|
|
+#define RAPSE_OFFSET 0x0040
|
|
+#define RAGDMA2_OFFSET 0x0060
|
|
+#define RACDMA_OFFSET 0x0080
|
|
+#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628) || \
|
|
+ defined (CONFIG_ARCH_MT7623)
|
|
+
|
|
+#define RAPDMA_OFFSET 0x0800
|
|
+#define SDM_OFFSET 0x0C00
|
|
+#else
|
|
+#define RAPDMA_OFFSET 0x0100
|
|
+#endif
|
|
+#define RAPPE_OFFSET 0x0200
|
|
+#define RACMTABLE_OFFSET 0x0400
|
|
+#define RAPOLICYTABLE_OFFSET 0x1000
|
|
+
|
|
+
|
|
+/* Register Map Detail */
|
|
+/* RT3883 */
|
|
+#define SYSCFG1 (RALINK_SYSCTL_BASE + 0x14)
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+
|
|
+/* 1. PDMA */
|
|
+#define TX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x000)
|
|
+#define TX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x004)
|
|
+#define TX_CTX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x008)
|
|
+#define TX_DTX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x00C)
|
|
+
|
|
+#define TX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x010)
|
|
+#define TX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x014)
|
|
+#define TX_CTX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x018)
|
|
+#define TX_DTX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x01C)
|
|
+
|
|
+#define TX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x020)
|
|
+#define TX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x024)
|
|
+#define TX_CTX_IDX2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x028)
|
|
+#define TX_DTX_IDX2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x02C)
|
|
+
|
|
+#define TX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x030)
|
|
+#define TX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x034)
|
|
+#define TX_CTX_IDX3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x038)
|
|
+#define TX_DTX_IDX3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x03C)
|
|
+
|
|
+#define RX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x100)
|
|
+#define RX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x104)
|
|
+#define RX_CALC_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x108)
|
|
+#define RX_DRX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x10C)
|
|
+
|
|
+#define RX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x110)
|
|
+#define RX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x114)
|
|
+#define RX_CALC_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x118)
|
|
+#define RX_DRX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x11C)
|
|
+
|
|
+#define PDMA_INFO (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x200)
|
|
+#define PDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x204)
|
|
+#define PDMA_RST_IDX (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x208)
|
|
+#define PDMA_RST_CFG (PDMA_RST_IDX)
|
|
+#define DLY_INT_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x20C)
|
|
+#define FREEQ_THRES (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x210)
|
|
+#define INT_STATUS (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x220)
|
|
+#define FE_INT_STATUS (INT_STATUS)
|
|
+#define INT_MASK (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x228)
|
|
+#define FE_INT_ENABLE (INT_MASK)
|
|
+#define PDMA_WRR (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x280)
|
|
+#define PDMA_SCH_CFG (PDMA_WRR)
|
|
+
|
|
+#define SDM_CON (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x00) //Switch DMA configuration
|
|
+#define SDM_RRING (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x04) //Switch DMA Rx Ring
|
|
+#define SDM_TRING (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x08) //Switch DMA Tx Ring
|
|
+#define SDM_MAC_ADRL (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x0C) //Switch MAC address LSB
|
|
+#define SDM_MAC_ADRH (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x10) //Switch MAC Address MSB
|
|
+#define SDM_TPCNT (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x100) //Switch DMA Tx packet count
|
|
+#define SDM_TBCNT (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x104) //Switch DMA Tx byte count
|
|
+#define SDM_RPCNT (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x108) //Switch DMA rx packet count
|
|
+#define SDM_RBCNT (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x10C) //Switch DMA rx byte count
|
|
+#define SDM_CS_ERR (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x110) //Switch DMA rx checksum error count
|
|
+
|
|
+#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || \
|
|
+ defined (CONFIG_ARCH_MT7623)
|
|
+
|
|
+/* Old FE with New PDMA */
|
|
+#define PDMA_RELATED 0x0800
|
|
+/* 1. PDMA */
|
|
+#define TX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x000)
|
|
+#define TX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x004)
|
|
+#define TX_CTX_IDX0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x008)
|
|
+#define TX_DTX_IDX0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x00C)
|
|
+
|
|
+#define TX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x010)
|
|
+#define TX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x014)
|
|
+#define TX_CTX_IDX1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x018)
|
|
+#define TX_DTX_IDX1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x01C)
|
|
+
|
|
+#define TX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x020)
|
|
+#define TX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x024)
|
|
+#define TX_CTX_IDX2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x028)
|
|
+#define TX_DTX_IDX2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x02C)
|
|
+
|
|
+#define TX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x030)
|
|
+#define TX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x034)
|
|
+#define TX_CTX_IDX3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x038)
|
|
+#define TX_DTX_IDX3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x03C)
|
|
+
|
|
+#define RX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x100)
|
|
+#define RX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x104)
|
|
+#define RX_CALC_IDX0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x108)
|
|
+#define RX_DRX_IDX0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x10C)
|
|
+
|
|
+#define RX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x110)
|
|
+#define RX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x114)
|
|
+#define RX_CALC_IDX1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x118)
|
|
+#define RX_DRX_IDX1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x11C)
|
|
+
|
|
+#define RX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x120)
|
|
+#define RX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x124)
|
|
+#define RX_CALC_IDX2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x128)
|
|
+#define RX_DRX_IDX12 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x12C)
|
|
+
|
|
+#define RX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x130)
|
|
+#define RX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x134)
|
|
+#define RX_CALC_IDX3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x138)
|
|
+#define RX_DRX_IDX3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x13C)
|
|
+
|
|
+#define PDMA_INFO (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x200)
|
|
+#define PDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x204)
|
|
+#define PDMA_RST_IDX (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x208)
|
|
+#define PDMA_RST_CFG (PDMA_RST_IDX)
|
|
+#define DLY_INT_CFG (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x20C)
|
|
+#define FREEQ_THRES (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x210)
|
|
+#define INT_STATUS (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x220)
|
|
+#define FE_INT_STATUS (INT_STATUS)
|
|
+#define INT_MASK (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x228)
|
|
+#define FE_INT_ENABLE (INT_MASK)
|
|
+#define SCH_Q01_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x280)
|
|
+#define SCH_Q23_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x284)
|
|
+
|
|
+#define FE_GLO_CFG (RALINK_FRAME_ENGINE_BASE + 0x00)
|
|
+#define FE_RST_GL (RALINK_FRAME_ENGINE_BASE + 0x04)
|
|
+#define FE_INT_STATUS2 (RALINK_FRAME_ENGINE_BASE + 0x08)
|
|
+#define FE_INT_ENABLE2 (RALINK_FRAME_ENGINE_BASE + 0x0c)
|
|
+//#define FC_DROP_STA RALINK_FRAME_ENGINE_BASE + 0x18
|
|
+#define FOE_TS_T (RALINK_FRAME_ENGINE_BASE + 0x10)
|
|
+
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+#define GDMA1_RELATED 0x0600
|
|
+#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x00)
|
|
+#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x04)
|
|
+#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x08)
|
|
+#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x0C)
|
|
+#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+#define GDMA1_RELATED 0x0500
|
|
+#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x00)
|
|
+#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x04)
|
|
+#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x08)
|
|
+#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x0C)
|
|
+
|
|
+#define GDMA2_RELATED 0x1500
|
|
+#define GDMA2_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x00)
|
|
+#define GDMA2_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x04)
|
|
+#define GDMA2_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x08)
|
|
+#define GDMA2_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x0C)
|
|
+#else
|
|
+#define GDMA1_RELATED 0x0020
|
|
+#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x00)
|
|
+#define GDMA1_SCH_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x04)
|
|
+#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x08)
|
|
+#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x0C)
|
|
+#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x10)
|
|
+
|
|
+#define GDMA2_RELATED 0x0060
|
|
+#define GDMA2_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x00)
|
|
+#define GDMA2_SCH_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x04)
|
|
+#define GDMA2_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x08)
|
|
+#define GDMA2_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x0C)
|
|
+#define GDMA2_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x10)
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+#define PSE_RELATED 0x0500
|
|
+#define PSE_FQFC_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x00)
|
|
+#define PSE_IQ_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x04)
|
|
+#define PSE_QUE_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x08)
|
|
+#else
|
|
+#define PSE_RELATED 0x0040
|
|
+#define PSE_FQ_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x00)
|
|
+#define CDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x04)
|
|
+#define GDMA1_FC_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x08)
|
|
+#define GDMA2_FC_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x0C)
|
|
+#define CDMA_OQ_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x10)
|
|
+#define GDMA1_OQ_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x14)
|
|
+#define GDMA2_OQ_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x18)
|
|
+#define PSE_IQ_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x1C)
|
|
+#endif
|
|
+
|
|
+
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+#define CDMA_RELATED 0x0400
|
|
+#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00)
|
|
+#define SMACCR0 (RALINK_ETH_SW_BASE + 0x3FE4)
|
|
+#define SMACCR1 (RALINK_ETH_SW_BASE + 0x3FE8)
|
|
+#define CKGCR (RALINK_ETH_SW_BASE + 0x3FF0)
|
|
+#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+#define CDMA_RELATED 0x0400
|
|
+#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00) //fake definition
|
|
+#define CDMP_IG_CTRL (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00)
|
|
+#define CDMP_EG_CTRL (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x04)
|
|
+#else
|
|
+#define CDMA_RELATED 0x0080
|
|
+#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00)
|
|
+#define CDMA_SCH_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x04)
|
|
+#define SMACCR0 (RALINK_ETH_SW_BASE + 0x30E4)
|
|
+#define SMACCR1 (RALINK_ETH_SW_BASE + 0x30E8)
|
|
+#define CKGCR (RALINK_ETH_SW_BASE + 0x30F0)
|
|
+#endif
|
|
+
|
|
+#define PDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE+0x100)
|
|
+
|
|
+
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+/*kurtis: add QDMA define*/
|
|
+
|
|
+#define CLK_CFG_0 (RALINK_SYSCTL_BASE + 0x2C)
|
|
+#define PAD_RGMII2_MDIO_CFG (RALINK_SYSCTL_BASE + 0x58)
|
|
+
|
|
+#define QDMA_RELATED 0x1800
|
|
+#define QTX_CFG_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x000)
|
|
+#define QTX_SCH_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x004)
|
|
+#define QTX_HEAD_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x008)
|
|
+#define QTX_TAIL_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x00C)
|
|
+#define QTX_CFG_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x010)
|
|
+#define QTX_SCH_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x014)
|
|
+#define QTX_HEAD_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x018)
|
|
+#define QTX_TAIL_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x01C)
|
|
+#define QTX_CFG_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x020)
|
|
+#define QTX_SCH_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x024)
|
|
+#define QTX_HEAD_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x028)
|
|
+#define QTX_TAIL_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x02C)
|
|
+#define QTX_CFG_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x030)
|
|
+#define QTX_SCH_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x034)
|
|
+#define QTX_HEAD_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x038)
|
|
+#define QTX_TAIL_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x03C)
|
|
+#define QTX_CFG_4 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x040)
|
|
+#define QTX_SCH_4 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x044)
|
|
+#define QTX_HEAD_4 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x048)
|
|
+#define QTX_TAIL_4 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x04C)
|
|
+#define QTX_CFG_5 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x050)
|
|
+#define QTX_SCH_5 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x054)
|
|
+#define QTX_HEAD_5 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x058)
|
|
+#define QTX_TAIL_5 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x05C)
|
|
+#define QTX_CFG_6 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x060)
|
|
+#define QTX_SCH_6 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x064)
|
|
+#define QTX_HEAD_6 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x068)
|
|
+#define QTX_TAIL_6 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x06C)
|
|
+#define QTX_CFG_7 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x070)
|
|
+#define QTX_SCH_7 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x074)
|
|
+#define QTX_HEAD_7 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x078)
|
|
+#define QTX_TAIL_7 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x07C)
|
|
+#define QTX_CFG_8 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x080)
|
|
+#define QTX_SCH_8 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x084)
|
|
+#define QTX_HEAD_8 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x088)
|
|
+#define QTX_TAIL_8 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x08C)
|
|
+#define QTX_CFG_9 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x090)
|
|
+#define QTX_SCH_9 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x094)
|
|
+#define QTX_HEAD_9 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x098)
|
|
+#define QTX_TAIL_9 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x09C)
|
|
+#define QTX_CFG_10 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A0)
|
|
+#define QTX_SCH_10 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A4)
|
|
+#define QTX_HEAD_10 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A8)
|
|
+#define QTX_TAIL_10 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0AC)
|
|
+#define QTX_CFG_11 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B0)
|
|
+#define QTX_SCH_11 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B4)
|
|
+#define QTX_HEAD_11 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B8)
|
|
+#define QTX_TAIL_11 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0BC)
|
|
+#define QTX_CFG_12 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C0)
|
|
+#define QTX_SCH_12 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C4)
|
|
+#define QTX_HEAD_12 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C8)
|
|
+#define QTX_TAIL_12 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0CC)
|
|
+#define QTX_CFG_13 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D0)
|
|
+#define QTX_SCH_13 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D4)
|
|
+#define QTX_HEAD_13 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D8)
|
|
+#define QTX_TAIL_13 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0DC)
|
|
+#define QTX_CFG_14 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E0)
|
|
+#define QTX_SCH_14 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E4)
|
|
+#define QTX_HEAD_14 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E8)
|
|
+#define QTX_TAIL_14 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0EC)
|
|
+#define QTX_CFG_15 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F0)
|
|
+#define QTX_SCH_15 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F4)
|
|
+#define QTX_HEAD_15 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F8)
|
|
+#define QTX_TAIL_15 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0FC)
|
|
+#define QRX_BASE_PTR_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x100)
|
|
+#define QRX_MAX_CNT_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x104)
|
|
+#define QRX_CRX_IDX_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x108)
|
|
+#define QRX_DRX_IDX_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x10C)
|
|
+#define QRX_BASE_PTR_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x110)
|
|
+#define QRX_MAX_CNT_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x114)
|
|
+#define QRX_CRX_IDX_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x118)
|
|
+#define QRX_DRX_IDX_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x11C)
|
|
+#if defined (CONFIG_ARCH_MT7623)
|
|
+#define VQTX_TB_BASE_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x180)
|
|
+#define VQTX_TB_BASE_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x184)
|
|
+#define VQTX_TB_BASE_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x188)
|
|
+#define VQTX_TB_BASE_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x18C)
|
|
+#endif
|
|
+#define QDMA_INFO (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x200)
|
|
+#define QDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x204)
|
|
+#define QDMA_RST_IDX (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x208)
|
|
+#define QDMA_RST_CFG (QDMA_RST_IDX)
|
|
+#define QDMA_DELAY_INT (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x20C)
|
|
+#define QDMA_FC_THRES (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x210)
|
|
+#define QDMA_TX_SCH (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x214)
|
|
+#define QDMA_INT_STS (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x218)
|
|
+#define QFE_INT_STATUS (QDMA_INT_STS)
|
|
+#define QDMA_INT_MASK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x21C)
|
|
+#define QFE_INT_ENABLE (QDMA_INT_MASK)
|
|
+#define QDMA_TRTCM (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x220)
|
|
+#define QDMA_DATA0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x224)
|
|
+#define QDMA_DATA1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x228)
|
|
+#define QDMA_RED_THRES (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x22C)
|
|
+#define QDMA_TEST (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x230)
|
|
+#define QDMA_DMA (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x234)
|
|
+#define QDMA_BMU (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x238)
|
|
+#define QDMA_HRED1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x240)
|
|
+#define QDMA_HRED2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x244)
|
|
+#define QDMA_SRED1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x248)
|
|
+#define QDMA_SRED2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x24C)
|
|
+#define QTX_CTX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x300)
|
|
+#define QTX_DTX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x304)
|
|
+#define QTX_FWD_CNT (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x308)
|
|
+#define QTX_CRX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x310)
|
|
+#define QTX_DRX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x314)
|
|
+#define QTX_RLS_CNT (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x318)
|
|
+#define QDMA_FQ_HEAD (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x320)
|
|
+#define QDMA_FQ_TAIL (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x324)
|
|
+#define QDMA_FQ_CNT (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x328)
|
|
+#define QDMA_FQ_BLEN (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x32C)
|
|
+#define QTX_Q0MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x350)
|
|
+#define QTX_Q1MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x354)
|
|
+#define QTX_Q2MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x358)
|
|
+#define QTX_Q3MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x35C)
|
|
+#define QTX_Q0MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x360)
|
|
+#define QTX_Q1MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x364)
|
|
+#define QTX_Q2MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x368)
|
|
+#define QTX_Q3MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x36C)
|
|
+
|
|
+
|
|
+#endif /* MT7621 QDMA */
|
|
+
|
|
+#else
|
|
+
|
|
+/* 1. Frame Engine Global Registers */
|
|
+#define MDIO_ACCESS (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x00)
|
|
+#define MDIO_CFG (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x04)
|
|
+#define FE_GLO_CFG (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x08)
|
|
+#define FE_RST_GL (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x0C)
|
|
+#define FE_INT_STATUS (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x10)
|
|
+#define FE_INT_ENABLE (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x14)
|
|
+#define MDIO_CFG2 (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x18) //Original:FC_DROP_STA
|
|
+#define FOC_TS_T (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x1C)
|
|
+
|
|
+
|
|
+/* 2. GDMA Registers */
|
|
+#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x00)
|
|
+#define GDMA1_SCH_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x04)
|
|
+#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x08)
|
|
+#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x0C)
|
|
+#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x10)
|
|
+
|
|
+#define GDMA2_FWD_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x00)
|
|
+#define GDMA2_SCH_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x04)
|
|
+#define GDMA2_SHPR_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x08)
|
|
+#define GDMA2_MAC_ADRL (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x0C)
|
|
+#define GDMA2_MAC_ADRH (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x10)
|
|
+
|
|
+/* 3. PSE */
|
|
+#define PSE_FQ_CFG (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x00)
|
|
+#define CDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x04)
|
|
+#define GDMA1_FC_CFG (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x08)
|
|
+#define GDMA2_FC_CFG (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x0C)
|
|
+#define PDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE+0x1f0)
|
|
+
|
|
+/* 4. CDMA */
|
|
+#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE+RACDMA_OFFSET+0x00)
|
|
+#define CDMA_SCH_CFG (RALINK_FRAME_ENGINE_BASE+RACDMA_OFFSET+0x04)
|
|
+/* skip pppoe sid and vlan id definitions */
|
|
+
|
|
+
|
|
+/* 5. PDMA */
|
|
+#define PDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x00)
|
|
+#define PDMA_RST_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x04)
|
|
+#define PDMA_SCH_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x08)
|
|
+
|
|
+#define DLY_INT_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x0C)
|
|
+
|
|
+#define TX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x10)
|
|
+#define TX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x14)
|
|
+#define TX_CTX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x18)
|
|
+#define TX_DTX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x1C)
|
|
+
|
|
+#define TX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x20)
|
|
+#define TX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x24)
|
|
+#define TX_CTX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x28)
|
|
+#define TX_DTX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x2C)
|
|
+
|
|
+#define TX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x40)
|
|
+#define TX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x44)
|
|
+#define TX_CTX_IDX2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x48)
|
|
+#define TX_DTX_IDX2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x4C)
|
|
+
|
|
+#define TX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x50)
|
|
+#define TX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x54)
|
|
+#define TX_CTX_IDX3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x58)
|
|
+#define TX_DTX_IDX3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x5C)
|
|
+
|
|
+#define RX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x30)
|
|
+#define RX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x34)
|
|
+#define RX_CALC_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x38)
|
|
+#define RX_DRX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x3C)
|
|
+
|
|
+#define RX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x40)
|
|
+#define RX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x44)
|
|
+#define RX_CALC_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x48)
|
|
+#define RX_DRX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x4C)
|
|
+
|
|
+#endif
|
|
+
|
|
+#define DELAY_INT_INIT 0x84048404
|
|
+#define FE_INT_DLY_INIT (TX_DLY_INT | RX_DLY_INT)
|
|
+
|
|
+
|
|
+#if !defined (CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
|
|
+
|
|
+/* 6. Counter and Meter Table */
|
|
+#define PPE_AC_BCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x000) /* PPE Accounting Group 0 Byte Cnt */
|
|
+#define PPE_AC_PCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x004) /* PPE Accounting Group 0 Packet Cnt */
|
|
+/* 0 ~ 63 */
|
|
+
|
|
+#define PPE_MTR_CNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x200) /* 0 ~ 63 */
|
|
+/* skip... */
|
|
+#define PPE_MTR_CNT63 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x2FC)
|
|
+
|
|
+#define GDMA_TX_GBCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x300) /* Transmit good byte cnt for GEport */
|
|
+#define GDMA_TX_GPCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x304) /* Transmit good pkt cnt for GEport */
|
|
+#define GDMA_TX_SKIPCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x308) /* Transmit skip cnt for GEport */
|
|
+#define GDMA_TX_COLCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x30C) /* Transmit collision cnt for GEport */
|
|
+
|
|
+/* update these address mappings to fit data sheet v0.26, by bobtseng, 2007.6.14 */
|
|
+#define GDMA_RX_GBCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x320)
|
|
+#define GDMA_RX_GPCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x324)
|
|
+#define GDMA_RX_OERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x328)
|
|
+#define GDMA_RX_FERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x32C)
|
|
+#define GDMA_RX_SERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x330)
|
|
+#define GDMA_RX_LERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x334)
|
|
+#define GDMA_RX_CERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x338)
|
|
+#define GDMA_RX_FCCNT1 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x33C)
|
|
+
|
|
+#endif
|
|
+
|
|
+/* LRO global control */
|
|
+/* Bits [15:0]:LRO_ALT_RFSH_TIMER, Bits [20:16]:LRO_ALT_TICK_TIMER */
|
|
+#define LRO_ALT_REFRESH_TIMER (RALINK_FRAME_ENGINE_BASE+0x001C)
|
|
+
|
|
+/* LRO auto-learn table info */
|
|
+#define PDMA_FE_ALT_CF8 (RALINK_FRAME_ENGINE_BASE+0x0300)
|
|
+#define PDMA_FE_ALT_SGL_CFC (RALINK_FRAME_ENGINE_BASE+0x0304)
|
|
+#define PDMA_FE_ALT_SEQ_CFC (RALINK_FRAME_ENGINE_BASE+0x0308)
|
|
+
|
|
+/* LRO controls */
|
|
+#define ADMA_LRO_CTRL_OFFSET 0x0980
|
|
+/*
|
|
+ * Bit [0]:LRO_EN, Bit [1]:LRO_IPv6_EN, Bit [2]:MULTIPLE_NON_LRO_RX_RING_EN, Bit [3]:MULTIPLE_RXD_PREFETCH_EN,
|
|
+ * Bit [4]:RXD_PREFETCH_EN, Bit [5]:LRO_DLY_INT_EN, Bit [6]:LRO_CRSN_BNW, Bit [7]:L3_CKS_UPD_EN,
|
|
+ * Bit [20]:first_ineligible_pkt_redirect_en, Bit [21]:cr_lro_alt_score_mode, Bit [22]:cr_lro_alt_rplc_mode,
|
|
+ * Bit [23]:cr_lro_l4_ctrl_psh_en, Bits [28:26]:LRO_RING_RELINGUISH_REQ, Bits [31:29]:LRO_RING_RELINGUISH_DONE
|
|
+ */
|
|
+#define ADMA_LRO_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE+ADMA_LRO_CTRL_OFFSET+0x00)
|
|
+/* Bits [31:0]:LRO_CPU_REASON */
|
|
+#define ADMA_LRO_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE+ADMA_LRO_CTRL_OFFSET+0x04)
|
|
+/* Bits [31:0]:AUTO_LEARN_LRO_ELIGIBLE_THRESHOLD */
|
|
+#define ADMA_LRO_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE+ADMA_LRO_CTRL_OFFSET+0x08)
|
|
+/*
|
|
+ * Bits [7:0]:LRO_MAX_AGGREGATED_CNT, Bits [11:8]:LRO_VLAN_EN, Bits [13:12]:LRO_VLAN_VID_CMP_DEPTH,
|
|
+ * Bit [14]:ADMA_FW_RSTN_REQ, Bit [15]:ADMA_MODE, Bits [31:16]:LRO_MIN_RXD_SDL0
|
|
+ */
|
|
+#define ADMA_LRO_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE+ADMA_LRO_CTRL_OFFSET+0x0C)
|
|
+
|
|
+/* LRO RX delay interrupt configurations */
|
|
+#define LRO_RX1_DLY_INT (RALINK_FRAME_ENGINE_BASE+0x0a70)
|
|
+#define LRO_RX2_DLY_INT (RALINK_FRAME_ENGINE_BASE+0x0a74)
|
|
+#define LRO_RX3_DLY_INT (RALINK_FRAME_ENGINE_BASE+0x0a78)
|
|
+
|
|
+/* LRO auto-learn configurations */
|
|
+#define PDMA_LRO_ATL_OVERFLOW_ADJ_OFFSET 0x0990
|
|
+#define PDMA_LRO_ATL_OVERFLOW_ADJ (RALINK_FRAME_ENGINE_BASE+PDMA_LRO_ATL_OVERFLOW_ADJ_OFFSET)
|
|
+#define LRO_ALT_SCORE_DELTA (RALINK_FRAME_ENGINE_BASE+0x0a4c)
|
|
+
|
|
+/* LRO agg timer configurations */
|
|
+#define LRO_MAX_AGG_TIME (RALINK_FRAME_ENGINE_BASE+0x0a5c)
|
|
+
|
|
+/* LRO configurations of RX ring #0 */
|
|
+#define LRO_RXRING0_OFFSET 0x0b00
|
|
+#define LRO_RX_RING0_DIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x04)
|
|
+#define LRO_RX_RING0_DIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x08)
|
|
+#define LRO_RX_RING0_DIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x0C)
|
|
+#define LRO_RX_RING0_DIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x10)
|
|
+#define LRO_RX_RING0_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x28)
|
|
+/* Bit [8]:RING0_VLD, Bit [9]:RING0_MYIP_VLD */
|
|
+#define LRO_RX_RING0_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x2C)
|
|
+#define LRO_RX_RING0_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING0_OFFSET+0x30)
|
|
+/* LRO configurations of RX ring #1 */
|
|
+#define LRO_RXRING1_OFFSET 0x0b40
|
|
+#define LRO_RX_RING1_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x00)
|
|
+#define LRO_RX_RING1_DIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x04)
|
|
+#define LRO_RX_RING1_DIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x08)
|
|
+#define LRO_RX_RING1_DIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x0C)
|
|
+#define LRO_RX_RING1_DIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x10)
|
|
+#define LRO_RX_RING1_SIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x14)
|
|
+#define LRO_RX_RING1_SIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x18)
|
|
+#define LRO_RX_RING1_SIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x1C)
|
|
+#define LRO_RX_RING1_SIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x20)
|
|
+#define LRO_RX_RING1_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x24)
|
|
+#define LRO_RX_RING1_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x28)
|
|
+#define LRO_RX_RING1_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x2C)
|
|
+#define LRO_RX_RING1_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING1_OFFSET+0x30)
|
|
+#define LRO_RXRING2_OFFSET 0x0b80
|
|
+#define LRO_RX_RING2_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x00)
|
|
+#define LRO_RX_RING2_DIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x04)
|
|
+#define LRO_RX_RING2_DIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x08)
|
|
+#define LRO_RX_RING2_DIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x0C)
|
|
+#define LRO_RX_RING2_DIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x10)
|
|
+#define LRO_RX_RING2_SIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x14)
|
|
+#define LRO_RX_RING2_SIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x18)
|
|
+#define LRO_RX_RING2_SIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x1C)
|
|
+#define LRO_RX_RING2_SIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x20)
|
|
+#define LRO_RX_RING2_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x24)
|
|
+#define LRO_RX_RING2_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x28)
|
|
+#define LRO_RX_RING2_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x2C)
|
|
+#define LRO_RX_RING2_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING2_OFFSET+0x30)
|
|
+#define LRO_RXRING3_OFFSET 0x0bc0
|
|
+#define LRO_RX_RING3_STP_DTP_DW (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x00)
|
|
+#define LRO_RX_RING3_DIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x04)
|
|
+#define LRO_RX_RING3_DIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x08)
|
|
+#define LRO_RX_RING3_DIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x0C)
|
|
+#define LRO_RX_RING3_DIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x10)
|
|
+#define LRO_RX_RING3_SIP_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x14)
|
|
+#define LRO_RX_RING3_SIP_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x18)
|
|
+#define LRO_RX_RING3_SIP_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x1C)
|
|
+#define LRO_RX_RING3_SIP_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x20)
|
|
+#define LRO_RX_RING3_CTRL_DW0 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x24)
|
|
+#define LRO_RX_RING3_CTRL_DW1 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x28)
|
|
+#define LRO_RX_RING3_CTRL_DW2 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x2C)
|
|
+#define LRO_RX_RING3_CTRL_DW3 (RALINK_FRAME_ENGINE_BASE+LRO_RXRING3_OFFSET+0x30)
|
|
+
|
|
+/* LRO RX ring mode */
|
|
+#define PDMA_RX_NORMAL_MODE (0x0)
|
|
+#define PDMA_RX_PSE_MODE (0x1)
|
|
+#define PDMA_RX_FORCE_PORT (0x2)
|
|
+#define PDMA_RX_AUTO_LEARN (0x3)
|
|
+
|
|
+#define ADMA_RX_RING0 (0)
|
|
+#define ADMA_RX_RING1 (1)
|
|
+#define ADMA_RX_RING2 (2)
|
|
+#define ADMA_RX_RING3 (3)
|
|
+
|
|
+#define ADMA_RX_LEN0_MASK (0x3fff)
|
|
+#define ADMA_RX_LEN1_MASK (0x3)
|
|
+
|
|
+#define PDMA_LRO_EN BIT(0)
|
|
+#define PDMA_LRO_IPV6_EN BIT(1)
|
|
+#define PDMA_LRO_IPV4_CSUM_UPDATE_EN BIT(7)
|
|
+#define PDMA_LRO_IPV4_CTRL_PUSH_EN BIT(23)
|
|
+#define PDMA_LRO_RXD_PREFETCH_EN BITS(3,4)
|
|
+#define PDMA_NON_LRO_MULTI_EN BIT(2)
|
|
+#define PDMA_LRO_DLY_INT_EN BIT(5)
|
|
+#define PDMA_LRO_FUSH_REQ BITS(26,28)
|
|
+#define PDMA_LRO_RELINGUISH BITS(29,31)
|
|
+#define PDMA_LRO_FREQ_PRI_ADJ BITS(16,19)
|
|
+#define PDMA_LRO_TPUT_PRE_ADJ BITS(8,11)
|
|
+#define PDMA_LRO_TPUT_PRI_ADJ BITS(12,15)
|
|
+#define PDMA_LRO_ALT_SCORE_MODE BIT(21)
|
|
+#define PDMA_LRO_RING_AGE1 BITS(22,31)
|
|
+#define PDMA_LRO_RING_AGE2 BITS(0,5)
|
|
+#define PDMA_LRO_RING_AGG BITS(10,25)
|
|
+#define PDMA_LRO_RING_AGG_CNT1 BITS(26,31)
|
|
+#define PDMA_LRO_RING_AGG_CNT2 BITS(0,1)
|
|
+#define PDMA_LRO_ALT_TICK_TIMER BITS(16,20)
|
|
+#define PDMA_LRO_LRO_MIN_RXD_SDL0 BITS(16,31)
|
|
+
|
|
+#define PDMA_LRO_DLY_INT_EN_OFFSET (5)
|
|
+#define PDMA_LRO_TPUT_PRE_ADJ_OFFSET (8)
|
|
+#define PDMA_LRO_FREQ_PRI_ADJ_OFFSET (16)
|
|
+#define PDMA_LRO_LRO_MIN_RXD_SDL0_OFFSET (16)
|
|
+#define PDMA_LRO_TPUT_PRI_ADJ_OFFSET (12)
|
|
+#define PDMA_LRO_ALT_SCORE_MODE_OFFSET (21)
|
|
+#define PDMA_LRO_FUSH_REQ_OFFSET (26)
|
|
+#define PDMA_NON_LRO_MULTI_EN_OFFSET (2)
|
|
+#define PDMA_LRO_IPV6_EN_OFFSET (1)
|
|
+#define PDMA_LRO_RXD_PREFETCH_EN_OFFSET (3)
|
|
+#define PDMA_LRO_IPV4_CSUM_UPDATE_EN_OFFSET (7)
|
|
+#define PDMA_LRO_IPV4_CTRL_PUSH_EN_OFFSET (23)
|
|
+#define PDMA_LRO_ALT_TICK_TIMER_OFFSET (16)
|
|
+
|
|
+#define PDMA_LRO_TPUT_OVERFLOW_ADJ BITS(12,31)
|
|
+#define PDMA_LRO_CNT_OVERFLOW_ADJ BITS(0,11)
|
|
+
|
|
+#define PDMA_LRO_TPUT_OVERFLOW_ADJ_OFFSET (12)
|
|
+#define PDMA_LRO_CNT_OVERFLOW_ADJ_OFFSET (0)
|
|
+
|
|
+#define PDMA_LRO_ALT_BYTE_CNT_MODE (0)
|
|
+#define PDMA_LRO_ALT_PKT_CNT_MODE (1)
|
|
+
|
|
+/* LRO_RX_RING1_CTRL_DW1 offsets */
|
|
+#define PDMA_LRO_AGE_H_OFFSET (10)
|
|
+#define PDMA_LRO_RING_AGE1_OFFSET (22)
|
|
+#define PDMA_LRO_RING_AGG_CNT1_OFFSET (26)
|
|
+/* LRO_RX_RING1_CTRL_DW2 offsets */
|
|
+#define PDMA_RX_MODE_OFFSET (6)
|
|
+#define PDMA_RX_PORT_VALID_OFFSET (8)
|
|
+#define PDMA_RX_MYIP_VALID_OFFSET (9)
|
|
+#define PDMA_LRO_RING_AGE2_OFFSET (0)
|
|
+#define PDMA_LRO_RING_AGG_OFFSET (10)
|
|
+#define PDMA_LRO_RING_AGG_CNT2_OFFSET (0)
|
|
+/* LRO_RX_RING1_CTRL_DW3 offsets */
|
|
+#define PDMA_LRO_AGG_CNT_H_OFFSET (6)
|
|
+/* LRO_RX_RING1_STP_DTP_DW offsets */
|
|
+#define PDMA_RX_TCP_SRC_PORT_OFFSET (16)
|
|
+#define PDMA_RX_TCP_DEST_PORT_OFFSET (0)
|
|
+/* LRO_RX_RING1_CTRL_DW0 offsets */
|
|
+#define PDMA_RX_IPV4_FORCE_OFFSET (1)
|
|
+#define PDMA_RX_IPV6_FORCE_OFFSET (0)
|
|
+
|
|
+#define SET_ADMA_RX_LEN0(x) ((x)&ADMA_RX_LEN0_MASK)
|
|
+#define SET_ADMA_RX_LEN1(x) ((x)&ADMA_RX_LEN1_MASK)
|
|
+
|
|
+#define SET_PDMA_LRO_MAX_AGG_CNT(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW3; \
|
|
+ *addr &= ~0xff; \
|
|
+ *addr |= ((x) & 0xff); \
|
|
+ }
|
|
+#define SET_PDMA_LRO_FLUSH_REQ(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
|
|
+ *addr &= ~PDMA_LRO_FUSH_REQ; \
|
|
+ *addr |= ((x) & 0x7)<<PDMA_LRO_FUSH_REQ_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_LRO_IPV6_EN(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
|
|
+ *addr &= ~PDMA_LRO_IPV6_EN; \
|
|
+ *addr |= ((x) & 0x1)<<PDMA_LRO_IPV6_EN_OFFSET; \
|
|
+ }
|
|
+#if defined(CONFIG_RAETH_HW_LRO_PREFETCH)
|
|
+#define SET_PDMA_LRO_RXD_PREFETCH_EN(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
|
|
+ *addr &= ~PDMA_LRO_RXD_PREFETCH_EN; \
|
|
+ *addr |= ((x) & 0x3)<<PDMA_LRO_RXD_PREFETCH_EN_OFFSET; \
|
|
+ }
|
|
+#else
|
|
+#define SET_PDMA_LRO_RXD_PREFETCH_EN(x)
|
|
+#endif
|
|
+#define SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
|
|
+ *addr &= ~PDMA_LRO_IPV4_CSUM_UPDATE_EN; \
|
|
+ *addr |= ((x) & 0x1)<<PDMA_LRO_IPV4_CSUM_UPDATE_EN_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_LRO_IPV4_CTRL_PUSH_EN(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
|
|
+ *addr &= ~PDMA_LRO_IPV4_CTRL_PUSH_EN; \
|
|
+ *addr |= ((x) & 0x1)<<PDMA_LRO_IPV4_CTRL_PUSH_EN_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_NON_LRO_MULTI_EN(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
|
|
+ *addr &= ~(PDMA_NON_LRO_MULTI_EN); \
|
|
+ *addr |= ((x) & 0x1)<<PDMA_NON_LRO_MULTI_EN_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_LRO_FREQ_PRI_ADJ(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
|
|
+ *addr &= ~PDMA_LRO_FREQ_PRI_ADJ; \
|
|
+ *addr |= ((x) & 0xf)<<PDMA_LRO_FREQ_PRI_ADJ_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_LRO_TPUT_PRE_ADJ(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
|
|
+ *addr &= ~PDMA_LRO_TPUT_PRE_ADJ; \
|
|
+ *addr |= ((x) & 0xf)<<PDMA_LRO_TPUT_PRE_ADJ_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_LRO_TPUT_PRI_ADJ(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
|
|
+ *addr &= ~PDMA_LRO_TPUT_PRI_ADJ; \
|
|
+ *addr |= ((x) & 0xf)<<PDMA_LRO_TPUT_PRI_ADJ_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_LRO_ALT_SCORE_MODE(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
|
|
+ *addr &= ~PDMA_LRO_ALT_SCORE_MODE; \
|
|
+ *addr |= ((x) & 0x1)<<PDMA_LRO_ALT_SCORE_MODE_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_LRO_DLY_INT_EN(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW0; \
|
|
+ *addr &= ~PDMA_LRO_DLY_INT_EN; \
|
|
+ *addr |= ((x) & 0x1)<<PDMA_LRO_DLY_INT_EN_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_LRO_BW_THRESHOLD(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW2; \
|
|
+ *addr = (x); \
|
|
+ }
|
|
+#define SET_PDMA_LRO_MIN_RXD_SDL(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)ADMA_LRO_CTRL_DW3; \
|
|
+ *addr &= ~PDMA_LRO_LRO_MIN_RXD_SDL0; \
|
|
+ *addr |= ((x) & 0xffff)<<PDMA_LRO_LRO_MIN_RXD_SDL0_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_LRO_TPUT_OVERFLOW_ADJ(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)PDMA_LRO_ATL_OVERFLOW_ADJ; \
|
|
+ *addr &= ~PDMA_LRO_TPUT_OVERFLOW_ADJ; \
|
|
+ *addr |= ((x) & 0xfffff)<<PDMA_LRO_TPUT_OVERFLOW_ADJ_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_LRO_CNT_OVERFLOW_ADJ(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)PDMA_LRO_ATL_OVERFLOW_ADJ; \
|
|
+ *addr &= ~PDMA_LRO_CNT_OVERFLOW_ADJ; \
|
|
+ *addr |= ((x) & 0xfff)<<PDMA_LRO_CNT_OVERFLOW_ADJ_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)LRO_ALT_REFRESH_TIMER; \
|
|
+ *addr &= ~PDMA_LRO_ALT_TICK_TIMER; \
|
|
+ *addr |= ((x) & 0x1f)<<PDMA_LRO_ALT_TICK_TIMER_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_LRO_ALT_REFRESH_TIMER(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)LRO_ALT_REFRESH_TIMER; \
|
|
+ *addr &= ~0xffff; \
|
|
+ *addr |= ((x) & 0xffff); \
|
|
+ }
|
|
+#define SET_PDMA_LRO_MAX_AGG_TIME(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)LRO_MAX_AGG_TIME; \
|
|
+ *addr &= ~0xffff; \
|
|
+ *addr |= ((x) & 0xffff); \
|
|
+ }
|
|
+#define SET_PDMA_RXRING_MODE(x,y) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
|
|
+ *addr &= ~(0x3<<PDMA_RX_MODE_OFFSET); \
|
|
+ *addr |= (y)<<PDMA_RX_MODE_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_RXRING_MYIP_VALID(x,y) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
|
|
+ *addr &= ~(0x1<<PDMA_RX_MYIP_VALID_OFFSET); \
|
|
+ *addr |= ((y)&0x1)<<PDMA_RX_MYIP_VALID_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_RXRING_VALID(x,y) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
|
|
+ *addr &= ~(0x1<<PDMA_RX_PORT_VALID_OFFSET); \
|
|
+ *addr |= ((y)&0x1)<<PDMA_RX_PORT_VALID_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_RXRING_TCP_SRC_PORT(x,y) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING1_STP_DTP_DW + (((x)-1) << 6)); \
|
|
+ *addr &= ~(0xffff<<PDMA_RX_TCP_SRC_PORT_OFFSET); \
|
|
+ *addr |= (y)<<PDMA_RX_TCP_SRC_PORT_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_RXRING_TCP_DEST_PORT(x,y) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING1_STP_DTP_DW + (((x)-1) << 6)); \
|
|
+ *addr &= ~(0xffff<<PDMA_RX_TCP_DEST_PORT_OFFSET); \
|
|
+ *addr |= (y)<<PDMA_RX_TCP_DEST_PORT_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_RXRING_IPV4_FORCE_MODE(x,y) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING1_CTRL_DW0 + (((x)-1) << 6)); \
|
|
+ *addr &= ~(0x1<<PDMA_RX_IPV4_FORCE_OFFSET); \
|
|
+ *addr |= (y)<<PDMA_RX_IPV4_FORCE_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_RXRING_IPV6_FORCE_MODE(x,y) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING1_CTRL_DW0 + (((x)-1) << 6)); \
|
|
+ *addr &= ~(0x1<<PDMA_RX_IPV6_FORCE_OFFSET); \
|
|
+ *addr |= (y)<<PDMA_RX_IPV6_FORCE_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_RXRING_AGE_TIME(x,y) \
|
|
+ { volatile unsigned int *addr1 = (unsigned int*)(LRO_RX_RING0_CTRL_DW1 + ((x) << 6)); \
|
|
+ volatile unsigned int *addr2 = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
|
|
+ *addr1 &= ~PDMA_LRO_RING_AGE1; \
|
|
+ *addr2 &= ~PDMA_LRO_RING_AGE2; \
|
|
+ *addr1 |= ((y) & 0x3ff)<<PDMA_LRO_RING_AGE1_OFFSET; \
|
|
+ *addr2 |= (((y)>>PDMA_LRO_AGE_H_OFFSET) & 0x03f)<<PDMA_LRO_RING_AGE2_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_RXRING_AGG_TIME(x,y) \
|
|
+ { volatile unsigned int *addr = (unsigned int*)(LRO_RX_RING0_CTRL_DW2 + ((x) << 6)); \
|
|
+ *addr &= ~PDMA_LRO_RING_AGG; \
|
|
+ *addr |= ((y) & 0xffff)<<PDMA_LRO_RING_AGG_OFFSET; \
|
|
+ }
|
|
+#define SET_PDMA_RXRING_MAX_AGG_CNT(x,y) \
|
|
+ { volatile unsigned int *addr1 = (unsigned int*)(LRO_RX_RING1_CTRL_DW2 + (((x)-1) << 6)); \
|
|
+ volatile unsigned int *addr2 = (unsigned int*)(LRO_RX_RING1_CTRL_DW3 + (((x)-1) << 6)); \
|
|
+ *addr1 &= ~PDMA_LRO_RING_AGG_CNT1; \
|
|
+ *addr2 &= ~PDMA_LRO_RING_AGG_CNT2; \
|
|
+ *addr1 |= ((y) & 0x3f)<<PDMA_LRO_RING_AGG_CNT1_OFFSET; \
|
|
+ *addr2 |= (((y)>>PDMA_LRO_AGG_CNT_H_OFFSET) & 0x03)<<PDMA_LRO_RING_AGG_CNT2_OFFSET; \
|
|
+ }
|
|
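Editorial note (not part of the patch): a minimal sketch of how the SET_PDMA_RXRING_* helpers above compose to bind one LRO RX ring to a TCP flow in force-port mode. The ring index and port numbers are illustrative assumptions; only the macro and constant names come from this header.

    /* illustrative fragment only -- it is not added by this patch */
    SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING1, 1);         /* match IPv4 flows             */
    SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING1, 80);           /* example TCP source port      */
    SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING1, 5001);        /* example TCP destination port */
    SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_FORCE_PORT);   /* force-port LRO ring mode     */
    SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
    SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);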
+
|
|
+typedef struct _PDMA_LRO_AUTO_TLB_INFO0_ PDMA_LRO_AUTO_TLB_INFO0_T;
|
|
+typedef struct _PDMA_LRO_AUTO_TLB_INFO1_ PDMA_LRO_AUTO_TLB_INFO1_T;
|
|
+typedef struct _PDMA_LRO_AUTO_TLB_INFO2_ PDMA_LRO_AUTO_TLB_INFO2_T;
|
|
+typedef struct _PDMA_LRO_AUTO_TLB_INFO3_ PDMA_LRO_AUTO_TLB_INFO3_T;
|
|
+typedef struct _PDMA_LRO_AUTO_TLB_INFO4_ PDMA_LRO_AUTO_TLB_INFO4_T;
|
|
+typedef struct _PDMA_LRO_AUTO_TLB_INFO5_ PDMA_LRO_AUTO_TLB_INFO5_T;
|
|
+typedef struct _PDMA_LRO_AUTO_TLB_INFO6_ PDMA_LRO_AUTO_TLB_INFO6_T;
|
|
+typedef struct _PDMA_LRO_AUTO_TLB_INFO7_ PDMA_LRO_AUTO_TLB_INFO7_T;
|
|
+typedef struct _PDMA_LRO_AUTO_TLB_INFO8_ PDMA_LRO_AUTO_TLB_INFO8_T;
|
|
+
|
|
+struct _PDMA_LRO_AUTO_TLB_INFO0_
|
|
+{
|
|
+ unsigned int DTP : 16;
|
|
+ unsigned int STP : 16;
|
|
+};
|
|
+struct _PDMA_LRO_AUTO_TLB_INFO1_
|
|
+{
|
|
+ unsigned int SIP0 : 32;
|
|
+};
|
|
+struct _PDMA_LRO_AUTO_TLB_INFO2_
|
|
+{
|
|
+ unsigned int SIP1 : 32;
|
|
+};
|
|
+struct _PDMA_LRO_AUTO_TLB_INFO3_
|
|
+{
|
|
+ unsigned int SIP2 : 32;
|
|
+};
|
|
+struct _PDMA_LRO_AUTO_TLB_INFO4_
|
|
+{
|
|
+ unsigned int SIP3 : 32;
|
|
+};
|
|
+struct _PDMA_LRO_AUTO_TLB_INFO5_
|
|
+{
|
|
+ unsigned int VLAN_VID0 : 32;
|
|
+};
|
|
+struct _PDMA_LRO_AUTO_TLB_INFO6_
|
|
+{
|
|
+ unsigned int VLAN_VID1 : 16;
|
|
+ unsigned int VLAN_VID_VLD : 4;
|
|
+ unsigned int CNT : 12;
|
|
+};
|
|
+struct _PDMA_LRO_AUTO_TLB_INFO7_
|
|
+{
|
|
+ unsigned int DW_LEN : 32;
|
|
+};
|
|
+struct _PDMA_LRO_AUTO_TLB_INFO8_
|
|
+{
|
|
+ unsigned int DIP_ID : 2;
|
|
+ unsigned int IPV6 : 1;
|
|
+ unsigned int IPV4 : 1;
|
|
+ unsigned int RESV : 27;
|
|
+ unsigned int VALID : 1;
|
|
+};
|
|
+struct PDMA_LRO_AUTO_TLB_INFO {
|
|
+ PDMA_LRO_AUTO_TLB_INFO0_T auto_tlb_info0;
|
|
+ PDMA_LRO_AUTO_TLB_INFO1_T auto_tlb_info1;
|
|
+ PDMA_LRO_AUTO_TLB_INFO2_T auto_tlb_info2;
|
|
+ PDMA_LRO_AUTO_TLB_INFO3_T auto_tlb_info3;
|
|
+ PDMA_LRO_AUTO_TLB_INFO4_T auto_tlb_info4;
|
|
+ PDMA_LRO_AUTO_TLB_INFO5_T auto_tlb_info5;
|
|
+ PDMA_LRO_AUTO_TLB_INFO6_T auto_tlb_info6;
|
|
+ PDMA_LRO_AUTO_TLB_INFO7_T auto_tlb_info7;
|
|
+ PDMA_LRO_AUTO_TLB_INFO8_T auto_tlb_info8;
|
|
+};
|
|
+
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+#define VQTX_TB_BASE0 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1980)
|
|
+#define VQTX_TB_BASE1 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1984)
|
|
+#define VQTX_TB_BASE2 (ETHDMASYS_FRAME_ENGINE_BASE + 0x1988)
|
|
+#define VQTX_TB_BASE3 (ETHDMASYS_FRAME_ENGINE_BASE + 0x198C)
|
|
+#define SFQ_OFFSET 0x1A80
|
|
+#define VQTX_GLO (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET)
|
|
+#define VQTX_INVLD_PTR (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x0C)
|
|
+#define VQTX_NUM (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x10)
|
|
+#define VQTX_SCH (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x18)
|
|
+#define VQTX_HASH_CFG (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x20)
|
|
+#define VQTX_HASH_SD (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x24)
|
|
+#define VQTX_VLD_CFG (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x30)
|
|
+#define VQTX_MIB_IF (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x3C)
|
|
+#define VQTX_MIB_PCNT (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x40)
|
|
+#define VQTX_MIB_BCNT0 (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x44)
|
|
+#define VQTX_MIB_BCNT1 (ETHDMASYS_FRAME_ENGINE_BASE + SFQ_OFFSET + 0x48)
|
|
+
|
|
+#define VQTX_MIB_EN (1<<17)
|
|
+#define VQTX_NUM_0 (4<<0)
|
|
+#define VQTX_NUM_1 (4<<4)
|
|
+#define VQTX_NUM_2 (4<<8)
|
|
+#define VQTX_NUM_3 (4<<12)
|
|
+
|
|
+/*=========================================
|
|
+ SFQ Table Format define
|
|
+=========================================*/
|
|
+typedef struct _SFQ_INFO1_ SFQ_INFO1_T;
|
|
+
|
|
+struct _SFQ_INFO1_
|
|
+{
|
|
+ unsigned int VQHPTR;
|
|
+};
|
|
+//-------------------------------------------------
|
|
+typedef struct _SFQ_INFO2_ SFQ_INFO2_T;
|
|
+
|
|
+struct _SFQ_INFO2_
|
|
+{
|
|
+ unsigned int VQTPTR;
|
|
+};
|
|
+//-------------------------------------------------
|
|
+typedef struct _SFQ_INFO3_ SFQ_INFO3_T;
|
|
+
|
|
+struct _SFQ_INFO3_
|
|
+{
|
|
+ unsigned int QUE_DEPTH:16;
|
|
+ unsigned int DEFICIT_CNT:16;
|
|
+};
|
|
+//-------------------------------------------------
|
|
+typedef struct _SFQ_INFO4_ SFQ_INFO4_T;
|
|
+
|
|
+struct _SFQ_INFO4_
|
|
+{
|
|
+ unsigned int RESV;
|
|
+};
|
|
+//-------------------------------------------------
|
|
+
|
|
+typedef struct _SFQ_INFO5_ SFQ_INFO5_T;
|
|
+
|
|
+struct _SFQ_INFO5_
|
|
+{
|
|
+ unsigned int PKT_CNT;
|
|
+};
|
|
+//-------------------------------------------------
|
|
+
|
|
+typedef struct _SFQ_INFO6_ SFQ_INFO6_T;
|
|
+
|
|
+struct _SFQ_INFO6_
|
|
+{
|
|
+ unsigned int BYTE_CNT;
|
|
+};
|
|
+//-------------------------------------------------
|
|
+
|
|
+typedef struct _SFQ_INFO7_ SFQ_INFO7_T;
|
|
+
|
|
+struct _SFQ_INFO7_
|
|
+{
|
|
+ unsigned int BYTE_CNT;
|
|
+};
|
|
+//-------------------------------------------------
|
|
+
|
|
+typedef struct _SFQ_INFO8_ SFQ_INFO8_T;
|
|
+
|
|
+struct _SFQ_INFO8_
|
|
+{
|
|
+ unsigned int RESV;
|
|
+};
|
|
+
|
|
+
|
|
+struct SFQ_table {
|
|
+ SFQ_INFO1_T sfq_info1;
|
|
+ SFQ_INFO2_T sfq_info2;
|
|
+ SFQ_INFO3_T sfq_info3;
|
|
+ SFQ_INFO4_T sfq_info4;
|
|
+ SFQ_INFO5_T sfq_info5;
|
|
+ SFQ_INFO6_T sfq_info6;
|
|
+ SFQ_INFO7_T sfq_info7;
|
|
+ SFQ_INFO8_T sfq_info8;
|
|
+
|
|
+};
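+/*
+ * Sketch (illustrative, not the driver's init routine): once the four
+ * SFQ tables have been allocated DMA-coherently, their bus addresses
+ * are handed to VQTX_TB_BASE0..3 and VQTX_NUM is programmed with the
+ * VQTX_NUM_* values defined above.  sysRegWrite()/u32 are defined
+ * further down in this header.
+ */
+#if 0
+static inline void sfq_program_tables(dma_addr_t t0, dma_addr_t t1,
+				      dma_addr_t t2, dma_addr_t t3)
+{
+	sysRegWrite(VQTX_TB_BASE0, (u32)t0);
+	sysRegWrite(VQTX_TB_BASE1, (u32)t1);
+	sysRegWrite(VQTX_TB_BASE2, (u32)t2);
+	sysRegWrite(VQTX_TB_BASE3, (u32)t3);
+	sysRegWrite(VQTX_NUM, VQTX_NUM_0 | VQTX_NUM_1 | VQTX_NUM_2 | VQTX_NUM_3);
+}
+#endif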
|
|
+#endif
|
|
+#if defined (CONFIG_RAETH_HW_LRO) || defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+#define FE_GDM_RXID1_OFFSET (0x0130)
|
|
+#define FE_GDM_RXID1 (RALINK_FRAME_ENGINE_BASE+FE_GDM_RXID1_OFFSET)
|
|
+#define GDM_VLAN_PRI7_RXID_SEL BITS(30,31)
|
|
+#define GDM_VLAN_PRI6_RXID_SEL BITS(28,29)
|
|
+#define GDM_VLAN_PRI5_RXID_SEL BITS(26,27)
|
|
+#define GDM_VLAN_PRI4_RXID_SEL BITS(24,25)
|
|
+#define GDM_VLAN_PRI3_RXID_SEL BITS(22,23)
|
|
+#define GDM_VLAN_PRI2_RXID_SEL BITS(20,21)
|
|
+#define GDM_VLAN_PRI1_RXID_SEL BITS(18,19)
|
|
+#define GDM_VLAN_PRI0_RXID_SEL BITS(16,17)
|
|
+#define GDM_TCP_ACK_RXID_SEL BITS(4,5)
|
|
+#define GDM_TCP_ACK_WZPC BIT(3)
|
|
+#define GDM_RXID_PRI_SEL BITS(0,2)
|
|
+
|
|
+#define FE_GDM_RXID2_OFFSET (0x0134)
|
|
+#define FE_GDM_RXID2 (RALINK_FRAME_ENGINE_BASE+FE_GDM_RXID2_OFFSET)
|
|
+#define GDM_STAG7_RXID_SEL BITS(30,31)
|
|
+#define GDM_STAG6_RXID_SEL BITS(28,29)
|
|
+#define GDM_STAG5_RXID_SEL BITS(26,27)
|
|
+#define GDM_STAG4_RXID_SEL BITS(24,25)
|
|
+#define GDM_STAG3_RXID_SEL BITS(22,23)
|
|
+#define GDM_STAG2_RXID_SEL BITS(20,21)
|
|
+#define GDM_STAG1_RXID_SEL BITS(18,19)
|
|
+#define GDM_STAG0_RXID_SEL BITS(16,17)
|
|
+#define GDM_PID2_RXID_SEL BITS(2,3)
|
|
+#define GDM_PID1_RXID_SEL BITS(0,1)
|
|
+
|
|
+#define GDM_PRI_PID (0)
|
|
+#define GDM_PRI_VLAN_PID (1)
|
|
+#define GDM_PRI_ACK_PID (2)
|
|
+#define GDM_PRI_VLAN_ACK_PID (3)
|
|
+#define GDM_PRI_ACK_VLAN_PID (4)
|
|
+
|
|
+#define SET_GDM_VLAN_PRI_RXID_SEL(x,y) \
|
|
+ { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID1; \
|
|
+ *addr &= ~(0x03 << (((x) << 1)+16)); \
|
|
+ *addr |= ((y) & 0x3) << (((x) << 1)+16); \
|
|
+ }
|
|
+#define SET_GDM_TCP_ACK_RXID_SEL(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID1; \
|
|
+ *addr &= ~(GDM_TCP_ACK_RXID_SEL); \
|
|
+ *addr |= ((x) & 0x3) << 4; \
|
|
+ }
|
|
+#define SET_GDM_TCP_ACK_WZPC(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID1; \
|
|
+ *addr &= ~(GDM_TCP_ACK_WZPC); \
|
|
+ *addr |= ((x) & 0x1) << 3; \
|
|
+ }
|
|
+#define SET_GDM_RXID_PRI_SEL(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID1; \
|
|
+ *addr &= ~(GDM_RXID_PRI_SEL); \
|
|
+ *addr |= (x) & 0x7; \
|
|
+ }
|
|
+#define GDM_STAG_RXID_SEL(x,y) \
|
|
+ { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID2; \
|
|
+ *addr &= ~(0x03 << (((x) << 1)+16)); \
|
|
+ *addr |= ((y) & 0x3) << (((x) << 1)+16); \
|
|
+ }
|
|
+#define SET_GDM_PID2_RXID_SEL(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID2; \
|
|
+ *addr &= ~(GDM_PID2_RXID_SEL); \
|
|
+ *addr |= ((x) & 0x3) << 2; \
|
|
+ }
|
|
+#define SET_GDM_PID1_RXID_SEL(x) \
|
|
+ { volatile unsigned int *addr = (unsigned int *)FE_GDM_RXID2; \
|
|
+ *addr &= ~(GDM_PID1_RXID_SEL); \
|
|
+ *addr |= ((x) & 0x3); \
|
|
+ }
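+/*
+ * Usage sketch for the RXID steering macros above (ring numbers and the
+ * interpretation of the fields are illustrative only): TCP ACKs and both
+ * GDMA ports are sent to RX ring 0, VLAN priority 7 traffic to RX ring 3.
+ */
+#if 0
+static inline void gdm_rxid_steering_example(void)
+{
+	SET_GDM_RXID_PRI_SEL(GDM_PRI_VLAN_ACK_PID);	/* selection precedence   */
+	SET_GDM_TCP_ACK_WZPC(1);			/* zero-payload ACK check */
+	SET_GDM_TCP_ACK_RXID_SEL(0);			/* TCP ACKs      -> ring 0 */
+	SET_GDM_VLAN_PRI_RXID_SEL(7, 3);		/* VLAN prio 7   -> ring 3 */
+	SET_GDM_PID1_RXID_SEL(0);			/* GDMA1 traffic -> ring 0 */
+	SET_GDM_PID2_RXID_SEL(0);			/* GDMA2 traffic -> ring 0 */
+}
+#endif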
|
|
+#endif /* CONFIG_RAETH_HW_LRO || CONFIG_RAETH_MULTIPLE_RX_RING */
|
|
+/* Per Port Packet Counts in RT3052, added by bobtseng 2009.4.17. */
|
|
+#define PORT0_PKCOUNT (0xb01100e8)
|
|
+#define PORT1_PKCOUNT (0xb01100ec)
|
|
+#define PORT2_PKCOUNT (0xb01100f0)
|
|
+#define PORT3_PKCOUNT (0xb01100f4)
|
|
+#define PORT4_PKCOUNT (0xb01100f8)
|
|
+#define PORT5_PKCOUNT (0xb01100fc)
|
|
+
|
|
+#if defined (CONFIG_ARCH_MT7623)
|
|
+#include "sync_write.h"
|
|
+#define sysRegRead(phys) (*(volatile unsigned int *)((phys)))
|
|
+#define sysRegWrite(phys, val) mt65xx_reg_sync_writel((val), (phys))
|
|
+#else
|
|
+#define PHYS_TO_K1(physaddr) KSEG1ADDR(physaddr)
|
|
+#define sysRegRead(phys) (*(volatile unsigned int *)PHYS_TO_K1(phys))
|
|
+#define sysRegWrite(phys, val) ((*(volatile unsigned int *)PHYS_TO_K1(phys)) = (val))
|
|
+#endif
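+/*
+ * Sketch: the accessors above are used for plain read-modify-write
+ * sequences on frame-engine registers, e.g. pausing both PDMA
+ * directions (ra_mac.c uses the same pattern in ra2880stop()).
+ * DMA_GLO_CFG and the *_DMA_EN bits are defined further down.
+ */
+#if 0
+static inline void pdma_pause_example(void)
+{
+	unsigned int val = sysRegRead(DMA_GLO_CFG);
+
+	val &= ~(TX_DMA_EN | RX_DMA_EN);	/* stop TX and RX DMA */
+	sysRegWrite(DMA_GLO_CFG, val);
+}
+#endif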
|
|
+
|
|
+#define u_long unsigned long
|
|
+#define u32 unsigned int
|
|
+#define u16 unsigned short
|
|
+
|
|
+
|
|
+/* ====================================== */
|
|
+#define GDM1_DISPAD BIT(18)
|
|
+#define GDM1_DISCRC BIT(17)
|
|
+
|
|
+//GDMA1 uni-cast frames destination port
|
|
+#define GDM1_ICS_EN (0x1 << 22)
|
|
+#define GDM1_TCS_EN (0x1 << 21)
|
|
+#define GDM1_UCS_EN (0x1 << 20)
|
|
+#define GDM1_JMB_EN (0x1 << 19)
|
|
+#define GDM1_STRPCRC (0x1 << 16)
|
|
+#define GDM1_UFRC_P_CPU (0 << 12)
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+#define GDM1_UFRC_P_PPE (4 << 12)
|
|
+#else
|
|
+#define GDM1_UFRC_P_PPE (6 << 12)
|
|
+#endif
|
|
+
|
|
+//GDMA1 broad-cast MAC address frames
|
|
+#define GDM1_BFRC_P_CPU (0 << 8)
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+#define GDM1_BFRC_P_PPE (4 << 8)
|
|
+#else
|
|
+#define GDM1_BFRC_P_PPE (6 << 8)
|
|
+#endif
|
|
+
|
|
+//GDMA1 multi-cast MAC address frames
|
|
+#define GDM1_MFRC_P_CPU (0 << 4)
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+#define GDM1_MFRC_P_PPE (4 << 4)
|
|
+#else
|
|
+#define GDM1_MFRC_P_PPE (6 << 4)
|
|
+#endif
|
|
+
|
|
+//GDMA1 other MAC address frames destination port
|
|
+#define GDM1_OFRC_P_CPU (0 << 0)
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+#define GDM1_OFRC_P_PPE (4 << 0)
|
|
+#else
|
|
+#define GDM1_OFRC_P_PPE (6 << 0)
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT6856) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+/* checksum generator registers are removed */
|
|
+#define ICS_GEN_EN (0 << 2)
|
|
+#define UCS_GEN_EN (0 << 1)
|
|
+#define TCS_GEN_EN (0 << 0)
|
|
+#else
|
|
+#define ICS_GEN_EN (1 << 2)
|
|
+#define UCS_GEN_EN (1 << 1)
|
|
+#define TCS_GEN_EN (1 << 0)
|
|
+#endif
|
|
+
|
|
+// MDIO_CFG bit
|
|
+#define MDIO_CFG_GP1_FC_TX (1 << 11)
|
|
+#define MDIO_CFG_GP1_FC_RX (1 << 10)
|
|
+
|
|
+/* ====================================== */
|
|
+/* ====================================== */
|
|
+#define GP1_LNK_DWN BIT(9)
|
|
+#define GP1_AN_FAIL BIT(8)
|
|
+/* ====================================== */
|
|
+/* ====================================== */
|
|
+#define PSE_RESET BIT(0)
|
|
+/* ====================================== */
|
|
+#define PST_DRX_IDX3 BIT(19)
|
|
+#define PST_DRX_IDX2 BIT(18)
|
|
+#define PST_DRX_IDX1 BIT(17)
|
|
+#define PST_DRX_IDX0 BIT(16)
|
|
+#define PST_DTX_IDX3 BIT(3)
|
|
+#define PST_DTX_IDX2 BIT(2)
|
|
+#define PST_DTX_IDX1 BIT(1)
|
|
+#define PST_DTX_IDX0 BIT(0)
|
|
+
|
|
+#define RX_2B_OFFSET BIT(31)
|
|
+#define DESC_32B_EN BIT(8)
|
|
+#define TX_WB_DDONE BIT(6)
|
|
+#define RX_DMA_BUSY BIT(3)
|
|
+#define TX_DMA_BUSY BIT(1)
|
|
+#define RX_DMA_EN BIT(2)
|
|
+#define TX_DMA_EN BIT(0)
|
|
+
|
|
+#define PDMA_BT_SIZE_4DWORDS (0<<4)
|
|
+#define PDMA_BT_SIZE_8DWORDS (1<<4)
|
|
+#define PDMA_BT_SIZE_16DWORDS (2<<4)
|
|
+#define PDMA_BT_SIZE_32DWORDS (3<<4)
|
|
+
|
|
+#define ADMA_RX_BT_SIZE_4DWORDS (0<<11)
|
|
+#define ADMA_RX_BT_SIZE_8DWORDS (1<<11)
|
|
+#define ADMA_RX_BT_SIZE_16DWORDS (2<<11)
|
|
+#define ADMA_RX_BT_SIZE_32DWORDS (3<<11)
|
|
+
|
|
+/* Register bits.
|
|
+ */
|
|
+
|
|
+#define MACCFG_RXEN (1<<2)
|
|
+#define MACCFG_TXEN (1<<3)
|
|
+#define MACCFG_PROMISC (1<<18)
|
|
+#define MACCFG_RXMCAST (1<<19)
|
|
+#define MACCFG_FDUPLEX (1<<20)
|
|
+#define MACCFG_PORTSEL (1<<27)
|
|
+#define MACCFG_HBEATDIS (1<<28)
|
|
+
|
|
+
|
|
+#define DMACTL_SR (1<<1) /* Start/Stop Receive */
|
|
+#define DMACTL_ST (1<<13) /* Start/Stop Transmission Command */
|
|
+
|
|
+#define DMACFG_SWR (1<<0) /* Software Reset */
|
|
+#define DMACFG_BURST32 (32<<8)
|
|
+
|
|
+#define DMASTAT_TS 0x00700000 /* Transmit Process State */
|
|
+#define DMASTAT_RS 0x000e0000 /* Receive Process State */
|
|
+
|
|
+#define MACCFG_INIT 0 //(MACCFG_FDUPLEX) // | MACCFG_PORTSEL)
|
|
+
|
|
+
|
|
+
|
|
+/* Descriptor bits.
|
|
+ */
|
|
+#define R_OWN 0x80000000 /* Own Bit */
|
|
+#define RD_RER 0x02000000 /* Receive End Of Ring */
|
|
+#define RD_LS 0x00000100 /* Last Descriptor */
|
|
+#define RD_ES 0x00008000 /* Error Summary */
|
|
+#define RD_CHAIN 0x01000000 /* Chained */
|
|
+
|
|
+/* Word 0 */
|
|
+#define T_OWN 0x80000000 /* Own Bit */
|
|
+#define TD_ES 0x00008000 /* Error Summary */
|
|
+
|
|
+/* Word 1 */
|
|
+#define TD_LS 0x40000000 /* Last Segment */
|
|
+#define TD_FS 0x20000000 /* First Segment */
|
|
+#define TD_TER 0x08000000 /* Transmit End Of Ring */
|
|
+#define TD_CHAIN 0x01000000 /* Chained */
|
|
+
|
|
+
|
|
+#define TD_SET 0x08000000 /* Setup Packet */
|
|
+
|
|
+
|
|
+#define POLL_DEMAND 1
|
|
+
|
|
+#define RSTCTL (0x34)
|
|
+#define RSTCTL_RSTENET1 (1<<19)
|
|
+#define RSTCTL_RSTENET2 (1<<20)
|
|
+
|
|
+#define INIT_VALUE_OF_RT2883_PSE_FQ_CFG 0xff908000
|
|
+#define INIT_VALUE_OF_PSE_FQFC_CFG 0x80504000
|
|
+#define INIT_VALUE_OF_FORCE_100_FD 0x1001BC01
|
|
+#define INIT_VALUE_OF_FORCE_1000_FD 0x1F01DC01
|
|
+
|
|
+// Define Whole FE Reset Register
|
|
+#define RSTCTRL (RALINK_SYSCTL_BASE + 0x34)
|
|
+#define RT2880_AGPIOCFG_REG (RALINK_SYSCTL_BASE + 0x3C)
|
|
+
|
|
+/*=========================================
|
|
+ PDMA RX Descriptor Format define
|
|
+=========================================*/
|
|
+
|
|
+//-------------------------------------------------
|
|
+typedef struct _PDMA_RXD_INFO1_ PDMA_RXD_INFO1_T;
|
|
+
|
|
+struct _PDMA_RXD_INFO1_
|
|
+{
|
|
+ unsigned int PDP0;
|
|
+};
|
|
+//-------------------------------------------------
|
|
+typedef struct _PDMA_RXD_INFO2_ PDMA_RXD_INFO2_T;
|
|
+
|
|
+struct _PDMA_RXD_INFO2_
|
|
+{
|
|
+#if defined (CONFIG_ARCH_MT7623)
|
|
+ unsigned int PLEN1 : 2;
|
|
+ unsigned int LRO_AGG_CNT : 8;
|
|
+ unsigned int REV : 5;
|
|
+#else
|
|
+ unsigned int PLEN1 : 14;
|
|
+ unsigned int LS1 : 1;
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+ unsigned int TAG : 1;
|
|
+ unsigned int PLEN0 : 14;
|
|
+ unsigned int LS0 : 1;
|
|
+ unsigned int DDONE_bit : 1;
|
|
+};
|
|
+//-------------------------------------------------
|
|
+typedef struct _PDMA_RXD_INFO3_ PDMA_RXD_INFO3_T;
|
|
+
|
|
+struct _PDMA_RXD_INFO3_
|
|
+{
|
|
+ unsigned int VID:16;
|
|
+ unsigned int TPID:16;
|
|
+};
|
|
+//-------------------------------------------------
|
|
+typedef struct _PDMA_RXD_INFO4_ PDMA_RXD_INFO4_T;
|
|
+
|
|
+struct _PDMA_RXD_INFO4_
|
|
+{
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+ unsigned int FOE_Entry : 14;
|
|
+ unsigned int CRSN : 5;
|
|
+ unsigned int SPORT : 3;
|
|
+ unsigned int L4F : 1;
|
|
+ unsigned int L4VLD : 1;
|
|
+ unsigned int TACK : 1;
|
|
+ unsigned int IP4F : 1;
|
|
+ unsigned int IP4 : 1;
|
|
+ unsigned int IP6 : 1;
|
|
+ unsigned int UN_USE1 : 4;
|
|
+#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+ unsigned int FOE_Entry : 14;
|
|
+ unsigned int CRSN : 5;
|
|
+ unsigned int SP : 4;
|
|
+ unsigned int L4F : 1;
|
|
+ unsigned int L4VLD : 1;
|
|
+ unsigned int TACK : 1;
|
|
+ unsigned int IP4F : 1;
|
|
+ unsigned int IP4 : 1;
|
|
+ unsigned int IP6 : 1;
|
|
+ unsigned int UN_USE1 : 3;
|
|
+#else
|
|
+ unsigned int FOE_Entry : 14;
|
|
+ unsigned int FVLD : 1;
|
|
+ unsigned int UN_USE1 : 1;
|
|
+ unsigned int AI : 8;
|
|
+ unsigned int SP : 3;
|
|
+ unsigned int AIS : 1;
|
|
+ unsigned int L4F : 1;
|
|
+ unsigned int IPF : 1;
|
|
+ unsigned int L4FVLD_bit : 1;
|
|
+ unsigned int IPFVLD_bit : 1;
|
|
+#endif
|
|
+};
|
|
+
|
|
+
|
|
+struct PDMA_rxdesc {
|
|
+ PDMA_RXD_INFO1_T rxd_info1;
|
|
+ PDMA_RXD_INFO2_T rxd_info2;
|
|
+ PDMA_RXD_INFO3_T rxd_info3;
|
|
+ PDMA_RXD_INFO4_T rxd_info4;
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ unsigned int rxd_info5;
|
|
+ unsigned int rxd_info6;
|
|
+ unsigned int rxd_info7;
|
|
+ unsigned int rxd_info8;
|
|
+#endif
|
|
+};
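+/*
+ * Sketch: the RX path checks DDONE_bit to see whether the DMA engine has
+ * handed a descriptor back to the CPU, then takes the received length
+ * from PLEN0.  Buffer refill and ring-index handling are omitted.
+ */
+#if 0
+static inline int pdma_rxd_done_example(struct PDMA_rxdesc *rxd,
+					 unsigned int *len)
+{
+	if (!rxd->rxd_info2.DDONE_bit)
+		return 0;			/* still owned by the DMA engine */
+	*len = rxd->rxd_info2.PLEN0;		/* packet length of buffer 0 */
+	return 1;
+}
+#endif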
|
|
+
|
|
+/*=========================================
|
|
+ PDMA TX Descriptor Format define
|
|
+=========================================*/
|
|
+//-------------------------------------------------
|
|
+typedef struct _PDMA_TXD_INFO1_ PDMA_TXD_INFO1_T;
|
|
+
|
|
+struct _PDMA_TXD_INFO1_
|
|
+{
|
|
+ unsigned int SDP0;
|
|
+};
|
|
+//-------------------------------------------------
|
|
+typedef struct _PDMA_TXD_INFO2_ PDMA_TXD_INFO2_T;
|
|
+
|
|
+struct _PDMA_TXD_INFO2_
|
|
+{
|
|
+ unsigned int SDL1 : 14;
|
|
+ unsigned int LS1_bit : 1;
|
|
+ unsigned int BURST_bit : 1;
|
|
+ unsigned int SDL0 : 14;
|
|
+ unsigned int LS0_bit : 1;
|
|
+ unsigned int DDONE_bit : 1;
|
|
+};
|
|
+//-------------------------------------------------
|
|
+typedef struct _PDMA_TXD_INFO3_ PDMA_TXD_INFO3_T;
|
|
+
|
|
+struct _PDMA_TXD_INFO3_
|
|
+{
|
|
+ unsigned int SDP1;
|
|
+};
|
|
+//-------------------------------------------------
|
|
+typedef struct _PDMA_TXD_INFO4_ PDMA_TXD_INFO4_T;
|
|
+
|
|
+struct _PDMA_TXD_INFO4_
|
|
+{
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+ unsigned int VPRI_VIDX : 8;
|
|
+ unsigned int SIDX : 4;
|
|
+ unsigned int INSP : 1;
|
|
+ unsigned int RESV : 2;
|
|
+ unsigned int UDF : 5;
|
|
+ unsigned int FP_BMAP : 8;
|
|
+ unsigned int TSO : 1;
|
|
+ unsigned int TUI_CO : 3;
|
|
+#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+ unsigned int VLAN_TAG :17; // INSV(1)+VPRI(3)+CFI(1)+VID(12)
|
|
+ unsigned int RESV : 2;
|
|
+ unsigned int UDF : 6;
|
|
+ unsigned int FPORT : 3;
|
|
+ unsigned int TSO : 1;
|
|
+ unsigned int TUI_CO : 3;
|
|
+#else
|
|
+ unsigned int VPRI_VIDX : 8;
|
|
+ unsigned int SIDX : 4;
|
|
+ unsigned int INSP : 1;
|
|
+ unsigned int RESV : 1;
|
|
+ unsigned int UN_USE3 : 2;
|
|
+ unsigned int QN : 3;
|
|
+ unsigned int UN_USE2 : 1;
|
|
+ unsigned int UDF : 4;
|
|
+ unsigned int PN : 3;
|
|
+ unsigned int UN_USE1 : 1;
|
|
+ unsigned int TSO : 1;
|
|
+ unsigned int TUI_CO : 3;
|
|
+#endif
|
|
+};
|
|
+
|
|
+
|
|
+struct PDMA_txdesc {
|
|
+ PDMA_TXD_INFO1_T txd_info1;
|
|
+ PDMA_TXD_INFO2_T txd_info2;
|
|
+ PDMA_TXD_INFO3_T txd_info3;
|
|
+ PDMA_TXD_INFO4_T txd_info4;
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ unsigned int txd_info5;
|
|
+ unsigned int txd_info6;
|
|
+ unsigned int txd_info7;
|
|
+ unsigned int txd_info8;
|
|
+#endif
|
|
+};
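+/*
+ * Sketch: filling a PDMA TX descriptor for one unfragmented buffer.
+ * DMA mapping, ring indices and the final doorbell write are the
+ * caller's job and are not shown.
+ */
+#if 0
+static inline void pdma_txd_fill_example(struct PDMA_txdesc *txd,
+					 dma_addr_t buf, unsigned int len)
+{
+	txd->txd_info1.SDP0      = (unsigned int)buf;	/* segment data pointer 0 */
+	txd->txd_info2.SDL0      = len;			/* segment data length 0  */
+	txd->txd_info2.LS0_bit   = 1;			/* last segment           */
+	txd->txd_info2.DDONE_bit = 0;			/* owned by the DMA engine */
+}
+#endif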
|
|
+
|
|
+
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+/*=========================================
|
|
+ QDMA TX Descriptor Format define
|
|
+=========================================*/
|
|
+//-------------------------------------------------
|
|
+typedef struct _QDMA_TXD_INFO1_ QDMA_TXD_INFO1_T;
|
|
+
|
|
+struct _QDMA_TXD_INFO1_
|
|
+{
|
|
+ unsigned int SDP;
|
|
+};
|
|
+//-------------------------------------------------
|
|
+typedef struct _QDMA_TXD_INFO2_ QDMA_TXD_INFO2_T;
|
|
+
|
|
+struct _QDMA_TXD_INFO2_
|
|
+{
|
|
+ unsigned int NDP;
|
|
+};
|
|
+//-------------------------------------------------
|
|
+typedef struct _QDMA_TXD_INFO3_ QDMA_TXD_INFO3_T;
|
|
+
|
|
+struct _QDMA_TXD_INFO3_
|
|
+{
|
|
+ unsigned int QID : 4;
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ //unsigned int VQID : 10;
|
|
+ unsigned int PROT : 3;
|
|
+ unsigned int IPOFST : 7;
|
|
+#else
|
|
+ unsigned int RESV : 10;
|
|
+#endif
|
|
+ unsigned int SWC_bit : 1;
|
|
+ unsigned int BURST_bit : 1;
|
|
+ unsigned int SDL : 14;
|
|
+ unsigned int LS_bit : 1;
|
|
+ unsigned int OWN_bit : 1;
|
|
+};
|
|
+//-------------------------------------------------
|
|
+typedef struct _QDMA_TXD_INFO4_ QDMA_TXD_INFO4_T;
|
|
+
|
|
+struct _QDMA_TXD_INFO4_
|
|
+{
|
|
+ unsigned int VLAN_TAG :17; // INSV(1)+VPRI(3)+CFI(1)+VID(12)
|
|
+#if defined (CONFIG_RALINK_MT7621)
|
|
+ unsigned int RESV : 2;
|
|
+ unsigned int UDF : 6;
|
|
+#elif defined(CONFIG_ARCH_MT7623)
|
|
+ unsigned int VQID0 : 1;
|
|
+ unsigned int RESV : 7;
|
|
+#endif
|
|
+ unsigned int FPORT : 3;
|
|
+ unsigned int TSO : 1;
|
|
+ unsigned int TUI_CO : 3;
|
|
+};
|
|
+
|
|
+
|
|
+struct QDMA_txdesc {
|
|
+ QDMA_TXD_INFO1_T txd_info1;
|
|
+ QDMA_TXD_INFO2_T txd_info2;
|
|
+ QDMA_TXD_INFO3_T txd_info3;
|
|
+ QDMA_TXD_INFO4_T txd_info4;
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ unsigned int txd_info5;
|
|
+ unsigned int txd_info6;
|
|
+ unsigned int txd_info7;
|
|
+ unsigned int txd_info8;
|
|
+#endif
|
|
+};
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_ARCH_MT7623)
|
|
+#define phys_to_bus(a) (a)
|
|
+#else
|
|
+#define phys_to_bus(a) (a & 0x1FFFFFFF)
|
|
+#endif
|
|
+
|
|
+#define PHY_Enable_Auto_Nego 0x1000
|
|
+#define PHY_Restart_Auto_Nego 0x0200
|
|
+
|
|
+/* PHY_STAT_REG = 1; */
|
|
+#define PHY_Auto_Neco_Comp 0x0020
|
|
+#define PHY_Link_Status 0x0004
|
|
+
|
|
+/* PHY_AUTO_NEGO_REG = 4; */
|
|
+#define PHY_Cap_10_Half 0x0020
|
|
+#define PHY_Cap_10_Full 0x0040
|
|
+#define PHY_Cap_100_Half 0x0080
|
|
+#define PHY_Cap_100_Full 0x0100
|
|
+
|
|
+/* proc definition */
|
|
+
|
|
+#if !defined (CONFIG_RALINK_RT6855) && !defined(CONFIG_RALINK_RT6855A) && \
|
|
+ !defined (CONFIG_RALINK_MT7620) && !defined (CONFIG_RALINK_MT7621) && \
|
|
+ !defined (CONFIG_ARCH_MT7623)
|
|
+#define CDMA_OQ_STA (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x4c)
|
|
+#define GDMA1_OQ_STA (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x50)
|
|
+#define PPE_OQ_STA (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x54)
|
|
+#define PSE_IQ_STA (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x58)
|
|
+#endif
|
|
+
|
|
+#define PROCREG_CONTROL_FILE "/var/run/procreg_control"
|
|
+#if defined (CONFIG_RALINK_RT2880)
|
|
+#define PROCREG_DIR "rt2880"
|
|
+#elif defined (CONFIG_RALINK_RT3052)
|
|
+#define PROCREG_DIR "rt3052"
|
|
+#elif defined (CONFIG_RALINK_RT3352)
|
|
+#define PROCREG_DIR "rt3352"
|
|
+#elif defined (CONFIG_RALINK_RT5350)
|
|
+#define PROCREG_DIR "rt5350"
|
|
+#elif defined (CONFIG_RALINK_RT2883)
|
|
+#define PROCREG_DIR "rt2883"
|
|
+#elif defined (CONFIG_RALINK_RT3883)
|
|
+#define PROCREG_DIR "rt3883"
|
|
+#elif defined (CONFIG_RALINK_RT6855)
|
|
+#define PROCREG_DIR "rt6855"
|
|
+#elif defined (CONFIG_RALINK_MT7620)
|
|
+#define PROCREG_DIR "mt7620"
|
|
+#elif defined (CONFIG_RALINK_MT7621)
|
|
+#define PROCREG_DIR "mt7621"
|
|
+#elif defined (CONFIG_ARCH_MT7623)
|
|
+#define PROCREG_DIR "mt7623"
|
|
+#elif defined (CONFIG_RALINK_MT7628)
|
|
+#define PROCREG_DIR "mt7628"
|
|
+#elif defined (CONFIG_RALINK_RT6855A)
|
|
+#define PROCREG_DIR "rt6855a"
|
|
+#else
|
|
+#define PROCREG_DIR "rt2880"
|
|
+#endif
|
|
+#define PROCREG_SKBFREE "skb_free"
|
|
+#define PROCREG_TXRING "tx_ring"
|
|
+#define PROCREG_RXRING "rx_ring"
|
|
+#define PROCREG_RXRING1 "rx_ring1"
|
|
+#define PROCREG_RXRING2 "rx_ring2"
|
|
+#define PROCREG_RXRING3 "rx_ring3"
|
|
+#define PROCREG_NUM_OF_TXD "num_of_txd"
|
|
+#define PROCREG_TSO_LEN "tso_len"
|
|
+#define PROCREG_LRO_STATS "lro_stats"
|
|
+#define PROCREG_HW_LRO_STATS "hw_lro_stats"
|
|
+#define PROCREG_HW_LRO_AUTO_TLB "hw_lro_auto_tlb"
|
|
+#define PROCREG_GMAC "gmac"
|
|
+#define PROCREG_GMAC2 "gmac2"
|
|
+#define PROCREG_CP0 "cp0"
|
|
+#define PROCREG_RAQOS "qos"
|
|
+#define PROCREG_READ_VAL "regread_value"
|
|
+#define PROCREG_WRITE_VAL "regwrite_value"
|
|
+#define PROCREG_ADDR "reg_addr"
|
|
+#define PROCREG_CTL "procreg_control"
|
|
+#define PROCREG_RXDONE_INTR "rxdone_intr_count"
|
|
+#define PROCREG_ESW_INTR "esw_intr_count"
|
|
+#define PROCREG_ESW_CNT "esw_cnt"
|
|
+#define PROCREG_SNMP "snmp"
|
|
+#if defined (TASKLET_WORKQUEUE_SW)
|
|
+#define PROCREG_SCHE "schedule"
|
|
+#endif
|
|
+#define PROCREG_QDMA "qdma"
|
|
+#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+#define PROCREG_PDMA_DVT "pdma_dvt"
|
|
+#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+struct rt2880_reg_op_data {
|
|
+ char name[64];
|
|
+ unsigned int reg_addr;
|
|
+ unsigned int op;
|
|
+ unsigned int reg_value;
|
|
+};
|
|
+
|
|
+#ifdef CONFIG_RAETH_LRO
|
|
+struct lro_counters {
|
|
+ u32 lro_aggregated;
|
|
+ u32 lro_flushed;
|
|
+ u32 lro_no_desc;
|
|
+};
|
|
+
|
|
+struct lro_para_struct {
|
|
+ unsigned int lan_ip1;
|
|
+};
|
|
+
|
|
+#endif // CONFIG_RAETH_LRO //
|
|
+
|
|
+
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+typedef struct {
|
|
+ //layer2 header
|
|
+ uint8_t dmac[6];
|
|
+ uint8_t smac[6];
|
|
+
|
|
+ //vlan header
|
|
+ uint16_t vlan_tag;
|
|
+ uint16_t vlan1_gap;
|
|
+ uint16_t vlan1;
|
|
+ uint16_t vlan2_gap;
|
|
+ uint16_t vlan2;
|
|
+ uint16_t vlan_layer;
|
|
+
|
|
+ //pppoe header
|
|
+ uint32_t pppoe_gap;
|
|
+ uint16_t ppp_tag;
|
|
+ uint16_t pppoe_sid;
|
|
+
|
|
+ //layer3 header
|
|
+ uint16_t eth_type;
|
|
+ struct iphdr iph;
|
|
+ struct ipv6hdr ip6h;
|
|
+
|
|
+ //layer4 header
|
|
+ struct tcphdr th;
|
|
+ struct udphdr uh;
|
|
+
|
|
+ uint32_t pkt_type;
|
|
+ uint8_t is_mcast;
|
|
+
|
|
+} ParseResult;
|
|
+#endif
|
|
+typedef struct end_device
|
|
+{
|
|
+
|
|
+ unsigned int tx_cpu_owner_idx0;
|
|
+ unsigned int rx_cpu_owner_idx0;
|
|
+ unsigned int fe_int_status;
|
|
+ unsigned int tx_full;
|
|
+
|
|
+#if !defined (CONFIG_RAETH_QDMA)
|
|
+ unsigned int phy_tx_ring0;
|
|
+#else
|
|
+ /* QDMA Tx PTR */
|
|
+ struct sk_buff *free_skb[NUM_TX_DESC];
|
|
+ unsigned int tx_dma_ptr;
|
|
+ unsigned int tx_cpu_ptr;
|
|
+ unsigned int free_txd_num;
|
|
+ unsigned int free_txd_head;
|
|
+ unsigned int free_txd_tail;
|
|
+ struct QDMA_txdesc *txd_pool;
|
|
+ dma_addr_t phy_txd_pool;
|
|
+ unsigned int txd_pool_info[NUM_TX_DESC];
|
|
+ struct QDMA_txdesc *free_head;
|
|
+ unsigned int phy_free_head;
|
|
+ unsigned int *free_page_head;
|
|
+ unsigned int phy_free_page_head;
|
|
+ struct PDMA_rxdesc *qrx_ring;
|
|
+ unsigned int phy_qrx_ring;
|
|
+#ifdef CONFIG_RAETH_PDMATX_QDMARX /* QDMA RX */
|
|
+ unsigned int phy_tx_ring0;
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+ unsigned int phy_rx_ring0, phy_rx_ring1, phy_rx_ring2, phy_rx_ring3;
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || \
|
|
+ defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_RT6855) || \
|
|
+ defined(CONFIG_RALINK_RT6855A) || defined (CONFIG_RALINK_MT7620) || \
|
|
+ defined(CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628) || \
|
|
+ defined (CONFIG_ARCH_MT7623)
|
|
+ //send signal to user application to notify link status changed
|
|
+ struct work_struct kill_sig_wq;
|
|
+#endif
|
|
+
|
|
+ struct work_struct reset_task;
|
|
+#ifdef WORKQUEUE_BH
|
|
+ struct work_struct rx_wq;
|
|
+#else
|
|
+#if defined (TASKLET_WORKQUEUE_SW)
|
|
+ struct work_struct rx_wq;
|
|
+#endif
|
|
+ struct tasklet_struct rx_tasklet;
|
|
+ struct tasklet_struct tx_tasklet;
|
|
+#endif // WORKQUEUE_BH //
|
|
+
|
|
+#if defined(CONFIG_RAETH_QOS)
|
|
+ struct sk_buff * skb_free[NUM_TX_RINGS][NUM_TX_DESC];
|
|
+ unsigned int free_idx[NUM_TX_RINGS];
|
|
+#else
|
|
+ struct sk_buff* skb_free[NUM_TX_DESC];
|
|
+ unsigned int free_idx;
|
|
+#endif
|
|
+
|
|
+ struct net_device_stats stat; /* The new statistics table. */
|
|
+ spinlock_t page_lock; /* Page register locks */
|
|
+ struct PDMA_txdesc *tx_ring0;
|
|
+#if defined(CONFIG_RAETH_QOS)
|
|
+ struct PDMA_txdesc *tx_ring1;
|
|
+ struct PDMA_txdesc *tx_ring2;
|
|
+ struct PDMA_txdesc *tx_ring3;
|
|
+#endif
|
|
+ struct PDMA_rxdesc *rx_ring0;
|
|
+ struct sk_buff *netrx0_skbuf[NUM_RX_DESC];
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ struct PDMA_rxdesc *rx_ring3;
|
|
+ struct sk_buff *netrx3_skbuf[NUM_RX_DESC];
|
|
+ struct PDMA_rxdesc *rx_ring2;
|
|
+ struct sk_buff *netrx2_skbuf[NUM_RX_DESC];
|
|
+ struct PDMA_rxdesc *rx_ring1;
|
|
+ struct sk_buff *netrx1_skbuf[NUM_RX_DESC];
|
|
+#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+ struct PDMA_rxdesc *rx_ring1;
|
|
+ struct sk_buff *netrx1_skbuf[NUM_RX_DESC];
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+ struct PDMA_rxdesc *rx_ring2;
|
|
+ struct sk_buff *netrx2_skbuf[NUM_RX_DESC];
|
|
+ struct PDMA_rxdesc *rx_ring3;
|
|
+ struct sk_buff *netrx3_skbuf[NUM_RX_DESC];
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+#endif
|
|
+#ifdef CONFIG_RAETH_NAPI
|
|
+ atomic_t irq_sem;
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ struct napi_struct napi;
|
|
+#endif
|
|
+#endif
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ struct net_device *PseudoDev;
|
|
+ unsigned int isPseudo;
|
|
+#endif
|
|
+#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
|
|
+ struct mii_if_info mii_info;
|
|
+#endif
|
|
+#ifdef CONFIG_RAETH_LRO
|
|
+ struct lro_counters lro_counters;
|
|
+ struct net_lro_mgr lro_mgr;
|
|
+ struct net_lro_desc lro_arr[8];
|
|
+#endif
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_RX
|
|
+ struct vlan_group *vlgrp;
|
|
+#endif
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ struct work_struct hw_lro_wq;
|
|
+ unsigned int hw_lro_pkt_interval[3];
|
|
+ unsigned int hw_lro_alpha; /* 0 < packet interval alpha <= 10 */
|
|
+ unsigned int hw_lro_fix_setting; /* 0: dynamical AGG/AGE time, 1: fixed AGG/AGE time */
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+} END_DEVICE, *pEND_DEVICE;
|
|
+
|
|
+
|
|
+#define RAETH_VERSION "v3.1"
|
|
+
|
|
+#endif
|
|
+
|
|
+#define DMA_GLO_CFG PDMA_GLO_CFG
|
|
+
|
|
+#if defined(CONFIG_RAETH_QDMATX_QDMARX)
|
|
+#define GDMA1_FWD_PORT 0x5555
|
|
+#define GDMA2_FWD_PORT 0x5555
|
|
+#elif defined(CONFIG_RAETH_PDMATX_QDMARX)
|
|
+#define GDMA1_FWD_PORT 0x5555
|
|
+#define GDMA2_FWD_PORT 0x5555
|
|
+#else
|
|
+#define GDMA1_FWD_PORT 0x0000
|
|
+#define GDMA2_FWD_PORT 0x0000
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_QDMATX_QDMARX)
|
|
+#define RAETH_RX_CALC_IDX0 QRX_CRX_IDX_0
|
|
+#define RAETH_RX_CALC_IDX1 QRX_CRX_IDX_1
|
|
+#elif defined(CONFIG_RAETH_PDMATX_QDMARX)
|
|
+#define RAETH_RX_CALC_IDX0 QRX_CRX_IDX_0
|
|
+#define RAETH_RX_CALC_IDX1 QRX_CRX_IDX_1
|
|
+#else
|
|
+#define RAETH_RX_CALC_IDX0 RX_CALC_IDX0
|
|
+#define RAETH_RX_CALC_IDX1 RX_CALC_IDX1
|
|
+#endif
|
|
+#define RAETH_RX_CALC_IDX2 RX_CALC_IDX2
|
|
+#define RAETH_RX_CALC_IDX3 RX_CALC_IDX3
|
|
+#define RAETH_FE_INT_STATUS FE_INT_STATUS
|
|
+#define RAETH_FE_INT_ALL FE_INT_ALL
|
|
+#define RAETH_FE_INT_ENABLE FE_INT_ENABLE
|
|
+#define RAETH_FE_INT_DLY_INIT FE_INT_DLY_INIT
|
|
+#define RAETH_FE_INT_SETTING (RX_DONE_INT0 | RX_DONE_INT1 | TX_DONE_INT0 | TX_DONE_INT1 | TX_DONE_INT2 | TX_DONE_INT3)
+#define QFE_INT_SETTING (RX_DONE_INT0 | RX_DONE_INT1 | TX_DONE_INT0 | TX_DONE_INT1 | TX_DONE_INT2 | TX_DONE_INT3)
|
|
+#define RAETH_TX_DLY_INT TX_DLY_INT
|
|
+#define RAETH_TX_DONE_INT0 TX_DONE_INT0
|
|
+#define RAETH_DLY_INT_CFG DLY_INT_CFG
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/ra_ethtool.c
|
|
@@ -0,0 +1,515 @@
|
|
+#include <linux/module.h>
|
|
+#include <linux/version.h>
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/sched.h>
|
|
+
|
|
+#include <linux/netdevice.h>
|
|
+#include <linux/etherdevice.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/if_ether.h>
|
|
+#include <linux/ethtool.h>
|
|
+
|
|
+#include "ra2882ethreg.h"
|
|
+#include "raether.h"
|
|
+#include "ra_mac.h"
|
|
+#include "ra_ethtool.h"
|
|
+
|
|
+#define RAETHER_DRIVER_NAME "raether"
|
|
+#define RA_NUM_STATS 4
|
|
+
|
|
+
|
|
+static struct {
|
|
+ const char str[ETH_GSTRING_LEN];
|
|
+} ethtool_stats_keys[] = {
|
|
+ { "statistic1" },
|
|
+ { "statistic2" },
|
|
+ { "statistic3" },
|
|
+ { "statistic4" },
|
|
+};
|
|
+
|
|
+unsigned char get_current_phy_address(void)
|
|
+{
|
|
+ struct net_device *cur_dev_p;
|
|
+ END_DEVICE *ei_local;
|
|
+#if 0
|
|
+ for(cur_dev_p=dev_base; cur_dev_p!=NULL; cur_dev_p=cur_dev_p->next){
|
|
+ if (strncmp(cur_dev_p->name, DEV_NAME /* "eth2" usually */, 4) == 0)
|
|
+ break;
|
|
+ }
|
|
+#else
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ cur_dev_p = dev_get_by_name(&init_net, DEV_NAME);
|
|
+#else
|
|
+ cur_dev_p = dev_get_by_name(DEV_NAME);
|
|
+#endif
|
|
+#endif
|
|
+ if(!cur_dev_p)
|
|
+ return 0;
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ ei_local = netdev_priv(cur_dev_p);
|
|
+#else
|
|
+ ei_local = cur_dev_p->priv;
|
|
+#endif
|
|
+ return ei_local->mii_info.phy_id;
|
|
+}
|
|
+#if 0
|
|
+static u32 et_get_tx_csum(struct net_device *dev)
|
|
+{
|
|
+ return (sysRegRead(GDMA1_FWD_CFG) & GDM1_DISCRC) ? 0 : 1; // a pitfall here, "0" means to enable.
|
|
+}
|
|
+
|
|
+static u32 et_get_rx_csum(struct net_device *dev)
|
|
+{
|
|
+ return (sysRegRead(GDMA1_FWD_CFG) & GDM1_STRPCRC) ? 1 : 0;
|
|
+}
|
|
+
|
|
+static int et_set_tx_csum(struct net_device *dev, u32 data)
|
|
+{
|
|
+ int value;
|
|
+ //printk("et_set_tx_csum(): data = %d\n", data);
|
|
+
|
|
+ value = sysRegRead(GDMA1_FWD_CFG);
|
|
+ if(data)
|
|
+ value |= GDM1_DISCRC;
|
|
+ else
|
|
+ value &= ~GDM1_DISCRC;
|
|
+
|
|
+ sysRegWrite(GDMA1_FWD_CFG, value);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int et_set_rx_csum(struct net_device *dev, u32 data)
|
|
+{
|
|
+ int value;
|
|
+ //printk("et_set_rx_csum(): data = %d\n", data);
|
|
+
|
|
+ value = sysRegRead(GDMA1_FWD_CFG);
|
|
+ if(data)
|
|
+ value |= GDM1_STRPCRC;
|
|
+ else
|
|
+ value &= ~GDM1_STRPCRC;
|
|
+
|
|
+ sysRegWrite(GDMA1_FWD_CFG, value);
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+#define MII_CR_ADDR 0x00
|
|
+#define MII_CR_MR_AUTONEG_ENABLE (1 << 12)
|
|
+#define MII_CR_MR_RESTART_NEGOTIATION (1 << 9)
|
|
+
|
|
+#define AUTO_NEGOTIATION_ADVERTISEMENT 0x04
|
|
+#define AN_PAUSE (1 << 10)
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
|
|
+static void et_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
|
|
+{
|
|
+ int mii_an_reg;
|
|
+ int mdio_cfg_reg;
|
|
+ END_DEVICE *ei_local = dev->priv;
|
|
+
|
|
+ // get mii auto-negotiation register
|
|
+ mii_mgr_read(ei_local->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, &mii_an_reg);
|
|
+	epause->autoneg = (mii_an_reg & AN_PAUSE) ? 1 : 0; //get autoneg enable flag bit
|
|
+
|
|
+ mdio_cfg_reg = sysRegRead(MDIO_CFG);
|
|
+ epause->tx_pause = (mdio_cfg_reg & MDIO_CFG_GP1_FC_TX) ? 1 : 0;
|
|
+ epause->rx_pause = (mdio_cfg_reg & MDIO_CFG_GP1_FC_RX) ? 1 : 0;
|
|
+
|
|
+ //printk("et_get_pauseparam(): autoneg=%d, tx_pause=%d, rx_pause=%d\n", epause->autoneg, epause->tx_pause, epause->rx_pause);
|
|
+}
|
|
+
|
|
+static int et_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
|
|
+{
|
|
+ int mdio_cfg_reg;
|
|
+ int mii_an_reg;
|
|
+ END_DEVICE *ei_local = dev->priv;
|
|
+
|
|
+ //printk("et_set_pauseparam(): autoneg=%d, tx_pause=%d, rx_pause=%d\n", epause->autoneg, epause->tx_pause, epause->rx_pause);
|
|
+
|
|
+ // auto-neg pause
|
|
+ mii_mgr_read(ei_local->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, &mii_an_reg);
|
|
+ if(epause->autoneg)
|
|
+ mii_an_reg |= AN_PAUSE;
|
|
+ else
|
|
+ mii_an_reg &= ~AN_PAUSE;
|
|
+ mii_mgr_write(ei_local->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, mii_an_reg);
|
|
+
|
|
+ // tx/rx pause
|
|
+ mdio_cfg_reg = sysRegRead(MDIO_CFG);
|
|
+ if(epause->tx_pause)
|
|
+ mdio_cfg_reg |= MDIO_CFG_GP1_FC_TX;
|
|
+ else
|
|
+ mdio_cfg_reg &= ~MDIO_CFG_GP1_FC_TX;
|
|
+ if(epause->rx_pause)
|
|
+ mdio_cfg_reg |= MDIO_CFG_GP1_FC_RX;
|
|
+ else
|
|
+ mdio_cfg_reg &= ~MDIO_CFG_GP1_FC_RX;
|
|
+ sysRegWrite(MDIO_CFG, mdio_cfg_reg);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int et_nway_reset(struct net_device *dev)
|
|
+{
|
|
+ END_DEVICE *ei_local = dev->priv;
|
|
+ return mii_nway_restart(&ei_local->mii_info);
|
|
+}
|
|
+#endif
|
|
+
|
|
+static u32 et_get_link(struct net_device *dev)
|
|
+{
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+#else
|
|
+ END_DEVICE *ei_local = dev->priv;
|
|
+#endif
|
|
+ return mii_link_ok(&ei_local->mii_info);
|
|
+}
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
|
|
+static int et_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
|
+{
|
|
+ END_DEVICE *ei_local = dev->priv;
|
|
+ int rc;
|
|
+ rc = mii_ethtool_sset(&ei_local->mii_info, cmd);
|
|
+ return rc;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static int et_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
|
+{
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+#else
|
|
+ END_DEVICE *ei_local = dev->priv;
|
|
+#endif
|
|
+ mii_ethtool_gset(&ei_local->mii_info, cmd);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
|
|
+static u32 et_get_msglevel(struct net_device *dev)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void et_set_msglevel(struct net_device *dev, u32 datum)
|
|
+{
|
|
+ return;
|
|
+}
|
|
+
|
|
+static void et_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
|
|
+{
|
|
+ //END_DEVICE *ei_local = dev->priv;
|
|
+ strcpy(info->driver, RAETHER_DRIVER_NAME);
|
|
+ strcpy(info->version, RAETH_VERSION);
|
|
+ strcpy(info->bus_info, "n/a");
|
|
+ info->n_stats = RA_NUM_STATS;
|
|
+ info->eedump_len = 0;
|
|
+ info->regdump_len = 0;
|
|
+}
|
|
+
|
|
+static int et_get_stats_count(struct net_device *dev)
|
|
+{
|
|
+ return RA_NUM_STATS;
|
|
+}
|
|
+
|
|
+static void et_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
|
|
+{
|
|
+// END_DEVICE *ei_local = dev->priv;
|
|
+ data[0] = 0;//np->xstats.early_rx;
|
|
+ data[1] = 0;//np->xstats.tx_buf_mapped;
|
|
+ data[2] = 0;//np->xstats.tx_timeouts;
|
|
+ data[3] = 0;//np->xstats.rx_lost_in_ring;
|
|
+}
|
|
+
|
|
+static void et_get_strings(struct net_device *dev, u32 stringset, u8 *data)
|
|
+{
|
|
+ memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
|
|
+}
|
|
+#endif
|
|
+
|
|
+/*
|
|
+ * mii_mgr_read wrapper for mii.o ethtool
|
|
+ */
|
|
+int mdio_read(struct net_device *dev, int phy_id, int location)
|
|
+{
|
|
+ unsigned int result;
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+#else
|
|
+ END_DEVICE *ei_local = dev->priv;
|
|
+#endif
|
|
+ mii_mgr_read( (unsigned int) ei_local->mii_info.phy_id, (unsigned int)location, &result);
|
|
+ //printk("\n%s mii.o query= phy_id:%d, address:%d retval:%x\n", dev->name, phy_id, location, result);
|
|
+ return (int)result;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * mii_mgr_write wrapper for mii.o ethtool
|
|
+ */
|
|
+void mdio_write(struct net_device *dev, int phy_id, int location, int value)
|
|
+{
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+#else
|
|
+ END_DEVICE *ei_local = dev->priv;
|
|
+#endif
|
|
+ //printk("mii.o write= phy_id:%d, address:%d value:%x\n", phy_id, location, value);
|
|
+ mii_mgr_write( (unsigned int) ei_local->mii_info.phy_id, (unsigned int)location, (unsigned int)value);
|
|
+ return;
|
|
+}
|
|
+
|
|
+struct ethtool_ops ra_ethtool_ops = {
|
|
+
|
|
+ .get_settings = et_get_settings,
|
|
+ .get_link = et_get_link,
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
|
|
+ .get_drvinfo = et_get_drvinfo,
|
|
+ .set_settings = et_set_settings,
|
|
+ .get_pauseparam = et_get_pauseparam,
|
|
+ .set_pauseparam = et_set_pauseparam,
|
|
+// .get_rx_csum = et_get_rx_csum,
|
|
+// .set_rx_csum = et_set_rx_csum,
|
|
+// .get_tx_csum = et_get_tx_csum,
|
|
+// .set_tx_csum = et_set_tx_csum,
|
|
+ .nway_reset = et_nway_reset,
|
|
+ .get_msglevel = et_get_msglevel,
|
|
+ .set_msglevel = et_set_msglevel,
|
|
+ .get_strings = et_get_strings,
|
|
+ .get_stats_count = et_get_stats_count,
|
|
+ .get_ethtool_stats = et_get_ethtool_stats,
|
|
+/* .get_regs_len = et_get_regs_len,
|
|
+ .get_regs = et_get_regs,
|
|
+*/
|
|
+#endif
|
|
+};
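+/*
+ * Sketch: how a raeth net_device is typically wired to the ops and MDIO
+ * accessors in this file.  The real hookup lives in the probe path of
+ * raether.c; the 5-bit mask values below are the usual MII defaults and
+ * are illustrative.
+ */
+#if 0
+static void raeth_ethtool_hookup_example(struct net_device *dev)
+{
+	END_DEVICE *ei_local = netdev_priv(dev);
+
+	dev->ethtool_ops                = &ra_ethtool_ops;
+	ei_local->mii_info.dev          = dev;
+	ei_local->mii_info.mdio_read    = mdio_read;
+	ei_local->mii_info.mdio_write   = mdio_write;
+	ei_local->mii_info.phy_id_mask  = 0x1f;
+	ei_local->mii_info.reg_num_mask = 0x1f;
+	ei_local->mii_info.phy_id       = get_current_phy_address();
+}
+#endif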
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+/*
+ * We are unable to re-use the Raether functions because it is hard to
+ * tell where the call comes from: eth2 or eth3?
+ *
+ * The code size is around 950 bytes.
+ */
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
|
|
+static void et_virt_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
|
|
+{
|
|
+ // PSEUDO_ADAPTER *pseudo = dev->priv;
|
|
+ return et_get_drvinfo(dev, info);
|
|
+}
|
|
+
|
|
+static void et_virt_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
|
|
+{
|
|
+ int mii_an_reg, mdio_cfg_reg;
|
|
+ PSEUDO_ADAPTER *pseudo = dev->priv;
|
|
+
|
|
+ // get mii auto-negotiation register
|
|
+ mii_mgr_read(pseudo->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, &mii_an_reg);
|
|
+	epause->autoneg = (mii_an_reg & AN_PAUSE) ? 1 : 0; //get autoneg enable flag bit
|
|
+
|
|
+ mdio_cfg_reg = sysRegRead(MDIO_CFG);
|
|
+ epause->tx_pause = (mdio_cfg_reg & MDIO_CFG_GP1_FC_TX) ? 1 : 0;
|
|
+ epause->rx_pause = (mdio_cfg_reg & MDIO_CFG_GP1_FC_RX) ? 1 : 0;
|
|
+
|
|
+ //printk("et_get_pauseparam(): autoneg=%d, tx_pause=%d, rx_pause=%d\n", epause->autoneg, epause->tx_pause, epause->rx_pause);
|
|
+}
|
|
+
|
|
+static int et_virt_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
|
|
+{
|
|
+ int mdio_cfg_reg;
|
|
+ int mii_an_reg;
|
|
+ PSEUDO_ADAPTER *pseudo = dev->priv;
|
|
+
|
|
+ //printk("et_set_pauseparam(): autoneg=%d, tx_pause=%d, rx_pause=%d\n", epause->autoneg, epause->tx_pause, epause->rx_pause);
|
|
+ // auto-neg pause
|
|
+ mii_mgr_read(pseudo->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, &mii_an_reg);
|
|
+ if(epause->autoneg)
|
|
+ mii_an_reg |= AN_PAUSE;
|
|
+ else
|
|
+ mii_an_reg &= ~AN_PAUSE;
|
|
+ mii_mgr_write(pseudo->mii_info.phy_id, AUTO_NEGOTIATION_ADVERTISEMENT, mii_an_reg);
|
|
+
|
|
+ // tx/rx pause
|
|
+ mdio_cfg_reg = sysRegRead(MDIO_CFG);
|
|
+ if(epause->tx_pause)
|
|
+ mdio_cfg_reg |= MDIO_CFG_GP1_FC_TX;
|
|
+ else
|
|
+ mdio_cfg_reg &= ~MDIO_CFG_GP1_FC_TX;
|
|
+ if(epause->rx_pause)
|
|
+ mdio_cfg_reg |= MDIO_CFG_GP1_FC_RX;
|
|
+ else
|
|
+ mdio_cfg_reg &= ~MDIO_CFG_GP1_FC_RX;
|
|
+ sysRegWrite(MDIO_CFG, mdio_cfg_reg);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static u32 et_virt_get_tx_csum(struct net_device *dev)
|
|
+{
|
|
+ return (sysRegRead(GDMA2_FWD_CFG) & GDM1_DISCRC) ? 0 : 1; // a pitfall here, "0" means to enable.
|
|
+}
|
|
+
|
|
+static u32 et_virt_get_rx_csum(struct net_device *dev)
|
|
+{
|
|
+ return (sysRegRead(GDMA2_FWD_CFG) & GDM1_STRPCRC) ? 1 : 0;
|
|
+}
|
|
+
|
|
+static int et_virt_set_tx_csum(struct net_device *dev, u32 data)
|
|
+{
|
|
+ int value;
|
|
+ //printk("et_set_tx_csum(): data = %d\n", data);
|
|
+ value = sysRegRead(GDMA2_FWD_CFG);
|
|
+ if(data)
|
|
+ value |= GDM1_DISCRC;
|
|
+ else
|
|
+ value &= ~GDM1_DISCRC;
|
|
+	sysRegWrite(GDMA2_FWD_CFG, value);	/* write back the GDMA2 register that was read above */
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int et_virt_set_rx_csum(struct net_device *dev, u32 data)
|
|
+{
|
|
+ int value;
|
|
+ //printk("et_set_rx_csum(): data = %d\n", data);
|
|
+ value = sysRegRead(GDMA2_FWD_CFG);
|
|
+ if(data)
|
|
+ value |= GDM1_STRPCRC;
|
|
+ else
|
|
+ value &= ~GDM1_STRPCRC;
|
|
+	sysRegWrite(GDMA2_FWD_CFG, value);	/* write back the GDMA2 register that was read above */
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int et_virt_nway_reset(struct net_device *dev)
|
|
+{
|
|
+ PSEUDO_ADAPTER *pseudo = dev->priv;
|
|
+ return mii_nway_restart(&pseudo->mii_info);
|
|
+}
|
|
+#endif
|
|
+
|
|
+static u32 et_virt_get_link(struct net_device *dev)
|
|
+{
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ PSEUDO_ADAPTER *pseudo = netdev_priv(dev);
|
|
+#else
|
|
+ PSEUDO_ADAPTER *pseudo = dev->priv;
|
|
+#endif
|
|
+ return mii_link_ok(&pseudo->mii_info);
|
|
+}
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
|
|
+static int et_virt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
|
+{
|
|
+ PSEUDO_ADAPTER *pseudo = dev->priv;
|
|
+ int rc = mii_ethtool_sset(&pseudo->mii_info, cmd);
|
|
+ return rc;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static int et_virt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
|
+{
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ PSEUDO_ADAPTER *pseudo = netdev_priv(dev);
|
|
+#else
|
|
+ PSEUDO_ADAPTER *pseudo = dev->priv;
|
|
+#endif
|
|
+ mii_ethtool_gset(&pseudo->mii_info, cmd);
|
|
+ return 0;
|
|
+}
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
|
|
+static u32 et_virt_get_msglevel(struct net_device *dev)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void et_virt_set_msglevel(struct net_device *dev, u32 datum)
|
|
+{
|
|
+ return;
|
|
+}
|
|
+
|
|
+static void et_virt_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
|
|
+{
|
|
+// PSEUDO_ADAPTER *pseudo = dev->priv;
|
|
+ data[0] = 0;//np->xstats.early_rx;
|
|
+ data[1] = 0;//np->xstats.tx_buf_mapped;
|
|
+ data[2] = 0;//np->xstats.tx_timeouts;
|
|
+ data[3] = 0;//np->xstats.rx_lost_in_ring;
|
|
+}
|
|
+
|
|
+/* for virtual interface dedicated */
|
|
+#define RA_VIRT_NUM_STATS 4
|
|
+static struct {
|
|
+ const char str[ETH_GSTRING_LEN];
|
|
+} ethtool_stats_keys_2[] = {
|
|
+ { "statistic1" },
|
|
+ { "statistic2" },
|
|
+ { "statistic3" },
|
|
+ { "statistic4" },
|
|
+};
|
|
+
|
|
+static int et_virt_get_stats_count(struct net_device *dev)
|
|
+{
|
|
+ return RA_VIRT_NUM_STATS;
|
|
+}
|
|
+
|
|
+static void et_virt_get_strings(struct net_device *dev, u32 stringset, u8 *data)
|
|
+{
|
|
+ memcpy(data, ethtool_stats_keys_2, sizeof(ethtool_stats_keys_2));
|
|
+}
|
|
+#endif
|
|
+
|
|
+struct ethtool_ops ra_virt_ethtool_ops = {
|
|
+ .get_settings = et_virt_get_settings,
|
|
+ .get_link = et_virt_get_link,
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
|
|
+ .get_drvinfo = et_virt_get_drvinfo,
|
|
+ .set_settings = et_virt_set_settings,
|
|
+ .get_pauseparam = et_virt_get_pauseparam,
|
|
+ .set_pauseparam = et_virt_set_pauseparam,
|
|
+ .get_rx_csum = et_virt_get_rx_csum,
|
|
+ .set_rx_csum = et_virt_set_rx_csum,
|
|
+ .get_tx_csum = et_virt_get_tx_csum,
|
|
+ .set_tx_csum = et_virt_set_tx_csum,
|
|
+ .nway_reset = et_virt_nway_reset,
|
|
+ .get_msglevel = et_virt_get_msglevel,
|
|
+ .set_msglevel = et_virt_set_msglevel,
|
|
+ .get_strings = et_virt_get_strings,
|
|
+ .get_stats_count = et_virt_get_stats_count,
|
|
+ .get_ethtool_stats = et_virt_get_ethtool_stats,
|
|
+/* .get_regs_len = et_virt_get_regs_len,
|
|
+ .get_regs = et_virt_get_regs,
|
|
+*/
|
|
+#endif
|
|
+};
|
|
+
|
|
+int mdio_virt_read(struct net_device *dev, int phy_id, int location)
|
|
+{
|
|
+ unsigned int result;
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ PSEUDO_ADAPTER *pseudo = netdev_priv(dev);
|
|
+#else
|
|
+ PSEUDO_ADAPTER *pseudo = dev->priv;
|
|
+#endif
|
|
+ mii_mgr_read( (unsigned int) pseudo->mii_info.phy_id, (unsigned int)location, &result);
|
|
+// printk("%s mii.o query= phy_id:%d, address:%d retval:%d\n", dev->name, phy_id, location, result);
|
|
+ return (int)result;
|
|
+}
|
|
+
|
|
+void mdio_virt_write(struct net_device *dev, int phy_id, int location, int value)
|
|
+{
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ PSEUDO_ADAPTER *pseudo = netdev_priv(dev);
|
|
+#else
|
|
+ PSEUDO_ADAPTER *pseudo = dev->priv;
|
|
+#endif
|
|
+// printk("mii.o write= phy_id:%d, address:%d value:%d\n", phy_id, location, value);
|
|
+ mii_mgr_write( (unsigned int) pseudo->mii_info.phy_id, (unsigned int)location, (unsigned int)value);
|
|
+ return;
|
|
+}
|
|
+
|
|
+#endif /* CONFIG_PSEUDO_SUPPORT */
|
|
+
|
|
+
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/ra_ethtool.h
|
|
@@ -0,0 +1,13 @@
|
|
+#ifndef RA_ETHTOOL_H
|
|
+#define RA_ETHTOOL_H
|
|
+
|
|
+/* ethtool related */
|
|
+unsigned char get_current_phy_address(void);
|
|
+int mdio_read(struct net_device *dev, int phy_id, int location);
|
|
+void mdio_write(struct net_device *dev, int phy_id, int location, int value);
|
|
+
|
|
+/* for pseudo interface */
|
|
+int mdio_virt_read(struct net_device *dev, int phy_id, int location);
|
|
+void mdio_virt_write(struct net_device *dev, int phy_id, int location, int value);
|
|
+
|
|
+#endif
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/ra_ioctl.h
|
|
@@ -0,0 +1,102 @@
|
|
+#ifndef _RAETH_IOCTL_H
|
|
+#define _RAETH_IOCTL_H
|
|
+
|
|
+/* ioctl commands */
|
|
+#define RAETH_ESW_REG_READ 0x89F1
|
|
+#define RAETH_ESW_REG_WRITE 0x89F2
|
|
+#define RAETH_MII_READ 0x89F3
|
|
+#define RAETH_MII_WRITE 0x89F4
|
|
+#define RAETH_ESW_INGRESS_RATE 0x89F5
|
|
+#define RAETH_ESW_EGRESS_RATE 0x89F6
|
|
+#define RAETH_ESW_PHY_DUMP 0x89F7
|
|
+#define RAETH_QDMA_REG_READ 0x89F8
|
|
+#define RAETH_QDMA_REG_WRITE 0x89F9
|
|
+#define RAETH_QDMA_QUEUE_MAPPING 0x89FA
|
|
+#define RAETH_QDMA_READ_CPU_CLK 0x89FB
|
|
+#define RAETH_MII_READ_CL45 0x89FC
|
|
+#define RAETH_MII_WRITE_CL45 0x89FD
|
|
+#if defined(CONFIG_HW_SFQ)
|
|
+#define RAETH_QDMA_SFQ_WEB_ENABLE 0x89FE
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
|
|
+ defined (CONFIG_ARCH_MT7623)
|
|
+
|
|
+#define REG_ESW_WT_MAC_MFC 0x10
|
|
+#define REG_ESW_ISC 0x18
|
|
+#define REG_ESW_WT_MAC_ATA1 0x74
|
|
+#define REG_ESW_WT_MAC_ATA2 0x78
|
|
+#define REG_ESW_WT_MAC_ATWD 0x7C
|
|
+#define REG_ESW_WT_MAC_ATC 0x80
|
|
+
|
|
+#define REG_ESW_TABLE_TSRA1 0x84
|
|
+#define REG_ESW_TABLE_TSRA2 0x88
|
|
+#define REG_ESW_TABLE_ATRD 0x8C
|
|
+
|
|
+
|
|
+#define REG_ESW_VLAN_VTCR 0x90
|
|
+#define REG_ESW_VLAN_VAWD1 0x94
|
|
+#define REG_ESW_VLAN_VAWD2 0x98
|
|
+
|
|
+
|
|
+#define REG_ESW_VLAN_ID_BASE 0x100
|
|
+
|
|
+//#define REG_ESW_VLAN_ID_BASE 0x50
|
|
+#define REG_ESW_VLAN_MEMB_BASE 0x70
|
|
+#define REG_ESW_TABLE_SEARCH 0x24
|
|
+#define REG_ESW_TABLE_STATUS0 0x28
|
|
+#define REG_ESW_TABLE_STATUS1 0x2C
|
|
+#define REG_ESW_TABLE_STATUS2 0x30
|
|
+#define REG_ESW_WT_MAC_AD0 0x34
|
|
+#define REG_ESW_WT_MAC_AD1 0x38
|
|
+#define REG_ESW_WT_MAC_AD2 0x3C
|
|
+
|
|
+#else
|
|
+/* rt3052 embedded ethernet switch registers */
|
|
+#define REG_ESW_VLAN_ID_BASE 0x50
|
|
+#define REG_ESW_VLAN_MEMB_BASE 0x70
|
|
+#define REG_ESW_TABLE_SEARCH 0x24
|
|
+#define REG_ESW_TABLE_STATUS0 0x28
|
|
+#define REG_ESW_TABLE_STATUS1 0x2C
|
|
+#define REG_ESW_TABLE_STATUS2 0x30
|
|
+#define REG_ESW_WT_MAC_AD0 0x34
|
|
+#define REG_ESW_WT_MAC_AD1 0x38
|
|
+#define REG_ESW_WT_MAC_AD2 0x3C
|
|
+#endif
|
|
+
|
|
+
|
|
+#if defined(CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+#define REG_ESW_MAX 0x16C
|
|
+#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined (CONFIG_RALINK_MT7620)
|
|
+#define REG_ESW_MAX 0x7FFFF
|
|
+#else //RT305x, RT3350
|
|
+#define REG_ESW_MAX 0xFC
|
|
+#endif
|
|
+#define REG_HQOS_MAX 0x3FFF
|
|
+
|
|
+
|
|
+typedef struct rt3052_esw_reg {
|
|
+ unsigned int off;
|
|
+ unsigned int val;
|
|
+} esw_reg;
|
|
+
|
|
+typedef struct ralink_mii_ioctl_data {
|
|
+ __u32 phy_id;
|
|
+ __u32 reg_num;
|
|
+ __u32 val_in;
|
|
+ __u32 val_out;
|
|
+ __u32 port_num;
|
|
+ __u32 dev_addr;
|
|
+ __u32 reg_addr;
|
|
+} ra_mii_ioctl_data;
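+/*
+ * Sketch of the expected user-space calling convention for the MII
+ * ioctls above: the request structure is passed through ifr_data of a
+ * private ioctl on the raeth interface.  Interface name, PHY address
+ * and register number are illustrative.
+ */
+#if 0	/* user-space example, not part of the driver build */
+#include <string.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+
+static int raeth_mii_read_example(int sock)
+{
+	struct ifreq ifr;
+	ra_mii_ioctl_data mii;
+
+	memset(&ifr, 0, sizeof(ifr));
+	memset(&mii, 0, sizeof(mii));
+	strncpy(ifr.ifr_name, "eth2", IFNAMSIZ - 1);
+	mii.phy_id  = 0;			/* PHY address  */
+	mii.reg_num = 1;			/* MII register */
+	ifr.ifr_data = (void *)&mii;
+	if (ioctl(sock, RAETH_MII_READ, &ifr) < 0)
+		return -1;
+	return (int)mii.val_out;
+}
+#endif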
|
|
+
|
|
+typedef struct rt335x_esw_reg {
|
|
+ unsigned int on_off;
|
|
+ unsigned int port;
|
|
+ unsigned int bw;/*Mbps*/
|
|
+} esw_rate;
|
|
+
|
|
+
|
|
+#endif
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/ra_mac.c
|
|
@@ -0,0 +1,2645 @@
|
|
+#include <linux/module.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/sched.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/fcntl.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/ptrace.h>
|
|
+#include <linux/ioport.h>
|
|
+#include <linux/in.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/string.h>
|
|
+#include <linux/signal.h>
|
|
+#include <linux/irq.h>
|
|
+#include <linux/ctype.h>
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,4)
|
|
+#include <asm/system.h>
|
|
+#include <linux/mca.h>
|
|
+#endif
|
|
+#include <asm/io.h>
|
|
+#include <asm/bitops.h>
|
|
+#include <asm/io.h>
|
|
+#include <asm/dma.h>
|
|
+
|
|
+#include <asm/rt2880/surfboardint.h> /* for cp0 reg access, added by bobtseng */
|
|
+
|
|
+#include <linux/errno.h>
|
|
+#include <linux/init.h>
|
|
+
|
|
+#include <linux/netdevice.h>
|
|
+#include <linux/etherdevice.h>
|
|
+#include <linux/skbuff.h>
|
|
+
|
|
+#include <linux/init.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/proc_fs.h>
|
|
+#include <asm/uaccess.h>
|
|
+
|
|
+#include <linux/seq_file.h>
|
|
+
|
|
+
|
|
+#if defined(CONFIG_RAETH_LRO)
|
|
+#include <linux/inet_lro.h>
|
|
+#endif
|
|
+
|
|
+#include "ra2882ethreg.h"
|
|
+#include "raether.h"
|
|
+#include "ra_mac.h"
|
|
+#include "ra_ethtool.h"
|
|
+#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+#include "dvt/raether_pdma_dvt.h"
|
|
+#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+
|
|
+extern struct net_device *dev_raether;
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined (CONFIG_RALINK_MT7620)
|
|
+extern unsigned short p0_rx_good_cnt;
|
|
+extern unsigned short p0_tx_good_cnt;
|
|
+extern unsigned short p1_rx_good_cnt;
|
|
+extern unsigned short p1_tx_good_cnt;
|
|
+extern unsigned short p2_rx_good_cnt;
|
|
+extern unsigned short p2_tx_good_cnt;
|
|
+extern unsigned short p3_rx_good_cnt;
|
|
+extern unsigned short p3_tx_good_cnt;
|
|
+extern unsigned short p4_rx_good_cnt;
|
|
+extern unsigned short p4_tx_good_cnt;
|
|
+extern unsigned short p5_rx_good_cnt;
|
|
+extern unsigned short p5_tx_good_cnt;
|
|
+extern unsigned short p6_rx_good_cnt;
|
|
+extern unsigned short p6_tx_good_cnt;
|
|
+
|
|
+extern unsigned short p0_rx_byte_cnt;
|
|
+extern unsigned short p1_rx_byte_cnt;
|
|
+extern unsigned short p2_rx_byte_cnt;
|
|
+extern unsigned short p3_rx_byte_cnt;
|
|
+extern unsigned short p4_rx_byte_cnt;
|
|
+extern unsigned short p5_rx_byte_cnt;
|
|
+extern unsigned short p6_rx_byte_cnt;
|
|
+extern unsigned short p0_tx_byte_cnt;
|
|
+extern unsigned short p1_tx_byte_cnt;
|
|
+extern unsigned short p2_tx_byte_cnt;
|
|
+extern unsigned short p3_tx_byte_cnt;
|
|
+extern unsigned short p4_tx_byte_cnt;
|
|
+extern unsigned short p5_tx_byte_cnt;
|
|
+extern unsigned short p6_tx_byte_cnt;
|
|
+
|
|
+#if defined(CONFIG_RALINK_MT7620)
|
|
+extern unsigned short p7_rx_good_cnt;
|
|
+extern unsigned short p7_tx_good_cnt;
|
|
+extern unsigned short p7_rx_byte_cnt;
|
|
+extern unsigned short p7_tx_byte_cnt;
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+
|
|
+
|
|
+#if defined(CONFIG_RAETH_TSO)
|
|
+int txd_cnt[MAX_SKB_FRAGS/2 + 1];
|
|
+int tso_cnt[16];
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_LRO)
|
|
+#define MAX_AGGR 64
|
|
+#define MAX_DESC 8
|
|
+int lro_stats_cnt[MAX_AGGR + 1];
|
|
+int lro_flush_cnt[MAX_AGGR + 1];
|
|
+int lro_len_cnt1[16];
|
|
+//int lro_len_cnt2[16];
|
|
+int aggregated[MAX_DESC];
|
|
+int lro_aggregated;
|
|
+int lro_flushed;
|
|
+int lro_nodesc;
|
|
+int force_flush;
|
|
+int tot_called1;
|
|
+int tot_called2;
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_HW_LRO)
|
|
+#define HW_LRO_RING_NUM 3
|
|
+#define MAX_HW_LRO_AGGR 64
|
|
+unsigned int hw_lro_agg_num_cnt[HW_LRO_RING_NUM][MAX_HW_LRO_AGGR + 1];
|
|
+unsigned int hw_lro_agg_size_cnt[HW_LRO_RING_NUM][16];
|
|
+unsigned int hw_lro_tot_agg_cnt[HW_LRO_RING_NUM];
|
|
+unsigned int hw_lro_tot_flush_cnt[HW_LRO_RING_NUM];
|
|
+#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
|
|
+unsigned int hw_lro_agg_flush_cnt[HW_LRO_RING_NUM];
|
|
+unsigned int hw_lro_age_flush_cnt[HW_LRO_RING_NUM];
|
|
+unsigned int hw_lro_seq_flush_cnt[HW_LRO_RING_NUM];
|
|
+unsigned int hw_lro_timestamp_flush_cnt[HW_LRO_RING_NUM];
|
|
+unsigned int hw_lro_norule_flush_cnt[HW_LRO_RING_NUM];
|
|
+#endif /* CONFIG_RAETH_HW_LRO_REASON_DBG */
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+
|
|
+#if defined(CONFIG_RAETH_QDMA)
|
|
+extern unsigned int M2Q_table[64];
|
|
+extern struct QDMA_txdesc *free_head;
|
|
+#endif
|
|
+#if defined (CONFIG_ARCH_MT7623)
|
|
+extern struct SFQ_table *sfq0;
|
|
+extern struct SFQ_table *sfq1;
|
|
+extern struct SFQ_table *sfq2;
|
|
+extern struct SFQ_table *sfq3;
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_USER_SNMPD)
|
|
+
|
|
+static int ra_snmp_seq_show(struct seq_file *seq, void *v)
|
|
+{
|
|
+#if !defined(CONFIG_RALINK_RT5350) && !defined(CONFIG_RALINK_MT7620) && !defined (CONFIG_RALINK_MT7628)
|
|
+
|
|
+ seq_printf(seq, "rx counters: %x %x %x %x %x %x %x\n", sysRegRead(GDMA_RX_GBCNT0), sysRegRead(GDMA_RX_GPCNT0),sysRegRead(GDMA_RX_OERCNT0), sysRegRead(GDMA_RX_FERCNT0), sysRegRead(GDMA_RX_SERCNT0), sysRegRead(GDMA_RX_LERCNT0), sysRegRead(GDMA_RX_CERCNT0));
|
|
+
|
|
+ seq_printf(seq, "fc config: %x %x %x %x\n", sysRegRead(CDMA_FC_CFG), sysRegRead(GDMA1_FC_CFG), PDMA_FC_CFG, sysRegRead(PDMA_FC_CFG));
|
|
+
|
|
+ seq_printf(seq, "scheduler: %x %x %x\n", sysRegRead(GDMA1_SCH_CFG), sysRegRead(GDMA2_SCH_CFG), sysRegRead(PDMA_SCH_CFG));
|
|
+
|
|
+#endif
|
|
+ seq_printf(seq, "ports: %x %x %x %x %x %x\n", sysRegRead(PORT0_PKCOUNT), sysRegRead(PORT1_PKCOUNT), sysRegRead(PORT2_PKCOUNT), sysRegRead(PORT3_PKCOUNT), sysRegRead(PORT4_PKCOUNT), sysRegRead(PORT5_PKCOUNT));
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ra_snmp_seq_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, ra_snmp_seq_show, NULL);
|
|
+}
|
|
+
|
|
+static const struct file_operations ra_snmp_seq_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = ra_snmp_seq_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+#endif
|
|
+
|
|
+
|
|
+#if defined (CONFIG_GIGAPHY) || defined (CONFIG_100PHY) || \
|
|
+ defined (CONFIG_P5_MAC_TO_PHY_MODE) || defined (CONFIG_RAETH_GMAC2)
|
|
+#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
|
|
+ defined (CONFIG_ARCH_MT7623)
|
|
+void enable_auto_negotiate(int unused)
|
|
+{
|
|
+ u32 regValue;
|
|
+#if !defined (CONFIG_RALINK_MT7621) && !defined (CONFIG_ARCH_MT7623)
|
|
+ u32 addr = CONFIG_MAC_TO_GIGAPHY_MODE_ADDR;
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RALINK_MT7621)
|
|
+ //enable MDIO mode all the time
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
|
|
+ regValue &= ~(0x3 << 12);
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
|
|
+#endif
|
|
+
|
|
+ /* FIXME: we don't know how to deal with PHY end addr */
|
|
+ regValue = sysRegRead(ESW_PHY_POLLING);
|
|
+ regValue |= (1<<31);
|
|
+ regValue &= ~(0x1f);
|
|
+ regValue &= ~(0x1f<<8);
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+ regValue |= ((addr-1) << 0);//setup PHY address for auto polling (Start Addr).
|
|
+ regValue |= (addr << 8);// setup PHY address for auto polling (End Addr).
|
|
+#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN)|| defined (CONFIG_GE_RGMII_INTERNAL_P4_AN) || defined (CONFIG_GE2_RGMII_AN)
|
|
+ regValue |= ((CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2-1)&0x1f << 0);//setup PHY address for auto polling (Start Addr).
|
|
+ regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 << 8);// setup PHY address for auto polling (End Addr).
|
|
+#else
|
|
+ regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR << 0);//setup PHY address for auto polling (Start Addr).
|
|
+ regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 << 8);// setup PHY address for auto polling (End Addr).
|
|
+#endif
|
|
+#else
|
|
+ regValue |= (addr << 0);// setup PHY address for auto polling (start Addr).
|
|
+ regValue |= (addr << 8);// setup PHY address for auto polling (End Addr).
|
|
+#endif
|
|
+
|
|
+ /*kurtis: AN is strange*/
|
|
+ sysRegWrite(ESW_PHY_POLLING, regValue);
|
|
+
|
|
+#if defined (CONFIG_P4_MAC_TO_PHY_MODE)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x56330;
|
|
+#endif
|
|
+#if defined (CONFIG_P5_MAC_TO_PHY_MODE)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x56330;
|
|
+#endif
|
|
+}
|
|
+#elif defined (CONFIG_RALINK_RT2880) || defined(CONFIG_RALINK_RT3883) || \
|
|
+ defined (CONFIG_RALINK_RT3052) || defined(CONFIG_RALINK_RT3352)
|
|
+
|
|
+void enable_auto_negotiate(int ge)
|
|
+{
|
|
+#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352)
|
|
+ u32 regValue = sysRegRead(0xb01100C8);
|
|
+#else
|
|
+ u32 regValue;
|
|
+ regValue = (ge == 2)? sysRegRead(MDIO_CFG2) : sysRegRead(MDIO_CFG);
|
|
+#endif
|
|
+
|
|
+ regValue &= 0xe0ff7fff; // clear auto polling related field:
|
|
+ // (MD_PHY1ADDR & GP1_FRC_EN).
|
|
+ regValue |= 0x20000000; // force to enable MDC/MDIO auto polling.
|
|
+
|
|
+#if defined (CONFIG_GE2_RGMII_AN) || defined (CONFIG_GE2_MII_AN)
|
|
+ if(ge==2) {
|
|
+ regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2 << 24); // setup PHY address for auto polling.
|
|
+ }
|
|
+#endif
|
|
+#if defined (CONFIG_GE1_RGMII_AN) || defined (CONFIG_GE1_MII_AN) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
|
|
+ if(ge==1) {
|
|
+ regValue |= (CONFIG_MAC_TO_GIGAPHY_MODE_ADDR << 24); // setup PHY address for auto polling.
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352)
|
|
+ sysRegWrite(0xb01100C8, regValue);
|
|
+#else
|
|
+ if (ge == 2)
|
|
+ sysRegWrite(MDIO_CFG2, regValue);
|
|
+ else
|
|
+ sysRegWrite(MDIO_CFG, regValue);
|
|
+#endif
|
|
+}
|
|
+#endif
|
|
+#endif
|
|
+void ra2880stop(END_DEVICE *ei_local)
|
|
+{
|
|
+ unsigned int regValue;
|
|
+ printk("ra2880stop()...");
|
|
+
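+ /* Halt the frame engine DMA: clearing TX_WB_DDONE, RX_DMA_EN and TX_DMA_EN
+  * in DMA_GLO_CFG below stops descriptor write-back and both DMA directions. */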
|
|
+ regValue = sysRegRead(DMA_GLO_CFG);
|
|
+ regValue &= ~(TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
|
|
+ sysRegWrite(DMA_GLO_CFG, regValue);
|
|
+
|
|
+ printk("Done\n");
|
|
+ // printk("Done0x%x...\n", readreg(DMA_GLO_CFG));
|
|
+}
|
|
+
|
|
+void ei_irq_clear(void)
|
|
+{
|
|
+ sysRegWrite(FE_INT_STATUS, 0xFFFFFFFF);
|
|
+}
|
|
+
|
|
+void rt2880_gmac_hard_reset(void)
|
|
+{
|
|
+#if !defined (CONFIG_RALINK_RT6855A)
|
|
+ //FIXME
|
|
+ sysRegWrite(RSTCTRL, RALINK_FE_RST);
|
|
+ sysRegWrite(RSTCTRL, 0);
|
|
+#endif
|
|
+}
|
|
+
|
|
+void ra2880EnableInterrupt(void)
|
|
+{
|
|
+ unsigned int regValue = sysRegRead(FE_INT_ENABLE);
|
|
+ RAETH_PRINT("FE_INT_ENABLE -- : 0x%08x\n", regValue);
|
|
+// regValue |= (RX_DONE_INT0 | TX_DONE_INT0);
|
|
+
|
|
+ sysRegWrite(FE_INT_ENABLE, regValue);
|
|
+}
|
|
+
|
|
+void ra2880MacAddressSet(unsigned char p[6])
|
|
+{
|
|
+ unsigned long regValue;
|
|
+
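+ /* The station MAC is split across two registers, high 16 bits first; e.g.
+  * for the (illustrative) address 00:0c:43:28:80:01 the writes below are
+  * ADRH = 0x0000000c and ADRL = 0x43288001. */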
|
|
+ regValue = (p[0] << 8) | (p[1]);
|
|
+#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+ sysRegWrite(SDM_MAC_ADRH, regValue);
|
|
+ printk("GMAC1_MAC_ADRH -- : 0x%08x\n", sysRegRead(SDM_MAC_ADRH));
|
|
+#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A)
|
|
+ sysRegWrite(GDMA1_MAC_ADRH, regValue);
|
|
+ printk("GMAC1_MAC_ADRH -- : 0x%08x\n", sysRegRead(GDMA1_MAC_ADRH));
|
|
+
|
|
+ /* To keep the consistency between RT6855 and RT62806, GSW should keep the register. */
|
|
+ sysRegWrite(SMACCR1, regValue);
|
|
+ printk("SMACCR1 -- : 0x%08x\n", sysRegRead(SMACCR1));
|
|
+#elif defined (CONFIG_RALINK_MT7620)
|
|
+ sysRegWrite(SMACCR1, regValue);
|
|
+ printk("SMACCR1 -- : 0x%08x\n", sysRegRead(SMACCR1));
|
|
+#else
|
|
+ sysRegWrite(GDMA1_MAC_ADRH, regValue);
|
|
+ printk("GMAC1_MAC_ADRH -- : 0x%08x\n", sysRegRead(GDMA1_MAC_ADRH));
|
|
+#endif
|
|
+
|
|
+ regValue = (p[2] << 24) | (p[3] <<16) | (p[4] << 8) | p[5];
|
|
+#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+ sysRegWrite(SDM_MAC_ADRL, regValue);
|
|
+ printk("GMAC1_MAC_ADRL -- : 0x%08x\n", sysRegRead(SDM_MAC_ADRL));
|
|
+#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A)
|
|
+ sysRegWrite(GDMA1_MAC_ADRL, regValue);
|
|
+ printk("GMAC1_MAC_ADRL -- : 0x%08x\n", sysRegRead(GDMA1_MAC_ADRL));
|
|
+
|
|
+ /* To keep the consistency between RT6855 and RT62806, GSW should keep the register. */
|
|
+ sysRegWrite(SMACCR0, regValue);
|
|
+ printk("SMACCR0 -- : 0x%08x\n", sysRegRead(SMACCR0));
|
|
+#elif defined (CONFIG_RALINK_MT7620)
|
|
+ sysRegWrite(SMACCR0, regValue);
|
|
+ printk("SMACCR0 -- : 0x%08x\n", sysRegRead(SMACCR0));
|
|
+#else
|
|
+ sysRegWrite(GDMA1_MAC_ADRL, regValue);
|
|
+ printk("GMAC1_MAC_ADRL -- : 0x%08x\n", sysRegRead(GDMA1_MAC_ADRL));
|
|
+#endif
|
|
+
|
|
+ return;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+void ra2880Mac2AddressSet(unsigned char p[6])
|
|
+{
|
|
+ unsigned long regValue;
|
|
+
|
|
+ regValue = (p[0] << 8) | (p[1]);
|
|
+ sysRegWrite(GDMA2_MAC_ADRH, regValue);
|
|
+
|
|
+ regValue = (p[2] << 24) | (p[3] <<16) | (p[4] << 8) | p[5];
|
|
+ sysRegWrite(GDMA2_MAC_ADRL, regValue);
|
|
+
|
|
+ printk("GDMA2_MAC_ADRH -- : 0x%08x\n", sysRegRead(GDMA2_MAC_ADRH));
|
|
+ printk("GDMA2_MAC_ADRL -- : 0x%08x\n", sysRegRead(GDMA2_MAC_ADRL));
|
|
+ return;
|
|
+}
|
|
+#endif
|
|
+
|
|
+/**
|
|
+ * hard_init - Called by raeth_probe to initialize network device
|
|
+ * @dev: device pointer
|
|
+ *
|
|
+ * ethdev_init initializes dev->priv and sets it to the END_DEVICE structure
|
|
+ *
|
|
+ */
|
|
+void ethtool_init(struct net_device *dev)
|
|
+{
|
|
+#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+
|
|
+ // init mii structure
|
|
+ ei_local->mii_info.dev = dev;
|
|
+ ei_local->mii_info.mdio_read = mdio_read;
|
|
+ ei_local->mii_info.mdio_write = mdio_write;
|
|
+ ei_local->mii_info.phy_id_mask = 0x1f;
|
|
+ ei_local->mii_info.reg_num_mask = 0x1f;
|
|
+ ei_local->mii_info.supports_gmii = mii_check_gmii_support(&ei_local->mii_info);
|
|
+ // TODO: phy_id: 0~4
|
|
+ ei_local->mii_info.phy_id = 1;
|
|
+#endif
|
|
+ return;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Routine Name : get_ring_usage(mode, index)
|
|
+ * Description: calculate ring usage for tx/rx rings
|
|
+ * Mode 1 : Tx Ring
|
|
+ * Mode 2 : Rx Ring
|
|
+ */
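+/*
+ * Occupancy below follows the usual ring arithmetic, e.g. for TX:
+ *     tx_usage = (TX_CTX_IDX - TX_DTX_IDX + NUM_TX_DESC) % NUM_TX_DESC
+ * so with a (hypothetical) 64-entry ring, CTX=10 and DTX=6 leave 4
+ * descriptors pending.  When both indices are equal, the DDONE bit of the
+ * current descriptor distinguishes an empty ring from a completely full one.
+ * The RX path uses (RX_DRX_IDX - RX_CALC_IDX - 1 + NUM_RX_DESC) % NUM_RX_DESC.
+ */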
|
|
+int get_ring_usage(int mode, int i)
|
|
+{
|
|
+ unsigned long tx_ctx_idx, tx_dtx_idx, tx_usage;
|
|
+ unsigned long rx_calc_idx, rx_drx_idx, rx_usage;
|
|
+
|
|
+ struct PDMA_rxdesc* rxring;
|
|
+ struct PDMA_txdesc* txring;
|
|
+
|
|
+ END_DEVICE *ei_local = netdev_priv(dev_raether);
|
|
+
|
|
+
|
|
+ if (mode == 2 ) {
|
|
+ /* cpu point to the next descriptor of rx dma ring */
|
|
+ rx_calc_idx = *(unsigned long*)RX_CALC_IDX0;
|
|
+ rx_drx_idx = *(unsigned long*)RX_DRX_IDX0;
|
|
+ rxring = (struct PDMA_rxdesc*)RX_BASE_PTR0;
|
|
+
|
|
+ rx_usage = (rx_drx_idx - rx_calc_idx -1 + NUM_RX_DESC) % NUM_RX_DESC;
|
|
+ if ( rx_calc_idx == rx_drx_idx ) {
|
|
+ if ( rxring[rx_drx_idx].rxd_info2.DDONE_bit == 1)
|
|
+ rx_usage = NUM_RX_DESC;
|
|
+ else
|
|
+ rx_usage = 0;
|
|
+ }
|
|
+ return rx_usage;
|
|
+ }
|
|
+
|
|
+
|
|
+ switch (i) {
|
|
+ case 0:
|
|
+ tx_ctx_idx = *(unsigned long*)TX_CTX_IDX0;
|
|
+ tx_dtx_idx = *(unsigned long*)TX_DTX_IDX0;
|
|
+ txring = ei_local->tx_ring0;
|
|
+ break;
|
|
+#if defined(CONFIG_RAETH_QOS)
|
|
+ case 1:
|
|
+ tx_ctx_idx = *(unsigned long*)TX_CTX_IDX1;
|
|
+ tx_dtx_idx = *(unsigned long*)TX_DTX_IDX1;
|
|
+ txring = ei_local->tx_ring1;
|
|
+ break;
|
|
+ case 2:
|
|
+ tx_ctx_idx = *(unsigned long*)TX_CTX_IDX2;
|
|
+ tx_dtx_idx = *(unsigned long*)TX_DTX_IDX2;
|
|
+ txring = ei_local->tx_ring2;
|
|
+ break;
|
|
+ case 3:
|
|
+ tx_ctx_idx = *(unsigned long*)TX_CTX_IDX3;
|
|
+ tx_dtx_idx = *(unsigned long*)TX_DTX_IDX3;
|
|
+ txring = ei_local->tx_ring3;
|
|
+ break;
|
|
+#endif
|
|
+ default:
|
|
+ printk("get_tx_idx failed %d %d\n", mode, i);
|
|
+ return 0;
|
|
+ };
|
|
+
|
|
+ tx_usage = (tx_ctx_idx - tx_dtx_idx + NUM_TX_DESC) % NUM_TX_DESC;
|
|
+ if ( tx_ctx_idx == tx_dtx_idx ) {
|
|
+ if ( txring[tx_ctx_idx].txd_info2.DDONE_bit == 1)
|
|
+ tx_usage = 0;
|
|
+ else
|
|
+ tx_usage = NUM_TX_DESC;
|
|
+ }
|
|
+ return tx_usage;
|
|
+
|
|
+}
|
|
+
|
|
+#if defined(CONFIG_RAETH_QOS)
|
|
+void dump_qos(struct seq_file *s)
|
|
+{
|
|
+ int usage;
|
|
+ int i;
|
|
+
|
|
+ seq_printf(s, "\n-----Raeth QOS -----\n\n");
|
|
+
|
|
+ for ( i = 0; i < 4; i++) {
|
|
+ usage = get_ring_usage(1,i);
|
|
+ seq_printf(s, "Tx Ring%d Usage : %d/%d\n", i, usage, NUM_TX_DESC);
|
|
+ }
|
|
+
|
|
+ usage = get_ring_usage(2,0);
|
|
+ seq_printf(s, "RX Usage : %d/%d\n\n", usage, NUM_RX_DESC);
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+ seq_printf(s, "PSE_FQFC_CFG(0x%08x) : 0x%08x\n", PSE_FQFC_CFG, sysRegRead(PSE_FQFC_CFG));
|
|
+ seq_printf(s, "PSE_IQ_CFG(0x%08x) : 0x%08x\n", PSE_IQ_CFG, sysRegRead(PSE_IQ_CFG));
|
|
+ seq_printf(s, "PSE_QUE_STA(0x%08x) : 0x%08x\n", PSE_QUE_STA, sysRegRead(PSE_QUE_STA));
|
|
+#elif defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+
|
|
+#else
|
|
+ seq_printf(s, "GDMA1_FC_CFG(0x%08x) : 0x%08x\n", GDMA1_FC_CFG, sysRegRead(GDMA1_FC_CFG));
|
|
+ seq_printf(s, "GDMA2_FC_CFG(0x%08x) : 0x%08x\n", GDMA2_FC_CFG, sysRegRead(GDMA2_FC_CFG));
|
|
+ seq_printf(s, "PDMA_FC_CFG(0x%08x) : 0x%08x\n", PDMA_FC_CFG, sysRegRead(PDMA_FC_CFG));
|
|
+ seq_printf(s, "PSE_FQ_CFG(0x%08x) : 0x%08x\n", PSE_FQ_CFG, sysRegRead(PSE_FQ_CFG));
|
|
+#endif
|
|
+ seq_printf(s, "\n\nTX_CTX_IDX0 : 0x%08x\n", sysRegRead(TX_CTX_IDX0));
|
|
+ seq_printf(s, "TX_DTX_IDX0 : 0x%08x\n", sysRegRead(TX_DTX_IDX0));
|
|
+ seq_printf(s, "TX_CTX_IDX1 : 0x%08x\n", sysRegRead(TX_CTX_IDX1));
|
|
+ seq_printf(s, "TX_DTX_IDX1 : 0x%08x\n", sysRegRead(TX_DTX_IDX1));
|
|
+ seq_printf(s, "TX_CTX_IDX2 : 0x%08x\n", sysRegRead(TX_CTX_IDX2));
|
|
+ seq_printf(s, "TX_DTX_IDX2 : 0x%08x\n", sysRegRead(TX_DTX_IDX2));
|
|
+ seq_printf(s, "TX_CTX_IDX3 : 0x%08x\n", sysRegRead(TX_CTX_IDX3));
|
|
+ seq_printf(s, "TX_DTX_IDX3 : 0x%08x\n", sysRegRead(TX_DTX_IDX3));
|
|
+ seq_printf(s, "RX_CALC_IDX0 : 0x%08x\n", sysRegRead(RX_CALC_IDX0));
|
|
+ seq_printf(s, "RX_DRX_IDX0 : 0x%08x\n", sysRegRead(RX_DRX_IDX0));
|
|
+
|
|
+ seq_printf(s, "\n------------------------------\n\n");
|
|
+}
|
|
+#endif
|
|
+
|
|
+void dump_reg(struct seq_file *s)
|
|
+{
|
|
+ int fe_int_enable;
|
|
+ int rx_usage;
|
|
+ int dly_int_cfg;
|
|
+ int rx_base_ptr0;
|
|
+ int rx_max_cnt0;
|
|
+ int rx_calc_idx0;
|
|
+ int rx_drx_idx0;
|
|
+#if !defined (CONFIG_RAETH_QDMA)
|
|
+ int tx_usage;
|
|
+ int tx_base_ptr[4];
|
|
+ int tx_max_cnt[4];
|
|
+ int tx_ctx_idx[4];
|
|
+ int tx_dtx_idx[4];
|
|
+ int i;
|
|
+#endif
|
|
+
|
|
+ fe_int_enable = sysRegRead(FE_INT_ENABLE);
|
|
+ rx_usage = get_ring_usage(2,0);
|
|
+
|
|
+ dly_int_cfg = sysRegRead(DLY_INT_CFG);
|
|
+
|
|
+#if !defined (CONFIG_RAETH_QDMA)
|
|
+ tx_usage = get_ring_usage(1,0);
|
|
+
|
|
+ tx_base_ptr[0] = sysRegRead(TX_BASE_PTR0);
|
|
+ tx_max_cnt[0] = sysRegRead(TX_MAX_CNT0);
|
|
+ tx_ctx_idx[0] = sysRegRead(TX_CTX_IDX0);
|
|
+ tx_dtx_idx[0] = sysRegRead(TX_DTX_IDX0);
|
|
+
|
|
+ tx_base_ptr[1] = sysRegRead(TX_BASE_PTR1);
|
|
+ tx_max_cnt[1] = sysRegRead(TX_MAX_CNT1);
|
|
+ tx_ctx_idx[1] = sysRegRead(TX_CTX_IDX1);
|
|
+ tx_dtx_idx[1] = sysRegRead(TX_DTX_IDX1);
|
|
+
|
|
+ tx_base_ptr[2] = sysRegRead(TX_BASE_PTR2);
|
|
+ tx_max_cnt[2] = sysRegRead(TX_MAX_CNT2);
|
|
+ tx_ctx_idx[2] = sysRegRead(TX_CTX_IDX2);
|
|
+ tx_dtx_idx[2] = sysRegRead(TX_DTX_IDX2);
|
|
+
|
|
+ tx_base_ptr[3] = sysRegRead(TX_BASE_PTR3);
|
|
+ tx_max_cnt[3] = sysRegRead(TX_MAX_CNT3);
|
|
+ tx_ctx_idx[3] = sysRegRead(TX_CTX_IDX3);
|
|
+ tx_dtx_idx[3] = sysRegRead(TX_DTX_IDX3);
|
|
+#endif
|
|
+
|
|
+ rx_base_ptr0 = sysRegRead(RX_BASE_PTR0);
|
|
+ rx_max_cnt0 = sysRegRead(RX_MAX_CNT0);
|
|
+ rx_calc_idx0 = sysRegRead(RX_CALC_IDX0);
|
|
+ rx_drx_idx0 = sysRegRead(RX_DRX_IDX0);
|
|
+
|
|
+ seq_printf(s, "\n\nFE_INT_ENABLE : 0x%08x\n", fe_int_enable);
|
|
+#if !defined (CONFIG_RAETH_QDMA)
|
|
+ seq_printf(s, "TxRing PktCnt: %d/%d\n", tx_usage, NUM_TX_DESC);
|
|
+#endif
|
|
+ seq_printf(s, "RxRing PktCnt: %d/%d\n\n", rx_usage, NUM_RX_DESC);
|
|
+ seq_printf(s, "DLY_INT_CFG : 0x%08x\n", dly_int_cfg);
|
|
+
|
|
+#if !defined (CONFIG_RAETH_QDMA)
|
|
+ for(i=0;i<4;i++) {
|
|
+ seq_printf(s, "TX_BASE_PTR%d : 0x%08x\n", i, tx_base_ptr[i]);
|
|
+ seq_printf(s, "TX_MAX_CNT%d : 0x%08x\n", i, tx_max_cnt[i]);
|
|
+ seq_printf(s, "TX_CTX_IDX%d : 0x%08x\n", i, tx_ctx_idx[i]);
|
|
+ seq_printf(s, "TX_DTX_IDX%d : 0x%08x\n", i, tx_dtx_idx[i]);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ seq_printf(s, "RX_BASE_PTR0 : 0x%08x\n", rx_base_ptr0);
|
|
+ seq_printf(s, "RX_MAX_CNT0 : 0x%08x\n", rx_max_cnt0);
|
|
+ seq_printf(s, "RX_CALC_IDX0 : 0x%08x\n", rx_calc_idx0);
|
|
+ seq_printf(s, "RX_DRX_IDX0 : 0x%08x\n", rx_drx_idx0);
|
|
+
|
|
+#if defined (CONFIG_ETHTOOL) && defined (CONFIG_RAETH_ROUTER)
|
|
+ seq_printf(s, "The current PHY address selected by ethtool is %d\n", get_current_phy_address());
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT2883) || defined(CONFIG_RALINK_RT3883)
|
|
+ seq_printf(s, "GDMA_RX_FCCNT1(0x%08x) : 0x%08x\n\n", GDMA_RX_FCCNT1, sysRegRead(GDMA_RX_FCCNT1));
|
|
+#endif
|
|
+}
|
|
+
|
|
+#if 0
|
|
+void dump_cp0(void)
|
|
+{
|
|
+ printk("CP0 Register dump --\n");
|
|
+ printk("CP0_INDEX\t: 0x%08x\n", read_32bit_cp0_register(CP0_INDEX));
|
|
+ printk("CP0_RANDOM\t: 0x%08x\n", read_32bit_cp0_register(CP0_RANDOM));
|
|
+ printk("CP0_ENTRYLO0\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYLO0));
|
|
+ printk("CP0_ENTRYLO1\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYLO1));
|
|
+ printk("CP0_CONF\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONF));
|
|
+ printk("CP0_CONTEXT\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONTEXT));
|
|
+ printk("CP0_PAGEMASK\t: 0x%08x\n", read_32bit_cp0_register(CP0_PAGEMASK));
|
|
+ printk("CP0_WIRED\t: 0x%08x\n", read_32bit_cp0_register(CP0_WIRED));
|
|
+ printk("CP0_INFO\t: 0x%08x\n", read_32bit_cp0_register(CP0_INFO));
|
|
+ printk("CP0_BADVADDR\t: 0x%08x\n", read_32bit_cp0_register(CP0_BADVADDR));
|
|
+ printk("CP0_COUNT\t: 0x%08x\n", read_32bit_cp0_register(CP0_COUNT));
|
|
+ printk("CP0_ENTRYHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYHI));
|
|
+ printk("CP0_COMPARE\t: 0x%08x\n", read_32bit_cp0_register(CP0_COMPARE));
|
|
+ printk("CP0_STATUS\t: 0x%08x\n", read_32bit_cp0_register(CP0_STATUS));
|
|
+ printk("CP0_CAUSE\t: 0x%08x\n", read_32bit_cp0_register(CP0_CAUSE));
|
|
+ printk("CP0_EPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_EPC));
|
|
+ printk("CP0_PRID\t: 0x%08x\n", read_32bit_cp0_register(CP0_PRID));
|
|
+ printk("CP0_CONFIG\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONFIG));
|
|
+ printk("CP0_LLADDR\t: 0x%08x\n", read_32bit_cp0_register(CP0_LLADDR));
|
|
+ printk("CP0_WATCHLO\t: 0x%08x\n", read_32bit_cp0_register(CP0_WATCHLO));
|
|
+ printk("CP0_WATCHHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_WATCHHI));
|
|
+ printk("CP0_XCONTEXT\t: 0x%08x\n", read_32bit_cp0_register(CP0_XCONTEXT));
|
|
+ printk("CP0_FRAMEMASK\t: 0x%08x\n", read_32bit_cp0_register(CP0_FRAMEMASK));
|
|
+ printk("CP0_DIAGNOSTIC\t: 0x%08x\n", read_32bit_cp0_register(CP0_DIAGNOSTIC));
|
|
+ printk("CP0_DEBUG\t: 0x%08x\n", read_32bit_cp0_register(CP0_DEBUG));
|
|
+ printk("CP0_DEPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_DEPC));
|
|
+ printk("CP0_PERFORMANCE\t: 0x%08x\n", read_32bit_cp0_register(CP0_PERFORMANCE));
|
|
+ printk("CP0_ECC\t: 0x%08x\n", read_32bit_cp0_register(CP0_ECC));
|
|
+ printk("CP0_CACHEERR\t: 0x%08x\n", read_32bit_cp0_register(CP0_CACHEERR));
|
|
+ printk("CP0_TAGLO\t: 0x%08x\n", read_32bit_cp0_register(CP0_TAGLO));
|
|
+ printk("CP0_TAGHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_TAGHI));
|
|
+ printk("CP0_ERROREPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_ERROREPC));
|
|
+ printk("CP0_DESAVE\t: 0x%08x\n\n", read_32bit_cp0_register(CP0_DESAVE));
|
|
+}
|
|
+#endif
|
|
+
|
|
+struct proc_dir_entry *procRegDir;
|
|
+static struct proc_dir_entry *procGmac, *procSysCP0, *procTxRing, *procRxRing, *procSkbFree;
|
|
+#if defined(CONFIG_PSEUDO_SUPPORT) && defined(CONFIG_ETHTOOL)
|
|
+static struct proc_dir_entry *procGmac2;
|
|
+#endif
|
|
+#if defined(CONFIG_USER_SNMPD)
|
|
+static struct proc_dir_entry *procRaSnmp;
|
|
+#endif
|
|
+#if defined(CONFIG_RAETH_TSO)
|
|
+static struct proc_dir_entry *procNumOfTxd, *procTsoLen;
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_LRO)
|
|
+static struct proc_dir_entry *procLroStats;
|
|
+#endif
|
|
+#if defined(CONFIG_RAETH_HW_LRO) || defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+static struct proc_dir_entry *procRxRing1, *procRxRing2, *procRxRing3;
|
|
+static struct proc_dir_entry *procHwLroStats, *procHwLroAutoTlb;
|
|
+static const HWLRO_DBG_FUNC hw_lro_dbg_func[] =
|
|
+{
|
|
+ [0] = hwlro_agg_cnt_ctrl,
|
|
+ [1] = hwlro_agg_time_ctrl,
|
|
+ [2] = hwlro_age_time_ctrl,
|
|
+ [3] = hwlro_pkt_int_alpha_ctrl,
|
|
+ [4] = hwlro_threshold_ctrl,
|
|
+ [5] = hwlro_fix_setting_switch_ctrl,
|
|
+};
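+/*
+ * These handlers are dispatched from HwLroAutoTlbWrite() further below: the
+ * proc write parses two hex tokens "<index> <value>" and calls
+ * hw_lro_dbg_func[index](index, value); e.g. writing "1 1c" would invoke
+ * hwlro_agg_time_ctrl(1, 0x1c) and set the aggregation time of all three
+ * LRO rings (values here are illustrative only).
+ */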
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+#if defined (TASKLET_WORKQUEUE_SW)
|
|
+static struct proc_dir_entry *procSCHE;
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+static struct proc_dir_entry *procPdmaDvt;
|
|
+
|
|
+static const PDMA_DBG_FUNC pdma_dvt_dbg_func[] =
|
|
+{
|
|
+ [0] = pdma_dvt_show_ctrl,
|
|
+ [1] = pdma_dvt_test_rx_ctrl,
|
|
+ [2] = pdma_dvt_test_tx_ctrl,
|
|
+ [3] = pdma_dvt_test_debug_ctrl,
|
|
+ [4] = pdma_dvt_test_lro_ctrl,
|
|
+};
|
|
+#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+
|
|
+int RegReadMain(struct seq_file *seq, void *v)
|
|
+{
|
|
+ dump_reg(seq);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void *seq_SkbFree_start(struct seq_file *seq, loff_t *pos)
|
|
+{
|
|
+ if (*pos < NUM_TX_DESC)
|
|
+ return pos;
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static void *seq_SkbFree_next(struct seq_file *seq, void *v, loff_t *pos)
|
|
+{
|
|
+ (*pos)++;
|
|
+ if (*pos >= NUM_TX_DESC)
|
|
+ return NULL;
|
|
+ return pos;
|
|
+}
|
|
+
|
|
+static void seq_SkbFree_stop(struct seq_file *seq, void *v)
|
|
+{
|
|
+ /* Nothing to do */
|
|
+}
|
|
+
|
|
+static int seq_SkbFree_show(struct seq_file *seq, void *v)
|
|
+{
|
|
+ int i = *(loff_t *) v;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev_raether);
|
|
+
|
|
+ seq_printf(seq, "%d: %08x\n",i, *(int *)&ei_local->skb_free[i]);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct seq_operations seq_skb_free_ops = {
|
|
+ .start = seq_SkbFree_start,
|
|
+ .next = seq_SkbFree_next,
|
|
+ .stop = seq_SkbFree_stop,
|
|
+ .show = seq_SkbFree_show
|
|
+};
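+/*
+ * Standard seq_file iterator: ->start()/->next() walk *pos over the
+ * NUM_TX_DESC slots and ->show() prints one skb_free[] entry per line;
+ * seq_read() (wired up in skb_free_fops below) drives the iteration.
+ */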
|
|
+
|
|
+static int skb_free_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return seq_open(file, &seq_skb_free_ops);
|
|
+}
|
|
+
|
|
+static const struct file_operations skb_free_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = skb_free_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = seq_release
|
|
+};
|
|
+
|
|
+#if defined (CONFIG_RAETH_QDMA)
|
|
+int QDMARead(struct seq_file *seq, void *v)
|
|
+{
|
|
+ unsigned int temp,i;
|
|
+ unsigned int sw_fq, hw_fq;
|
|
+ unsigned int min_en, min_rate, max_en, max_rate, sch, weight;
|
|
+ unsigned int queue, tx_des_cnt, hw_resv, sw_resv, queue_head, queue_tail;
|
|
+ struct net_device *dev = dev_raether;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+
|
|
+ seq_printf(seq, "==== General Information ====\n");
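+ /* QDMA_FQ_CNT packs two free-descriptor counters: bits [31:16] hold the
+  * free SW TXDs and bits [15:0] the free HW TXDs, as decoded below. */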
|
|
+ temp = sysRegRead(QDMA_FQ_CNT);
|
|
+ sw_fq = (temp&0xFFFF0000)>>16;
|
|
+ hw_fq = (temp&0x0000FFFF);
|
|
+ seq_printf(seq, "SW TXD: %d/%d; HW TXD: %d/%d\n", sw_fq, NUM_TX_DESC, hw_fq,NUM_QDMA_PAGE);
|
|
+ seq_printf(seq, "SW TXD virtual start address: 0x%08x\n", ei_local->txd_pool);
|
|
+ seq_printf(seq, "HW TXD virtual start address: 0x%08x\n\n", free_head);
|
|
+
|
|
+ seq_printf(seq, "==== Scheduler Information ====\n");
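+ /* QDMA_TX_SCH encodes each shaper rate as a 7-bit mantissa times a
+  * power-of-ten exponent (SCH1: enable bit 11, mantissa bits [10:4],
+  * exponent bits [3:0]; SCH2: the same fields shifted up by 16), so e.g.
+  * mantissa 25 with exponent 3 means 25000 Kbps. */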
|
|
+ temp = sysRegRead(QDMA_TX_SCH);
|
|
+ max_en = (temp&0x00000800)>>11;
|
|
+ max_rate = (temp&0x000007F0)>>4;
|
|
+ for(i=0;i<(temp&0x0000000F);i++)
|
|
+ max_rate *= 10;
|
|
+ seq_printf(seq, "SCH1 rate control:%d. Rate is %dKbps.\n", max_en, max_rate);
|
|
+ max_en = (temp&0x08000000)>>27;
|
|
+ max_rate = (temp&0x07F00000)>>20;
|
|
+ for(i=0;i<((temp&0x000F0000)>>16);i++)
|
|
+ max_rate *= 10;
|
|
+ seq_printf(seq, "SCH2 rate control:%d. Rate is %dKbps.\n\n", max_en, max_rate);
|
|
+
|
|
+ seq_printf(seq, "==== Physical Queue Information ====\n");
|
|
+ for (queue = 0; queue < 16; queue++){
|
|
+ temp = sysRegRead(QTX_CFG_0 + 0x10 * queue);
|
|
+ tx_des_cnt = (temp & 0xffff0000) >> 16;
|
|
+ hw_resv = (temp & 0xff00) >> 8;
|
|
+ sw_resv = (temp & 0xff);
|
|
+ temp = sysRegRead(QTX_CFG_0 +(0x10 * queue) + 0x4);
|
|
+ sch = (temp >> 31) + 1 ;
|
|
+ min_en = (temp & 0x8000000) >> 27;
|
|
+ min_rate = (temp & 0x7f00000) >> 20;
|
|
+ for (i = 0; i< (temp & 0xf0000) >> 16; i++)
|
|
+ min_rate *= 10;
|
|
+ max_en = (temp & 0x800) >> 11;
|
|
+ max_rate = (temp & 0x7f0) >> 4;
|
|
+ for (i = 0; i< (temp & 0xf); i++)
|
|
+ max_rate *= 10;
|
|
+ weight = (temp & 0xf000) >> 12;
|
|
+ queue_head = sysRegRead(QTX_HEAD_0 + 0x10 * queue);
|
|
+ queue_tail = sysRegRead(QTX_TAIL_0 + 0x10 * queue);
|
|
+
|
|
+ seq_printf(seq, "Queue#%d Information:\n", queue);
|
|
+ seq_printf(seq, "%d packets in the queue; head address is 0x%08x, tail address is 0x%08x.\n", tx_des_cnt, queue_head, queue_tail);
|
|
+ seq_printf(seq, "HW_RESV: %d; SW_RESV: %d; SCH: %d; Weighting: %d\n", hw_resv, sw_resv, sch, weight);
|
|
+ seq_printf(seq, "Min_Rate_En is %d, Min_Rate is %dKbps; Max_Rate_En is %d, Max_Rate is %dKbps.\n\n", min_en, min_rate, max_en, max_rate);
|
|
+ }
|
|
+#if defined (CONFIG_ARCH_MT7623) && defined(CONFIG_HW_SFQ)
|
|
+ seq_printf(seq, "==== Virtual Queue Information ====\n");
|
|
+ seq_printf(seq, "VQTX_TB_BASE_0:0x%08x;VQTX_TB_BASE_1:0x%08x;VQTX_TB_BASE_2:0x%08x;VQTX_TB_BASE_3:0x%08x\n", \
|
|
+ sfq0, sfq1, sfq2, sfq3);
|
|
+ temp = sysRegRead(VQTX_NUM);
|
|
+ seq_printf(seq, "VQTX_NUM_0:0x%01x;VQTX_NUM_1:0x%01x;VQTX_NUM_2:0x%01x;VQTX_NUM_3:0x%01x\n\n", \
|
|
+ temp&0xF, (temp&0xF0)>>4, (temp&0xF00)>>8, (temp&0xF000)>>12);
|
|
+
|
|
+#endif
|
|
+
|
|
+ seq_printf(seq, "==== Flow Control Information ====\n");
|
|
+ temp = sysRegRead(QDMA_FC_THRES);
|
|
+ seq_printf(seq, "SW_DROP_EN:%x; SW_DROP_FFA:%d; SW_DROP_MODE:%d\n", \
|
|
+ (temp&0x1000000)>>24, (temp&0x2000000)>>25, (temp&0x30000000)>>28);
+ seq_printf(seq, "HW_DROP_EN:%x; HW_DROP_FFA:%d; HW_DROP_MODE:%d\n", \
+ (temp&0x10000)>>16, (temp&0x20000)>>17, (temp&0x300000)>>20);
|
|
+#if defined (CONFIG_ARCH_MT7623)
|
|
+ seq_printf(seq, "SW_DROP_FSTVQ_MODE:%d;SW_DROP_FSTVQ:%d\n", \
|
|
+ (temp&0xC0000000)>>30, (temp&0x08000000)>>27);
|
|
+ seq_printf(seq, "HW_DROP_FSTVQ_MODE:%d;HW_DROP_FSTVQ:%d\n", \
|
|
+ (temp&0xC00000)>>22, (temp&0x080000)>>19);
|
|
+#endif
|
|
+
|
|
+ seq_printf(seq, "\n==== FSM Information ====\n");
|
|
+ temp = sysRegRead(QDMA_DMA);
|
|
+#if defined (CONFIG_ARCH_MT7623)
|
|
+ seq_printf(seq, "VQTB_FSM:0x%01x\n", (temp&0x0F000000)>>24);
|
|
+#endif
|
|
+ seq_printf(seq, "FQ_FSM:0x%01x\n", (temp&0x000F0000)>>16);
|
|
+ seq_printf(seq, "TX_FSM:0x%01x\n", (temp&0x00000F00)>>8);
|
|
+ seq_printf(seq, "RX_FSM:0x%01x\n\n", (temp&0x0000000f));
|
|
+
|
|
+ seq_printf(seq, "==== M2Q Information ====\n");
|
|
+ for (i = 0; i < 64; i+=8){
|
|
+ seq_printf(seq, " (%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)(%d,%d)\n",
|
|
+ i, M2Q_table[i], i+1, M2Q_table[i+1], i+2, M2Q_table[i+2], i+3, M2Q_table[i+3],
|
|
+ i+4, M2Q_table[i+4], i+5, M2Q_table[i+5], i+6, M2Q_table[i+6], i+7, M2Q_table[i+7]);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+}
|
|
+
|
|
+static int qdma_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, QDMARead, NULL);
|
|
+}
|
|
+
|
|
+static const struct file_operations qdma_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = qdma_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+#endif
|
|
+
|
|
+int TxRingRead(struct seq_file *seq, void *v)
|
|
+{
|
|
+ END_DEVICE *ei_local = netdev_priv(dev_raether);
|
|
+ struct PDMA_txdesc *tx_ring;
|
|
+ int i = 0;
|
|
+
|
|
+ tx_ring = kmalloc(sizeof(struct PDMA_txdesc) * NUM_TX_DESC, GFP_KERNEL);
|
|
+ if(tx_ring==NULL){
|
|
+ seq_printf(seq, " allocate temp tx_ring fail.\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ for (i=0; i < NUM_TX_DESC; i++) {
|
|
+ tx_ring[i] = ei_local->tx_ring0[i];
|
|
+ }
|
|
+
|
|
+ for (i=0; i < NUM_TX_DESC; i++) {
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ seq_printf(seq, "%d: %08x %08x %08x %08x %08x %08x %08x %08x\n",i, *(int *)&tx_ring[i].txd_info1,
|
|
+ *(int *)&tx_ring[i].txd_info2, *(int *)&tx_ring[i].txd_info3,
|
|
+ *(int *)&tx_ring[i].txd_info4, *(int *)&tx_ring[i].txd_info5,
|
|
+ *(int *)&tx_ring[i].txd_info6, *(int *)&tx_ring[i].txd_info7,
|
|
+ *(int *)&tx_ring[i].txd_info8);
|
|
+#else
|
|
+ seq_printf(seq, "%d: %08x %08x %08x %08x\n",i, *(int *)&tx_ring[i].txd_info1, *(int *)&tx_ring[i].txd_info2,
|
|
+ *(int *)&tx_ring[i].txd_info3, *(int *)&tx_ring[i].txd_info4);
|
|
+#endif
|
|
+ }
|
|
+
|
|
+ kfree(tx_ring);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int tx_ring_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+#if !defined (CONFIG_RAETH_QDMA)
|
|
+ return single_open(file, TxRingRead, NULL);
|
|
+#else
|
|
+ return single_open(file, QDMARead, NULL);
|
|
+#endif
|
|
+}
|
|
+
|
|
+static const struct file_operations tx_ring_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = tx_ring_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+
|
|
+int RxRingRead(struct seq_file *seq, void *v)
|
|
+{
|
|
+ END_DEVICE *ei_local = netdev_priv(dev_raether);
|
|
+ struct PDMA_rxdesc *rx_ring;
|
|
+ int i = 0;
|
|
+
|
|
+ rx_ring = kmalloc(sizeof(struct PDMA_rxdesc) * NUM_RX_DESC, GFP_KERNEL);
|
|
+ if(rx_ring==NULL){
|
|
+ seq_printf(seq, " allocate temp rx_ring fail.\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ for (i=0; i < NUM_RX_DESC; i++) {
|
|
+ memcpy(&rx_ring[i], &ei_local->rx_ring0[i], sizeof(struct PDMA_rxdesc));
|
|
+ }
|
|
+
|
|
+ for (i=0; i < NUM_RX_DESC; i++) {
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ seq_printf(seq, "%d: %08x %08x %08x %08x %08x %08x %08x %08x\n",i, *(int *)&rx_ring[i].rxd_info1,
|
|
+ *(int *)&rx_ring[i].rxd_info2, *(int *)&rx_ring[i].rxd_info3,
|
|
+ *(int *)&rx_ring[i].rxd_info4, *(int *)&rx_ring[i].rxd_info5,
|
|
+ *(int *)&rx_ring[i].rxd_info6, *(int *)&rx_ring[i].rxd_info7,
|
|
+ *(int *)&rx_ring[i].rxd_info8);
|
|
+#else
|
|
+ seq_printf(seq, "%d: %08x %08x %08x %08x\n",i, *(int *)&rx_ring[i].rxd_info1, *(int *)&rx_ring[i].rxd_info2,
|
|
+ *(int *)&rx_ring[i].rxd_info3, *(int *)&rx_ring[i].rxd_info4);
|
|
+#endif
|
|
+ }
|
|
+
|
|
+ kfree(rx_ring);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int rx_ring_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, RxRingRead, NULL);
|
|
+}
|
|
+
|
|
+static const struct file_operations rx_ring_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = rx_ring_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+
|
|
+#if defined(CONFIG_RAETH_HW_LRO) || defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+int RxLRORingRead(struct seq_file *seq, void *v, struct PDMA_rxdesc *rx_ring_p)
|
|
+{
|
|
+ struct PDMA_rxdesc *rx_ring;
|
|
+ int i = 0;
|
|
+
|
|
+ rx_ring = kmalloc(sizeof(struct PDMA_rxdesc) * NUM_LRO_RX_DESC, GFP_KERNEL);
|
|
+ if(rx_ring==NULL){
|
|
+ seq_printf(seq, " allocate temp rx_ring fail.\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ for (i=0; i < NUM_LRO_RX_DESC; i++) {
|
|
+ memcpy(&rx_ring[i], &rx_ring_p[i], sizeof(struct PDMA_rxdesc));
|
|
+ }
|
|
+
|
|
+ for (i=0; i < NUM_LRO_RX_DESC; i++) {
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ seq_printf(seq, "%d: %08x %08x %08x %08x %08x %08x %08x %08x\n",i, *(int *)&rx_ring[i].rxd_info1,
|
|
+ *(int *)&rx_ring[i].rxd_info2, *(int *)&rx_ring[i].rxd_info3,
|
|
+ *(int *)&rx_ring[i].rxd_info4, *(int *)&rx_ring[i].rxd_info5,
|
|
+ *(int *)&rx_ring[i].rxd_info6, *(int *)&rx_ring[i].rxd_info7,
|
|
+ *(int *)&rx_ring[i].rxd_info8);
|
|
+#else
|
|
+ seq_printf(seq, "%d: %08x %08x %08x %08x\n",i, *(int *)&rx_ring[i].rxd_info1, *(int *)&rx_ring[i].rxd_info2,
|
|
+ *(int *)&rx_ring[i].rxd_info3, *(int *)&rx_ring[i].rxd_info4);
|
|
+#endif
|
|
+ }
|
|
+
|
|
+ kfree(rx_ring);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int RxRing1Read(struct seq_file *seq, void *v)
|
|
+{
|
|
+ END_DEVICE *ei_local = netdev_priv(dev_raether);
|
|
+ RxLRORingRead(seq, v, ei_local->rx_ring1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int RxRing2Read(struct seq_file *seq, void *v)
|
|
+{
|
|
+ END_DEVICE *ei_local = netdev_priv(dev_raether);
|
|
+ RxLRORingRead(seq, v, ei_local->rx_ring2);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int RxRing3Read(struct seq_file *seq, void *v)
|
|
+{
|
|
+ END_DEVICE *ei_local = netdev_priv(dev_raether);
|
|
+ RxLRORingRead(seq, v, ei_local->rx_ring3);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int rx_ring1_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, RxRing1Read, NULL);
|
|
+}
|
|
+
|
|
+static int rx_ring2_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, RxRing2Read, NULL);
|
|
+}
|
|
+
|
|
+static int rx_ring3_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, RxRing3Read, NULL);
|
|
+}
|
|
+
|
|
+static const struct file_operations rx_ring1_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = rx_ring1_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+
|
|
+static const struct file_operations rx_ring2_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = rx_ring2_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+
|
|
+static const struct file_operations rx_ring3_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = rx_ring3_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+
|
|
+#if defined(CONFIG_RAETH_TSO)
|
|
+
|
|
+int NumOfTxdUpdate(int num_of_txd)
|
|
+{
|
|
+
|
|
+ txd_cnt[num_of_txd]++;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void *seq_TsoTxdNum_start(struct seq_file *seq, loff_t *pos)
|
|
+{
|
|
+ seq_printf(seq, "TXD | Count\n");
|
|
+ if (*pos < (MAX_SKB_FRAGS/2 + 1))
|
|
+ return pos;
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static void *seq_TsoTxdNum_next(struct seq_file *seq, void *v, loff_t *pos)
|
|
+{
|
|
+ (*pos)++;
|
|
+ if (*pos >= (MAX_SKB_FRAGS/2 + 1))
|
|
+ return NULL;
|
|
+ return pos;
|
|
+}
|
|
+
|
|
+static void seq_TsoTxdNum_stop(struct seq_file *seq, void *v)
|
|
+{
|
|
+ /* Nothing to do */
|
|
+}
|
|
+
|
|
+static int seq_TsoTxdNum_show(struct seq_file *seq, void *v)
|
|
+{
|
|
+ int i = *(loff_t *) v;
|
|
+ seq_printf(seq, "%d: %d\n",i , txd_cnt[i]);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+ssize_t NumOfTxdWrite(struct file *file, const char __user *buffer,
|
|
+ size_t count, loff_t *data)
|
|
+{
|
|
+ memset(txd_cnt, 0, sizeof(txd_cnt));
|
|
+ printk("clear txd cnt table\n");
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
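+/*
+ * tso_cnt[] is a histogram of TSO payload sizes in 5000-byte bins: index i
+ * roughly counts lengths in (i*5000, (i+1)*5000], with everything above
+ * 70000 landing in the last bin.  Writing anything to the proc entry
+ * (TsoLenWrite below) clears the table.
+ */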
|
|
+int TsoLenUpdate(int tso_len)
|
|
+{
|
|
+
|
|
+ if(tso_len > 70000) {
|
|
+ tso_cnt[14]++;
|
|
+ }else if(tso_len > 65000) {
|
|
+ tso_cnt[13]++;
|
|
+ }else if(tso_len > 60000) {
|
|
+ tso_cnt[12]++;
|
|
+ }else if(tso_len > 55000) {
|
|
+ tso_cnt[11]++;
|
|
+ }else if(tso_len > 50000) {
|
|
+ tso_cnt[10]++;
|
|
+ }else if(tso_len > 45000) {
|
|
+ tso_cnt[9]++;
|
|
+ }else if(tso_len > 40000) {
|
|
+ tso_cnt[8]++;
|
|
+ }else if(tso_len > 35000) {
|
|
+ tso_cnt[7]++;
|
|
+ }else if(tso_len > 30000) {
|
|
+ tso_cnt[6]++;
|
|
+ }else if(tso_len > 25000) {
|
|
+ tso_cnt[5]++;
|
|
+ }else if(tso_len > 20000) {
|
|
+ tso_cnt[4]++;
|
|
+ }else if(tso_len > 15000) {
|
|
+ tso_cnt[3]++;
|
|
+ }else if(tso_len > 10000) {
|
|
+ tso_cnt[2]++;
|
|
+ }else if(tso_len > 5000) {
|
|
+ tso_cnt[1]++;
|
|
+ }else {
|
|
+ tso_cnt[0]++;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+ssize_t TsoLenWrite(struct file *file, const char __user *buffer,
|
|
+ size_t count, loff_t *data)
|
|
+{
|
|
+ memset(tso_cnt, 0, sizeof(tso_cnt));
|
|
+ printk("clear tso cnt table\n");
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
+static void *seq_TsoLen_start(struct seq_file *seq, loff_t *pos)
|
|
+{
|
|
+ seq_printf(seq, " Length | Count\n");
|
|
+ if (*pos < 15)
|
|
+ return pos;
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static void *seq_TsoLen_next(struct seq_file *seq, void *v, loff_t *pos)
|
|
+{
|
|
+ (*pos)++;
|
|
+ if (*pos >= 15)
|
|
+ return NULL;
|
|
+ return pos;
|
|
+}
|
|
+
|
|
+static void seq_TsoLen_stop(struct seq_file *seq, void *v)
|
|
+{
|
|
+ /* Nothing to do */
|
|
+}
|
|
+
|
|
+static int seq_TsoLen_show(struct seq_file *seq, void *v)
|
|
+{
|
|
+ int i = *(loff_t *) v;
|
|
+
|
|
+ seq_printf(seq, "%d~%d: %d\n", i*5000, (i+1)*5000, tso_cnt[i]);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct seq_operations seq_tso_txd_num_ops = {
|
|
+ .start = seq_TsoTxdNum_start,
|
|
+ .next = seq_TsoTxdNum_next,
|
|
+ .stop = seq_TsoTxdNum_stop,
|
|
+ .show = seq_TsoTxdNum_show
|
|
+};
|
|
+
|
|
+static int tso_txd_num_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return seq_open(file, &seq_tso_txd_num_ops);
|
|
+}
|
|
+
|
|
+static struct file_operations tso_txd_num_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = tso_txd_num_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .write = NumOfTxdWrite,
|
|
+ .release = seq_release
|
|
+};
|
|
+
|
|
+static const struct seq_operations seq_tso_len_ops = {
|
|
+ .start = seq_TsoLen_start,
|
|
+ .next = seq_TsoLen_next,
|
|
+ .stop = seq_TsoLen_stop,
|
|
+ .show = seq_TsoLen_show
|
|
+};
|
|
+
|
|
+static int tso_len_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return seq_open(file, &seq_tso_len_ops);
|
|
+}
|
|
+
|
|
+static struct file_operations tso_len_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = tso_len_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .write = TsoLenWrite,
|
|
+ .release = seq_release
|
|
+};
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_LRO)
|
|
+static int LroLenUpdate(struct net_lro_desc *lro_desc)
|
|
+{
|
|
+ int len_idx;
|
|
+
|
|
+ if(lro_desc->ip_tot_len > 65000) {
|
|
+ len_idx = 13;
|
|
+ }else if(lro_desc->ip_tot_len > 60000) {
|
|
+ len_idx = 12;
|
|
+ }else if(lro_desc->ip_tot_len > 55000) {
|
|
+ len_idx = 11;
|
|
+ }else if(lro_desc->ip_tot_len > 50000) {
|
|
+ len_idx = 10;
|
|
+ }else if(lro_desc->ip_tot_len > 45000) {
|
|
+ len_idx = 9;
|
|
+ }else if(lro_desc->ip_tot_len > 40000) {
|
|
+ len_idx = 8;
|
|
+ }else if(lro_desc->ip_tot_len > 35000) {
|
|
+ len_idx = 7;
|
|
+ }else if(lro_desc->ip_tot_len > 30000) {
|
|
+ len_idx = 6;
|
|
+ }else if(lro_desc->ip_tot_len > 25000) {
|
|
+ len_idx = 5;
|
|
+ }else if(lro_desc->ip_tot_len > 20000) {
|
|
+ len_idx = 4;
|
|
+ }else if(lro_desc->ip_tot_len > 15000) {
|
|
+ len_idx = 3;
|
|
+ }else if(lro_desc->ip_tot_len > 10000) {
|
|
+ len_idx = 2;
|
|
+ }else if(lro_desc->ip_tot_len > 5000) {
|
|
+ len_idx = 1;
|
|
+ }else {
|
|
+ len_idx = 0;
|
|
+ }
|
|
+
|
|
+ return len_idx;
|
|
+}
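+/*
+ * Bookkeeping for the software-LRO statistics (as the logic below appears to
+ * work): when all_flushed is set the driver itself flushed every active
+ * descriptor, so the per-descriptor aggregation counts go into
+ * lro_flush_cnt[]/lro_len_cnt1[]; otherwise deltas in lro_mgr->stats are used
+ * to attribute flushes to the kernel inet_lro core via lro_stats_cnt[], with
+ * aggregated[] remembering each descriptor's count between calls.
+ */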
|
|
+int LroStatsUpdate(struct net_lro_mgr *lro_mgr, bool all_flushed)
|
|
+{
|
|
+ struct net_lro_desc *tmp;
|
|
+ int len_idx;
|
|
+ int i, j;
|
|
+
|
|
+ if (all_flushed) {
|
|
+ for (i=0; i< MAX_DESC; i++) {
|
|
+ tmp = & lro_mgr->lro_arr[i];
|
|
+ if (tmp->pkt_aggr_cnt !=0) {
|
|
+ for(j=0; j<=MAX_AGGR; j++) {
|
|
+ if(tmp->pkt_aggr_cnt == j) {
|
|
+ lro_flush_cnt[j]++;
|
|
+ }
|
|
+ }
|
|
+ len_idx = LroLenUpdate(tmp);
|
|
+ lro_len_cnt1[len_idx]++;
|
|
+ tot_called1++;
|
|
+ }
|
|
+ aggregated[i] = 0;
|
|
+ }
|
|
+ } else {
|
|
+ if (lro_flushed != lro_mgr->stats.flushed) {
|
|
+ if (lro_aggregated != lro_mgr->stats.aggregated) {
|
|
+ for (i=0; i<MAX_DESC; i++) {
|
|
+ tmp = &lro_mgr->lro_arr[i];
|
|
+ if ((aggregated[i]!= tmp->pkt_aggr_cnt)
|
|
+ && (tmp->pkt_aggr_cnt == 0)) {
|
|
+ aggregated[i] ++;
|
|
+ for (j=0; j<=MAX_AGGR; j++) {
|
|
+ if (aggregated[i] == j) {
|
|
+ lro_stats_cnt[j] ++;
|
|
+ }
|
|
+ }
|
|
+ aggregated[i] = 0;
|
|
+ //len_idx = LroLenUpdate(tmp);
|
|
+ //lro_len_cnt2[len_idx]++;
|
|
+ tot_called2++;
|
|
+ }
|
|
+ }
|
|
+ } else {
|
|
+ for (i=0; i<MAX_DESC; i++) {
|
|
+ tmp = &lro_mgr->lro_arr[i];
|
|
+ if ((aggregated[i] != 0) && (tmp->pkt_aggr_cnt==0)) {
|
|
+ for (j=0; j<=MAX_AGGR; j++) {
|
|
+ if (aggregated[i] == j) {
|
|
+ lro_stats_cnt[j] ++;
|
|
+ }
|
|
+ }
|
|
+ aggregated[i] = 0;
|
|
+ //len_idx = LroLenUpdate(tmp);
|
|
+ //lro_len_cnt2[len_idx]++;
|
|
+ force_flush ++;
|
|
+ tot_called2++;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ } else {
|
|
+ if (lro_aggregated != lro_mgr->stats.aggregated) {
|
|
+ for (i=0; i<MAX_DESC; i++) {
|
|
+ tmp = &lro_mgr->lro_arr[i];
|
|
+ if (tmp->active) {
|
|
+ if (aggregated[i] != tmp->pkt_aggr_cnt)
|
|
+ aggregated[i] = tmp->pkt_aggr_cnt;
|
|
+ } else
|
|
+ aggregated[i] = 0;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ }
|
|
+
|
|
+ lro_aggregated = lro_mgr->stats.aggregated;
|
|
+ lro_flushed = lro_mgr->stats.flushed;
|
|
+ lro_nodesc = lro_mgr->stats.no_desc;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+}
|
|
+
|
|
+
|
|
+ssize_t LroStatsWrite(struct file *file, const char __user *buffer,
|
|
+ size_t count, loff_t *data)
|
|
+{
|
|
+ memset(lro_stats_cnt, 0, sizeof(lro_stats_cnt));
|
|
+ memset(lro_flush_cnt, 0, sizeof(lro_flush_cnt));
|
|
+ memset(lro_len_cnt1, 0, sizeof(lro_len_cnt1));
|
|
+ //memset(lro_len_cnt2, 0, sizeof(lro_len_cnt2));
|
|
+ memset(aggregated, 0, sizeof(aggregated));
|
|
+ lro_aggregated = 0;
|
|
+ lro_flushed = 0;
|
|
+ lro_nodesc = 0;
|
|
+ force_flush = 0;
|
|
+ tot_called1 = 0;
|
|
+ tot_called2 = 0;
|
|
+ printk("clear lro cnt table\n");
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
+int LroStatsRead(struct seq_file *seq, void *v)
|
|
+{
|
|
+ int i;
|
|
+ int tot_cnt=0;
|
|
+ int tot_aggr=0;
|
|
+ int ave_aggr=0;
|
|
+
|
|
+ seq_printf(seq, "LRO statistic dump:\n");
|
|
+ seq_printf(seq, "Cnt: Kernel | Driver\n");
|
|
+ for(i=0; i<=MAX_AGGR; i++) {
|
|
+ tot_cnt = tot_cnt + lro_stats_cnt[i] + lro_flush_cnt[i];
|
|
+ seq_printf(seq, " %d : %d %d\n", i, lro_stats_cnt[i], lro_flush_cnt[i]);
|
|
+ tot_aggr = tot_aggr + i * (lro_stats_cnt[i] + lro_flush_cnt[i]);
|
|
+ }
|
|
+ ave_aggr = (lro_flushed != 0) ? (lro_aggregated / lro_flushed) : 0;
|
|
+ seq_printf(seq, "Total aggregated pkt: %d\n", lro_aggregated);
|
|
+ seq_printf(seq, "Flushed pkt: %d %d\n", lro_flushed, force_flush);
|
|
+ seq_printf(seq, "Average flush cnt: %d\n", ave_aggr);
|
|
+ seq_printf(seq, "No descriptor pkt: %d\n\n\n", lro_nodesc);
|
|
+
|
|
+ seq_printf(seq, "Driver flush pkt len:\n");
|
|
+ seq_printf(seq, " Length | Count\n");
|
|
+ for(i=0; i<15; i++) {
|
|
+ seq_printf(seq, "%d~%d: %d\n", i*5000, (i+1)*5000, lro_len_cnt1[i]);
|
|
+ }
|
|
+ seq_printf(seq, "Kernel flush: %d; Driver flush: %d\n", tot_called2, tot_called1);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int lro_stats_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, LroStatsRead, NULL);
|
|
+}
|
|
+
|
|
+static struct file_operations lro_stats_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = lro_stats_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .write = LroStatsWrite,
|
|
+ .release = single_release
|
|
+};
|
|
+#endif
|
|
+
|
|
+int getnext(const char *src, int separator, char *dest)
|
|
+{
|
|
+ char *c;
|
|
+ int len;
|
|
+
|
|
+ if ( (src == NULL) || (dest == NULL) ) {
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ c = strchr(src, separator);
|
|
+ if (c == NULL) {
|
|
+ strcpy(dest, src);
|
|
+ return -1;
|
|
+ }
|
|
+ len = c - src;
|
|
+ strncpy(dest, src, len);
|
|
+ dest[len] = '\0';
|
|
+ return len + 1;
|
|
+}
|
|
+
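+/*
+ * Parses a dotted-quad string into a host-order u32, e.g.
+ * str_to_ip(&ip, "192.168.1.20") yields 0xc0a80114.  The last octet is
+ * parsed with base 0, so a hex form such as "0x14" is also accepted.
+ * Returns 0 on success and 1 on a parse error.
+ */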
|
|
+int str_to_ip(unsigned int *ip, const char *str)
|
|
+{
|
|
+ int len;
|
|
+ const char *ptr = str;
|
|
+ char buf[128];
|
|
+ unsigned char c[4];
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < 3; ++i) {
|
|
+ if ((len = getnext(ptr, '.', buf)) == -1) {
|
|
+ return 1; /* parse error */
|
|
+ }
|
|
+ c[i] = simple_strtoul(buf, NULL, 10);
|
|
+ ptr += len;
|
|
+ }
|
|
+ c[3] = simple_strtoul(ptr, NULL, 0);
|
|
+ *ip = (c[0]<<24) + (c[1]<<16) + (c[2]<<8) + c[3];
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#if defined(CONFIG_RAETH_HW_LRO)
|
|
+static int HwLroLenUpdate(unsigned int agg_size)
|
|
+{
|
|
+ int len_idx;
|
|
+
|
|
+ if(agg_size > 65000) {
|
|
+ len_idx = 13;
|
|
+ }else if(agg_size > 60000) {
|
|
+ len_idx = 12;
|
|
+ }else if(agg_size > 55000) {
|
|
+ len_idx = 11;
|
|
+ }else if(agg_size > 50000) {
|
|
+ len_idx = 10;
|
|
+ }else if(agg_size > 45000) {
|
|
+ len_idx = 9;
|
|
+ }else if(agg_size > 40000) {
|
|
+ len_idx = 8;
|
|
+ }else if(agg_size > 35000) {
|
|
+ len_idx = 7;
|
|
+ }else if(agg_size > 30000) {
|
|
+ len_idx = 6;
|
|
+ }else if(agg_size > 25000) {
|
|
+ len_idx = 5;
|
|
+ }else if(agg_size > 20000) {
|
|
+ len_idx = 4;
|
|
+ }else if(agg_size > 15000) {
|
|
+ len_idx = 3;
|
|
+ }else if(agg_size > 10000) {
|
|
+ len_idx = 2;
|
|
+ }else if(agg_size > 5000) {
|
|
+ len_idx = 1;
|
|
+ }else {
|
|
+ len_idx = 0;
|
|
+ }
|
|
+
|
|
+ return len_idx;
|
|
+}
|
|
+
|
|
+int HwLroStatsUpdate(unsigned int ring_num, unsigned int agg_cnt, unsigned int agg_size)
|
|
+{
|
|
+ if( (ring_num > 0) && (ring_num < 4) )
|
|
+ {
|
|
+ hw_lro_agg_size_cnt[ring_num-1][HwLroLenUpdate(agg_size)]++;
|
|
+ hw_lro_agg_num_cnt[ring_num-1][agg_cnt]++;
|
|
+ hw_lro_tot_flush_cnt[ring_num-1]++;
|
|
+ hw_lro_tot_agg_cnt[ring_num-1] += agg_cnt;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
|
|
+int HwLroFlushStatsUpdate(unsigned int ring_num, unsigned int flush_reason)
|
|
+{
|
|
+ if( (ring_num > 0) && (ring_num < 4) )
|
|
+ {
|
|
+#if 1
|
|
+ if ( (flush_reason & 0x7) == HW_LRO_AGG_FLUSH )
|
|
+ hw_lro_agg_flush_cnt[ring_num-1]++;
|
|
+ else if ( (flush_reason & 0x7) == HW_LRO_AGE_FLUSH )
|
|
+ hw_lro_age_flush_cnt[ring_num-1]++;
|
|
+ else if ( (flush_reason & 0x7) == HW_LRO_NOT_IN_SEQ_FLUSH )
|
|
+ hw_lro_seq_flush_cnt[ring_num-1]++;
|
|
+ else if ( (flush_reason & 0x7) == HW_LRO_TIMESTAMP_FLUSH )
|
|
+ hw_lro_timestamp_flush_cnt[ring_num-1]++;
|
|
+ else if ( (flush_reason & 0x7) == HW_LRO_NON_RULE_FLUSH )
|
|
+ hw_lro_norule_flush_cnt[ring_num-1]++;
|
|
+#else
|
|
+ if ( flush_reason & BIT(4) )
|
|
+ hw_lro_agg_flush_cnt[ring_num-1]++;
|
|
+ else if ( flush_reason & BIT(3) )
|
|
+ hw_lro_age_flush_cnt[ring_num-1]++;
|
|
+ else if ( flush_reason & BIT(2) )
|
|
+ hw_lro_seq_flush_cnt[ring_num-1]++;
|
|
+ else if ( flush_reason & BIT(1) )
|
|
+ hw_lro_timestamp_flush_cnt[ring_num-1]++;
|
|
+ else if ( flush_reason & BIT(0) )
|
|
+ hw_lro_norule_flush_cnt[ring_num-1]++;
|
|
+#endif
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+#endif /* CONFIG_RAETH_HW_LRO_REASON_DBG */
|
|
+
|
|
+ssize_t HwLroStatsWrite(struct file *file, const char __user *buffer,
|
|
+ size_t count, loff_t *data)
|
|
+{
|
|
+ memset(hw_lro_agg_num_cnt, 0, sizeof(hw_lro_agg_num_cnt));
|
|
+ memset(hw_lro_agg_size_cnt, 0, sizeof(hw_lro_agg_size_cnt));
|
|
+ memset(hw_lro_tot_agg_cnt, 0, sizeof(hw_lro_tot_agg_cnt));
|
|
+ memset(hw_lro_tot_flush_cnt, 0, sizeof(hw_lro_tot_flush_cnt));
|
|
+#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
|
|
+ memset(hw_lro_agg_flush_cnt, 0, sizeof(hw_lro_agg_flush_cnt));
|
|
+ memset(hw_lro_age_flush_cnt, 0, sizeof(hw_lro_age_flush_cnt));
|
|
+ memset(hw_lro_seq_flush_cnt, 0, sizeof(hw_lro_seq_flush_cnt));
|
|
+ memset(hw_lro_timestamp_flush_cnt, 0, sizeof(hw_lro_timestamp_flush_cnt));
|
|
+ memset(hw_lro_norule_flush_cnt, 0, sizeof(hw_lro_norule_flush_cnt));
|
|
+#endif /* CONFIG_RAETH_HW_LRO_REASON_DBG */
|
|
+
|
|
+ printk("clear hw lro cnt table\n");
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
+int HwLroStatsRead(struct seq_file *seq, void *v)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ seq_printf(seq, "HW LRO statistic dump:\n");
|
|
+
|
|
+ /* Agg number count */
|
|
+ seq_printf(seq, "Cnt: RING1 | RING2 | RING3 | Total\n");
|
|
+ for(i=0; i<=MAX_HW_LRO_AGGR; i++) {
|
|
+ seq_printf(seq, " %d : %d %d %d %d\n",
|
|
+ i, hw_lro_agg_num_cnt[0][i], hw_lro_agg_num_cnt[1][i], hw_lro_agg_num_cnt[2][i],
|
|
+ hw_lro_agg_num_cnt[0][i]+hw_lro_agg_num_cnt[1][i]+hw_lro_agg_num_cnt[2][i]);
|
|
+ }
|
|
+
|
|
+ /* Total agg count */
|
|
+ seq_printf(seq, "Total agg: RING1 | RING2 | RING3 | Total\n");
|
|
+ seq_printf(seq, " %d %d %d %d\n",
|
|
+ hw_lro_tot_agg_cnt[0], hw_lro_tot_agg_cnt[1], hw_lro_tot_agg_cnt[2],
|
|
+ hw_lro_tot_agg_cnt[0]+hw_lro_tot_agg_cnt[1]+hw_lro_tot_agg_cnt[2]);
|
|
+
|
|
+ /* Total flush count */
|
|
+ seq_printf(seq, "Total flush: RING1 | RING2 | RING3 | Total\n");
|
|
+ seq_printf(seq, " %d %d %d %d\n",
|
|
+ hw_lro_tot_flush_cnt[0], hw_lro_tot_flush_cnt[1], hw_lro_tot_flush_cnt[2],
|
|
+ hw_lro_tot_flush_cnt[0]+hw_lro_tot_flush_cnt[1]+hw_lro_tot_flush_cnt[2]);
|
|
+
|
|
+ /* Avg agg count */
|
|
+ seq_printf(seq, "Avg agg: RING1 | RING2 | RING3 | Total\n");
|
|
+ seq_printf(seq, " %d %d %d %d\n",
|
|
+ (hw_lro_tot_flush_cnt[0]) ? hw_lro_tot_agg_cnt[0]/hw_lro_tot_flush_cnt[0] : 0,
|
|
+ (hw_lro_tot_flush_cnt[1]) ? hw_lro_tot_agg_cnt[1]/hw_lro_tot_flush_cnt[1] : 0,
|
|
+ (hw_lro_tot_flush_cnt[2]) ? hw_lro_tot_agg_cnt[2]/hw_lro_tot_flush_cnt[2] : 0,
|
|
+ (hw_lro_tot_flush_cnt[0]+hw_lro_tot_flush_cnt[1]+hw_lro_tot_flush_cnt[2]) ? \
|
|
+ ((hw_lro_tot_agg_cnt[0]+hw_lro_tot_agg_cnt[1]+hw_lro_tot_agg_cnt[2])/(hw_lro_tot_flush_cnt[0]+hw_lro_tot_flush_cnt[1]+hw_lro_tot_flush_cnt[2])) : 0
|
|
+ );
|
|
+
|
|
+ /* Statistics of aggregation size counts */
|
|
+ seq_printf(seq, "HW LRO flush pkt len:\n");
|
|
+ seq_printf(seq, " Length | RING1 | RING2 | RING3 | Total\n");
|
|
+ for(i=0; i<15; i++) {
|
|
+ seq_printf(seq, "%d~%d: %d %d %d %d\n", i*5000, (i+1)*5000,
|
|
+ hw_lro_agg_size_cnt[0][i], hw_lro_agg_size_cnt[1][i], hw_lro_agg_size_cnt[2][i],
|
|
+ hw_lro_agg_size_cnt[0][i]+hw_lro_agg_size_cnt[1][i]+hw_lro_agg_size_cnt[2][i]);
|
|
+ }
|
|
+#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
|
|
+ seq_printf(seq, "Flush reason: RING1 | RING2 | RING3 | Total\n");
|
|
+ seq_printf(seq, "AGG timeout: %d %d %d %d\n",
|
|
+ hw_lro_agg_flush_cnt[0], hw_lro_agg_flush_cnt[1], hw_lro_agg_flush_cnt[2],
|
|
+ (hw_lro_agg_flush_cnt[0]+hw_lro_agg_flush_cnt[1]+hw_lro_agg_flush_cnt[2])
|
|
+ );
|
|
+ seq_printf(seq, "AGE timeout: %d %d %d %d\n",
|
|
+ hw_lro_age_flush_cnt[0], hw_lro_age_flush_cnt[1], hw_lro_age_flush_cnt[2],
|
|
+ (hw_lro_age_flush_cnt[0]+hw_lro_age_flush_cnt[1]+hw_lro_age_flush_cnt[2])
|
|
+ );
|
|
+ seq_printf(seq, "Not in-sequence: %d %d %d %d\n",
|
|
+ hw_lro_seq_flush_cnt[0], hw_lro_seq_flush_cnt[1], hw_lro_seq_flush_cnt[2],
|
|
+ (hw_lro_seq_flush_cnt[0]+hw_lro_seq_flush_cnt[1]+hw_lro_seq_flush_cnt[2])
|
|
+ );
|
|
+ seq_printf(seq, "Timestamp: %d %d %d %d\n",
|
|
+ hw_lro_timestamp_flush_cnt[0], hw_lro_timestamp_flush_cnt[1], hw_lro_timestamp_flush_cnt[2],
|
|
+ (hw_lro_timestamp_flush_cnt[0]+hw_lro_timestamp_flush_cnt[1]+hw_lro_timestamp_flush_cnt[2])
|
|
+ );
|
|
+ seq_printf(seq, "No LRO rule: %d %d %d %d\n",
|
|
+ hw_lro_norule_flush_cnt[0], hw_lro_norule_flush_cnt[1], hw_lro_norule_flush_cnt[2],
|
|
+ (hw_lro_norule_flush_cnt[0]+hw_lro_norule_flush_cnt[1]+hw_lro_norule_flush_cnt[2])
|
|
+ );
|
|
+#endif /* CONFIG_RAETH_HW_LRO_REASON_DBG */
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int hw_lro_stats_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, HwLroStatsRead, NULL);
|
|
+}
|
|
+
|
|
+static struct file_operations hw_lro_stats_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = hw_lro_stats_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .write = HwLroStatsWrite,
|
|
+ .release = single_release
|
|
+};
|
|
+
|
|
+int hwlro_agg_cnt_ctrl(int par1, int par2)
|
|
+{
|
|
+ SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, par2);
|
|
+ SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING2, par2);
|
|
+ SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING3, par2);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int hwlro_agg_time_ctrl(int par1, int par2)
|
|
+{
|
|
+ SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, par2);
|
|
+ SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING2, par2);
|
|
+ SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING3, par2);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int hwlro_age_time_ctrl(int par1, int par2)
|
|
+{
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, par2);
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, par2);
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, par2);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int hwlro_pkt_int_alpha_ctrl(int par1, int par2)
|
|
+{
|
|
+ END_DEVICE *ei_local = netdev_priv(dev_raether);
|
|
+
|
|
+ ei_local->hw_lro_alpha = par2;
|
|
+ printk("[hwlro_pkt_int_alpha_ctrl]ei_local->hw_lro_alpha = %d\n", ei_local->hw_lro_alpha);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int hwlro_threshold_ctrl(int par1, int par2)
|
|
+{
|
|
+ /* bandwidth threshold setting */
|
|
+ SET_PDMA_LRO_BW_THRESHOLD(par2);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int hwlro_fix_setting_switch_ctrl(int par1, int par2)
|
|
+{
|
|
+#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
|
|
+ END_DEVICE *ei_local = netdev_priv(dev_raether);
|
|
+
|
|
+ ei_local->hw_lro_fix_setting = par2;
|
|
+ printk("[hwlro_pkt_int_alpha_ctrl]ei_local->hw_lro_fix_setting = %d\n", ei_local->hw_lro_fix_setting);
|
|
+#endif /* CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG */
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+ssize_t HwLroAutoTlbWrite(struct file *file, const char __user *buffer,
|
|
+ size_t count, loff_t *data)
|
|
+{
|
|
+ char buf[32];
|
|
+ char *pBuf;
|
|
+ int len = count;
|
|
+ int x = 0,y = 0;
|
|
+ char *pToken = NULL;
|
|
+ char *pDelimiter = " \t";
|
|
+
|
|
+ printk("[HwLroAutoTlbWrite]write parameter len = %d\n\r", (int)len);
|
|
+ if(len >= sizeof(buf)){
|
|
+ printk("input handling fail!\n");
|
|
+ len = sizeof(buf) - 1;
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ if(copy_from_user(buf, buffer, len)){
|
|
+ return -EFAULT;
|
|
+ }
|
|
+ buf[len] = '\0';
|
|
+ printk("[HwLroAutoTlbWrite]write parameter data = %s\n\r", buf);
|
|
+
|
|
+ pBuf = buf;
|
|
+ pToken = strsep(&pBuf, pDelimiter);
|
|
+ x = NULL != pToken ? simple_strtol(pToken, NULL, 16) : 0;
|
|
+
|
|
+ pToken = strsep(&pBuf, "\t\n ");
|
|
+ if(pToken != NULL){
|
|
+ y = NULL != pToken ? simple_strtol(pToken, NULL, 16) : 0;
|
|
+ printk("y = 0x%08x \n\r", y);
|
|
+ }
|
|
+
|
|
+ if ( (sizeof(hw_lro_dbg_func)/sizeof(hw_lro_dbg_func[0]) > x) && NULL != hw_lro_dbg_func[x])
|
|
+ {
|
|
+ (*hw_lro_dbg_func[x])(x, y);
|
|
+ }
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
+void HwLroAutoTlbDump(struct seq_file *seq, unsigned int index)
|
|
+{
|
|
+ int i;
|
|
+ struct PDMA_LRO_AUTO_TLB_INFO pdma_lro_auto_tlb;
|
|
+ unsigned int tlb_info[9];
|
|
+ unsigned int dw_len, cnt, priority;
|
|
+ unsigned int entry;
|
|
+
|
|
+ if( index > 4 )
|
|
+ index = index - 1;
|
|
+ entry = (index * 9) + 1;
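+ /* Each auto-learn table entry appears to occupy 9 consecutive 32-bit words:
+  * the word index written to PDMA_FE_ALT_CF8 selects the entry, and the nine
+  * sequential reads of PDMA_FE_ALT_SEQ_CFC below return its raw contents,
+  * which are then overlaid on struct PDMA_LRO_AUTO_TLB_INFO. */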
|
|
+
|
|
+ /* read valid entries of the auto-learn table */
|
|
+ sysRegWrite( PDMA_FE_ALT_CF8, entry );
|
|
+
|
|
+ //seq_printf(seq, "\nEntry = %d\n", entry);
|
|
+ for(i=0; i<9; i++){
|
|
+ tlb_info[i] = sysRegRead(PDMA_FE_ALT_SEQ_CFC);
|
|
+ //seq_printf(seq, "tlb_info[%d] = 0x%x\n", i, tlb_info[i]);
|
|
+ }
|
|
+ memcpy(&pdma_lro_auto_tlb, tlb_info, sizeof(struct PDMA_LRO_AUTO_TLB_INFO));
|
|
+
|
|
+ dw_len = pdma_lro_auto_tlb.auto_tlb_info7.DW_LEN;
|
|
+ cnt = pdma_lro_auto_tlb.auto_tlb_info6.CNT;
|
|
+
|
|
+ if ( sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_ALT_SCORE_MODE ) /* packet count */
|
|
+ priority = cnt;
|
|
+ else /* byte count */
|
|
+ priority = dw_len;
|
|
+
|
|
+ /* dump valid entries of the auto-learn table */
|
|
+ if( index >= 4 )
|
|
+ seq_printf(seq, "\n===== TABLE Entry: %d (Act) =====\n", index);
|
|
+ else
|
|
+ seq_printf(seq, "\n===== TABLE Entry: %d (LRU) =====\n", index);
|
|
+ if( pdma_lro_auto_tlb.auto_tlb_info8.IPV4 ){
|
|
+ seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv4)\n",
|
|
+ pdma_lro_auto_tlb.auto_tlb_info4.SIP3,
|
|
+ pdma_lro_auto_tlb.auto_tlb_info3.SIP2,
|
|
+ pdma_lro_auto_tlb.auto_tlb_info2.SIP1,
|
|
+ pdma_lro_auto_tlb.auto_tlb_info1.SIP0);
|
|
+ }
|
|
+ else{
|
|
+ seq_printf(seq, "SIP = 0x%x:0x%x:0x%x:0x%x (IPv6)\n",
|
|
+ pdma_lro_auto_tlb.auto_tlb_info4.SIP3,
|
|
+ pdma_lro_auto_tlb.auto_tlb_info3.SIP2,
|
|
+ pdma_lro_auto_tlb.auto_tlb_info2.SIP1,
|
|
+ pdma_lro_auto_tlb.auto_tlb_info1.SIP0);
|
|
+ }
|
|
+ seq_printf(seq, "DIP_ID = %d\n", pdma_lro_auto_tlb.auto_tlb_info8.DIP_ID);
|
|
+ seq_printf(seq, "TCP SPORT = %d | TCP DPORT = %d\n",
|
|
+ pdma_lro_auto_tlb.auto_tlb_info0.STP,
|
|
+ pdma_lro_auto_tlb.auto_tlb_info0.DTP);
|
|
+ seq_printf(seq, "VLAN1 = %d | VLAN2 = %d | VLAN3 = %d | VLAN4 =%d \n",
|
|
+ pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0,
|
|
+ (pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0 << 12),
|
|
+ (pdma_lro_auto_tlb.auto_tlb_info5.VLAN_VID0 << 24),
|
|
+ pdma_lro_auto_tlb.auto_tlb_info6.VLAN_VID1);
|
|
+ seq_printf(seq, "TPUT = %d | FREQ = %d\n", dw_len, cnt);
|
|
+ seq_printf(seq, "PRIORITY = %d\n", priority);
|
|
+}
|
|
+
|
|
+int HwLroAutoTlbRead(struct seq_file *seq, void *v)
|
|
+{
|
|
+ int i;
|
|
+ unsigned int regVal;
|
|
+ unsigned int regOp1, regOp2, regOp3, regOp4;
|
|
+ unsigned int agg_cnt, agg_time, age_time;
|
|
+
|
|
+ /* Read valid entries of the auto-learn table */
|
|
+ sysRegWrite(PDMA_FE_ALT_CF8, 0);
|
|
+ regVal = sysRegRead(PDMA_FE_ALT_SEQ_CFC);
|
|
+
|
|
+ seq_printf(seq, "HW LRO Auto-learn Table: (PDMA_LRO_ALT_CFC_RSEQ_DBG=0x%x)\n", regVal);
|
|
+
|
|
+ for(i = 7; i >= 0; i--)
|
|
+ {
|
|
+ if( regVal & (1 << i) )
|
|
+ HwLroAutoTlbDump(seq, i);
|
|
+ }
|
|
+
|
|
+ /* Read the agg_time/age_time/agg_cnt of LRO rings */
|
|
+ seq_printf(seq, "\nHW LRO Ring Settings\n");
|
|
+ for(i = 1; i <= 3; i++)
|
|
+ {
|
|
+ regOp1 = sysRegRead( LRO_RX_RING0_CTRL_DW1 + (i * 0x40) );
|
|
+ regOp2 = sysRegRead( LRO_RX_RING0_CTRL_DW2 + (i * 0x40) );
|
|
+ regOp3 = sysRegRead( LRO_RX_RING0_CTRL_DW3 + (i * 0x40) );
|
|
+ regOp4 = sysRegRead( ADMA_LRO_CTRL_DW2 );
|
|
+ agg_cnt = ((regOp3 & 0x03) << PDMA_LRO_AGG_CNT_H_OFFSET) | ((regOp2 >> PDMA_LRO_RING_AGG_CNT1_OFFSET) & 0x3f);
|
|
+ agg_time = (regOp2 >> PDMA_LRO_RING_AGG_OFFSET) & 0xffff;
|
|
+ age_time = ((regOp2 & 0x03f) << PDMA_LRO_AGE_H_OFFSET) | ((regOp1 >> PDMA_LRO_RING_AGE1_OFFSET) & 0x3ff);
|
|
+ seq_printf(seq, "Ring[%d]: MAX_AGG_CNT=%d, AGG_TIME=%d, AGE_TIME=%d, Threshold=%d\n",
|
|
+ i, agg_cnt, agg_time, age_time, regOp4);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int hw_lro_auto_tlb_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, HwLroAutoTlbRead, NULL);
|
|
+}
|
|
+
|
|
+static struct file_operations hw_lro_auto_tlb_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = hw_lro_auto_tlb_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .write = HwLroAutoTlbWrite,
|
|
+ .release = single_release
|
|
+};
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+
|
|
+#if defined (CONFIG_MIPS)
|
|
+int CP0RegRead(struct seq_file *seq, void *v)
|
|
+{
|
|
+ seq_printf(seq, "CP0 Register dump --\n");
|
|
+ seq_printf(seq, "CP0_INDEX\t: 0x%08x\n", read_32bit_cp0_register(CP0_INDEX));
|
|
+ seq_printf(seq, "CP0_RANDOM\t: 0x%08x\n", read_32bit_cp0_register(CP0_RANDOM));
|
|
+ seq_printf(seq, "CP0_ENTRYLO0\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYLO0));
|
|
+ seq_printf(seq, "CP0_ENTRYLO1\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYLO1));
|
|
+ seq_printf(seq, "CP0_CONF\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONF));
|
|
+ seq_printf(seq, "CP0_CONTEXT\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONTEXT));
|
|
+ seq_printf(seq, "CP0_PAGEMASK\t: 0x%08x\n", read_32bit_cp0_register(CP0_PAGEMASK));
|
|
+ seq_printf(seq, "CP0_WIRED\t: 0x%08x\n", read_32bit_cp0_register(CP0_WIRED));
|
|
+ seq_printf(seq, "CP0_INFO\t: 0x%08x\n", read_32bit_cp0_register(CP0_INFO));
|
|
+ seq_printf(seq, "CP0_BADVADDR\t: 0x%08x\n", read_32bit_cp0_register(CP0_BADVADDR));
|
|
+ seq_printf(seq, "CP0_COUNT\t: 0x%08x\n", read_32bit_cp0_register(CP0_COUNT));
|
|
+ seq_printf(seq, "CP0_ENTRYHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_ENTRYHI));
|
|
+ seq_printf(seq, "CP0_COMPARE\t: 0x%08x\n", read_32bit_cp0_register(CP0_COMPARE));
|
|
+ seq_printf(seq, "CP0_STATUS\t: 0x%08x\n", read_32bit_cp0_register(CP0_STATUS));
|
|
+ seq_printf(seq, "CP0_CAUSE\t: 0x%08x\n", read_32bit_cp0_register(CP0_CAUSE));
|
|
+ seq_printf(seq, "CP0_EPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_EPC));
|
|
+ seq_printf(seq, "CP0_PRID\t: 0x%08x\n", read_32bit_cp0_register(CP0_PRID));
|
|
+ seq_printf(seq, "CP0_CONFIG\t: 0x%08x\n", read_32bit_cp0_register(CP0_CONFIG));
|
|
+ seq_printf(seq, "CP0_LLADDR\t: 0x%08x\n", read_32bit_cp0_register(CP0_LLADDR));
|
|
+ seq_printf(seq, "CP0_WATCHLO\t: 0x%08x\n", read_32bit_cp0_register(CP0_WATCHLO));
|
|
+ seq_printf(seq, "CP0_WATCHHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_WATCHHI));
|
|
+ seq_printf(seq, "CP0_XCONTEXT\t: 0x%08x\n", read_32bit_cp0_register(CP0_XCONTEXT));
|
|
+ seq_printf(seq, "CP0_FRAMEMASK\t: 0x%08x\n", read_32bit_cp0_register(CP0_FRAMEMASK));
|
|
+ seq_printf(seq, "CP0_DIAGNOSTIC\t: 0x%08x\n", read_32bit_cp0_register(CP0_DIAGNOSTIC));
|
|
+ seq_printf(seq, "CP0_DEBUG\t: 0x%08x\n", read_32bit_cp0_register(CP0_DEBUG));
|
|
+ seq_printf(seq, "CP0_DEPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_DEPC));
|
|
+ seq_printf(seq, "CP0_PERFORMANCE\t: 0x%08x\n", read_32bit_cp0_register(CP0_PERFORMANCE));
|
|
+ seq_printf(seq, "CP0_ECC\t: 0x%08x\n", read_32bit_cp0_register(CP0_ECC));
|
|
+ seq_printf(seq, "CP0_CACHEERR\t: 0x%08x\n", read_32bit_cp0_register(CP0_CACHEERR));
|
|
+ seq_printf(seq, "CP0_TAGLO\t: 0x%08x\n", read_32bit_cp0_register(CP0_TAGLO));
|
|
+ seq_printf(seq, "CP0_TAGHI\t: 0x%08x\n", read_32bit_cp0_register(CP0_TAGHI));
|
|
+ seq_printf(seq, "CP0_ERROREPC\t: 0x%08x\n", read_32bit_cp0_register(CP0_ERROREPC));
|
|
+ seq_printf(seq, "CP0_DESAVE\t: 0x%08x\n\n", read_32bit_cp0_register(CP0_DESAVE));
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int cp0_reg_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, CP0RegRead, NULL);
|
|
+}
|
|
+
|
|
+static const struct file_operations cp0_reg_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = cp0_reg_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_QOS)
|
|
+static struct proc_dir_entry *procRaQOS, *procRaFeIntr, *procRaEswIntr;
|
|
+extern uint32_t num_of_rxdone_intr;
|
|
+extern uint32_t num_of_esw_intr;
|
|
+
|
|
+int RaQOSRegRead(struct seq_file *seq, void *v)
|
|
+{
|
|
+ dump_qos(seq);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int raeth_qos_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, RaQOSRegRead, NULL);
|
|
+}
|
|
+
|
|
+static const struct file_operations raeth_qos_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = raeth_qos_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+#endif
|
|
+
|
|
+static struct proc_dir_entry *procEswCnt;
|
|
+
|
|
+int EswCntRead(struct seq_file *seq, void *v)
|
|
+{
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE) || defined (CONFIG_ARCH_MT7623)
|
|
+ unsigned int pkt_cnt = 0;
|
|
+ int i = 0;
|
|
+#endif
|
|
+ seq_printf(seq, "\n <<CPU>> \n");
|
|
+ seq_printf(seq, " | \n");
|
|
+#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+ seq_printf(seq, "+-----------------------------------------------+\n");
|
|
+ seq_printf(seq, "| <<PDMA>> |\n");
|
|
+ seq_printf(seq, "+-----------------------------------------------+\n");
|
|
+#else
|
|
+ seq_printf(seq, "+-----------------------------------------------+\n");
|
|
+ seq_printf(seq, "| <<PSE>> |\n");
|
|
+ seq_printf(seq, "+-----------------------------------------------+\n");
|
|
+ seq_printf(seq, " | \n");
|
|
+ seq_printf(seq, "+-----------------------------------------------+\n");
|
|
+ seq_printf(seq, "| <<GDMA>> |\n");
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+ seq_printf(seq, "| GDMA1_TX_GPCNT : %010u (Tx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1304));
|
|
+ seq_printf(seq, "| GDMA1_RX_GPCNT : %010u (Rx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1324));
|
|
+ seq_printf(seq, "| |\n");
|
|
+ seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (skip) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1308));
|
|
+ seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x130c));
|
|
+ seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1328));
|
|
+ seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x132c));
|
|
+ seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1330));
|
|
+ seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1334));
|
|
+ seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (l3/l4 checksum) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1338));
|
|
+ seq_printf(seq, "| GDMA1_RX_FCCNT : %010u (flow control) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x133c));
|
|
+
|
|
+ seq_printf(seq, "| |\n");
|
|
+ seq_printf(seq, "| GDMA2_TX_GPCNT : %010u (Tx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1344));
|
|
+ seq_printf(seq, "| GDMA2_RX_GPCNT : %010u (Rx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1364));
|
|
+ seq_printf(seq, "| |\n");
|
|
+ seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1348));
|
|
+ seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x134c));
|
|
+ seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1368));
|
|
+ seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x136c));
|
|
+ seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1370));
|
|
+ seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1374));
|
|
+ seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (l3/l4 checksum) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x1378));
|
|
+ seq_printf(seq, "| GDMA2_RX_FCCNT : %010u (flow control) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x137c));
|
|
+#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+ seq_printf(seq, "| GDMA1_RX_GBCNT : %010u (Rx Good Bytes) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2400));
|
|
+ seq_printf(seq, "| GDMA1_RX_GPCNT : %010u (Rx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2408));
|
|
+ seq_printf(seq, "| GDMA1_RX_OERCNT : %010u (overflow error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2410));
|
|
+ seq_printf(seq, "| GDMA1_RX_FERCNT : %010u (FCS error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2414));
|
|
+ seq_printf(seq, "| GDMA1_RX_SERCNT : %010u (too short) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2418));
|
|
+ seq_printf(seq, "| GDMA1_RX_LERCNT : %010u (too long) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x241C));
|
|
+ seq_printf(seq, "| GDMA1_RX_CERCNT : %010u (checksum error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2420));
|
|
+ seq_printf(seq, "| GDMA1_RX_FCCNT : %010u (flow control) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2424));
|
|
+	seq_printf(seq, "| GDMA1_TX_SKIPCNT: %010u (abort count)    |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2428));
|
|
+ seq_printf(seq, "| GDMA1_TX_COLCNT : %010u (collision count) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x242C));
|
|
+ seq_printf(seq, "| GDMA1_TX_GBCNT : %010u (Tx Good Bytes) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2430));
|
|
+ seq_printf(seq, "| GDMA1_TX_GPCNT : %010u (Tx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2438));
|
|
+ seq_printf(seq, "| |\n");
|
|
+ seq_printf(seq, "| GDMA2_RX_GBCNT : %010u (Rx Good Bytes) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2440));
|
|
+ seq_printf(seq, "| GDMA2_RX_GPCNT : %010u (Rx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2448));
|
|
+ seq_printf(seq, "| GDMA2_RX_OERCNT : %010u (overflow error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2450));
|
|
+ seq_printf(seq, "| GDMA2_RX_FERCNT : %010u (FCS error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2454));
|
|
+ seq_printf(seq, "| GDMA2_RX_SERCNT : %010u (too short) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2458));
|
|
+ seq_printf(seq, "| GDMA2_RX_LERCNT : %010u (too long) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x245C));
|
|
+ seq_printf(seq, "| GDMA2_RX_CERCNT : %010u (checksum error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2460));
|
|
+ seq_printf(seq, "| GDMA2_RX_FCCNT : %010u (flow control) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2464));
|
|
+ seq_printf(seq, "| GDMA2_TX_SKIPCNT: %010u (skip) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2468));
|
|
+ seq_printf(seq, "| GDMA2_TX_COLCNT : %010u (collision) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x246C));
|
|
+ seq_printf(seq, "| GDMA2_TX_GBCNT : %010u (Tx Good Bytes) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2470));
|
|
+ seq_printf(seq, "| GDMA2_TX_GPCNT : %010u (Tx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x2478));
|
|
+#else
|
|
+ seq_printf(seq, "| GDMA_TX_GPCNT1 : %010u (Tx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x704));
|
|
+ seq_printf(seq, "| GDMA_RX_GPCNT1 : %010u (Rx Good Pkts) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x724));
|
|
+ seq_printf(seq, "| |\n");
|
|
+ seq_printf(seq, "| GDMA_TX_SKIPCNT1: %010u (skip) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x708));
|
|
+ seq_printf(seq, "| GDMA_TX_COLCNT1 : %010u (collision) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x70c));
|
|
+ seq_printf(seq, "| GDMA_RX_OERCNT1 : %010u (overflow) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x728));
|
|
+ seq_printf(seq, "| GDMA_RX_FERCNT1 : %010u (FCS error) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x72c));
|
|
+ seq_printf(seq, "| GDMA_RX_SERCNT1 : %010u (too short) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x730));
|
|
+ seq_printf(seq, "| GDMA_RX_LERCNT1 : %010u (too long) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x734));
|
|
+ seq_printf(seq, "| GDMA_RX_CERCNT1 : %010u (l3/l4 checksum) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x738));
|
|
+ seq_printf(seq, "| GDMA_RX_FCCNT1 : %010u (flow control) |\n", sysRegRead(RALINK_FRAME_ENGINE_BASE+0x73c));
|
|
+
|
|
+#endif
|
|
+ seq_printf(seq, "+-----------------------------------------------+\n");
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined (CONFIG_RALINK_MT7620)
|
|
+
|
|
+ seq_printf(seq, " ^ \n");
|
|
+ seq_printf(seq, " | Port6 Rx:%010u Good Pkt \n", ((p6_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4620)&0xFFFF)));
|
|
+ seq_printf(seq, " | Port6 Rx:%010u Bad Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0x4620)>>16);
|
|
+ seq_printf(seq, " | Port6 Tx:%010u Good Pkt \n", ((p6_tx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4610)&0xFFFF)));
|
|
+ seq_printf(seq, " | Port6 Tx:%010u Bad Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0x4610)>>16);
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+
|
|
+ seq_printf(seq, " | Port7 Rx:%010u Good Pkt \n", ((p7_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4720)&0xFFFF)));
|
|
+ seq_printf(seq, " | Port7 Rx:%010u Bad Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0x4720)>>16);
|
|
+ seq_printf(seq, " | Port7 Tx:%010u Good Pkt \n", ((p7_tx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4710)&0xFFFF)));
|
|
+ seq_printf(seq, " | Port7 Tx:%010u Bad Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0x4710)>>16);
|
|
+#endif
|
|
+ seq_printf(seq, "+---------------------v-------------------------+\n");
|
|
+ seq_printf(seq, "| P6 |\n");
|
|
+ seq_printf(seq, "| <<10/100/1000 Embedded Switch>> |\n");
|
|
+ seq_printf(seq, "| P0 P1 P2 P3 P4 P5 |\n");
|
|
+ seq_printf(seq, "+-----------------------------------------------+\n");
|
|
+ seq_printf(seq, " | | | | | | \n");
|
|
+#elif defined (CONFIG_RALINK_RT3883) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+ /* no built-in switch */
|
|
+#else
|
|
+ seq_printf(seq, " ^ \n");
|
|
+ seq_printf(seq, " | Port6 Rx:%08u Good Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0xE0)&0xFFFF);
|
|
+ seq_printf(seq, " | Port6 Tx:%08u Good Pkt \n", sysRegRead(RALINK_ETH_SW_BASE+0xE0)>>16);
|
|
+ seq_printf(seq, "+---------------------v-------------------------+\n");
|
|
+ seq_printf(seq, "| P6 |\n");
|
|
+ seq_printf(seq, "| <<10/100 Embedded Switch>> |\n");
|
|
+ seq_printf(seq, "| P0 P1 P2 P3 P4 P5 |\n");
|
|
+ seq_printf(seq, "+-----------------------------------------------+\n");
|
|
+ seq_printf(seq, " | | | | | | \n");
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined (CONFIG_RALINK_MT7620)
|
|
+
|
|
+ seq_printf(seq, "Port0 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p0_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4020)&0xFFFF)), ((p0_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4010)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4020)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4010)>>16);
|
|
+
|
|
+ seq_printf(seq, "Port1 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p1_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4120)&0xFFFF)), ((p1_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4110)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4120)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4110)>>16);
|
|
+
|
|
+ seq_printf(seq, "Port2 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p2_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4220)&0xFFFF)), ((p2_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4210)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4220)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4210)>>16);
|
|
+
|
|
+ seq_printf(seq, "Port3 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p3_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4320)&0xFFFF)), ((p3_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4310)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4320)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4310)>>16);
|
|
+
|
|
+ seq_printf(seq, "Port4 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p4_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4420)&0xFFFF)), ((p4_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4410)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4420)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4410)>>16);
|
|
+
|
|
+ seq_printf(seq, "Port5 Good RX=%010u Tx=%010u (Bad Rx=%010u Tx=%010u)\n", ((p5_rx_good_cnt << 16) | (sysRegRead(RALINK_ETH_SW_BASE+0x4520)&0xFFFF)), ((p5_tx_good_cnt << 16)| (sysRegRead(RALINK_ETH_SW_BASE+0x4510)&0xFFFF)), sysRegRead(RALINK_ETH_SW_BASE+0x4520)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x4510)>>16);
|
|
+
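+	/* KByte counters: the switch registers count bytes, so they are shifted
+	 * down by 10; pX_rx/tx_byte_cnt presumably hold the number of 32-bit
+	 * overflows accumulated elsewhere in the driver (2^32 B = 2^22 KB,
+	 * hence the << 22). */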
|
|
+ seq_printf(seq, "Port0 KBytes RX=%010u Tx=%010u \n", ((p0_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4028) >> 10)), ((p0_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4018) >> 10)));
|
|
+
|
|
+ seq_printf(seq, "Port1 KBytes RX=%010u Tx=%010u \n", ((p1_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4128) >> 10)), ((p1_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4118) >> 10)));
|
|
+
|
|
+ seq_printf(seq, "Port2 KBytes RX=%010u Tx=%010u \n", ((p2_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4228) >> 10)), ((p2_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4218) >> 10)));
|
|
+
|
|
+ seq_printf(seq, "Port3 KBytes RX=%010u Tx=%010u \n", ((p3_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4328) >> 10)), ((p3_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4318) >> 10)));
|
|
+
|
|
+ seq_printf(seq, "Port4 KBytes RX=%010u Tx=%010u \n", ((p4_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4428) >> 10)), ((p4_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4418) >> 10)));
|
|
+
|
|
+ seq_printf(seq, "Port5 KBytes RX=%010u Tx=%010u \n", ((p5_rx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4528) >> 10)), ((p5_tx_byte_cnt << 22) + (sysRegRead(RALINK_ETH_SW_BASE+0x4518) >> 10)));
|
|
+
|
|
+#if defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
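+/* Dump one MIB counter row of the external MT7530: the switch registers are
+ * reached indirectly over MDIO (pseudo PHY address 31), and each port keeps
+ * its copy of a counter at (base + port * 0x100). The two writes to 0x4fe0
+ * after the dump appear to clear/re-arm the counters (read-clear mode, as
+ * the banner below notes).
+ */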
|
|
+#define DUMP_EACH_PORT(base) \
|
|
+ for(i=0; i < 7;i++) { \
|
|
+ mii_mgr_read(31, (base) + (i*0x100), &pkt_cnt); \
|
|
+ seq_printf(seq, "%8u ", pkt_cnt); \
|
|
+ } \
|
|
+ seq_printf(seq, "\n");
|
|
+ seq_printf(seq, "========================================[MT7530] READ CLEAR========================\n");
|
|
+
|
|
+ seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n","Port0", "Port1", "Port2", "Port3", "Port4", "Port5", "Port6");
|
|
+ seq_printf(seq, "Tx Drop Packet :"); DUMP_EACH_PORT(0x4000);
|
|
+ //seq_printf(seq, "Tx CRC Error :"); DUMP_EACH_PORT(0x4004);
|
|
+ seq_printf(seq, "Tx Unicast Packet :"); DUMP_EACH_PORT(0x4008);
|
|
+ seq_printf(seq, "Tx Multicast Packet :"); DUMP_EACH_PORT(0x400C);
|
|
+ seq_printf(seq, "Tx Broadcast Packet :"); DUMP_EACH_PORT(0x4010);
|
|
+ //seq_printf(seq, "Tx Collision Event :"); DUMP_EACH_PORT(0x4014);
|
|
+ seq_printf(seq, "Tx Pause Packet :"); DUMP_EACH_PORT(0x402C);
|
|
+ seq_printf(seq, "Rx Drop Packet :"); DUMP_EACH_PORT(0x4060);
|
|
+ seq_printf(seq, "Rx Filtering Packet :"); DUMP_EACH_PORT(0x4064);
|
|
+ seq_printf(seq, "Rx Unicast Packet :"); DUMP_EACH_PORT(0x4068);
|
|
+ seq_printf(seq, "Rx Multicast Packet :"); DUMP_EACH_PORT(0x406C);
|
|
+ seq_printf(seq, "Rx Broadcast Packet :"); DUMP_EACH_PORT(0x4070);
|
|
+ seq_printf(seq, "Rx Alignment Error :"); DUMP_EACH_PORT(0x4074);
|
|
+ seq_printf(seq, "Rx CRC Error :"); DUMP_EACH_PORT(0x4078);
|
|
+ seq_printf(seq, "Rx Undersize Error :"); DUMP_EACH_PORT(0x407C);
|
|
+ //seq_printf(seq, "Rx Fragment Error :"); DUMP_EACH_PORT(0x4080);
|
|
+ //seq_printf(seq, "Rx Oversize Error :"); DUMP_EACH_PORT(0x4084);
|
|
+ //seq_printf(seq, "Rx Jabber Error :"); DUMP_EACH_PORT(0x4088);
|
|
+ seq_printf(seq, "Rx Pause Packet :"); DUMP_EACH_PORT(0x408C);
|
|
+ mii_mgr_write(31, 0x4fe0, 0xf0);
|
|
+ mii_mgr_write(31, 0x4fe0, 0x800000f0);
|
|
+#endif
|
|
+
|
|
+
|
|
+#elif defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+ seq_printf(seq, "Port0 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xE8)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x150)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xE8)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x150)>>16);
|
|
+
|
|
+ seq_printf(seq, "Port1 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xEC)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x154)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xEC)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x154)>>16);
|
|
+
|
|
+ seq_printf(seq, "Port2 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF0)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x158)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF0)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x158)>>16);
|
|
+
|
|
+ seq_printf(seq, "Port3 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF4)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x15C)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF4)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x15c)>>16);
|
|
+
|
|
+ seq_printf(seq, "Port4 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF8)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x160)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF8)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x160)>>16);
|
|
+
|
|
+ seq_printf(seq, "Port5 Good Pkt Cnt: RX=%08u Tx=%08u (Bad Pkt Cnt: Rx=%08u Tx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xFC)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0x164)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xFC)>>16, sysRegRead(RALINK_ETH_SW_BASE+0x164)>>16);
|
|
+#elif defined (CONFIG_RALINK_RT3883)
|
|
+ /* no built-in switch */
|
|
+#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+
|
|
+#define DUMP_EACH_PORT(base) \
|
|
+ for(i=0; i < 7;i++) { \
|
|
+ mii_mgr_read(31, (base) + (i*0x100), &pkt_cnt); \
|
|
+ seq_printf(seq, "%8u ", pkt_cnt); \
|
|
+ } \
|
|
+ seq_printf(seq, "\n");
|
|
+
|
|
+#if defined (CONFIG_RALINK_MT7621) /* TODO: need to update to use MT7530 compiler flag */
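+	/* Bit 16 of the register at 0xbe00000c (chip revision / package ID) is
+	 * used here to detect whether the MT7530 switch sits in the same
+	 * package (MCM); only then are its MIB counters dumped below. */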
|
|
+	if (sysRegRead(0xbe00000c) & (1<<16)) //MCM
|
|
+#endif
|
|
+ {
|
|
+ seq_printf(seq, "===================== %8s %8s %8s %8s %8s %8s %8s\n","Port0", "Port1", "Port2", "Port3", "Port4", "Port5", "Port6");
|
|
+ seq_printf(seq, "Tx Drop Packet :"); DUMP_EACH_PORT(0x4000);
|
|
+ seq_printf(seq, "Tx CRC Error :"); DUMP_EACH_PORT(0x4004);
|
|
+ seq_printf(seq, "Tx Unicast Packet :"); DUMP_EACH_PORT(0x4008);
|
|
+ seq_printf(seq, "Tx Multicast Packet :"); DUMP_EACH_PORT(0x400C);
|
|
+ seq_printf(seq, "Tx Broadcast Packet :"); DUMP_EACH_PORT(0x4010);
|
|
+ seq_printf(seq, "Tx Collision Event :"); DUMP_EACH_PORT(0x4014);
|
|
+ seq_printf(seq, "Tx Pause Packet :"); DUMP_EACH_PORT(0x402C);
|
|
+ seq_printf(seq, "Rx Drop Packet :"); DUMP_EACH_PORT(0x4060);
|
|
+ seq_printf(seq, "Rx Filtering Packet :"); DUMP_EACH_PORT(0x4064);
|
|
+ seq_printf(seq, "Rx Unicast Packet :"); DUMP_EACH_PORT(0x4068);
|
|
+ seq_printf(seq, "Rx Multicast Packet :"); DUMP_EACH_PORT(0x406C);
|
|
+ seq_printf(seq, "Rx Broadcast Packet :"); DUMP_EACH_PORT(0x4070);
|
|
+ seq_printf(seq, "Rx Alignment Error :"); DUMP_EACH_PORT(0x4074);
|
|
+ seq_printf(seq, "Rx CRC Error :"); DUMP_EACH_PORT(0x4078);
|
|
+ seq_printf(seq, "Rx Undersize Error :"); DUMP_EACH_PORT(0x407C);
|
|
+ seq_printf(seq, "Rx Fragment Error :"); DUMP_EACH_PORT(0x4080);
|
|
+ seq_printf(seq, "Rx Oversize Error :"); DUMP_EACH_PORT(0x4084);
|
|
+ seq_printf(seq, "Rx Jabber Error :"); DUMP_EACH_PORT(0x4088);
|
|
+ seq_printf(seq, "Rx Pause Packet :"); DUMP_EACH_PORT(0x408C);
|
|
+ mii_mgr_write(31, 0x4fe0, 0xf0);
|
|
+ mii_mgr_write(31, 0x4fe0, 0x800000f0);
|
|
+ }
|
|
+#if defined (CONFIG_RALINK_MT7621) /* TODO: need to update to use MT7530 compiler flag */
|
|
+ else {
|
|
+ seq_printf(seq, "no built-in switch\n");
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#else /* RT305x, RT3352 */
|
|
+ seq_printf(seq, "Port0: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xE8)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xE8)>>16);
|
|
+ seq_printf(seq, "Port1: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xEC)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xEC)>>16);
|
|
+ seq_printf(seq, "Port2: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF0)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF0)>>16);
|
|
+ seq_printf(seq, "Port3: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF4)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF4)>>16);
|
|
+ seq_printf(seq, "Port4: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xF8)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xF8)>>16);
|
|
+ seq_printf(seq, "Port5: Good Pkt Cnt: RX=%08u (Bad Pkt Cnt: Rx=%08u)\n", sysRegRead(RALINK_ETH_SW_BASE+0xFC)&0xFFFF,sysRegRead(RALINK_ETH_SW_BASE+0xFC)>>16);
|
|
+#endif
|
|
+ seq_printf(seq, "\n");
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int switch_count_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, EswCntRead, NULL);
|
|
+}
|
|
+
|
|
+static const struct file_operations switch_count_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = switch_count_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+
|
|
+#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
|
|
+/*
|
|
+ * proc write procedure
|
|
+ */
|
|
+static ssize_t change_phyid(struct file *file, const char __user *buffer,
|
|
+ size_t count, loff_t *data)
|
|
+{
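+	/* proc write handler: accepts either "<phy_id>" or "<ifname> <phy_id>"
+	 * and updates the PHY address that the ethtool ioctls operate on. */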
|
|
+ char buf[32];
|
|
+ struct net_device *cur_dev_p;
|
|
+ END_DEVICE *ei_local;
|
|
+ char if_name[64];
|
|
+ unsigned int phy_id;
|
|
+
|
|
+	if (count > sizeof(buf) - 1)
|
|
+		count = sizeof(buf) - 1;
|
|
+ memset(buf, 0, 32);
|
|
+ if (copy_from_user(buf, buffer, count))
|
|
+ return -EFAULT;
|
|
+
|
|
+ /* determine interface name */
|
|
+ strcpy(if_name, DEV_NAME); /* "eth2" by default */
|
|
+ if(isalpha(buf[0]))
|
|
+ sscanf(buf, "%s %d", if_name, &phy_id);
|
|
+ else
|
|
+ phy_id = simple_strtol(buf, 0, 10);
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ cur_dev_p = dev_get_by_name(&init_net, DEV_NAME);
|
|
+#else
|
|
+ cur_dev_p = dev_get_by_name(DEV_NAME);
|
|
+#endif
|
|
+ if (cur_dev_p == NULL)
|
|
+ return -EFAULT;
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ ei_local = netdev_priv(cur_dev_p);
|
|
+#else
|
|
+ ei_local = cur_dev_p->priv;
|
|
+#endif
|
|
+ ei_local->mii_info.phy_id = (unsigned char)phy_id;
|
|
+ return count;
|
|
+}
|
|
+
|
|
+#if defined(CONFIG_PSEUDO_SUPPORT)
|
|
+static ssize_t change_gmac2_phyid(struct file *file, const char __user *buffer,
|
|
+ size_t count, loff_t *data)
|
|
+{
|
|
+ char buf[32];
|
|
+ struct net_device *cur_dev_p;
|
|
+ PSEUDO_ADAPTER *pPseudoAd;
|
|
+ char if_name[64];
|
|
+ unsigned int phy_id;
|
|
+
|
|
+	if (count > sizeof(buf) - 1)
|
|
+		count = sizeof(buf) - 1;
|
|
+ memset(buf, 0, 32);
|
|
+ if (copy_from_user(buf, buffer, count))
|
|
+ return -EFAULT;
|
|
+ /* determine interface name */
|
|
+ strcpy(if_name, DEV2_NAME); /* "eth3" by default */
|
|
+ if(isalpha(buf[0]))
|
|
+ sscanf(buf, "%s %d", if_name, &phy_id);
|
|
+ else
|
|
+ phy_id = simple_strtol(buf, 0, 10);
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ cur_dev_p = dev_get_by_name(&init_net, DEV2_NAME);
|
|
+#else
|
|
+	cur_dev_p = dev_get_by_name(DEV2_NAME);
|
|
+#endif
|
|
+ if (cur_dev_p == NULL)
|
|
+ return -EFAULT;
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ pPseudoAd = netdev_priv(cur_dev_p);
|
|
+#else
|
|
+ pPseudoAd = cur_dev_p->priv;
|
|
+#endif
|
|
+ pPseudoAd->mii_info.phy_id = (unsigned char)phy_id;
|
|
+ return count;
|
|
+}
|
|
+
|
|
+static struct file_operations gmac2_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .write = change_gmac2_phyid
|
|
+};
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+static int gmac_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, RegReadMain, NULL);
|
|
+}
|
|
+
|
|
+static struct file_operations gmac_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = gmac_open,
|
|
+ .read = seq_read,
|
|
+ .llseek = seq_lseek,
|
|
+#if defined (CONFIG_ETHTOOL)
|
|
+ .write = change_phyid,
|
|
+#endif
|
|
+ .release = single_release
|
|
+};
|
|
+
|
|
+#if defined (TASKLET_WORKQUEUE_SW)
|
|
+extern int init_schedule;
|
|
+extern int working_schedule;
|
|
+static int ScheduleRead(struct seq_file *seq, void *v)
|
|
+{
|
|
+ if (init_schedule == 1)
|
|
+		seq_printf(seq, "Initialize Raeth with workqueue<%d>\n", init_schedule);
|
|
+ else
|
|
+ seq_printf(seq, "Initialize Raeth with tasklet<%d>\n", init_schedule);
|
|
+ if (working_schedule == 1)
|
|
+		seq_printf(seq, "Raeth is running at workqueue<%d>\n", working_schedule);
|
|
+ else
|
|
+ seq_printf(seq, "Raeth is running at tasklet<%d>\n", working_schedule);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static ssize_t ScheduleWrite(struct file *file, const char __user *buffer,
|
|
+ size_t count, loff_t *data)
|
|
+{
|
|
+	char buf[16];
|
|
+ int old;
|
|
+
|
|
+	if (count > sizeof(buf) - 1)
+		count = sizeof(buf) - 1;
+	if (copy_from_user(buf, buffer, count))
|
|
+		return -EFAULT;
+	buf[count] = '\0';
|
|
+ old = init_schedule;
|
|
+ init_schedule = simple_strtol(buf, 0, 10);
|
|
+	printk("Changed Raeth initial schedule from <%d> to <%d>!\nThe running schedule is not changed at present!\n",
|
|
+ old, init_schedule);
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
+static int schedule_switch_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, ScheduleRead, NULL);
|
|
+}
|
|
+
|
|
+static const struct file_operations schedule_sw_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = schedule_switch_open,
|
|
+ .read = seq_read,
|
|
+ .write = ScheduleWrite,
|
|
+ .llseek = seq_lseek,
|
|
+ .release = single_release
|
|
+};
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+static int PdmaDvtRead(struct seq_file *seq, void *v)
|
|
+{
|
|
+ seq_printf(seq, "g_pdma_dvt_show_config = 0x%x\n", pdma_dvt_get_show_config());
|
|
+ seq_printf(seq, "g_pdma_dvt_rx_test_config = 0x%x\n", pdma_dvt_get_rx_test_config());
|
|
+ seq_printf(seq, "g_pdma_dvt_tx_test_config = 0x%x\n", pdma_dvt_get_tx_test_config());
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int PdmaDvtOpen(struct inode *inode, struct file *file)
|
|
+{
|
|
+ return single_open(file, PdmaDvtRead, NULL);
|
|
+}
|
|
+
|
|
+static ssize_t PdmaDvtWrite(struct file *file, const char __user *buffer,
|
|
+ size_t count, loff_t *data)
|
|
+{
|
|
+ char buf[32];
|
|
+ char *pBuf;
|
|
+ int len = count;
|
|
+ int x = 0,y = 0;
|
|
+ char *pToken = NULL;
|
|
+ char *pDelimiter = " \t";
|
|
+
|
|
+ printk("write parameter len = %d\n\r", (int)len);
|
|
+ if(len >= sizeof(buf)){
|
|
+ printk("input handling fail!\n");
|
|
+ len = sizeof(buf) - 1;
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ if(copy_from_user(buf, buffer, len)){
|
|
+ return -EFAULT;
|
|
+ }
|
|
+ buf[len] = '\0';
|
|
+ printk("write parameter data = %s\n\r", buf);
|
|
+
|
|
+ pBuf = buf;
|
|
+ pToken = strsep(&pBuf, pDelimiter);
|
|
+ x = NULL != pToken ? simple_strtol(pToken, NULL, 16) : 0;
|
|
+
|
|
+ pToken = strsep(&pBuf, "\t\n ");
|
|
+ if(pToken != NULL){
|
|
+ y = NULL != pToken ? simple_strtol(pToken, NULL, 16) : 0;
|
|
+ printk("y = 0x%08x \n\r", y);
|
|
+ }
|
|
+
|
|
+ if ( (sizeof(pdma_dvt_dbg_func)/sizeof(pdma_dvt_dbg_func[0]) > x) && NULL != pdma_dvt_dbg_func[x])
|
|
+ {
|
|
+ (*pdma_dvt_dbg_func[x])(x, y);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ printk("no handler defined for command id(0x%08x)\n\r", x);
|
|
+ }
|
|
+
|
|
+ printk("x(0x%08x), y(0x%08x)\n", x, y);
|
|
+
|
|
+ return len;
|
|
+}
|
|
+
|
|
+static const struct file_operations pdma_dev_sw_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = PdmaDvtOpen,
|
|
+ .read = seq_read,
|
|
+ .write = PdmaDvtWrite
|
|
+};
|
|
+#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+
|
|
+int debug_proc_init(void)
|
|
+{
|
|
+ if (procRegDir == NULL)
|
|
+ procRegDir = proc_mkdir(PROCREG_DIR, NULL);
|
|
+
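+	/* Kernels before 2.6.36 need create_proc_entry() plus a manual
+	 * proc_fops assignment; newer kernels take the fops directly via
+	 * proc_create(). The same pattern repeats for every entry below. */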
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procGmac = create_proc_entry(PROCREG_GMAC, 0, procRegDir)))
|
|
+ procGmac->proc_fops = &gmac_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procGmac = proc_create(PROCREG_GMAC, 0, procRegDir, &gmac_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_GMAC);
|
|
+#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
|
|
+#if defined(CONFIG_PSEUDO_SUPPORT)
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procGmac2 = create_proc_entry(PROCREG_GMAC2, 0, procRegDir)))
|
|
+ procGmac2->proc_fops = &gmac2_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procGmac2 = proc_create(PROCREG_GMAC2, 0, procRegDir, &gmac2_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_GMAC2);
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procSkbFree = create_proc_entry(PROCREG_SKBFREE, 0, procRegDir)))
|
|
+ procSkbFree->proc_fops = &skb_free_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procSkbFree = proc_create(PROCREG_SKBFREE, 0, procRegDir, &skb_free_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_SKBFREE);
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procTxRing = create_proc_entry(PROCREG_TXRING, 0, procRegDir)))
|
|
+ procTxRing->proc_fops = &tx_ring_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procTxRing = proc_create(PROCREG_TXRING, 0, procRegDir, &tx_ring_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_TXRING);
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procRxRing = create_proc_entry(PROCREG_RXRING, 0, procRegDir)))
|
|
+ procRxRing->proc_fops = &rx_ring_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procRxRing = proc_create(PROCREG_RXRING, 0, procRegDir, &rx_ring_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_RXRING);
|
|
+
|
|
+#if defined (CONFIG_RAETH_HW_LRO) || defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procRxRing1 = create_proc_entry(PROCREG_RXRING1, 0, procRegDir)))
|
|
+ procRxRing1->proc_fops = &rx_ring1_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procRxRing1 = proc_create(PROCREG_RXRING1, 0, procRegDir, &rx_ring1_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_RXRING1);
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procRxRing2 = create_proc_entry(PROCREG_RXRING2, 0, procRegDir)))
|
|
+ procRxRing2->proc_fops = &rx_ring2_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procRxRing2 = proc_create(PROCREG_RXRING2, 0, procRegDir, &rx_ring2_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_RXRING2);
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procRxRing3 = create_proc_entry(PROCREG_RXRING3, 0, procRegDir)))
|
|
+ procRxRing3->proc_fops = &rx_ring3_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procRxRing3 = proc_create(PROCREG_RXRING3, 0, procRegDir, &rx_ring3_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_RXRING3);
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+
|
|
+#if defined (CONFIG_MIPS)
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procSysCP0 = create_proc_entry(PROCREG_CP0, 0, procRegDir)))
|
|
+ procSysCP0->proc_fops = &cp0_reg_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procSysCP0 = proc_create(PROCREG_CP0, 0, procRegDir, &cp0_reg_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_CP0);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_TSO)
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procNumOfTxd = create_proc_entry(PROCREG_NUM_OF_TXD, 0, procRegDir)))
|
|
+ procNumOfTxd->proc_fops = &tso_txd_num_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procNumOfTxd = proc_create(PROCREG_NUM_OF_TXD, 0, procRegDir, &tso_txd_num_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_NUM_OF_TXD);
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procTsoLen = create_proc_entry(PROCREG_TSO_LEN, 0, procRegDir)))
|
|
+ procTsoLen->proc_fops = &tso_len_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procTsoLen = proc_create(PROCREG_TSO_LEN, 0, procRegDir, &tso_len_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_TSO_LEN);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_LRO)
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procLroStats = create_proc_entry(PROCREG_LRO_STATS, 0, procRegDir)))
|
|
+ procLroStats->proc_fops = &lro_stats_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procLroStats = proc_create(PROCREG_LRO_STATS, 0, procRegDir, &lro_stats_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_LRO_STATS);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_HW_LRO)
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procHwLroStats = create_proc_entry(PROCREG_HW_LRO_STATS, 0, procRegDir)))
|
|
+ procHwLroStats->proc_fops = &hw_lro_stats_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procHwLroStats = proc_create(PROCREG_HW_LRO_STATS, 0, procRegDir, &hw_lro_stats_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_STATS);
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procHwLroAutoTlb = create_proc_entry(PROCREG_HW_LRO_AUTO_TLB, 0, procRegDir)))
|
|
+ procHwLroAutoTlb->proc_fops = &hw_lro_auto_tlb_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procHwLroAutoTlb = proc_create(PROCREG_HW_LRO_AUTO_TLB, 0, procRegDir, &hw_lro_auto_tlb_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_HW_LRO_AUTO_TLB);
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+
|
|
+#if defined(CONFIG_RAETH_QOS)
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procRaQOS = create_proc_entry(PROCREG_RAQOS, 0, procRegDir)))
|
|
+ procRaQOS->proc_fops = &raeth_qos_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procRaQOS = proc_create(PROCREG_RAQOS, 0, procRegDir, &raeth_qos_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_RAQOS);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_USER_SNMPD)
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procRaSnmp = create_proc_entry(PROCREG_SNMP, S_IRUGO, procRegDir)))
|
|
+ procRaSnmp->proc_fops = &ra_snmp_seq_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procRaSnmp = proc_create(PROCREG_SNMP, S_IRUGO, procRegDir, &ra_snmp_seq_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_SNMP);
|
|
+#endif
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procEswCnt = create_proc_entry(PROCREG_ESW_CNT, 0, procRegDir)))
|
|
+ procEswCnt->proc_fops = &switch_count_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procEswCnt = proc_create(PROCREG_ESW_CNT, 0, procRegDir, &switch_count_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_ESW_CNT);
|
|
+
|
|
+#if defined (TASKLET_WORKQUEUE_SW)
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procSCHE = create_proc_entry(PROCREG_SCHE, 0, procRegDir)))
|
|
+ procSCHE->proc_fops = &schedule_sw_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procSCHE = proc_create(PROCREG_SCHE, 0, procRegDir, &schedule_sw_fops)))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_SCHE);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
|
|
+ if ((procPdmaDvt = create_proc_entry(PROCREG_PDMA_DVT, 0, procRegDir)))
|
|
+ procPdmaDvt->proc_fops = &pdma_dev_sw_fops;
|
|
+ else
|
|
+#else
|
|
+ if (!(procPdmaDvt = proc_create(PROCREG_PDMA_DVT, 0, procRegDir, &pdma_dev_sw_fops )))
|
|
+#endif
|
|
+ printk("!! FAIL to create %s PROC !!\n", PROCREG_PDMA_DVT);
|
|
+#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+
|
|
+ printk(KERN_ALERT "PROC INIT OK!\n");
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void debug_proc_exit(void)
|
|
+{
|
|
+
|
|
+ if (procSysCP0)
|
|
+ remove_proc_entry(PROCREG_CP0, procRegDir);
|
|
+
|
|
+ if (procGmac)
|
|
+ remove_proc_entry(PROCREG_GMAC, procRegDir);
|
|
+#if defined(CONFIG_PSEUDO_SUPPORT) && defined(CONFIG_ETHTOOL)
|
|
+	if (procGmac2)
|
|
+		remove_proc_entry(PROCREG_GMAC2, procRegDir);
|
|
+#endif
|
|
+ if (procSkbFree)
|
|
+ remove_proc_entry(PROCREG_SKBFREE, procRegDir);
|
|
+
|
|
+ if (procTxRing)
|
|
+ remove_proc_entry(PROCREG_TXRING, procRegDir);
|
|
+
|
|
+ if (procRxRing)
|
|
+ remove_proc_entry(PROCREG_RXRING, procRegDir);
|
|
+
|
|
+#if defined(CONFIG_RAETH_TSO)
|
|
+ if (procNumOfTxd)
|
|
+ remove_proc_entry(PROCREG_NUM_OF_TXD, procRegDir);
|
|
+
|
|
+ if (procTsoLen)
|
|
+ remove_proc_entry(PROCREG_TSO_LEN, procRegDir);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_LRO)
|
|
+ if (procLroStats)
|
|
+ remove_proc_entry(PROCREG_LRO_STATS, procRegDir);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_QOS)
|
|
+ if (procRaQOS)
|
|
+ remove_proc_entry(PROCREG_RAQOS, procRegDir);
|
|
+ if (procRaFeIntr)
|
|
+ remove_proc_entry(PROCREG_RXDONE_INTR, procRegDir);
|
|
+ if (procRaEswIntr)
|
|
+ remove_proc_entry(PROCREG_ESW_INTR, procRegDir);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_USER_SNMPD)
|
|
+ if (procRaSnmp)
|
|
+ remove_proc_entry(PROCREG_SNMP, procRegDir);
|
|
+#endif
|
|
+
|
|
+ if (procEswCnt)
|
|
+ remove_proc_entry(PROCREG_ESW_CNT, procRegDir);
|
|
+
|
|
+ //if (procRegDir)
|
|
+ //remove_proc_entry(PROCREG_DIR, 0);
|
|
+
|
|
+ printk(KERN_ALERT "proc exit\n");
|
|
+}
|
|
+EXPORT_SYMBOL(procRegDir);
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/ra_mac.h
|
|
@@ -0,0 +1,57 @@
|
|
+#ifndef RA_MAC_H
|
|
+#define RA_MAC_H
|
|
+
|
|
+void ra2880stop(END_DEVICE *ei_local);
|
|
+void ra2880MacAddressSet(unsigned char p[6]);
|
|
+void ra2880Mac2AddressSet(unsigned char p[6]);
|
|
+void ethtool_init(struct net_device *dev);
|
|
+
|
|
+void ra2880EnableInterrupt(void);
|
|
+
|
|
+void dump_qos(struct seq_file *s);
|
|
+void dump_reg(struct seq_file *s);
|
|
+void dump_cp0(void);
|
|
+
|
|
+int debug_proc_init(void);
|
|
+void debug_proc_exit(void);
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621)
|
|
+void enable_auto_negotiate(int unused);
|
|
+#else
|
|
+void enable_auto_negotiate(int ge);
|
|
+#endif
|
|
+
|
|
+void rt2880_gmac_hard_reset(void);
|
|
+
|
|
+int TsoLenUpdate(int tso_len);
|
|
+int NumOfTxdUpdate(int num_of_txd);
|
|
+
|
|
+#ifdef CONFIG_RAETH_LRO
|
|
+int LroStatsUpdate(struct net_lro_mgr *lro_mgr, bool all_flushed);
|
|
+#endif
|
|
+#ifdef CONFIG_RAETH_HW_LRO
|
|
+int HwLroStatsUpdate(unsigned int ring_num, unsigned int agg_cnt, unsigned int agg_size);
|
|
+#if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
|
|
+#define HW_LRO_AGG_FLUSH (1)
|
|
+#define HW_LRO_AGE_FLUSH (2)
|
|
+#define HW_LRO_NOT_IN_SEQ_FLUSH (3)
|
|
+#define HW_LRO_TIMESTAMP_FLUSH (4)
|
|
+#define HW_LRO_NON_RULE_FLUSH (5)
|
|
+int HwLroFlushStatsUpdate(unsigned int ring_num, unsigned int flush_reason);
|
|
+#endif /* CONFIG_RAETH_HW_LRO_REASON_DBG */
|
|
+typedef int (*HWLRO_DBG_FUNC)(int par1, int par2);
|
|
+int hwlro_agg_cnt_ctrl(int par1, int par2);
|
|
+int hwlro_agg_time_ctrl(int par1, int par2);
|
|
+int hwlro_age_time_ctrl(int par1, int par2);
|
|
+int hwlro_pkt_int_alpha_ctrl(int par1, int par2);
|
|
+int hwlro_threshold_ctrl(int par1, int par2);
|
|
+int hwlro_fix_setting_switch_ctrl(int par1, int par2);
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+int getnext(const char *src, int separator, char *dest);
|
|
+int str_to_ip(unsigned int *ip, const char *str);
|
|
+
|
|
+#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+typedef int (*PDMA_DBG_FUNC)(int par1, int par2);
|
|
+#endif //#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+#endif
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/ra_netlink.c
|
|
@@ -0,0 +1,142 @@
|
|
+// for netlink header
|
|
+#include <asm/types.h>
|
|
+#include <net/sock.h>
|
|
+#include <linux/socket.h>
|
|
+#include <linux/netlink.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/net.h>
|
|
+#include <linux/version.h>
|
|
+
|
|
+#include "csr_netlink.h"
|
|
+#include "ra2882ethreg.h"
|
|
+#include "ra_netlink.h"
|
|
+
|
|
+static struct sock *csr_msg_socket = NULL;	// netlink socket used to exchange CSR messages with user space
|
|
+unsigned int flags;
|
|
+
|
|
+void rt2880_csr_receiver(struct sock *sk, int len)
|
|
+{
|
|
+ struct sk_buff *skb;
|
|
+ int err;
|
|
+ struct nlmsghdr *nlh;
|
|
+ unsigned int reg_value = 0;
|
|
+ CSR_MSG *csrmsg;
|
|
+ RAETH_PRINT("csr netlink receiver!\n");
|
|
+ skb = skb_recv_datagram(sk, 0, 1, &err);
|
|
+
|
|
+ RAETH_PRINT("error no : %d\n", err);
|
|
+
|
|
+ if (skb == NULL) {
|
|
+ printk("rt2880_csr_receiver(): No data received, error!\n");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ nlh = (struct nlmsghdr*)skb->data;
|
|
+
|
|
+ csrmsg = NLMSG_DATA(nlh);
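+	/* csrmsg->enable selects the operation (CSR_READ/CSR_WRITE/CSR_TEST);
+	 * the value read back is returned to user space below through
+	 * rt2880_csr_msgsend(). */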
|
|
+
|
|
+ if (csrmsg->enable == CSR_READ ) {
|
|
+ reg_value = sysRegRead(csrmsg->address);
|
|
+#if 0
|
|
+ printk("raeth -- 0x%08x: 0x%08x\n", csrmsg->address, reg_value);
|
|
+#endif
|
|
+ } else if ( csrmsg->enable == CSR_WRITE ) {
|
|
+ sysRegWrite(csrmsg->address, csrmsg->default_value);
|
|
+ reg_value = sysRegRead(csrmsg->address);
|
|
+ } else if ( csrmsg->enable == CSR_TEST ) {
|
|
+ reg_value = sysRegRead(csrmsg->address);
|
|
+ printk("0x%08x: 0x%08x\n", (unsigned int)csrmsg->address, reg_value);
|
|
+ }
|
|
+ else
|
|
+ printk("drv: Command format error!\n");
|
|
+
|
|
+ csrmsg->default_value = reg_value;
|
|
+
|
|
+ RAETH_PRINT("drv: rt2880_csr_msgsend() - msg to send!\n");
|
|
+
|
|
+ err = rt2880_csr_msgsend(csrmsg);
|
|
+
|
|
+ if ( err == -2 )
|
|
+ printk("drv: msg send error!\n");
|
|
+
|
|
+ skb_free_datagram(sk, skb);
|
|
+}
|
|
+
|
|
+int rt2880_csr_msgsend(CSR_MSG* csrmsg)
|
|
+{
|
|
+ struct sk_buff *skb;
|
|
+ struct nlmsghdr *nlh = NULL;
|
|
+ size_t size = 0;
|
|
+ struct sock *send_syncnl = csr_msg_socket;
|
|
+
|
|
+ CSR_MSG* csr_reg;
|
|
+ if (send_syncnl == NULL) {
|
|
+ printk("drv: netlink_kernel_create() failed!\n");
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ size = NLMSG_SPACE(sizeof(CSR_MSG));
|
|
+ skb = alloc_skb(size, GFP_ATOMIC);
|
|
+
|
|
+ if(!skb)
|
|
+ {
|
|
+ printk("rt2880_csr_msgsend() : error! msg structure not available\n");
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ nlh = NLMSG_PUT(skb, 0, 0, RALINK_CSR_GROUP, size - sizeof(struct nlmsghdr));
|
|
+
|
|
+ if (!nlh)
|
|
+ {
|
|
+ printk("rt2880_csr_msgsend() : error! nlh structure not available\n");
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ csr_reg = NLMSG_DATA(nlh);
|
|
+ if (!csr_reg)
|
|
+ {
|
|
+		printk("rt2880_csr_msgsend() : error! csr_reg data not available\n");
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ csr_reg->address = csrmsg->address;
|
|
+ csr_reg->default_value = csrmsg->default_value;
|
|
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
|
|
+ NETLINK_CB(skb).dst_group = RALINK_CSR_GROUP;
|
|
+#else
|
|
+ NETLINK_CB(skb).dst_groups = RALINK_CSR_GROUP;
|
|
+#endif
|
|
+ netlink_broadcast(send_syncnl, skb, 0, RALINK_CSR_GROUP, GFP_ATOMIC);
|
|
+ return 0;
|
|
+
|
|
+nlmsg_failure:
|
|
+ return -2;
|
|
+}
|
|
+
|
|
+int csr_netlink_init()
|
|
+{
|
|
+
|
|
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
|
|
+ csr_msg_socket = netlink_kernel_create(NETLINK_CSR, RALINK_CSR_GROUP, rt2880_csr_receiver, THIS_MODULE);
|
|
+#else
|
|
+ csr_msg_socket = netlink_kernel_create(NETLINK_CSR, rt2880_csr_receiver);
|
|
+#endif
|
|
+
|
|
+ if ( csr_msg_socket == NULL )
|
|
+ printk("unable to create netlink socket!\n");
|
|
+ else
|
|
+ printk("Netlink init ok!\n");
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void csr_netlink_end()
|
|
+{
|
|
+ if (csr_msg_socket != NULL){
|
|
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
|
|
+ sock_release(csr_msg_socket->sk_socket);
|
|
+#else
|
|
+ sock_release(csr_msg_socket->socket);
|
|
+#endif
|
|
+ printk("Netlink end...\n");
|
|
+ }
|
|
+}
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/ra_netlink.h
|
|
@@ -0,0 +1,10 @@
|
|
+#ifndef RA_NETLINK
|
|
+#define RA_NETLINK
|
|
+
|
|
+#include "csr_netlink.h"
|
|
+int rt2880_csr_msgsend(CSR_MSG* csrmsg);
|
|
+void rt2880_csr_receiver(struct sock *sk, int len);
|
|
+int csr_netlink_init(void);
|
|
+void csr_netlink_end(void);
|
|
+
|
|
+#endif
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/ra_qos.c
|
|
@@ -0,0 +1,655 @@
|
|
+#include <asm/io.h>
|
|
+#include <linux/pci.h>
|
|
+#include <linux/netdevice.h>
|
|
+#include <linux/etherdevice.h>
|
|
+#include <linux/net.h>
|
|
+#include <linux/in.h>
|
|
+#include "ra_qos.h"
|
|
+#include "raether.h"
|
|
+#include "ra2882ethreg.h"
|
|
+
|
|
+#include <asm/types.h>
|
|
+#include <net/sock.h>
|
|
+#include <linux/socket.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/net.h>
|
|
+#include <linux/if_vlan.h>
|
|
+#include <linux/ip.h>
|
|
+
|
|
+
|
|
+#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
|
|
+#include "../../../net/nat/hw_nat/ra_nat.h"
|
|
+#endif
|
|
+
|
|
+#define CONTI_TX_SEND_MAX_SIZE 1440
|
|
+
|
|
+/*
|
|
+ * set tx queue # to descriptor
|
|
+ */
|
|
+void rt3052_tx_queue_init(unsigned long data)
|
|
+{
|
|
+ /* define qos p */
|
|
+
|
|
+}
|
|
+
|
|
+void rt3052_pse_port0_fc_clear(unsigned long data)
|
|
+{
|
|
+ /* clear FE_INT_STATUS.PSE_P0_FC */
|
|
+
|
|
+}
|
|
+
|
|
+inline int get_tx_ctx_idx(unsigned int ring_no, unsigned long *idx)
|
|
+{
|
|
+ switch (ring_no) {
|
|
+ case RING0:
|
|
+ *idx = *(unsigned long*)TX_CTX_IDX0;
|
|
+ break;
|
|
+ case RING1:
|
|
+ *idx = *(unsigned long*)TX_CTX_IDX1;
|
|
+ break;
|
|
+ case RING2:
|
|
+ *idx = *(unsigned long*)TX_CTX_IDX2;
|
|
+ break;
|
|
+ case RING3:
|
|
+ *idx = *(unsigned long*)TX_CTX_IDX3;
|
|
+ break;
|
|
+ default:
|
|
+			printk("get_tx_ctx_idx error\n");
|
|
+ return -1;
|
|
+ };
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+inline int set_tx_ctx_idx(unsigned int ring_no, unsigned int idx)
|
|
+{
|
|
+ switch (ring_no ) {
|
|
+ case RING0:
|
|
+ *(unsigned long*)TX_CTX_IDX0 = cpu_to_le32((u32)idx);
|
|
+ break;
|
|
+ case RING1:
|
|
+ *(unsigned long*)TX_CTX_IDX1 = cpu_to_le32((u32)idx);
|
|
+ break;
|
|
+ case RING2:
|
|
+ *(unsigned long*)TX_CTX_IDX2 = cpu_to_le32((u32)idx);
|
|
+ break;
|
|
+ case RING3:
|
|
+ *(unsigned long*)TX_CTX_IDX3 = cpu_to_le32((u32)idx);
|
|
+ break;
|
|
+ default:
|
|
+			printk("set_tx_ctx_idx error\n");
|
|
+ return -1;
|
|
+ };
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+void get_tx_desc_and_dtx_idx(END_DEVICE* ei_local, int ring_no, unsigned long *tx_dtx_idx, struct PDMA_txdesc **tx_desc)
|
|
+{
|
|
+ switch (ring_no) {
|
|
+ case RING0:
|
|
+ *tx_desc = ei_local->tx_ring0;
|
|
+ *tx_dtx_idx = *(unsigned long*)TX_DTX_IDX0;
|
|
+ break;
|
|
+ case RING1:
|
|
+ *tx_desc = ei_local->tx_ring1;
|
|
+ *tx_dtx_idx = *(unsigned long*)TX_DTX_IDX1;
|
|
+ break;
|
|
+ case RING2:
|
|
+ *tx_desc = ei_local->tx_ring2;
|
|
+ *tx_dtx_idx = *(unsigned long*)TX_DTX_IDX2;
|
|
+ break;
|
|
+ case RING3:
|
|
+ *tx_desc = ei_local->tx_ring3;
|
|
+ *tx_dtx_idx = *(unsigned long*)TX_DTX_IDX3;
|
|
+ break;
|
|
+ default:
|
|
+ printk("ring_no input error... %d\n", ring_no);
|
|
+ };
|
|
+}
|
|
+
|
|
+int fe_qos_packet_send(struct net_device *dev, struct sk_buff* skb, unsigned int ring_no, unsigned int qn, unsigned pn)
|
|
+{
|
|
+ END_DEVICE* ei_local = netdev_priv(dev);
|
|
+ struct PDMA_txdesc* tx_desc;
|
|
+ unsigned int tx_cpu_owner_idx, tx_dtx_idx;
|
|
+
|
|
+ unsigned int length=skb->len;
|
|
+ int ret;
|
|
+ unsigned long flags;
|
|
+
|
|
+ //printk("fe_qos_packet_send: ring_no=%d qn=%d pn=%d\n", ring_no, qn, pn);
|
|
+
|
|
+ switch ( ring_no ) {
|
|
+ case 0:
|
|
+ tx_desc = ei_local->tx_ring0;
|
|
+ tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX0;
|
|
+ tx_dtx_idx = *(unsigned long*)TX_DTX_IDX0;
|
|
+ break;
|
|
+ case 1:
|
|
+ tx_desc = ei_local->tx_ring1;
|
|
+ tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX1;
|
|
+ tx_dtx_idx = *(unsigned long*)TX_DTX_IDX1;
|
|
+ break;
|
|
+ case 2:
|
|
+ tx_desc = ei_local->tx_ring2;
|
|
+ tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX2;
|
|
+ tx_dtx_idx = *(unsigned long*)TX_DTX_IDX2;
|
|
+ break;
|
|
+ case 3:
|
|
+ tx_desc = ei_local->tx_ring3;
|
|
+ tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX3;
|
|
+ tx_dtx_idx = *(unsigned long*)TX_DTX_IDX3;
|
|
+ break;
|
|
+ default:
|
|
+ printk("ring_no input error... %d\n", ring_no);
|
|
+ return -1;
|
|
+ };
|
|
+
|
|
+ //printk("tx_cpu_owner_idx=%d tx_dtx_idx=%d\n", tx_cpu_owner_idx, tx_dtx_idx);
|
|
+
|
|
+ if(tx_desc == NULL) {
|
|
+ printk("%s : txdesc is NULL\n", dev->name);
|
|
+ return -1;
|
|
+ }
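+	/* Fill the descriptor: SDP0/SDL0 point at the frame, clearing DDONE
+	 * hands the descriptor to the PDMA engine, and PN/QN select the
+	 * destination port and egress queue. */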
|
|
+
|
|
+ tx_desc[tx_cpu_owner_idx].txd_info1.SDP0 = virt_to_phys(skb->data);
|
|
+ tx_desc[tx_cpu_owner_idx].txd_info2.SDL0 = length;
|
|
+ tx_desc[tx_cpu_owner_idx].txd_info2.DDONE_bit = 0;
|
|
+ tx_desc[tx_cpu_owner_idx].txd_info4.PN = pn;
|
|
+ tx_desc[tx_cpu_owner_idx].txd_info4.QN = qn;
|
|
+
|
|
+#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
|
|
+	tx_desc[tx_cpu_owner_idx].txd_info4.TCO = 1;
|
|
+	tx_desc[tx_cpu_owner_idx].txd_info4.UCO = 1;
|
|
+	tx_desc[tx_cpu_owner_idx].txd_info4.ICO = 1;
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
|
|
+ if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
|
|
+ tx_desc[tx_cpu_owner_idx].txd_info4.PN = 6; /* PPE */
|
|
+ } else {
|
|
+ tx_desc[tx_cpu_owner_idx].txd_info4.PN = pn;
|
|
+ }
|
|
+
|
|
+#endif
|
|
+
|
|
+ spin_lock_irqsave(&ei_local->page_lock, flags);
|
|
+ ei_local->skb_free[ring_no][tx_cpu_owner_idx] = skb;
|
|
+ tx_cpu_owner_idx = (tx_cpu_owner_idx +1) % NUM_TX_DESC;
|
|
+ ret = set_tx_ctx_idx(ring_no, tx_cpu_owner_idx);
|
|
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
|
|
+
|
|
+ ei_local->stat.tx_packets++;
|
|
+ ei_local->stat.tx_bytes += length;
|
|
+
|
|
+#ifdef CONFIG_RAETH_NAPI
|
|
+ switch ( ring_no ) {
|
|
+ case 0:
|
|
+ if ( ei_local->tx0_full == 1) {
|
|
+ ei_local->tx0_full = 0;
|
|
+ netif_wake_queue(dev);
|
|
+ }
|
|
+ break;
|
|
+ case 1:
|
|
+ if ( ei_local->tx1_full == 1) {
|
|
+ ei_local->tx1_full = 0;
|
|
+ netif_wake_queue(dev);
|
|
+ }
|
|
+ break;
|
|
+ case 2:
|
|
+ if ( ei_local->tx2_full == 1) {
|
|
+ ei_local->tx2_full = 0;
|
|
+ netif_wake_queue(dev);
|
|
+ }
|
|
+ break;
|
|
+ case 3:
|
|
+ if ( ei_local->tx3_full == 1) {
|
|
+ ei_local->tx3_full = 0;
|
|
+ netif_wake_queue(dev);
|
|
+ }
|
|
+ break;
|
|
+ default :
|
|
+ printk("ring_no input error %d\n", ring_no);
|
|
+ };
|
|
+#endif
|
|
+ return length;
|
|
+}
|
|
+
|
|
+int fe_tx_desc_init(struct net_device *dev, unsigned int ring_no, unsigned int qn, unsigned int pn)
|
|
+{
|
|
+ END_DEVICE* ei_local = netdev_priv(dev);
|
|
+ struct PDMA_txdesc *tx_desc;
|
|
+ unsigned int tx_cpu_owner_idx = 0;
|
|
+ int i;
|
|
+ unsigned int phy_tx_ring;
|
|
+
|
|
+ // sanity check
|
|
+ if ( ring_no > 3 ){
|
|
+		printk("%s : ring_no %d is invalid, must be 0~3\n", dev->name, ring_no);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ if ( pn > 2 ){
|
|
+		printk("%s : pn %d is invalid, must be 0~2\n", dev->name, pn);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ tx_desc = pci_alloc_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), &phy_tx_ring);
|
|
+ ei_local->tx_cpu_owner_idx0 = tx_cpu_owner_idx;
|
|
+
|
|
+ switch (ring_no) {
|
|
+ case 0:
|
|
+ ei_local->tx_ring0 = tx_desc;
|
|
+ ei_local->phy_tx_ring0 = phy_tx_ring;
|
|
+ break;
|
|
+ case 1:
|
|
+ ei_local->phy_tx_ring1 = phy_tx_ring;
|
|
+ ei_local->tx_ring1 = tx_desc;
|
|
+ break;
|
|
+ case 2:
|
|
+ ei_local->phy_tx_ring2 = phy_tx_ring;
|
|
+ ei_local->tx_ring2 = tx_desc;
|
|
+ break;
|
|
+ case 3:
|
|
+ ei_local->phy_tx_ring3 = phy_tx_ring;
|
|
+ ei_local->tx_ring3 = tx_desc;
|
|
+ break;
|
|
+ default:
|
|
+ printk("ring_no input error! %d\n", ring_no);
|
|
+ pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), tx_desc, phy_tx_ring);
|
|
+ return 0;
|
|
+ };
|
|
+
|
|
+ if ( tx_desc == NULL)
|
|
+ {
|
|
+ printk("tx desc allocation failed!\n");
|
|
+ return 0;
|
|
+ }
|
|
+
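+	/* Pre-mark every descriptor as last-segment (LS0) and done (DDONE) so
+	 * the DMA engine sees the whole ring as free at start-up. */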
|
|
+ for( i = 0; i < NUM_TX_DESC; i++) {
|
|
+ memset( &tx_desc[i], 0, sizeof(struct PDMA_txdesc));
|
|
+ tx_desc[i].txd_info2.LS0_bit = 1;
|
|
+ tx_desc[i].txd_info2.DDONE_bit = 1;
|
|
+ tx_desc[i].txd_info4.PN = pn;
|
|
+ tx_desc[i].txd_info4.QN = qn;
|
|
+ }
|
|
+
|
|
+ switch ( ring_no ) {
|
|
+ case 0 :
|
|
+ *(unsigned long*)TX_BASE_PTR0 = phys_to_bus((u32) phy_tx_ring);
|
|
+ *(unsigned long*)TX_MAX_CNT0 = cpu_to_le32((u32)NUM_TX_DESC);
|
|
+ *(unsigned long*)TX_CTX_IDX0 = cpu_to_le32((u32) tx_cpu_owner_idx);
|
|
+ sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX0);
|
|
+ break;
|
|
+ case 1 :
|
|
+ *(unsigned long*)TX_BASE_PTR1 = phys_to_bus((u32) phy_tx_ring);
|
|
+ *(unsigned long*)TX_MAX_CNT1 = cpu_to_le32((u32)NUM_TX_DESC);
|
|
+ *(unsigned long*)TX_CTX_IDX1 = cpu_to_le32((u32) tx_cpu_owner_idx);
|
|
+ sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX1);
|
|
+ break;
|
|
+ case 2 :
|
|
+ *(unsigned long*)TX_BASE_PTR2 = phys_to_bus((u32) phy_tx_ring);
|
|
+ *(unsigned long*)TX_MAX_CNT2 = cpu_to_le32((u32)NUM_TX_DESC);
|
|
+ *(unsigned long*)TX_CTX_IDX2 = cpu_to_le32((u32) tx_cpu_owner_idx);
|
|
+ sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX2);
|
|
+ break;
|
|
+ case 3 :
|
|
+ *(unsigned long*)TX_BASE_PTR3 = phys_to_bus((u32) phy_tx_ring);
|
|
+ *(unsigned long*)TX_MAX_CNT3 = cpu_to_le32((u32)NUM_TX_DESC);
|
|
+ *(unsigned long*)TX_CTX_IDX3 = cpu_to_le32((u32) tx_cpu_owner_idx);
|
|
+ sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX3);
|
|
+ break;
|
|
+ default :
|
|
+ printk("tx descriptor init failed %d\n", ring_no);
|
|
+ return 0;
|
|
+ };
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+/*
|
|
+ DSCP | AC | WMM_AC (Access Category)
|
|
+ ------+----+--------
|
|
+ 00-07| 1 | BE
|
|
+ 24-31| 1 | BE
|
|
+ 08-15| 0 | BG
|
|
+ 16-23| 0 | BG
|
|
+ 32-39| 2 | VI
|
|
+ 40-47| 2 | VI
|
|
+ 48-55| 3 | VO
|
|
+ 56-63| 3 | VO
|
|
+
|
|
+ | TOS |
|
|
+ DSCP |(bit5~bit7)| WMM
|
|
+ -------+-----------+-------
|
|
+ 0x00 | 000 | BE
|
|
+ 0x18 | 011 | BE
|
|
+ 0x08 | 001 | BG
|
|
+ 0x10 | 010 | BG
|
|
+ 0x20 | 100 | VI
|
|
+ 0x28 | 101 | VI
|
|
+ 0x30 | 110 | VO
|
|
+ 0x38 | 111 | VO
|
|
+
|
|
+	 Note: BE should map to AC1, but the Linux kernel maps it to AC0.
|
|
+
|
|
+ */
|
|
+
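+/* Worked example (DSCP-based mode): TOS 0xb8 (DSCP EF) has TOS[7:5] = 5, so
+ * DscpToAcMap[5] = 2 (VI); an untagged frame counts as bridge traffic and,
+ * with the RT3052-class AcToRing_BridgeMap below, is sent on ring 2. */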
|
|
+int pkt_classifier(struct sk_buff *skb,int gmac_no, int *ring_no, int *queue_no, int *port_no)
|
|
+{
|
|
+#if defined(CONFIG_RALINK_RT2880)
|
|
+ /* RT2880 -- Assume using 1 Ring (Ring0), Queue 0, and Port 0 */
|
|
+ *port_no = 0;
|
|
+ *ring_no = 0;
|
|
+ *queue_no = 0;
|
|
+#else
|
|
+ unsigned int ac=0;
|
|
+ unsigned int bridge_traffic=0, lan_traffic=0;
|
|
+ struct iphdr *iph=NULL;
|
|
+ struct vlan_ethhdr *veth=NULL;
|
|
+ unsigned int vlan_id=0;
|
|
+#if defined (CONFIG_RAETH_QOS_DSCP_BASED)
|
|
+ static char DscpToAcMap[8]={1,0,0,1,2,2,3,3};
|
|
+#elif defined (CONFIG_RAETH_QOS_VPRI_BASED)
|
|
+ static char VlanPriToAcMap[8]={1,0,0,1,2,2,3,3};
|
|
+#endif
|
|
+
|
|
+ /* Bridge:: {BG,BE,VI,VO} */
|
|
+ /* GateWay:: WAN: {BG,BE,VI,VO}, LAN: {BG,BE,VI,VO} */
|
|
+#if defined (CONFIG_RALINK_RT3883) && defined (CONFIG_RAETH_GMAC2)
|
|
+ /*
|
|
+ * 1) Bridge:
|
|
+ * 1.1) GMAC1 ONLY:
|
|
+ * VO/VI->Ring3, BG/BE->Ring2
|
|
+ * 1.2) GMAC1+GMAC2:
|
|
+ * GMAC1:: VO/VI->Ring3, BG/BE->Ring2
|
|
+ * GMAC2:: VO/VI->Ring1, BG/BE->Ring0
|
|
+ * 2) GateWay:
|
|
+ * 2.1) GMAC1 ONLY:
|
|
+ * GMAC1:: LAN:VI/VO->Ring2, BE/BK->Ring2
|
|
+ * WAN:VI/VO->Ring3, BE/BK->Ring3
|
|
+ * 2.2)GMAC1+GMAC2:
|
|
+ * GMAC1:: LAN:VI/VO/BE/BK->Ring2, WAN:VI/VO/BE/BK->Ring3
|
|
+ * GMAC2:: VI/VO->Ring1, BE/BK->Ring0
|
|
+ */
|
|
+ static unsigned char AcToRing_BridgeMap[4] = {2, 2, 3, 3};
|
|
+ static unsigned char AcToRing_GE1Map[2][4] = {{3, 3, 3, 3},{2, 2, 2, 2}};
|
|
+ static unsigned char AcToRing_GE2Map[4] = {0, 0, 1, 1};
|
|
+#elif defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT2883) || \
|
|
+ defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || \
|
|
+ defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
|
|
+ defined (CONFIG_RALINK_MT7628) || \
|
|
+ (defined (CONFIG_RALINK_RT3883) && !defined(CONFIG_RAETH_GMAC2))
|
|
+ /*
|
|
+ * 1) Bridge: VO->Ring3, VI->Ring2, BG->Ring1, BE->Ring0
|
|
+ * 2) GateWay:
|
|
+ * 2.1) GMAC1:: LAN:VI/VO->Ring1, BE/BK->Ring0
|
|
+ * WAN:VI/VO->Ring3, BE/BK->Ring2
|
|
+ */
|
|
+ static unsigned char AcToRing_BridgeMap[4] = {0, 1, 2, 3};
|
|
+ static unsigned char AcToRing_GE1Map[2][4] = {{2, 2, 3, 3},{0, 0, 1, 1}};
|
|
+#endif // CONFIG_RALINK_RT2883
|
|
+
|
|
+ /*
|
|
+ * Set queue no - QN field in TX Descriptor
|
|
+ * always use queue 3 for the packet from CPU to GMAC
|
|
+ */
|
|
+ *queue_no = 3;
|
|
+
|
|
+ /* Get access category */
|
|
+ veth = (struct vlan_ethhdr *)(skb->data);
|
|
+ if(veth->h_vlan_proto == htons(ETH_P_8021Q)) { // VLAN traffic
|
|
+ iph= (struct iphdr *)(skb->data + VLAN_ETH_HLEN);
|
|
+
|
|
+		vlan_id = ntohs(veth->h_vlan_TCI) & VLAN_VID_MASK;
|
|
+ if(vlan_id==1) { //LAN
|
|
+ lan_traffic = 1;
|
|
+ } else { //WAN
|
|
+ lan_traffic = 0;
|
|
+ }
|
|
+
|
|
+ if (veth->h_vlan_encapsulated_proto == htons(ETH_P_IP)) { //IPv4
|
|
+#if defined (CONFIG_RAETH_QOS_DSCP_BASED)
|
|
+ ac = DscpToAcMap[(iph->tos & 0xe0) >> 5];
|
|
+#elif defined (CONFIG_RAETH_QOS_VPRI_BASED)
|
|
+ ac = VlanPriToAcMap[skb->priority];
|
|
+#endif
|
|
+ }else { //Ipv6, ARP ...etc
|
|
+ ac = 0;
|
|
+ }
|
|
+ }else { // non-VLAN traffic
|
|
+ if (veth->h_vlan_proto == htons(ETH_P_IP)) { //IPv4
|
|
+#if defined (CONFIG_RAETH_QOS_DSCP_BASED)
|
|
+ iph= (struct iphdr *)(skb->data + ETH_HLEN);
|
|
+ ac = DscpToAcMap[(iph->tos & 0xe0) >> 5];
|
|
+#elif defined (CONFIG_RAETH_QOS_VPRI_BASED)
|
|
+ ac= VlanPriToAcMap[skb->priority];
|
|
+#endif
|
|
+ }else { // IPv6, ARP ...etc
|
|
+ ac = 0;
|
|
+ }
|
|
+
|
|
+ bridge_traffic=1;
|
|
+ }
|
|
+
|
|
+
|
|
+ /* Set Tx Ring no */
|
|
+ if(gmac_no==1) { //GMAC1
|
|
+ if(bridge_traffic) { //Bridge Mode
|
|
+ *ring_no = AcToRing_BridgeMap[ac];
|
|
+ }else { //GateWay Mode
|
|
+ *ring_no = AcToRing_GE1Map[lan_traffic][ac];
|
|
+ }
|
|
+ }else { //GMAC2
|
|
+#if defined (CONFIG_RALINK_RT3883) && defined (CONFIG_RAETH_GMAC2)
|
|
+ *ring_no = AcToRing_GE2Map[ac];
|
|
+#endif
|
|
+ }
|
|
+
|
|
+
|
|
+ /* Set Port No - PN field in Tx Descriptor*/
|
|
+#if defined(CONFIG_RAETH_GMAC2)
|
|
+ *port_no = gmac_no;
|
|
+#else
|
|
+ if(bridge_traffic) {
|
|
+ *port_no = 1;
|
|
+ }else {
|
|
+ if(lan_traffic==1) { //LAN use VP1
|
|
+ *port_no = 1;
|
|
+ }else { //WAN use VP2
|
|
+ *port_no = 2;
|
|
+ }
|
|
+ }
|
|
+#endif // CONFIG_RAETH_GMAC2 //
|
|
+
|
|
+#endif
|
|
+
|
|
+ return 1;
|
|
+
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Routine Description :
|
|
+ *	Hi/Lo priority rings and queues definition for QoS purposes
|
|
+ *
|
|
+ *	Related registers (for details, refer to p. 106 of RT3052_DS_20080226.doc):
|
|
+ * Priority High/Low Definition - PDMA_FC_CFG, GDMA1_FC_CFG, GDMA2_FC_CFG
|
|
+ * Bit 28 - Allows high priority Q to share low priority Q's reserved pages
|
|
+ * Bit 27:24 - Px high priority definition bitmap
|
|
+ * Weight Configuration - GDMA1_SCH_CFG, GDMA2_SCH_CFG, PDMA_SCH_CFG -> default 3210
|
|
+ *
|
|
+ * Parameter:
|
|
+ * NONE
|
|
+ *
|
|
+*/
|
|
+#define PSE_P1_LQ_FULL (1<<2)
|
|
+#define PSE_P1_HQ_FULL (1<<3)
|
|
+#define PSE_P2_LQ_FULL (1<<4)
|
|
+#define PSE_P2_HQ_FULL (1<<5)
|
|
+
|
|
+#define HIGH_QUEUE(queue) (1<<(queue))
|
|
+#define LOW_QUEUE(queue) (0<<(queue))
|
|
+#define PAGES_SHARING (1<<28)
|
|
+#define RSEV_PAGE_COUNT_HQ 0x10 /* Reserved page count for high priority Q */
|
|
+#define RSEV_PAGE_COUNT_LQ 0x10 /* Reserved page count for low priority Q */
|
|
+#define VIQ_FC_ASRT 0x10 /* Virtual input Q FC assertion threshold */
|
|
+
|
|
+#define QUEUE_WEIGHT_1 0
|
|
+#define QUEUE_WEIGHT_2 1
|
|
+#define QUEUE_WEIGHT_4 2
|
|
+#define QUEUE_WEIGHT_8 3
|
|
+#define QUEUE_WEIGHT_16 4
|
|
+
|
|
+#define WRR_SCH 0 /*WRR */
|
|
+#define STRICT_PRI_SCH 1 /* Strict Priority */
|
|
+#define MIX_SCH 2 /* Mixed : Q3>WRR(Q2,Q1,Q0) */
|
|
+
|
|
+/*
|
|
+ * Ring3 Ring2 Ring1 Ring0
|
|
+ * | | | | | | | |
|
|
+ * | | | | | | | |
|
|
+ * --------------------------------
|
|
+ * | WRR Scheduler |
|
|
+ * --------------------------------
|
|
+ * |
|
|
+ * ---------------------------------------
|
|
+ * | PDMA |
|
|
+ * ---------------------------------------
|
|
+ * |Q3||Q2||Q1||Q0| |Q3||Q2||Q1||Q0|
|
|
+ * | || || || | | || || || |
|
|
+ * ------------------- -------------------
|
|
+ * | GDMA2 | | GDMA1 |
|
|
+ * ------------------- -------------------
|
|
+ * | |
|
|
+ * ------------------------------------
|
|
+ * | GMAC |
|
|
+ * ------------------------------------
|
|
+ * |
|
|
+ *
|
|
+ */
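+/*
+ * Editor's note (illustrative, not part of the original SDK sources): with
+ * the macros above, the GDMA1_SCH_CFG/GDMA2_SCH_CFG value programmed in
+ * set_scheduler_weight() below works out to
+ *
+ *   (WRR_SCH << 24) | (QUEUE_WEIGHT_16 << 12) | (QUEUE_WEIGHT_8 << 8) |
+ *   (QUEUE_WEIGHT_4 << 4) | (QUEUE_WEIGHT_2 << 0)
+ *   = (0 << 24) | (4 << 12) | (3 << 8) | (2 << 4) | (1 << 0) = 0x00004321
+ *
+ * i.e. WRR scheduling with queue weights Q3:Q2:Q1:Q0 = 16:8:4:2.
+ */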
|
|
+void set_scheduler_weight(void)
|
|
+{
|
|
+#if !defined (CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
|
|
+ /*
|
|
+ * STEP1: Queue scheduling configuration
|
|
+ */
|
|
+ *(unsigned long *)GDMA1_SCH_CFG = (WRR_SCH << 24) |
|
|
+ (QUEUE_WEIGHT_16 << 12) | /* queue 3 weight */
|
|
+ (QUEUE_WEIGHT_8 << 8) | /* queue 2 weight */
|
|
+ (QUEUE_WEIGHT_4 << 4) | /* queue 1 weight */
|
|
+ (QUEUE_WEIGHT_2 << 0); /* queue 0 weight */
|
|
+
|
|
+ *(unsigned long *)GDMA2_SCH_CFG = (WRR_SCH << 24) |
|
|
+ (QUEUE_WEIGHT_16 << 12) | /* queue 3 weight */
|
|
+ (QUEUE_WEIGHT_8 << 8) | /* queue 2 weight */
|
|
+ (QUEUE_WEIGHT_4 << 4) | /* queue 1 weight */
|
|
+ (QUEUE_WEIGHT_2 << 0); /* queue 0 weight */
|
|
+
|
|
+#endif
|
|
+ /*
|
|
+ * STEP2: Ring scheduling configuration
|
|
+ */
|
|
+#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621)
|
|
+ /* MIN_RATE_RATIO0=0, MAX_RATE_ULMT0=1, Weight0=1 */
|
|
+ *(unsigned long *)SCH_Q01_CFG = (0 << 10) | (1<<14) | (0 << 12);
|
|
+ /* MIN_RATE_RATIO1=0, MAX_RATE_ULMT1=1, Weight1=4 */
|
|
+ *(unsigned long *)SCH_Q01_CFG |= (0 << 26) | (1<<30) | (2 << 28);
|
|
+
|
|
+	/* MIN_RATE_RATIO2=0, MAX_RATE_ULMT2=1, Weight2=1 */
|
|
+ *(unsigned long *)SCH_Q23_CFG = (0 << 10) | (1<<14) | (0 << 12);
|
|
+	/* MIN_RATE_RATIO3=0, MAX_RATE_ULMT3=1, Weight3=4 */
|
|
+ *(unsigned long *)SCH_Q23_CFG |= (0 << 26) | (1<<30) | (2 << 28);
|
|
+#else
|
|
+ *(unsigned long *)PDMA_SCH_CFG = (WRR_SCH << 24) |
|
|
+ (QUEUE_WEIGHT_16 << 12) | /* ring 3 weight */
|
|
+ (QUEUE_WEIGHT_4 << 8) | /* ring 2 weight */
|
|
+ (QUEUE_WEIGHT_16 << 4) | /* ring 1 weight */
|
|
+ (QUEUE_WEIGHT_4 << 0); /* ring 0 weight */
|
|
+#endif
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Routine Description :
|
|
+ * Bucket size and related information from ASIC Designer,
|
|
+ *	please check with Max Lee to update these values
|
|
+ *
|
|
+ * Related Registers
|
|
+ * FE_GLO_CFG - initialize clock rate for rate limiting
|
|
+ *	PDMA_FC_CFG - pause mechanism for rings (refer to p. 116 in the datasheet)
|
|
+ * :
|
|
+ * Parameter:
|
|
+ * NONE
|
|
+ */
|
|
+/*
|
|
+ * Bit 29:24 - Q3 flow control pause condition
|
|
+ * Bit 21:16 - Q2 flow control pause condition
|
|
+ * Bit 13:8 - Q1 flow control pause condition
|
|
+ * Bit 5:0 - Q0 flow control pause condition
|
|
+ *
|
|
+ * detail bitmap -
|
|
+ * Bit[5] - Pause Qx when PSE p2 HQ full
|
|
+ * Bit[4] - Pause Qx when PSE p2 LQ full
|
|
+ * Bit[3] - Pause Qx when PSE p1 HQ full
|
|
+ * Bit[2] - Pause Qx when PSE p1 LQ full
|
|
+ * Bit[1] - Pause Qx when PSE p0 HQ full
|
|
+ * Bit[0] - Pause Qx when PSE p0 LQ full
|
|
+ */
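+/*
+ * Editor's note (illustrative, not part of the original SDK sources): using
+ * the bit definitions above, the PDMA_FC_CFG value written in the #else
+ * branch of set_schedule_pause_condition() below evaluates to
+ *
+ *   (PSE_P2_HQ_FULL << 24) | (PSE_P2_HQ_FULL << 16) |
+ *   (PSE_P1_HQ_FULL <<  8) | (PSE_P1_HQ_FULL <<  0)
+ *   = (0x20 << 24) | (0x20 << 16) | (0x08 << 8) | 0x08 = 0x20200808
+ *
+ * so rings 3/2 pause when the GMAC2 (P2) high queue is full, and rings 1/0
+ * pause when the GMAC1 (P1) high queue is full.
+ */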
|
|
+void set_schedule_pause_condition(void)
|
|
+{
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+
|
|
+#elif defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+ *(unsigned long *)SDM_TRING = (0xC << 28) | (0x3 << 24) | (0xC << 4) | 0x3;
|
|
+#else
|
|
+ /*
|
|
+ * STEP1: Set queue priority is high or low
|
|
+ *
|
|
+ * Set queue 3 as high queue in GMAC1/GMAC2
|
|
+ */
|
|
+ *(unsigned long *)GDMA1_FC_CFG = ((HIGH_QUEUE(3)|LOW_QUEUE(2) |
|
|
+ LOW_QUEUE(1)|LOW_QUEUE(0))<<24) |
|
|
+ (RSEV_PAGE_COUNT_HQ << 16) |
|
|
+ (RSEV_PAGE_COUNT_LQ <<8) |
|
|
+ VIQ_FC_ASRT | PAGES_SHARING;
|
|
+
|
|
+ *(unsigned long *)GDMA2_FC_CFG = ((HIGH_QUEUE(3)|LOW_QUEUE(2) |
|
|
+ LOW_QUEUE(1)|LOW_QUEUE(0))<<24) |
|
|
+ (RSEV_PAGE_COUNT_HQ << 16) |
|
|
+ (RSEV_PAGE_COUNT_LQ <<8) |
|
|
+ VIQ_FC_ASRT | PAGES_SHARING;
|
|
+
|
|
+ /*
|
|
+ * STEP2: Set flow control pause condition
|
|
+ *
|
|
+	 * The CPU always uses queue 3, and queue 3 is a high queue.
|
|
+ * If P2(GMAC2) high queue is full, pause ring3/ring2
|
|
+ * If P1(GMAC1) high queue is full, pause ring1/ring0
|
|
+ */
|
|
+ *(unsigned long *)PDMA_FC_CFG = ( PSE_P2_HQ_FULL << 24 ) | /* queue 3 */
|
|
+ ( PSE_P2_HQ_FULL << 16 ) | /* queue 2 */
|
|
+ ( PSE_P1_HQ_FULL << 8 ) | /* queue 1 */
|
|
+ ( PSE_P1_HQ_FULL << 0 ); /* queue 0 */
|
|
+#endif
|
|
+
|
|
+}
|
|
+
|
|
+
|
|
+void set_output_shaper(void)
|
|
+{
|
|
+#define GDMA1_TOKEN_RATE 16 /* unit=64bits/ms */
|
|
+#define GDMA2_TOKEN_RATE 16 /* unit=64bits/ms */
|
|
+
|
|
+#if 0
|
|
+ *(unsigned long *)GDMA1_SHPR_CFG = (1 << 24) | /* output shaper enable */
|
|
+ (128 << 16) | /* bucket size (unit=1KB) */
|
|
+ (GDMA1_TOKEN_RATE << 0); /* token rate (unit=8B/ms) */
|
|
+#endif
|
|
+
|
|
+#if 0
|
|
+ *(unsigned long *)GDMA2_SHPR_CFG = (1 << 24) | /* output shaper enable */
|
|
+ (128 << 16) | /* bucket size (unit=1KB) */
|
|
+ (GDMA2_TOKEN_RATE << 0); /* token rate (unit=8B/ms) */
|
|
+#endif
|
|
+}
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/ra_qos.h
|
|
@@ -0,0 +1,18 @@
|
|
+#ifndef RA_QOS_H
|
|
+#define RA_QOS_H
|
|
+
|
|
+#include "ra2882ethreg.h"
|
|
+#define RING0 0
|
|
+#define RING1 1
|
|
+#define RING2 2
|
|
+#define RING3 3
|
|
+void get_tx_desc_and_dtx_idx(END_DEVICE* ei_local, int ring_no, unsigned long *tx_dtx_idx, struct PDMA_txdesc **tx_desc);
|
|
+int get_tx_ctx_idx(unsigned int ring_no, unsigned long *idx);
|
|
+int fe_tx_desc_init(struct net_device *dev, unsigned int ring_no, unsigned int qn, unsigned int pn);
|
|
+int fe_qos_packet_send(struct net_device *dev, struct sk_buff* skb, unsigned int ring_no, unsigned int qn, unsigned int pn);
|
|
+
|
|
+int pkt_classifier(struct sk_buff *skb,int gmac_no, int *ring_no, int *queue_no, int *port_no);
|
|
+void set_schedule_pause_condition(void);
|
|
+void set_scheduler_weight(void);
|
|
+void set_output_shaper(void);
|
|
+#endif
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/ra_rfrw.c
|
|
@@ -0,0 +1,66 @@
|
|
+#include <linux/module.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/sched.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/fcntl.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/ptrace.h>
|
|
+#include <linux/ioport.h>
|
|
+#include <linux/in.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/string.h>
|
|
+#include <linux/signal.h>
|
|
+#include <linux/irq.h>
|
|
+#include <linux/netdevice.h>
|
|
+#include <linux/etherdevice.h>
|
|
+#include <linux/skbuff.h>
|
|
+
|
|
+#include "ra2882ethreg.h"
|
|
+#include "raether.h"
|
|
+#include "ra_mac.h"
|
|
+
|
|
+#define RF_CSR_CFG 0xb0180500
|
|
+#define RF_CSR_KICK (1<<17)
|
|
+int rw_rf_reg(int write, int reg, int *data)
|
|
+{
|
|
+ unsigned long rfcsr, i = 0;
|
|
+
|
|
+ while (1) {
|
|
+ rfcsr = sysRegRead(RF_CSR_CFG);
|
|
+ if (! (rfcsr & (u32)RF_CSR_KICK) )
|
|
+ break;
|
|
+ if (++i > 10000) {
|
|
+ printk("Warning: Abort rw rf register: too busy\n");
|
|
+ return -1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ rfcsr = (u32)(RF_CSR_KICK | ((reg&0x3f) << 8) | (*data & 0xff));
|
|
+ if (write)
|
|
+ rfcsr |= 0x10000;
|
|
+
|
|
+	sysRegWrite(RF_CSR_CFG, cpu_to_le32(rfcsr));
|
|
+
|
|
+ i = 0;
|
|
+ while (1) {
|
|
+ rfcsr = sysRegRead(RF_CSR_CFG);
|
|
+ if (! (rfcsr & (u32)RF_CSR_KICK) )
|
|
+ break;
|
|
+ if (++i > 10000) {
|
|
+ printk("Warning: still busy\n");
|
|
+ return -1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ rfcsr = sysRegRead(RF_CSR_CFG);
|
|
+
|
|
+ if (((rfcsr&0x1f00) >> 8) != (reg & 0x1f)) {
|
|
+ printk("Error: rw register failed\n");
|
|
+ return -1;
|
|
+ }
|
|
+ *data = (int)(rfcsr & 0xff);
|
|
+
|
|
+ return 0;
|
|
+}
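+/*
+ * Editor's note, an illustrative usage sketch (not part of the original SDK
+ * sources; the register number 5 is chosen arbitrarily): reading and then
+ * writing an RF register with the helper above.
+ *
+ *	int val = 0;
+ *	if (rw_rf_reg(0, 5, &val) == 0)
+ *		printk("RF reg 5 = 0x%02x\n", val);
+ *	val = 0x3;
+ *	rw_rf_reg(1, 5, &val);
+ */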
|
|
+
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/ra_rfrw.h
|
|
@@ -0,0 +1,6 @@
|
|
+#ifndef RA_RFRW_H
|
|
+#define RA_RFRW_H
|
|
+
|
|
+int rw_rf_reg(int write, int reg, int *data);
|
|
+
|
|
+#endif
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/raether.c
|
|
@@ -0,0 +1,6401 @@
|
|
+#include <linux/module.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/pci.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/if_vlan.h>
|
|
+#include <linux/if_ether.h>
|
|
+#include <linux/fs.h>
|
|
+#include <asm/uaccess.h>
|
|
+#include <asm/rt2880/surfboardint.h>
|
|
+#include <linux/platform_device.h>
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+#include <linux/tcp.h>
|
|
+#include <net/ipv6.h>
|
|
+#include <linux/ip.h>
|
|
+#include <net/ip.h>
|
|
+#include <net/tcp.h>
|
|
+#include <linux/in.h>
|
|
+#include <linux/ppp_defs.h>
|
|
+#include <linux/if_pppox.h>
|
|
+#endif
|
|
+#if defined (CONFIG_RAETH_LRO)
|
|
+#include <linux/inet_lro.h>
|
|
+#endif
|
|
+#include <linux/delay.h>
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+#include <linux/sched.h>
|
|
+#endif
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
|
|
+#include <asm/rt2880/rt_mmap.h>
|
|
+#else
|
|
+#include <linux/libata-compat.h>
|
|
+#endif
|
|
+
|
|
+#include "ra2882ethreg.h"
|
|
+#include "raether.h"
|
|
+#include "ra_mac.h"
|
|
+#include "ra_ioctl.h"
|
|
+#include "ra_rfrw.h"
|
|
+#ifdef CONFIG_RAETH_NETLINK
|
|
+#include "ra_netlink.h"
|
|
+#endif
|
|
+#if defined (CONFIG_RAETH_QOS)
|
|
+#include "ra_qos.h"
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
|
|
+#include "../../../net/nat/hw_nat/ra_nat.h"
|
|
+#endif
|
|
+#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+#include "dvt/raether_pdma_dvt.h"
|
|
+#endif /* CONFIG_RAETH_PDMA_DVT */
|
|
+
|
|
+static int fe_irq = 0;
|
|
+
|
|
+#if defined (TASKLET_WORKQUEUE_SW)
|
|
+int init_schedule;
|
|
+int working_schedule;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_NAPI
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+static int raeth_clean(struct napi_struct *napi, int budget);
|
|
+#else
|
|
+static int raeth_clean(struct net_device *dev, int *budget);
|
|
+#endif
|
|
+
|
|
+static int rt2880_eth_recv(struct net_device* dev, int *work_done, int work_to_do);
|
|
+#else
|
|
+static int rt2880_eth_recv(struct net_device* dev);
|
|
+#endif
|
|
+
|
|
+#if !defined(CONFIG_RA_NAT_NONE)
|
|
+/* bruce+
|
|
+ */
|
|
+extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
|
|
+extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
|
|
+/* Qwert+
|
|
+ */
|
|
+#include <asm/mipsregs.h>
|
|
+extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle);
|
|
+extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle);
|
|
+#endif /* CONFIG_RA_CLASSIFIER */
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT3052_MP2)
|
|
+int32_t mcast_rx(struct sk_buff * skb);
|
|
+int32_t mcast_tx(struct sk_buff * skb);
|
|
+#endif
|
|
+
|
|
+int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf)
|
|
+{
|
|
+ /* TODO */
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE) || defined (CONFIG_ARCH_MT7623)
|
|
+void setup_internal_gsw(void);
|
|
+#if defined (CONFIG_GE1_TRGMII_FORCE_1200)
|
|
+void apll_xtal_enable(void);
|
|
+#define REGBIT(x, n) (x << n)
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_MT7623_FPGA)
|
|
+void setup_fpga_gsw(void);
|
|
+#endif
|
|
+
|
|
+/* gmac driver feature set config */
|
|
+#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS)
|
|
+#undef DELAY_INT
|
|
+#else
|
|
+#if defined (CONFIG_ARCH_MT7623)
|
|
+#undef DELAY_INT
|
|
+#else
|
|
+#define DELAY_INT 1
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+//#define CONFIG_UNH_TEST
|
|
+/* end of config */
|
|
+
|
|
+#if defined (CONFIG_RAETH_JUMBOFRAME)
|
|
+#define MAX_RX_LENGTH 4096
|
|
+#else
|
|
+#define MAX_RX_LENGTH 1536
|
|
+#endif
|
|
+
|
|
+struct net_device *dev_raether;
|
|
+
|
|
+static int rx_dma_owner_idx;
|
|
+static int rx_dma_owner_idx0;
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+static int rx_dma_owner_lro1;
|
|
+static int rx_dma_owner_lro2;
|
|
+static int rx_dma_owner_lro3;
|
|
+#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+static int rx_dma_owner_idx1;
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+static int rx_dma_owner_idx2;
|
|
+static int rx_dma_owner_idx3;
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+int rx_calc_idx1;
|
|
+#endif
|
|
+#endif
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+int rx_calc_idx0;
|
|
+#endif
|
|
+static int pending_recv;
|
|
+static struct PDMA_rxdesc *rx_ring;
|
|
+unsigned long tx_ring_full=0;
|
|
+
|
|
+#if defined(CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined(CONFIG_RALINK_MT7620)
|
|
+unsigned short p0_rx_good_cnt = 0;
|
|
+unsigned short p1_rx_good_cnt = 0;
|
|
+unsigned short p2_rx_good_cnt = 0;
|
|
+unsigned short p3_rx_good_cnt = 0;
|
|
+unsigned short p4_rx_good_cnt = 0;
|
|
+unsigned short p5_rx_good_cnt = 0;
|
|
+unsigned short p6_rx_good_cnt = 0;
|
|
+unsigned short p0_tx_good_cnt = 0;
|
|
+unsigned short p1_tx_good_cnt = 0;
|
|
+unsigned short p2_tx_good_cnt = 0;
|
|
+unsigned short p3_tx_good_cnt = 0;
|
|
+unsigned short p4_tx_good_cnt = 0;
|
|
+unsigned short p5_tx_good_cnt = 0;
|
|
+unsigned short p6_tx_good_cnt = 0;
|
|
+
|
|
+unsigned short p0_rx_byte_cnt = 0;
|
|
+unsigned short p1_rx_byte_cnt = 0;
|
|
+unsigned short p2_rx_byte_cnt = 0;
|
|
+unsigned short p3_rx_byte_cnt = 0;
|
|
+unsigned short p4_rx_byte_cnt = 0;
|
|
+unsigned short p5_rx_byte_cnt = 0;
|
|
+unsigned short p6_rx_byte_cnt = 0;
|
|
+unsigned short p0_tx_byte_cnt = 0;
|
|
+unsigned short p1_tx_byte_cnt = 0;
|
|
+unsigned short p2_tx_byte_cnt = 0;
|
|
+unsigned short p3_tx_byte_cnt = 0;
|
|
+unsigned short p4_tx_byte_cnt = 0;
|
|
+unsigned short p5_tx_byte_cnt = 0;
|
|
+unsigned short p6_tx_byte_cnt = 0;
|
|
+
|
|
+#if defined(CONFIG_RALINK_MT7620)
|
|
+unsigned short p7_rx_good_cnt = 0;
|
|
+unsigned short p7_tx_good_cnt = 0;
|
|
+
|
|
+unsigned short p7_rx_byte_cnt = 0;
|
|
+unsigned short p7_tx_byte_cnt = 0;
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
|
|
+#include "ra_ethtool.h"
|
|
+extern struct ethtool_ops ra_ethtool_ops;
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+extern struct ethtool_ops ra_virt_ethtool_ops;
|
|
+#endif // CONFIG_PSEUDO_SUPPORT //
|
|
+#endif // CONFIG_ETHTOOL //
|
|
+
|
|
+#ifdef CONFIG_RALINK_VISTA_BASIC
|
|
+int is_switch_175c = 1;
|
|
+#endif
|
|
+
|
|
+unsigned int M2Q_table[64] = {0};
|
|
+unsigned int lan_wan_separate = 0;
|
|
+
|
|
+#if defined(CONFIG_HW_SFQ)
|
|
+unsigned int web_sfq_enable = 0;
|
|
+EXPORT_SYMBOL(web_sfq_enable);
|
|
+#endif
|
|
+
|
|
+EXPORT_SYMBOL(M2Q_table);
|
|
+EXPORT_SYMBOL(lan_wan_separate);
|
|
+#if defined (CONFIG_RAETH_LRO)
|
|
+unsigned int lan_ip;
|
|
+struct lro_para_struct lro_para;
|
|
+int lro_flush_needed;
|
|
+extern char const *nvram_get(int index, char *name);
|
|
+#endif
|
|
+
|
|
+#define KSEG1 0xa0000000
|
|
+#if defined (CONFIG_MIPS)
|
|
+#define PHYS_TO_VIRT(x) ((void *)((x) | KSEG1))
|
|
+#define VIRT_TO_PHYS(x) ((unsigned long)(x) & ~KSEG1)
|
|
+#else
|
|
+#define PHYS_TO_VIRT(x) phys_to_virt(x)
|
|
+#define VIRT_TO_PHYS(x) virt_to_phys(x)
|
|
+#endif
|
|
+
|
|
+extern int fe_dma_init(struct net_device *dev);
|
|
+extern int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no);
|
|
+extern void ei_xmit_housekeeping(unsigned long unused);
|
|
+extern inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no);
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+extern int fe_hw_lro_init(struct net_device *dev);
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+
|
|
+#if 0
|
|
+void skb_dump(struct sk_buff* sk) {
|
|
+ unsigned int i;
|
|
+
|
|
+ printk("skb_dump: from %s with len %d (%d) headroom=%d tailroom=%d\n",
|
|
+ sk->dev?sk->dev->name:"ip stack",sk->len,sk->truesize,
|
|
+ skb_headroom(sk),skb_tailroom(sk));
|
|
+
|
|
+ //for(i=(unsigned int)sk->head;i<=(unsigned int)sk->tail;i++) {
|
|
+ for(i=(unsigned int)sk->head;i<=(unsigned int)sk->data+20;i++) {
|
|
+ if((i % 20) == 0)
|
|
+ printk("\n");
|
|
+ if(i==(unsigned int)sk->data) printk("{");
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21)
|
|
+ if(i==(unsigned int)sk->transport_header) printk("#");
|
|
+ if(i==(unsigned int)sk->network_header) printk("|");
|
|
+ if(i==(unsigned int)sk->mac_header) printk("*");
|
|
+#else
|
|
+ if(i==(unsigned int)sk->h.raw) printk("#");
|
|
+ if(i==(unsigned int)sk->nh.raw) printk("|");
|
|
+ if(i==(unsigned int)sk->mac.raw) printk("*");
|
|
+#endif
|
|
+ printk("%02X-",*((unsigned char*)i));
|
|
+ if(i==(unsigned int)sk->tail) printk("}");
|
|
+ }
|
|
+ printk("\n");
|
|
+}
|
|
+#endif
|
|
+
|
|
+
|
|
+
|
|
+#if defined (CONFIG_GIGAPHY) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
|
|
+int isICPlusGigaPHY(int ge)
|
|
+{
|
|
+ u32 phy_id0 = 0, phy_id1 = 0;
|
|
+
|
|
+#ifdef CONFIG_GE2_RGMII_AN
|
|
+ if (ge == 2) {
|
|
+ if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 2, &phy_id0)) {
|
|
+ printk("\n Read PhyID 1 is Fail!!\n");
|
|
+ phy_id0 =0;
|
|
+ }
|
|
+ if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 3, &phy_id1)) {
|
|
+ printk("\n Read PhyID 1 is Fail!!\n");
|
|
+ phy_id1 = 0;
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+#endif
|
|
+#if defined (CONFIG_GE1_RGMII_AN) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
|
|
+ {
|
|
+ if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 2, &phy_id0)) {
|
|
+ printk("\n Read PhyID 0 is Fail!!\n");
|
|
+ phy_id0 =0;
|
|
+ }
|
|
+ if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 3, &phy_id1)) {
|
|
+ printk("\n Read PhyID 0 is Fail!!\n");
|
|
+ phy_id1 = 0;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ if ((phy_id0 == EV_ICPLUS_PHY_ID0) && ((phy_id1 & 0xfff0) == EV_ICPLUS_PHY_ID1))
|
|
+ return 1;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
|
|
+int isMarvellGigaPHY(int ge)
|
|
+{
|
|
+ u32 phy_id0 = 0, phy_id1 = 0;
|
|
+
|
|
+#if defined (CONFIG_GE2_RGMII_AN) || defined (CONFIG_P4_MAC_TO_PHY_MODE)
|
|
+ if (ge == 2) {
|
|
+ if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 2, &phy_id0)) {
|
|
+ printk("\n Read PhyID 1 is Fail!!\n");
|
|
+ phy_id0 =0;
|
|
+ }
|
|
+ if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 3, &phy_id1)) {
|
|
+ printk("\n Read PhyID 1 is Fail!!\n");
|
|
+ phy_id1 = 0;
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+#endif
|
|
+#if defined (CONFIG_GE1_RGMII_AN) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
|
|
+ {
|
|
+ if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 2, &phy_id0)) {
|
|
+ printk("\n Read PhyID 0 is Fail!!\n");
|
|
+ phy_id0 =0;
|
|
+ }
|
|
+ if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 3, &phy_id1)) {
|
|
+ printk("\n Read PhyID 0 is Fail!!\n");
|
|
+ phy_id1 = 0;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+ ;
|
|
+ if ((phy_id0 == EV_MARVELL_PHY_ID0) && (phy_id1 == EV_MARVELL_PHY_ID1))
|
|
+ return 1;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int isVtssGigaPHY(int ge)
|
|
+{
|
|
+ u32 phy_id0 = 0, phy_id1 = 0;
|
|
+
|
|
+#if defined (CONFIG_GE2_RGMII_AN) || defined (CONFIG_P4_MAC_TO_PHY_MODE)
|
|
+ if (ge == 2) {
|
|
+ if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 2, &phy_id0)) {
|
|
+ printk("\n Read PhyID 1 is Fail!!\n");
|
|
+ phy_id0 =0;
|
|
+ }
|
|
+ if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 3, &phy_id1)) {
|
|
+ printk("\n Read PhyID 1 is Fail!!\n");
|
|
+ phy_id1 = 0;
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+#endif
|
|
+#if defined (CONFIG_GE1_RGMII_AN) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
|
|
+ {
|
|
+ if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 2, &phy_id0)) {
|
|
+ printk("\n Read PhyID 0 is Fail!!\n");
|
|
+ phy_id0 =0;
|
|
+ }
|
|
+ if (!mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 3, &phy_id1)) {
|
|
+ printk("\n Read PhyID 0 is Fail!!\n");
|
|
+ phy_id1 = 0;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+ ;
|
|
+ if ((phy_id0 == EV_VTSS_PHY_ID0) && (phy_id1 == EV_VTSS_PHY_ID1))
|
|
+ return 1;
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+/*
|
|
+ * Set the hardware MAC address.
|
|
+ */
|
|
+static int ei_set_mac_addr(struct net_device *dev, void *p)
|
|
+{
|
|
+ struct sockaddr *addr = p;
|
|
+
|
|
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
|
|
+
|
|
+ if(netif_running(dev))
|
|
+ return -EBUSY;
|
|
+
|
|
+ ra2880MacAddressSet(addr->sa_data);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+static int ei_set_mac2_addr(struct net_device *dev, void *p)
|
|
+{
|
|
+ struct sockaddr *addr = p;
|
|
+
|
|
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
|
|
+
|
|
+ if(netif_running(dev))
|
|
+ return -EBUSY;
|
|
+
|
|
+ ra2880Mac2AddressSet(addr->sa_data);
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+void set_fe_dma_glo_cfg(void)
|
|
+{
|
|
+ int dma_glo_cfg=0;
|
|
+#if defined (CONFIG_RALINK_RT2880) || defined(CONFIG_RALINK_RT2883) || \
|
|
+ defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3883)
|
|
+ int fe_glo_cfg=0;
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A)
|
|
+ dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_32DWORDS);
|
|
+#elif defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621)
|
|
+ dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_16DWORDS);
|
|
+#elif defined (CONFIG_ARCH_MT7623)
|
|
+ dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_16DWORDS | ADMA_RX_BT_SIZE_32DWORDS);
|
|
+#else
|
|
+ dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_4DWORDS);
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ dma_glo_cfg |= (RX_2B_OFFSET);
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_32B_DESC)
|
|
+ dma_glo_cfg |= (DESC_32B_EN);
|
|
+#endif
|
|
+ sysRegWrite(DMA_GLO_CFG, dma_glo_cfg);
|
|
+#ifdef CONFIG_RAETH_QDMA
|
|
+ sysRegWrite(QDMA_GLO_CFG, dma_glo_cfg);
|
|
+#endif
|
|
+
|
|
+ /* only the following chipset need to set it */
|
|
+#if defined (CONFIG_RALINK_RT2880) || defined(CONFIG_RALINK_RT2883) || \
|
|
+ defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3883)
|
|
+ //set 1us timer count in unit of clock cycle
|
|
+ fe_glo_cfg = sysRegRead(FE_GLO_CFG);
|
|
+ fe_glo_cfg &= ~(0xff << 8); //clear bit8-bit15
|
|
+ fe_glo_cfg |= (((get_surfboard_sysclk()/1000000)) << 8);
|
|
+ sysRegWrite(FE_GLO_CFG, fe_glo_cfg);
|
|
+#endif
|
|
+}
|
|
+
|
|
+int forward_config(struct net_device *dev)
|
|
+{
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+
|
|
+ /* RT5350: No GDMA, PSE, CDMA, PPE */
|
|
+ unsigned int sdmVal;
|
|
+ sdmVal = sysRegRead(SDM_CON);
|
|
+
|
|
+#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
|
|
+ sdmVal |= 0x7<<16; // UDPCS, TCPCS, IPCS=1
|
|
+#endif // CONFIG_RAETH_CHECKSUM_OFFLOAD //
|
|
+
|
|
+#if defined (CONFIG_RAETH_SPECIAL_TAG)
|
|
+ sdmVal |= 0x1<<20; // TCI_81XX
|
|
+#endif // CONFIG_RAETH_SPECIAL_TAG //
|
|
+
|
|
+ sysRegWrite(SDM_CON, sdmVal);
|
|
+
|
|
+#else //Non RT5350 chipset
|
|
+
|
|
+ unsigned int regVal, regCsg;
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ unsigned int regVal2;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_TX
|
|
+#if defined(CONFIG_RALINK_MT7620)
|
|
+	/* frame engine will push the VLAN tag according to the VIDX field in the Tx desc. */
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x430) = 0x00010000;
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x434) = 0x00030002;
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x438) = 0x00050004;
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x43C) = 0x00070006;
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x440) = 0x00090008;
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x444) = 0x000b000a;
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x448) = 0x000d000c;
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0x44C) = 0x000f000e;
|
|
+#else
|
|
+ /*
|
|
+ * VLAN_IDX 0 = VLAN_ID 0
|
|
+ * .........
|
|
+ * VLAN_IDX 15 = VLAN ID 15
|
|
+ *
|
|
+ */
|
|
+	/* frame engine will push the VLAN tag according to the VIDX field in the Tx desc. */
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xa8) = 0x00010000;
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xac) = 0x00030002;
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xb0) = 0x00050004;
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xb4) = 0x00070006;
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xb8) = 0x00090008;
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xbc) = 0x000b000a;
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xc0) = 0x000d000c;
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE + 0xc4) = 0x000f000e;
|
|
+#endif
|
|
+#endif
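+	/*
+	 * Editor's note (illustrative, not part of the original SDK sources):
+	 * each 32-bit entry of the VLAN ID table above appears to hold two
+	 * VIDs, the even index in bits 15:0 and the odd index in bits 31:16,
+	 * e.g. 0x00030002 maps VLAN_IDX 2 -> VID 2 and VLAN_IDX 3 -> VID 3,
+	 * i.e. the identity VIDX -> VID mapping described in the comment above.
+	 */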
|
|
+
|
|
+ regVal = sysRegRead(GDMA1_FWD_CFG);
|
|
+ regCsg = sysRegRead(CDMA_CSG_CFG);
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ regVal2 = sysRegRead(GDMA2_FWD_CFG);
|
|
+#endif
|
|
+
|
|
+ //set unicast/multicast/broadcast frame to cpu
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+ /* GDMA1 frames destination port is port0 CPU*/
|
|
+ regVal &= ~0x7;
|
|
+#else
|
|
+ regVal &= ~0xFFFF;
|
|
+ regVal |= GDMA1_FWD_PORT;
|
|
+#endif
|
|
+ regCsg &= ~0x7;
|
|
+
|
|
+#if defined (CONFIG_RAETH_SPECIAL_TAG)
|
|
+ regVal |= (1 << 24); //GDM1_TCI_81xx
|
|
+#endif
|
|
+
|
|
+
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_TX
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
|
|
+ dev->features |= NETIF_F_HW_VLAN_TX;
|
|
+#else
|
|
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
|
|
+#endif
|
|
+#endif
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_RX
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
|
|
+ dev->features |= NETIF_F_HW_VLAN_RX;
|
|
+#else
|
|
+ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
|
|
+ //enable ipv4 header checksum check
|
|
+ regVal |= GDM1_ICS_EN;
|
|
+ regCsg |= ICS_GEN_EN;
|
|
+
|
|
+ //enable tcp checksum check
|
|
+ regVal |= GDM1_TCS_EN;
|
|
+ regCsg |= TCS_GEN_EN;
|
|
+
|
|
+ //enable udp checksum check
|
|
+ regVal |= GDM1_UCS_EN;
|
|
+ regCsg |= UCS_GEN_EN;
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ regVal2 &= ~0xFFFF;
|
|
+ regVal2 |= GDMA2_FWD_PORT;
|
|
+
|
|
+ regVal2 |= GDM1_ICS_EN;
|
|
+ regVal2 |= GDM1_TCS_EN;
|
|
+ regVal2 |= GDM1_UCS_EN;
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ dev->features |= NETIF_F_HW_CSUM;
|
|
+#else
|
|
+ dev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4 */
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+//#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
|
|
+// dev->vlan_features |= NETIF_F_IP_CSUM;
|
|
+//#endif
|
|
+
|
|
+#if defined(CONFIG_RALINK_MT7620)
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
|
|
+ dev->features |= NETIF_F_SG;
|
|
+ dev->features |= NETIF_F_TSO;
|
|
+ }
|
|
+#endif // CONFIG_RAETH_TSO //
|
|
+
|
|
+#if defined (CONFIG_RAETH_TSOV6)
|
|
+ if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
|
|
+ dev->features |= NETIF_F_TSO6;
|
|
+ dev->features |= NETIF_F_IPV6_CSUM; /* Can checksum TCP/UDP over IPv6 */
|
|
+ }
|
|
+#endif // CONFIG_RAETH_TSOV6 //
|
|
+#else
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ dev->features |= NETIF_F_SG;
|
|
+ dev->features |= NETIF_F_TSO;
|
|
+#endif // CONFIG_RAETH_TSO //
|
|
+
|
|
+#if defined (CONFIG_RAETH_TSOV6)
|
|
+ dev->features |= NETIF_F_TSO6;
|
|
+ dev->features |= NETIF_F_IPV6_CSUM; /* Can checksum TCP/UDP over IPv6 */
|
|
+#endif // CONFIG_RAETH_TSOV6 //
|
|
+#endif // CONFIG_RALINK_MT7620 //
|
|
+#else // Checksum offload disabled
|
|
+
|
|
+ //disable ipv4 header checksum check
|
|
+ regVal &= ~GDM1_ICS_EN;
|
|
+ regCsg &= ~ICS_GEN_EN;
|
|
+
|
|
+ //disable tcp checksum check
|
|
+ regVal &= ~GDM1_TCS_EN;
|
|
+ regCsg &= ~TCS_GEN_EN;
|
|
+
|
|
+ //disable udp checksum check
|
|
+ regVal &= ~GDM1_UCS_EN;
|
|
+ regCsg &= ~UCS_GEN_EN;
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ regVal2 &= ~GDM1_ICS_EN;
|
|
+ regVal2 &= ~GDM1_TCS_EN;
|
|
+ regVal2 &= ~GDM1_UCS_EN;
|
|
+#endif
|
|
+
|
|
+ dev->features &= ~NETIF_F_IP_CSUM; /* disable checksum TCP/UDP over IPv4 */
|
|
+#endif // CONFIG_RAETH_CHECKSUM_OFFLOAD //
|
|
+
|
|
+#ifdef CONFIG_RAETH_JUMBOFRAME
|
|
+ regVal |= GDM1_JMB_EN;
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ regVal2 |= GDM1_JMB_EN;
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+ sysRegWrite(GDMA1_FWD_CFG, regVal);
|
|
+ sysRegWrite(CDMA_CSG_CFG, regCsg);
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ sysRegWrite(GDMA2_FWD_CFG, regVal2);
|
|
+#endif
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
|
|
+ dev->vlan_features = dev->features;
|
|
+#endif
|
|
+
|
|
+/*
|
|
+ * PSE_FQ_CFG register definition -
|
|
+ *
|
|
+ * Define max free queue page count in PSE. (31:24)
|
|
+ * RT2883/RT3883 - 0xff908000 (255 pages)
|
|
+ * RT3052 - 0x80504000 (128 pages)
|
|
+ * RT2880 - 0x80504000 (128 pages)
|
|
+ *
|
|
+ * Each page holds 128 bytes.
|
|
+ *
|
|
+ * 23:16 - free queue flow control release threshold
|
|
+ * 15:8 - free queue flow control assertion threshold
|
|
+ * 7:0 - free queue empty threshold
|
|
+ *
|
|
+ * This register affects QoS correctness in the frame engine.
|
|
+ */
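+/*
+ * Editor's note (illustrative, not part of the original SDK sources): with
+ * 128 bytes per page, the free-queue page counts quoted above correspond to
+ * roughly 255 * 128 = 32640 bytes (~32 KB) of PSE buffering on RT2883/RT3883
+ * and 128 * 128 = 16384 bytes (16 KB) on RT3052/RT2880.
+ */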
|
|
+
|
|
+#if defined(CONFIG_RALINK_RT2883) || defined(CONFIG_RALINK_RT3883)
|
|
+ sysRegWrite(PSE_FQ_CFG, cpu_to_le32(INIT_VALUE_OF_RT2883_PSE_FQ_CFG));
|
|
+#elif defined(CONFIG_RALINK_RT3352) || defined(CONFIG_RALINK_RT5350) || \
|
|
+ defined(CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined(CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
|
|
+ defined (CONFIG_RALINK_MT7628) || defined(CONFIG_ARCH_MT7623)
|
|
+ /*use default value*/
|
|
+#else
|
|
+ sysRegWrite(PSE_FQ_CFG, cpu_to_le32(INIT_VALUE_OF_PSE_FQFC_CFG));
|
|
+#endif
|
|
+
|
|
+ /*
|
|
+ *FE_RST_GLO register definition -
|
|
+ *Bit 0: PSE Reset
|
|
+ *Reset PSE after re-programming PSE_FQ_CFG.
|
|
+ */
|
|
+ regVal = 0x1;
|
|
+ sysRegWrite(FE_RST_GL, regVal);
|
|
+ sysRegWrite(FE_RST_GL, 0); // update for RSTCTL issue
|
|
+
|
|
+ regCsg = sysRegRead(CDMA_CSG_CFG);
|
|
+ printk("CDMA_CSG_CFG = %0X\n",regCsg);
|
|
+ regVal = sysRegRead(GDMA1_FWD_CFG);
|
|
+ printk("GDMA1_FWD_CFG = %0X\n",regVal);
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ regVal = sysRegRead(GDMA2_FWD_CFG);
|
|
+ printk("GDMA2_FWD_CFG = %0X\n",regVal);
|
|
+#endif
|
|
+#endif
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_RAETH_LRO
|
|
+static int
|
|
+rt_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
|
|
+ u64 *hdr_flags, void *priv)
|
|
+{
|
|
+ struct iphdr *iph = NULL;
|
|
+ int vhdr_len = 0;
|
|
+
|
|
+ /*
|
|
+	 * Make sure that this packet is IPv4 (skipping over a VLAN
|
|
+	 * header if present), has a valid IP header, and is TCP.
|
|
+ */
|
|
+ if (skb->protocol == 0x0081) {
|
|
+ vhdr_len = VLAN_HLEN;
|
|
+ }
|
|
+
|
|
+ iph = (struct iphdr *)(skb->data + vhdr_len);
|
|
+ if (iph->daddr != lro_para.lan_ip1) {
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ if(iph->protocol != IPPROTO_TCP) {
|
|
+ return -1;
|
|
+ } else {
|
|
+ *iphdr = iph;
|
|
+ *tcph = skb->data + (iph->ihl << 2) + vhdr_len;
|
|
+ *hdr_flags = LRO_IPV4 | LRO_TCP;
|
|
+
|
|
+ lro_flush_needed = 1;
|
|
+ return 0;
|
|
+ }
|
|
+}
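+/*
+ * Editor's note (illustrative, not part of the original SDK sources): for a
+ * plain IPv4/TCP frame with a standard 20-byte IP header, iph->ihl == 5, so
+ * the TCP header handed to the LRO manager starts at skb->data + (5 << 2)
+ * = +20, or at +24 when a single 802.1Q tag (VLAN_HLEN == 4) precedes the
+ * IP header.
+ */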
|
|
+#endif // CONFIG_RAETH_LRO //
|
|
+
|
|
+#ifdef CONFIG_RAETH_NAPI
|
|
+static int rt2880_eth_recv(struct net_device* dev, int *work_done, int work_to_do)
|
|
+#else
|
|
+static int rt2880_eth_recv(struct net_device* dev)
|
|
+#endif
|
|
+{
|
|
+ struct sk_buff *skb, *rx_skb;
|
|
+ unsigned int length = 0;
|
|
+ unsigned long RxProcessed;
|
|
+
|
|
+ int bReschedule = 0;
|
|
+ END_DEVICE* ei_local = netdev_priv(dev);
|
|
+#if defined (CONFIG_RAETH_MULTIPLE_RX_RING) || defined (CONFIG_RAETH_HW_LRO)
|
|
+ int rx_ring_no=0;
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_SPECIAL_TAG)
|
|
+ struct vlan_ethhdr *veth=NULL;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ PSEUDO_ADAPTER *pAd;
|
|
+#endif
|
|
+
|
|
+ RxProcessed = 0;
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_dma_owner_idx0 = (rx_calc_idx0 + 1) % NUM_RX_DESC;
|
|
+#else
|
|
+ rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_32B_DESC)
|
|
+ dma_cache_sync(NULL, &ei_local->rx_ring0[rx_dma_owner_idx0], sizeof(struct PDMA_rxdesc), DMA_FROM_DEVICE);
|
|
+#endif
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ rx_dma_owner_lro1 = (sysRegRead(RX_CALC_IDX1) + 1) % NUM_LRO_RX_DESC;
|
|
+ rx_dma_owner_lro2 = (sysRegRead(RX_CALC_IDX2) + 1) % NUM_LRO_RX_DESC;
|
|
+ rx_dma_owner_lro3 = (sysRegRead(RX_CALC_IDX3) + 1) % NUM_LRO_RX_DESC;
|
|
+#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_dma_owner_idx1 = (rx_calc_idx1 + 1) % NUM_RX_DESC;
|
|
+#else
|
|
+ rx_dma_owner_idx1 = (sysRegRead(RX_CALC_IDX1) + 1) % NUM_RX_DESC;
|
|
+#endif /* CONFIG_RAETH_RW_PDMAPTR_FROM_VAR */
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+ rx_dma_owner_idx2 = (sysRegRead(RX_CALC_IDX2) + 1) % NUM_RX_DESC;
|
|
+ rx_dma_owner_idx3 = (sysRegRead(RX_CALC_IDX3) + 1) % NUM_RX_DESC;
|
|
+#endif
|
|
+#if defined (CONFIG_32B_DESC)
|
|
+ dma_cache_sync(NULL, &ei_local->rx_ring1[rx_dma_owner_idx1], sizeof(struct PDMA_rxdesc), DMA_FROM_DEVICE);
|
|
+#endif
|
|
+#endif
|
|
+ for ( ; ; ) {
|
|
+
|
|
+
|
|
+#ifdef CONFIG_RAETH_NAPI
|
|
+ if(*work_done >= work_to_do)
|
|
+ break;
|
|
+ (*work_done)++;
|
|
+#else
|
|
+ if (RxProcessed++ > NUM_RX_MAX_PROCESS)
|
|
+ {
|
|
+ // need to reschedule rx handle
|
|
+ bReschedule = 1;
|
|
+ break;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ if (ei_local->rx_ring3[rx_dma_owner_lro3].rxd_info2.DDONE_bit == 1) {
|
|
+ rx_ring = ei_local->rx_ring3;
|
|
+ rx_dma_owner_idx = rx_dma_owner_lro3;
|
|
+ // printk("rx_dma_owner_lro3=%x\n",rx_dma_owner_lro3);
|
|
+ rx_ring_no=3;
|
|
+ }
|
|
+ else if (ei_local->rx_ring2[rx_dma_owner_lro2].rxd_info2.DDONE_bit == 1) {
|
|
+ rx_ring = ei_local->rx_ring2;
|
|
+ rx_dma_owner_idx = rx_dma_owner_lro2;
|
|
+ // printk("rx_dma_owner_lro2=%x\n",rx_dma_owner_lro2);
|
|
+ rx_ring_no=2;
|
|
+ }
|
|
+ else if (ei_local->rx_ring1[rx_dma_owner_lro1].rxd_info2.DDONE_bit == 1) {
|
|
+ rx_ring = ei_local->rx_ring1;
|
|
+ rx_dma_owner_idx = rx_dma_owner_lro1;
|
|
+ // printk("rx_dma_owner_lro1=%x\n",rx_dma_owner_lro1);
|
|
+ rx_ring_no=1;
|
|
+ }
|
|
+ else if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.DDONE_bit == 1) {
|
|
+ rx_ring = ei_local->rx_ring0;
|
|
+ rx_dma_owner_idx = rx_dma_owner_idx0;
|
|
+ // printk("rx_dma_owner_idx0=%x\n",rx_dma_owner_idx0);
|
|
+ rx_ring_no=0;
|
|
+ } else {
|
|
+ break;
|
|
+ }
|
|
+ #if defined (CONFIG_RAETH_HW_LRO_DBG)
|
|
+ HwLroStatsUpdate(rx_ring_no, rx_ring[rx_dma_owner_idx].rxd_info2.LRO_AGG_CNT, \
|
|
+ (rx_ring[rx_dma_owner_idx].rxd_info2.PLEN1 << 14) | rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0);
|
|
+ #endif
|
|
+ #if defined(CONFIG_RAETH_HW_LRO_REASON_DBG)
|
|
+ HwLroFlushStatsUpdate(rx_ring_no, rx_ring[rx_dma_owner_idx].rxd_info2.REV);
|
|
+ #endif
|
|
+#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+ if (ei_local->rx_ring1[rx_dma_owner_idx1].rxd_info2.DDONE_bit == 1) {
|
|
+ rx_ring = ei_local->rx_ring1;
|
|
+ rx_dma_owner_idx = rx_dma_owner_idx1;
|
|
+ // printk("rx_dma_owner_idx1=%x\n",rx_dma_owner_idx1);
|
|
+ rx_ring_no=1;
|
|
+ }
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+ else if (ei_local->rx_ring2[rx_dma_owner_idx2].rxd_info2.DDONE_bit == 1) {
|
|
+ rx_ring = ei_local->rx_ring2;
|
|
+ rx_dma_owner_idx = rx_dma_owner_idx2;
|
|
+ // printk("rx_dma_owner_idx2=%x\n",rx_dma_owner_idx2);
|
|
+ rx_ring_no=2;
|
|
+ }
|
|
+ else if (ei_local->rx_ring3[rx_dma_owner_idx3].rxd_info2.DDONE_bit == 1) {
|
|
+ rx_ring = ei_local->rx_ring3;
|
|
+ rx_dma_owner_idx = rx_dma_owner_idx3;
|
|
+ // printk("rx_dma_owner_idx3=%x\n",rx_dma_owner_idx3);
|
|
+ rx_ring_no=3;
|
|
+ }
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+ else if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.DDONE_bit == 1) {
|
|
+ rx_ring = ei_local->rx_ring0;
|
|
+ rx_dma_owner_idx = rx_dma_owner_idx0;
|
|
+ // printk("rx_dma_owner_idx0=%x\n",rx_dma_owner_idx0);
|
|
+ rx_ring_no=0;
|
|
+ } else {
|
|
+ break;
|
|
+ }
|
|
+#else
|
|
+
|
|
+ if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.DDONE_bit == 1) {
|
|
+ rx_ring = ei_local->rx_ring0;
|
|
+ rx_dma_owner_idx = rx_dma_owner_idx0;
|
|
+ } else {
|
|
+ break;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_32B_DESC)
|
|
+ prefetch(&rx_ring[(rx_dma_owner_idx + 1) % NUM_RX_DESC]);
|
|
+#endif
|
|
+ /* skb processing */
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ length = (rx_ring[rx_dma_owner_idx].rxd_info2.PLEN1 << 14) | rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0;
|
|
+#else
|
|
+ length = rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0;
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+
|
|
+#if defined (CONFIG_ARCH_MT7623)
|
|
+ dma_unmap_single(NULL, rx_ring[rx_dma_owner_idx].rxd_info1.PDP0, length, DMA_FROM_DEVICE);
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ if(rx_ring_no==3) {
|
|
+ rx_skb = ei_local->netrx3_skbuf[rx_dma_owner_idx];
|
|
+ rx_skb->data = ei_local->netrx3_skbuf[rx_dma_owner_idx]->data;
|
|
+ }
|
|
+ else if(rx_ring_no==2) {
|
|
+ rx_skb = ei_local->netrx2_skbuf[rx_dma_owner_idx];
|
|
+ rx_skb->data = ei_local->netrx2_skbuf[rx_dma_owner_idx]->data;
|
|
+ }
|
|
+ else if(rx_ring_no==1) {
|
|
+ rx_skb = ei_local->netrx1_skbuf[rx_dma_owner_idx];
|
|
+ rx_skb->data = ei_local->netrx1_skbuf[rx_dma_owner_idx]->data;
|
|
+ }
|
|
+ else {
|
|
+ rx_skb = ei_local->netrx0_skbuf[rx_dma_owner_idx];
|
|
+ rx_skb->data = ei_local->netrx0_skbuf[rx_dma_owner_idx]->data;
|
|
+ }
|
|
+ #if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+ raeth_pdma_lro_dvt( rx_ring_no, ei_local, rx_dma_owner_idx );
|
|
+ #endif /* CONFIG_RAETH_PDMA_DVT */
|
|
+#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+ if(rx_ring_no==1) {
|
|
+ rx_skb = ei_local->netrx1_skbuf[rx_dma_owner_idx];
|
|
+ rx_skb->data = ei_local->netrx1_skbuf[rx_dma_owner_idx]->data;
|
|
+ }
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+ else if(rx_ring_no==2) {
|
|
+ rx_skb = ei_local->netrx2_skbuf[rx_dma_owner_idx];
|
|
+ rx_skb->data = ei_local->netrx2_skbuf[rx_dma_owner_idx]->data;
|
|
+ }
|
|
+ else if(rx_ring_no==3) {
|
|
+ rx_skb = ei_local->netrx3_skbuf[rx_dma_owner_idx];
|
|
+ rx_skb->data = ei_local->netrx3_skbuf[rx_dma_owner_idx]->data;
|
|
+ }
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+ else {
|
|
+ rx_skb = ei_local->netrx0_skbuf[rx_dma_owner_idx];
|
|
+ rx_skb->data = ei_local->netrx0_skbuf[rx_dma_owner_idx]->data;
|
|
+ }
|
|
+ #if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+ raeth_pdma_lro_dvt( rx_ring_no, ei_local, rx_dma_owner_idx );
|
|
+ #endif /* CONFIG_RAETH_PDMA_DVT */
|
|
+#else
|
|
+ rx_skb = ei_local->netrx0_skbuf[rx_dma_owner_idx];
|
|
+ rx_skb->data = ei_local->netrx0_skbuf[rx_dma_owner_idx]->data;
|
|
+ #if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+ raeth_pdma_rx_desc_dvt( ei_local, rx_dma_owner_idx0 );
|
|
+ #endif /* CONFIG_RAETH_PDMA_DVT */
|
|
+#endif
|
|
+ rx_skb->len = length;
|
|
+/*TODO*/
|
|
+#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ rx_skb->data += NET_IP_ALIGN;
|
|
+#endif
|
|
+ rx_skb->tail = rx_skb->data + length;
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ if(rx_ring[rx_dma_owner_idx].rxd_info4.SP == 2) {
|
|
+ if(ei_local->PseudoDev!=NULL) {
|
|
+ rx_skb->dev = ei_local->PseudoDev;
|
|
+ rx_skb->protocol = eth_type_trans(rx_skb,ei_local->PseudoDev);
|
|
+ }else {
|
|
+				printk("ERROR: PseudoDev is not initialized yet, but a packet was received from GMAC2\n");
|
|
+ }
|
|
+ }else{
|
|
+ rx_skb->dev = dev;
|
|
+ rx_skb->protocol = eth_type_trans(rx_skb,dev);
|
|
+ }
|
|
+#else
|
|
+ rx_skb->dev = dev;
|
|
+ rx_skb->protocol = eth_type_trans(rx_skb,dev);
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
|
|
+#if defined (CONFIG_PDMA_NEW)
|
|
+ if(rx_ring[rx_dma_owner_idx].rxd_info4.L4VLD) {
|
|
+ rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
|
|
+ }else {
|
|
+ rx_skb->ip_summed = CHECKSUM_NONE;
|
|
+ }
|
|
+#else
|
|
+ if(rx_ring[rx_dma_owner_idx].rxd_info4.IPFVLD_bit) {
|
|
+ rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
|
|
+ }else {
|
|
+ rx_skb->ip_summed = CHECKSUM_NONE;
|
|
+ }
|
|
+#endif
|
|
+#else
|
|
+ rx_skb->ip_summed = CHECKSUM_NONE;
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
|
|
+ /* Qwert+
|
|
+ */
|
|
+ if(ra_classifier_hook_rx!= NULL)
|
|
+ {
|
|
+#if defined(CONFIG_RALINK_EXTERNAL_TIMER)
|
|
+ ra_classifier_hook_rx(rx_skb, (*((volatile u32 *)(0xB0000D08))&0x0FFFF));
|
|
+#else
|
|
+ ra_classifier_hook_rx(rx_skb, read_c0_count());
|
|
+#endif
|
|
+ }
|
|
+#endif /* CONFIG_RA_CLASSIFIER */
|
|
+
|
|
+#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
|
|
+ if(ra_sw_nat_hook_rx != NULL) {
|
|
+ FOE_MAGIC_TAG(rx_skb)= FOE_MAGIC_GE;
|
|
+ *(uint32_t *)(FOE_INFO_START_ADDR(rx_skb)+2) = *(uint32_t *)&rx_ring[rx_dma_owner_idx].rxd_info4;
|
|
+ FOE_ALG(rx_skb) = 0;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ /* We have to check the free memory size is big enough
|
|
+	 * before passing the packet to the CPU */
|
|
+#if defined (CONFIG_RAETH_SKB_RECYCLE_2K)
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ if( rx_ring != ei_local->rx_ring0 )
|
|
+ skb = __dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
|
|
+ else
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+ skb = skbmgr_dev_alloc_skb2k();
|
|
+#else
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ if( rx_ring != ei_local->rx_ring0 )
|
|
+ skb = __dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
|
|
+ else
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+ skb = __dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
|
|
+#endif
|
|
+
|
|
+ if (unlikely(skb == NULL))
|
|
+ {
|
|
+ printk(KERN_ERR "skb not available...\n");
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ if (rx_ring[rx_dma_owner_idx].rxd_info4.SP == 2) {
|
|
+ if (ei_local->PseudoDev != NULL) {
|
|
+ pAd = netdev_priv(ei_local->PseudoDev);
|
|
+ pAd->stat.rx_dropped++;
|
|
+ }
|
|
+ } else
|
|
+#endif
|
|
+ ei_local->stat.rx_dropped++;
|
|
+ bReschedule = 1;
|
|
+ break;
|
|
+ }
|
|
+#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ skb_reserve(skb, NET_IP_ALIGN);
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_SPECIAL_TAG)
|
|
+ // port0: 0x8100 => 0x8100 0001
|
|
+ // port1: 0x8101 => 0x8100 0002
|
|
+ // port2: 0x8102 => 0x8100 0003
|
|
+ // port3: 0x8103 => 0x8100 0004
|
|
+ // port4: 0x8104 => 0x8100 0005
|
|
+ // port5: 0x8105 => 0x8100 0006
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,21)
|
|
+ veth = (struct vlan_ethhdr *)(rx_skb->mac_header);
|
|
+#else
|
|
+ veth = (struct vlan_ethhdr *)(rx_skb->mac.raw);
|
|
+#endif
|
|
+	/* do not check 0x81 due to the MT7530 SPEC */
|
|
+ //if((veth->h_vlan_proto & 0xFF) == 0x81)
|
|
+ {
|
|
+ veth->h_vlan_TCI = htons( (((veth->h_vlan_proto >> 8) & 0xF) + 1) );
|
|
+ rx_skb->protocol = veth->h_vlan_proto = htons(ETH_P_8021Q);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+/* ra_sw_nat_hook_rx return 1 --> continue
|
|
+ * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
|
|
+ */
|
|
+#if !defined(CONFIG_RA_NAT_NONE)
|
|
+ if((ra_sw_nat_hook_rx == NULL) ||
|
|
+ (ra_sw_nat_hook_rx!= NULL && ra_sw_nat_hook_rx(rx_skb)))
|
|
+#endif
|
|
+ {
|
|
+#if defined (CONFIG_RALINK_RT3052_MP2)
|
|
+ if(mcast_rx(rx_skb)==0) {
|
|
+ kfree_skb(rx_skb);
|
|
+ }else
|
|
+#endif
|
|
+#if defined (CONFIG_RAETH_LRO)
|
|
+ if (rx_skb->ip_summed == CHECKSUM_UNNECESSARY) {
|
|
+ lro_receive_skb(&ei_local->lro_mgr, rx_skb, NULL);
|
|
+ //LroStatsUpdate(&ei_local->lro_mgr,0);
|
|
+ } else
|
|
+#endif
|
|
+#ifdef CONFIG_RAETH_NAPI
|
|
+ netif_receive_skb(rx_skb);
|
|
+#else
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_RX
|
|
+ if(ei_local->vlgrp && rx_ring[rx_dma_owner_idx].rxd_info2.TAG) {
|
|
+ vlan_hwaccel_rx(rx_skb, ei_local->vlgrp, rx_ring[rx_dma_owner_idx].rxd_info3.VID);
|
|
+ } else {
|
|
+ netif_rx(rx_skb);
|
|
+ }
|
|
+#else
|
|
+#ifdef CONFIG_RAETH_CPU_LOOPBACK
|
|
+ skb_push(rx_skb,ETH_HLEN);
|
|
+ ei_start_xmit(rx_skb, dev, 1);
|
|
+#else
|
|
+ netif_rx(rx_skb);
|
|
+#endif
|
|
+#endif
|
|
+#endif
|
|
+ }
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ if (rx_ring[rx_dma_owner_idx].rxd_info4.SP == 2) {
|
|
+ if (ei_local->PseudoDev != NULL) {
|
|
+ pAd = netdev_priv(ei_local->PseudoDev);
|
|
+ pAd->stat.rx_packets++;
|
|
+ pAd->stat.rx_bytes += length;
|
|
+ }
|
|
+ } else
|
|
+#endif
|
|
+ {
|
|
+ ei_local->stat.rx_packets++;
|
|
+ ei_local->stat.rx_bytes += length;
|
|
+ }
|
|
+
|
|
+
|
|
+#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ if( rx_ring != ei_local->rx_ring0 ){
|
|
+ rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0 = SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
|
|
+ rx_ring[rx_dma_owner_idx].rxd_info2.PLEN1 = SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
|
|
+ }
|
|
+ else
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+ rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0 = MAX_RX_LENGTH;
|
|
+ rx_ring[rx_dma_owner_idx].rxd_info2.LS0 = 0;
|
|
+#endif
|
|
+ rx_ring[rx_dma_owner_idx].rxd_info2.DDONE_bit = 0;
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ if( rx_ring != ei_local->rx_ring0 )
|
|
+ rx_ring[rx_dma_owner_idx].rxd_info1.PDP0 = dma_map_single(NULL, skb->data, MAX_LRO_RX_LENGTH, PCI_DMA_FROMDEVICE);
|
|
+ else
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+ rx_ring[rx_dma_owner_idx].rxd_info1.PDP0 = dma_map_single(NULL, skb->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ dma_cache_sync(NULL, &rx_ring[rx_dma_owner_idx], sizeof(struct PDMA_rxdesc), DMA_TO_DEVICE);
|
|
+#endif
|
|
+		/* Move pointer to the next RXD to be allocated */
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ if(rx_ring_no==3) {
|
|
+ sysRegWrite(RAETH_RX_CALC_IDX3, rx_dma_owner_idx);
|
|
+ ei_local->netrx3_skbuf[rx_dma_owner_idx] = skb;
|
|
+ }
|
|
+ else if(rx_ring_no==2) {
|
|
+ sysRegWrite(RAETH_RX_CALC_IDX2, rx_dma_owner_idx);
|
|
+ ei_local->netrx2_skbuf[rx_dma_owner_idx] = skb;
|
|
+ }
|
|
+ else if(rx_ring_no==1) {
|
|
+ sysRegWrite(RAETH_RX_CALC_IDX1, rx_dma_owner_idx);
|
|
+ ei_local->netrx1_skbuf[rx_dma_owner_idx] = skb;
|
|
+ }
|
|
+ else if(rx_ring_no==0) {
|
|
+ sysRegWrite(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
|
|
+ ei_local->netrx0_skbuf[rx_dma_owner_idx] = skb;
|
|
+ }
|
|
+#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+ if(rx_ring_no==0) {
|
|
+ sysRegWrite(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
|
|
+ ei_local->netrx0_skbuf[rx_dma_owner_idx] = skb;
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_calc_idx0 = rx_dma_owner_idx;
|
|
+#endif
|
|
+ }
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+ else if(rx_ring_no==3) {
|
|
+ sysRegWrite(RAETH_RX_CALC_IDX3, rx_dma_owner_idx);
|
|
+ ei_local->netrx3_skbuf[rx_dma_owner_idx] = skb;
|
|
+ }
|
|
+ else if(rx_ring_no==2) {
|
|
+ sysRegWrite(RAETH_RX_CALC_IDX2, rx_dma_owner_idx);
|
|
+ ei_local->netrx2_skbuf[rx_dma_owner_idx] = skb;
|
|
+ }
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+ else {
|
|
+ sysRegWrite(RAETH_RX_CALC_IDX1, rx_dma_owner_idx);
|
|
+ ei_local->netrx1_skbuf[rx_dma_owner_idx] = skb;
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_calc_idx1 = rx_dma_owner_idx;
|
|
+#endif
|
|
+ }
|
|
+#else
|
|
+ sysRegWrite(RAETH_RX_CALC_IDX0, rx_dma_owner_idx);
|
|
+ ei_local->netrx0_skbuf[rx_dma_owner_idx] = skb;
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_calc_idx0 = rx_dma_owner_idx;
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+
|
|
+		/* Advance to the next RX descriptor to be processed.
|
|
+ */
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ if(rx_ring_no==3)
|
|
+ rx_dma_owner_lro3 = (sysRegRead(RAETH_RX_CALC_IDX3) + 1) % NUM_LRO_RX_DESC;
|
|
+ else if(rx_ring_no==2)
|
|
+ rx_dma_owner_lro2 = (sysRegRead(RAETH_RX_CALC_IDX2) + 1) % NUM_LRO_RX_DESC;
|
|
+ else if(rx_ring_no==1)
|
|
+ rx_dma_owner_lro1 = (sysRegRead(RAETH_RX_CALC_IDX1) + 1) % NUM_LRO_RX_DESC;
|
|
+ else if(rx_ring_no==0)
|
|
+ rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
|
|
+ else {
|
|
+ }
|
|
+#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+ if(rx_ring_no==0) {
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_dma_owner_idx0 = (rx_dma_owner_idx + 1) % NUM_RX_DESC;
|
|
+#else
|
|
+ rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
|
|
+#endif
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+ }else if(rx_ring_no==3) {
|
|
+ rx_dma_owner_idx3 = (sysRegRead(RAETH_RX_CALC_IDX3) + 1) % NUM_RX_DESC;
|
|
+ }else if(rx_ring_no==2) {
|
|
+ rx_dma_owner_idx2 = (sysRegRead(RAETH_RX_CALC_IDX2) + 1) % NUM_RX_DESC;
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+ }else {
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_dma_owner_idx1 = (rx_dma_owner_idx + 1) % NUM_RX_DESC;
|
|
+#else
|
|
+ rx_dma_owner_idx1 = (sysRegRead(RAETH_RX_CALC_IDX1) + 1) % NUM_RX_DESC;
|
|
+#endif
|
|
+ }
|
|
+#else
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_dma_owner_idx0 = (rx_dma_owner_idx + 1) % NUM_RX_DESC;
|
|
+#else
|
|
+ rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
|
|
+#endif
|
|
+#endif
|
|
+ } /* for */
|
|
+
|
|
+#if defined (CONFIG_RAETH_LRO)
|
|
+ if (lro_flush_needed) {
|
|
+ //LroStatsUpdate(&ei_local->lro_mgr,1);
|
|
+ lro_flush_all(&ei_local->lro_mgr);
|
|
+ lro_flush_needed = 0;
|
|
+ }
|
|
+#endif
|
|
+ return bReschedule;
|
|
+}
|
|
+
|
|
+
|
|
+///////////////////////////////////////////////////////////////////
|
|
+/////
|
|
+///// ra_get_stats - gather packet information for management plane
|
|
+/////
|
|
+///// Pass net_device_stats to the upper layer.
|
|
+/////
|
|
+/////
|
|
+///// RETURNS: pointer to net_device_stats
|
|
+///////////////////////////////////////////////////////////////////
|
|
+
|
|
+struct net_device_stats *ra_get_stats(struct net_device *dev)
|
|
+{
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+ return &ei_local->stat;
|
|
+}
|
|
+
|
|
+#if defined (CONFIG_RT_3052_ESW)
|
|
+void kill_sig_workq(struct work_struct *work)
|
|
+{
|
|
+ struct file *fp;
|
|
+ char pid[8];
|
|
+ struct task_struct *p = NULL;
|
|
+
|
|
+ //read udhcpc pid from file, and send signal USR2,USR1 to get a new IP
|
|
+ fp = filp_open("/var/run/udhcpc.pid", O_RDONLY, 0);
|
|
+ if (IS_ERR(fp))
|
|
+ return;
|
|
+
|
|
+ if (fp->f_op && fp->f_op->read) {
|
|
+ if (fp->f_op->read(fp, pid, 8, &fp->f_pos) > 0) {
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ p = pid_task(find_get_pid(simple_strtoul(pid, NULL, 10)), PIDTYPE_PID);
|
|
+#else
|
|
+ p = find_task_by_pid(simple_strtoul(pid, NULL, 10));
|
|
+#endif
|
|
+
|
|
+ if (NULL != p) {
|
|
+ send_sig(SIGUSR2, p, 0);
|
|
+ send_sig(SIGUSR1, p, 0);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ filp_close(fp, NULL);
|
|
+
|
|
+}
|
|
+#endif
|
|
+
|
|
+
|
|
+///////////////////////////////////////////////////////////////////
|
|
+/////
|
|
+///// ra2880Recv - process the next incoming packet
|
|
+/////
|
|
+///// Handle one incoming packet. The packet is checked for errors and sent
|
|
+///// to the upper layer.
|
|
+/////
|
|
+///// RETURNS: OK on success or ERROR.
|
|
+///////////////////////////////////////////////////////////////////
|
|
+
|
|
+#ifndef CONFIG_RAETH_NAPI
|
|
+#if defined WORKQUEUE_BH || defined (TASKLET_WORKQUEUE_SW)
|
|
+void ei_receive_workq(struct work_struct *work)
|
|
+#else
|
|
+void ei_receive(unsigned long unused) // device structure
|
|
+#endif // WORKQUEUE_BH //
|
|
+{
|
|
+ struct net_device *dev = dev_raether;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+ unsigned long reg_int_mask=0;
|
|
+ int bReschedule=0;
|
|
+
|
|
+
|
|
+ if(tx_ring_full==0){
|
|
+ bReschedule = rt2880_eth_recv(dev);
|
|
+ if(bReschedule)
|
|
+ {
|
|
+#ifdef WORKQUEUE_BH
|
|
+ schedule_work(&ei_local->rx_wq);
|
|
+#else
|
|
+#if defined (TASKLET_WORKQUEUE_SW)
|
|
+ if (working_schedule == 1)
|
|
+ schedule_work(&ei_local->rx_wq);
|
|
+ else
|
|
+#endif
|
|
+ tasklet_hi_schedule(&ei_local->rx_tasklet);
|
|
+#endif // WORKQUEUE_BH //
|
|
+ }else{
|
|
+ reg_int_mask=sysRegRead(RAETH_FE_INT_ENABLE);
|
|
+#if defined(DELAY_INT)
|
|
+ sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask| RX_DLY_INT);
|
|
+#else
|
|
+ sysRegWrite(RAETH_FE_INT_ENABLE, (reg_int_mask | RX_DONE_INT0 | RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3));
|
|
+#endif
|
|
+#ifdef CONFIG_RAETH_QDMA
|
|
+ reg_int_mask=sysRegRead(QFE_INT_ENABLE);
|
|
+#if defined(DELAY_INT)
|
|
+ sysRegWrite(QFE_INT_ENABLE, reg_int_mask| RX_DLY_INT);
|
|
+#else
|
|
+ sysRegWrite(QFE_INT_ENABLE, (reg_int_mask | RX_DONE_INT0 | RX_DONE_INT1));
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+
|
|
+ }
|
|
+ }else{
|
|
+#ifdef WORKQUEUE_BH
|
|
+ schedule_work(&ei_local->rx_wq);
|
|
+#else
|
|
+#if defined (TASKLET_WORKQUEUE_SW)
|
|
+ if (working_schedule == 1)
|
|
+ schedule_work(&ei_local->rx_wq);
|
|
+ else
|
|
+#endif
|
|
+ tasklet_schedule(&ei_local->rx_tasklet);
|
|
+#endif // WORKQUEUE_BH //
|
|
+ }
|
|
+}
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+void ei_hw_lro_auto_adj(unsigned int index, END_DEVICE* ei_local)
|
|
+{
|
|
+ unsigned int entry;
|
|
+ unsigned int pkt_cnt;
|
|
+ unsigned int tick_cnt;
|
|
+ unsigned int duration_us;
|
|
+ unsigned int byte_cnt;
|
|
+
|
|
+	/* read packet count statistics of the auto-learn table */
|
|
+ entry = index + 68;
|
|
+ sysRegWrite( PDMA_FE_ALT_CF8, entry );
|
|
+ pkt_cnt = sysRegRead(PDMA_FE_ALT_SGL_CFC) & 0xfff;
|
|
+ tick_cnt = (sysRegRead(PDMA_FE_ALT_SGL_CFC) >> 16) & 0xffff;
|
|
+#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
|
|
+ printk("[HW LRO] ei_hw_lro_auto_adj(): pkt_cnt[%d]=%d, tick_cnt[%d]=%d\n", index, pkt_cnt, index, tick_cnt);
|
|
+ printk("[HW LRO] ei_hw_lro_auto_adj(): packet_interval[%d]=%d (ticks/pkt)\n", index, tick_cnt/pkt_cnt);
|
|
+#endif
|
|
+
|
|
+	/* read byte count statistics of the auto-learn table */
|
|
+ entry = index + 64;
|
|
+ sysRegWrite( PDMA_FE_ALT_CF8, entry );
|
|
+ byte_cnt = sysRegRead(PDMA_FE_ALT_SGL_CFC);
|
|
+#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
|
|
+ printk("[HW LRO] ei_hw_lro_auto_adj(): byte_cnt[%d]=%d\n", index, byte_cnt);
|
|
+#endif
|
|
+
|
|
+ /* calculate the packet interval of the rx flow */
|
|
+ duration_us = tick_cnt * HW_LRO_TIMER_UNIT;
|
|
+ ei_local->hw_lro_pkt_interval[index - 1] = (duration_us/pkt_cnt) * ei_local->hw_lro_alpha / 100;
|
|
+#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
|
|
+ printk("[HW LRO] ei_hw_lro_auto_adj(): packet_interval[%d]=%d (20us)\n", index, duration_us/pkt_cnt);
|
|
+#endif
|
|
+
|
|
+ if ( !ei_local->hw_lro_fix_setting ){
|
|
+ /* adjust age_time, agg_time for the lro ring */
|
|
+ if(ei_local->hw_lro_pkt_interval[index - 1] > 0){
|
|
+ SET_PDMA_RXRING_AGE_TIME(index, (ei_local->hw_lro_pkt_interval[index - 1] * HW_LRO_MAX_AGG_CNT));
|
|
+ SET_PDMA_RXRING_AGG_TIME(index, (ei_local->hw_lro_pkt_interval[index - 1] * HW_LRO_AGG_DELTA));
|
|
+ }
|
|
+ else{
|
|
+ SET_PDMA_RXRING_AGE_TIME(index, HW_LRO_MAX_AGG_CNT);
|
|
+ SET_PDMA_RXRING_AGG_TIME(index, HW_LRO_AGG_DELTA);
|
|
+ }
|
|
+ }
|
|
+}
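+/*
+ * Editor's note, a worked example of the adjustment above (all numbers are
+ * hypothetical and chosen only for illustration; HW_LRO_TIMER_UNIT,
+ * HW_LRO_MAX_AGG_CNT and HW_LRO_AGG_DELTA are defined elsewhere in the
+ * driver): assuming HW_LRO_TIMER_UNIT == 20, pkt_cnt = 500, tick_cnt = 1000
+ * and hw_lro_alpha = 50, duration_us = 1000 * 20 = 20000, the raw per-packet
+ * interval is 20000 / 500 = 40, and the smoothed hw_lro_pkt_interval becomes
+ * 40 * 50 / 100 = 20; the ring age/agg timers are then set to
+ * 20 * HW_LRO_MAX_AGG_CNT and 20 * HW_LRO_AGG_DELTA respectively.
+ */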
|
|
+
|
|
+void ei_hw_lro_workq(struct work_struct *work)
|
|
+{
|
|
+ END_DEVICE *ei_local;
|
|
+ unsigned int reg_int_val;
|
|
+ unsigned int reg_int_mask;
|
|
+
|
|
+ ei_local = container_of(work, struct end_device, hw_lro_wq);
|
|
+
|
|
+ reg_int_val = sysRegRead(RAETH_FE_INT_STATUS);
|
|
+#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
|
|
+ printk("[HW LRO] ei_hw_lro_workq(): RAETH_FE_INT_STATUS=0x%x\n", reg_int_val);
|
|
+#endif
|
|
+ if((reg_int_val & ALT_RPLC_INT3)){
|
|
+#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
|
|
+ printk("[HW LRO] ALT_RPLC_INT3 occurred!\n");
|
|
+#endif
|
|
+ sysRegWrite(RAETH_FE_INT_STATUS, ALT_RPLC_INT3);
|
|
+ ei_hw_lro_auto_adj(3, ei_local);
|
|
+ }
|
|
+ if((reg_int_val & ALT_RPLC_INT2)){
|
|
+#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
|
|
+ printk("[HW LRO] ALT_RPLC_INT2 occurred!\n");
|
|
+#endif
|
|
+ sysRegWrite(RAETH_FE_INT_STATUS, ALT_RPLC_INT2);
|
|
+ ei_hw_lro_auto_adj(2, ei_local);
|
|
+ }
|
|
+ if((reg_int_val & ALT_RPLC_INT1)){
|
|
+#if defined (CONFIG_RAETH_HW_LRO_AUTO_ADJ_DBG)
|
|
+ printk("[HW LRO] ALT_RPLC_INT1 occurred!\n");
|
|
+#endif
|
|
+ sysRegWrite(RAETH_FE_INT_STATUS, ALT_RPLC_INT1);
|
|
+ ei_hw_lro_auto_adj(1, ei_local);
|
|
+ }
|
|
+
|
|
+ /* unmask interrupts of rx flow to hw lro rings */
|
|
+ reg_int_mask = sysRegRead(RAETH_FE_INT_ENABLE);
|
|
+ sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask | ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1);
|
|
+}
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+
|
|
+#ifdef CONFIG_RAETH_NAPI
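+/*
+ * NAPI poll routine: does TX housekeeping, receives up to the given budget,
+ * and returns 0 once polling is complete (interrupts re-enabled again) or 1
+ * to stay on the poll list.
+ */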
|
|
+static int
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+raeth_clean(struct napi_struct *napi, int budget)
|
|
+#else
|
|
+raeth_clean(struct net_device *netdev, int *budget)
|
|
+#endif
|
|
+{
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ struct net_device *netdev=dev_raether;
|
|
+ int work_to_do = budget;
|
|
+#else
|
|
+ int work_to_do = min(*budget, netdev->quota);
|
|
+#endif
|
|
+ END_DEVICE *ei_local =netdev_priv(netdev);
|
|
+ int work_done = 0;
|
|
+ unsigned long reg_int_mask=0;
|
|
+
|
|
+ ei_xmit_housekeeping(0);
|
|
+
|
|
+ rt2880_eth_recv(netdev, &work_done, work_to_do);
|
|
+
|
|
+ /* this controls when to re-enable the interrupt; 0 means never re-enable it */
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
|
|
+ *budget -= work_done;
|
|
+ netdev->quota -= work_done;
|
|
+#endif
|
|
+ /* if no Tx and not enough Rx work done, exit the polling mode */
|
|
+ if(( (work_done < work_to_do)) || !netif_running(netdev)) {
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ napi_complete(&ei_local->napi);
|
|
+#else
|
|
+ netif_rx_complete(netdev);
|
|
+#endif
|
|
+ atomic_dec_and_test(&ei_local->irq_sem);
|
|
+
|
|
+ sysRegWrite(RAETH_FE_INT_STATUS, RAETH_FE_INT_ALL); // ack all fe interrupts
|
|
+ reg_int_mask=sysRegRead(RAETH_FE_INT_ENABLE);
|
|
+
|
|
+#ifdef DELAY_INT
|
|
+ sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask |RAETH_FE_INT_DLY_INIT); // init delay interrupt only
|
|
+#else
|
|
+ sysRegWrite(RAETH_FE_INT_ENABLE,reg_int_mask | RAETH_FE_INT_SETTING);
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_QDMA
|
|
+ sysRegWrite(QFE_INT_STATUS, QFE_INT_ALL);
|
|
+ reg_int_mask=sysRegRead(QFE_INT_ENABLE);
|
|
+#ifdef DELAY_INT
|
|
+ sysRegWrite(QFE_INT_ENABLE, reg_int_mask |QFE_INT_DLY_INIT); // init delay interrupt only
|
|
+#else
|
|
+ sysRegWrite(QFE_INT_ENABLE,reg_int_mask | (RX_DONE_INT0 | RX_DONE_INT1 | RLS_DONE_INT));
|
|
+#endif
|
|
+#endif // CONFIG_RAETH_QDMA //
|
|
+
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
+
|
|
+void gsw_delay_setting(void)
|
|
+{
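+ /* Used with the internal GE2<->MT7530 path: when FE_INT_STATUS2 bit 25
+  * reports a GE2 link change on MT7621 and the link is up, reprogram switch
+  * register 0x7b00 with a delay value that depends on the negotiated speed
+  * (100M vs 10M/1G), then notify user space via kill_sig_wq.
+  */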
|
|
+#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN) || defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
|
|
+ END_DEVICE *ei_local = netdev_priv(dev_raether);
|
|
+ int reg_int_val = 0;
|
|
+ int link_speed = 0;
|
|
+
|
|
+ reg_int_val = sysRegRead(FE_INT_STATUS2);
|
|
+#if defined (CONFIG_RALINK_MT7621)
|
|
+ if( reg_int_val & BIT(25))
|
|
+ {
|
|
+ if(sysRegRead(RALINK_ETH_SW_BASE+0x0208) & 0x1) // link up
|
|
+ {
|
|
+ link_speed = (sysRegRead(RALINK_ETH_SW_BASE+0x0208)>>2 & 0x3);
|
|
+ if(link_speed == 1)
|
|
+ {
|
|
+ // delay setting for 100M
|
|
+ if((sysRegRead(0xbe00000c)&0xFFFF)==0x0101)
|
|
+ mii_mgr_write(31, 0x7b00, 8);
|
|
+ printk("MT7621 GE2 link rate to 100M\n");
|
|
+ } else
|
|
+ {
|
|
+ //delay setting for 10/1000M
|
|
+ if((sysRegRead(0xbe00000c)&0xFFFF)==0x0101)
|
|
+ mii_mgr_write(31, 0x7b00, 0x102);
|
|
+ printk("MT7621 GE2 link rate to 10M/1G\n");
|
|
+ }
|
|
+ schedule_work(&ei_local->kill_sig_wq);
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+ sysRegWrite(FE_INT_STATUS2, reg_int_val);
|
|
+#endif
|
|
+}
|
|
+
|
|
+/**
+ * ei_interrupt - handle controller interrupt
+ *
+ * This routine is called at interrupt level in response to an interrupt from
+ * the controller.
+ *
+ * RETURNS: N/A.
+ */
|
|
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
|
|
+static irqreturn_t ei_interrupt(int irq, void *dev_id)
|
|
+#else
|
|
+static irqreturn_t ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
|
|
+#endif
|
|
+{
|
|
+#if !defined(CONFIG_RAETH_NAPI)
|
|
+ unsigned long reg_int_val;
|
|
+ unsigned long reg_int_mask=0;
|
|
+ unsigned int recv = 0;
|
|
+ unsigned int transmit __maybe_unused = 0;
|
|
+ unsigned long flags;
|
|
+#endif
|
|
+
|
|
+ struct net_device *dev = (struct net_device *) dev_id;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+
|
|
+ //Qwert
|
|
+ /*
|
|
+ unsigned long old,cur,dcycle;
|
|
+ static int cnt = 0;
|
|
+ static unsigned long max_dcycle = 0,tcycle = 0;
|
|
+ old = read_c0_count();
|
|
+ */
|
|
+ if (dev == NULL)
|
|
+ {
|
|
+ printk (KERN_ERR "net_interrupt(): irq %x for unknown device.\n", IRQ_ENET0);
|
|
+ return IRQ_NONE;
|
|
+ }
|
|
+
|
|
+#ifdef CONFIG_RAETH_NAPI
|
|
+ gsw_delay_setting();
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ if(napi_schedule_prep(&ei_local->napi)) {
|
|
+#else
|
|
+ if(netif_rx_schedule_prep(dev)) {
|
|
+#endif
|
|
+ atomic_inc(&ei_local->irq_sem);
|
|
+ sysRegWrite(RAETH_FE_INT_ENABLE, 0);
|
|
+#ifdef CONFIG_RAETH_QDMA
|
|
+ sysRegWrite(QFE_INT_ENABLE, 0);
|
|
+#endif
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ __napi_schedule(&ei_local->napi);
|
|
+#else
|
|
+ __netif_rx_schedule(dev);
|
|
+#endif
|
|
+ }
|
|
+#else
|
|
+
|
|
+ spin_lock_irqsave(&(ei_local->page_lock), flags);
|
|
+ reg_int_val = sysRegRead(RAETH_FE_INT_STATUS);
|
|
+#ifdef CONFIG_RAETH_QDMA
|
|
+ reg_int_val |= sysRegRead(QFE_INT_STATUS);
|
|
+#endif
|
|
+#if defined (DELAY_INT)
|
|
+ if((reg_int_val & RX_DLY_INT))
|
|
+ recv = 1;
|
|
+
|
|
+ if (reg_int_val & RAETH_TX_DLY_INT)
|
|
+ transmit = 1;
|
|
+
|
|
+#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+ raeth_pdma_lro_dly_int_dvt();
|
|
+#endif /* CONFIG_RAETH_PDMA_DVT */
|
|
+
|
|
+#else
|
|
+ if((reg_int_val & (RX_DONE_INT0 | RX_DONE_INT3 | RX_DONE_INT2 | RX_DONE_INT1)))
|
|
+ recv = 1;
|
|
+
|
|
+#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+ if((reg_int_val & RX_DONE_INT3))
|
|
+ recv = 3;
|
|
+ if((reg_int_val & RX_DONE_INT2))
|
|
+ recv = 2;
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+ if((reg_int_val & RX_DONE_INT1))
|
|
+ recv = 1;
|
|
+#endif
|
|
+
|
|
+ if (reg_int_val & RAETH_TX_DONE_INT0)
|
|
+ transmit |= RAETH_TX_DONE_INT0;
|
|
+#if defined (CONFIG_RAETH_QOS)
|
|
+ if (reg_int_val & TX_DONE_INT1)
|
|
+ transmit |= TX_DONE_INT1;
|
|
+ if (reg_int_val & TX_DONE_INT2)
|
|
+ transmit |= TX_DONE_INT2;
|
|
+ if (reg_int_val & TX_DONE_INT3)
|
|
+ transmit |= TX_DONE_INT3;
|
|
+#endif //CONFIG_RAETH_QOS
|
|
+
|
|
+#endif //DELAY_INT
|
|
+
|
|
+#if defined (DELAY_INT)
|
|
+ sysRegWrite(RAETH_FE_INT_STATUS, RAETH_FE_INT_DLY_INIT);
|
|
+#else
|
|
+ sysRegWrite(RAETH_FE_INT_STATUS, RAETH_FE_INT_ALL);
|
|
+#endif
|
|
+#ifdef CONFIG_RAETH_QDMA
|
|
+#if defined (DELAY_INT)
|
|
+ sysRegWrite(QFE_INT_STATUS, QFE_INT_DLY_INIT);
|
|
+#else
|
|
+ sysRegWrite(QFE_INT_STATUS, QFE_INT_ALL);
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ if( reg_int_val & (ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1) ){
|
|
+ /* mask interrupts of rx flow to hw lro rings */
|
|
+ reg_int_mask = sysRegRead(RAETH_FE_INT_ENABLE);
|
|
+ sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask & ~(ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1));
|
|
+ schedule_work(&ei_local->hw_lro_wq);
|
|
+ }
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
|
|
+ if(transmit)
|
|
+ ei_xmit_housekeeping(0);
|
|
+#else
|
|
+ ei_xmit_housekeeping(0);
|
|
+#endif
|
|
+
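+ /* Defer RX to the tasklet/workqueue: RX (and QDMA RX) interrupts are masked
+  * here and re-enabled by the bottom half once the ring has been drained; if
+  * the TX ring is full the event is only remembered in pending_recv.
+  */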
|
|
+ if (((recv == 1) || (pending_recv ==1)) && (tx_ring_full==0))
|
|
+ {
|
|
+ reg_int_mask = sysRegRead(RAETH_FE_INT_ENABLE);
|
|
+#if defined (DELAY_INT)
|
|
+ sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask & ~(RX_DLY_INT));
|
|
+#else
|
|
+ sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask & ~(RX_DONE_INT0 | RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3));
|
|
+#endif //DELAY_INT
|
|
+#ifdef CONFIG_RAETH_QDMA
|
|
+ reg_int_mask = sysRegRead(QFE_INT_ENABLE);
|
|
+#if defined (DELAY_INT)
|
|
+ sysRegWrite(QFE_INT_ENABLE, reg_int_mask & ~(RX_DLY_INT));
|
|
+#else
|
|
+ sysRegWrite(QFE_INT_ENABLE, reg_int_mask & ~(RX_DONE_INT0 | RX_DONE_INT1 | RX_DONE_INT2 | RX_DONE_INT3));
|
|
+#endif //DELAY_INT
|
|
+#endif
|
|
+
|
|
+ pending_recv=0;
|
|
+#ifdef WORKQUEUE_BH
|
|
+ schedule_work(&ei_local->rx_wq);
|
|
+#else
|
|
+#if defined (TASKLET_WORKQUEUE_SW)
|
|
+ if (working_schedule == 1)
|
|
+ schedule_work(&ei_local->rx_wq);
|
|
+ else
|
|
+#endif
|
|
+ tasklet_hi_schedule(&ei_local->rx_tasklet);
|
|
+#endif // WORKQUEUE_BH //
|
|
+ }
|
|
+ else if (recv == 1 && tx_ring_full==1)
|
|
+ {
|
|
+ pending_recv=1;
|
|
+ }
|
|
+ else if((recv == 0) && (transmit == 0))
|
|
+ {
|
|
+ gsw_delay_setting();
|
|
+ }
|
|
+ spin_unlock_irqrestore(&(ei_local->page_lock), flags);
|
|
+#endif
|
|
+
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A) || \
|
|
+ defined (CONFIG_RALINK_MT7620)|| defined (CONFIG_RALINK_MT7621)
|
|
+static void esw_link_status_changed(int port_no, void *dev_id)
|
|
+{
|
|
+ unsigned int reg_val;
|
|
+ struct net_device *dev = (struct net_device *) dev_id;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined (CONFIG_RALINK_MT7620)
|
|
+ reg_val = *((volatile u32 *)(RALINK_ETH_SW_BASE+ 0x3008 + (port_no*0x100)));
|
|
+#elif defined (CONFIG_RALINK_MT7621)
|
|
+ mii_mgr_read(31, (0x3008 + (port_no*0x100)), ®_val);
|
|
+#endif
|
|
+ if(reg_val & 0x1) {
|
|
+ printk("ESW: Link Status Changed - Port%d Link UP\n", port_no);
|
|
+#if defined (CONFIG_RALINK_MT7621) && defined (CONFIG_RAETH_8023AZ_EEE)
|
|
+ mii_mgr_write(port_no, 31, 0x52b5);
|
|
+ mii_mgr_write(port_no, 16, 0xb780);
|
|
+ mii_mgr_write(port_no, 17, 0x00e0);
|
|
+ mii_mgr_write(port_no, 16, 0x9780);
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_WAN_AT_P0)
|
|
+ if(port_no==0) {
|
|
+ schedule_work(&ei_local->kill_sig_wq);
|
|
+ }
|
|
+#elif defined (CONFIG_WAN_AT_P4)
|
|
+ if(port_no==4) {
|
|
+ schedule_work(&ei_local->kill_sig_wq);
|
|
+ }
|
|
+#endif
|
|
+ } else {
|
|
+ printk("ESW: Link Status Changed - Port%d Link Down\n", port_no);
|
|
+#if defined (CONFIG_RALINK_MT7621) && defined (CONFIG_RAETH_8023AZ_EEE)
|
|
+ mii_mgr_write(port_no, 31, 0x52b5);
|
|
+ mii_mgr_write(port_no, 16, 0xb780);
|
|
+ mii_mgr_write(port_no, 17, 0x0000);
|
|
+ mii_mgr_write(port_no, 16, 0x9780);
|
|
+#endif
|
|
+
|
|
+ }
|
|
+}
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RT_3052_ESW) && ! defined(CONFIG_RALINK_MT7621) && ! defined(CONFIG_ARCH_MT7623)
|
|
+static irqreturn_t esw_interrupt(int irq, void *dev_id)
|
|
+{
|
|
+ unsigned long flags;
|
|
+ unsigned long reg_int_val;
|
|
+#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined(CONFIG_RALINK_MT7620)
|
|
+ unsigned long acl_int_val;
|
|
+ unsigned long mib_int_val;
|
|
+#else
|
|
+ static unsigned long stat;
|
|
+ unsigned long stat_curr;
|
|
+#endif
|
|
+
|
|
+ struct net_device *dev = (struct net_device *) dev_id;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+
|
|
+
|
|
+ spin_lock_irqsave(&(ei_local->page_lock), flags);
|
|
+ reg_int_val = (*((volatile u32 *)(ESW_ISR))); //Interrupt Status Register
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined(CONFIG_RALINK_MT7620)
|
|
+ if (reg_int_val & P5_LINK_CH) {
|
|
+ esw_link_status_changed(5, dev_id);
|
|
+ }
|
|
+ if (reg_int_val & P4_LINK_CH) {
|
|
+ esw_link_status_changed(4, dev_id);
|
|
+ }
|
|
+ if (reg_int_val & P3_LINK_CH) {
|
|
+ esw_link_status_changed(3, dev_id);
|
|
+ }
|
|
+ if (reg_int_val & P2_LINK_CH) {
|
|
+ esw_link_status_changed(2, dev_id);
|
|
+ }
|
|
+ if (reg_int_val & P1_LINK_CH) {
|
|
+ esw_link_status_changed(1, dev_id);
|
|
+ }
|
|
+ if (reg_int_val & P0_LINK_CH) {
|
|
+ esw_link_status_changed(0, dev_id);
|
|
+ }
|
|
+ if (reg_int_val & ACL_INT) {
|
|
+ acl_int_val = sysRegRead(ESW_AISR);
|
|
+ sysRegWrite(ESW_AISR, acl_int_val);
|
|
+ }
|
|
+ if (reg_int_val & MIB_INT) {
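+ /* Read and ack every port's MIB interrupt status and bump the matching
+  * software counters (p*_rx/tx_good_cnt, p*_rx/tx_byte_cnt); presumably this
+  * extends the hardware MIB counters across wrap-arounds.
+  */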
|
|
+
|
|
+ mib_int_val = sysRegRead(ESW_P0_IntSn);
|
|
+ if(mib_int_val){
|
|
+ sysRegWrite(ESW_P0_IntSn, mib_int_val);
|
|
+ if(mib_int_val & RX_GOOD_CNT)
|
|
+ p0_rx_good_cnt ++;
|
|
+ if(mib_int_val & TX_GOOD_CNT)
|
|
+ p0_tx_good_cnt ++;
|
|
+ if(mib_int_val & RX_GOCT_CNT)
|
|
+ p0_rx_byte_cnt ++;
|
|
+ if(mib_int_val & TX_GOCT_CNT)
|
|
+ p0_tx_byte_cnt ++;
|
|
+ }
|
|
+
|
|
+ mib_int_val = sysRegRead(ESW_P1_IntSn);
|
|
+ if(mib_int_val){
|
|
+ sysRegWrite(ESW_P1_IntSn, mib_int_val);
|
|
+ if(mib_int_val & RX_GOOD_CNT)
|
|
+ p1_rx_good_cnt ++;
|
|
+ if(mib_int_val & TX_GOOD_CNT)
|
|
+ p1_tx_good_cnt ++;
|
|
+ if(mib_int_val & RX_GOCT_CNT)
|
|
+ p1_rx_byte_cnt ++;
|
|
+ if(mib_int_val & TX_GOCT_CNT)
|
|
+ p1_tx_byte_cnt ++;
|
|
+ }
|
|
+
|
|
+ mib_int_val = sysRegRead(ESW_P2_IntSn);
|
|
+ if(mib_int_val){
|
|
+ sysRegWrite(ESW_P2_IntSn, mib_int_val);
|
|
+ if(mib_int_val & RX_GOOD_CNT)
|
|
+ p2_rx_good_cnt ++;
|
|
+ if(mib_int_val & TX_GOOD_CNT)
|
|
+ p2_tx_good_cnt ++;
|
|
+ if(mib_int_val & RX_GOCT_CNT)
|
|
+ p2_rx_byte_cnt ++;
|
|
+ if(mib_int_val & TX_GOCT_CNT)
|
|
+ p2_tx_byte_cnt ++;
|
|
+ }
|
|
+
|
|
+
|
|
+ mib_int_val = sysRegRead(ESW_P3_IntSn);
|
|
+ if(mib_int_val){
|
|
+ sysRegWrite(ESW_P3_IntSn, mib_int_val);
|
|
+ if(mib_int_val & RX_GOOD_CNT)
|
|
+ p3_rx_good_cnt ++;
|
|
+ if(mib_int_val & TX_GOOD_CNT)
|
|
+ p3_tx_good_cnt ++;
|
|
+ if(mib_int_val & RX_GOCT_CNT)
|
|
+ p3_rx_byte_cnt ++;
|
|
+ if(mib_int_val & TX_GOCT_CNT)
|
|
+ p3_tx_byte_cnt ++;
|
|
+ }
|
|
+
|
|
+ mib_int_val = sysRegRead(ESW_P4_IntSn);
|
|
+ if(mib_int_val){
|
|
+ sysRegWrite(ESW_P4_IntSn, mib_int_val);
|
|
+ if(mib_int_val & RX_GOOD_CNT)
|
|
+ p4_rx_good_cnt ++;
|
|
+ if(mib_int_val & TX_GOOD_CNT)
|
|
+ p4_tx_good_cnt ++;
|
|
+ if(mib_int_val & RX_GOCT_CNT)
|
|
+ p4_rx_byte_cnt ++;
|
|
+ if(mib_int_val & TX_GOCT_CNT)
|
|
+ p4_tx_byte_cnt ++;
|
|
+ }
|
|
+
|
|
+ mib_int_val = sysRegRead(ESW_P5_IntSn);
|
|
+ if(mib_int_val){
|
|
+ sysRegWrite(ESW_P5_IntSn, mib_int_val);
|
|
+ if(mib_int_val & RX_GOOD_CNT)
|
|
+ p5_rx_good_cnt ++;
|
|
+ if(mib_int_val & TX_GOOD_CNT)
|
|
+ p5_tx_good_cnt ++;
|
|
+ if(mib_int_val & RX_GOCT_CNT)
|
|
+ p5_rx_byte_cnt ++;
|
|
+ if(mib_int_val & TX_GOCT_CNT)
|
|
+ p5_tx_byte_cnt ++;
|
|
+ }
|
|
+
|
|
+ mib_int_val = sysRegRead(ESW_P6_IntSn);
|
|
+ if(mib_int_val){
|
|
+ sysRegWrite(ESW_P6_IntSn, mib_int_val);
|
|
+ if(mib_int_val & RX_GOOD_CNT)
|
|
+ p6_rx_good_cnt ++;
|
|
+ if(mib_int_val & TX_GOOD_CNT)
|
|
+ p6_tx_good_cnt ++;
|
|
+ if(mib_int_val & RX_GOCT_CNT)
|
|
+ p6_rx_byte_cnt ++;
|
|
+ if(mib_int_val & TX_GOCT_CNT)
|
|
+ p6_tx_byte_cnt ++;
|
|
+ }
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+ mib_int_val = sysRegRead(ESW_P7_IntSn);
|
|
+ if(mib_int_val){
|
|
+ sysRegWrite(ESW_P7_IntSn, mib_int_val);
|
|
+ if(mib_int_val & RX_GOOD_CNT)
|
|
+ p7_rx_good_cnt ++;
|
|
+ if(mib_int_val & TX_GOOD_CNT)
|
|
+ p7_tx_good_cnt ++;
|
|
+ if(mib_int_val & RX_GOCT_CNT)
|
|
+ p7_rx_byte_cnt ++;
|
|
+ if(mib_int_val & TX_GOCT_CNT)
|
|
+ p7_tx_byte_cnt ++;
|
|
+
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+
|
|
+#else // not RT6855
|
|
+ if (reg_int_val & PORT_ST_CHG) {
|
|
+ printk("RT305x_ESW: Link Status Changed\n");
|
|
+
|
|
+ stat_curr = *((volatile u32 *)(RALINK_ETH_SW_BASE+0x80));
|
|
+#ifdef CONFIG_WAN_AT_P0
|
|
+ //link down --> link up : send signal to user application
|
|
+ //link up --> link down : ignore
|
|
+ if ((stat & (1<<25)) || !(stat_curr & (1<<25)))
|
|
+#else
|
|
+ if ((stat & (1<<29)) || !(stat_curr & (1<<29)))
|
|
+#endif
|
|
+ goto out;
|
|
+
|
|
+ schedule_work(&ei_local->kill_sig_wq);
|
|
+out:
|
|
+ stat = stat_curr;
|
|
+ }
|
|
+
|
|
+#endif // defined(CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A)//
|
|
+
|
|
+ sysRegWrite(ESW_ISR, reg_int_val);
|
|
+
|
|
+ spin_unlock_irqrestore(&(ei_local->page_lock), flags);
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+
|
|
+
|
|
+#elif defined (CONFIG_RT_3052_ESW) && defined(CONFIG_RALINK_MT7621)
|
|
+
|
|
+static irqreturn_t esw_interrupt(int irq, void *dev_id)
|
|
+{
|
|
+ unsigned long flags;
|
|
+ unsigned int reg_int_val;
|
|
+ struct net_device *dev = (struct net_device *) dev_id;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+
|
|
+ spin_lock_irqsave(&(ei_local->page_lock), flags);
|
|
+ mii_mgr_read(31, 0x700c, ®_int_val);
|
|
+
|
|
+ if (reg_int_val & P4_LINK_CH) {
|
|
+ esw_link_status_changed(4, dev_id);
|
|
+ }
|
|
+
|
|
+ if (reg_int_val & P3_LINK_CH) {
|
|
+ esw_link_status_changed(3, dev_id);
|
|
+ }
|
|
+ if (reg_int_val & P2_LINK_CH) {
|
|
+ esw_link_status_changed(2, dev_id);
|
|
+ }
|
|
+ if (reg_int_val & P1_LINK_CH) {
|
|
+ esw_link_status_changed(1, dev_id);
|
|
+ }
|
|
+ if (reg_int_val & P0_LINK_CH) {
|
|
+ esw_link_status_changed(0, dev_id);
|
|
+ }
|
|
+
|
|
+ mii_mgr_write(31, 0x700c, 0x1f); //ack switch link change
|
|
+
|
|
+ spin_unlock_irqrestore(&(ei_local->page_lock), flags);
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
+
|
|
+static int ei_start_xmit_fake(struct sk_buff* skb, struct net_device *dev)
|
|
+{
|
|
+ return ei_start_xmit(skb, dev, 1);
|
|
+}
|
|
+
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350)
|
|
+void dump_phy_reg(int port_no, int from, int to, int is_local)
|
|
+{
|
|
+ u32 i=0;
|
|
+ u32 temp=0;
|
|
+
|
|
+ if(is_local==0) {
|
|
+ printk("Global Register\n");
|
|
+ printk("===============");
|
|
+ mii_mgr_write(0, 31, 0); //select global register
|
|
+ for(i=from;i<=to;i++) {
|
|
+ if(i%8==0) {
|
|
+ printk("\n");
|
|
+ }
|
|
+ mii_mgr_read(port_no,i, &temp);
|
|
+ printk("%02d: %04X ",i, temp);
|
|
+ }
|
|
+ } else {
|
|
+ mii_mgr_write(0, 31, 0x8000); //select local register
|
|
+ printk("\n\nLocal Register Port %d\n",port_no);
|
|
+ printk("===============");
|
|
+ for(i=from;i<=to;i++) {
|
|
+ if(i%8==0) {
|
|
+ printk("\n");
|
|
+ }
|
|
+ mii_mgr_read(port_no,i, &temp);
|
|
+ printk("%02d: %04X ",i, temp);
|
|
+ }
|
|
+ }
|
|
+ printk("\n");
|
|
+}
|
|
+#else
|
|
+void dump_phy_reg(int port_no, int from, int to, int is_local, int page_no)
|
|
+{
|
|
+
|
|
+ u32 i=0;
|
|
+ u32 temp=0;
|
|
+ u32 r31=0;
|
|
+
|
|
+
|
|
+ if(is_local==0) {
|
|
+
|
|
+ printk("\n\nGlobal Register Page %d\n",page_no);
|
|
+ printk("===============");
|
|
+ r31 |= 0 << 15; //global
|
|
+ r31 |= ((page_no&0x7) << 12); //page no
|
|
+ mii_mgr_write(port_no, 31, r31); //select global page x
|
|
+ for(i=16;i<32;i++) {
|
|
+ if(i%8==0) {
|
|
+ printk("\n");
|
|
+ }
|
|
+ mii_mgr_read(port_no,i, &temp);
|
|
+ printk("%02d: %04X ",i, temp);
|
|
+ }
|
|
+ }else {
|
|
+ printk("\n\nLocal Register Port %d Page %d\n",port_no, page_no);
|
|
+ printk("===============");
|
|
+ r31 |= 1 << 15; //local
|
|
+ r31 |= ((page_no&0x7) << 12); //page no
|
|
+ mii_mgr_write(port_no, 31, r31); //select local page x
|
|
+ for(i=16;i<32;i++) {
|
|
+ if(i%8==0) {
|
|
+ printk("\n");
|
|
+ }
|
|
+ mii_mgr_read(port_no,i, &temp);
|
|
+ printk("%02d: %04X ",i, temp);
|
|
+ }
|
|
+ }
|
|
+ printk("\n");
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
+int ei_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
|
|
+{
|
|
+#if defined(CONFIG_RT_3052_ESW) || defined(CONFIG_RAETH_QDMA)
|
|
+ esw_reg reg;
|
|
+#endif
|
|
+#if defined(CONFIG_RALINK_RT3352) || defined(CONFIG_RALINK_RT5350) || \
|
|
+ defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined(CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || \
|
|
+ defined (CONFIG_RALINK_MT7628) || defined (CONFIG_ARCH_MT7623)
|
|
+ esw_rate ratelimit;
|
|
+#endif
|
|
+#if defined(CONFIG_RT_3052_ESW)
|
|
+ unsigned int offset = 0;
|
|
+ unsigned int value = 0;
|
|
+#endif
|
|
+
|
|
+ int ret = 0;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+ ra_mii_ioctl_data mii;
|
|
+ spin_lock_irq(&ei_local->page_lock);
|
|
+
|
|
+ switch (cmd) {
|
|
+#if defined(CONFIG_RAETH_QDMA)
|
|
+#define _HQOS_REG(x) (*((volatile u32 *)(RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + x)))
|
|
+ case RAETH_QDMA_REG_READ:
|
|
+ copy_from_user(®, ifr->ifr_data, sizeof(reg));
|
|
+ if (reg.off > REG_HQOS_MAX) {
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+ reg.val = _HQOS_REG(reg.off);
|
|
+ //printk("read reg off:%x val:%x\n", reg.off, reg.val);
|
|
+ copy_to_user(ifr->ifr_data, ®, sizeof(reg));
|
|
+ break;
|
|
+ case RAETH_QDMA_REG_WRITE:
|
|
+ copy_from_user(®, ifr->ifr_data, sizeof(reg));
|
|
+ if (reg.off > REG_HQOS_MAX) {
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+ _HQOS_REG(reg.off) = reg.val;
|
|
+ //printk("write reg off:%x val:%x\n", reg.off, reg.val);
|
|
+ break;
|
|
+#if 0
|
|
+ case RAETH_QDMA_READ_CPU_CLK:
|
|
+ copy_from_user(®, ifr->ifr_data, sizeof(reg));
|
|
+ reg.val = get_surfboard_sysclk();
|
|
+ //printk("read reg off:%x val:%x\n", reg.off, reg.val);
|
|
+ copy_to_user(ifr->ifr_data, ®, sizeof(reg));
|
|
+ break;
|
|
+#endif
|
|
+ case RAETH_QDMA_QUEUE_MAPPING:
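+ /* reg.off bit 8 toggles LAN/WAN separation; its low byte indexes M2Q_table
+  * (a mark-to-queue map, by the look of the name) and reg.val is the QDMA
+  * queue assigned to that index.
+  */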
|
|
+ copy_from_user(®, ifr->ifr_data, sizeof(reg));
|
|
+ if((reg.off&0x100) == 0x100){
|
|
+ lan_wan_separate = 1;
|
|
+ reg.off &= 0xff;
|
|
+ }else{
|
|
+ lan_wan_separate = 0;
|
|
+ }
|
|
+ M2Q_table[reg.off] = reg.val;
|
|
+ break;
|
|
+#if defined(CONFIG_HW_SFQ)
|
|
+ case RAETH_QDMA_SFQ_WEB_ENABLE:
|
|
+ copy_from_user(®, ifr->ifr_data, sizeof(reg));
|
|
+ if((reg.val) == 0x1){
|
|
+ web_sfq_enable = 1;
|
|
+
|
|
+ }else{
|
|
+ web_sfq_enable = 0;
|
|
+ }
|
|
+ break;
|
|
+#endif
|
|
+
|
|
+
|
|
+#endif
|
|
+ case RAETH_MII_READ:
|
|
+ copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
|
|
+ mii_mgr_read(mii.phy_id, mii.reg_num, &mii.val_out);
|
|
+ //printk("phy %d, reg %d, val 0x%x\n", mii.phy_id, mii.reg_num, mii.val_out);
|
|
+ copy_to_user(ifr->ifr_data, &mii, sizeof(mii));
|
|
+ break;
|
|
+
|
|
+ case RAETH_MII_WRITE:
|
|
+ copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
|
|
+ //printk("phy %d, reg %d, val 0x%x\n", mii.phy_id, mii.reg_num, mii.val_in);
|
|
+ mii_mgr_write(mii.phy_id, mii.reg_num, mii.val_in);
|
|
+ break;
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_ARCH_MT7623)
|
|
+ case RAETH_MII_READ_CL45:
|
|
+ copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
|
|
+ //mii_mgr_cl45_set_address(mii.port_num, mii.dev_addr, mii.reg_addr);
|
|
+ mii_mgr_read_cl45(mii.port_num, mii.dev_addr, mii.reg_addr, &mii.val_out);
|
|
+ copy_to_user(ifr->ifr_data, &mii, sizeof(mii));
|
|
+ break;
|
|
+ case RAETH_MII_WRITE_CL45:
|
|
+ copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
|
|
+ //mii_mgr_cl45_set_address(mii.port_num, mii.dev_addr, mii.reg_addr);
|
|
+ mii_mgr_write_cl45(mii.port_num, mii.dev_addr, mii.reg_addr, mii.val_in);
|
|
+ break;
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RT_3052_ESW)
|
|
+#define _ESW_REG(x) (*((volatile u32 *)(RALINK_ETH_SW_BASE + x)))
|
|
+ case RAETH_ESW_REG_READ:
|
|
+ copy_from_user(®, ifr->ifr_data, sizeof(reg));
|
|
+ if (reg.off > REG_ESW_MAX) {
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+ reg.val = _ESW_REG(reg.off);
|
|
+ //printk("read reg off:%x val:%x\n", reg.off, reg.val);
|
|
+ copy_to_user(ifr->ifr_data, ®, sizeof(reg));
|
|
+ break;
|
|
+ case RAETH_ESW_REG_WRITE:
|
|
+ copy_from_user(®, ifr->ifr_data, sizeof(reg));
|
|
+ if (reg.off > REG_ESW_MAX) {
|
|
+ ret = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+ _ESW_REG(reg.off) = reg.val;
|
|
+ //printk("write reg off:%x val:%x\n", reg.off, reg.val);
|
|
+ break;
|
|
+ case RAETH_ESW_PHY_DUMP:
|
|
+ copy_from_user(®, ifr->ifr_data, sizeof(reg));
|
|
+#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350)
|
|
+ if (reg.val ==32 ) {//dump all phy register
|
|
+ /* Global Register 0~31
|
|
+ * Local Register 0~31
|
|
+ */
|
|
+ dump_phy_reg(0, 0, 31, 0); //dump global register
|
|
+ for(offset=0;offset<5;offset++) {
|
|
+ dump_phy_reg(offset, 0, 31, 1); //dump local register
|
|
+ }
|
|
+ } else {
|
|
+ dump_phy_reg(reg.val, 0, 31, 0); //dump global register
|
|
+ dump_phy_reg(reg.val, 0, 31, 1); //dump local register
|
|
+ }
|
|
+#else
|
|
+ /* SPEC defined Register 0~15
|
|
+ * Global Register 16~31 for each page
|
|
+ * Local Register 16~31 for each page
|
|
+ */
|
|
+ printk("SPEC defined Register");
|
|
+ if (reg.val ==32 ) {//dump all phy register
|
|
+ int i = 0;
|
|
+ for(i=0; i<5; i++){
|
|
+ printk("\n[Port %d]===============",i);
|
|
+ for(offset=0;offset<16;offset++) {
|
|
+ if(offset%8==0) {
|
|
+ printk("\n");
|
|
+ }
|
|
+ mii_mgr_read(i,offset, &value);
|
|
+ printk("%02d: %04X ",offset, value);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ else{
|
|
+ printk("\n[Port %d]===============",reg.val);
|
|
+ for(offset=0;offset<16;offset++) {
|
|
+ if(offset%8==0) {
|
|
+ printk("\n");
|
|
+ }
|
|
+ mii_mgr_read(reg.val,offset, &value);
|
|
+ printk("%02d: %04X ",offset, value);
|
|
+ }
|
|
+ }
|
|
+
|
|
+#if defined (CONFIG_RALINK_MT7628)
|
|
+ for(offset=0;offset<7;offset++) { //global register page 0~6
|
|
+#else
|
|
+ for(offset=0;offset<5;offset++) { //global register page 0~4
|
|
+#endif
|
|
+ if(reg.val == 32) //dump all phy register
|
|
+ dump_phy_reg(0, 16, 31, 0, offset);
|
|
+ else
|
|
+ dump_phy_reg(reg.val, 16, 31, 0, offset);
|
|
+ }
|
|
+
|
|
+ if (reg.val == 32) {//dump all phy register
|
|
+#if !defined (CONFIG_RAETH_HAS_PORT4)
|
|
+ for(offset=0;offset<5;offset++) { //local register port 0-port4
|
|
+#else
|
|
+ for(offset=0;offset<4;offset++) { //local register port 0-port3
|
|
+#endif
|
|
+ dump_phy_reg(offset, 16, 31, 1, 0); //dump local page 0
|
|
+ dump_phy_reg(offset, 16, 31, 1, 1); //dump local page 1
|
|
+ dump_phy_reg(offset, 16, 31, 1, 2); //dump local page 2
|
|
+ dump_phy_reg(offset, 16, 31, 1, 3); //dump local page 3
|
|
+ }
|
|
+ }else {
|
|
+ dump_phy_reg(reg.val, 16, 31, 1, 0); //dump local page 0
|
|
+ dump_phy_reg(reg.val, 16, 31, 1, 1); //dump local page 1
|
|
+ dump_phy_reg(reg.val, 16, 31, 1, 2); //dump local page 2
|
|
+ dump_phy_reg(reg.val, 16, 31, 1, 3); //dump local page 3
|
|
+ }
|
|
+#endif
|
|
+ break;
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+#define _ESW_REG(x) (*((volatile u32 *)(RALINK_ETH_SW_BASE + x)))
|
|
+ case RAETH_ESW_INGRESS_RATE:
|
|
+ copy_from_user(&ratelimit, ifr->ifr_data, sizeof(ratelimit));
|
|
+ offset = 0x11c + (4 * (ratelimit.port / 2));
|
|
+ value = _ESW_REG(offset);
|
|
+
|
|
+ if((ratelimit.port % 2) == 0)
|
|
+ {
|
|
+ value &= 0xffff0000;
|
|
+ if(ratelimit.on_off == 1)
|
|
+ {
|
|
+ value |= (ratelimit.on_off << 14);
|
|
+ value |= (0x07 << 10);
|
|
+ value |= ratelimit.bw;
|
|
+ }
|
|
+ }
|
|
+ else if((ratelimit.port % 2) == 1)
|
|
+ {
|
|
+ value &= 0x0000ffff;
|
|
+ if(ratelimit.on_off == 1)
|
|
+ {
|
|
+ value |= (ratelimit.on_off << 30);
|
|
+ value |= (0x07 << 26);
|
|
+ value |= (ratelimit.bw << 16);
|
|
+ }
|
|
+ }
|
|
+ printk("offset = 0x%4x value=0x%x\n\r", offset, value);
|
|
+
|
|
+ _ESW_REG(offset) = value;
|
|
+ break;
|
|
+
|
|
+ case RAETH_ESW_EGRESS_RATE:
|
|
+ copy_from_user(&ratelimit, ifr->ifr_data, sizeof(ratelimit));
|
|
+ offset = 0x140 + (4 * (ratelimit.port / 2));
|
|
+ value = _ESW_REG(offset);
|
|
+
|
|
+ if((ratelimit.port % 2) == 0)
|
|
+ {
|
|
+ value &= 0xffff0000;
|
|
+ if(ratelimit.on_off == 1)
|
|
+ {
|
|
+ value |= (ratelimit.on_off << 12);
|
|
+ value |= (0x03 << 10);
|
|
+ value |= ratelimit.bw;
|
|
+ }
|
|
+ }
|
|
+ else if((ratelimit.port % 2) == 1)
|
|
+ {
|
|
+ value &= 0x0000ffff;
|
|
+ if(ratelimit.on_off == 1)
|
|
+ {
|
|
+ value |= (ratelimit.on_off << 28);
|
|
+ value |= (0x03 << 26);
|
|
+ value |= (ratelimit.bw << 16);
|
|
+ }
|
|
+ }
|
|
+ printk("offset = 0x%4x value=0x%x\n\r", offset, value);
|
|
+ _ESW_REG(offset) = value;
|
|
+ break;
|
|
+#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \
|
|
+ defined(CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+#define _ESW_REG(x) (*((volatile u32 *)(RALINK_ETH_SW_BASE + x)))
|
|
+ case RAETH_ESW_INGRESS_RATE:
|
|
+ copy_from_user(&ratelimit, ifr->ifr_data, sizeof(ratelimit));
|
|
+#if defined(CONFIG_RALINK_RT6855A) || defined(CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+ offset = 0x1800 + (0x100 * ratelimit.port);
|
|
+#else
|
|
+ offset = 0x1080 + (0x100 * ratelimit.port);
|
|
+#endif
|
|
+ value = _ESW_REG(offset);
|
|
+
|
|
+ value &= 0xffff0000;
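+ /* Rate encoding shared by the ingress and egress cases in this branch:
+  * bit 15 enables the limiter, bits 10:8 hold a power-of-ten scale and the
+  * low bits the mantissa, i.e. bw is stored as mantissa * 10^scale.
+  */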
|
|
+ if(ratelimit.on_off == 1)
|
|
+ {
|
|
+ value |= (ratelimit.on_off << 15);
|
|
+ if (ratelimit.bw < 100)
|
|
+ {
|
|
+ value |= (0x0 << 8);
|
|
+ value |= ratelimit.bw;
|
|
+ }else if(ratelimit.bw < 1000)
|
|
+ {
|
|
+ value |= (0x1 << 8);
|
|
+ value |= ratelimit.bw/10;
|
|
+ }else if(ratelimit.bw < 10000)
|
|
+ {
|
|
+ value |= (0x2 << 8);
|
|
+ value |= ratelimit.bw/100;
|
|
+ }else if(ratelimit.bw < 100000)
|
|
+ {
|
|
+ value |= (0x3 << 8);
|
|
+ value |= ratelimit.bw/1000;
|
|
+ }else
|
|
+ {
|
|
+ value |= (0x4 << 8);
|
|
+ value |= ratelimit.bw/10000;
|
|
+ }
|
|
+ }
|
|
+ printk("offset = 0x%4x value=0x%x\n\r", offset, value);
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+ mii_mgr_write(0x1f, offset, value);
|
|
+#else
|
|
+ _ESW_REG(offset) = value;
|
|
+#endif
|
|
+ break;
|
|
+
|
|
+ case RAETH_ESW_EGRESS_RATE:
|
|
+ copy_from_user(&ratelimit, ifr->ifr_data, sizeof(ratelimit));
|
|
+ offset = 0x1040 + (0x100 * ratelimit.port);
|
|
+ value = _ESW_REG(offset);
|
|
+
|
|
+ value &= 0xffff0000;
|
|
+ if(ratelimit.on_off == 1)
|
|
+ {
|
|
+ value |= (ratelimit.on_off << 15);
|
|
+ if (ratelimit.bw < 100)
|
|
+ {
|
|
+ value |= (0x0 << 8);
|
|
+ value |= ratelimit.bw;
|
|
+ }else if(ratelimit.bw < 1000)
|
|
+ {
|
|
+ value |= (0x1 << 8);
|
|
+ value |= ratelimit.bw/10;
|
|
+ }else if(ratelimit.bw < 10000)
|
|
+ {
|
|
+ value |= (0x2 << 8);
|
|
+ value |= ratelimit.bw/100;
|
|
+ }else if(ratelimit.bw < 100000)
|
|
+ {
|
|
+ value |= (0x3 << 8);
|
|
+ value |= ratelimit.bw/1000;
|
|
+ }else
|
|
+ {
|
|
+ value |= (0x4 << 8);
|
|
+ value |= ratelimit.bw/10000;
|
|
+ }
|
|
+ }
|
|
+ printk("offset = 0x%4x value=0x%x\n\r", offset, value);
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+ mii_mgr_write(0x1f, offset, value);
|
|
+#else
|
|
+ _ESW_REG(offset) = value;
|
|
+#endif
|
|
+ break;
|
|
+#endif
|
|
+#endif // CONFIG_RT_3052_ESW
|
|
+ default:
|
|
+ ret = -EOPNOTSUPP;
|
|
+ break;
|
|
+
|
|
+ }
|
|
+
|
|
+ spin_unlock_irq(&ei_local->page_lock);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/*
+ * Set new MTU size
+ * Change the MTU of the Raeth ethernet device
+ */
|
|
+static int ei_change_mtu(struct net_device *dev, int new_mtu)
|
|
+{
|
|
+ END_DEVICE *ei_local = netdev_priv(dev); // get priv ei_local pointer from net_dev structure
|
|
+
|
|
+ if ( ei_local == NULL ) {
|
|
+ printk(KERN_EMERG "%s: ei_change_mtu passed a non-existent private pointer from net_dev!\n", dev->name);
|
|
+ return -ENXIO;
|
|
+ }
|
|
+
|
|
+
|
|
+ if ( (new_mtu > 4096) || (new_mtu < 64)) {
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+#ifndef CONFIG_RAETH_JUMBOFRAME
|
|
+ if ( new_mtu > 1500 ) {
|
|
+ return -EINVAL;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ dev->mtu = new_mtu;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_RX
|
|
+static void ei_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
|
|
+{
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+
|
|
+ ei_local->vlgrp = grp;
|
|
+
|
|
+ /* enable HW VLAN RX */
|
|
+ sysRegWrite(CDMP_EG_CTRL, 1);
|
|
+
|
|
+}
|
|
+#endif
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+static const struct net_device_ops ei_netdev_ops = {
|
|
+ .ndo_init = rather_probe,
|
|
+ .ndo_open = ei_open,
|
|
+ .ndo_stop = ei_close,
|
|
+ .ndo_start_xmit = ei_start_xmit_fake,
|
|
+ .ndo_get_stats = ra_get_stats,
|
|
+ .ndo_set_mac_address = eth_mac_addr,
|
|
+ .ndo_change_mtu = ei_change_mtu,
|
|
+ .ndo_do_ioctl = ei_ioctl,
|
|
+ .ndo_validate_addr = eth_validate_addr,
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_RX
|
|
+ .ndo_vlan_rx_register = ei_vlan_rx_register,
|
|
+#endif
|
|
+#ifdef CONFIG_NET_POLL_CONTROLLER
|
|
+ .ndo_poll_controller = raeth_clean,
|
|
+#endif
|
|
+// .ndo_tx_timeout = ei_tx_timeout,
|
|
+};
|
|
+#endif
|
|
+
|
|
+void ra2880_setup_dev_fptable(struct net_device *dev)
|
|
+{
|
|
+ RAETH_PRINT(__FUNCTION__ "is called!\n");
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ dev->netdev_ops = &ei_netdev_ops;
|
|
+#else
|
|
+ dev->open = ei_open;
|
|
+ dev->stop = ei_close;
|
|
+ dev->hard_start_xmit = ei_start_xmit_fake;
|
|
+ dev->get_stats = ra_get_stats;
|
|
+ dev->set_mac_address = ei_set_mac_addr;
|
|
+ dev->change_mtu = ei_change_mtu;
|
|
+ dev->mtu = 1500;
|
|
+ dev->do_ioctl = ei_ioctl;
|
|
+// dev->tx_timeout = ei_tx_timeout;
|
|
+
|
|
+#ifdef CONFIG_RAETH_NAPI
|
|
+ dev->poll = &raeth_clean;
|
|
+#if defined (CONFIG_RAETH_ROUTER)
|
|
+ dev->weight = 32;
|
|
+#elif defined (CONFIG_RT_3052_ESW)
|
|
+ dev->weight = 32;
|
|
+#else
|
|
+ dev->weight = 128;
|
|
+#endif
|
|
+#endif
|
|
+#endif
|
|
+#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
|
|
+ dev->ethtool_ops = &ra_ethtool_ops;
|
|
+#endif
|
|
+#define TX_TIMEOUT (5*HZ)
|
|
+ dev->watchdog_timeo = TX_TIMEOUT;
|
|
+
|
|
+}
|
|
+
|
|
+/* reset frame engine */
|
|
+void fe_reset(void)
|
|
+{
|
|
+#if defined (CONFIG_RALINK_RT6855A)
|
|
+ /* FIXME */
|
|
+#else
|
|
+ u32 val;
|
|
+
|
|
+ //val = *(volatile u32 *)(0x1b000000);
|
|
+ //printk("0x1b000000 is 0x%x\n", val);
|
|
+ //val = sysRegRead(0xFB110100);
|
|
+ //val = 0x8000;
|
|
+ //sysRegWrite(0xFB110100, val);
|
|
+
|
|
+
|
|
+
|
|
+ val = sysRegRead(RSTCTRL);
|
|
+
|
|
+// RT5350 needs to reset ESW and FE at the same time to avoid PDMA panic //
|
|
+#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+ val = val | RALINK_FE_RST | RALINK_ESW_RST ;
|
|
+#else
|
|
+ val = val | RALINK_FE_RST;
|
|
+#endif
|
|
+ sysRegWrite(RSTCTRL, val);
|
|
+#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7628)
|
|
+ val = val & ~(RALINK_FE_RST | RALINK_ESW_RST);
|
|
+#else
|
|
+ val = val & ~(RALINK_FE_RST);
|
|
+#endif
|
|
+
|
|
+ sysRegWrite(RSTCTRL, val);
|
|
+#endif
|
|
+}
|
|
+
|
|
+/* set TRGMII */
|
|
+#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
|
|
+void trgmii_set_7621(void)
|
|
+{
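+ /* Hold FE and GMAC in reset, switch the TRGMII clock source via CLK_CFG_0
+  * bits [6:5] (set to 01), then release the resets.
+  */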
|
|
+ u32 val = 0;
|
|
+ u32 val_0 = 0;
|
|
+
|
|
+ val = sysRegRead(RSTCTRL);
|
|
+// MT7621 needs to reset GMAC and FE first //
|
|
+ val = val | RALINK_FE_RST | RALINK_ETH_RST ;
|
|
+ sysRegWrite(RSTCTRL, val);
|
|
+
|
|
+//set TRGMII clock//
|
|
+ val_0 = sysRegRead(CLK_CFG_0);
|
|
+ val_0 &= 0xffffff9f;
|
|
+ val_0 |= (0x1 << 5);
|
|
+ sysRegWrite(CLK_CFG_0, val_0);
|
|
+ mdelay(1);
|
|
+ val_0 = sysRegRead(CLK_CFG_0);
|
|
+ printk("set CLK_CFG_0 = 0x%x!!!!!!!!!!!!!!!!!!1\n",val_0);
|
|
+ val = val & ~(RALINK_FE_RST | RALINK_ETH_RST);
|
|
+ sysRegWrite(RSTCTRL, val);
|
|
+}
|
|
+
|
|
+void trgmii_set_7530(void)
|
|
+{
|
|
+// set MT7530 //
|
|
+#if 0
|
|
+
|
|
+ mii_mgr_write(31, 103, 0x0020);
|
|
+
|
|
+
|
|
+ //disable EEE
|
|
+ mii_mgr_write(0, 0x16, 0);
|
|
+ mii_mgr_write(1, 0x16, 0);
|
|
+ mii_mgr_write(2, 0x16, 0);
|
|
+ mii_mgr_write(3, 0x16, 0);
|
|
+ mii_mgr_write(4, 0x16, 0);
|
|
+
|
|
+
|
|
+ //PLL reset for E2
|
|
+ mii_mgr_write(31, 104, 0x0608);
|
|
+ mii_mgr_write(31, 104, 0x2608);
|
|
+
|
|
+ mii_mgr_write(31, 0x7808, 0x0);
|
|
+ mdelay(1);
|
|
+ mii_mgr_write(31, 0x7804, 0x01017e8f);
|
|
+ mdelay(1);
|
|
+ mii_mgr_write(31, 0x7808, 0x1);
|
|
+ mdelay(1);
|
|
+
|
|
+#endif
|
|
+#if 1
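+ /* Clause-45 style (register 13/14) indirect accesses into the MT7530:
+  * set the core PLL for a 150MHz TRGMII clock, choosing the divider from the
+  * XTAL type reported in register 0x7800 (0x3 = 25MHz, 0x2 = 40MHz), enable
+  * the PLL/bias/LPF stages, bring up port 6 and finally enable TRGMII (0x7830).
+  */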
|
|
+ //CL45 command
|
|
+ //PLL to 150Mhz
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x404);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_read(31, 0x7800, ®Value);
|
|
+ regValue = (regValue >> 9) & 0x3;
|
|
+ if(regValue == 0x3) { //25Mhz Xtal
|
|
+ mii_mgr_write(0, 14, 0x0A00);//25Mhz XTAL for 150Mhz CLK
|
|
+ } else if(regValue == 0x2) { //40Mhz
|
|
+ mii_mgr_write(0, 14, 0x0780);//40Mhz XTAL for 150Mhz CLK
|
|
+ }
|
|
+ //mii_mgr_write(0, 14, 0x0C00);//ori
|
|
+ mdelay(1);
|
|
+
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x409);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x57);
|
|
+ mdelay(1);
|
|
+
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x40a);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x57);
|
|
+
|
|
+//PLL BIAS en
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x403);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x1800);
|
|
+ mdelay(1);
|
|
+
|
|
+//BIAS LPF en
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x403);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x1c00);
|
|
+
|
|
+//sys PLL en
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x401);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0xc020);
|
|
+
|
|
+//LCDDDS PWDS
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x406);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0xa030);
|
|
+ mdelay(1);
|
|
+
|
|
+//GSW_2X_CLK
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x410);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x0003);
|
|
+
|
|
+//enable P6
|
|
+ mii_mgr_write(31, 0x3600, 0x5e33b);
|
|
+
|
|
+//enable TRGMII
|
|
+ mii_mgr_write(31, 0x7830, 0x1);
|
|
+#endif
|
|
+
|
|
+}
|
|
+#endif
|
|
+
|
|
+void ei_reset_task(struct work_struct *work)
|
|
+{
|
|
+ struct net_device *dev = dev_raether;
|
|
+
|
|
+ ei_close(dev);
|
|
+ ei_open(dev);
|
|
+
|
|
+ return;
|
|
+}
|
|
+
|
|
+void ei_tx_timeout(struct net_device *dev)
|
|
+{
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+
|
|
+ schedule_work(&ei_local->reset_task);
|
|
+}
|
|
+
|
|
+void setup_statistics(END_DEVICE* ei_local)
|
|
+{
|
|
+ ei_local->stat.tx_packets = 0;
|
|
+ ei_local->stat.tx_bytes = 0;
|
|
+ ei_local->stat.tx_dropped = 0;
|
|
+ ei_local->stat.tx_errors = 0;
|
|
+ ei_local->stat.tx_aborted_errors= 0;
|
|
+ ei_local->stat.tx_carrier_errors= 0;
|
|
+ ei_local->stat.tx_fifo_errors = 0;
|
|
+ ei_local->stat.tx_heartbeat_errors = 0;
|
|
+ ei_local->stat.tx_window_errors = 0;
|
|
+
|
|
+ ei_local->stat.rx_packets = 0;
|
|
+ ei_local->stat.rx_bytes = 0;
|
|
+ ei_local->stat.rx_dropped = 0;
|
|
+ ei_local->stat.rx_errors = 0;
|
|
+ ei_local->stat.rx_length_errors = 0;
|
|
+ ei_local->stat.rx_over_errors = 0;
|
|
+ ei_local->stat.rx_crc_errors = 0;
|
|
+ ei_local->stat.rx_frame_errors = 0;
|
|
+ ei_local->stat.rx_fifo_errors = 0;
|
|
+ ei_local->stat.rx_missed_errors = 0;
|
|
+
|
|
+ ei_local->stat.collisions = 0;
|
|
+#if defined (CONFIG_RAETH_QOS)
|
|
+ ei_local->tx3_full = 0;
|
|
+ ei_local->tx2_full = 0;
|
|
+ ei_local->tx1_full = 0;
|
|
+ ei_local->tx0_full = 0;
|
|
+#else
|
|
+ ei_local->tx_full = 0;
|
|
+#endif
|
|
+#ifdef CONFIG_RAETH_NAPI
|
|
+ atomic_set(&ei_local->irq_sem, 1);
|
|
+#endif
|
|
+
|
|
+}
|
|
+
|
|
+/**
+ * rather_probe - pick up ethernet port at boot time
+ * @dev: network device to probe
+ *
+ * This routine probes the ethernet port at boot time.
+ *
+ */
|
|
+
|
|
+int __init rather_probe(struct net_device *dev)
|
|
+{
|
|
+ int i;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+ struct sockaddr addr;
|
|
+ unsigned char zero1[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
|
|
+ unsigned char zero2[6]={0x00,0x00,0x00,0x00,0x00,0x00};
|
|
+
|
|
+ fe_reset();
|
|
+
|
|
+ //Get mac0 address from flash
|
|
+#ifdef RA_MTD_RW_BY_NUM
|
|
+ i = ra_mtd_read(2, GMAC0_OFFSET, 6, addr.sa_data);
|
|
+#else
|
|
+ i = ra_mtd_read_nm("Factory", GMAC0_OFFSET, 6, addr.sa_data);
|
|
+#endif
|
|
+ //If reading mtd failed or mac0 is empty, generate a mac address
|
|
+ if (i < 0 || ((memcmp(addr.sa_data, zero1, 6) == 0) || (addr.sa_data[0] & 0x1)) ||
|
|
+ (memcmp(addr.sa_data, zero2, 6) == 0)) {
|
|
+ unsigned char mac_addr01234[5] = {0x00, 0x0C, 0x43, 0x28, 0x80};
|
|
+ // net_srandom(jiffies);
|
|
+ memcpy(addr.sa_data, mac_addr01234, 5);
|
|
+ // addr.sa_data[5] = net_random()&0xFF;
|
|
+ }
|
|
+
|
|
+#ifdef CONFIG_RAETH_NAPI
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ netif_napi_add(dev, &ei_local->napi, raeth_clean, 128);
|
|
+#endif
|
|
+#endif
|
|
+ ei_set_mac_addr(dev, &addr);
|
|
+ spin_lock_init(&ei_local->page_lock);
|
|
+ ether_setup(dev);
|
|
+
|
|
+#ifdef CONFIG_RAETH_LRO
|
|
+ ei_local->lro_mgr.dev = dev;
|
|
+ memset(&ei_local->lro_mgr.stats, 0, sizeof(ei_local->lro_mgr.stats));
|
|
+ ei_local->lro_mgr.features = LRO_F_NAPI;
|
|
+ ei_local->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
|
|
+ ei_local->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
|
|
+ ei_local->lro_mgr.max_desc = ARRAY_SIZE(ei_local->lro_arr);
|
|
+ ei_local->lro_mgr.max_aggr = 64;
|
|
+ ei_local->lro_mgr.frag_align_pad = 0;
|
|
+ ei_local->lro_mgr.lro_arr = ei_local->lro_arr;
|
|
+ ei_local->lro_mgr.get_skb_header = rt_get_skb_header;
|
|
+#endif
|
|
+
|
|
+ setup_statistics(ei_local);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+int VirtualIF_ioctl(struct net_device * net_dev,
|
|
+ struct ifreq * ifr, int cmd)
|
|
+{
|
|
+ ra_mii_ioctl_data mii;
|
|
+
|
|
+ switch (cmd) {
|
|
+ case RAETH_MII_READ:
|
|
+ copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
|
|
+ mii_mgr_read(mii.phy_id, mii.reg_num, &mii.val_out);
|
|
+ //printk("phy %d, reg %d, val 0x%x\n", mii.phy_id, mii.reg_num, mii.val_out);
|
|
+ copy_to_user(ifr->ifr_data, &mii, sizeof(mii));
|
|
+ break;
|
|
+
|
|
+ case RAETH_MII_WRITE:
|
|
+ copy_from_user(&mii, ifr->ifr_data, sizeof(mii));
|
|
+ //printk("phy %d, reg %d, val 0x%x\n", mii.phy_id, mii.reg_num, mii.val_in);
|
|
+ mii_mgr_write(mii.phy_id, mii.reg_num, mii.val_in);
|
|
+ break;
|
|
+ default:
|
|
+ return -EOPNOTSUPP;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+struct net_device_stats *VirtualIF_get_stats(struct net_device *dev)
|
|
+{
|
|
+ PSEUDO_ADAPTER *pAd = netdev_priv(dev);
|
|
+ return &pAd->stat;
|
|
+}
|
|
+
|
|
+int VirtualIF_open(struct net_device * dev)
|
|
+{
|
|
+ PSEUDO_ADAPTER *pPesueoAd = netdev_priv(dev);
|
|
+
|
|
+ printk("%s: ===> VirtualIF_open\n", dev->name);
|
|
+
|
|
+#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN) || defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
|
|
+ *((volatile u32 *)(FE_INT_ENABLE2)) |= (1<<25); //enable GE2 link change intr for MT7530 delay setting
|
|
+#endif
|
|
+
|
|
+ netif_start_queue(pPesueoAd->PseudoDev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int VirtualIF_close(struct net_device * dev)
|
|
+{
|
|
+ PSEUDO_ADAPTER *pPesueoAd = netdev_priv(dev);
|
|
+
|
|
+ printk("%s: ===> VirtualIF_close\n", dev->name);
|
|
+
|
|
+ netif_stop_queue(pPesueoAd->PseudoDev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int VirtualIFSendPackets(struct sk_buff * pSkb,
|
|
+ struct net_device * dev)
|
|
+{
|
|
+ PSEUDO_ADAPTER *pPesueoAd = netdev_priv(dev);
|
|
+ END_DEVICE *ei_local __maybe_unused;
|
|
+
|
|
+
|
|
+ //printk("VirtualIFSendPackets --->\n");
|
|
+
|
|
+ ei_local = netdev_priv(dev);
|
|
+ if (!(pPesueoAd->RaethDev->flags & IFF_UP)) {
|
|
+ dev_kfree_skb_any(pSkb);
|
|
+ return 0;
|
|
+ }
|
|
+ //pSkb->cb[40]=0x5a;
|
|
+ pSkb->dev = pPesueoAd->RaethDev;
|
|
+ ei_start_xmit(pSkb, pPesueoAd->RaethDev, 2);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void virtif_setup_statistics(PSEUDO_ADAPTER* pAd)
|
|
+{
|
|
+ pAd->stat.tx_packets = 0;
|
|
+ pAd->stat.tx_bytes = 0;
|
|
+ pAd->stat.tx_dropped = 0;
|
|
+ pAd->stat.tx_errors = 0;
|
|
+ pAd->stat.tx_aborted_errors= 0;
|
|
+ pAd->stat.tx_carrier_errors= 0;
|
|
+ pAd->stat.tx_fifo_errors = 0;
|
|
+ pAd->stat.tx_heartbeat_errors = 0;
|
|
+ pAd->stat.tx_window_errors = 0;
|
|
+
|
|
+ pAd->stat.rx_packets = 0;
|
|
+ pAd->stat.rx_bytes = 0;
|
|
+ pAd->stat.rx_dropped = 0;
|
|
+ pAd->stat.rx_errors = 0;
|
|
+ pAd->stat.rx_length_errors = 0;
|
|
+ pAd->stat.rx_over_errors = 0;
|
|
+ pAd->stat.rx_crc_errors = 0;
|
|
+ pAd->stat.rx_frame_errors = 0;
|
|
+ pAd->stat.rx_fifo_errors = 0;
|
|
+ pAd->stat.rx_missed_errors = 0;
|
|
+
|
|
+ pAd->stat.collisions = 0;
|
|
+}
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+static const struct net_device_ops VirtualIF_netdev_ops = {
|
|
+ .ndo_open = VirtualIF_open,
|
|
+ .ndo_stop = VirtualIF_close,
|
|
+ .ndo_start_xmit = VirtualIFSendPackets,
|
|
+ .ndo_get_stats = VirtualIF_get_stats,
|
|
+ .ndo_set_mac_address = ei_set_mac2_addr,
|
|
+ .ndo_change_mtu = ei_change_mtu,
|
|
+ .ndo_do_ioctl = VirtualIF_ioctl,
|
|
+ .ndo_validate_addr = eth_validate_addr,
|
|
+};
|
|
+#endif
|
|
+// Register pseudo interface
|
|
+void RAETH_Init_PSEUDO(pEND_DEVICE pAd, struct net_device *net_dev)
|
|
+{
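+ /* Create the second (pseudo) GMAC interface: allocate an etherdev named
+  * DEV2_NAME, take its MAC from the Factory partition at GMAC2_OFFSET
+  * (falling back to the default 00:0C:43:28:80:xx prefix) and route its
+  * transmits through ei_start_xmit() on the real raeth device.
+  */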
|
|
+ int index;
|
|
+ struct net_device *dev;
|
|
+ PSEUDO_ADAPTER *pPseudoAd;
|
|
+ int i = 0;
|
|
+ struct sockaddr addr;
|
|
+ unsigned char zero1[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
|
|
+ unsigned char zero2[6]={0x00,0x00,0x00,0x00,0x00,0x00};
|
|
+
|
|
+ for (index = 0; index < MAX_PSEUDO_ENTRY; index++) {
|
|
+
|
|
+ dev = alloc_etherdev(sizeof(PSEUDO_ADAPTER));
|
|
+ if (NULL == dev)
|
|
+ {
|
|
+ printk(" alloc_etherdev for PSEUDO_ADAPTER failed.\n");
|
|
+ return;
|
|
+ }
|
|
+ strcpy(dev->name, DEV2_NAME);
|
|
+
|
|
+ //Get mac2 address from flash
|
|
+#ifdef RA_MTD_RW_BY_NUM
|
|
+ i = ra_mtd_read(2, GMAC2_OFFSET, 6, addr.sa_data);
|
|
+#else
|
|
+ i = ra_mtd_read_nm("Factory", GMAC2_OFFSET, 6, addr.sa_data);
|
|
+#endif
|
|
+
|
|
+ //If reading mtd failed or mac2 is empty, generate a mac address
|
|
+ if (i < 0 || ((memcmp(addr.sa_data, zero1, 6) == 0) || (addr.sa_data[0] & 0x1)) ||
|
|
+ (memcmp(addr.sa_data, zero2, 6) == 0)) {
|
|
+ unsigned char mac_addr01234[5] = {0x00, 0x0C, 0x43, 0x28, 0x80};
|
|
+ // net_srandom(jiffies);
|
|
+ memcpy(addr.sa_data, mac_addr01234, 5);
|
|
+ // addr.sa_data[5] = net_random()&0xFF;
|
|
+ }
|
|
+
|
|
+ ei_set_mac2_addr(dev, &addr);
|
|
+ ether_setup(dev);
|
|
+ pPseudoAd = netdev_priv(dev);
|
|
+
|
|
+ pPseudoAd->PseudoDev = dev;
|
|
+ pPseudoAd->RaethDev = net_dev;
|
|
+ virtif_setup_statistics(pPseudoAd);
|
|
+ pAd->PseudoDev = dev;
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ dev->netdev_ops = &VirtualIF_netdev_ops;
|
|
+#else
|
|
+ dev->hard_start_xmit = VirtualIFSendPackets;
|
|
+ dev->stop = VirtualIF_close;
|
|
+ dev->open = VirtualIF_open;
|
|
+ dev->do_ioctl = VirtualIF_ioctl;
|
|
+ dev->set_mac_address = ei_set_mac2_addr;
|
|
+ dev->get_stats = VirtualIF_get_stats;
|
|
+ dev->change_mtu = ei_change_mtu;
|
|
+ dev->mtu = 1500;
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ dev->features |= NETIF_F_HW_CSUM;
|
|
+#else
|
|
+ dev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4 */
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+
|
|
+#if defined(CONFIG_RALINK_MT7620)
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
|
|
+ dev->features |= NETIF_F_SG;
|
|
+ dev->features |= NETIF_F_TSO;
|
|
+ }
|
|
+#endif // CONFIG_RAETH_TSO //
|
|
+
|
|
+#if defined (CONFIG_RAETH_TSOV6)
|
|
+ if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
|
|
+ dev->features |= NETIF_F_TSO6;
|
|
+ dev->features |= NETIF_F_IPV6_CSUM; /* Can checksum TCP/UDP over IPv6 */
|
|
+ }
|
|
+#endif
|
|
+#else
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ dev->features |= NETIF_F_SG;
|
|
+ dev->features |= NETIF_F_TSO;
|
|
+#endif // CONFIG_RAETH_TSO //
|
|
+
|
|
+#if defined (CONFIG_RAETH_TSOV6)
|
|
+ dev->features |= NETIF_F_TSO6;
|
|
+ dev->features |= NETIF_F_IPV6_CSUM; /* Can checksum TCP/UDP over IPv6 */
|
|
+#endif
|
|
+#endif // CONFIG_RALINK_MT7620 //
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
|
|
+ dev->vlan_features = dev->features;
|
|
+#endif
|
|
+
|
|
+
|
|
+#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
|
|
+ dev->ethtool_ops = &ra_virt_ethtool_ops;
|
|
+ // init mii structure
|
|
+ pPseudoAd->mii_info.dev = dev;
|
|
+ pPseudoAd->mii_info.mdio_read = mdio_virt_read;
|
|
+ pPseudoAd->mii_info.mdio_write = mdio_virt_write;
|
|
+ pPseudoAd->mii_info.phy_id_mask = 0x1f;
|
|
+ pPseudoAd->mii_info.reg_num_mask = 0x1f;
|
|
+ pPseudoAd->mii_info.phy_id = 0x1e;
|
|
+ pPseudoAd->mii_info.supports_gmii = mii_check_gmii_support(&pPseudoAd->mii_info);
|
|
+#endif
|
|
+
|
|
+ // Register this device
|
|
+ register_netdevice(dev);
|
|
+ }
|
|
+}
|
|
+#endif
|
|
+
|
|
+/**
+ * ei_open - Open/Initialize the ethernet port.
+ * @dev: network device to initialize
+ *
+ * This routine goes all-out, setting everything up anew at each open,
+ * even though many of these registers should only need to be set once at boot.
+ */
|
|
+int ei_open(struct net_device *dev)
|
|
+{
|
|
+ int i, err;
|
|
+#if !defined (CONFIG_MT7623_FPGA)
|
|
+ unsigned long flags;
|
|
+#endif
|
|
+ END_DEVICE *ei_local;
|
|
+
|
|
+#ifdef CONFIG_RAETH_LRO
|
|
+ const char *lan_ip_tmp;
|
|
+#ifdef CONFIG_DUAL_IMAGE
|
|
+#define RT2860_NVRAM 1
|
|
+#else
|
|
+#define RT2860_NVRAM 0
|
|
+#endif
|
|
+#endif // CONFIG_RAETH_LRO //
|
|
+
|
|
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
|
|
+ if (!try_module_get(THIS_MODULE))
|
|
+ {
|
|
+ printk("%s: Cannot reserve module\n", __FUNCTION__);
|
|
+ return -1;
|
|
+ }
|
|
+#else
|
|
+ MOD_INC_USE_COUNT;
|
|
+#endif
|
|
+
|
|
+ printk("Raeth %s (",RAETH_VERSION);
|
|
+#if defined (CONFIG_RAETH_NAPI)
|
|
+ printk("NAPI\n");
|
|
+#elif defined (CONFIG_RA_NETWORK_TASKLET_BH)
|
|
+ printk("Tasklet");
|
|
+#elif defined (CONFIG_RA_NETWORK_WORKQUEUE_BH)
|
|
+ printk("Workqueue");
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_SKB_RECYCLE_2K)
|
|
+ printk(",SkbRecycle");
|
|
+#endif
|
|
+ printk(")\n");
|
|
+
|
|
+
|
|
+ ei_local = netdev_priv(dev); // get device pointer from System
|
|
+ // unsigned int flags;
|
|
+
|
|
+ if (ei_local == NULL)
|
|
+ {
|
|
+ printk(KERN_EMERG "%s: ei_open passed a non-existent device!\n", dev->name);
|
|
+ return -ENXIO;
|
|
+ }
|
|
+
|
|
+ /* receiving packet buffer allocation - NUM_RX_DESC x MAX_RX_LENGTH */
|
|
+ for ( i = 0; i < NUM_RX_DESC; i++)
|
|
+ {
|
|
+#if defined (CONFIG_RAETH_SKB_RECYCLE_2K)
|
|
+ ei_local->netrx0_skbuf[i] = skbmgr_dev_alloc_skb2k();
|
|
+#else
|
|
+ ei_local->netrx0_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN);
|
|
+#endif
|
|
+ if (ei_local->netrx0_skbuf[i] == NULL ) {
|
|
+ printk("rx skbuff buffer allocation failed!");
|
|
+ } else {
|
|
+#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ skb_reserve(ei_local->netrx0_skbuf[i], NET_IP_ALIGN);
|
|
+#endif
|
|
+ }
|
|
+
|
|
+
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
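+ /* Rings 1-3 are the hardware LRO rings; their buffers are sized
+  * MAX_LRO_RX_LENGTH because the engine can hand back aggregated
+  * (larger than MTU) frames.
+  */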
|
|
+ ei_local->netrx3_skbuf[i] = dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN);
|
|
+ if (ei_local->netrx3_skbuf[i] == NULL ) {
|
|
+ printk("rx3 skbuff buffer allocation failed!");
|
|
+ } else {
|
|
+#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ skb_reserve(ei_local->netrx3_skbuf[i], NET_IP_ALIGN);
|
|
+#endif
|
|
+ }
|
|
+ ei_local->netrx2_skbuf[i] = dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN);
|
|
+ if (ei_local->netrx2_skbuf[i] == NULL ) {
|
|
+ printk("rx2 skbuff buffer allocation failed!");
|
|
+ } else {
|
|
+#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ skb_reserve(ei_local->netrx2_skbuf[i], NET_IP_ALIGN);
|
|
+#endif
|
|
+ }
|
|
+ ei_local->netrx1_skbuf[i] = dev_alloc_skb(MAX_LRO_RX_LENGTH + NET_IP_ALIGN);
|
|
+ if (ei_local->netrx1_skbuf[i] == NULL ) {
|
|
+ printk("rx1 skbuff buffer allocation failed!");
|
|
+ } else {
|
|
+#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ skb_reserve(ei_local->netrx1_skbuf[i], NET_IP_ALIGN);
|
|
+#endif
|
|
+ }
|
|
+#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+ ei_local->netrx3_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN);
|
|
+ if (ei_local->netrx3_skbuf[i] == NULL ) {
|
|
+ printk("rx3 skbuff buffer allocation failed!");
|
|
+ } else {
|
|
+#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ skb_reserve(ei_local->netrx3_skbuf[i], NET_IP_ALIGN);
|
|
+#endif
|
|
+ }
|
|
+ ei_local->netrx2_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN);
|
|
+ if (ei_local->netrx2_skbuf[i] == NULL ) {
|
|
+ printk("rx2 skbuff buffer allocation failed!");
|
|
+ } else {
|
|
+#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ skb_reserve(ei_local->netrx2_skbuf[i], NET_IP_ALIGN);
|
|
+#endif
|
|
+ }
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+ ei_local->netrx1_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN);
|
|
+ if (ei_local->netrx1_skbuf[i] == NULL ) {
|
|
+ printk("rx1 skbuff buffer allocation failed!");
|
|
+ } else {
|
|
+#if !defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ skb_reserve(ei_local->netrx1_skbuf[i], NET_IP_ALIGN);
|
|
+#endif
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
|
|
+ trgmii_set_7621(); //reset FE/GMAC in this function
|
|
+#endif
|
|
+
|
|
+ fe_dma_init(dev);
|
|
+
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ fe_hw_lro_init(dev);
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+
|
|
+ fe_sw_init(); //initialize fe and switch register
|
|
+#if defined (CONFIG_MIPS)
|
|
+ err = request_irq( dev->irq, ei_interrupt, IRQF_DISABLED, dev->name, dev); // try to fix irq in open
|
|
+#else
|
|
+ err = request_irq(dev->irq, ei_interrupt, /*IRQF_TRIGGER_LOW*/ 0, dev->name, dev); // try to fix irq in open
|
|
+#endif
|
|
+ if (err)
|
|
+ return err;
|
|
+
|
|
+ if ( dev->dev_addr != NULL) {
|
|
+ ra2880MacAddressSet((void *)(dev->dev_addr));
|
|
+ } else {
|
|
+ printk("dev->dev_addr is empty !\n");
|
|
+ }
|
|
+/*TODO: MT7623 MCM INT */
|
|
+#if defined (CONFIG_RT_3052_ESW) && !defined(CONFIG_ARCH_MT7623)
|
|
+ err = request_irq(SURFBOARDINT_ESW, esw_interrupt, IRQF_DISABLED, "Ralink_ESW", dev);
|
|
+ if (err)
|
|
+ return err;
|
|
+ INIT_WORK(&ei_local->kill_sig_wq, kill_sig_workq);
|
|
+#if defined (CONFIG_RALINK_MT7621)
|
|
+ mii_mgr_write(31, 0x7008, 0x1f); //enable switch link change intr
|
|
+
|
|
+#else
|
|
+ *((volatile u32 *)(RALINK_INTCL_BASE + 0x34)) = (1<<17);
|
|
+ *((volatile u32 *)(ESW_IMR)) &= ~(ESW_INT_ALL);
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A) || \
|
|
+ defined (CONFIG_RALINK_MT7620)
|
|
+ *((volatile u32 *)(ESW_P0_IntMn)) &= ~(MSK_CNT_INT_ALL);
|
|
+ *((volatile u32 *)(ESW_P1_IntMn)) &= ~(MSK_CNT_INT_ALL);
|
|
+ *((volatile u32 *)(ESW_P2_IntMn)) &= ~(MSK_CNT_INT_ALL);
|
|
+ *((volatile u32 *)(ESW_P3_IntMn)) &= ~(MSK_CNT_INT_ALL);
|
|
+ *((volatile u32 *)(ESW_P4_IntMn)) &= ~(MSK_CNT_INT_ALL);
|
|
+ *((volatile u32 *)(ESW_P5_IntMn)) &= ~(MSK_CNT_INT_ALL);
|
|
+ *((volatile u32 *)(ESW_P6_IntMn)) &= ~(MSK_CNT_INT_ALL);
|
|
+#endif
|
|
+#if defined(CONFIG_RALINK_MT7620)
|
|
+ *((volatile u32 *)(ESW_P7_IntMn)) &= ~(MSK_CNT_INT_ALL);
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+#endif // CONFIG_RT_3052_ESW //
|
|
+
|
|
+/*TODO*/
|
|
+#if !defined (CONFIG_MT7623_FPGA)
|
|
+ spin_lock_irqsave(&(ei_local->page_lock), flags);
|
|
+#endif
|
|
+
|
|
+
|
|
+#ifdef DELAY_INT
|
|
+ sysRegWrite(RAETH_DLY_INT_CFG, DELAY_INT_INIT);
|
|
+ sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_DLY_INIT);
|
|
+ #if defined (CONFIG_RAETH_HW_LRO)
|
|
+ sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_DLY_INIT | ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1);
|
|
+ #endif /* CONFIG_RAETH_HW_LRO */
|
|
+#else
|
|
+ sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_ALL);
|
|
+ #if defined (CONFIG_RAETH_HW_LRO)
|
|
+ sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_ALL | ALT_RPLC_INT3 | ALT_RPLC_INT2 | ALT_RPLC_INT1);
|
|
+ #endif /* CONFIG_RAETH_HW_LRO */
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_QDMA
|
|
+#ifdef DELAY_INT
|
|
+ sysRegWrite(QDMA_DELAY_INT, DELAY_INT_INIT);
|
|
+ sysRegWrite(QFE_INT_ENABLE, QFE_INT_DLY_INIT);
|
|
+#else
|
|
+ sysRegWrite(QFE_INT_ENABLE, QFE_INT_ALL);
|
|
+
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+ INIT_WORK(&ei_local->reset_task, ei_reset_task);
|
|
+
|
|
+#ifdef WORKQUEUE_BH
|
|
+#ifndef CONFIG_RAETH_NAPI
|
|
+ INIT_WORK(&ei_local->rx_wq, ei_receive_workq);
|
|
+#endif // CONFIG_RAETH_NAPI //
|
|
+#else
|
|
+#ifndef CONFIG_RAETH_NAPI
|
|
+#if defined (TASKLET_WORKQUEUE_SW)
|
|
+ working_schedule = init_schedule;
|
|
+ INIT_WORK(&ei_local->rx_wq, ei_receive_workq);
|
|
+ tasklet_init(&ei_local->rx_tasklet, ei_receive_workq, 0);
|
|
+#else
|
|
+ tasklet_init(&ei_local->rx_tasklet, ei_receive, 0);
|
|
+#endif
|
|
+#endif // CONFIG_RAETH_NAPI //
|
|
+#endif // WORKQUEUE_BH //
|
|
+
|
|
+ netif_start_queue(dev);
|
|
+
|
|
+#ifdef CONFIG_RAETH_NAPI
|
|
+ atomic_dec(&ei_local->irq_sem);
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ napi_enable(&ei_local->napi);
|
|
+#else
|
|
+ netif_poll_enable(dev);
|
|
+#endif
|
|
+#endif
|
|
+/*TODO*/
+#if !defined (CONFIG_MT7623_FPGA)
|
|
+ spin_unlock_irqrestore(&(ei_local->page_lock), flags);
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ if(ei_local->PseudoDev == NULL) {
|
|
+ RAETH_Init_PSEUDO(ei_local, dev);
|
|
+ }
|
|
+
|
|
+ if(ei_local->PseudoDev == NULL)
|
|
+ printk("Open PseudoDev failed.\n");
|
|
+ else
|
|
+ VirtualIF_open(ei_local->PseudoDev);
|
|
+
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_LRO
|
|
+ lan_ip_tmp = nvram_get(RT2860_NVRAM, "lan_ipaddr");
|
|
+ str_to_ip(&lan_ip, lan_ip_tmp);
|
|
+ lro_para.lan_ip1 = lan_ip = htonl(lan_ip);
|
|
+#endif // CONFIG_RAETH_LRO //
|
|
+
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ INIT_WORK(&ei_local->hw_lro_wq, ei_hw_lro_workq);
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+
|
|
+ forward_config(dev);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ei_close - shut down network device
|
|
+ * @dev: network device to clear
|
|
+ *
|
|
+ * This routine shuts down the network device.
+ *
|
|
+ *
|
|
+ */
|
|
+int ei_close(struct net_device *dev)
|
|
+{
|
|
+ int i;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev); // device pointer
|
|
+
|
|
+ netif_stop_queue(dev);
|
|
+ ra2880stop(ei_local);
|
|
+
|
|
+ free_irq(dev->irq, dev);
|
|
+
|
|
+/*TODO: MT7623 MCM INT */
|
|
+#if defined (CONFIG_RT_3052_ESW) && !defined(CONFIG_ARCH_MT7623)
|
|
+ free_irq(SURFBOARDINT_ESW, dev);
|
|
+#endif
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ cancel_work_sync(&ei_local->reset_task);
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ VirtualIF_close(ei_local->PseudoDev);
|
|
+#endif
|
|
+
|
|
+
|
|
+#ifdef WORKQUEUE_BH
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ cancel_work_sync(&ei_local->rx_wq);
|
|
+#endif
|
|
+#else
|
|
+#if defined (TASKLET_WORKQUEUE_SW)
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ cancel_work_sync(&ei_local->rx_wq);
|
|
+#endif
|
|
+#endif
|
|
+ tasklet_kill(&ei_local->tx_tasklet);
|
|
+ tasklet_kill(&ei_local->rx_tasklet);
|
|
+#endif // WORKQUEUE_BH //
|
|
+
|
|
+#ifdef CONFIG_RAETH_NAPI
|
|
+ atomic_inc(&ei_local->irq_sem);
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ napi_disable(&ei_local->napi);
|
|
+#else
|
|
+ netif_poll_disable(dev);
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ cancel_work_sync(&ei_local->hw_lro_wq);
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
|
|
+
|
|
+ for ( i = 0; i < NUM_RX_DESC; i++)
|
|
+ {
|
|
+ if (ei_local->netrx0_skbuf[i] != NULL) {
|
|
+ dev_kfree_skb(ei_local->netrx0_skbuf[i]);
|
|
+ ei_local->netrx0_skbuf[i] = NULL;
|
|
+ }
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ if (ei_local->netrx3_skbuf[i] != NULL) {
|
|
+ dev_kfree_skb(ei_local->netrx3_skbuf[i]);
|
|
+ ei_local->netrx3_skbuf[i] = NULL;
|
|
+ }
|
|
+ if (ei_local->netrx2_skbuf[i] != NULL) {
|
|
+ dev_kfree_skb(ei_local->netrx2_skbuf[i]);
|
|
+ ei_local->netrx2_skbuf[i] = NULL;
|
|
+ }
|
|
+ if (ei_local->netrx1_skbuf[i] != NULL) {
|
|
+ dev_kfree_skb(ei_local->netrx1_skbuf[i]);
|
|
+ ei_local->netrx1_skbuf[i] = NULL;
|
|
+ }
|
|
+#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+ if (ei_local->netrx3_skbuf[i] != NULL) {
|
|
+ dev_kfree_skb(ei_local->netrx3_skbuf[i]);
|
|
+ ei_local->netrx3_skbuf[i] = NULL;
|
|
+ }
|
|
+ if (ei_local->netrx2_skbuf[i] != NULL) {
|
|
+ dev_kfree_skb(ei_local->netrx2_skbuf[i]);
|
|
+ ei_local->netrx2_skbuf[i] = NULL;
|
|
+ }
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+ if (ei_local->netrx1_skbuf[i] != NULL) {
|
|
+ dev_kfree_skb(ei_local->netrx1_skbuf[i]);
|
|
+ ei_local->netrx1_skbuf[i] = NULL;
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+
|
|
+ for ( i = 0; i < NUM_TX_DESC; i++)
|
|
+ {
|
|
+ if((ei_local->skb_free[i]!=(struct sk_buff *)0xFFFFFFFF) && (ei_local->skb_free[i]!= 0))
|
|
+ {
|
|
+ dev_kfree_skb_any(ei_local->skb_free[i]);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* TX Ring */
|
|
+#ifdef CONFIG_RAETH_QDMA
|
|
+ if (ei_local->txd_pool != NULL) {
|
|
+ pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct QDMA_txdesc), ei_local->txd_pool, ei_local->phy_txd_pool);
|
|
+ }
|
|
+ if (ei_local->free_head != NULL){
|
|
+ pci_free_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), ei_local->free_head, ei_local->phy_free_head);
|
|
+ }
|
|
+ if (ei_local->free_page_head != NULL){
|
|
+ pci_free_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, ei_local->free_page_head, ei_local->phy_free_page_head);
|
|
+ }
|
|
+#else
|
|
+ if (ei_local->tx_ring0 != NULL) {
|
|
+ pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring0, ei_local->phy_tx_ring0);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_QOS)
|
|
+ if (ei_local->tx_ring1 != NULL) {
|
|
+ pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring1, ei_local->phy_tx_ring1);
|
|
+ }
|
|
+
|
|
+#if !defined (CONFIG_RALINK_RT2880)
|
|
+ if (ei_local->tx_ring2 != NULL) {
|
|
+ pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring2, ei_local->phy_tx_ring2);
|
|
+ }
|
|
+
|
|
+ if (ei_local->tx_ring3 != NULL) {
|
|
+ pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring3, ei_local->phy_tx_ring3);
|
|
+ }
|
|
+#endif
|
|
+#endif
|
|
+ /* RX Ring */
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ kfree(ei_local->rx_ring0);
|
|
+#else
|
|
+ pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring0, ei_local->phy_rx_ring0);
|
|
+#endif
|
|
+#if defined CONFIG_RAETH_QDMA && !defined(CONFIG_RAETH_QDMATX_QDMARX)
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ kfree(ei_local->qrx_ring);
|
|
+#else
|
|
+ pci_free_consistent(NULL, NUM_QRX_DESC*sizeof(struct PDMA_rxdesc), ei_local->qrx_ring, ei_local->phy_qrx_ring);
|
|
+#endif
|
|
+#endif
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+ pci_free_consistent(NULL, NUM_LRO_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring3, ei_local->phy_rx_ring3);
|
|
+ pci_free_consistent(NULL, NUM_LRO_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring2, ei_local->phy_rx_ring2);
|
|
+ pci_free_consistent(NULL, NUM_LRO_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring1, ei_local->phy_rx_ring1);
|
|
+#elif defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ kfree(ei_local->rx_ring1);
|
|
+#else
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+ pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring3, ei_local->phy_rx_ring3);
|
|
+ pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring2, ei_local->phy_rx_ring2);
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+ pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring1, ei_local->phy_rx_ring1);
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+ printk("Free TX/RX Ring Memory!\n");
|
|
+
|
|
+ fe_reset();
|
|
+
|
|
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
|
|
+ module_put(THIS_MODULE);
|
|
+#else
|
|
+ MOD_DEC_USE_COUNT;
|
|
+#endif
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#if defined (CONFIG_RT6855A_FPGA)
|
|
+void rt6855A_eth_gpio_reset(void)
|
|
+{
|
|
+ u8 ether_gpio = 12;
|
|
+
|
|
+ /* Load the ethernet gpio value to reset Ethernet PHY */
|
|
+ *(unsigned long *)(RALINK_PIO_BASE + 0x00) |= 1<<(ether_gpio<<1);
|
|
+ *(unsigned long *)(RALINK_PIO_BASE + 0x14) |= 1<<(ether_gpio);
|
|
+ *(unsigned long *)(RALINK_PIO_BASE + 0x04) &= ~(1<<ether_gpio);
|
|
+
|
|
+ udelay(100000);
|
|
+
|
|
+ *(unsigned long *)(RALINK_PIO_BASE + 0x04) |= (1<<ether_gpio);
|
|
+
|
|
+ /* must wait for 0.6 seconds after reset*/
|
|
+ udelay(600000);
|
|
+}
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RALINK_RT6855A)
|
|
+void rt6855A_gsw_init(void)
|
|
+{
|
|
+ u32 phy_val=0;
|
|
+ u32 rev=0;
+#if defined (CONFIG_RT6855A_FPGA)
+ u32 i=0; /* loop index for the FPGA-only PHY setup below */
+#endif
+
|
|
+#if defined (CONFIG_RT6855A_FPGA)
|
|
+ /* keep dumb switch mode */
+ rt6855A_eth_gpio_reset();
|
|
+
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3000) = 0x5e353;//(P0,Force mode,Link Up,100Mbps,Full-Duplex,FC ON)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3100) = 0x5e353;//(P1,Force mode,Link Up,100Mbps,Full-Duplex,FC ON)
|
|
+ //*(unsigned long *)(RALINK_ETH_SW_BASE+0x3000) = 0x5e333;//(P0,Force mode,Link Up,10Mbps,Full-Duplex,FC ON)
|
|
+ //*(unsigned long *)(RALINK_ETH_SW_BASE+0x3100) = 0x5e333;//(P1,Force mode,Link Up,10Mbps,Full-Duplex,FC ON)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3200) = 0x8000;//link down
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3300) = 0x8000;//link down
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x8000;//link down
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x8000;//link down
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3600) = 0x5e33b;//CPU Port6 Force Link 1G, FC ON
|
|
+
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x0010) = 0xffffffe0;//Set Port6 CPU Port
|
|
+
|
|
+ /* In order to use 10M/Full on the FPGA board, configure the PHY as capable of
+ * 10M Full/Half duplex, so auto-negotiation can be used on the PC side */
+ for(i=6;i<8;i++){
|
|
+ mii_mgr_write(i, 4, 0x07e1); //Capable of 10M&100M Full/Half Duplex, flow control on/off
|
|
+ //mii_mgr_write(i, 4, 0x0461); //Capable of 10M Full/Half Duplex, flow control on/off
|
|
+ mii_mgr_write(i, 0, 0xB100); //reset all digital logic, except phy_reg
|
|
+ mii_mgr_read(i, 9, &phy_val);
|
|
+ phy_val &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
|
|
+ mii_mgr_write(i, 9, phy_val);
|
|
+ }
|
|
+#elif defined (CONFIG_RT6855A_ASIC)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3600) = 0x5e33b;//CPU Port6 Force Link 1G, FC ON
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x0010) = 0xffffffe0;//Set Port6 CPU Port
|
|
+
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE+0x1ec) = 0x0fffffff;//Set PSE should pause 4 tx ring as default
|
|
+ *(unsigned long *)(RALINK_FRAME_ENGINE_BASE+0x1f0) = 0x0fffffff;//switch IOT more stable
|
|
+
|
|
+ *(unsigned long *)(CKGCR) &= ~(0x3 << 4); //keep rx/tx port clocks ticking; disable internal clock-gating so the switch does not get stuck
+
|
|
+ /*
|
|
+ *Reg 31: Page Control
|
|
+ * Bit 15 => PortPageSel, 1=local, 0=global
|
|
+ * Bit 14:12 => PageSel, local:0~3, global:0~4
|
|
+ *
|
|
+ *Reg16~30:Local/Global registers
|
|
+ *
|
|
+ */
|
|
+ /*correct PHY setting J8.0*/
|
|
+ mii_mgr_read(0, 31, &rev);
|
|
+ rev &= (0x0f);
|
|
+
|
|
+ mii_mgr_write(1, 31, 0x4000); //global, page 4
|
|
+
|
|
+ mii_mgr_write(1, 16, 0xd4cc);
|
|
+ mii_mgr_write(1, 17, 0x7444);
|
|
+ mii_mgr_write(1, 19, 0x0112);
|
|
+ mii_mgr_write(1, 21, 0x7160);
|
|
+ mii_mgr_write(1, 22, 0x10cf);
|
|
+ mii_mgr_write(1, 26, 0x0777);
|
|
+
|
|
+ if(rev == 0){
|
|
+ mii_mgr_write(1, 25, 0x0102);
|
|
+ mii_mgr_write(1, 29, 0x8641);
|
|
+ }
|
|
+ else{
|
|
+ mii_mgr_write(1, 25, 0x0212);
|
|
+ mii_mgr_write(1, 29, 0x4640);
|
|
+ }
|
|
+
|
|
+ mii_mgr_write(1, 31, 0x2000); //global, page 2
|
|
+ mii_mgr_write(1, 21, 0x0655);
|
|
+ mii_mgr_write(1, 22, 0x0fd3);
|
|
+ mii_mgr_write(1, 23, 0x003d);
|
|
+ mii_mgr_write(1, 24, 0x096e);
|
|
+ mii_mgr_write(1, 25, 0x0fed);
|
|
+ mii_mgr_write(1, 26, 0x0fc4);
|
|
+
|
|
+ mii_mgr_write(1, 31, 0x1000); //global, page 1
|
|
+ mii_mgr_write(1, 17, 0xe7f8);
|
|
+
|
|
+
|
|
+ mii_mgr_write(1, 31, 0xa000); //local, page 2
|
|
+
|
|
+ mii_mgr_write(0, 16, 0x0e0e);
|
|
+ mii_mgr_write(1, 16, 0x0c0c);
|
|
+ mii_mgr_write(2, 16, 0x0f0f);
|
|
+ mii_mgr_write(3, 16, 0x1010);
|
|
+ mii_mgr_write(4, 16, 0x0909);
|
|
+
|
|
+ mii_mgr_write(0, 17, 0x0000);
|
|
+ mii_mgr_write(1, 17, 0x0000);
|
|
+ mii_mgr_write(2, 17, 0x0000);
|
|
+ mii_mgr_write(3, 17, 0x0000);
|
|
+ mii_mgr_write(4, 17, 0x0000);
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RT6855A_ASIC)
|
|
+
|
|
+#if defined (CONFIG_P5_RGMII_TO_MAC_MODE)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e33b;//(P5, Force mode, Link Up, 1000Mbps, Full-Duplex, FC ON)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x7014) = 0x1f0c000c;//disable port0-port4 internal phy, set phy base address to 12
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x250c) = 0x000fff10;//disable port5 mac learning
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x260c) = 0x000fff10;//disable port6 mac learning
|
|
+
|
|
+#elif defined (CONFIG_P5_MII_TO_MAC_MODE)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
|
|
+#elif defined (CONFIG_P5_MAC_TO_PHY_MODE)
|
|
+ //rt6855/6 need to modify TX/RX phase
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x7014) = 0xc;//TX/RX CLOCK Phase select
|
|
+
|
|
+ enable_auto_negotiate(1);
|
|
+
|
|
+ if (isICPlusGigaPHY(1)) {
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, &phy_val);
|
|
+ phy_val |= 1<<10; //enable pause ability
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, phy_val);
|
|
+
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
|
|
+ phy_val |= 1<<9; //restart AN
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
|
|
+ }
|
|
+
|
|
+ if (isMarvellGigaPHY(1)) {
|
|
+ printk("Reset MARVELL phy1\n");
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, &phy_val);
|
|
+ phy_val |= 1<<7; //Add delay to RX_CLK for RXD Outputs
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, phy_val);
|
|
+
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
|
|
+ phy_val |= 1<<15; //PHY Software Reset
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
|
|
+ }
|
|
+ if (isVtssGigaPHY(1)) {
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0001); //extended page
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, &phy_val);
|
|
+ printk("Vitesse phy skew: %x --> ", phy_val);
|
|
+ phy_val |= (0x3<<12); // RGMII RX skew compensation= 2.0 ns
|
|
+ phy_val &= ~(0x3<<14);// RGMII TX skew compensation= 0 ns
|
|
+ printk("%x\n", phy_val);
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, phy_val);
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0000); //main registers
|
|
+ }
|
|
+#elif defined (CONFIG_P5_RMII_TO_MAC_MODE)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
|
|
+#else // Port 5 Disabled //
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x8000;//link down
|
|
+#endif
|
|
+#endif
|
|
+}
|
|
+#endif
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+#if defined (CONFIG_MT7623_FPGA)
|
|
+void setup_fpga_gsw(void)
|
|
+{
|
|
+ u32 i;
|
|
+ u32 regValue;
|
|
+
|
|
+ /* reduce RGMII2 PAD driving strength */
|
|
+ *(volatile u_long *)(PAD_RGMII2_MDIO_CFG) &= ~(0x3 << 4);
|
|
+
|
|
+ //RGMII1=Normal mode
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 14);
|
|
+
|
|
+ //GMAC1= RGMII mode
|
|
+ *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 12);
|
|
+
|
|
+ //enable MDIO to control MT7530
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
|
|
+ regValue &= ~(0x3 << 12);
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
|
|
+
|
|
+ for(i=0;i<=4;i++)
|
|
+ {
|
|
+ //turn off PHY
|
|
+ mii_mgr_read(i, 0x0 ,®Value);
|
|
+ regValue |= (0x1<<11);
|
|
+ mii_mgr_write(i, 0x0, regValue);
|
|
+ }
|
|
+ mii_mgr_write(31, 0x7000, 0x3); //reset switch
|
|
+ udelay(10);
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e337);//(GE1, Force 100M/FD, FC ON)
|
|
+ mii_mgr_write(31, 0x3600, 0x5e337);
|
|
+
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x00008000);//(GE2, Link down)
|
|
+ mii_mgr_write(31, 0x3500, 0x8000);
|
|
+
|
|
+#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
|
|
+ //regValue = 0x117ccf; //Enable Port 6, P5 as GMAC5, P5 disable*/
|
|
+ mii_mgr_read(31, 0x7804 ,®Value);
|
|
+ regValue &= ~(1<<8); //Enable Port 6
|
|
+ regValue |= (1<<6); //Disable Port 5
|
|
+ regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY
|
|
+
|
|
+#if defined (CONFIG_RAETH_GMAC2)
|
|
+ //RGMII2=Normal mode
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
|
|
+
|
|
+ //GMAC2= RGMII mode
|
|
+ *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
|
|
+
|
|
+ mii_mgr_write(31, 0x3500, 0x56300); //MT7530 P5 AN, we can ignore this setting??????
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(GE2, auto-polling)
|
|
+
|
|
+ enable_auto_negotiate(0);//set polling address
|
|
+ /* set MT7530 Port 5 to PHY 0/4 mode */
|
|
+#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN)
|
|
+ regValue &= ~((1<<13)|(1<<6));
|
|
+ regValue |= ((1<<7)|(1<<16)|(1<<20));
|
|
+#elif defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
|
|
+ regValue &= ~((1<<13)|(1<<6)|((1<<20)));
|
|
+ regValue |= ((1<<7)|(1<<16));
|
|
+#endif
|
|
+
|
|
+ //sysRegWrite(GDMA2_FWD_CFG, 0x20710000);
|
|
+#endif
|
|
+ regValue |= (1<<16);//change HW-TRAP
|
|
+ printk("change HW-TRAP to 0x%x\n",regValue);
|
|
+ mii_mgr_write(31, 0x7804 ,regValue);
|
|
+#endif
|
|
+ mii_mgr_write(0, 14, 0x1); /*RGMII*/
|
|
+/* set MT7530 central align */
|
|
+ mii_mgr_read(31, 0x7830, ®Value);
|
|
+ regValue &= ~1;
|
|
+ regValue |= 1<<1;
|
|
+ mii_mgr_write(31, 0x7830, regValue);
|
|
+
|
|
+ mii_mgr_read(31, 0x7a40, ®Value);
|
|
+ regValue &= ~(1<<30);
|
|
+ mii_mgr_write(31, 0x7a40, regValue);
|
|
+
|
|
+ regValue = 0x855;
|
|
+ mii_mgr_write(31, 0x7a78, regValue);
|
|
+
|
|
+/*to check!!*/
|
|
+ mii_mgr_write(31, 0x7b00, 0x102); //delay setting for 10/1000M
|
|
+ mii_mgr_write(31, 0x7b04, 0x14); //delay setting for 10/1000M
|
|
+
|
|
+ for(i=0;i<=4;i++) {
|
|
+ mii_mgr_read(i, 4, ®Value);
|
|
+ regValue |= (3<<7); //turn on 100Base-T Advertisement
|
|
+ //regValue &= ~(3<<7); //turn off 100Base-T Advertisement
|
|
+ mii_mgr_write(i, 4, regValue);
|
|
+
|
|
+ mii_mgr_read(i, 9, ®Value);
|
|
+ //regValue |= (3<<8); //turn on 1000Base-T Advertisement
|
|
+ regValue &= ~(3<<8); //turn off 1000Base-T Advertisement
|
|
+ mii_mgr_write(i, 9, regValue);
|
|
+
|
|
+ //restart AN
|
|
+ mii_mgr_read(i, 0, ®Value);
|
|
+ regValue |= (1 << 9);
|
|
+ mii_mgr_write(i, 0, regValue);
|
|
+ }
|
|
+
|
|
+ /*Tx Driving*/
|
|
+ mii_mgr_write(31, 0x7a54, 0x44); //lower driving
|
|
+ mii_mgr_write(31, 0x7a5c, 0x44); //lower driving
|
|
+ mii_mgr_write(31, 0x7a64, 0x44); //lower driving
|
|
+ mii_mgr_write(31, 0x7a6c, 0x44); //lower driving
|
|
+ mii_mgr_write(31, 0x7a74, 0x44); //lower driving
|
|
+ mii_mgr_write(31, 0x7a7c, 0x44); //lower driving
|
|
+
|
|
+ for(i=0;i<=4;i++)
|
|
+ {
|
|
+ //turn on PHY
|
|
+ mii_mgr_read(i, 0x0 ,®Value);
|
|
+ regValue &= ~(0x1<<11);
|
|
+ mii_mgr_write(i, 0x0, regValue);
|
|
+ }
|
|
+}
|
|
+#endif
|
|
+
|
|
+
|
|
+#if defined (CONFIG_RALINK_MT7621)
|
|
+
|
|
+
|
|
+void setup_external_gsw(void)
|
|
+{
|
|
+ u32 regValue;
|
|
+
|
|
+ /* reduce RGMII2 PAD driving strength */
|
|
+ *(volatile u_long *)(PAD_RGMII2_MDIO_CFG) &= ~(0x3 << 4);
|
|
+ //enable MDIO
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
|
|
+ regValue &= ~(0x3 << 12);
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
|
|
+
|
|
+ //RGMII1=Normal mode
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 14);
|
|
+ //GMAC1= RGMII mode
|
|
+ *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 12);
|
|
+
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x00008000);//(GE1, Link down)
|
|
+
|
|
+ //RGMII2=Normal mode
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
|
|
+ //GMAC2= RGMII mode
|
|
+ *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
|
|
+
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x2105e33b);//(GE2, Force 1000M/FD, FC ON)
|
|
+
|
|
+}
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
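+/*
+ * IsSwitchVlanTableBusy() waits for bit 31 of switch register 0x90 (the
+ * VTCR command register kicked from LANWANPartition() below) to clear.
+ * It polls at most 20 times with udelay(70000) between polls, i.e. about
+ * 1.4 s worst case, and logs the last value read if the VLAN table never
+ * goes idle.
+ */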
+void IsSwitchVlanTableBusy(void)
|
|
+{
|
|
+ int j = 0;
|
|
+ unsigned int value = 0;
|
|
+
|
|
+ for (j = 0; j < 20; j++) {
|
|
+ mii_mgr_read(31, 0x90, &value);
|
|
+ if ((value & 0x80000000) == 0) { // VLAN table no longer busy
+ break;
|
|
+ }
|
|
+ udelay(70000);
|
|
+ }
|
|
+ if (j == 20)
|
|
+ printk("set vlan timeout value=0x%x.\n", value);
|
|
+}
|
|
+
|
|
+void LANWANPartition(void)
|
|
+{
|
|
+/*Set MT7530 */
|
|
+#ifdef CONFIG_WAN_AT_P0
|
|
+ printk("set LAN/WAN WLLLL\n");
|
|
+ //WLLLL, wan at P0
|
|
+ //LAN/WAN ports as security mode
|
|
+ mii_mgr_write(31, 0x2004, 0xff0003);//port0
|
|
+ mii_mgr_write(31, 0x2104, 0xff0003);//port1
|
|
+ mii_mgr_write(31, 0x2204, 0xff0003);//port2
|
|
+ mii_mgr_write(31, 0x2304, 0xff0003);//port3
|
|
+ mii_mgr_write(31, 0x2404, 0xff0003);//port4
|
|
+
|
|
+ //set PVID
|
|
+ mii_mgr_write(31, 0x2014, 0x10002);//port0
|
|
+ mii_mgr_write(31, 0x2114, 0x10001);//port1
|
|
+ mii_mgr_write(31, 0x2214, 0x10001);//port2
|
|
+ mii_mgr_write(31, 0x2314, 0x10001);//port3
|
|
+ mii_mgr_write(31, 0x2414, 0x10001);//port4
|
|
+ /*port6 */
|
|
+ //VLAN member
|
|
+ IsSwitchVlanTableBusy();
|
|
+ mii_mgr_write(31, 0x94, 0x407e0001);//VAWD1
|
|
+ mii_mgr_write(31, 0x90, 0x80001001);//VTCR, VID=1
|
|
+ IsSwitchVlanTableBusy();
|
|
+
|
|
+ mii_mgr_write(31, 0x94, 0x40610001);//VAWD1
|
|
+ mii_mgr_write(31, 0x90, 0x80001002);//VTCR, VID=2
|
|
+ IsSwitchVlanTableBusy();
|
|
+#endif
|
|
+#ifdef CONFIG_WAN_AT_P4
|
|
+ printk("set LAN/WAN LLLLW\n");
|
|
+ //LLLLW, wan at P4
|
|
+ //LAN/WAN ports as security mode
|
|
+ mii_mgr_write(31, 0x2004, 0xff0003);//port0
|
|
+ mii_mgr_write(31, 0x2104, 0xff0003);//port1
|
|
+ mii_mgr_write(31, 0x2204, 0xff0003);//port2
|
|
+ mii_mgr_write(31, 0x2304, 0xff0003);//port3
|
|
+ mii_mgr_write(31, 0x2404, 0xff0003);//port4
|
|
+
|
|
+ //set PVID
|
|
+ mii_mgr_write(31, 0x2014, 0x10001);//port0
|
|
+ mii_mgr_write(31, 0x2114, 0x10001);//port1
|
|
+ mii_mgr_write(31, 0x2214, 0x10001);//port2
|
|
+ mii_mgr_write(31, 0x2314, 0x10001);//port3
|
|
+ mii_mgr_write(31, 0x2414, 0x10002);//port4
|
|
+
|
|
+ //VLAN member
|
|
+ IsSwitchVlanTableBusy();
|
|
+ mii_mgr_write(31, 0x94, 0x404f0001);//VAWD1
|
|
+ mii_mgr_write(31, 0x90, 0x80001001);//VTCR, VID=1
|
|
+ IsSwitchVlanTableBusy();
|
|
+ mii_mgr_write(31, 0x94, 0x40500001);//VAWD1
|
|
+ mii_mgr_write(31, 0x90, 0x80001002);//VTCR, VID=2
|
|
+ IsSwitchVlanTableBusy();
|
|
+#endif
|
|
+}
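+/*
+ * The VLAN programming above repeats one pattern per entry: write the
+ * port-member data to register 0x94 (VAWD1), kick register 0x90 (VTCR)
+ * with 0x80001000 | vid, then wait for the busy bit to clear via
+ * IsSwitchVlanTableBusy().  The helper below is only a minimal sketch of
+ * that pattern for illustration; it is not part of the SDK driver, is not
+ * referenced anywhere, and the name mt7530_set_vlan_entry is made up.
+ */
+#if 0
+static void mt7530_set_vlan_entry(unsigned int vid, unsigned int vawd1)
+{
+	IsSwitchVlanTableBusy();
+	mii_mgr_write(31, 0x94, vawd1);            /* VAWD1 data word, as written above    */
+	mii_mgr_write(31, 0x90, 0x80001000 | vid); /* VTCR: write VLAN table entry for vid */
+	IsSwitchVlanTableBusy();
+}
+/* e.g. mt7530_set_vlan_entry(1, 0x407e0001) mirrors the VID=1 write above */
+#endif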
+
|
|
+#if defined (CONFIG_RAETH_8023AZ_EEE) && defined (CONFIG_RALINK_MT7621)
|
|
+void mt7621_eee_patch(void)
|
|
+{
|
|
+ u32 i;
|
|
+
|
|
+ for(i=0;i<5;i++)
|
|
+ {
|
|
+ /* Enable EEE */
|
|
+ mii_mgr_write(i, 13, 0x07);
|
|
+ mii_mgr_write(i, 14, 0x3c);
|
|
+ mii_mgr_write(i, 13, 0x4007);
|
|
+ mii_mgr_write(i, 14, 0x6);
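+ /*
+  * The reg 13/14 write pairs above (and in the EEE-disable paths elsewhere
+  * in this file) use the standard Clause-22 MMD access registers: reg 13
+  * selects the function and MMD device (here device 7, auto-negotiation),
+  * reg 14 then carries the register address (0x3c, the EEE advertisement
+  * register, 7.60) and the data (0x6 advertises 100M and 1000M EEE).
+  */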
+
|
|
+ /* Forced Slave mode */
|
|
+ mii_mgr_write(i, 31, 0x0);
|
|
+ mii_mgr_write(i, 9, 0x1600);
|
|
+ /* Increase SlvDPSready time */
|
|
+ mii_mgr_write(i, 31, 0x52b5);
|
|
+ mii_mgr_write(i, 16, 0xafae);
|
|
+ mii_mgr_write(i, 18, 0x2f);
|
|
+ mii_mgr_write(i, 16, 0x8fae);
|
|
+ /* Increase post_update_timer */
+ mii_mgr_write(i, 31, 0x3);
|
|
+ mii_mgr_write(i, 17, 0x4b);
|
|
+ /* Adjust 100_mse_threshold */
|
|
+ mii_mgr_write(i, 13, 0x1e);
|
|
+ mii_mgr_write(i, 14, 0x123);
|
|
+ mii_mgr_write(i, 13, 0x401e);
|
|
+ mii_mgr_write(i, 14, 0xffff);
|
|
+ /* Disable mcc
|
|
+ mii_mgr_write(i, 13, 0x1e);
|
|
+ mii_mgr_write(i, 14, 0xa6);
|
|
+ mii_mgr_write(i, 13, 0x401e);
|
|
+ mii_mgr_write(i, 14, 0x300);
|
|
+ */
|
|
+ }
|
|
+
|
|
+}
|
|
+#endif
|
|
+
|
|
+
|
|
+#if defined (CONFIG_RALINK_MT7621)
|
|
+void setup_internal_gsw(void)
|
|
+{
|
|
+ u32 i;
|
|
+ u32 regValue;
|
|
+
|
|
+#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
|
|
+ /*Hardware reset Switch*/
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x34) |= (0x1 << 2);
|
|
+ udelay(1000);
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x34) &= ~(0x1 << 2);
|
|
+ udelay(10000);
|
|
+
|
|
+ /* reduce RGMII2 PAD driving strength */
|
|
+ *(volatile u_long *)(PAD_RGMII2_MDIO_CFG) &= ~(0x3 << 4);
|
|
+
|
|
+ //RGMII1=Normal mode
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 14);
|
|
+
|
|
+ //GMAC1= RGMII mode
|
|
+ *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 12);
|
|
+
|
|
+ //enable MDIO to control MT7530
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
|
|
+ regValue &= ~(0x3 << 12);
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
|
|
+
|
|
+ for(i=0;i<=4;i++)
|
|
+ {
|
|
+ //turn off PHY
|
|
+ mii_mgr_read(i, 0x0 ,®Value);
|
|
+ regValue |= (0x1<<11);
|
|
+ mii_mgr_write(i, 0x0, regValue);
|
|
+ }
|
|
+ mii_mgr_write(31, 0x7000, 0x3); //reset switch
|
|
+ udelay(100);
|
|
+
|
|
+
|
|
+#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
|
|
+ trgmii_set_7530(); //reset FE, config MDIO again
|
|
+
|
|
+ //enable MDIO to control MT7530
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60));
|
|
+ regValue &= ~(0x3 << 12);
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue;
|
|
+
|
|
+ // switch to APLL if TRGMII and DDR2
|
|
+ if ((sysRegRead(0xBE000010)>>4)&0x1)
|
|
+ {
|
|
+ apll_xtal_enable();
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_MT7621_ASIC)
|
|
+ if((sysRegRead(0xbe00000c)&0xFFFF)==0x0101) {
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e30b);//(GE1, Force 1000M/FD, FC ON)
|
|
+ mii_mgr_write(31, 0x3600, 0x5e30b);
|
|
+ } else {
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e33b);//(GE1, Force 1000M/FD, FC ON)
|
|
+ mii_mgr_write(31, 0x3600, 0x5e33b);
|
|
+ }
|
|
+#elif defined (CONFIG_MT7621_FPGA)
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e337);//(GE1, Force 100M/FD, FC ON)
|
|
+ mii_mgr_write(31, 0x3600, 0x5e337);
|
|
+#endif
|
|
+
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x00008000);//(GE2, Link down)
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
|
|
+ //regValue = 0x117ccf; //Enable Port 6, P5 as GMAC5, P5 disable*/
|
|
+ mii_mgr_read(31, 0x7804 ,®Value);
|
|
+ regValue &= ~(1<<8); //Enable Port 6
|
|
+ regValue |= (1<<6); //Disable Port 5
|
|
+ regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY
|
|
+
|
|
+#if defined (CONFIG_RAETH_GMAC2)
|
|
+ //RGMII2=Normal mode
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
|
|
+
|
|
+ //GMAC2= RGMII mode
|
|
+ *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
|
|
+#if !defined (CONFIG_RAETH_8023AZ_EEE)
|
|
+ mii_mgr_write(31, 0x3500, 0x56300); //MT7530 P5 AN, we can ignore this setting??????
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(GE2, auto-polling)
|
|
+
|
|
+ enable_auto_negotiate(0);//set polling address
|
|
+#endif
|
|
+#if defined (CONFIG_RAETH_8023AZ_EEE)
|
|
+ mii_mgr_write(31, 0x3500, 0x5e33b); //MT7530 P5 Force 1000, we can ignore this setting??????
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x2105e33b);//(GE2, Force 1000)
|
|
+#endif
|
|
+
|
|
+
|
|
+
|
|
+ /* set MT7530 Port 5 to PHY 0/4 mode */
|
|
+#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN)
|
|
+ regValue &= ~((1<<13)|(1<<6));
|
|
+ regValue |= ((1<<7)|(1<<16)|(1<<20));
|
|
+#elif defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
|
|
+ regValue &= ~((1<<13)|(1<<6)|(1<<20));
|
|
+ regValue |= ((1<<7)|(1<<16));
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_8023AZ_EEE)
|
|
+ regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY
|
|
+#endif
|
|
+ //sysRegWrite(GDMA2_FWD_CFG, 0x20710000);
|
|
+#endif
|
|
+ regValue |= (1<<16);//change HW-TRAP
|
|
+ //printk("change HW-TRAP to 0x%x\n",regValue);
|
|
+ mii_mgr_write(31, 0x7804 ,regValue);
|
|
+#endif
|
|
+ mii_mgr_read(31, 0x7800, ®Value);
|
|
+ regValue = (regValue >> 9) & 0x3;
|
|
+ if(regValue == 0x3) { //25Mhz Xtal
|
|
+ /* do nothing */
|
|
+ } else if(regValue == 0x2) { //40Mhz
|
|
+
|
|
+ mii_mgr_write(0, 13, 0x1f); // disable MT7530 core clock
|
|
+ mii_mgr_write(0, 14, 0x410);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x0);
|
|
+
|
|
+ mii_mgr_write(0, 13, 0x1f); // disable MT7530 PLL
|
|
+ mii_mgr_write(0, 14, 0x40d);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x2020);
|
|
+
|
|
+ mii_mgr_write(0, 13, 0x1f); // for MT7530 core clock = 500Mhz
|
|
+ mii_mgr_write(0, 14, 0x40e);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x119);
|
|
+
|
|
+ mii_mgr_write(0, 13, 0x1f); // enable MT7530 PLL
|
|
+ mii_mgr_write(0, 14, 0x40d);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x2820);
|
|
+
|
|
+ udelay(20); //suggested by CD
+
|
|
+ mii_mgr_write(0, 13, 0x1f); // enable MT7530 core clock
|
|
+ mii_mgr_write(0, 14, 0x410);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ }else { //20Mhz Xtal
|
|
+
|
|
+ /* TODO */
|
|
+
|
|
+ }
|
|
+#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
|
|
+ mii_mgr_write(0, 14, 0x3); /*TRGMII*/
|
|
+#else
|
|
+ mii_mgr_write(0, 14, 0x1); /*RGMII*/
|
|
+/* set MT7530 central align */
|
|
+ mii_mgr_read(31, 0x7830, ®Value);
|
|
+ regValue &= ~1;
|
|
+ regValue |= 1<<1;
|
|
+ mii_mgr_write(31, 0x7830, regValue);
|
|
+
|
|
+ mii_mgr_read(31, 0x7a40, ®Value);
|
|
+ regValue &= ~(1<<30);
|
|
+ mii_mgr_write(31, 0x7a40, regValue);
|
|
+
|
|
+ regValue = 0x855;
|
|
+ mii_mgr_write(31, 0x7a78, regValue);
|
|
+
|
|
+#endif
|
|
+#if !defined (CONFIG_RAETH_8023AZ_EEE)
|
|
+ mii_mgr_write(31, 0x7b00, 0x102); //delay setting for 10/1000M
|
|
+ mii_mgr_write(31, 0x7b04, 0x14); //delay setting for 10/1000M
|
|
+#endif
|
|
+#if 0
|
|
+ for(i=0;i<=4;i++) {
|
|
+ mii_mgr_read(i, 4, ®Value);
|
|
+ regValue |= (3<<7); //turn on 100Base-T Advertisement
|
|
+ //regValue &= ~(3<<7); //turn off 100Base-T Advertisement
|
|
+ mii_mgr_write(i, 4, regValue);
|
|
+
|
|
+ mii_mgr_read(i, 9, ®Value);
|
|
+ regValue |= (3<<8); //turn on 1000Base-T Advertisement
|
|
+ //regValue &= ~(3<<8); //turn off 1000Base-T Advertisement
|
|
+ mii_mgr_write(i, 9, regValue);
|
|
+
|
|
+ //restart AN
|
|
+ mii_mgr_read(i, 0, ®Value);
|
|
+ regValue |= (1 << 9);
|
|
+ mii_mgr_write(i, 0, regValue);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ /*Tx Driving*/
|
|
+ mii_mgr_write(31, 0x7a54, 0x44); //lower driving
|
|
+ mii_mgr_write(31, 0x7a5c, 0x44); //lower driving
|
|
+ mii_mgr_write(31, 0x7a64, 0x44); //lower driving
|
|
+ mii_mgr_write(31, 0x7a6c, 0x44); //lower driving
|
|
+ mii_mgr_write(31, 0x7a74, 0x44); //lower driving
|
|
+ mii_mgr_write(31, 0x7a7c, 0x44); //lower driving
|
|
+
|
|
+
|
|
+ LANWANPartition();
|
|
+
|
|
+#if !defined (CONFIG_RAETH_8023AZ_EEE)
|
|
+ //disable EEE
|
|
+ for(i=0;i<=4;i++)
|
|
+ {
|
|
+ mii_mgr_write(i, 13, 0x7);
|
|
+ mii_mgr_write(i, 14, 0x3C);
|
|
+ mii_mgr_write(i, 13, 0x4007);
|
|
+ mii_mgr_write(i, 14, 0x0);
|
|
+ }
|
|
+
|
|
+ //Disable EEE 10Base-Te:
|
|
+ for(i=0;i<=4;i++)
|
|
+ {
|
|
+ mii_mgr_write(i, 13, 0x1f);
|
|
+ mii_mgr_write(i, 14, 0x027b);
|
|
+ mii_mgr_write(i, 13, 0x401f);
|
|
+ mii_mgr_write(i, 14, 0x1177);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ for(i=0;i<=4;i++)
|
|
+ {
|
|
+ //turn on PHY
|
|
+ mii_mgr_read(i, 0x0 ,®Value);
|
|
+ regValue &= ~(0x1<<11);
|
|
+ mii_mgr_write(i, 0x0, regValue);
|
|
+ }
|
|
+
|
|
+ mii_mgr_read(31, 0x7808 ,®Value);
|
|
+ regValue |= (3<<16); //Enable INTR
|
|
+ mii_mgr_write(31, 0x7808 ,regValue);
|
|
+#if defined (CONFIG_RAETH_8023AZ_EEE) && defined (CONFIG_RALINK_MT7621)
|
|
+ mt7621_eee_patch();
|
|
+#endif
|
|
+}
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_GE1_TRGMII_FORCE_1200)
|
|
+void apll_xtal_enable(void)
|
|
+{
|
|
+ unsigned long data = 0;
|
|
+ unsigned long regValue = 0;
|
|
+
|
|
+ /* Firstly, reset all required register to default value */
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE, 0x00008000);
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE+0x0014, 0x01401d61);
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE+0x0018, 0x38233d0e);
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, 0x80120004);
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE+0x0020, 0x1c7dbf48);
|
|
+
|
|
+ /* toggle RG_XPTL_CHG */
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE, 0x00008800);
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE, 0x00008c00);
|
|
+
|
|
+ data = sysRegRead(RALINK_ANA_CTRL_BASE+0x0014);
|
|
+ data &= ~(0x0000ffc0);
|
|
+
|
|
+ regValue = *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x10);
|
|
+ regValue = (regValue >> 6) & 0x7;
|
|
+ if(regValue < 6) { //20/40Mhz Xtal
|
|
+ data |= REGBIT(0x1d, 8);
|
|
+ }else {
|
|
+ data |= REGBIT(0x17, 8);
|
|
+ }
|
|
+
|
|
+ if(regValue < 6) { //20/40Mhz Xtal
|
|
+ data |= REGBIT(0x1, 6);
|
|
+ }
|
|
+
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE+0x0014, data);
|
|
+
|
|
+ data = sysRegRead(RALINK_ANA_CTRL_BASE+0x0018);
|
|
+ data &= ~(0xf0773f00);
|
|
+ data |= REGBIT(0x3, 28);
|
|
+ data |= REGBIT(0x2, 20);
|
|
+ if(regValue < 6) { //20/40Mhz Xtal
|
|
+ data |= REGBIT(0x3, 16);
|
|
+ }else {
|
|
+ data |= REGBIT(0x2, 16);
|
|
+ }
|
|
+ data |= REGBIT(0x3, 12);
|
|
+
|
|
+ if(regValue < 6) { //20/40Mhz Xtal
|
|
+ data |= REGBIT(0xd, 8);
|
|
+ }else {
|
|
+ data |= REGBIT(0x7, 8);
|
|
+ }
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE+0x0018, data);
|
|
+
|
|
+ if(regValue < 6) { //20/40Mhz Xtal
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE+0x0020, 0x1c7dbf48);
|
|
+ }else {
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE+0x0020, 0x1697cc39);
|
|
+ }
|
|
+ /* Common setting - Set PLLGP_CTRL_4 */
+ /* 1. Bit 31 */
+ data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
|
|
+ data &= ~(REGBIT(0x1, 31));
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
|
|
+
|
|
+ /* 2. Bit 0 */
|
|
+ data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
|
|
+ data |= REGBIT(0x1, 0);
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
|
|
+
|
|
+ /* 3. Bit 3 */
|
|
+ data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
|
|
+ data |= REGBIT(0x1, 3);
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
|
|
+
|
|
+ /* 4. Bit 8 */
|
|
+ data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
|
|
+ data |= REGBIT(0x1, 8);
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
|
|
+
|
|
+ /* 5. Bit 6 */
|
|
+ data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
|
|
+ data |= REGBIT(0x1, 6);
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
|
|
+
|
|
+ /* 6. Bit 5 and Bit 7 */
+ data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
|
|
+ data |= REGBIT(0x1, 5);
|
|
+ data |= REGBIT(0x1, 7);
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
|
|
+
|
|
+ /* 7. Bit 17 */
|
|
+ data = sysRegRead(RALINK_ANA_CTRL_BASE+0x001c);
|
|
+ data &= ~REGBIT(0x1, 17);
|
|
+ sysRegWrite(RALINK_ANA_CTRL_BASE+0x001c, data);
|
|
+
|
|
+ /* 8. TRGMII TX CLK SEL APLL */
|
|
+ data = sysRegRead(0xbe00002c);
|
|
+ data &= 0xffffff9f;
|
|
+ data |= 0x40;
|
|
+ sysRegWrite(0xbe00002c, data);
|
|
+
|
|
+}
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+#if defined(CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_MT7620)
|
|
+void rt_gsw_init(void)
|
|
+{
|
|
+#if defined (CONFIG_P4_MAC_TO_PHY_MODE) || defined (CONFIG_P5_MAC_TO_PHY_MODE)
|
|
+ u32 phy_val=0;
|
|
+#endif
|
|
+#if defined (CONFIG_RT6855_FPGA) || defined (CONFIG_MT7620_FPGA)
|
|
+ u32 i=0;
|
|
+#elif defined (CONFIG_MT7620_ASIC)
|
|
+ u32 is_BGA=0;
|
|
+#endif
|
|
+#if defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
|
|
+ unsigned int regValue = 0;
|
|
+#endif
|
|
+#if defined (CONFIG_RT6855_FPGA) || defined (CONFIG_MT7620_FPGA)
|
|
+ /* keep dumb switch mode */
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3000) = 0x5e333;//(P0, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3100) = 0x5e333;//(P1, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3200) = 0x5e333;//(P2, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3300) = 0x5e333;//(P3, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
|
|
+#if defined (CONFIG_RAETH_HAS_PORT4)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x5e337;//(P4, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
|
|
+#else
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x5e333;//(P4, Force mode, Link Up, 10Mbps, Full-Duplex, FC ON)
|
|
+#endif
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
|
|
+
|
|
+ /* In order to use 10M/Full on the FPGA board, configure the PHY as capable of
+ * 10M Full/Half duplex, so auto-negotiation can be used on the PC side */
+#if defined (CONFIG_RAETH_HAS_PORT4)
|
|
+ for(i=0;i<4;i++){
|
|
+#else
|
|
+ for(i=0;i<5;i++){
|
|
+#endif
|
|
+ mii_mgr_write(i, 4, 0x0461); //Capable of 10M Full/Half Duplex, flow control on/off
|
|
+ mii_mgr_write(i, 0, 0xB100); //reset all digital logic, except phy_reg
|
|
+ }
|
|
+
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_PDMA_NEW)
|
|
+ *(unsigned long *)(SYSCFG1) |= (0x1 << 8); //PCIE_RC_MODE=1
|
|
+#endif
|
|
+
|
|
+
|
|
+#if defined (CONFIG_MT7620_ASIC) && !defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
|
|
+ is_BGA = (sysRegRead(RALINK_SYSCTL_BASE + 0xc) >> 16) & 0x1;
|
|
+ /*
|
|
+ * Reg 31: Page Control
|
|
+ * Bit 15 => PortPageSel, 1=local, 0=global
|
|
+ * Bit 14:12 => PageSel, local:0~3, global:0~4
|
|
+ *
|
|
+ * Reg16~30:Local/Global registers
|
|
+ *
|
|
+ */
|
|
+ /*correct PHY setting L3.0 BGA*/
|
|
+ mii_mgr_write(1, 31, 0x4000); //global, page 4
|
|
+
|
|
+ mii_mgr_write(1, 17, 0x7444);
|
|
+ if(is_BGA){
|
|
+ mii_mgr_write(1, 19, 0x0114);
|
|
+ }else{
|
|
+ mii_mgr_write(1, 19, 0x0117);
|
|
+ }
|
|
+
|
|
+ mii_mgr_write(1, 22, 0x10cf);
|
|
+ mii_mgr_write(1, 25, 0x6212);
|
|
+ mii_mgr_write(1, 26, 0x0777);
|
|
+ mii_mgr_write(1, 29, 0x4000);
|
|
+ mii_mgr_write(1, 28, 0xc077);
|
|
+ mii_mgr_write(1, 24, 0x0000);
|
|
+
|
|
+ mii_mgr_write(1, 31, 0x3000); //global, page 3
|
|
+ mii_mgr_write(1, 17, 0x4838);
|
|
+
|
|
+ mii_mgr_write(1, 31, 0x2000); //global, page 2
|
|
+ if(is_BGA){
|
|
+ mii_mgr_write(1, 21, 0x0515);
|
|
+ mii_mgr_write(1, 22, 0x0053);
|
|
+ mii_mgr_write(1, 23, 0x00bf);
|
|
+ mii_mgr_write(1, 24, 0x0aaf);
|
|
+ mii_mgr_write(1, 25, 0x0fad);
|
|
+ mii_mgr_write(1, 26, 0x0fc1);
|
|
+ }else{
|
|
+ mii_mgr_write(1, 21, 0x0517);
|
|
+ mii_mgr_write(1, 22, 0x0fd2);
|
|
+ mii_mgr_write(1, 23, 0x00bf);
|
|
+ mii_mgr_write(1, 24, 0x0aab);
|
|
+ mii_mgr_write(1, 25, 0x00ae);
|
|
+ mii_mgr_write(1, 26, 0x0fff);
|
|
+ }
|
|
+ mii_mgr_write(1, 31, 0x1000); //global, page 1
|
|
+ mii_mgr_write(1, 17, 0xe7f8);
|
|
+
|
|
+ mii_mgr_write(1, 31, 0x8000); //local, page 0
|
|
+ mii_mgr_write(0, 30, 0xa000);
|
|
+ mii_mgr_write(1, 30, 0xa000);
|
|
+ mii_mgr_write(2, 30, 0xa000);
|
|
+ mii_mgr_write(3, 30, 0xa000);
|
|
+#if !defined (CONFIG_RAETH_HAS_PORT4)
|
|
+ mii_mgr_write(4, 30, 0xa000);
|
|
+#endif
|
|
+
|
|
+ mii_mgr_write(0, 4, 0x05e1);
|
|
+ mii_mgr_write(1, 4, 0x05e1);
|
|
+ mii_mgr_write(2, 4, 0x05e1);
|
|
+ mii_mgr_write(3, 4, 0x05e1);
|
|
+#if !defined (CONFIG_RAETH_HAS_PORT4)
|
|
+ mii_mgr_write(4, 4, 0x05e1);
|
|
+#endif
|
|
+
|
|
+ mii_mgr_write(1, 31, 0xa000); //local, page 2
|
|
+ mii_mgr_write(0, 16, 0x1111);
|
|
+ mii_mgr_write(1, 16, 0x1010);
|
|
+ mii_mgr_write(2, 16, 0x1515);
|
|
+ mii_mgr_write(3, 16, 0x0f0f);
|
|
+#if !defined (CONFIG_RAETH_HAS_PORT4)
|
|
+ mii_mgr_write(4, 16, 0x1313);
|
|
+#endif
|
|
+
|
|
+#if !defined (CONFIG_RAETH_8023AZ_EEE)
|
|
+ mii_mgr_write(1, 31, 0xb000); //local, page 3
|
|
+ mii_mgr_write(0, 17, 0x0);
|
|
+ mii_mgr_write(1, 17, 0x0);
|
|
+ mii_mgr_write(2, 17, 0x0);
|
|
+ mii_mgr_write(3, 17, 0x0);
|
|
+#if !defined (CONFIG_RAETH_HAS_PORT4)
|
|
+ mii_mgr_write(4, 17, 0x0);
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+
|
|
+
|
|
+#if 0
|
|
+ // for ethernet extended mode
|
|
+ mii_mgr_write(1, 31, 0x3000);
|
|
+ mii_mgr_write(1, 19, 0x122);
|
|
+ mii_mgr_write(1, 20, 0x0044);
|
|
+ mii_mgr_write(1, 23, 0xa80c);
|
|
+ mii_mgr_write(1, 24, 0x129d);
|
|
+ mii_mgr_write(1, 31, 0x9000);
+ mii_mgr_write(0, 18, 0x140c);
|
|
+ mii_mgr_write(1, 18, 0x140c);
|
|
+ mii_mgr_write(2, 18, 0x140c);
|
|
+ mii_mgr_write(3, 18, 0x140c);
|
|
+ mii_mgr_write(0, 0, 0x3300);
|
|
+ mii_mgr_write(1, 0, 0x3300);
|
|
+ mii_mgr_write(2, 0, 0x3300);
|
|
+ mii_mgr_write(3, 0, 0x3300);
|
|
+#if !defined (CONFIG_RAETH_HAS_PORT4)
|
|
+ mii_mgr_write(4, 18, 0x140c);
|
|
+ mii_mgr_write(4, 0, 0x3300);
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RALINK_MT7620)
|
|
+ if ((sysRegRead(0xB000000C) & 0xf) >= 0x5) {
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x701c) = 0x800000c; //enlarge FE2SW_IPG
|
|
+ }
|
|
+#endif // CONFIG_RALINK_MT7620 //
+
|
|
+
|
|
+
|
|
+#if defined (CONFIG_MT7620_FPGA)|| defined (CONFIG_MT7620_ASIC)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3600) = 0x5e33b;//CPU Port6 Force Link 1G, FC ON
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x0010) = 0x7f7f7fe0;//Set Port6 CPU Port
|
|
+
|
|
+#if defined (CONFIG_P5_RGMII_TO_MAC_MODE) || defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e33b;//(P5, Force mode, Link Up, 1000Mbps, Full-Duplex, FC ON)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x7014) = 0x1f0c000c; //disable port 0 ~ 4 internal phy, set phy base address to 12
|
|
+ /*MT7620 need mac learning for PPE*/
|
|
+ //*(unsigned long *)(RALINK_ETH_SW_BASE+0x250c) = 0x000fff10;//disable port5 mac learning
|
|
+ //*(unsigned long *)(RALINK_ETH_SW_BASE+0x260c) = 0x000fff10;//disable port6 mac learning
|
|
+ *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
|
|
+ //rxclk_skew, txclk_skew = 0
|
|
+ *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=RGMii Mode
|
|
+#if defined (CONFIG_P5_RGMII_TO_MT7530_MODE)
|
|
+
|
|
+ *(unsigned long *)(0xb0000060) &= ~(3 << 7); //set MDIO to Normal mode
|
|
+
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x56330;//(P4, AN)
|
|
+ *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
|
|
+ //rxclk_skew, txclk_skew = 0
|
|
+ *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=RGMii Mode
|
|
+
|
|
+
|
|
+ /* set MT7530 Port 0 to PHY mode */
|
|
+ mii_mgr_read(31, 0x7804 ,®Value);
|
|
+#if defined (CONFIG_GE_RGMII_MT7530_P0_AN)
|
|
+ regValue &= ~((1<<13)|(1<<6)|(1<<5)|(1<<15));
|
|
+ regValue |= ((1<<7)|(1<<16)|(1<<20)|(1<<24));
|
|
+ //mii_mgr_write(31, 0x7804 ,0x115c8f);
|
|
+#elif defined (CONFIG_GE_RGMII_MT7530_P4_AN)
|
|
+ regValue &= ~((1<<13)|(1<<6)|(1<<20)|(1<<5)|(1<<15));
|
|
+ regValue |= ((1<<7)|(1<<16)|(1<<24));
|
|
+#endif
|
|
+ regValue &= ~(1<<8); //Enable Port 6
|
|
+ mii_mgr_write(31, 0x7804 ,regValue); //bit 24 standalone switch
|
|
+
|
|
+/* set MT7530 central align */
|
|
+ mii_mgr_read(31, 0x7830, ®Value);
|
|
+ regValue &= ~1;
|
|
+ regValue |= 1<<1;
|
|
+ mii_mgr_write(31, 0x7830, regValue);
|
|
+
|
|
+ mii_mgr_read(31, 0x7a40, ®Value);
|
|
+ regValue &= ~(1<<30);
|
|
+ mii_mgr_write(31, 0x7a40, regValue);
|
|
+
|
|
+ regValue = 0x855;
|
|
+ mii_mgr_write(31, 0x7a78, regValue);
|
|
+
|
|
+ /*AN should be set after MT7530 HWSTRAP*/
|
|
+#if defined (CONFIG_GE_RGMII_MT7530_P0_AN)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x7000) = 0xc5000100;//(P0, AN polling)
|
|
+#elif defined (CONFIG_GE_RGMII_MT7530_P4_AN)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x7000) = 0xc5000504;//(P4, AN polling)
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+#elif defined (CONFIG_P5_MII_TO_MAC_MODE)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
|
|
+ *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
|
|
+ *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=Mii Mode
|
|
+ *(unsigned long *)(SYSCFG1) |= (0x1 << 12);
|
|
+
|
|
+#elif defined (CONFIG_P5_MAC_TO_PHY_MODE)
|
|
+ *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
|
|
+ *(unsigned long *)(0xb0000060) &= ~(3 << 7); //set MDIO to Normal mode
|
|
+ *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=RGMii Mode
|
|
+
|
|
+ enable_auto_negotiate(1);
|
|
+
|
|
+ if (isICPlusGigaPHY(1)) {
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, &phy_val);
|
|
+ phy_val |= 1<<10; //enable pause ability
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, phy_val);
|
|
+
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
|
|
+ phy_val |= 1<<9; //restart AN
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
|
|
+ }else if (isMarvellGigaPHY(1)) {
|
|
+#if defined (CONFIG_MT7620_FPGA)
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, &phy_val);
|
|
+ phy_val &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, phy_val);
|
|
+#endif
|
|
+ printk("Reset MARVELL phy1\n");
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, &phy_val);
|
|
+ phy_val |= 1<<7; //Add delay to RX_CLK for RXD Outputs
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, phy_val);
|
|
+
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
|
|
+ phy_val |= 1<<15; //PHY Software Reset
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
|
|
+ }else if (isVtssGigaPHY(1)) {
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0001); //extended page
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, &phy_val);
|
|
+ printk("Vitesse phy skew: %x --> ", phy_val);
|
|
+ phy_val |= (0x3<<12); // RGMII RX skew compensation= 2.0 ns
|
|
+ phy_val &= ~(0x3<<14);// RGMII TX skew compensation= 0 ns
|
|
+ printk("%x\n", phy_val);
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, phy_val);
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0000); //main registers
|
|
+ }
|
|
+
|
|
+
|
|
+#elif defined (CONFIG_P5_RMII_TO_MAC_MODE)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x5e337;//(P5, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
|
|
+ *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
|
|
+ *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=RvMii Mode
|
|
+ *(unsigned long *)(SYSCFG1) |= (0x2 << 12);
|
|
+
|
|
+#else // Port 5 Disabled //
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3500) = 0x8000;//link down
|
|
+ *(unsigned long *)(0xb0000060) |= (1 << 9); //set RGMII to GPIO mode
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_P4_RGMII_TO_MAC_MODE)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x5e33b;//(P4, Force mode, Link Up, 1000Mbps, Full-Duplex, FC ON)
|
|
+ *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
|
|
+ //rxclk_skew, txclk_skew = 0
|
|
+ *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=RGMii Mode
|
|
+
|
|
+#elif defined (CONFIG_P4_MII_TO_MAC_MODE)
|
|
+ *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
|
|
+ *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=Mii Mode
|
|
+ *(unsigned long *)(SYSCFG1) |= (0x1 << 14);
|
|
+
|
|
+#elif defined (CONFIG_P4_MAC_TO_PHY_MODE)
|
|
+ *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
|
|
+ *(unsigned long *)(0xb0000060) &= ~(3 << 7); //set MDIO to Normal mode
|
|
+ *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=RGMii Mode
|
|
+
|
|
+ enable_auto_negotiate(1);
|
|
+
|
|
+ if (isICPlusGigaPHY(2)) {
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 4, &phy_val);
|
|
+ phy_val |= 1<<10; //enable pause ability
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 4, phy_val);
|
|
+
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, &phy_val);
|
|
+ phy_val |= 1<<9; //restart AN
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, phy_val);
|
|
+ }else if (isMarvellGigaPHY(2)) {
|
|
+#if defined (CONFIG_MT7620_FPGA)
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, &phy_val);
|
|
+ phy_val &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, phy_val);
|
|
+#endif
|
|
+ printk("Reset MARVELL phy2\n");
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 20, &phy_val);
|
|
+ phy_val |= 1<<7; //Add delay to RX_CLK for RXD Outputs
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 20, phy_val);
|
|
+
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, &phy_val);
|
|
+ phy_val |= 1<<15; //PHY Software Reset
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, phy_val);
|
|
+ }else if (isVtssGigaPHY(2)) {
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 31, 0x0001); //extended page
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 28, &phy_val);
|
|
+ printk("Vitesse phy skew: %x --> ", phy_val);
|
|
+ phy_val |= (0x3<<12); // RGMII RX skew compensation= 2.0 ns
|
|
+ phy_val &= ~(0x3<<14);// RGMII TX skew compensation= 0 ns
|
|
+ printk("%x\n", phy_val);
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 28, phy_val);
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 31, 0x0000); //main registers
|
|
+ }
|
|
+
|
|
+#elif defined (CONFIG_P4_RMII_TO_MAC_MODE)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x3400) = 0x5e337;//(P4, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
+ *(unsigned long *)(0xb0000060) &= ~(1 << 10); //set GE2 to Normal mode
+ *(unsigned long *)(SYSCFG1) &= ~(0x3 << 14); //GE2_MODE=RvMii Mode
+ *(unsigned long *)(SYSCFG1) |= (0x2 << 14);
+#elif defined (CONFIG_GE_RGMII_MT7530_P0_AN) || defined (CONFIG_GE_RGMII_MT7530_P4_AN)
|
|
+#else // Port 4 Disabled //
|
|
+ *(unsigned long *)(SYSCFG1) |= (0x3 << 14); //GE2_MODE=RJ45 Mode
|
|
+ *(unsigned long *)(0xb0000060) |= (1 << 10); //set RGMII2 to GPIO mode
|
|
+#endif
|
|
+
|
|
+}
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RALINK_MT7628)
|
|
+
|
|
+void mt7628_ephy_init(void)
|
|
+{
|
|
+ int i;
|
|
+ u32 phy_val;
|
|
+ mii_mgr_write(0, 31, 0x2000); //change G2 page
|
|
+ mii_mgr_write(0, 26, 0x0000);
|
|
+
|
|
+ for(i=0; i<5; i++){
|
|
+ mii_mgr_write(i, 31, 0x8000); //change L0 page
|
|
+ mii_mgr_write(i, 0, 0x3100);
|
|
+
|
|
+#if defined (CONFIG_RAETH_8023AZ_EEE)
|
|
+ mii_mgr_read(i, 26, &phy_val);// EEE setting
|
|
+ phy_val |= (1 << 5);
|
|
+ mii_mgr_write(i, 26, phy_val);
|
|
+#else
|
|
+ //disable EEE
|
|
+ mii_mgr_write(i, 13, 0x7);
|
|
+ mii_mgr_write(i, 14, 0x3C);
|
|
+ mii_mgr_write(i, 13, 0x4007);
|
|
+ mii_mgr_write(i, 14, 0x0);
|
|
+#endif
|
|
+ mii_mgr_write(i, 30, 0xa000);
|
|
+ mii_mgr_write(i, 31, 0xa000); // change L2 page
|
|
+ mii_mgr_write(i, 16, 0x0606);
|
|
+ mii_mgr_write(i, 23, 0x0f0e);
|
|
+ mii_mgr_write(i, 24, 0x1610);
|
|
+ mii_mgr_write(i, 30, 0x1f15);
|
|
+ mii_mgr_write(i, 28, 0x6111);
|
|
+
|
|
+ mii_mgr_read(i, 4, &phy_val);
|
|
+ phy_val |= (1 << 10);
|
|
+ mii_mgr_write(i, 4, phy_val);
|
|
+ }
|
|
+
|
|
+ //100Base AOI setting
|
|
+ mii_mgr_write(0, 31, 0x5000); //change G5 page
|
|
+ mii_mgr_write(0, 19, 0x004a);
|
|
+ mii_mgr_write(0, 20, 0x015a);
|
|
+ mii_mgr_write(0, 21, 0x00ee);
|
|
+ mii_mgr_write(0, 22, 0x0033);
|
|
+ mii_mgr_write(0, 23, 0x020a);
|
|
+ mii_mgr_write(0, 24, 0x0000);
|
|
+ mii_mgr_write(0, 25, 0x024a);
|
|
+ mii_mgr_write(0, 26, 0x035a);
|
|
+ mii_mgr_write(0, 27, 0x02ee);
|
|
+ mii_mgr_write(0, 28, 0x0233);
|
|
+ mii_mgr_write(0, 29, 0x000a);
|
|
+ mii_mgr_write(0, 30, 0x0000);
|
|
+ /* Fix EPHY idle state abnormal behavior */
|
|
+ mii_mgr_write(0, 31, 0x4000); //change G4 page
|
|
+ mii_mgr_write(0, 29, 0x000d);
|
|
+ mii_mgr_write(0, 30, 0x0500);
|
|
+
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
|
|
+void rt305x_esw_init(void)
|
|
+{
|
|
+ int i=0;
|
|
+ u32 phy_val=0, val=0;
|
|
+#if defined (CONFIG_RT3052_ASIC)
|
|
+ u32 phy_val2;
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RT5350_ASIC)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x0168) = 0x17;
|
|
+#endif
|
|
+
|
|
+ /*
|
|
+ * FC_RLS_TH=200, FC_SET_TH=160
|
|
+ * DROP_RLS=120, DROP_SET_TH=80
|
|
+ */
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x0008) = 0xC8A07850;
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x00E4) = 0x00000000;
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x0014) = 0x00405555;
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x0050) = 0x00002001;
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x0090) = 0x00007f7f;
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x0098) = 0x00007f3f; //disable VLAN
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x00CC) = 0x0002500c;
|
|
+#ifndef CONFIG_UNH_TEST
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x009C) = 0x0008a301; //hashing algorithm=XOR48, aging interval=300sec
|
|
+#else
|
|
+ /*
|
|
+ * bit[30]:1 Backoff Algorithm Option: The latest one to pass UNH test
|
|
+ * bit[29]:1 Length of Received Frame Check Enable
|
|
+ * bit[8]:0 Enable collision 16 packet abort and late collision abort
|
|
+ * bit[7:6]:01 Maximum Packet Length: 1518
|
|
+ */
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x009C) = 0x6008a241;
|
|
+#endif
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x008C) = 0x02404040;
|
|
+#if defined (CONFIG_RT3052_ASIC) || defined (CONFIG_RT3352_ASIC) || defined (CONFIG_RT5350_ASIC) || defined (CONFIG_MT7628_ASIC)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) = 0x3f502b28; //Change polling Ext PHY Addr=0x1F
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x0084) = 0x00000000;
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x0110) = 0x7d000000; //1us cycle number=125 (FE's clock=125Mhz)
|
|
+#elif defined (CONFIG_RT3052_FPGA) || defined (CONFIG_RT3352_FPGA) || defined (CONFIG_RT5350_FPGA) || defined (CONFIG_MT7628_FPGA)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) = 0x00f03ff9; //polling Ext PHY Addr=0x0, force port5 as 100F/D (disable auto-polling)
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x0084) = 0xffdf1f00;
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x0110) = 0x0d000000; //1us cycle number=13 (FE's clock=12.5Mhz)
|
|
+
|
|
+ /* In order to use 10M/Full on the FPGA board, configure the PHY as capable of
+ * 10M Full/Half duplex, so auto-negotiation can be used on the PC side */
+ for(i=0;i<5;i++){
|
|
+ mii_mgr_write(i, 4, 0x0461); //Capable of 10M Full/Half Duplex, flow control on/off
|
|
+ mii_mgr_write(i, 0, 0xB100); //reset all digital logic, except phy_reg
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ /*
|
|
+ * force port 5 to 1000M/Full when connecting to a switch or iNIC
+ */
|
|
+#if defined (CONFIG_P5_RGMII_TO_MAC_MODE)
|
|
+ *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1<<29); //disable port 5 auto-polling
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) |= 0x3fff; //force 1000M full duplex
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(0xf<<20); //rxclk_skew, txclk_skew = 0
|
|
+#elif defined (CONFIG_P5_MII_TO_MAC_MODE)
|
|
+ *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1<<29); //disable port 5 auto-polling
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(0x3fff);
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) |= 0x3ffd; //force 100M full duplex
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT3352)
|
|
+ *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=Mii Mode
|
|
+ *(unsigned long *)(SYSCFG1) |= (0x1 << 12);
|
|
+#endif
|
|
+
|
|
+#elif defined (CONFIG_P5_MAC_TO_PHY_MODE)
|
|
+ *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
|
|
+ *(unsigned long *)(0xb0000060) &= ~(1 << 7); //set MDIO to Normal mode
|
|
+#if defined (CONFIG_RT3052_ASIC) || defined (CONFIG_RT3352_ASIC)
|
|
+ enable_auto_negotiate(1);
|
|
+#endif
|
|
+ if (isMarvellGigaPHY(1)) {
|
|
+#if defined (CONFIG_RT3052_FPGA) || defined (CONFIG_RT3352_FPGA)
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, &phy_val);
|
|
+ phy_val &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, phy_val);
|
|
+#endif
|
|
+ printk("\n Reset MARVELL phy\n");
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, &phy_val);
|
|
+ phy_val |= 1<<7; //Add delay to RX_CLK for RXD Outputs
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, phy_val);
|
|
+
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, &phy_val);
|
|
+ phy_val |= 1<<15; //PHY Software Reset
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, phy_val);
|
|
+ }
|
|
+ if (isVtssGigaPHY(1)) {
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0001); //extended page
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, &phy_val);
|
|
+ printk("Vitesse phy skew: %x --> ", phy_val);
|
|
+ phy_val |= (0x3<<12); // RGMII RX skew compensation= 2.0 ns
|
|
+ phy_val &= ~(0x3<<14);// RGMII TX skew compensation= 0 ns
|
|
+ printk("%x\n", phy_val);
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, phy_val);
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0x0000); //main registers
|
|
+ }
|
|
+
|
|
+#elif defined (CONFIG_P5_RMII_TO_MAC_MODE)
|
|
+ *(unsigned long *)(0xb0000060) &= ~(1 << 9); //set RGMII to Normal mode
|
|
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1<<29); //disable port 5 auto-polling
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(0x3fff);
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) |= 0x3ffd; //force 100M full duplex
+
+#if defined (CONFIG_RALINK_RT3352)
+ *(unsigned long *)(SYSCFG1) &= ~(0x3 << 12); //GE1_MODE=RvMii Mode
+ *(unsigned long *)(SYSCFG1) |= (0x2 << 12);
+#endif
+#else // Port 5 Disabled //
+
+#if defined (CONFIG_RALINK_RT3052)
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1 << 29); //port5 auto polling disable
+ *(unsigned long *)(0xb0000060) |= (1 << 7); //set MDIO to GPIO mode (GPIO22-GPIO23)
+ *(unsigned long *)(0xb0000060) |= (1 << 9); //set RGMII to GPIO mode (GPIO41-GPIO50)
+ *(unsigned long *)(0xb0000674) = 0xFFF; //GPIO41-GPIO50 output mode
+ *(unsigned long *)(0xb000067C) = 0x0; //GPIO41-GPIO50 output low
+#elif defined (CONFIG_RALINK_RT3352)
+ *(unsigned long *)(RALINK_ETH_SW_BASE+0x00C8) &= ~(1 << 29); //port5 auto polling disable
+ *(unsigned long *)(0xb0000060) |= (1 << 7); //set MDIO to GPIO mode (GPIO22-GPIO23)
+ *(unsigned long *)(0xb0000624) = 0xC0000000; //GPIO22-GPIO23 output mode
+ *(unsigned long *)(0xb000062C) = 0xC0000000; //GPIO22-GPIO23 output high
+
+ *(unsigned long *)(0xb0000060) |= (1 << 9); //set RGMII to GPIO mode (GPIO24-GPIO35)
+ *(unsigned long *)(0xb000064C) = 0xFFF; //GPIO24-GPIO35 output mode
+ *(unsigned long *)(0xb0000654) = 0xFFF; //GPIO24-GPIO35 output high
+#elif defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628)
+ /* do nothing */
+#endif
+#endif // CONFIG_P5_RGMII_TO_MAC_MODE //
+
|
|
+
|
|
+#if defined (CONFIG_RT3052_ASIC)
|
|
+ rw_rf_reg(0, 0, &phy_val);
|
|
+ phy_val = phy_val >> 4;
|
|
+
|
|
+ if(phy_val > 0x5) {
|
|
+
|
|
+ rw_rf_reg(0, 26, &phy_val);
|
|
+ phy_val2 = (phy_val | (0x3 << 5));
|
|
+ rw_rf_reg(1, 26, &phy_val2);
|
|
+
|
|
+ // reset EPHY
|
|
+ val = sysRegRead(RSTCTRL);
|
|
+ val = val | RALINK_EPHY_RST;
|
|
+ sysRegWrite(RSTCTRL, val);
|
|
+ val = val & ~(RALINK_EPHY_RST);
|
|
+ sysRegWrite(RSTCTRL, val);
|
|
+
|
|
+ rw_rf_reg(1, 26, &phy_val);
|
|
+
|
|
+ //select local register
|
|
+ mii_mgr_write(0, 31, 0x8000);
|
|
+ for(i=0;i<5;i++){
|
|
+ mii_mgr_write(i, 26, 0x1600); //TX10 waveform coefficient //LSB=0 disable PHY
|
|
+ mii_mgr_write(i, 29, 0x7058); //TX100/TX10 AD/DA current bias
|
|
+ mii_mgr_write(i, 30, 0x0018); //TX100 slew rate control
|
|
+ }
|
|
+
|
|
+ //select global register
|
|
+ mii_mgr_write(0, 31, 0x0);
|
|
+ mii_mgr_write(0, 1, 0x4a40); //enlarge agcsel threshold 3 and threshold 2
|
|
+ mii_mgr_write(0, 2, 0x6254); //enlarge agcsel threshold 5 and threshold 4
|
|
+ mii_mgr_write(0, 3, 0xa17f); //enlarge agcsel threshold 6
|
|
+//#define ENABLE_LDPS
|
|
+#if defined (ENABLE_LDPS)
|
|
+ mii_mgr_write(0, 12, 0x7eaa);
|
|
+ mii_mgr_write(0, 22, 0x252f); //tune TP_IDL tail and head waveform, enable power down slew rate control
|
|
+#else
|
|
+ mii_mgr_write(0, 12, 0x0);
|
|
+ mii_mgr_write(0, 22, 0x052f);
|
|
+#endif
|
|
+ mii_mgr_write(0, 14, 0x65); //longer TP_IDL tail length
|
|
+ mii_mgr_write(0, 16, 0x0684); //increased squelch pulse count threshold.
|
|
+ mii_mgr_write(0, 17, 0x0fe0); //set TX10 signal amplitude threshold to minimum
|
|
+ mii_mgr_write(0, 18, 0x40ba); //set squelch amplitude to higher threshold
|
|
+ mii_mgr_write(0, 27, 0x2fce); //set PLL/Receive bias current are calibrated
|
|
+ mii_mgr_write(0, 28, 0xc410); //change PLL/Receive bias current to internal(RT3350)
|
|
+ mii_mgr_write(0, 29, 0x598b); //change PLL bias current to internal(RT3052_MP3)
|
|
+ mii_mgr_write(0, 31, 0x8000); //select local register
|
|
+
|
|
+ for(i=0;i<5;i++){
|
|
+ //LSB=1 enable PHY
|
|
+ mii_mgr_read(i, 26, &phy_val);
|
|
+ phy_val |= 0x0001;
|
|
+ mii_mgr_write(i, 26, phy_val);
|
|
+ }
|
|
+ } else {
|
|
+ //select local register
|
|
+ mii_mgr_write(0, 31, 0x8000);
|
|
+ for(i=0;i<5;i++){
|
|
+ mii_mgr_write(i, 26, 0x1600); //TX10 waveform coefficient //LSB=0 disable PHY
|
|
+ mii_mgr_write(i, 29, 0x7058); //TX100/TX10 AD/DA current bias
|
|
+ mii_mgr_write(i, 30, 0x0018); //TX100 slew rate control
|
|
+ }
|
|
+
|
|
+ //select global register
|
|
+ mii_mgr_write(0, 31, 0x0);
|
|
+ mii_mgr_write(0, 1, 0x4a40); //enlarge agcsel threshold 3 and threshold 2
|
|
+ mii_mgr_write(0, 2, 0x6254); //enlarge agcsel threshold 5 and threshold 4
|
|
+ mii_mgr_write(0, 3, 0xa17f); //enlarge agcsel threshold 6
|
|
+ mii_mgr_write(0, 14, 0x65); //longer TP_IDL tail length
|
|
+ mii_mgr_write(0, 16, 0x0684); //increased squelch pulse count threshold.
|
|
+ mii_mgr_write(0, 17, 0x0fe0); //set TX10 signal amplitude threshold to minimum
|
|
+ mii_mgr_write(0, 18, 0x40ba); //set squelch amplitude to higher threshold
|
|
+ mii_mgr_write(0, 22, 0x052f); //tune TP_IDL tail and head waveform
|
|
+ mii_mgr_write(0, 27, 0x2fce); //set PLL/Receive bias current are calibrated
|
|
+ mii_mgr_write(0, 28, 0xc410); //change PLL/Receive bias current to internal(RT3350)
|
|
+ mii_mgr_write(0, 29, 0x598b); //change PLL bias current to internal(RT3052_MP3)
|
|
+ mii_mgr_write(0, 31, 0x8000); //select local register
|
|
+
|
|
+ for(i=0;i<5;i++){
|
|
+ //LSB=1 enable PHY
|
|
+ mii_mgr_read(i, 26, &phy_val);
|
|
+ phy_val |= 0x0001;
|
|
+ mii_mgr_write(i, 26, phy_val);
|
|
+ }
|
|
+ }
|
|
+#elif defined (CONFIG_RT3352_ASIC)
|
|
+ //PHY IOT
|
|
+ // reset EPHY
|
|
+ val = sysRegRead(RSTCTRL);
|
|
+ val = val | RALINK_EPHY_RST;
|
|
+ sysRegWrite(RSTCTRL, val);
|
|
+ val = val & ~(RALINK_EPHY_RST);
|
|
+ sysRegWrite(RSTCTRL, val);
|
|
+
|
|
+ //select local register
|
|
+ mii_mgr_write(0, 31, 0x8000);
|
|
+ for(i=0;i<5;i++){
|
|
+ mii_mgr_write(i, 26, 0x1600); //TX10 waveform coefficient //LSB=0 disable PHY
|
|
+ mii_mgr_write(i, 29, 0x7016); //TX100/TX10 AD/DA current bias
|
|
+ mii_mgr_write(i, 30, 0x0038); //TX100 slew rate control
|
|
+ }
|
|
+
|
|
+ //select global register
|
|
+ mii_mgr_write(0, 31, 0x0);
|
|
+ mii_mgr_write(0, 1, 0x4a40); //enlarge agcsel threshold 3 and threshold 2
|
|
+ mii_mgr_write(0, 2, 0x6254); //enlarge agcsel threshold 5 and threshold 4
|
|
+ mii_mgr_write(0, 3, 0xa17f); //enlarge agcsel threshold 6
|
|
+ mii_mgr_write(0, 12, 0x7eaa);
|
|
+ mii_mgr_write(0, 14, 0x65); //longer TP_IDL tail length
|
|
+ mii_mgr_write(0, 16, 0x0684); //increased squelch pulse count threshold.
|
|
+ mii_mgr_write(0, 17, 0x0fe0); //set TX10 signal amplitude threshold to minimum
|
|
+ mii_mgr_write(0, 18, 0x40ba); //set squelch amplitude to higher threshold
|
|
+ mii_mgr_write(0, 22, 0x253f); //tune TP_IDL tail and head waveform, enable power down slew rate control
|
|
+ mii_mgr_write(0, 27, 0x2fda); //set PLL/Receive bias current are calibrated
|
|
+ mii_mgr_write(0, 28, 0xc410); //change PLL/Receive bias current to internal(RT3350)
|
|
+ mii_mgr_write(0, 29, 0x598b); //change PLL bias current to internal(RT3052_MP3)
|
|
+ mii_mgr_write(0, 31, 0x8000); //select local register
|
|
+
|
|
+ for(i=0;i<5;i++){
|
|
+ //LSB=1 enable PHY
|
|
+ mii_mgr_read(i, 26, &phy_val);
|
|
+ phy_val |= 0x0001;
|
|
+ mii_mgr_write(i, 26, phy_val);
|
|
+ }
|
|
+
|
|
+#elif defined (CONFIG_RT5350_ASIC)
|
|
+ //PHY IOT
|
|
+ // reset EPHY
|
|
+ val = sysRegRead(RSTCTRL);
|
|
+ val = val | RALINK_EPHY_RST;
|
|
+ sysRegWrite(RSTCTRL, val);
|
|
+ val = val & ~(RALINK_EPHY_RST);
|
|
+ sysRegWrite(RSTCTRL, val);
|
|
+
|
|
+ //select local register
|
|
+ mii_mgr_write(0, 31, 0x8000);
|
|
+ for(i=0;i<5;i++){
|
|
+ mii_mgr_write(i, 26, 0x1600); //TX10 waveform coefficient //LSB=0 disable PHY
|
|
+ mii_mgr_write(i, 29, 0x7015); //TX100/TX10 AD/DA current bias
|
|
+ mii_mgr_write(i, 30, 0x0038); //TX100 slew rate control
|
|
+ }
|
|
+
|
|
+ //select global register
|
|
+ mii_mgr_write(0, 31, 0x0);
|
|
+ mii_mgr_write(0, 1, 0x4a40); //enlarge agcsel threshold 3 and threshold 2
|
|
+ mii_mgr_write(0, 2, 0x6254); //enlarge agcsel threshold 5 and threshold 4
|
|
+ mii_mgr_write(0, 3, 0xa17f); //enlarge agcsel threshold 6
|
|
+ mii_mgr_write(0, 12, 0x7eaa);
|
|
+ mii_mgr_write(0, 14, 0x65); //longer TP_IDL tail length
|
|
+ mii_mgr_write(0, 16, 0x0684); //increased squelch pulse count threshold.
|
|
+ mii_mgr_write(0, 17, 0x0fe0); //set TX10 signal amplitude threshold to minimum
|
|
+ mii_mgr_write(0, 18, 0x40ba); //set squelch amplitude to higher threshold
|
|
+ mii_mgr_write(0, 22, 0x253f); //tune TP_IDL tail and head waveform, enable power down slew rate control
|
|
+ mii_mgr_write(0, 27, 0x2fda); //set PLL/Receive bias current are calibrated
|
|
+ mii_mgr_write(0, 28, 0xc410); //change PLL/Receive bias current to internal(RT3350)
|
|
+ mii_mgr_write(0, 29, 0x598b); //change PLL bias current to internal(RT3052_MP3)
|
|
+ mii_mgr_write(0, 31, 0x8000); //select local register
|
|
+
|
|
+ for(i=0;i<5;i++){
|
|
+ //LSB=1 enable PHY
|
|
+ mii_mgr_read(i, 26, &phy_val);
|
|
+ phy_val |= 0x0001;
|
|
+ mii_mgr_write(i, 26, phy_val);
|
|
+ }
|
|
+#elif defined (CONFIG_MT7628_ASIC)
|
|
+/*INIT MT7628 PHY HERE*/
|
|
+ val = sysRegRead(RT2880_AGPIOCFG_REG);
|
|
+#if defined (CONFIG_ETH_ONE_PORT_ONLY)
|
|
+ val |= (MT7628_P0_EPHY_AIO_EN | MT7628_P1_EPHY_AIO_EN | MT7628_P2_EPHY_AIO_EN | MT7628_P3_EPHY_AIO_EN | MT7628_P4_EPHY_AIO_EN);
|
|
+ val = val & ~(MT7628_P0_EPHY_AIO_EN);
|
|
+#else
|
|
+ val = val & ~(MT7628_P0_EPHY_AIO_EN | MT7628_P1_EPHY_AIO_EN | MT7628_P2_EPHY_AIO_EN | MT7628_P3_EPHY_AIO_EN | MT7628_P4_EPHY_AIO_EN);
|
|
+#endif
|
|
+ if ((*((volatile u32 *)(RALINK_SYSCTL_BASE + 0x8))) & 0x10000)
|
|
+ val &= ~0x1f0000;
|
|
+ sysRegWrite(RT2880_AGPIOCFG_REG, val);
|
|
+
|
|
+ val = sysRegRead(RSTCTRL);
|
|
+ val = val | RALINK_EPHY_RST;
|
|
+ sysRegWrite(RSTCTRL, val);
|
|
+ val = val & ~(RALINK_EPHY_RST);
|
|
+ sysRegWrite(RSTCTRL, val);
|
|
+
|
|
+
|
|
+ val = sysRegRead(RALINK_SYSCTL_BASE + 0x64);
|
|
+#if defined (CONFIG_ETH_ONE_PORT_ONLY)
|
|
+ val &= 0xf003f003;
|
|
+ val |= 0x05540554;
|
|
+ sysRegWrite(RALINK_SYSCTL_BASE + 0x64, val); // set P0 EPHY LED mode
|
|
+#else
|
|
+ val &= 0xf003f003;
|
|
+ sysRegWrite(RALINK_SYSCTL_BASE + 0x64, val); // set P0~P4 EPHY LED mode
|
|
+#endif
|
|
+
|
|
+ udelay(5000);
|
|
+ mt7628_ephy_init();
|
|
+
|
|
+#endif
|
|
+}
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_ARCH_MT7623) /* TODO: just for bring up, should be removed!!! */
|
|
+void mt7623_pinmux_set(void)
|
|
+{
|
|
+ unsigned long regValue;
|
|
+
|
|
+ //printk("[mt7623_pinmux_set]start\n");
|
|
+ /* Pin277: ESW_RST (1) */
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ad0));
|
|
+ regValue &= ~(BITS(6,8));
|
|
+ regValue |= BIT(6);
|
|
+ *(volatile u_long *)(0xf0005ad0) = regValue;
|
|
+
|
|
+ /* Pin262: G2_TXEN (1) */
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(0xf0005aa0));
|
|
+ regValue &= ~(BITS(6,8));
|
|
+ regValue |= BIT(6);
|
|
+ *(volatile u_long *)(0xf0005aa0) = regValue;
|
|
+ /* Pin263: G2_TXD3 (1) */
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(0xf0005aa0));
|
|
+ regValue &= ~(BITS(9,11));
|
|
+ regValue |= BIT(9);
|
|
+ *(volatile u_long *)(0xf0005aa0) = regValue;
|
|
+ /* Pin264: G2_TXD2 (1) */
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(0xf0005aa0));
|
|
+ regValue &= ~(BITS(12,14));
|
|
+ regValue |= BIT(12);
|
|
+ *(volatile u_long *)(0xf0005aa0) = regValue;
|
|
+ /* Pin265: G2_TXD1 (1) */
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
|
|
+ regValue &= ~(BITS(0,2));
|
|
+ regValue |= BIT(0);
|
|
+ *(volatile u_long *)(0xf0005ab0) = regValue;
|
|
+ /* Pin266: G2_TXD0 (1) */
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
|
|
+ regValue &= ~(BITS(3,5));
|
|
+ regValue |= BIT(3);
|
|
+ *(volatile u_long *)(0xf0005ab0) = regValue;
|
|
+ /* Pin267: G2_TXC (1) */
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
|
|
+ regValue &= ~(BITS(6,8));
|
|
+ regValue |= BIT(6);
|
|
+ *(volatile u_long *)(0xf0005ab0) = regValue;
|
|
+ /* Pin268: G2_RXC (1) */
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
|
|
+ regValue &= ~(BITS(9,11));
|
|
+ regValue |= BIT(9);
|
|
+ *(volatile u_long *)(0xf0005ab0) = regValue;
|
|
+ /* Pin269: G2_RXD0 (1) */
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ab0));
|
|
+ regValue &= ~(BITS(12,14));
|
|
+ regValue |= BIT(12);
|
|
+ *(volatile u_long *)(0xf0005ab0) = regValue;
|
|
+ /* Pin270: G2_RXD1 (1) */
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ac0));
|
|
+ regValue &= ~(BITS(0,2));
|
|
+ regValue |= BIT(0);
|
|
+ *(volatile u_long *)(0xf0005ac0) = regValue;
|
|
+ /* Pin271: G2_RXD2 (1) */
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ac0));
|
|
+ regValue &= ~(BITS(3,5));
|
|
+ regValue |= BIT(3);
|
|
+ *(volatile u_long *)(0xf0005ac0) = regValue;
|
|
+ /* Pin272: G2_RXD3 (1) */
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ac0));
|
|
+ regValue &= ~(BITS(6,8));
|
|
+ regValue |= BIT(6);
|
|
+ *(volatile u_long *)(0xf0005ac0) = regValue;
|
|
+ /* Pin274: G2_RXDV (1) */
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ac0));
|
|
+ regValue &= ~(BITS(12,14));
|
|
+ regValue |= BIT(12);
|
|
+ *(volatile u_long *)(0xf0005ac0) = regValue;
|
|
+
|
|
+ /* Pin275: MDC (1) */
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ad0));
|
|
+ regValue &= ~(BITS(0,2));
|
|
+ regValue |= BIT(0);
|
|
+ *(volatile u_long *)(0xf0005ad0) = regValue;
|
|
+ /* Pin276: MDIO (1) */
|
|
+ regValue = le32_to_cpu(*(volatile u_long *)(0xf0005ad0));
|
|
+ regValue &= ~(BITS(3,5));
|
|
+ regValue |= BIT(3);
|
|
+ *(volatile u_long *)(0xf0005ad0) = regValue;
|
|
+ //printk("[mt7623_pinmux_set]end\n");
|
|
+}
|
|
+
|
|
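+/* Crude settle delay used between TRGMII calibration steps: it simply
+ * re-reads a hardware register (0xFB110610) in a fixed-count loop instead
+ * of using a timer. */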
+void wait_loop(void) {
|
|
+ int i,j;
|
|
+ int read_data;
|
|
+ j =0;
|
|
+ while (j< 10) {
|
|
+ for(i = 0; i<32; i = i+1){
|
|
+ read_data = *(volatile u_long *)(0xFB110610);
|
|
+ }
|
|
+ j++;
|
|
+ }
|
|
+}
|
|
+
|
|
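+/* Calibrate the MT7623 TRGMII receive path against the MT7530 training
+ * pattern: step the RXC delay until all five RX bits sample the training
+ * word without errors, then for each bit sweep the RXD delay tap to find
+ * the first (tap_a) and last (tap_b) working setting and program the
+ * midpoint of that window. */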
+void trgmii_calibration_7623(void) {
|
|
+
|
|
+	unsigned int tap_a[5]; // minimum delay at which all bits are correct
+	unsigned int tap_b[5]; // maximum delay at which all bits are correct
|
|
+ unsigned int final_tap[5];
|
|
+ unsigned int bslip_en;
|
|
+ unsigned int rxc_step_size;
|
|
+ unsigned int rxd_step_size;
|
|
+ unsigned int read_data;
|
|
+ unsigned int tmp;
|
|
+ unsigned int rd_wd;
|
|
+ int i;
|
|
+ unsigned int err_cnt[5];
|
|
+ unsigned int init_toggle_data;
|
|
+ unsigned int err_flag[5];
|
|
+ unsigned int err_total_flag;
|
|
+ unsigned int training_word;
|
|
+ unsigned int rd_tap;
|
|
+
|
|
+ u32 TRGMII_7623_base;
|
|
+ u32 TRGMII_7623_RD_0;
|
|
+ u32 TRGMII_RD_1;
|
|
+ u32 TRGMII_RD_2;
|
|
+ u32 TRGMII_RD_3;
|
|
+ u32 TRGMII_RXCTL;
|
|
+ u32 TRGMII_RCK_CTRL;
|
|
+ u32 TRGMII_7530_base;
|
|
+ TRGMII_7623_base = 0xFB110300;
|
|
+ TRGMII_7623_RD_0 = TRGMII_7623_base + 0x10;
|
|
+ TRGMII_RCK_CTRL = TRGMII_7623_base;
|
|
+ rxd_step_size =0x1;
|
|
+ rxc_step_size =0x4;
|
|
+ init_toggle_data = 0x00000055;
|
|
+ training_word = 0x000000AC;
|
|
+
|
|
+ //printk("Calibration begin ........");
|
|
+ *(volatile u_long *)(TRGMII_7623_base +0x04) &= 0x3fffffff; // RX clock gating in MT7623
|
|
+ *(volatile u_long *)(TRGMII_7623_base +0x00) |= 0x80000000; // Assert RX reset in MT7623
|
|
+ *(volatile u_long *)(TRGMII_7623_base +0x78) |= 0x00002000; // Set TX OE edge in MT7623
|
|
+ *(volatile u_long *)(TRGMII_7623_base +0x04) |= 0xC0000000; // Disable RX clock gating in MT7623
|
|
+ *(volatile u_long *)(TRGMII_7623_base ) &= 0x7fffffff; // Release RX reset in MT7623
|
|
+ //printk("Check Point 1 .....\n");
|
|
+ for (i = 0 ; i<5 ; i++) {
|
|
+ *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) |= 0x80000000; // Set bslip_en = 1
|
|
+ }
|
|
+
|
|
+ //printk("Enable Training Mode in MT7530\n");
|
|
+ mii_mgr_read(0x1F,0x7A40,&read_data);
|
|
+ read_data |= 0xc0000000;
|
|
+ mii_mgr_write(0x1F,0x7A40,read_data); //Enable Training Mode in MT7530
|
|
+ err_total_flag = 0;
|
|
+ //printk("Adjust RXC delay in MT7623\n");
|
|
+ read_data =0x0;
|
|
+ while (err_total_flag == 0 && read_data != 0x68) {
|
|
+ //printk("2nd Enable EDGE CHK in MT7623\n");
|
|
+ /* Enable EDGE CHK in MT7623*/
|
|
+ for (i = 0 ; i<5 ; i++) {
|
|
+ tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
|
|
+ tmp |= 0x40000000;
|
|
+ *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
|
|
+ }
|
|
+ wait_loop();
|
|
+ err_total_flag = 1;
|
|
+ for (i = 0 ; i<5 ; i++) {
|
|
+ err_cnt[i] = ((*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) >> 8) & 0x0000000f;
|
|
+ rd_wd = ((*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) >> 16) & 0x000000ff;
|
|
+ //printk("ERR_CNT = %d, RD_WD =%x\n",err_cnt[i],rd_wd);
|
|
+ if ( err_cnt[i] !=0 ) {
|
|
+ err_flag[i] = 1;
|
|
+ }
|
|
+ else if (rd_wd != 0x55) {
|
|
+ err_flag[i] = 1;
|
|
+ }
|
|
+ else {
|
|
+ err_flag[i] = 0;
|
|
+ }
|
|
+ err_total_flag = err_flag[i] & err_total_flag;
|
|
+ }
|
|
+
|
|
+ //printk("2nd Disable EDGE CHK in MT7623\n");
|
|
+ /* Disable EDGE CHK in MT7623*/
|
|
+ for (i = 0 ; i<5 ; i++) {
|
|
+ tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
|
|
+ tmp |= 0x40000000;
|
|
+ *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
|
|
+ }
|
|
+ wait_loop();
|
|
+ //printk("2nd Disable EDGE CHK in MT7623\n");
|
|
+ /* Adjust RXC delay */
|
|
+ *(volatile u_long *)(TRGMII_7623_base +0x00) |= 0x80000000; // Assert RX reset in MT7623
|
|
+ *(volatile u_long *)(TRGMII_7623_base +0x04) &= 0x3fffffff; // RX clock gating in MT7623
|
|
+ read_data = *(volatile u_long *)(TRGMII_7623_base);
|
|
+ if (err_total_flag == 0) {
|
|
+ tmp = (read_data & 0x0000007f) + rxc_step_size;
|
|
+ //printk(" RXC delay = %d\n", tmp);
|
|
+ read_data >>= 8;
|
|
+ read_data &= 0xffffff80;
|
|
+ read_data |= tmp;
|
|
+ read_data <<=8;
|
|
+ read_data &= 0xffffff80;
|
|
+ read_data |=tmp;
|
|
+ *(volatile u_long *)(TRGMII_7623_base) = read_data;
|
|
+ }
|
|
+ read_data &=0x000000ff;
|
|
+ *(volatile u_long *)(TRGMII_7623_base ) &= 0x7fffffff; // Release RX reset in MT7623
|
|
+ *(volatile u_long *)(TRGMII_7623_base +0x04) |= 0xC0000000; // Disable RX clock gating in MT7623
|
|
+ for (i = 0 ; i<5 ; i++) {
|
|
+ *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = (*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) | 0x80000000; // Set bslip_en = ~bit_slip_en
|
|
+ }
|
|
+ }
|
|
+ //printk("Finish RXC Adjustment while loop\n");
|
|
+ //printk("Read RD_WD MT7623\n");
|
|
+ /* Read RD_WD MT7623*/
|
|
+ for (i = 0 ; i<5 ; i++) {
|
|
+ rd_tap=0;
|
|
+ while (err_flag[i] != 0) {
|
|
+ /* Enable EDGE CHK in MT7623*/
|
|
+ tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
|
|
+ tmp |= 0x40000000;
|
|
+ *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
|
|
+ wait_loop();
|
|
+ read_data = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
|
|
+ err_cnt[i] = (read_data >> 8) & 0x0000000f; // Read MT7623 Errcnt
|
|
+ rd_wd = (read_data >> 16) & 0x000000ff;
|
|
+ if (err_cnt[i] != 0 || rd_wd !=0x55){
|
|
+ err_flag [i] = 1;
|
|
+ }
|
|
+ else {
|
|
+ err_flag[i] =0;
|
|
+ }
|
|
+ /* Disable EDGE CHK in MT7623*/
|
|
+ *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) &= 0x4fffffff;
|
|
+ tmp |= 0x40000000;
|
|
+ *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
|
|
+ wait_loop();
|
|
+ //err_cnt[i] = ((read_data) >> 8) & 0x0000000f; // Read MT7623 Errcnt
|
|
+ if (err_flag[i] !=0) {
|
|
+ rd_tap = (read_data & 0x0000007f) + rxd_step_size; // Add RXD delay in MT7623
|
|
+ read_data = (read_data & 0xffffff80) | rd_tap;
|
|
+ *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = read_data;
|
|
+ tap_a[i] = rd_tap;
|
|
+ } else {
|
|
+ rd_tap = (read_data & 0x0000007f) + 4;
|
|
+ read_data = (read_data & 0xffffff80) | rd_tap;
|
|
+ *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = read_data;
|
|
+ }
|
|
+ //err_cnt[i] = (*(volatile u_long *)(TRGMII_7623_RD_0 + i*8) >> 8) & 0x0000000f; // Read MT7623 Errcnt
|
|
+
|
|
+ }
|
|
+ //printk("%dth bit Tap_a = %d\n", i, tap_a[i]);
|
|
+ }
|
|
+ //printk("Last While Loop\n");
|
|
+ for (i = 0 ; i<5 ; i++) {
|
|
+ //printk(" Bit%d\n", i);
|
|
+ rd_tap =0;
|
|
+ while ((err_cnt[i] == 0) && (rd_tap !=128)) {
|
|
+ read_data = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
|
|
+ rd_tap = (read_data & 0x0000007f) + rxd_step_size; // Add RXD delay in MT7623
|
|
+ read_data = (read_data & 0xffffff80) | rd_tap;
|
|
+ *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = read_data;
|
|
+ /* Enable EDGE CHK in MT7623*/
|
|
+ tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
|
|
+ tmp |= 0x40000000;
|
|
+ *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
|
|
+ wait_loop();
|
|
+ err_cnt[i] = ((*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) >> 8) & 0x0000000f; // Read MT7623 Errcnt
|
|
+ /* Disable EDGE CHK in MT7623*/
|
|
+ tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
|
|
+ tmp |= 0x40000000;
|
|
+ *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
|
|
+ wait_loop();
|
|
+ //err_cnt[i] = ((*(volatile u_long *)(TRGMII_7623_RD_0 + i*8)) >> 8) & 0x0000000f; // Read MT7623 Errcnt
|
|
+
|
|
+ }
|
|
+ tap_b[i] = rd_tap;// -rxd_step_size; // Record the max delay TAP_B
|
|
+ //printk("tap_b[%d] is %d \n", i,tap_b[i]);
|
|
+ final_tap[i] = (tap_a[i]+tap_b[i])/2; // Calculate RXD delay = (TAP_A + TAP_B)/2
|
|
+ //printk("%dth bit Final Tap = %d\n", i, final_tap[i]);
|
|
+ read_data = (read_data & 0xffffff80) | final_tap[i];
|
|
+ *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = read_data;
|
|
+ }
|
|
+// /*word alignment*/
|
|
+// mii_mgr_read(0x1F,0x7A50,&read_data);
|
|
+// read_data &= ~(0xff);
|
|
+// read_data |= 0xac;
|
|
+// mii_mgr_write(0x1F,0x7A50,read_data);
|
|
+// while (i <10) {
|
|
+// i++;
|
|
+// wait_loop();
|
|
+// }
|
|
+// /* Enable EDGE CHK in MT7623*/
|
|
+// for (i=0; i<5; i++) {
|
|
+// tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
|
|
+// tmp |= 0x40000000;
|
|
+// *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
|
|
+// wait_loop();
|
|
+// /* Disable EDGE CHK in MT7623*/
|
|
+// tmp = *(volatile u_long *)(TRGMII_7623_RD_0 + i*8);
|
|
+// tmp |= 0x40000000;
|
|
+// *(volatile u_long *)(TRGMII_7623_RD_0 + i*8) = tmp & 0x4fffffff;
|
|
+// wait_loop();
|
|
+// read_data = *(volatile u_long *)(TRGMII_7623_RD_0+i*8);
|
|
+// printk(" MT7623 training word = %x\n", read_data);
|
|
+// }
|
|
+
|
|
+
|
|
+ mii_mgr_read(0x1F,0x7A40,&read_data);
|
|
+ //printk(" MT7530 0x7A40 = %x\n", read_data);
|
|
+ read_data &=0x3fffffff;
|
|
+ mii_mgr_write(0x1F,0x7A40,read_data);
|
|
+}
|
|
+
|
|
+
|
|
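+/* Same tap search as trgmii_calibration_7623(), but for the MT7530 receive
+ * side: the MT7623 is put into training mode and the MT7530 RXC/RXD delay
+ * taps (registers 0x7a00 and up, accessed over MDIO) are swept to find the
+ * working window and set to its midpoint. */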
+void trgmii_calibration_7530(void){
|
|
+
|
|
+ unsigned int tap_a[5];
|
|
+ unsigned int tap_b[5];
|
|
+ unsigned int final_tap[5];
|
|
+ unsigned int bslip_en;
|
|
+ unsigned int rxc_step_size;
|
|
+ unsigned int rxd_step_size;
|
|
+ unsigned int read_data;
|
|
+ unsigned int tmp;
|
|
+ int i,j;
|
|
+ unsigned int err_cnt[5];
|
|
+ unsigned int rd_wd;
|
|
+ unsigned int init_toggle_data;
|
|
+ unsigned int err_flag[5];
|
|
+ unsigned int err_total_flag;
|
|
+ unsigned int training_word;
|
|
+ unsigned int rd_tap;
|
|
+
|
|
+ u32 TRGMII_7623_base;
|
|
+ u32 TRGMII_7530_RD_0;
|
|
+ u32 TRGMII_RD_1;
|
|
+ u32 TRGMII_RD_2;
|
|
+ u32 TRGMII_RD_3;
|
|
+ u32 TRGMII_RXCTL;
|
|
+ u32 TRGMII_RCK_CTRL;
|
|
+ u32 TRGMII_7530_base;
|
|
+ u32 TRGMII_7530_TX_base;
|
|
+ TRGMII_7623_base = 0xFB110300;
|
|
+ TRGMII_7530_base = 0x7A00;
|
|
+ TRGMII_7530_RD_0 = TRGMII_7530_base + 0x10;
|
|
+ TRGMII_RCK_CTRL = TRGMII_7623_base;
|
|
+ rxd_step_size = 0x1;
|
|
+ rxc_step_size = 0x8;
|
|
+ init_toggle_data = 0x00000055;
|
|
+ training_word = 0x000000AC;
|
|
+
|
|
+ TRGMII_7530_TX_base = TRGMII_7530_base + 0x50;
|
|
+
|
|
+ //printk("Calibration begin ........\n");
|
|
+ *(volatile u_long *)(TRGMII_7623_base + 0x40) |= 0x80000000;
|
|
+ mii_mgr_read(0x1F, 0x7a10, &read_data);
|
|
+ //printk("TRGMII_7530_RD_0 is %x\n", read_data);
|
|
+
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_base+0x04,&read_data);
|
|
+ read_data &= 0x3fffffff;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_base+0x04,read_data); // RX clock gating in MT7530
|
|
+
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_base+0x78,&read_data);
|
|
+ read_data |= 0x00002000;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_base+0x78,read_data); // Set TX OE edge in MT7530
|
|
+
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
|
|
+ read_data |= 0x80000000;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_base,read_data); // Assert RX reset in MT7530
|
|
+
|
|
+
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
|
|
+ read_data &= 0x7fffffff;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_base,read_data); // Release RX reset in MT7530
|
|
+
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_base+0x04,&read_data);
|
|
+ read_data |= 0xC0000000;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_base+0x04,read_data); // Disable RX clock gating in MT7530
|
|
+
|
|
+ //printk("Enable Training Mode in MT7623\n");
|
|
+ /*Enable Training Mode in MT7623*/
|
|
+ *(volatile u_long *)(TRGMII_7623_base + 0x40) &= 0xbfffffff;
|
|
+ *(volatile u_long *)(TRGMII_7623_base + 0x40) |= 0x80000000;
|
|
+ *(volatile u_long *)(TRGMII_7623_base + 0x78) &= 0xfffff0ff;
|
|
+ *(volatile u_long *)(TRGMII_7623_base + 0x78) |= 0x00000400;
|
|
+
|
|
+ err_total_flag =0;
|
|
+ //printk("Adjust RXC delay in MT7530\n");
|
|
+ read_data =0x0;
|
|
+ while (err_total_flag == 0 && (read_data != 0x68)) {
|
|
+ //printk("2nd Enable EDGE CHK in MT7530\n");
|
|
+ /* Enable EDGE CHK in MT7530*/
|
|
+ for (i = 0 ; i<5 ; i++) {
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
|
|
+ read_data |= 0x40000000;
|
|
+ read_data &= 0x4fffffff;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
|
|
+ wait_loop();
|
|
+ //printk("2nd Disable EDGE CHK in MT7530\n");
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&err_cnt[i]);
|
|
+ //printk("***** MT7530 %dth bit ERR_CNT =%x\n",i, err_cnt[i]);
|
|
+ //printk("MT7530 %dth bit ERR_CNT =%x\n",i, err_cnt[i]);
|
|
+ err_cnt[i] >>= 8;
|
|
+ err_cnt[i] &= 0x0000ff0f;
|
|
+ rd_wd = err_cnt[i] >> 8;
|
|
+ rd_wd &= 0x000000ff;
|
|
+ err_cnt[i] &= 0x0000000f;
|
|
+ //mii_mgr_read(0x1F,0x7a10,&read_data);
|
|
+ if ( err_cnt[i] !=0 ) {
|
|
+ err_flag[i] = 1;
|
|
+ }
|
|
+ else if (rd_wd != 0x55) {
|
|
+ err_flag[i] = 1;
|
|
+ } else {
|
|
+ err_flag[i] = 0;
|
|
+ }
|
|
+ if (i==0) {
|
|
+ err_total_flag = err_flag[i];
|
|
+ } else {
|
|
+ err_total_flag = err_flag[i] & err_total_flag;
|
|
+ }
|
|
+ /* Disable EDGE CHK in MT7530*/
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
|
|
+ read_data |= 0x40000000;
|
|
+ read_data &= 0x4fffffff;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
|
|
+ wait_loop();
|
|
+ }
|
|
+ /*Adjust RXC delay*/
|
|
+ if (err_total_flag ==0) {
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
|
|
+ read_data |= 0x80000000;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_base,read_data); // Assert RX reset in MT7530
|
|
+
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_base+0x04,&read_data);
|
|
+ read_data &= 0x3fffffff;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_base+0x04,read_data); // RX clock gating in MT7530
|
|
+
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
|
|
+ tmp = read_data;
|
|
+ tmp &= 0x0000007f;
|
|
+ tmp += rxc_step_size;
|
|
+ //printk("Current rxc delay = %d\n", tmp);
|
|
+ read_data &= 0xffffff80;
|
|
+ read_data |= tmp;
|
|
+ mii_mgr_write (0x1F,TRGMII_7530_base,read_data);
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
|
|
+ //printk("Current RXC delay = %x\n", read_data);
|
|
+
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_base,&read_data);
|
|
+ read_data &= 0x7fffffff;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_base,read_data); // Release RX reset in MT7530
|
|
+
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_base+0x04,&read_data);
|
|
+ read_data |= 0xc0000000;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_base+0x04,read_data); // Disable RX clock gating in MT7530
|
|
+ }
|
|
+ read_data = tmp;
|
|
+ }
|
|
+ //printk("RXC delay is %d\n", tmp);
|
|
+ //printk("Finish RXC Adjustment while loop\n");
|
|
+
|
|
+ //printk("Read RD_WD MT7530\n");
|
|
+ /* Read RD_WD MT7530*/
|
|
+ for (i = 0 ; i<5 ; i++) {
|
|
+ rd_tap = 0;
|
|
+ while (err_flag[i] != 0) {
|
|
+ /* Enable EDGE CHK in MT7530*/
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
|
|
+ read_data |= 0x40000000;
|
|
+ read_data &= 0x4fffffff;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
|
|
+ wait_loop();
|
|
+ err_cnt[i] = (read_data >> 8) & 0x0000000f;
|
|
+ rd_wd = (read_data >> 16) & 0x000000ff;
|
|
+ //printk("##### %dth bit ERR_CNT = %x RD_WD =%x ######\n", i, err_cnt[i],rd_wd);
|
|
+ if (err_cnt[i] != 0 || rd_wd !=0x55){
|
|
+ err_flag [i] = 1;
|
|
+ }
|
|
+ else {
|
|
+ err_flag[i] =0;
|
|
+ }
|
|
+ if (err_flag[i] !=0 ) {
|
|
+ rd_tap = (read_data & 0x0000007f) + rxd_step_size; // Add RXD delay in MT7530
|
|
+ read_data = (read_data & 0xffffff80) | rd_tap;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
|
|
+ tap_a[i] = rd_tap;
|
|
+ } else {
|
|
+ tap_a[i] = (read_data & 0x0000007f); // Record the min delay TAP_A
|
|
+ rd_tap = tap_a[i] + 0x4;
|
|
+ read_data = (read_data & 0xffffff80) | rd_tap ;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
|
|
+ }
|
|
+
|
|
+ /* Disable EDGE CHK in MT7530*/
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
|
|
+ read_data |= 0x40000000;
|
|
+ read_data &= 0x4fffffff;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
|
|
+ wait_loop();
|
|
+
|
|
+ }
|
|
+ //printk("%dth bit Tap_a = %d\n", i, tap_a[i]);
|
|
+ }
|
|
+ //printk("Last While Loop\n");
|
|
+ for (i = 0 ; i<5 ; i++) {
|
|
+ rd_tap =0;
|
|
+ while (err_cnt[i] == 0 && (rd_tap!=128)) {
|
|
+ /* Enable EDGE CHK in MT7530*/
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
|
|
+ read_data |= 0x40000000;
|
|
+ read_data &= 0x4fffffff;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
|
|
+ wait_loop();
|
|
+ err_cnt[i] = (read_data >> 8) & 0x0000000f;
|
|
+ //rd_tap = (read_data & 0x0000007f) + 0x4; // Add RXD delay in MT7530
|
|
+ if (err_cnt[i] == 0 && (rd_tap!=128)) {
|
|
+ rd_tap = (read_data & 0x0000007f) + rxd_step_size; // Add RXD delay in MT7530
|
|
+ read_data = (read_data & 0xffffff80) | rd_tap;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
|
|
+ }
|
|
+ /* Disable EDGE CHK in MT7530*/
|
|
+ mii_mgr_read(0x1F,TRGMII_7530_RD_0+i*8,&read_data);
|
|
+ read_data |= 0x40000000;
|
|
+ read_data &= 0x4fffffff;
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
|
|
+ wait_loop();
|
|
+ }
|
|
+ tap_b[i] = rd_tap;// - rxd_step_size; // Record the max delay TAP_B
|
|
+ //printk("%dth bit Tap_b = %d, ERR_CNT=%d\n", i, tap_b[i],err_cnt[i]);
|
|
+ final_tap[i] = (tap_a[i]+tap_b[i])/2; // Calculate RXD delay = (TAP_A + TAP_B)/2
|
|
+ //printk("%dth bit Final Tap = %d\n", i, final_tap[i]);
|
|
+
|
|
+ read_data = ( read_data & 0xffffff80) | final_tap[i];
|
|
+ mii_mgr_write(0x1F,TRGMII_7530_RD_0+i*8,read_data);
|
|
+ }
|
|
+ *(volatile u_long *)(TRGMII_7623_base + 0x40) &=0x3fffffff;
|
|
+
|
|
+}
|
|
+
|
|
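+/* Fixed TRGMII RX delay taps for the 325MHz case: MT7623-side taps are
+ * written directly, MT7530-side taps over MDIO (the same registers the
+ * calibration routines above tune dynamically). */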
+void set_trgmii_325_delay_setting(void)
|
|
+{
|
|
+	/*mt7623 side*/
|
|
+ *(volatile u_long *)(0xfb110300) = 0x80020050;
|
|
+ *(volatile u_long *)(0xfb110304) = 0x00980000;
|
|
+ *(volatile u_long *)(0xfb110300) = 0x40020050;
|
|
+ *(volatile u_long *)(0xfb110304) = 0xc0980000;
|
|
+ *(volatile u_long *)(0xfb110310) = 0x00000028;
|
|
+ *(volatile u_long *)(0xfb110318) = 0x0000002e;
|
|
+ *(volatile u_long *)(0xfb110320) = 0x0000002d;
|
|
+ *(volatile u_long *)(0xfb110328) = 0x0000002b;
|
|
+ *(volatile u_long *)(0xfb110330) = 0x0000002a;
|
|
+ *(volatile u_long *)(0xfb110340) = 0x00020000;
|
|
+ /*mt7530 side*/
|
|
+ mii_mgr_write(31, 0x7a00, 0x10);
|
|
+ mii_mgr_write(31, 0x7a10, 0x23);
|
|
+ mii_mgr_write(31, 0x7a18, 0x27);
|
|
+ mii_mgr_write(31, 0x7a20, 0x24);
|
|
+ mii_mgr_write(31, 0x7a28, 0x29);
|
|
+ mii_mgr_write(31, 0x7a30, 0x24);
|
|
+
|
|
+}
|
|
+
|
|
+
|
|
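+/* Bring up the GE1 link to the MT7530 gigabit switch: set the MT7623
+ * pinmux, reset the switch, force GE1 to 1000M/FD, program the MT7530 core
+ * PLL for the detected 25/40MHz crystal, set the TX driving/delay values
+ * and the TRGMII clock, then re-enable the embedded PHYs with EEE disabled
+ * and auto-negotiation restarted. */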
+void setup_internal_gsw(void)
|
|
+{
|
|
+ u32 i;
|
|
+ u32 regValue;
|
|
+ u32 xtal_mode;
|
|
+
|
|
+ mt7623_pinmux_set(); /* TODO: just for bring up, should be removed!!! */
|
|
+
|
|
+#if 0
|
|
+ /* GE1: RGMII mode setting */
|
|
+ *(volatile u_long *)(0xfb110300) = 0x80020000;
|
|
+ *(volatile u_long *)(0xfb110304) = 0x00980000;
|
|
+ *(volatile u_long *)(0xfb110300) = 0x40020000;
|
|
+ *(volatile u_long *)(0xfb110304) = 0xc0980000;
|
|
+ *(volatile u_long *)(0xfb110310) = 0x00000041;
|
|
+ *(volatile u_long *)(0xfb110318) = 0x00000044;
|
|
+ *(volatile u_long *)(0xfb110320) = 0x00000043;
|
|
+ *(volatile u_long *)(0xfb110328) = 0x00000042;
|
|
+ *(volatile u_long *)(0xfb110330) = 0x00000042;
|
|
+ *(volatile u_long *)(0xfb110340) = 0x00020000;
|
|
+ *(volatile u_long *)(0xfb110390) &= 0xfffffff8; //RGMII mode
|
|
+#else
|
|
+ /* GE1: TRGMII mode setting */
|
|
+ *(volatile u_long *)(0xfb110390) |= 0x00000002; //TRGMII mode
|
|
+#endif
|
|
+
|
|
+ /*Todo: Hardware reset Switch*/
|
|
+ /*Hardware reset Switch*/
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+ regValue = *(volatile u_long *)(0xfb00000c);
|
|
+	/*MT7530 Reset. Flows for MT7623 and MT7683 are both executed.*/
|
|
+ /* Should Modify this section if EFUSE is ready*/
|
|
+ /*For MT7683 reset MT7530*/
|
|
+ if(!(regValue & (1<<16)))
|
|
+ {
|
|
+ *(volatile u_long *)(0xf0005520) &= ~(1<<1);
|
|
+ udelay(1000);
|
|
+ *(volatile u_long *)(0xf0005520) |= (1<<1);
|
|
+ mdelay(100);
|
|
+ }
|
|
+ //printk("Assert MT7623 RXC reset\n");
|
|
+ *(volatile u_long *)(0xfb110300) |= 0x80000000; // Assert MT7623 RXC reset
|
|
+ /*For MT7623 reset MT7530*/
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x34) |= (0x1 << 2);
|
|
+ udelay(1000);
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x34) &= ~(0x1 << 2);
|
|
+ mdelay(100);
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
|
|
+ for(i=0;i<=4;i++)
|
|
+ {
|
|
+ //turn off PHY
|
|
+ mii_mgr_read(i, 0x0 ,®Value);
|
|
+ regValue |= (0x1<<11);
|
|
+ mii_mgr_write(i, 0x0, regValue);
|
|
+ }
|
|
+ mii_mgr_write(31, 0x7000, 0x3); //reset switch
|
|
+ udelay(100);
|
|
+
|
|
+#if defined (CONFIG_MT7621_ASIC) || defined (CONFIG_ARCH_MT7623)
|
|
+#if 0
|
|
+ if((sysRegRead(0xbe00000c)&0xFFFF)==0x0101) {
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e30b);//(GE1, Force 1000M/FD, FC ON)
|
|
+ mii_mgr_write(31, 0x3600, 0x5e30b);
|
|
+ } else
|
|
+#endif
|
|
+ {
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2105e33b);//(GE1, Force 1000M/FD, FC ON)
|
|
+ mii_mgr_write(31, 0x3600, 0x5e33b);
|
|
+ mii_mgr_read(31, 0x3600 ,®Value);
|
|
+ }
|
|
+#endif
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x00008000);//(GE2, Link down)
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
|
|
+ //regValue = 0x117ccf; //Enable Port 6, P5 as GMAC5, P5 disable*/
|
|
+ mii_mgr_read(31, 0x7804 ,®Value);
|
|
+ regValue &= ~(1<<8); //Enable Port 6
|
|
+ regValue |= (1<<6); //Disable Port 5
|
|
+ regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY
|
|
+
|
|
+#if defined (CONFIG_RAETH_GMAC2)
|
|
+ //RGMII2=Normal mode
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
|
|
+
|
|
+ //GMAC2= RGMII mode
|
|
+ *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
|
|
+	mii_mgr_write(31, 0x3500, 0x56300); //MT7530 P5 AN (possibly redundant)
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(GE2, auto-polling)
|
|
+ enable_auto_negotiate(0);//set polling address
|
|
+
|
|
+ /* set MT7530 Port 5 to PHY 0/4 mode */
|
|
+#if defined (CONFIG_GE_RGMII_INTERNAL_P0_AN)
|
|
+ regValue &= ~((1<<13)|(1<<6));
|
|
+ regValue |= ((1<<7)|(1<<16)|(1<<20));
|
|
+#elif defined (CONFIG_GE_RGMII_INTERNAL_P4_AN)
|
|
+ regValue &= ~((1<<13)|(1<<6)|(1<<20));
|
|
+ regValue |= ((1<<7)|(1<<16));
|
|
+#endif
|
|
+ /*Set MT7530 phy direct access mode**/
|
|
+ regValue &= ~(1<<5);
|
|
+
|
|
+ //sysRegWrite(GDMA2_FWD_CFG, 0x20710000);
|
|
+#endif
|
|
+ regValue |= (1<<16);//change HW-TRAP
|
|
+ printk("change HW-TRAP to 0x%x\n",regValue);
|
|
+ mii_mgr_write(31, 0x7804 ,regValue);
|
|
+#endif
|
|
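+	/* Read the MT7530 HW trap (bits 10:9 of register 0x7800) to find the
+	 * reference crystal: 0x3 = 25MHz, 0x2 = 40MHz, anything else = 20MHz. */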
+ mii_mgr_read(31, 0x7800, ®Value);
|
|
+ regValue = (regValue >> 9) & 0x3;
|
|
+ if(regValue == 0x3)//25Mhz Xtal
|
|
+ xtal_mode = 1;
|
|
+ else if(regValue == 0x2) //40Mhz
|
|
+ xtal_mode = 2;
|
|
+ else
|
|
+ xtal_mode = 3;
|
|
+
|
|
+ if(xtal_mode == 1) { //25Mhz Xtal
|
|
+ /* do nothing */
|
|
+	} else if (xtal_mode == 2) { //40MHz
|
|
+ mii_mgr_write(0, 13, 0x1f); // disable MT7530 core clock
|
|
+ mii_mgr_write(0, 14, 0x410);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x0);
|
|
+
|
|
+ mii_mgr_write(0, 13, 0x1f); // disable MT7530 PLL
|
|
+ mii_mgr_write(0, 14, 0x40d);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x2020);
|
|
+
|
|
+ mii_mgr_write(0, 13, 0x1f); // for MT7530 core clock = 500Mhz
|
|
+ mii_mgr_write(0, 14, 0x40e);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x119);
|
|
+
|
|
+ mii_mgr_write(0, 13, 0x1f); // enable MT7530 PLL
|
|
+ mii_mgr_write(0, 14, 0x40d);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x2820);
|
|
+
|
|
+ udelay(20); //suggest by CD
|
|
+
|
|
+ mii_mgr_write(0, 13, 0x1f); // enable MT7530 core clock
|
|
+ mii_mgr_write(0, 14, 0x410);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+	} else { //20MHz
|
|
+ /*TODO*/
|
|
+ }
|
|
+
|
|
+#if defined (CONFIG_GE1_TRGMII_FORCE_1200) && defined (CONFIG_MT7621_ASIC)
|
|
+ mii_mgr_write(0, 14, 0x3); /*TRGMII*/
|
|
+#else
|
|
+ mii_mgr_write(0, 14, 0x1); /*RGMII*/
|
|
+/* set MT7530 central align */
|
|
+ mii_mgr_read(31, 0x7830, ®Value);
|
|
+ regValue &= ~1;
|
|
+ regValue |= 1<<1;
|
|
+ mii_mgr_write(31, 0x7830, regValue);
|
|
+
|
|
+ mii_mgr_read(31, 0x7a40, ®Value);
|
|
+ regValue &= ~(1<<30);
|
|
+ mii_mgr_write(31, 0x7a40, regValue);
|
|
+
|
|
+ regValue = 0x855;
|
|
+ mii_mgr_write(31, 0x7a78, regValue);
|
|
+
|
|
+#endif
|
|
+ mii_mgr_write(31, 0x7b00, 0x104); //delay setting for 10/1000M
|
|
+ mii_mgr_write(31, 0x7b04, 0x10); //delay setting for 10/1000M
|
|
+
|
|
+ /*Tx Driving*/
|
|
+ mii_mgr_write(31, 0x7a54, 0x88); //lower GE1 driving
|
|
+ mii_mgr_write(31, 0x7a5c, 0x88); //lower GE1 driving
|
|
+ mii_mgr_write(31, 0x7a64, 0x88); //lower GE1 driving
|
|
+ mii_mgr_write(31, 0x7a6c, 0x88); //lower GE1 driving
|
|
+ mii_mgr_write(31, 0x7a74, 0x88); //lower GE1 driving
|
|
+ mii_mgr_write(31, 0x7a7c, 0x88); //lower GE1 driving
|
|
+ mii_mgr_write(31, 0x7810, 0x11); //lower GE2 driving
|
|
+ /*Set MT7623/MT7683 TX Driving*/
|
|
+ *(volatile u_long *)(0xfb110354) = 0x88;
|
|
+ *(volatile u_long *)(0xfb11035c) = 0x88;
|
|
+ *(volatile u_long *)(0xfb110364) = 0x88;
|
|
+ *(volatile u_long *)(0xfb11036c) = 0x88;
|
|
+ *(volatile u_long *)(0xfb110374) = 0x88;
|
|
+ *(volatile u_long *)(0xfb11037c) = 0x88;
|
|
+#if defined (CONFIG_GE2_RGMII_AN)
|
|
+ *(volatile u_long *)(0xf0005f00) = 0xe00; //Set GE2 driving and slew rate
|
|
+#else
|
|
+ *(volatile u_long *)(0xf0005f00) = 0xa00; //Set GE2 driving and slew rate
|
|
+#endif
|
|
+ *(volatile u_long *)(0xf00054c0) = 0x5; //set GE2 TDSEL
|
|
+ *(volatile u_long *)(0xf0005ed0) = 0; //set GE2 TUNE
|
|
+
|
|
+ /* TRGMII Clock */
|
|
+// printk("Set TRGMII mode clock stage 1\n");
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x404);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ if (xtal_mode == 1){ //25MHz
|
|
+#if defined (CONFIG_GE1_TRGMII_FORCE_2900)
|
|
+ mii_mgr_write(0, 14, 0x1d00); // 362.5MHz
|
|
+#elif defined (CONFIG_GE1_TRGMII_FORCE_2600)
|
|
+ mii_mgr_write(0, 14, 0x1a00); // 325MHz
|
|
+#elif defined (CONFIG_GE1_TRGMII_FORCE_2000)
|
|
+ mii_mgr_write(0, 14, 0x1400); //250MHz
|
|
+#elif defined (CONFIG_GE1_RGMII_FORCE_1000)
|
|
+ mii_mgr_write(0, 14, 0x00a0); //125MHz
|
|
+#endif
|
|
+ }else if(xtal_mode == 2){//40MHz
|
|
+#if defined (CONFIG_GE1_TRGMII_FORCE_2900)
|
|
+ mii_mgr_write(0, 14, 0x1220); // 362.5MHz
|
|
+#elif defined (CONFIG_GE1_TRGMII_FORCE_2600)
|
|
+ mii_mgr_write(0, 14, 0x1040); // 325MHz
|
|
+#elif defined (CONFIG_GE1_TRGMII_FORCE_2000)
|
|
+ mii_mgr_write(0, 14, 0x0c80); //250MHz
|
|
+#elif defined (CONFIG_GE1_RGMII_FORCE_1000)
|
|
+ mii_mgr_write(0, 14, 0x0640); //125MHz
|
|
+#endif
|
|
+ }
|
|
+// printk("Set TRGMII mode clock stage 2\n");
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x405);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x0);
|
|
+
|
|
+// printk("Set TRGMII mode clock stage 3\n");
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x409);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x0087);
|
|
+
|
|
+// printk("Set TRGMII mode clock stage 4\n");
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x40a);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x0087);
|
|
+
|
|
+// printk("Set TRGMII mode clock stage 5\n");
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x403);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x1800);
|
|
+
|
|
+// printk("Set TRGMII mode clock stage 6\n");
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x403);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x1c00);
|
|
+
|
|
+// printk("Set TRGMII mode clock stage 7\n");
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x401);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0xc020);
|
|
+
|
|
+// printk("Set TRGMII mode clock stage 8\n");
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x406);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0xa030);
|
|
+
|
|
+// printk("Set TRGMII mode clock stage 9\n");
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x406);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0xa038);
|
|
+
|
|
+ udelay(120); // for MT7623 bring up test
|
|
+
|
|
+// printk("Set TRGMII mode clock stage 10\n");
|
|
+ mii_mgr_write(0, 13, 0x1f);
|
|
+ mii_mgr_write(0, 14, 0x410);
|
|
+ mii_mgr_write(0, 13, 0x401f);
|
|
+ mii_mgr_write(0, 14, 0x3);
|
|
+
|
|
+// printk("Set TRGMII mode clock stage 11\n");
|
|
+
|
|
+ mii_mgr_read(31, 0x7830 ,®Value);
|
|
+ regValue &=0xFFFFFFFC;
|
|
+ regValue |=0x00000001;
|
|
+ mii_mgr_write(31, 0x7830, regValue);
|
|
+
|
|
+// printk("Set TRGMII mode clock stage 12\n");
|
|
+ mii_mgr_read(31, 0x7a40 ,®Value);
|
|
+ regValue &= ~(0x1<<30);
|
|
+ regValue &= ~(0x1<<28);
|
|
+ mii_mgr_write(31, 0x7a40, regValue);
|
|
+
|
|
+ //mii_mgr_write(31, 0x7a78, 0x855);
|
|
+ mii_mgr_write(31, 0x7a78, 0x55);
|
|
+// printk(" Adjust MT7530 TXC delay\n");
|
|
+ udelay(100); // for mt7623 bring up test
|
|
+
|
|
+// printk(" Release MT7623 RXC Reset\n");
|
|
+ *(volatile u_long *)(0xfb110300) &= 0x7fffffff; // Release MT7623 RXC reset
|
|
+ //disable EEE
|
|
+ for(i=0;i<=4;i++)
|
|
+ {
|
|
+ mii_mgr_write(i, 13, 0x7);
|
|
+ mii_mgr_write(i, 14, 0x3C);
|
|
+ mii_mgr_write(i, 13, 0x4007);
|
|
+ mii_mgr_write(i, 14, 0x0);
|
|
+ }
|
|
+
|
|
+ //Disable EEE 10Base-Te:
|
|
+ for(i=0;i<=4;i++)
|
|
+ {
|
|
+ mii_mgr_write(i, 13, 0x1f);
|
|
+ mii_mgr_write(i, 14, 0x027b);
|
|
+ mii_mgr_write(i, 13, 0x401f);
|
|
+ mii_mgr_write(i, 14, 0x1177);
|
|
+ }
|
|
+
|
|
+ for(i=0;i<=4;i++)
|
|
+ {
|
|
+ //turn on PHY
|
|
+ mii_mgr_read(i, 0x0 ,®Value);
|
|
+ regValue &= ~(0x1<<11);
|
|
+ mii_mgr_write(i, 0x0, regValue);
|
|
+ }
|
|
+
|
|
+ for(i=0;i<=4;i++) {
|
|
+ mii_mgr_read(i, 4, ®Value);
|
|
+ regValue |= (3<<7); //turn on 100Base-T Advertisement
|
|
+ mii_mgr_write(i, 4, regValue);
|
|
+
|
|
+ mii_mgr_read(i, 9, ®Value);
|
|
+ regValue |= (3<<8); //turn on 1000Base-T Advertisement
|
|
+ mii_mgr_write(i, 9, regValue);
|
|
+
|
|
+ //restart AN
|
|
+ mii_mgr_read(i, 0, ®Value);
|
|
+ regValue |= (1 << 9);
|
|
+ mii_mgr_write(i, 0, regValue);
|
|
+ }
|
|
+
|
|
+ mii_mgr_read(31, 0x7808 ,®Value);
|
|
+ regValue |= (3<<16); //Enable INTR
|
|
+ mii_mgr_write(31, 0x7808 ,regValue);
|
|
+}
|
|
+
|
|
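+/* Power-up sequence for the MT7623 ethernet/HIF subsystems: enable ETHPLL
+ * and TRGPLL, then walk the ETHDMASYS and HIFSYS power domains through
+ * PWR_ON, ISO release, SRAM power-up and reset release, and finally take
+ * the MT7530 out of reset. */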
+void mt7623_ethifsys_init(void)
|
|
+{
|
|
+#define TRGPLL_CON0 (0xF0209280)
|
|
+#define TRGPLL_CON1 (0xF0209284)
|
|
+#define TRGPLL_CON2 (0xF0209288)
|
|
+#define TRGPLL_PWR_CON0 (0xF020928C)
|
|
+#define ETHPLL_CON0 (0xF0209290)
|
|
+#define ETHPLL_CON1 (0xF0209294)
|
|
+#define ETHPLL_CON2 (0xF0209298)
|
|
+#define ETHPLL_PWR_CON0 (0xF020929C)
|
|
+#define ETH_PWR_CON (0xF00062A0)
|
|
+#define HIF_PWR_CON (0xF00062A4)
|
|
+
|
|
+ u32 temp, pwr_ack_status;
|
|
+ /*=========================================================================*/
|
|
+ /* Enable ETHPLL & TRGPLL*/
|
|
+ /*=========================================================================*/
|
|
+ /* xPLL PWR ON*/
|
|
+ temp = sysRegRead(ETHPLL_PWR_CON0);
|
|
+ sysRegWrite(ETHPLL_PWR_CON0, temp | 0x1);
|
|
+
|
|
+ temp = sysRegRead(TRGPLL_PWR_CON0);
|
|
+ sysRegWrite(TRGPLL_PWR_CON0, temp | 0x1);
|
|
+
|
|
+ udelay(5); /* wait for xPLL_PWR_ON ready (min delay is 1us)*/
|
|
+
|
|
+ /* xPLL ISO Disable*/
|
|
+ temp = sysRegRead(ETHPLL_PWR_CON0);
|
|
+ sysRegWrite(ETHPLL_PWR_CON0, temp & ~0x2);
|
|
+
|
|
+ temp = sysRegRead(TRGPLL_PWR_CON0);
|
|
+ sysRegWrite(TRGPLL_PWR_CON0, temp & ~0x2);
|
|
+
|
|
+ /* xPLL Frequency Set*/
|
|
+ temp = sysRegRead(ETHPLL_CON0);
|
|
+ sysRegWrite(ETHPLL_CON0, temp | 0x1);
|
|
+#if defined (CONFIG_GE1_TRGMII_FORCE_2900)
|
|
+ temp = sysRegRead(TRGPLL_CON0);
|
|
+ sysRegWrite(TRGPLL_CON0, temp | 0x1);
|
|
+#elif defined (CONFIG_GE1_TRGMII_FORCE_2600)
|
|
+ sysRegWrite(TRGPLL_CON1, 0xB2000000);
|
|
+ temp = sysRegRead(TRGPLL_CON0);
|
|
+ sysRegWrite(TRGPLL_CON0, temp | 0x1);
|
|
+#elif defined (CONFIG_GE1_TRGMII_FORCE_2000)
|
|
+ sysRegWrite(TRGPLL_CON1, 0xCCEC4EC5);
|
|
+ sysRegWrite(TRGPLL_CON0, 0x121);
|
|
+#endif
|
|
+ udelay(40); /* wait for PLL stable (min delay is 20us)*/
|
|
+
|
|
+
|
|
+ /*=========================================================================*/
|
|
+ /* Power on ETHDMASYS and HIFSYS*/
|
|
+ /*=========================================================================*/
|
|
+ /* Power on ETHDMASYS*/
|
|
+ sysRegWrite(0xF0006000, 0x0b160001);
|
|
+ pwr_ack_status = (sysRegRead(ETH_PWR_CON) & 0x0000f000) >> 12;
|
|
+
|
|
+ if(pwr_ack_status == 0x0) {
|
|
+		printk("ETH is already powered on, power-on flow will be skipped...\n");
|
|
+ }else {
|
|
+ temp = sysRegRead(ETH_PWR_CON) ;
|
|
+ sysRegWrite(ETH_PWR_CON, temp | 0x4); /* PWR_ON*/
|
|
+ temp = sysRegRead(ETH_PWR_CON) ;
|
|
+ sysRegWrite(ETH_PWR_CON, temp | 0x8); /* PWR_ON_S*/
|
|
+
|
|
+ udelay(5); /* wait power settle time (min delay is 1us)*/
|
|
+
|
|
+ temp = sysRegRead(ETH_PWR_CON) ;
|
|
+ sysRegWrite(ETH_PWR_CON, temp & ~0x10); /* PWR_CLK_DIS*/
|
|
+ temp = sysRegRead(ETH_PWR_CON) ;
|
|
+ sysRegWrite(ETH_PWR_CON, temp & ~0x2); /* PWR_ISO*/
|
|
+ temp = sysRegRead(ETH_PWR_CON) ;
|
|
+ sysRegWrite(ETH_PWR_CON, temp & ~0x100); /* SRAM_PDN 0*/
|
|
+ temp = sysRegRead(ETH_PWR_CON) ;
|
|
+ sysRegWrite(ETH_PWR_CON, temp & ~0x200); /* SRAM_PDN 1*/
|
|
+ temp = sysRegRead(ETH_PWR_CON) ;
|
|
+ sysRegWrite(ETH_PWR_CON, temp & ~0x400); /* SRAM_PDN 2*/
|
|
+ temp = sysRegRead(ETH_PWR_CON) ;
|
|
+ sysRegWrite(ETH_PWR_CON, temp & ~0x800); /* SRAM_PDN 3*/
|
|
+
|
|
+ udelay(5); /* wait SRAM settle time (min delay is 1Us)*/
|
|
+
|
|
+ temp = sysRegRead(ETH_PWR_CON) ;
|
|
+ sysRegWrite(ETH_PWR_CON, temp | 0x1); /* PWR_RST_B*/
|
|
+ }
|
|
+
|
|
+ /* Power on HIFSYS*/
|
|
+ pwr_ack_status = (sysRegRead(HIF_PWR_CON) & 0x0000f000) >> 12;
|
|
+ if(pwr_ack_status == 0x0) {
|
|
+		printk("HIF is already powered on, power-on flow will be skipped...\n");
|
|
+ }
|
|
+ else {
|
|
+ temp = sysRegRead(HIF_PWR_CON) ;
|
|
+ sysRegWrite(HIF_PWR_CON, temp | 0x4); /* PWR_ON*/
|
|
+ temp = sysRegRead(HIF_PWR_CON) ;
|
|
+ sysRegWrite(HIF_PWR_CON, temp | 0x8); /* PWR_ON_S*/
|
|
+
|
|
+ udelay(5); /* wait power settle time (min delay is 1us)*/
|
|
+
|
|
+ temp = sysRegRead(HIF_PWR_CON) ;
|
|
+ sysRegWrite(HIF_PWR_CON, temp & ~0x10); /* PWR_CLK_DIS*/
|
|
+ temp = sysRegRead(HIF_PWR_CON) ;
|
|
+ sysRegWrite(HIF_PWR_CON, temp & ~0x2); /* PWR_ISO*/
|
|
+ temp = sysRegRead(HIF_PWR_CON) ;
|
|
+ sysRegWrite(HIF_PWR_CON, temp & ~0x100); /* SRAM_PDN 0*/
|
|
+ temp = sysRegRead(HIF_PWR_CON) ;
|
|
+ sysRegWrite(HIF_PWR_CON, temp & ~0x200); /* SRAM_PDN 1*/
|
|
+ temp = sysRegRead(HIF_PWR_CON) ;
|
|
+ sysRegWrite(HIF_PWR_CON, temp & ~0x400); /* SRAM_PDN 2*/
|
|
+ temp = sysRegRead(HIF_PWR_CON) ;
|
|
+ sysRegWrite(HIF_PWR_CON, temp & ~0x800); /* SRAM_PDN 3*/
|
|
+
|
|
+ udelay(5); /* wait SRAM settle time (min delay is 1Us)*/
|
|
+
|
|
+ temp = sysRegRead(HIF_PWR_CON) ;
|
|
+ sysRegWrite(HIF_PWR_CON, temp | 0x1); /* PWR_RST_B*/
|
|
+ }
|
|
+
|
|
+ /* Release mt7530 reset */
|
|
+ temp = le32_to_cpu(*(volatile u_long *)(0xfb000034));
|
|
+ temp &= ~(BIT(2));
|
|
+ *(volatile u_long *)(0xfb000034) = temp;
|
|
+}
|
|
+#endif
|
|
+
|
|
+/**
|
|
+ * ra2882eth_init - Module Init code
|
|
+ *
|
|
+ * Called by kernel to register net_device
|
|
+ *
|
|
+ */
|
|
+
|
|
+static int fe_probe(struct platform_device *pdev)
|
|
+{
|
|
+ int ret;
|
|
+ struct net_device *dev = alloc_etherdev(sizeof(END_DEVICE));
|
|
+
|
|
+ fe_irq = platform_get_irq(pdev, 0);
|
|
+
|
|
+#ifdef CONFIG_RALINK_VISTA_BASIC
|
|
+ int sw_id=0;
|
|
+ mii_mgr_read(29, 31, &sw_id);
|
|
+ is_switch_175c = (sw_id == 0x175c) ? 1:0;
|
|
+#endif
|
|
+
|
|
+ if (!dev)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ strcpy(dev->name, DEV_NAME);
|
|
+ printk("%s:%s[%d]%d\n", __FILE__, __func__, __LINE__, fe_irq);
|
|
+ dev->irq = fe_irq;
|
|
+ dev->addr_len = 6;
|
|
+ dev->base_addr = RALINK_FRAME_ENGINE_BASE;
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ rather_probe(dev);
|
|
+#else
|
|
+ dev->init = rather_probe;
|
|
+#endif
|
|
+ ra2880_setup_dev_fptable(dev);
|
|
+
|
|
+ /* net_device structure Init */
|
|
+ ethtool_init(dev);
|
|
+	printk("Ralink APSoC Ethernet Driver Initialization. %s %d rx/tx descriptors allocated, mtu = %d!\n", RAETH_VERSION, NUM_RX_DESC, dev->mtu);
|
|
+#ifdef CONFIG_RAETH_NAPI
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ printk("NAPI enable, Tx Ring = %d, Rx Ring = %d\n", NUM_TX_DESC, NUM_RX_DESC);
|
|
+#else
|
|
+ printk("NAPI enable, weight = %d, Tx Ring = %d, Rx Ring = %d\n", dev->weight, NUM_TX_DESC, NUM_RX_DESC);
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+ /* Register net device for the driver */
|
|
+ if ( register_netdev(dev) != 0) {
|
|
+ printk(KERN_WARNING " " __FILE__ ": No ethernet port found.\n");
|
|
+ return -ENXIO;
|
|
+ }
|
|
+
|
|
+
|
|
+#ifdef CONFIG_RAETH_NETLINK
|
|
+ csr_netlink_init();
|
|
+#endif
|
|
+ ret = debug_proc_init();
|
|
+
|
|
+ dev_raether = dev;
|
|
+#ifdef CONFIG_ARCH_MT7623
|
|
+ mt7623_ethifsys_init();
|
|
+#endif
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+
|
|
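+/* Per-board switch/PHY bring-up, selected by Kconfig: external GigaPHY with
+ * auto-negotiation on GE1/GE2, the embedded 10/100 switch, the internal or
+ * external gigabit switch in forced 1000M mode, or a 10/100 PHY/switch on
+ * MII/RvMII. */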
+void fe_sw_init(void)
|
|
+{
|
|
+#if defined (CONFIG_GIGAPHY) || defined (CONFIG_RAETH_ROUTER) || defined (CONFIG_100PHY)
|
|
+ unsigned int regValue = 0;
|
|
+#endif
|
|
+
|
|
+ // Case1: RT288x/RT3883/MT7621 GE1 + GigaPhy
|
|
+#if defined (CONFIG_GE1_RGMII_AN)
|
|
+ enable_auto_negotiate(1);
|
|
+ if (isMarvellGigaPHY(1)) {
|
|
+#if defined (CONFIG_RT3883_FPGA)
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, ®Value);
|
|
+ regValue &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, regValue);
|
|
+
|
|
+ printk("\n Reset MARVELL phy\n");
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, ®Value);
|
|
+ regValue |= 1<<7; //Add delay to RX_CLK for RXD Outputs
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 20, regValue);
|
|
+
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, ®Value);
|
|
+ regValue |= 1<<15; //PHY Software Reset
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, regValue);
|
|
+#elif defined (CONFIG_MT7621_FPGA) || defined (CONFIG_MT7623_FPGA)
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, ®Value);
|
|
+ regValue &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 9, regValue);
|
|
+
|
|
+ /*10Mbps, debug*/
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 4, 0x461);
|
|
+
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, ®Value);
|
|
+ regValue |= 1<<9; //restart AN
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 0, regValue);
|
|
+#endif
|
|
+
|
|
+ }
|
|
+ if (isVtssGigaPHY(1)) {
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 1);
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, ®Value);
|
|
+ printk("Vitesse phy skew: %x --> ", regValue);
|
|
+ regValue |= (0x3<<12);
|
|
+ regValue &= ~(0x3<<14);
|
|
+ printk("%x\n", regValue);
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 28, regValue);
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR, 31, 0);
|
|
+ }
|
|
+#if defined (CONFIG_RALINK_MT7621)
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x21056300);//(P0, Auto mode)
|
|
+#endif
|
|
+#endif // CONFIG_GE1_RGMII_AN //
|
|
+
|
|
+ // Case2: RT3883/MT7621 GE2 + GigaPhy
|
|
+#if defined (CONFIG_GE2_RGMII_AN)
|
|
+ enable_auto_negotiate(2);
|
|
+ if (isMarvellGigaPHY(2)) {
|
|
+#if defined (CONFIG_RT3883_FPGA)
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, ®Value);
|
|
+ regValue &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, regValue);
|
|
+
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 20, ®Value);
|
|
+ regValue |= 1<<7; //Add delay to RX_CLK for RXD Outputs
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 20, regValue);
|
|
+
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, ®Value);
|
|
+ regValue |= 1<<15; //PHY Software Reset
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, regValue);
|
|
+#elif defined (CONFIG_MT7621_FPGA) || defined (CONFIG_MT7623_FPGA)
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, ®Value);
|
|
+ regValue &= ~(3<<8); //turn off 1000Base-T Advertisement (9.9=1000Full, 9.8=1000Half)
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 9, regValue);
|
|
+
|
|
+ /*10Mbps, debug*/
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 4, 0x461);
|
|
+
|
|
+
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, ®Value);
|
|
+ regValue |= 1<<9; //restart AN
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 0, regValue);
|
|
+#endif
|
|
+
|
|
+ }
|
|
+ if (isVtssGigaPHY(2)) {
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 31, 1);
|
|
+ mii_mgr_read(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 28, ®Value);
|
|
+ printk("Vitesse phy skew: %x --> ", regValue);
|
|
+ regValue |= (0x3<<12);
|
|
+ regValue &= ~(0x3<<14);
|
|
+ printk("%x\n", regValue);
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 28, regValue);
|
|
+ mii_mgr_write(CONFIG_MAC_TO_GIGAPHY_MODE_ADDR2, 31, 0);
|
|
+ }
|
|
+#if defined (CONFIG_RALINK_MT7621)
|
|
+ //RGMII2=Normal mode
|
|
+ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 15);
|
|
+ //GMAC2= RGMII mode
|
|
+ *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 14);
|
|
+
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(P1, Auto mode)
|
|
+#endif
|
|
+#endif // CONFIG_GE2_RGMII_AN //
|
|
+
|
|
+ // Case3: RT305x/RT335x/RT6855/RT6855A/MT7620 + EmbeddedSW
|
|
+#if defined (CONFIG_RT_3052_ESW) && !defined(CONFIG_RALINK_MT7621) && !defined(CONFIG_ARCH_MT7623)
|
|
+#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_MT7620)
|
|
+ rt_gsw_init();
|
|
+#elif defined(CONFIG_RALINK_RT6855A)
|
|
+ rt6855A_gsw_init();
|
|
+#else
|
|
+ rt305x_esw_init();
|
|
+#endif
|
|
+#endif
|
|
+ // Case4: RT288x/RT388x/MT7621 GE1 + Internal GigaSW
|
|
+#if defined (CONFIG_GE1_RGMII_FORCE_1000) || defined (CONFIG_GE1_TRGMII_FORCE_1200) || defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
|
|
+#if defined (CONFIG_RALINK_MT7621)
|
|
+ setup_internal_gsw();
|
|
+ /*MT7530 Init*/
|
|
+#elif defined (CONFIG_ARCH_MT7623)
|
|
+#if defined (CONFIG_GE1_TRGMII_FORCE_2000) || defined (CONFIG_GE1_TRGMII_FORCE_2600)
|
|
+ *(volatile u_long *)(0xfb00002c) |= (1<<11);
|
|
+#else
|
|
+ *(volatile u_long *)(0xfb00002c) &= ~(1<<11);
|
|
+#endif
|
|
+ setup_internal_gsw();
|
|
+ trgmii_calibration_7623();
|
|
+ trgmii_calibration_7530();
|
|
+ //*(volatile u_long *)(0xfb110300) |= (0x1f << 24); //Just only for 312.5/325MHz
|
|
+ *(volatile u_long *)(0xfb110340) = 0x00020000;
|
|
+ *(volatile u_long *)(0xfb110304) &= 0x3fffffff; // RX clock gating in MT7623
|
|
+ *(volatile u_long *)(0xfb110300) |= 0x80000000; // Assert RX reset in MT7623
|
|
+ *(volatile u_long *)(0xfb110300 ) &= 0x7fffffff; // Release RX reset in MT7623
|
|
+ *(volatile u_long *)(0xfb110300 +0x04) |= 0xC0000000; // Disable RX clock gating in MT7623
|
|
+/*GE1@125MHz(RGMII mode) TX delay adjustment*/
|
|
+#if defined (CONFIG_GE1_RGMII_FORCE_1000)
|
|
+ *(volatile u_long *)(0xfb110350) = 0x55;
|
|
+ *(volatile u_long *)(0xfb110358) = 0x55;
|
|
+ *(volatile u_long *)(0xfb110360) = 0x55;
|
|
+ *(volatile u_long *)(0xfb110368) = 0x55;
|
|
+ *(volatile u_long *)(0xfb110370) = 0x55;
|
|
+ *(volatile u_long *)(0xfb110378) = 0x855;
|
|
+#endif
|
|
+
|
|
+
|
|
+#elif defined (CONFIG_MT7623_FPGA) /* Nelson: remove for bring up, should be added!!! */
|
|
+ setup_fpga_gsw();
|
|
+#else
|
|
+ sysRegWrite(MDIO_CFG, INIT_VALUE_OF_FORCE_1000_FD);
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+ // Case5: RT388x/MT7621 GE2 + GigaSW
|
|
+#if defined (CONFIG_GE2_RGMII_FORCE_1000)
|
|
+#if defined (CONFIG_RALINK_MT7621)
|
|
+ setup_external_gsw();
|
|
+#else
|
|
+ sysRegWrite(MDIO_CFG2, INIT_VALUE_OF_FORCE_1000_FD);
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+ // Case6: RT288x GE1 /RT388x,MT7621 GE1/GE2 + (10/100 Switch or 100PHY)
|
|
+#if defined (CONFIG_RAETH_ROUTER) || defined (CONFIG_100PHY)
|
|
+
|
|
+ //set GMAC to MII or RvMII mode
|
|
+#if defined (CONFIG_RALINK_RT3883)
|
|
+ regValue = sysRegRead(SYSCFG1);
|
|
+#if defined (CONFIG_GE1_MII_FORCE_100) || defined (CONFIG_GE1_MII_AN)
|
|
+ regValue &= ~(0x3 << 12);
|
|
+ regValue |= 0x1 << 12; // GE1 MII Mode
|
|
+#elif defined (CONFIG_GE1_RVMII_FORCE_100)
|
|
+ regValue &= ~(0x3 << 12);
|
|
+ regValue |= 0x2 << 12; // GE1 RvMII Mode
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_GE2_MII_FORCE_100) || defined (CONFIG_GE2_MII_AN)
|
|
+ regValue &= ~(0x3 << 14);
|
|
+ regValue |= 0x1 << 14; // GE2 MII Mode
|
|
+#elif defined (CONFIG_GE2_RVMII_FORCE_100)
|
|
+ regValue &= ~(0x3 << 14);
|
|
+ regValue |= 0x2 << 14; // GE2 RvMII Mode
|
|
+#endif
|
|
+ sysRegWrite(SYSCFG1, regValue);
|
|
+#endif // CONFIG_RALINK_RT3883 //
|
|
+
|
|
+#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+
|
|
+#if defined (CONFIG_GE1_MII_FORCE_100)
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x5e337);//(P0, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
|
|
+#endif
|
|
+#if defined (CONFIG_GE2_MII_FORCE_100)
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x5e337);//(P1, Force mode, Link Up, 100Mbps, Full-Duplex, FC ON)
|
|
+#endif
|
|
+#if defined (CONFIG_GE1_MII_AN) || defined (CONFIG_GE1_RGMII_AN)
|
|
+ enable_auto_negotiate(1);
|
|
+#if defined (CONFIG_RALINK_MT7621)
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x21056300);//(P0, Auto mode)
|
|
+#endif
|
|
+#endif
|
|
+#if defined (CONFIG_GE2_MII_AN) || defined (CONFIG_GE1_RGMII_AN)
|
|
+ enable_auto_negotiate(2);
|
|
+#if defined (CONFIG_RALINK_MT7621)
|
|
+ sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x21056300);//(P1, Auto mode)
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+#else
|
|
+#if defined (CONFIG_GE1_MII_FORCE_100)
|
|
+#if defined (CONFIG_RALINK_MT7621)
|
|
+#else
|
|
+ sysRegWrite(MDIO_CFG, INIT_VALUE_OF_FORCE_100_FD);
|
|
+#endif
|
|
+#endif
|
|
+#if defined (CONFIG_GE2_MII_FORCE_100)
|
|
+#if defined (CONFIG_RALINK_MT7621)
|
|
+#else
|
|
+ sysRegWrite(MDIO_CFG2, INIT_VALUE_OF_FORCE_100_FD);
|
|
+#endif
|
|
+#endif
|
|
+ //add switch configuration here for other switch chips.
|
|
+#if defined (CONFIG_GE1_MII_FORCE_100) || defined (CONFIG_GE2_MII_FORCE_100)
|
|
+	// IC+ 175x: force the IC+ switch CPU port to 100/FD
|
|
+ mii_mgr_write(29, 22, 0x8420);
|
|
+#endif
|
|
+
|
|
+
|
|
+#endif // defined (CONFIG_RAETH_ROUTER) || defined (CONFIG_100PHY) //
|
|
+
|
|
+}
|
|
+
|
|
+
|
|
+/**
|
|
+ * ra2882eth_cleanup_module - Module Exit code
|
|
+ *
|
|
+ * Cmd 'rmmod' will invoke this routine to exit the module
|
|
+ *
|
|
+ */
|
|
+#if 0
|
|
+ void ra2882eth_cleanup_module(void)
|
|
+{
|
|
+ struct net_device *dev = dev_raether;
|
|
+ END_DEVICE *ei_local;
|
|
+
|
|
+ ei_local = netdev_priv(dev);
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ unregister_netdev(ei_local->PseudoDev);
|
|
+ free_netdev(ei_local->PseudoDev);
|
|
+#endif
|
|
+ unregister_netdev(dev);
|
|
+ RAETH_PRINT("Free ei_local and unregister netdev...\n");
|
|
+
|
|
+ free_netdev(dev);
|
|
+ debug_proc_exit();
|
|
+#ifdef CONFIG_RAETH_NETLINK
|
|
+ csr_netlink_end();
|
|
+#endif
|
|
+}
|
|
+#endif
|
|
+EXPORT_SYMBOL(set_fe_dma_glo_cfg);
|
|
+//module_init(ra2882eth_init);
|
|
+//module_exit(ra2882eth_cleanup_module);
|
|
+
|
|
+const struct of_device_id of_fe_match[] = {
|
|
+ { .compatible = "mediatek,mt7623-net", },
|
|
+ {},
|
|
+};
|
|
+
|
|
+MODULE_DEVICE_TABLE(of, of_fe_match);
|
|
+
|
|
+static struct platform_driver fe_driver = {
|
|
+ .probe = fe_probe,
|
|
+// .remove = ra2882eth_cleanup_module,
|
|
+ .driver = {
|
|
+ .name = "ralink_soc_eth",
|
|
+ .owner = THIS_MODULE,
|
|
+ .of_match_table = of_fe_match,
|
|
+ },
|
|
+};
|
|
+
|
|
+static int __init init_rtfe(void)
|
|
+{
|
|
+ int ret;
|
|
+ ret = platform_driver_register(&fe_driver);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void __exit exit_rtfe(void)
|
|
+{
|
|
+ platform_driver_unregister(&fe_driver);
|
|
+}
|
|
+
|
|
+module_init(init_rtfe);
|
|
+module_exit(exit_rtfe);
|
|
+
|
|
+
|
|
+MODULE_LICENSE("GPL");
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/raether.h
|
|
@@ -0,0 +1,126 @@
|
|
+#ifndef RA2882ETHEND_H
|
|
+#define RA2882ETHEND_H
|
|
+
|
|
+#ifdef DSP_VIA_NONCACHEABLE
|
|
+#define ESRAM_BASE 0xa0800000 /* 0x0080-0000 ~ 0x00807FFF */
|
|
+#else
|
|
+#define ESRAM_BASE 0x80800000 /* 0x0080-0000 ~ 0x00807FFF */
|
|
+#endif
|
|
+
|
|
+#define RX_RING_BASE ((int)(ESRAM_BASE + 0x7000))
|
|
+#define TX_RING_BASE ((int)(ESRAM_BASE + 0x7800))
|
|
+
|
|
+#if defined(CONFIG_RALINK_RT2880)
|
|
+#define NUM_TX_RINGS 1
|
|
+#else
|
|
+#define NUM_TX_RINGS 4
|
|
+#endif
|
|
+#ifdef MEMORY_OPTIMIZATION
|
|
+#ifdef CONFIG_RAETH_ROUTER
|
|
+#define NUM_RX_DESC 32 //128
|
|
+#define NUM_TX_DESC 32 //128
|
|
+#elif defined (CONFIG_RT_3052_ESW)
|
|
+#define NUM_RX_DESC 16 //64
|
|
+#define NUM_TX_DESC 16 //64
|
|
+#else
|
|
+#define NUM_RX_DESC 32 //128
|
|
+#define NUM_TX_DESC 32 //128
|
|
+#endif
|
|
+//#define NUM_RX_MAX_PROCESS 32
|
|
+#define NUM_RX_MAX_PROCESS 32
|
|
+#else
|
|
+#if defined (CONFIG_RAETH_ROUTER)
|
|
+#define NUM_RX_DESC 256
|
|
+#define NUM_TX_DESC 256
|
|
+#elif defined (CONFIG_RT_3052_ESW)
|
|
+#if defined (CONFIG_RALINK_MT7621)
|
|
+#define NUM_RX_DESC 512
|
|
+#define NUM_QRX_DESC 16
|
|
+#define NUM_TX_DESC 512
|
|
+#else
|
|
+#define NUM_RX_DESC 256
|
|
+#define NUM_QRX_DESC NUM_RX_DESC
|
|
+#define NUM_TX_DESC 256
|
|
+#endif
|
|
+#else
|
|
+#define NUM_RX_DESC 256
|
|
+#define NUM_QRX_DESC NUM_RX_DESC
|
|
+#define NUM_TX_DESC 256
|
|
+#endif
|
|
+#if defined(CONFIG_RALINK_RT3883) || defined(CONFIG_RALINK_MT7620)
|
|
+#define NUM_RX_MAX_PROCESS 2
|
|
+#else
|
|
+#define NUM_RX_MAX_PROCESS 16
|
|
+#endif
|
|
+#endif
|
|
+#define NUM_LRO_RX_DESC 16
|
|
+
|
|
+#if defined (CONFIG_SUPPORT_OPENWRT)
|
|
+#define DEV_NAME "eth0"
|
|
+#define DEV2_NAME "eth1"
|
|
+#else
|
|
+#define DEV_NAME "eth2"
|
|
+#define DEV2_NAME "eth3"
|
|
+#endif
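+/* Note: with CONFIG_SUPPORT_OPENWRT the two GMACs register as eth0/eth1;
+ * otherwise the SDK's traditional eth2/eth3 interface names are used. */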
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT6855A) || defined (CONFIG_RALINK_MT7621)
|
|
+#define GMAC0_OFFSET 0xE000
|
|
+#define GMAC2_OFFSET 0xE006
|
|
+#else
|
|
+#define GMAC0_OFFSET 0x28
|
|
+#define GMAC2_OFFSET 0x22
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RALINK_RT6855A)
|
|
+#define IRQ_ENET0 22
|
|
+#elif defined(CONFIG_ARCH_MT7623)
|
|
+#define IRQ_ENET0 232
|
|
+#else
|
|
+#define IRQ_ENET0 3 /* hardware interrupt #3, defined in RT2880 Soc Design Spec Rev 0.03, pp43 */
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_HW_LRO)
|
|
+#define HW_LRO_TIMER_UNIT 1
|
|
+#define HW_LRO_REFRESH_TIME 50000
|
|
+#define HW_LRO_MAX_AGG_CNT 64
|
|
+#define HW_LRO_AGG_DELTA 1
|
|
+#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+#define MAX_LRO_RX_LENGTH 10240
|
|
+#else
|
|
+#define MAX_LRO_RX_LENGTH (PAGE_SIZE - SKB_DATA_ALIGN(NET_SKB_PAD + sizeof(struct skb_shared_info)))
|
|
+#endif
|
|
+#define HW_LRO_AGG_TIME 10 /* 200us */
|
|
+#define HW_LRO_AGE_TIME 50
|
|
+#define HW_LRO_BW_THRE 3000
|
|
+#define HW_LRO_PKT_INT_ALPHA 100
|
|
+#endif /* CONFIG_RAETH_HW_LRO */
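+/* Note: the HW LRO timer defines above are in hardware ticks.  The comments
+ * in raether_hwlro.c treat one tick as 20us (HW_LRO_TIMER_UNIT == 1), which
+ * makes HW_LRO_AGG_TIME 10 roughly 200us and HW_LRO_REFRESH_TIME 50000
+ * roughly one second. */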
|
|
+#define FE_INT_STATUS_REG (*(volatile unsigned long *)(FE_INT_STATUS))
|
|
+#define FE_INT_STATUS_CLEAN(reg) (*(volatile unsigned long *)(FE_INT_STATUS)) = reg
|
|
+
|
|
+//#define RAETH_DEBUG
|
|
+#ifdef RAETH_DEBUG
|
|
+#define RAETH_PRINT(fmt, args...) printk(KERN_INFO fmt, ## args)
|
|
+#else
|
|
+#define RAETH_PRINT(fmt, args...) do { } while (0)
|
|
+#endif
|
|
+
|
|
+struct net_device_stats *ra_get_stats(struct net_device *dev);
|
|
+
|
|
+void ei_tx_timeout(struct net_device *dev);
|
|
+int rather_probe(struct net_device *dev);
|
|
+int ei_open(struct net_device *dev);
|
|
+int ei_close(struct net_device *dev);
|
|
+
|
|
+int ra2882eth_init(void);
|
|
+void ra2882eth_cleanup_module(void);
|
|
+
|
|
+void ei_xmit_housekeeping(unsigned long data);
|
|
+
|
|
+u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data);
|
|
+u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data);
|
|
+u32 mii_mgr_cl45_set_address(u32 port_num, u32 dev_addr, u32 reg_addr);
|
|
+u32 mii_mgr_read_cl45(u32 port_num, u32 dev_addr, u32 reg_addr, u32 *read_data);
|
|
+u32 mii_mgr_write_cl45(u32 port_num, u32 dev_addr, u32 reg_addr, u32 write_data);
|
|
+void fe_sw_init(void);
|
|
+
|
|
+#endif
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/raether_hwlro.c
|
|
@@ -0,0 +1,347 @@
|
|
+#include <linux/module.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/pci.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/if_vlan.h>
|
|
+#include <linux/if_ether.h>
|
|
+#include <linux/fs.h>
|
|
+#include <asm/uaccess.h>
|
|
+#include <asm/rt2880/surfboardint.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/sched.h>
|
|
+#include <asm/rt2880/rt_mmap.h>
|
|
+#include "ra2882ethreg.h"
|
|
+#include "raether.h"
|
|
+#include "ra_mac.h"
|
|
+#include "ra_ioctl.h"
|
|
+#include "ra_rfrw.h"
|
|
+
|
|
+#if defined(CONFIG_RAETH_HW_LRO_FORCE)
|
|
+int set_fe_lro_ring1_cfg(struct net_device *dev)
|
|
+{
|
|
+ unsigned int ip;
|
|
+
|
|
+ netdev_printk(KERN_CRIT, dev, "set_fe_lro_ring1_cfg()\n");
|
|
+
|
|
+ /* 1. Set RX ring mode to force port */
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_FORCE_PORT);
|
|
+
|
|
+ /* 2. Configure lro ring */
|
|
+ /* 2.1 set src/destination TCP ports */
|
|
+ SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING1, 1122);
|
|
+ SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING1, 3344);
|
|
+ /* 2.2 set src/destination IPs */
|
|
+ str_to_ip(&ip, "10.10.10.3");
|
|
+ sysRegWrite(LRO_RX_RING1_SIP_DW0, ip);
|
|
+ str_to_ip(&ip, "10.10.10.254");
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
|
|
+ /* 2.3 IPv4 force port mode */
|
|
+ SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING1, 1);
|
|
+ /* 2.4 IPv6 force port mode */
|
|
+ SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING1, 1);
|
|
+
|
|
+ /* 3. Set Age timer: 10 msec. */
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
|
|
+
|
|
+	/* 4. Mark the LRO ring valid */
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int set_fe_lro_ring2_cfg(struct net_device *dev)
|
|
+{
|
|
+ unsigned int ip;
|
|
+
|
|
+ netdev_printk(KERN_CRIT, dev, "set_fe_lro_ring2_cfg()\n");
|
|
+
|
|
+ /* 1. Set RX ring mode to force port */
|
|
+ SET_PDMA_RXRING2_MODE(PDMA_RX_FORCE_PORT);
|
|
+
|
|
+ /* 2. Configure lro ring */
|
|
+ /* 2.1 set src/destination TCP ports */
|
|
+ SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING2, 5566);
|
|
+ SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING2, 7788);
|
|
+ /* 2.2 set src/destination IPs */
|
|
+ str_to_ip(&ip, "10.10.10.3");
|
|
+ sysRegWrite(LRO_RX_RING2_SIP_DW0, ip);
|
|
+ str_to_ip(&ip, "10.10.10.254");
|
|
+ sysRegWrite(LRO_RX_RING2_DIP_DW0, ip);
|
|
+ /* 2.3 IPv4 force port mode */
|
|
+ SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING2, 1);
|
|
+ /* 2.4 IPv6 force port mode */
|
|
+ SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING2, 1);
|
|
+
|
|
+ /* 3. Set Age timer: 10 msec. */
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
|
|
+
|
|
+	/* 4. Mark the LRO ring valid */
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int set_fe_lro_ring3_cfg(struct net_device *dev)
|
|
+{
|
|
+ unsigned int ip;
|
|
+
|
|
+ netdev_printk(KERN_CRIT, dev, "set_fe_lro_ring3_cfg()\n");
|
|
+
|
|
+ /* 1. Set RX ring mode to force port */
|
|
+ SET_PDMA_RXRING3_MODE(PDMA_RX_FORCE_PORT);
|
|
+
|
|
+ /* 2. Configure lro ring */
|
|
+ /* 2.1 set src/destination TCP ports */
|
|
+ SET_PDMA_RXRING_TCP_SRC_PORT(ADMA_RX_RING3, 9900);
|
|
+ SET_PDMA_RXRING_TCP_DEST_PORT(ADMA_RX_RING3, 99);
|
|
+ /* 2.2 set src/destination IPs */
|
|
+ str_to_ip(&ip, "10.10.10.3");
|
|
+ sysRegWrite(LRO_RX_RING3_SIP_DW0, ip);
|
|
+ str_to_ip(&ip, "10.10.10.254");
|
|
+ sysRegWrite(LRO_RX_RING3_DIP_DW0, ip);
|
|
+ /* 2.3 IPv4 force port mode */
|
|
+ SET_PDMA_RXRING_IPV4_FORCE_MODE(ADMA_RX_RING3, 1);
|
|
+ /* 2.4 IPv6 force port mode */
|
|
+ SET_PDMA_RXRING_IPV6_FORCE_MODE(ADMA_RX_RING3, 1);
|
|
+
|
|
+ /* 3. Set Age timer: 10 msec. */
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
|
|
+
|
|
+	/* 4. Mark the LRO ring valid */
|
|
+ SET_PDMA_RXRING3_VALID(1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int set_fe_lro_glo_cfg(struct net_device *dev)
|
|
+{
|
|
+ unsigned int regVal = 0;
|
|
+
|
|
+ netdev_printk(KERN_CRIT, dev, "set_fe_lro_glo_cfg()\n");
|
|
+
|
|
+ /* 1 Set max AGG timer: 10 msec. */
|
|
+ SET_PDMA_LRO_MAX_AGG_TIME(HW_LRO_AGG_TIME);
|
|
+
|
|
+ /* 2. Set max LRO agg count */
|
|
+ SET_PDMA_LRO_MAX_AGG_CNT(HW_LRO_MAX_AGG_CNT);
|
|
+
|
|
+ /* PDMA prefetch enable setting */
|
|
+ SET_PDMA_LRO_RXD_PREFETCH_EN(0x3);
|
|
+
|
|
+ /* 2.1 IPv4 checksum update enable */
|
|
+ SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
|
|
+
|
|
+	/* 3. Polling relinquish */
|
|
+ while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH)
|
|
+ ;
|
|
+
|
|
+ /* 4. Enable LRO */
|
|
+ regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
|
|
+ regVal |= PDMA_LRO_EN;
|
|
+ sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+#else
|
|
+int set_fe_lro_auto_cfg(struct net_device *dev)
|
|
+{
|
|
+ unsigned int regVal = 0;
|
|
+ unsigned int ip;
|
|
+
|
|
+ netdev_printk(KERN_CRIT, dev, "set_fe_lro_auto_cfg()\n");
|
|
+
|
|
+ /* 1.1 Set my IP_1 */
|
|
+ str_to_ip(&ip, "10.10.10.254");
|
|
+ sysRegWrite(LRO_RX_RING0_DIP_DW0, ip);
|
|
+ sysRegWrite(LRO_RX_RING0_DIP_DW1, 0);
|
|
+ sysRegWrite(LRO_RX_RING0_DIP_DW2, 0);
|
|
+ sysRegWrite(LRO_RX_RING0_DIP_DW3, 0);
|
|
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING0, 1);
|
|
+
|
|
+ /* 1.2 Set my IP_2 */
|
|
+ str_to_ip(&ip, "10.10.20.254");
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW0, ip);
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW1, 0);
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW2, 0);
|
|
+ sysRegWrite(LRO_RX_RING1_DIP_DW3, 0);
|
|
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING1, 1);
|
|
+
|
|
+ /* 1.3 Set my IP_3 */
|
|
+ sysRegWrite(LRO_RX_RING2_DIP_DW3, 0x20010238);
|
|
+ sysRegWrite(LRO_RX_RING2_DIP_DW2, 0x08000000);
|
|
+ sysRegWrite(LRO_RX_RING2_DIP_DW1, 0x00000000);
|
|
+ sysRegWrite(LRO_RX_RING2_DIP_DW0, 0x00000254);
|
|
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING2, 1);
|
|
+
|
|
+ /* 1.4 Set my IP_4 */
|
|
+ sysRegWrite(LRO_RX_RING3_DIP_DW3, 0x20010238);
|
|
+ sysRegWrite(LRO_RX_RING3_DIP_DW2, 0x08010000);
|
|
+ sysRegWrite(LRO_RX_RING3_DIP_DW1, 0x00000000);
|
|
+ sysRegWrite(LRO_RX_RING3_DIP_DW0, 0x00000254);
|
|
+ SET_PDMA_RXRING_MYIP_VALID(ADMA_RX_RING3, 1);
|
|
+
|
|
+ /* 2.1 Set RX ring1~3 to auto-learn modes */
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING1, PDMA_RX_AUTO_LEARN);
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING2, PDMA_RX_AUTO_LEARN);
|
|
+ SET_PDMA_RXRING_MODE(ADMA_RX_RING3, PDMA_RX_AUTO_LEARN);
|
|
+
|
|
+	/* 2.2 Mark the LRO rings valid */
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING0, 1);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING1, 1);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING2, 1);
|
|
+ SET_PDMA_RXRING_VALID(ADMA_RX_RING3, 1);
|
|
+
|
|
+ /* 2.3 Set AGE timer */
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING1, HW_LRO_AGE_TIME);
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING2, HW_LRO_AGE_TIME);
|
|
+ SET_PDMA_RXRING_AGE_TIME(ADMA_RX_RING3, HW_LRO_AGE_TIME);
|
|
+
|
|
+ /* 2.4 Set max AGG timer */
|
|
+ SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING1, HW_LRO_AGG_TIME);
|
|
+ SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING2, HW_LRO_AGG_TIME);
|
|
+ SET_PDMA_RXRING_AGG_TIME(ADMA_RX_RING3, HW_LRO_AGG_TIME);
|
|
+
|
|
+ /* 2.5 Set max LRO agg count */
|
|
+ SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING1, HW_LRO_MAX_AGG_CNT);
|
|
+ SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING2, HW_LRO_MAX_AGG_CNT);
|
|
+ SET_PDMA_RXRING_MAX_AGG_CNT(ADMA_RX_RING3, HW_LRO_MAX_AGG_CNT);
|
|
+
|
|
+ /* 3.0 IPv6 LRO enable */
|
|
+ SET_PDMA_LRO_IPV6_EN(1);
|
|
+
|
|
+ /* 3.1 IPv4 checksum update enable */
|
|
+ SET_PDMA_LRO_IPV4_CSUM_UPDATE_EN(1);
|
|
+
|
|
+ /* 3.2 TCP push option check disable */
|
|
+ //SET_PDMA_LRO_IPV4_CTRL_PUSH_EN(0);
|
|
+
|
|
+ /* PDMA prefetch enable setting */
|
|
+ SET_PDMA_LRO_RXD_PREFETCH_EN(0x3);
|
|
+
|
|
+	/* 3.2 ALT score comparison mode: packet count (byte-count variant commented out) */
|
|
+/* SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_BYTE_CNT_MODE); */
|
|
+ SET_PDMA_LRO_ALT_SCORE_MODE(PDMA_LRO_ALT_PKT_CNT_MODE);
|
|
+
|
|
+ /* 3.3 bandwidth threshold setting */
|
|
+ SET_PDMA_LRO_BW_THRESHOLD(HW_LRO_BW_THRE);
|
|
+
|
|
+ /* 3.4 auto-learn score delta setting */
|
|
+ sysRegWrite(LRO_ALT_SCORE_DELTA, 0);
|
|
+
|
|
+ /* 3.5 Set ALT timer to 20us: (unit: 20us) */
|
|
+ SET_PDMA_LRO_ALT_REFRESH_TIMER_UNIT(HW_LRO_TIMER_UNIT);
|
|
+ /* 3.6 Set ALT refresh timer to 1 sec. (unit: 20us) */
|
|
+ SET_PDMA_LRO_ALT_REFRESH_TIMER(HW_LRO_REFRESH_TIME);
|
|
+
|
|
+	/* 3.7 minimum remaining SDL0 room in an RXD for LRO aggregation */
|
|
+ SET_PDMA_LRO_MIN_RXD_SDL(1522);
|
|
+
|
|
+	/* 4. Polling relinquish */
|
|
+ while (sysRegRead(ADMA_LRO_CTRL_DW0) & PDMA_LRO_RELINGUISH)
|
|
+ ;
|
|
+
|
|
+ /* 5. Enable LRO */
|
|
+ regVal = sysRegRead(ADMA_LRO_CTRL_DW0);
|
|
+ regVal |= PDMA_LRO_EN;
|
|
+ sysRegWrite(ADMA_LRO_CTRL_DW0, regVal);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+#endif /* CONFIG_RAETH_HW_LRO_FORCE */
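+
+/*
+ * Summary of the two configuration paths above (derived from the code): with
+ * CONFIG_RAETH_HW_LRO_FORCE, RX rings 1-3 are each bound to a fixed TCP
+ * port/IP pair (force-port mode); without it, set_fe_lro_auto_cfg() puts the
+ * rings in auto-learn mode, where the engine selects flows itself using the
+ * ALT score mode, refresh timers and bandwidth threshold programmed there.
+ */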
|
|
+
|
|
+int fe_hw_lro_init(struct net_device *dev)
|
|
+{
|
|
+ int i;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+
|
|
+	/* Initialize RX Ring 3 */
|
|
+ ei_local->rx_ring3 =
|
|
+ pci_alloc_consistent(NULL, NUM_LRO_RX_DESC * sizeof(struct PDMA_rxdesc),
|
|
+ &ei_local->phy_rx_ring3);
|
|
+ for (i = 0; i < NUM_LRO_RX_DESC; i++) {
|
|
+ memset(&ei_local->rx_ring3[i], 0, sizeof(struct PDMA_rxdesc));
|
|
+ ei_local->rx_ring3[i].rxd_info2.DDONE_bit = 0;
|
|
+ ei_local->rx_ring3[i].rxd_info2.LS0 = 0;
|
|
+ ei_local->rx_ring3[i].rxd_info2.PLEN0 =
|
|
+ SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
|
|
+ ei_local->rx_ring3[i].rxd_info2.PLEN1 =
|
|
+ SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
|
|
+ ei_local->rx_ring3[i].rxd_info1.PDP0 =
|
|
+ dma_map_single(NULL, ei_local->netrx3_skbuf[i]->data,
|
|
+ MAX_LRO_RX_LENGTH, PCI_DMA_FROMDEVICE);
|
|
+ }
|
|
+ netdev_printk(KERN_CRIT, dev,
|
|
+ "\nphy_rx_ring3 = 0x%08x, rx_ring3 = 0x%p\n",
|
|
+ ei_local->phy_rx_ring3, ei_local->rx_ring3);
|
|
+	/* Initialize RX Ring 2 */
|
|
+ ei_local->rx_ring2 =
|
|
+ pci_alloc_consistent(NULL, NUM_LRO_RX_DESC * sizeof(struct PDMA_rxdesc),
|
|
+ &ei_local->phy_rx_ring2);
|
|
+ for (i = 0; i < NUM_LRO_RX_DESC; i++) {
|
|
+ memset(&ei_local->rx_ring2[i], 0, sizeof(struct PDMA_rxdesc));
|
|
+ ei_local->rx_ring2[i].rxd_info2.DDONE_bit = 0;
|
|
+ ei_local->rx_ring2[i].rxd_info2.LS0 = 0;
|
|
+ ei_local->rx_ring2[i].rxd_info2.PLEN0 =
|
|
+ SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
|
|
+ ei_local->rx_ring2[i].rxd_info2.PLEN1 =
|
|
+ SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
|
|
+ ei_local->rx_ring2[i].rxd_info1.PDP0 =
|
|
+ dma_map_single(NULL, ei_local->netrx2_skbuf[i]->data,
|
|
+ MAX_LRO_RX_LENGTH, PCI_DMA_FROMDEVICE);
|
|
+ }
|
|
+ netdev_printk(KERN_CRIT, dev,
|
|
+ "\nphy_rx_ring2 = 0x%08x, rx_ring2 = 0x%p\n",
|
|
+ ei_local->phy_rx_ring2, ei_local->rx_ring2);
|
|
+	/* Initialize RX Ring 1 */
|
|
+ ei_local->rx_ring1 =
|
|
+ pci_alloc_consistent(NULL, NUM_LRO_RX_DESC * sizeof(struct PDMA_rxdesc),
|
|
+ &ei_local->phy_rx_ring1);
|
|
+ for (i = 0; i < NUM_LRO_RX_DESC; i++) {
|
|
+ memset(&ei_local->rx_ring1[i], 0, sizeof(struct PDMA_rxdesc));
|
|
+ ei_local->rx_ring1[i].rxd_info2.DDONE_bit = 0;
|
|
+ ei_local->rx_ring1[i].rxd_info2.LS0 = 0;
|
|
+ ei_local->rx_ring1[i].rxd_info2.PLEN0 =
|
|
+ SET_ADMA_RX_LEN0(MAX_LRO_RX_LENGTH);
|
|
+ ei_local->rx_ring1[i].rxd_info2.PLEN1 =
|
|
+ SET_ADMA_RX_LEN1(MAX_LRO_RX_LENGTH >> 14);
|
|
+ ei_local->rx_ring1[i].rxd_info1.PDP0 =
|
|
+ dma_map_single(NULL, ei_local->netrx1_skbuf[i]->data,
|
|
+ MAX_LRO_RX_LENGTH, PCI_DMA_FROMDEVICE);
|
|
+ }
|
|
+ netdev_printk(KERN_CRIT, dev,
|
|
+ "\nphy_rx_ring1 = 0x%08x, rx_ring1 = 0x%p\n",
|
|
+ ei_local->phy_rx_ring1, ei_local->rx_ring1);
|
|
+
|
|
+ sysRegWrite(RX_BASE_PTR3, phys_to_bus((u32) ei_local->phy_rx_ring3));
|
|
+ sysRegWrite(RX_MAX_CNT3, cpu_to_le32((u32) NUM_LRO_RX_DESC));
|
|
+ sysRegWrite(RX_CALC_IDX3, cpu_to_le32((u32) (NUM_LRO_RX_DESC - 1)));
|
|
+ sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX3);
|
|
+ sysRegWrite(RX_BASE_PTR2, phys_to_bus((u32) ei_local->phy_rx_ring2));
|
|
+ sysRegWrite(RX_MAX_CNT2, cpu_to_le32((u32) NUM_LRO_RX_DESC));
|
|
+ sysRegWrite(RX_CALC_IDX2, cpu_to_le32((u32) (NUM_LRO_RX_DESC - 1)));
|
|
+ sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX2);
|
|
+ sysRegWrite(RX_BASE_PTR1, phys_to_bus((u32) ei_local->phy_rx_ring1));
|
|
+ sysRegWrite(RX_MAX_CNT1, cpu_to_le32((u32) NUM_LRO_RX_DESC));
|
|
+ sysRegWrite(RX_CALC_IDX1, cpu_to_le32((u32) (NUM_LRO_RX_DESC - 1)));
|
|
+ sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX1);
|
|
+
|
|
+#if defined(CONFIG_RAETH_HW_LRO_FORCE)
|
|
+ set_fe_lro_ring1_cfg(dev);
|
|
+ set_fe_lro_ring2_cfg(dev);
|
|
+ set_fe_lro_ring3_cfg(dev);
|
|
+ set_fe_lro_glo_cfg(dev);
|
|
+#else
|
|
+ set_fe_lro_auto_cfg(dev);
|
|
+#endif /* CONFIG_RAETH_HW_LRO_FORCE */
|
|
+
|
|
+ /* HW LRO parameter settings */
|
|
+ ei_local->hw_lro_alpha = HW_LRO_PKT_INT_ALPHA;
|
|
+ ei_local->hw_lro_fix_setting = 1;
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+EXPORT_SYMBOL(fe_hw_lro_init);
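+
+/* Note: fe_hw_lro_init() above allocates NUM_LRO_RX_DESC descriptors for RX
+ * rings 1-3, points RX_BASE_PTR1..3 at them, then applies either the force or
+ * the auto LRO configuration; hw_lro_alpha and hw_lro_fix_setting are default
+ * tuning values stored in the device private data. */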
|
|
+
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/raether_pdma.c
|
|
@@ -0,0 +1,1121 @@
|
|
+#include <linux/module.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/pci.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/if_vlan.h>
|
|
+#include <linux/if_ether.h>
|
|
+#include <linux/fs.h>
|
|
+#include <asm/uaccess.h>
|
|
+#include <asm/rt2880/surfboardint.h>
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+#include <linux/tcp.h>
|
|
+#include <net/ipv6.h>
|
|
+#include <linux/ip.h>
|
|
+#include <net/ip.h>
|
|
+#include <net/tcp.h>
|
|
+#include <linux/in.h>
|
|
+#include <linux/ppp_defs.h>
|
|
+#include <linux/if_pppox.h>
|
|
+#endif
|
|
+#if defined (CONFIG_RAETH_LRO)
|
|
+#include <linux/inet_lro.h>
|
|
+#endif
|
|
+#include <linux/delay.h>
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+#include <linux/sched.h>
|
|
+#endif
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
|
|
+#include <asm/rt2880/rt_mmap.h>
|
|
+#else
|
|
+#include <linux/libata-compat.h>
|
|
+#endif
|
|
+
|
|
+#include "ra2882ethreg.h"
|
|
+#include "raether.h"
|
|
+#include "ra_mac.h"
|
|
+#include "ra_ioctl.h"
|
|
+#include "ra_rfrw.h"
|
|
+#ifdef CONFIG_RAETH_NETLINK
|
|
+#include "ra_netlink.h"
|
|
+#endif
|
|
+#if defined (CONFIG_RAETH_QOS)
|
|
+#include "ra_qos.h"
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
|
|
+#include "../../../net/nat/hw_nat/ra_nat.h"
|
|
+#endif
|
|
+#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+#include "dvt/raether_pdma_dvt.h"
|
|
+#endif /* CONFIG_RAETH_PDMA_DVT */
|
|
+
|
|
+#if !defined(CONFIG_RA_NAT_NONE)
|
|
+/* bruce+
|
|
+ */
|
|
+extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
|
|
+extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
|
|
+/* Qwert+
|
|
+ */
|
|
+#include <asm/mipsregs.h>
|
|
+extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle);
|
|
+extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle);
|
|
+#endif /* CONFIG_RA_CLASSIFIER */
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT3052_MP2)
|
|
+int32_t mcast_rx(struct sk_buff * skb);
|
|
+int32_t mcast_tx(struct sk_buff * skb);
|
|
+#endif
|
|
+
|
|
+#if 0
|
|
+#ifdef RA_MTD_RW_BY_NUM
|
|
+int ra_mtd_read(int num, loff_t from, size_t len, u_char *buf);
|
|
+#else
|
|
+int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf);
|
|
+#endif
|
|
+#endif
|
|
+/* gmac driver feature set config */
|
|
+#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS)
|
|
+#undef DELAY_INT
|
|
+#else
|
|
+#if defined (CONFIG_ARCH_MT7623)
|
|
+#undef DELAY_INT
|
|
+#else
|
|
+#define DELAY_INT 1
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+//#define CONFIG_UNH_TEST
|
|
+/* end of config */
|
|
+
|
|
+#if defined (CONFIG_RAETH_JUMBOFRAME)
|
|
+#define MAX_RX_LENGTH 4096
|
|
+#else
|
|
+#define MAX_RX_LENGTH 1536
|
|
+#endif
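+/* Note: 1536 comfortably covers a 1518-byte Ethernet frame plus a VLAN tag
+ * (1522 bytes) with alignment headroom; 4096 is only used when jumbo-frame
+ * support (CONFIG_RAETH_JUMBOFRAME) is enabled. */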
|
|
+
|
|
+extern struct net_device *dev_raether;
|
|
+
|
|
+
|
|
+#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+extern int rx_calc_idx1;
|
|
+#endif
|
|
+#endif
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+extern int rx_calc_idx0;
|
|
+static unsigned long tx_cpu_owner_idx0=0;
|
|
+#endif
|
|
+extern unsigned long tx_ring_full;
|
|
+
|
|
+#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/
|
|
+#include "ra_ethtool.h"
|
|
+extern struct ethtool_ops ra_ethtool_ops;
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+extern struct ethtool_ops ra_virt_ethtool_ops;
|
|
+#endif // CONFIG_PSEUDO_SUPPORT //
|
|
+#endif // (CONFIG_ETHTOOL //
|
|
+
|
|
+#ifdef CONFIG_RALINK_VISTA_BASIC
|
|
+int is_switch_175c = 1;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_PDMATX_QDMARX /* QDMA RX */
|
|
+struct QDMA_txdesc *free_head = NULL;
|
|
+#endif
|
|
+
|
|
+//#if defined (CONFIG_RAETH_LRO)
|
|
+#if 0
|
|
+unsigned int lan_ip;
|
|
+struct lro_para_struct lro_para;
|
|
+int lro_flush_needed;
|
|
+extern char const *nvram_get(int index, char *name);
|
|
+#endif
|
|
+
|
|
+#define KSEG1 0xa0000000
|
|
+#define PHYS_TO_VIRT(x) ((void *)((x) | KSEG1))
|
|
+#define VIRT_TO_PHYS(x) ((unsigned long)(x) & ~KSEG1)
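+/* Note: KSEG1 (0xa0000000) is the uncached MIPS kernel segment, so these
+ * helpers simply add or strip that segment offset and are only meaningful
+ * on MIPS builds. */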
|
|
+
|
|
+extern void set_fe_dma_glo_cfg(void);
|
|
+
|
|
+/*
|
|
+ * @brief calculate how many TX descriptors a buffer of the given size needs
|
|
+ *
|
|
+ * @param size buffer length in bytes
|
|
+ *
|
|
+ * @return frag_txd_num
|
|
+ */
|
|
+
|
|
+unsigned int cal_frag_txd_num(unsigned int size)
|
|
+{
|
|
+ unsigned int frag_txd_num = 0;
|
|
+ if(size == 0)
|
|
+ return 0;
|
|
+ while(size > 0){
|
|
+ if(size > MAX_TXD_LEN){
|
|
+ frag_txd_num++;
|
|
+ size -= MAX_TXD_LEN;
|
|
+ }else{
|
|
+ frag_txd_num++;
|
|
+ size = 0;
|
|
+ }
|
|
+ }
|
|
+ return frag_txd_num;
|
|
+
|
|
+}
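+
+/* Editor's sketch (not part of the original SDK code): the loop in
+ * cal_frag_txd_num() is just a ceiling division by MAX_TXD_LEN.  Assuming
+ * MAX_TXD_LEN stays a positive constant, an equivalent helper could look
+ * like the hypothetical function below; it is kept disabled here.
+ */
+#if 0
+static inline unsigned int cal_frag_txd_num_fast(unsigned int size)
+{
+	/* DIV_ROUND_UP() comes from <linux/kernel.h>; like the loop above,
+	 * this returns 0 when size == 0. */
+	return DIV_ROUND_UP(size, MAX_TXD_LEN);
+}
+#endif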
|
|
+
|
|
+#ifdef CONFIG_RAETH_PDMATX_QDMARX /* QDMA RX */
|
|
+bool fq_qdma_init(struct net_device *dev)
|
|
+{
|
|
+ END_DEVICE* ei_local = netdev_priv(dev);
|
|
+ unsigned int phy_free_head;
|
|
+ unsigned int phy_free_tail;
|
|
+ unsigned int *free_page_head = NULL;
|
|
+ unsigned int phy_free_page_head;
|
|
+ int i;
|
|
+
|
|
+ free_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), &phy_free_head);
|
|
+ if (unlikely(free_head == NULL)){
|
|
+ printk(KERN_ERR "QDMA FQ decriptor not available...\n");
|
|
+ return 0;
|
|
+ }
|
|
+ memset(free_head, 0x0, sizeof(struct QDMA_txdesc) * NUM_QDMA_PAGE);
|
|
+
|
|
+ free_page_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, &phy_free_page_head);
|
|
+ if (unlikely(free_page_head == NULL)){
|
|
+ printk(KERN_ERR "QDMA FQ page not available...\n");
|
|
+ return 0;
|
|
+ }
|
|
+ for (i=0; i < NUM_QDMA_PAGE; i++) {
|
|
+ free_head[i].txd_info1.SDP = (phy_free_page_head + (i * QDMA_PAGE_SIZE));
|
|
+ if(i < (NUM_QDMA_PAGE-1)){
|
|
+ free_head[i].txd_info2.NDP = (phy_free_head + ((i+1) * sizeof(struct QDMA_txdesc)));
|
|
+
|
|
+
|
|
+#if 0
|
|
+ printk("free_head_phy[%d] is 0x%x!!!\n",i, VIRT_TO_PHYS(&free_head[i]) );
|
|
+ printk("free_head[%d] is 0x%x!!!\n",i, &free_head[i] );
|
|
+ printk("free_head[%d].txd_info1.SDP is 0x%x!!!\n",i, free_head[i].txd_info1.SDP );
|
|
+ printk("free_head[%d].txd_info2.NDP is 0x%x!!!\n",i, free_head[i].txd_info2.NDP );
|
|
+#endif
|
|
+ }
|
|
+ free_head[i].txd_info3.SDL = QDMA_PAGE_SIZE;
|
|
+
|
|
+ }
|
|
+ phy_free_tail = (phy_free_head + (u32)((NUM_QDMA_PAGE-1) * sizeof(struct QDMA_txdesc)));
|
|
+
|
|
+ printk("phy_free_head is 0x%x!!!\n", phy_free_head);
|
|
+ printk("phy_free_tail_phy is 0x%x!!!\n", phy_free_tail);
|
|
+ sysRegWrite(QDMA_FQ_HEAD, (u32)phy_free_head);
|
|
+ sysRegWrite(QDMA_FQ_TAIL, (u32)phy_free_tail);
|
|
+ sysRegWrite(QDMA_FQ_CNT, ((NUM_TX_DESC << 16) | NUM_QDMA_PAGE));
|
|
+ sysRegWrite(QDMA_FQ_BLEN, QDMA_PAGE_SIZE << 16);
|
|
+
|
|
+ ei_local->free_head = free_head;
|
|
+ ei_local->phy_free_head = phy_free_head;
|
|
+ ei_local->free_page_head = free_page_head;
|
|
+ ei_local->phy_free_page_head = phy_free_page_head;
|
|
+ return 1;
|
|
+}
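+
+/* Note on the free queue built above (derived from the code): fq_qdma_init()
+ * allocates NUM_QDMA_PAGE QDMA_txdesc entries, chains them through
+ * txd_info2.NDP, and gives each one a private QDMA_PAGE_SIZE buffer via
+ * txd_info1.SDP.  The head/tail physical addresses, counts and buffer length
+ * are then handed to the engine through QDMA_FQ_HEAD, QDMA_FQ_TAIL,
+ * QDMA_FQ_CNT and QDMA_FQ_BLEN. */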
|
|
+#endif
|
|
+
|
|
+int fe_dma_init(struct net_device *dev)
|
|
+{
|
|
+
|
|
+ int i;
|
|
+ unsigned int regVal;
|
|
+ END_DEVICE* ei_local = netdev_priv(dev);
|
|
+#if defined (CONFIG_RAETH_QOS)
|
|
+ int j;
|
|
+#endif
|
|
+
|
|
+ while(1)
|
|
+ {
|
|
+ regVal = sysRegRead(PDMA_GLO_CFG);
|
|
+ if((regVal & RX_DMA_BUSY))
|
|
+ {
|
|
+ printk("\n RX_DMA_BUSY !!! ");
|
|
+ continue;
|
|
+ }
|
|
+ if((regVal & TX_DMA_BUSY))
|
|
+ {
|
|
+ printk("\n TX_DMA_BUSY !!! ");
|
|
+ continue;
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+
|
|
+#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+ pdma_dvt_set_dma_mode();
|
|
+#endif /* CONFIG_RAETH_PDMA_DVT */
|
|
+
|
|
+#if defined (CONFIG_RAETH_QOS)
|
|
+ for (i=0;i<NUM_TX_RINGS;i++){
|
|
+ for (j=0;j<NUM_TX_DESC;j++){
|
|
+ ei_local->skb_free[i][j]=0;
|
|
+ }
|
|
+ ei_local->free_idx[i]=0;
|
|
+ }
|
|
+ /*
|
|
+ * RT2880: 2 x TX_Ring, 1 x Rx_Ring
|
|
+ * RT2883: 4 x TX_Ring, 1 x Rx_Ring
|
|
+ * RT3883: 4 x TX_Ring, 1 x Rx_Ring
|
|
+ * RT3052: 4 x TX_Ring, 1 x Rx_Ring
|
|
+ */
|
|
+ fe_tx_desc_init(dev, 0, 3, 1);
|
|
+ if (ei_local->tx_ring0 == NULL) {
|
|
+ printk("RAETH: tx ring0 allocation failed\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ fe_tx_desc_init(dev, 1, 3, 1);
|
|
+ if (ei_local->tx_ring1 == NULL) {
|
|
+ printk("RAETH: tx ring1 allocation failed\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ printk("\nphy_tx_ring0 = %08x, tx_ring0 = %p, size: %d bytes\n", ei_local->phy_tx_ring0, ei_local->tx_ring0, sizeof(struct PDMA_txdesc));
|
|
+
|
|
+ printk("\nphy_tx_ring1 = %08x, tx_ring1 = %p, size: %d bytes\n", ei_local->phy_tx_ring1, ei_local->tx_ring1, sizeof(struct PDMA_txdesc));
|
|
+
|
|
+#if ! defined (CONFIG_RALINK_RT2880)
|
|
+ fe_tx_desc_init(dev, 2, 3, 1);
|
|
+ if (ei_local->tx_ring2 == NULL) {
|
|
+ printk("RAETH: tx ring2 allocation failed\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ fe_tx_desc_init(dev, 3, 3, 1);
|
|
+ if (ei_local->tx_ring3 == NULL) {
|
|
+ printk("RAETH: tx ring3 allocation failed\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ printk("\nphy_tx_ring2 = %08x, tx_ring2 = %p, size: %d bytes\n", ei_local->phy_tx_ring2, ei_local->tx_ring2, sizeof(struct PDMA_txdesc));
|
|
+
|
|
+ printk("\nphy_tx_ring3 = %08x, tx_ring3 = %p, size: %d bytes\n", ei_local->phy_tx_ring3, ei_local->tx_ring3, sizeof(struct PDMA_txdesc));
|
|
+
|
|
+#endif // CONFIG_RALINK_RT2880 //
|
|
+#else
|
|
+ for (i=0;i<NUM_TX_DESC;i++){
|
|
+ ei_local->skb_free[i]=0;
|
|
+ }
|
|
+ ei_local->free_idx =0;
|
|
+#if defined (CONFIG_MIPS)
|
|
+ ei_local->tx_ring0 = pci_alloc_consistent(NULL, NUM_TX_DESC * sizeof(struct PDMA_txdesc), &ei_local->phy_tx_ring0);
|
|
+#else
|
|
+ ei_local->tx_ring0 = dma_alloc_coherent(NULL, NUM_TX_DESC * sizeof(struct PDMA_txdesc), &ei_local->phy_tx_ring0, GFP_KERNEL);
|
|
+#endif
|
|
+ printk("\nphy_tx_ring = 0x%08x, tx_ring = 0x%p\n", ei_local->phy_tx_ring0, ei_local->tx_ring0);
|
|
+
|
|
+ for (i=0; i < NUM_TX_DESC; i++) {
|
|
+ memset(&ei_local->tx_ring0[i],0,sizeof(struct PDMA_txdesc));
|
|
+ ei_local->tx_ring0[i].txd_info2.LS0_bit = 1;
|
|
+ ei_local->tx_ring0[i].txd_info2.DDONE_bit = 1;
|
|
+
|
|
+ }
|
|
+#endif // CONFIG_RAETH_QOS
|
|
+
|
|
+#ifdef CONFIG_RAETH_PDMATX_QDMARX /* QDMA RX */
|
|
+
|
|
+ fq_qdma_init(dev);
|
|
+
|
|
+ while(1)
|
|
+ {
|
|
+ regVal = sysRegRead(QDMA_GLO_CFG);
|
|
+ if((regVal & RX_DMA_BUSY))
|
|
+ {
|
|
+ printk("\n RX_DMA_BUSY !!! ");
|
|
+ continue;
|
|
+ }
|
|
+ if((regVal & TX_DMA_BUSY))
|
|
+ {
|
|
+ printk("\n TX_DMA_BUSY !!! ");
|
|
+ continue;
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+
|
|
+	/* Initialize RX Ring 0 */
|
|
+
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ ei_local->qrx_ring = kmalloc(NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
|
|
+ ei_local->phy_qrx_ring = virt_to_phys(ei_local->qrx_ring);
|
|
+#else
|
|
+ ei_local->qrx_ring = pci_alloc_consistent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_qrx_ring);
|
|
+#endif
|
|
+ for (i = 0; i < NUM_QRX_DESC; i++) {
|
|
+ memset(&ei_local->qrx_ring[i],0,sizeof(struct PDMA_rxdesc));
|
|
+ ei_local->qrx_ring[i].rxd_info2.DDONE_bit = 0;
|
|
+#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ ei_local->qrx_ring[i].rxd_info2.LS0 = 0;
|
|
+ ei_local->qrx_ring[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
|
|
+#else
|
|
+ ei_local->qrx_ring[i].rxd_info2.LS0 = 1;
|
|
+#endif
|
|
+ ei_local->qrx_ring[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
|
|
+ }
|
|
+ printk("\nphy_qrx_ring = 0x%08x, qrx_ring = 0x%p\n",ei_local->phy_qrx_ring,ei_local->qrx_ring);
|
|
+
|
|
+ regVal = sysRegRead(QDMA_GLO_CFG);
|
|
+ regVal &= 0x000000FF;
|
|
+
|
|
+ sysRegWrite(QDMA_GLO_CFG, regVal);
|
|
+ regVal=sysRegRead(QDMA_GLO_CFG);
|
|
+
|
|
+ /* Tell the adapter where the TX/RX rings are located. */
|
|
+
|
|
+ sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_qrx_ring));
|
|
+ sysRegWrite(QRX_MAX_CNT_0, cpu_to_le32((u32) NUM_QRX_DESC));
|
|
+ sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_calc_idx0 = rx_dma_owner_idx0 = sysRegRead(QRX_CRX_IDX_0);
|
|
+#endif
|
|
+ sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX0);
|
|
+
|
|
+ ei_local->rx_ring0 = ei_local->qrx_ring;
|
|
+
|
|
+#else /* PDMA RX */
|
|
+
|
|
+	/* Initialize RX Ring 0 */
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ ei_local->rx_ring0 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
|
|
+ ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0);
|
|
+#else
|
|
+#if defined (CONFIG_MIPS)
|
|
+ ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0);
|
|
+#else
|
|
+ ei_local->rx_ring0 = dma_alloc_coherent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0, GFP_KERNEL);
|
|
+#endif
|
|
+#endif
|
|
+ for (i = 0; i < NUM_RX_DESC; i++) {
|
|
+ memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc));
|
|
+ ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0;
|
|
+#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
|
|
+ ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
|
|
+#else
|
|
+ ei_local->rx_ring0[i].rxd_info2.LS0 = 1;
|
|
+#endif
|
|
+ ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
|
|
+ }
|
|
+ printk("\nphy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0);
|
|
+
|
|
+#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+	/* Initialize RX Ring 1 */
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ ei_local->rx_ring1 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
|
|
+ ei_local->phy_rx_ring1 = virt_to_phys(ei_local->rx_ring1);
|
|
+#else
|
|
+#if defined (CONFIG_MIPS)
|
|
+ ei_local->rx_ring1 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring1);
|
|
+#else
|
|
+ ei_local->rx_ring1 = dma_alloc_coherent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring1, GFP_KERNEL);
|
|
+
|
|
+#endif
|
|
+#endif
|
|
+ for (i = 0; i < NUM_RX_DESC; i++) {
|
|
+ memset(&ei_local->rx_ring1[i],0,sizeof(struct PDMA_rxdesc));
|
|
+ ei_local->rx_ring1[i].rxd_info2.DDONE_bit = 0;
|
|
+#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ ei_local->rx_ring1[i].rxd_info2.LS0 = 0;
|
|
+ ei_local->rx_ring1[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
|
|
+#else
|
|
+ ei_local->rx_ring1[i].rxd_info2.LS0 = 1;
|
|
+#endif
|
|
+ ei_local->rx_ring1[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx1_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
|
|
+ }
|
|
+ printk("\nphy_rx_ring1 = 0x%08x, rx_ring1 = 0x%p\n",ei_local->phy_rx_ring1,ei_local->rx_ring1);
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+	/* Initialize RX Ring 2 */
|
|
+ ei_local->rx_ring2 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring2);
|
|
+ for (i = 0; i < NUM_RX_DESC; i++) {
|
|
+ memset(&ei_local->rx_ring2[i],0,sizeof(struct PDMA_rxdesc));
|
|
+ ei_local->rx_ring2[i].rxd_info2.DDONE_bit = 0;
|
|
+ ei_local->rx_ring2[i].rxd_info2.LS0 = 0;
|
|
+ ei_local->rx_ring2[i].rxd_info2.PLEN0 = SET_ADMA_RX_LEN0(MAX_RX_LENGTH);
|
|
+ ei_local->rx_ring2[i].rxd_info2.PLEN1 = SET_ADMA_RX_LEN1(MAX_RX_LENGTH >> 14);
|
|
+ ei_local->rx_ring2[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx2_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
|
|
+ }
|
|
+ printk("\nphy_rx_ring2 = 0x%08x, rx_ring2 = 0x%p\n",ei_local->phy_rx_ring2,ei_local->rx_ring2);
|
|
+	/* Initialize RX Ring 3 */
|
|
+ ei_local->rx_ring3 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring3);
|
|
+ for (i = 0; i < NUM_RX_DESC; i++) {
|
|
+ memset(&ei_local->rx_ring3[i],0,sizeof(struct PDMA_rxdesc));
|
|
+ ei_local->rx_ring3[i].rxd_info2.DDONE_bit = 0;
|
|
+ ei_local->rx_ring3[i].rxd_info2.LS0 = 0;
|
|
+ ei_local->rx_ring3[i].rxd_info2.PLEN0 = SET_ADMA_RX_LEN0(MAX_RX_LENGTH);
|
|
+ ei_local->rx_ring3[i].rxd_info2.PLEN1 = SET_ADMA_RX_LEN1(MAX_RX_LENGTH >> 14);
|
|
+ ei_local->rx_ring3[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx3_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
|
|
+ }
|
|
+ printk("\nphy_rx_ring3 = 0x%08x, rx_ring3 = 0x%p\n",ei_local->phy_rx_ring3,ei_local->rx_ring3);
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+
|
|
+ regVal = sysRegRead(PDMA_GLO_CFG);
|
|
+ regVal &= 0x000000FF;
|
|
+ sysRegWrite(PDMA_GLO_CFG, regVal);
|
|
+ regVal=sysRegRead(PDMA_GLO_CFG);
|
|
+
|
|
+ /* Tell the adapter where the TX/RX rings are located. */
|
|
+#if !defined (CONFIG_RAETH_QOS)
|
|
+ sysRegWrite(TX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_tx_ring0));
|
|
+ sysRegWrite(TX_MAX_CNT0, cpu_to_le32((u32) NUM_TX_DESC));
|
|
+ sysRegWrite(TX_CTX_IDX0, 0);
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ tx_cpu_owner_idx0 = 0;
|
|
+#endif
|
|
+ sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX0);
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_PDMATX_QDMARX /* QDMA RX */
|
|
+ sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_qrx_ring));
|
|
+ sysRegWrite(QRX_MAX_CNT_0, cpu_to_le32((u32) NUM_QRX_DESC));
|
|
+ sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
|
|
+#else /* PDMA RX */
|
|
+ sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_rx_ring0));
|
|
+ sysRegWrite(RX_MAX_CNT0, cpu_to_le32((u32) NUM_RX_DESC));
|
|
+ sysRegWrite(RX_CALC_IDX0, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_calc_idx0 = sysRegRead(RX_CALC_IDX0);
|
|
+#endif
|
|
+ sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0);
|
|
+#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+ sysRegWrite(RX_BASE_PTR1, phys_to_bus((u32) ei_local->phy_rx_ring1));
|
|
+ sysRegWrite(RX_MAX_CNT1, cpu_to_le32((u32) NUM_RX_DESC));
|
|
+ sysRegWrite(RX_CALC_IDX1, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_calc_idx1 = sysRegRead(RX_CALC_IDX1);
|
|
+#endif
|
|
+ sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX1);
|
|
+#if defined(CONFIG_ARCH_MT7623)
|
|
+ sysRegWrite(RX_BASE_PTR2, phys_to_bus((u32) ei_local->phy_rx_ring2));
|
|
+ sysRegWrite(RX_MAX_CNT2, cpu_to_le32((u32) NUM_RX_DESC));
|
|
+ sysRegWrite(RX_CALC_IDX2, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
|
|
+ sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX2);
|
|
+ sysRegWrite(RX_BASE_PTR3, phys_to_bus((u32) ei_local->phy_rx_ring3));
|
|
+ sysRegWrite(RX_MAX_CNT3, cpu_to_le32((u32) NUM_RX_DESC));
|
|
+ sysRegWrite(RX_CALC_IDX3, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
|
|
+ sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX3);
|
|
+#endif /* CONFIG_ARCH_MT7623 */
|
|
+#endif
|
|
+#if defined (CONFIG_RALINK_RT6855A)
|
|
+ regVal = sysRegRead(RX_DRX_IDX0);
|
|
+ regVal = (regVal == 0)? (NUM_RX_DESC - 1) : (regVal - 1);
|
|
+ sysRegWrite(RX_CALC_IDX0, cpu_to_le32(regVal));
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_calc_idx0 = sysRegRead(RX_CALC_IDX0);
|
|
+#endif
|
|
+ regVal = sysRegRead(TX_DTX_IDX0);
|
|
+ sysRegWrite(TX_CTX_IDX0, cpu_to_le32(regVal));
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ tx_cpu_owner_idx0 = regVal;
|
|
+#endif
|
|
+ ei_local->free_idx = regVal;
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_QOS)
|
|
+ set_scheduler_weight();
|
|
+ set_schedule_pause_condition();
|
|
+ set_output_shaper();
|
|
+#endif
|
|
+
|
|
+ set_fe_dma_glo_cfg();
|
|
+
|
|
+ return 1;
|
|
+}
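+
+/* Note: fe_dma_init() above waits for PDMA (and, in the QDMA-RX build, QDMA)
+ * to go idle, allocates the TX ring(s) and RX ring(s) -- plus RX rings 1-3 on
+ * MT7623 -- programs the base/count/CPU-index registers, applies the QoS
+ * scheduler settings when CONFIG_RAETH_QOS is set, and finally calls
+ * set_fe_dma_glo_cfg(). */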
|
|
+
|
|
+inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no)
|
|
+{
|
|
+ unsigned int length=skb->len;
|
|
+ END_DEVICE* ei_local = netdev_priv(dev);
|
|
+#ifndef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ unsigned long tx_cpu_owner_idx0 = sysRegRead(TX_CTX_IDX0);
|
|
+#endif
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ unsigned long ctx_idx_start_addr = tx_cpu_owner_idx0;
|
|
+#endif
|
|
+ struct iphdr *iph = NULL;
|
|
+ struct tcphdr *th = NULL;
|
|
+ struct skb_frag_struct *frag;
|
|
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
|
|
+ int i=0;
|
|
+ unsigned int len, size, offset, frag_txd_num, skb_txd_num ;
|
|
+#endif // CONFIG_RAETH_TSO //
|
|
+
|
|
+#if defined (CONFIG_RAETH_TSOV6)
|
|
+ struct ipv6hdr *ip6h = NULL;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ PSEUDO_ADAPTER *pAd;
|
|
+#endif
|
|
+
|
|
+ while(ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0)
|
|
+ {
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ if (gmac_no == 2) {
|
|
+ if (ei_local->PseudoDev != NULL) {
|
|
+ pAd = netdev_priv(ei_local->PseudoDev);
|
|
+ pAd->stat.tx_errors++;
|
|
+ }
|
|
+ } else
|
|
+#endif
|
|
+ ei_local->stat.tx_errors++;
|
|
+ }
|
|
+
|
|
+#if !defined (CONFIG_RAETH_TSO)
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = virt_to_phys(skb->data);
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0 = length;
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FP_BMAP = 0;
|
|
+#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+ if (gmac_no == 1) {
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 1;
|
|
+ }else {
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 2;
|
|
+ }
|
|
+#else
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.PN = gmac_no;
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.QN = 3;
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined(CONFIG_RALINK_MT7628)
|
|
+ if (skb->ip_summed == CHECKSUM_PARTIAL){
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO = 7;
|
|
+ }else {
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO = 0;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_TX
|
|
+ if(vlan_tx_tag_present(skb)) {
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
|
|
+#else
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VPRI_VIDX = 0x80 | (vlan_tx_tag_get(skb) >> 13) << 4 | (vlan_tx_tag_get(skb) & 0xF);
|
|
+#endif
|
|
+ }else {
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0;
|
|
+#else
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VPRI_VIDX = 0;
|
|
+#endif
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+ raeth_pdma_tx_vlan_dvt( ei_local, tx_cpu_owner_idx0 );
|
|
+#endif /* CONFIG_RAETH_PDMA_DVT */
|
|
+
|
|
+#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
|
|
+ if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
|
|
+ if(ra_sw_nat_hook_rx!= NULL){
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FP_BMAP = (1 << 7); /* PPE */
|
|
+#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 4; /* PPE */
|
|
+#else
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.PN = 6; /* PPE */
|
|
+#endif
|
|
+ FOE_MAGIC_TAG(skb) = 0;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+ raeth_pdma_tx_desc_dvt( ei_local, tx_cpu_owner_idx0 );
|
|
+#endif /* CONFIG_RAETH_PDMA_DVT */
|
|
+
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit = 0;
|
|
+
|
|
+#if 0
|
|
+ printk("---------------\n");
|
|
+ printk("tx_info1=%x\n",ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1);
|
|
+ printk("tx_info2=%x\n",ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2);
|
|
+ printk("tx_info3=%x\n",ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info3);
|
|
+ printk("tx_info4=%x\n",ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4);
|
|
+#endif
|
|
+
|
|
+#else
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = virt_to_phys(skb->data);
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0 = (length - skb->data_len);
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS0_bit = nr_frags ? 0:1;
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FP_BMAP = 0;
|
|
+#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+ if (gmac_no == 1) {
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 1;
|
|
+ }else {
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 2;
|
|
+ }
|
|
+#else
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.PN = gmac_no;
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.QN = 3;
|
|
+#endif
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TSO = 0;
|
|
+
|
|
+#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined(CONFIG_RALINK_MT7628)
|
|
+ if (skb->ip_summed == CHECKSUM_PARTIAL){
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO = 7;
|
|
+ }else {
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.TUI_CO = 0;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_TX
|
|
+ if(vlan_tx_tag_present(skb)) {
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
|
|
+#else
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VPRI_VIDX = 0x80 | (vlan_tx_tag_get(skb) >> 13) << 4 | (vlan_tx_tag_get(skb) & 0xF);
|
|
+#endif
|
|
+ }else {
|
|
+#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VLAN_TAG = 0;
|
|
+#else
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.VPRI_VIDX = 0;
|
|
+#endif
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+ raeth_pdma_tx_vlan_dvt( ei_local, tx_cpu_owner_idx0 );
|
|
+#endif /* CONFIG_RAETH_PDMA_DVT */
|
|
+
|
|
+#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
|
|
+ if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
|
|
+ if(ra_sw_nat_hook_rx!= NULL){
|
|
+#if defined (CONFIG_RALINK_MT7620)
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FP_BMAP = (1 << 7); /* PPE */
|
|
+#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_ARCH_MT7623)
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 4; /* PPE */
|
|
+#else
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.PN = 6; /* PPE */
|
|
+#endif
|
|
+ FOE_MAGIC_TAG(skb) = 0;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ skb_txd_num = 1;
|
|
+
|
|
+ if(nr_frags > 0) {
|
|
+
|
|
+ for(i=0;i<nr_frags;i++) {
|
|
+ frag = &skb_shinfo(skb)->frags[i];
|
|
+ offset = frag->page_offset;
|
|
+ len = frag->size;
|
|
+ frag_txd_num = cal_frag_txd_num(len);
|
|
+
|
|
+ while(frag_txd_num > 0){
|
|
+ if(len < MAX_TXD_LEN)
|
|
+ size = len;
|
|
+ else
|
|
+ size = MAX_TXD_LEN;
|
|
+ if(skb_txd_num%2 == 0) {
|
|
+ tx_cpu_owner_idx0 = (tx_cpu_owner_idx0+1) % NUM_TX_DESC;
|
|
+
|
|
+ while(ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0)
|
|
+ {
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ if (gmac_no == 2) {
|
|
+					if (ei_local->PseudoDev != NULL) {
|
|
+						pAd = netdev_priv(ei_local->PseudoDev);
|
|
+						pAd->stat.tx_errors++;
|
|
+ }
|
|
+ } else
|
|
+#endif
|
|
+ ei_local->stat.tx_errors++;
|
|
+ }
|
|
+
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = pci_map_page(NULL, frag->page, offset, size, PCI_DMA_TODEVICE);
|
|
+#else
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = pci_map_page(NULL, frag->page.p, offset, size, PCI_DMA_TODEVICE);
|
|
+#endif
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0 = size;
|
|
+
|
|
+ if( (i==(nr_frags-1)) && (frag_txd_num == 1))
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS0_bit = 1;
|
|
+ else
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS0_bit = 0;
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit = 0;
|
|
+ }else {
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info3.SDP1 = pci_map_page(NULL, frag->page, offset, size, PCI_DMA_TODEVICE);
|
|
+#else
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info3.SDP1 = pci_map_page(NULL, frag->page.p, offset, size, PCI_DMA_TODEVICE);
|
|
+
|
|
+#endif
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL1 = size;
|
|
+ if( (i==(nr_frags-1)) && (frag_txd_num == 1))
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS1_bit = 1;
|
|
+ else
|
|
+ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.LS1_bit = 0;
|
|
+ }
|
|
+ offset += size;
|
|
+ len -= size;
|
|
+ frag_txd_num--;
|
|
+ skb_txd_num++;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+ if( (pdma_dvt_get_debug_test_config() & PDMA_TEST_TSO_DEBUG) ){
|
|
+ printk("skb_shinfo(skb)->gso_segs = %d\n", skb_shinfo(skb)->gso_segs);
|
|
+ }
|
|
+#endif /* CONFIG_RAETH_PDMA_DVT */
|
|
+ /* fill in MSS info in tcp checksum field */
|
|
+ if(skb_shinfo(skb)->gso_segs > 1) {
|
|
+
|
|
+// TsoLenUpdate(skb->len);
|
|
+
|
|
+ /* TCP over IPv4 */
|
|
+ iph = (struct iphdr *)skb_network_header(skb);
|
|
+#if defined (CONFIG_RAETH_TSOV6)
|
|
+ /* TCP over IPv6 */
|
|
+ ip6h = (struct ipv6hdr *)skb_network_header(skb);
|
|
+#endif
|
|
+ if((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
|
|
+ th = (struct tcphdr *)skb_transport_header(skb);
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ ei_local->tx_ring0[ctx_idx_start_addr].txd_info4.TSO = 1;
|
|
+#else
|
|
+ ei_local->tx_ring0[sysRegRead(TX_CTX_IDX0)].txd_info4.TSO = 1;
|
|
+#endif
|
|
+ th->check = htons(skb_shinfo(skb)->gso_size);
|
|
+#if defined (CONFIG_MIPS)
|
|
+ dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
|
|
+#else
|
|
+ dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
|
|
+#endif
|
|
+ }
|
|
+
|
|
+#if defined (CONFIG_RAETH_TSOV6)
|
|
+ /* TCP over IPv6 */
|
|
+ else if ((ip6h->version == 6) && (ip6h->nexthdr == NEXTHDR_TCP)) {
|
|
+ th = (struct tcphdr *)skb_transport_header(skb);
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ ei_local->tx_ring0[ctx_idx_start_addr].txd_info4.TSO = 1;
|
|
+#else
|
|
+ ei_local->tx_ring0[sysRegRead(TX_CTX_IDX0)].txd_info4.TSO = 1;
|
|
+#endif
|
|
+ th->check = htons(skb_shinfo(skb)->gso_size);
|
|
+#if defined (CONFIG_MIPS)
|
|
+ dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
|
|
+#else
|
|
+ dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
|
|
+#endif
|
|
+ }
|
|
+#endif // CONFIG_RAETH_TSOV6 //
|
|
+ }
|
|
+
|
|
+#if defined(CONFIG_RAETH_PDMA_DVT)
|
|
+ raeth_pdma_tx_desc_dvt( ei_local, tx_cpu_owner_idx0 );
|
|
+#endif /* CONFIG_RAETH_PDMA_DVT */
|
|
+
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ ei_local->tx_ring0[ctx_idx_start_addr].txd_info2.DDONE_bit = 0;
|
|
+#else
|
|
+ ei_local->tx_ring0[sysRegRead(TX_CTX_IDX0)].txd_info2.DDONE_bit = 0;
|
|
+#endif
|
|
+#endif // CONFIG_RAETH_TSO //
|
|
+
|
|
+ tx_cpu_owner_idx0 = (tx_cpu_owner_idx0+1) % NUM_TX_DESC;
|
|
+ while(ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0)
|
|
+ {
|
|
+// printk(KERN_ERR "%s: TXD=%lu TX DMA is Busy !!\n", dev->name, tx_cpu_owner_idx0);
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ if (gmac_no == 2) {
|
|
+ if (ei_local->PseudoDev != NULL) {
|
|
+ pAd = netdev_priv(ei_local->PseudoDev);
|
|
+ pAd->stat.tx_errors++;
|
|
+ }
|
|
+ } else
|
|
+#endif
|
|
+ ei_local->stat.tx_errors++;
|
|
+ }
|
|
+ sysRegWrite(TX_CTX_IDX0, cpu_to_le32((u32)tx_cpu_owner_idx0));
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ if (gmac_no == 2) {
|
|
+ if (ei_local->PseudoDev != NULL) {
|
|
+ pAd = netdev_priv(ei_local->PseudoDev);
|
|
+ pAd->stat.tx_packets++;
|
|
+ pAd->stat.tx_bytes += length;
|
|
+ }
|
|
+ } else
|
|
+#endif
|
|
+ {
|
|
+ ei_local->stat.tx_packets++;
|
|
+ ei_local->stat.tx_bytes += length;
|
|
+ }
|
|
+#ifdef CONFIG_RAETH_NAPI
|
|
+ if ( ei_local->tx_full == 1) {
|
|
+ ei_local->tx_full = 0;
|
|
+ netif_wake_queue(dev);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ return length;
|
|
+}
|
|
+
|
|
+int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no)
|
|
+{
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+ unsigned long flags;
|
|
+ unsigned long tx_cpu_owner_idx;
|
|
+ unsigned int tx_cpu_owner_idx_next;
|
|
+ unsigned int num_of_txd = 0;
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
|
|
+ struct skb_frag_struct *frag;
|
|
+#endif
|
|
+#if !defined(CONFIG_RAETH_QOS)
|
|
+ unsigned int tx_cpu_owner_idx_next2;
|
|
+#else
|
|
+ int ring_no, queue_no, port_no;
|
|
+#endif
|
|
+#ifdef CONFIG_RALINK_VISTA_BASIC
|
|
+ struct vlan_ethhdr *veth;
|
|
+#endif
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ PSEUDO_ADAPTER *pAd;
|
|
+#endif
|
|
+
|
|
+#if !defined(CONFIG_RA_NAT_NONE)
|
|
+ if(ra_sw_nat_hook_tx!= NULL)
|
|
+ {
|
|
+#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
|
|
+ if(FOE_MAGIC_TAG(skb) != FOE_MAGIC_PPE)
|
|
+#endif
|
|
+ {
|
|
+ //spin_lock_irqsave(&ei_local->page_lock, flags);
|
|
+ if(ra_sw_nat_hook_tx(skb, gmac_no)==1){
|
|
+ //spin_unlock_irqrestore(&ei_local->page_lock, flags);
|
|
+ }else{
|
|
+ kfree_skb(skb);
|
|
+ //spin_unlock_irqrestore(&ei_local->page_lock, flags);
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
|
|
+ /* Qwert+
|
|
+ */
|
|
+ if(ra_classifier_hook_tx!= NULL)
|
|
+ {
|
|
+#if defined(CONFIG_RALINK_EXTERNAL_TIMER)
|
|
+ ra_classifier_hook_tx(skb, (*((volatile u32 *)(0xB0000D08))&0x0FFFF));
|
|
+#else
|
|
+ ra_classifier_hook_tx(skb, read_c0_count());
|
|
+#endif
|
|
+ }
|
|
+#endif /* CONFIG_RA_CLASSIFIER */
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT3052_MP2)
|
|
+ mcast_tx(skb);
|
|
+#endif
|
|
+
|
|
+#if !defined (CONFIG_RALINK_RT6855) && !defined (CONFIG_RALINK_RT6855A) && \
|
|
+ !defined(CONFIG_RALINK_MT7621) && !defined (CONFIG_ARCH_MT7623)
|
|
+
|
|
+#define MIN_PKT_LEN 60
|
|
+ if (skb->len < MIN_PKT_LEN) {
|
|
+ if (skb_padto(skb, MIN_PKT_LEN)) {
|
|
+ printk("raeth: skb_padto failed\n");
|
|
+ return 0;
|
|
+ }
|
|
+ skb_put(skb, MIN_PKT_LEN - skb->len);
|
|
+ }
|
|
+#endif
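+	/* Note: 60 bytes is the minimum Ethernet frame size excluding the 4-byte
+	 * FCS.  skb_padto() zero-fills the tail without touching skb->len, so the
+	 * skb_put() above extends the length to match; the SoCs excluded by the
+	 * #if presumably pad short frames in hardware. */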
|
|
+
|
|
+ dev->trans_start = jiffies; /* save the timestamp */
|
|
+ spin_lock_irqsave(&ei_local->page_lock, flags);
|
|
+#if defined (CONFIG_MIPS)
|
|
+ dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
|
|
+#else
|
|
+ dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
|
|
+
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RALINK_VISTA_BASIC
|
|
+ veth = (struct vlan_ethhdr *)(skb->data);
|
|
+ if (is_switch_175c && veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
|
|
+ if ((veth->h_vlan_TCI & __constant_htons(VLAN_VID_MASK)) == 0) {
|
|
+ veth->h_vlan_TCI |= htons(VLAN_DEV_INFO(dev)->vlan_id);
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_QOS)
|
|
+ if(pkt_classifier(skb, gmac_no, &ring_no, &queue_no, &port_no)) {
|
|
+ get_tx_ctx_idx(ring_no, &tx_cpu_owner_idx);
|
|
+ tx_cpu_owner_idx_next = (tx_cpu_owner_idx + 1) % NUM_TX_DESC;
|
|
+ if(((ei_local->skb_free[ring_no][tx_cpu_owner_idx]) ==0) && (ei_local->skb_free[ring_no][tx_cpu_owner_idx_next]==0)){
|
|
+ fe_qos_packet_send(dev, skb, ring_no, queue_no, port_no);
|
|
+ }else{
|
|
+ ei_local->stat.tx_dropped++;
|
|
+ kfree_skb(skb);
|
|
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+#else
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ tx_cpu_owner_idx = tx_cpu_owner_idx0;
|
|
+#else
|
|
+ tx_cpu_owner_idx = sysRegRead(TX_CTX_IDX0);
|
|
+#endif
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+// num_of_txd = (nr_frags==0) ? 1 : ((nr_frags>>1) + 1);
|
|
+// NumOfTxdUpdate(num_of_txd);
|
|
+ if(nr_frags != 0){
|
|
+ for(i=0;i<nr_frags;i++) {
|
|
+ frag = &skb_shinfo(skb)->frags[i];
|
|
+ num_of_txd += cal_frag_txd_num(frag->size);
|
|
+ }
|
|
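+		/* Halving is presumably because one PDMA TXD carries two buffer
+		 * pointers (SDP0/SDP1), so two fragment chunks can share a
+		 * descriptor; the "+ 1" covers the linear skb header data.
+		 */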
+ num_of_txd = (num_of_txd >> 1) + 1;
|
|
+ }else
|
|
+ num_of_txd = 1;
|
|
+
|
|
+#else
|
|
+ num_of_txd = 1;
|
|
+#endif
|
|
+ tx_cpu_owner_idx_next = (tx_cpu_owner_idx + num_of_txd) % NUM_TX_DESC;
|
|
+
|
|
+ if(((ei_local->skb_free[tx_cpu_owner_idx]) ==0) && (ei_local->skb_free[tx_cpu_owner_idx_next]==0)){
|
|
+ rt2880_eth_send(dev, skb, gmac_no);
|
|
+
|
|
+ tx_cpu_owner_idx_next2 = (tx_cpu_owner_idx_next + 1) % NUM_TX_DESC;
|
|
+
|
|
+ if(ei_local->skb_free[tx_cpu_owner_idx_next2]!=0){
|
|
+#if defined (CONFIG_RAETH_SW_FC)
|
|
+ netif_stop_queue(dev);
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ netif_stop_queue(ei_local->PseudoDev);
|
|
+#endif
|
|
+ tx_ring_full=1;
|
|
+#endif
|
|
+ }
|
|
+ }else {
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ if (gmac_no == 2) {
|
|
+ if (ei_local->PseudoDev != NULL) {
|
|
+ pAd = netdev_priv(ei_local->PseudoDev);
|
|
+ pAd->stat.tx_dropped++;
|
|
+ }
|
|
+ } else
|
|
+#endif
|
|
+ ei_local->stat.tx_dropped++;
|
|
+#if defined (CONFIG_RAETH_SW_FC)
|
|
+ printk("tx_ring_full, drop packet\n");
|
|
+#endif
|
|
+ kfree_skb(skb);
|
|
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ /* SG: use multiple TXD to send the packet (only have one skb) */
|
|
+ ei_local->skb_free[(tx_cpu_owner_idx + num_of_txd - 1) % NUM_TX_DESC] = skb;
|
|
+ while(--num_of_txd) {
|
|
+ ei_local->skb_free[(tx_cpu_owner_idx + num_of_txd -1) % NUM_TX_DESC] = (struct sk_buff *)0xFFFFFFFF; //MAGIC ID
|
|
+ }
|
|
+#else
|
|
+ ei_local->skb_free[tx_cpu_owner_idx] = skb;
|
|
+#endif
|
|
+#endif
|
|
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void ei_xmit_housekeeping(unsigned long unused)
|
|
+{
|
|
+ struct net_device *dev = dev_raether;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+ struct PDMA_txdesc *tx_desc;
|
|
+ unsigned long skb_free_idx;
|
|
+ unsigned long tx_dtx_idx __maybe_unused;
|
|
+#ifndef CONFIG_RAETH_NAPI
|
|
+ unsigned long reg_int_mask=0;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_QOS
|
|
+ int i;
|
|
+ for (i=0;i<NUM_TX_RINGS;i++){
|
|
+ skb_free_idx = ei_local->free_idx[i];
|
|
+ if((ei_local->skb_free[i][skb_free_idx])==0){
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ get_tx_desc_and_dtx_idx(ei_local, i, &tx_dtx_idx, &tx_desc);
|
|
+
|
|
+ while(tx_desc[skb_free_idx].txd_info2.DDONE_bit==1 && (ei_local->skb_free[i][skb_free_idx])!=0 ){
|
|
+ dev_kfree_skb_any((ei_local->skb_free[i][skb_free_idx]));
|
|
+
|
|
+ ei_local->skb_free[i][skb_free_idx]=0;
|
|
+ skb_free_idx = (skb_free_idx +1) % NUM_TX_DESC;
|
|
+ }
|
|
+ ei_local->free_idx[i] = skb_free_idx;
|
|
+ }
|
|
+#else
|
|
+ tx_dtx_idx = sysRegRead(TX_DTX_IDX0);
|
|
+ tx_desc = ei_local->tx_ring0;
|
|
+ skb_free_idx = ei_local->free_idx;
|
|
+ if ((ei_local->skb_free[skb_free_idx]) != 0 && tx_desc[skb_free_idx].txd_info2.DDONE_bit==1) {
|
|
+ while(tx_desc[skb_free_idx].txd_info2.DDONE_bit==1 && (ei_local->skb_free[skb_free_idx])!=0 ){
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ if(ei_local->skb_free[skb_free_idx]!=(struct sk_buff *)0xFFFFFFFF) {
|
|
+ dev_kfree_skb_any(ei_local->skb_free[skb_free_idx]);
|
|
+ }
|
|
+#else
|
|
+ dev_kfree_skb_any(ei_local->skb_free[skb_free_idx]);
|
|
+#endif
|
|
+ ei_local->skb_free[skb_free_idx]=0;
|
|
+ skb_free_idx = (skb_free_idx +1) % NUM_TX_DESC;
|
|
+ }
|
|
+
|
|
+ netif_wake_queue(dev);
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ netif_wake_queue(ei_local->PseudoDev);
|
|
+#endif
|
|
+ tx_ring_full=0;
|
|
+ ei_local->free_idx = skb_free_idx;
|
|
+ } /* if skb_free != 0 */
|
|
+#endif
|
|
+
|
|
+#ifndef CONFIG_RAETH_NAPI
|
|
+ reg_int_mask=sysRegRead(FE_INT_ENABLE);
|
|
+#if defined (DELAY_INT)
|
|
+ sysRegWrite(FE_INT_ENABLE, reg_int_mask| TX_DLY_INT);
|
|
+#else
|
|
+
|
|
+ sysRegWrite(FE_INT_ENABLE, reg_int_mask | TX_DONE_INT0 \
|
|
+ | TX_DONE_INT1 \
|
|
+ | TX_DONE_INT2 \
|
|
+ | TX_DONE_INT3);
|
|
+#endif
|
|
+#endif //CONFIG_RAETH_NAPI//
|
|
+}
|
|
+
|
|
+
|
|
+
|
|
+EXPORT_SYMBOL(ei_start_xmit);
|
|
+EXPORT_SYMBOL(ei_xmit_housekeeping);
|
|
+EXPORT_SYMBOL(fe_dma_init);
|
|
+EXPORT_SYMBOL(rt2880_eth_send);
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/raether_qdma.c
|
|
@@ -0,0 +1,1407 @@
|
|
+#include <linux/module.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/pci.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/if_vlan.h>
|
|
+#include <linux/if_ether.h>
|
|
+#include <linux/fs.h>
|
|
+#include <asm/uaccess.h>
|
|
+#include <asm/rt2880/surfboardint.h>
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+#include <linux/tcp.h>
|
|
+#include <net/ipv6.h>
|
|
+#include <linux/ip.h>
|
|
+#include <net/ip.h>
|
|
+#include <net/tcp.h>
|
|
+#include <linux/in.h>
|
|
+#include <linux/ppp_defs.h>
|
|
+#include <linux/if_pppox.h>
|
|
+#endif
|
|
+#include <linux/delay.h>
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+#include <linux/sched.h>
|
|
+#endif
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+#include <linux/if_vlan.h>
|
|
+#include <net/ipv6.h>
|
|
+#include <net/ip.h>
|
|
+#include <linux/if_pppox.h>
|
|
+#include <linux/ppp_defs.h>
|
|
+#endif
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
|
|
+#include <asm/rt2880/rt_mmap.h>
|
|
+#else
|
|
+#include <linux/libata-compat.h>
|
|
+#endif
|
|
+
|
|
+#include "ra2882ethreg.h"
|
|
+#include "raether.h"
|
|
+#include "ra_mac.h"
|
|
+#include "ra_ioctl.h"
|
|
+#include "ra_rfrw.h"
|
|
+#ifdef CONFIG_RAETH_NETLINK
|
|
+#include "ra_netlink.h"
|
|
+#endif
|
|
+#if defined (CONFIG_RAETH_QOS)
|
|
+#include "ra_qos.h"
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
|
|
+#include "../../../net/nat/hw_nat/ra_nat.h"
|
|
+#endif
|
|
+
|
|
+
|
|
+#if !defined(CONFIG_RA_NAT_NONE)
|
|
+/* bruce+
|
|
+ */
|
|
+extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
|
|
+extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
|
|
+/* Qwert+
|
|
+ */
|
|
+#include <asm/mipsregs.h>
|
|
+extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle);
|
|
+extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle);
|
|
+#endif /* CONFIG_RA_CLASSIFIER */
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT3052_MP2)
|
|
+int32_t mcast_rx(struct sk_buff * skb);
|
|
+int32_t mcast_tx(struct sk_buff * skb);
|
|
+#endif
|
|
+
|
|
+#ifdef RA_MTD_RW_BY_NUM
|
|
+int ra_mtd_read(int num, loff_t from, size_t len, u_char *buf);
|
|
+#else
|
|
+int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf);
|
|
+#endif
|
|
+
|
|
+/* gmac driver feature set config */
|
|
+#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS)
|
|
+#undef DELAY_INT
|
|
+#else
|
|
+#if defined (CONFIG_ARCH_MT7623)
|
|
+#undef DELAY_INT
|
|
+#else
|
|
+#define DELAY_INT 1
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+//#define CONFIG_UNH_TEST
|
|
+/* end of config */
|
|
+
|
|
+#if defined (CONFIG_RAETH_JUMBOFRAME)
|
|
+#define MAX_RX_LENGTH 4096
|
|
+#else
|
|
+#define MAX_RX_LENGTH 1536
|
|
+#endif
|
|
+
|
|
+extern struct net_device *dev_raether;
|
|
+
|
|
+#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+static int rx_dma_owner_idx1;
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+static int rx_calc_idx1;
|
|
+#endif
|
|
+#endif
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+static int rx_calc_idx0;
|
|
+static unsigned long tx_cpu_owner_idx0=0;
|
|
+#endif
|
|
+extern unsigned long tx_ring_full;
|
|
+
|
|
+#if defined (CONFIG_ETHTOOL) && defined (CONFIG_RAETH_ROUTER)
|
|
+#include "ra_ethtool.h"
|
|
+extern struct ethtool_ops ra_ethtool_ops;
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+extern struct ethtool_ops ra_virt_ethtool_ops;
|
|
+#endif // CONFIG_PSEUDO_SUPPORT //
|
|
+#endif // CONFIG_ETHTOOL //
|
|
+
|
|
+#ifdef CONFIG_RALINK_VISTA_BASIC
|
|
+int is_switch_175c = 1;
|
|
+#endif
|
|
+
|
|
+//skb->mark to queue mapping table
|
|
+extern unsigned int M2Q_table[64];
|
|
+struct QDMA_txdesc *free_head = NULL;
|
|
+extern unsigned int lan_wan_separate;
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+extern unsigned int web_sfq_enable;
|
|
+#define HwSfqQUp 3
|
|
+#define HwSfqQDl 1
|
|
+#endif
|
|
+int dbg =0;//debug used
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+struct SFQ_table *sfq0;
|
|
+struct SFQ_table *sfq1;
|
|
+struct SFQ_table *sfq2;
|
|
+struct SFQ_table *sfq3;
|
|
+#endif
|
|
+
|
|
+#define KSEG1 0xa0000000
|
|
+#if defined (CONFIG_MIPS)
|
|
+#define PHYS_TO_VIRT(x) ((void *)((x) | KSEG1))
|
|
+#define VIRT_TO_PHYS(x) ((unsigned long)(x) & ~KSEG1)
|
|
+#else
|
|
+#define PHYS_TO_VIRT(x) phys_to_virt(x)
|
|
+#define VIRT_TO_PHYS(x) virt_to_phys(x)
|
|
+#endif
|
|
+
|
|
+extern void set_fe_dma_glo_cfg(void);
|
|
+
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ParseResult SfqParseResult;
|
|
+#endif
|
|
+
|
|
+/**
|
|
+ *
|
|
+ * @brief: get the TXD index from its address
|
|
+ *
|
|
+ * @param: cpu_ptr
|
|
+ *
|
|
+ * @return: TXD index
|
|
+*/
|
|
+
|
|
+static unsigned int GET_TXD_OFFSET(struct QDMA_txdesc **cpu_ptr)
|
|
+{
|
|
+ struct net_device *dev = dev_raether;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+ int ctx_offset;
|
|
+ //ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc);
|
|
+ //ctx_offset = (*cpu_ptr - ei_local->txd_pool);
|
|
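+	/* The (x<<8)>>8 masking clears the top address byte (presumably the
+	 * cached/uncached segment bits), so the pointer handed back by the
+	 * hardware and phy_txd_pool compare on the same base; the byte offset
+	 * divided by sizeof(struct QDMA_txdesc) is the descriptor index.
+	 */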
+ ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->phy_txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc);
|
|
+
|
|
+ return ctx_offset;
|
|
+}
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+/**
|
|
+ * @brief calculate the number of TXDs needed for a buffer of the given size
|
|
+ *
|
|
+ * @param size
|
|
+ *
|
|
+ * @return frag_txd_num
|
|
+ */
|
|
+
|
|
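+/* Effectively a ceiling division: the number of TXDs needed to cover "size"
+ * bytes in chunks of at most MAX_TXD_LEN (0 bytes need 0 TXDs). */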
+unsigned int cal_frag_txd_num(unsigned int size)
|
|
+{
|
|
+ unsigned int frag_txd_num = 0;
|
|
+ if(size == 0)
|
|
+ return 0;
|
|
+ while(size > 0){
|
|
+ if(size > MAX_TXD_LEN){
|
|
+ frag_txd_num++;
|
|
+ size -= MAX_TXD_LEN;
|
|
+ }else{
|
|
+ frag_txd_num++;
|
|
+ size = 0;
|
|
+ }
|
|
+ }
|
|
+ return frag_txd_num;
|
|
+
|
|
+}
|
|
+
|
|
+/**
|
|
+ * @brief get free TXD from TXD queue
|
|
+ *
|
|
+ * @param free_txd
|
|
+ *
|
|
+ * @return
|
|
+ */
|
|
+static int get_free_txd(struct QDMA_txdesc **free_txd)
|
|
+{
|
|
+ struct net_device *dev = dev_raether;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+ unsigned int tmp_idx;
|
|
+
|
|
+ if(ei_local->free_txd_num > 0){
|
|
+ tmp_idx = ei_local->free_txd_head;
|
|
+ ei_local->free_txd_head = ei_local->txd_pool_info[tmp_idx];
|
|
+ ei_local->free_txd_num -= 1;
|
|
+ //*free_txd = &ei_local->txd_pool[tmp_idx];
|
|
+ *free_txd = ei_local->phy_txd_pool + (sizeof(struct QDMA_txdesc) * tmp_idx);
|
|
+ return tmp_idx;
|
|
+ }else
|
|
+ return NUM_TX_DESC;
|
|
+}
|
|
+
|
|
+
|
|
+/**
|
|
+ * @brief add free TXD into TXD queue
|
|
+ *
|
|
+ * @param free_txd
|
|
+ *
|
|
+ * @return
|
|
+ */
|
|
+int put_free_txd(int free_txd_idx)
|
|
+{
|
|
+ struct net_device *dev = dev_raether;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+ ei_local->txd_pool_info[ei_local->free_txd_tail] = free_txd_idx;
|
|
+ ei_local->free_txd_tail = free_txd_idx;
|
|
+ ei_local->txd_pool_info[free_txd_idx] = NUM_TX_DESC;
|
|
+ ei_local->free_txd_num += 1;
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+/*define qdma initial alloc*/
|
|
+/**
|
|
+ * @brief
|
|
+ *
|
|
+ * @param net_dev
|
|
+ *
|
|
+ * @return 0: fail
|
|
+ * 1: success
|
|
+ */
|
|
+bool qdma_tx_desc_alloc(void)
|
|
+{
|
|
+ struct net_device *dev = dev_raether;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+ struct QDMA_txdesc *free_txd = NULL;
|
|
+ unsigned int txd_idx;
|
|
+ int i = 0;
|
|
+
|
|
+
|
|
+ ei_local->txd_pool = pci_alloc_consistent(NULL, sizeof(struct QDMA_txdesc) * NUM_TX_DESC, &ei_local->phy_txd_pool);
|
|
+ printk("txd_pool=%p phy_txd_pool=%08X\n", ei_local->txd_pool , ei_local->phy_txd_pool);
|
|
+
|
|
+ if (ei_local->txd_pool == NULL) {
|
|
+ printk("adapter->txd_pool allocation failed!\n");
|
|
+ return 0;
|
|
+ }
|
|
+ printk("ei_local->skb_free start address is 0x%p.\n", ei_local->skb_free);
|
|
+	//initialize the TXD free list: clear skb_free[] and chain txd_pool_info[i] to the next index
|
|
+ for ( i = 0; i < NUM_TX_DESC; i++)
|
|
+ {
|
|
+ ei_local->skb_free[i]= 0;
|
|
+ ei_local->txd_pool_info[i] = i + 1;
|
|
+ ei_local->txd_pool[i].txd_info3.LS_bit = 1;
|
|
+ ei_local->txd_pool[i].txd_info3.OWN_bit = 1;
|
|
+ }
|
|
+
|
|
+ ei_local->free_txd_head = 0;
|
|
+ ei_local->free_txd_tail = NUM_TX_DESC - 1;
|
|
+ ei_local->free_txd_num = NUM_TX_DESC;
|
|
+
|
|
+
|
|
+ //get free txd from txd pool
|
|
+ txd_idx = get_free_txd(&free_txd);
|
|
+ if( txd_idx == NUM_TX_DESC) {
|
|
+ printk("get_free_txd fail\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ //add null TXD for transmit
|
|
+ //ei_local->tx_dma_ptr = VIRT_TO_PHYS(free_txd);
|
|
+ //ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd);
|
|
+ ei_local->tx_dma_ptr = free_txd;
|
|
+ ei_local->tx_cpu_ptr = free_txd;
|
|
+ sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
|
|
+ sysRegWrite(QTX_DTX_PTR, ei_local->tx_dma_ptr);
|
|
+
|
|
+ //get free txd from txd pool
|
|
+
|
|
+ txd_idx = get_free_txd(&free_txd);
|
|
+ if( txd_idx == NUM_TX_DESC) {
|
|
+ printk("get_free_txd fail\n");
|
|
+ return 0;
|
|
+ }
|
|
+ // add null TXD for release
|
|
+ //sysRegWrite(QTX_CRX_PTR, VIRT_TO_PHYS(free_txd));
|
|
+ //sysRegWrite(QTX_DRX_PTR, VIRT_TO_PHYS(free_txd));
|
|
+ sysRegWrite(QTX_CRX_PTR, free_txd);
|
|
+ sysRegWrite(QTX_DRX_PTR, free_txd);
|
|
+ printk("free_txd: %p, ei_local->cpu_ptr: %08X\n", free_txd, ei_local->tx_cpu_ptr);
|
|
+
|
|
+ printk(" POOL HEAD_PTR | DMA_PTR | CPU_PTR \n");
|
|
+ printk("----------------+---------+--------\n");
|
|
+ printk(" 0x%p 0x%08X 0x%08X\n",ei_local->txd_pool, ei_local->tx_dma_ptr, ei_local->tx_cpu_ptr);
|
|
+ return 1;
|
|
+}
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+bool sfq_init(void)
|
|
+{
|
|
+ unsigned int regVal;
|
|
+
|
|
+ unsigned int sfq_phy0;
|
|
+ unsigned int sfq_phy1;
|
|
+ unsigned int sfq_phy2;
|
|
+ unsigned int sfq_phy3;
|
|
+ struct SFQ_table *sfq0;
|
|
+ struct SFQ_table *sfq1;
|
|
+ struct SFQ_table *sfq2;
|
|
+ struct SFQ_table *sfq3;
|
|
+ int i = 0;
|
|
+ regVal = sysRegRead(VQTX_GLO);
|
|
+ regVal = regVal | VQTX_MIB_EN |(1<<16) ;
|
|
+	sysRegWrite(VQTX_GLO, regVal); // Virtual table extends to 32 bytes
|
|
+ regVal = sysRegRead(VQTX_GLO);
|
|
+ sysRegWrite(VQTX_NUM, (VQTX_NUM_0) | (VQTX_NUM_1) | (VQTX_NUM_2) | (VQTX_NUM_3));
|
|
+	sysRegWrite(VQTX_HASH_CFG, 0xF002710); //change the hash algorithm every 10 s
|
|
+ sysRegWrite(VQTX_VLD_CFG, 0x00);
|
|
+ sysRegWrite(VQTX_HASH_SD, 0x0D);
|
|
+ sysRegWrite(QDMA_FC_THRES, 0x9b9b4444);
|
|
+ sysRegWrite(QDMA_HRED1, 0);
|
|
+ sysRegWrite(QDMA_HRED2, 0);
|
|
+ sysRegWrite(QDMA_SRED1, 0);
|
|
+ sysRegWrite(QDMA_SRED2, 0);
|
|
+ sfq0 = pci_alloc_consistent(NULL, 256*sizeof(struct SFQ_table), &sfq_phy0);
|
|
+ memset(sfq0, 0x0, 256*sizeof(struct SFQ_table) );
|
|
+ for (i=0; i < 256; i++) {
|
|
+ sfq0[i].sfq_info1.VQHPTR = 0xdeadbeef;
|
|
+ sfq0[i].sfq_info2.VQTPTR = 0xdeadbeef;
|
|
+ }
|
|
+#if(1)
|
|
+ sfq1 = pci_alloc_consistent(NULL, 256*sizeof(struct SFQ_table), &sfq_phy1);
|
|
+
|
|
+ memset(sfq1, 0x0, 256*sizeof(struct SFQ_table) );
|
|
+ for (i=0; i < 256; i++) {
|
|
+ sfq1[i].sfq_info1.VQHPTR = 0xdeadbeef;
|
|
+ sfq1[i].sfq_info2.VQTPTR = 0xdeadbeef;
|
|
+ }
|
|
+
|
|
+ sfq2 = pci_alloc_consistent(NULL, 256*sizeof(struct SFQ_table), &sfq_phy2);
|
|
+ memset(sfq2, 0x0, 256*sizeof(struct SFQ_table) );
|
|
+ for (i=0; i < 256; i++) {
|
|
+ sfq2[i].sfq_info1.VQHPTR = 0xdeadbeef;
|
|
+ sfq2[i].sfq_info2.VQTPTR = 0xdeadbeef;
|
|
+ }
|
|
+
|
|
+ sfq3 = pci_alloc_consistent(NULL, 256*sizeof(struct SFQ_table), &sfq_phy3);
|
|
+ memset(sfq3, 0x0, 256*sizeof(struct SFQ_table) );
|
|
+ for (i=0; i < 256; i++) {
|
|
+ sfq3[i].sfq_info1.VQHPTR = 0xdeadbeef;
|
|
+ sfq3[i].sfq_info2.VQTPTR = 0xdeadbeef;
|
|
+ }
|
|
+
|
|
+#endif
|
|
+ printk("*****sfq_phy0 is 0x%x!!!*******\n", sfq_phy0);
|
|
+ printk("*****sfq_phy1 is 0x%x!!!*******\n", sfq_phy1);
|
|
+ printk("*****sfq_phy2 is 0x%x!!!*******\n", sfq_phy2);
|
|
+ printk("*****sfq_phy3 is 0x%x!!!*******\n", sfq_phy3);
|
|
+ printk("*****sfq_virt0 is 0x%x!!!*******\n", sfq0);
|
|
+ printk("*****sfq_virt1 is 0x%x!!!*******\n", sfq1);
|
|
+ printk("*****sfq_virt2 is 0x%x!!!*******\n", sfq2);
|
|
+ printk("*****sfq_virt3 is 0x%x!!!*******\n", sfq3);
|
|
+ sysRegWrite(VQTX_TB_BASE0, (u32)sfq_phy0);
|
|
+ sysRegWrite(VQTX_TB_BASE1, (u32)sfq_phy1);
|
|
+ sysRegWrite(VQTX_TB_BASE2, (u32)sfq_phy2);
|
|
+ sysRegWrite(VQTX_TB_BASE3, (u32)sfq_phy3);
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+#endif
|
|
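+/* Sets up the QDMA "FQ" (the hardware free queue, as suggested by the
+ * QDMA_FQ_* register names): NUM_QDMA_PAGE descriptors are chained through
+ * their NDP fields, each pointing at a QDMA_PAGE_SIZE buffer, and the
+ * head/tail/count/buffer-length registers are programmed so the engine can
+ * fetch scratch buffers on its own. */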
+bool fq_qdma_init(struct net_device *dev)
|
|
+{
|
|
+ END_DEVICE* ei_local = netdev_priv(dev);
|
|
+ //struct QDMA_txdesc *free_head = NULL;
|
|
+ unsigned int phy_free_head;
|
|
+ unsigned int phy_free_tail;
|
|
+ unsigned int *free_page_head = NULL;
|
|
+ unsigned int phy_free_page_head;
|
|
+ int i;
|
|
+
|
|
+ free_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), &phy_free_head);
|
|
+ if (unlikely(free_head == NULL)){
|
|
+		printk(KERN_ERR "QDMA FQ descriptor not available...\n");
|
|
+ return 0;
|
|
+ }
|
|
+ memset(free_head, 0x0, sizeof(struct QDMA_txdesc) * NUM_QDMA_PAGE);
|
|
+
|
|
+ free_page_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, &phy_free_page_head);
|
|
+ if (unlikely(free_page_head == NULL)){
|
|
+ printk(KERN_ERR "QDMA FQ page not available...\n");
|
|
+ return 0;
|
|
+ }
|
|
+ for (i=0; i < NUM_QDMA_PAGE; i++) {
|
|
+ free_head[i].txd_info1.SDP = (phy_free_page_head + (i * QDMA_PAGE_SIZE));
|
|
+ if(i < (NUM_QDMA_PAGE-1)){
|
|
+ free_head[i].txd_info2.NDP = (phy_free_head + ((i+1) * sizeof(struct QDMA_txdesc)));
|
|
+
|
|
+
|
|
+#if 0
|
|
+ printk("free_head_phy[%d] is 0x%x!!!\n",i, VIRT_TO_PHYS(&free_head[i]) );
|
|
+ printk("free_head[%d] is 0x%x!!!\n",i, &free_head[i] );
|
|
+ printk("free_head[%d].txd_info1.SDP is 0x%x!!!\n",i, free_head[i].txd_info1.SDP );
|
|
+ printk("free_head[%d].txd_info2.NDP is 0x%x!!!\n",i, free_head[i].txd_info2.NDP );
|
|
+#endif
|
|
+ }
|
|
+ free_head[i].txd_info3.SDL = QDMA_PAGE_SIZE;
|
|
+
|
|
+ }
|
|
+ phy_free_tail = (phy_free_head + (u32)((NUM_QDMA_PAGE-1) * sizeof(struct QDMA_txdesc)));
|
|
+
|
|
+ printk("phy_free_head is 0x%x!!!\n", phy_free_head);
|
|
+ printk("phy_free_tail_phy is 0x%x!!!\n", phy_free_tail);
|
|
+ sysRegWrite(QDMA_FQ_HEAD, (u32)phy_free_head);
|
|
+ sysRegWrite(QDMA_FQ_TAIL, (u32)phy_free_tail);
|
|
+ sysRegWrite(QDMA_FQ_CNT, ((NUM_TX_DESC << 16) | NUM_QDMA_PAGE));
|
|
+ sysRegWrite(QDMA_FQ_BLEN, QDMA_PAGE_SIZE << 16);
|
|
+
|
|
+ ei_local->free_head = free_head;
|
|
+ ei_local->phy_free_head = phy_free_head;
|
|
+ ei_local->free_page_head = free_page_head;
|
|
+ ei_local->phy_free_page_head = phy_free_page_head;
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+int fe_dma_init(struct net_device *dev)
|
|
+{
|
|
+
|
|
+ int i;
|
|
+ unsigned int regVal;
|
|
+ END_DEVICE* ei_local = netdev_priv(dev);
|
|
+
|
|
+
|
|
+ #if defined (CONFIG_HW_SFQ)
|
|
+ sfq_init();
|
|
+ #endif
|
|
+ fq_qdma_init(dev);
|
|
+
|
|
+ while(1)
|
|
+ {
|
|
+ regVal = sysRegRead(QDMA_GLO_CFG);
|
|
+ if((regVal & RX_DMA_BUSY))
|
|
+ {
|
|
+ printk("\n RX_DMA_BUSY !!! ");
|
|
+ continue;
|
|
+ }
|
|
+ if((regVal & TX_DMA_BUSY))
|
|
+ {
|
|
+ printk("\n TX_DMA_BUSY !!! ");
|
|
+ continue;
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ /*tx desc alloc, add a NULL TXD to HW*/
|
|
+
|
|
+ qdma_tx_desc_alloc();
|
|
+
|
|
+ /* Initial RX Ring 0*/
|
|
+
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ ei_local->qrx_ring = kmalloc(NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
|
|
+ ei_local->phy_qrx_ring = virt_to_phys(ei_local->qrx_ring);
|
|
+#else
|
|
+ ei_local->qrx_ring = pci_alloc_consistent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_qrx_ring);
|
|
+#endif
|
|
+ for (i = 0; i < NUM_QRX_DESC; i++) {
|
|
+ memset(&ei_local->qrx_ring[i],0,sizeof(struct PDMA_rxdesc));
|
|
+ ei_local->qrx_ring[i].rxd_info2.DDONE_bit = 0;
|
|
+#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ ei_local->qrx_ring[i].rxd_info2.LS0 = 0;
|
|
+ ei_local->qrx_ring[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
|
|
+#else
|
|
+ ei_local->qrx_ring[i].rxd_info2.LS0 = 1;
|
|
+#endif
|
|
+ ei_local->qrx_ring[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
|
|
+ }
|
|
+ printk("\nphy_qrx_ring = 0x%08x, qrx_ring = 0x%p\n",ei_local->phy_qrx_ring,ei_local->qrx_ring);
|
|
+
|
|
+ regVal = sysRegRead(QDMA_GLO_CFG);
|
|
+ regVal &= 0x000000FF;
|
|
+
|
|
+ sysRegWrite(QDMA_GLO_CFG, regVal);
|
|
+ regVal=sysRegRead(QDMA_GLO_CFG);
|
|
+
|
|
+ /* Tell the adapter where the TX/RX rings are located. */
|
|
+
|
|
+ sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_qrx_ring));
|
|
+ sysRegWrite(QRX_MAX_CNT_0, cpu_to_le32((u32) NUM_QRX_DESC));
|
|
+ sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_calc_idx0 = rx_dma_owner_idx0 = sysRegRead(QRX_CRX_IDX_0);
|
|
+#endif
|
|
+ sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX0);
|
|
+
|
|
+ ei_local->rx_ring0 = ei_local->qrx_ring;
|
|
+#if !defined (CONFIG_RAETH_QDMATX_QDMARX)
|
|
+ /* Initial PDMA RX Ring 0*/
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ ei_local->rx_ring0 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
|
|
+ ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0);
|
|
+#else
|
|
+ ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0);
|
|
+#endif
|
|
+ for (i = 0; i < NUM_RX_DESC; i++) {
|
|
+ memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc));
|
|
+ ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0;
|
|
+#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
|
|
+ ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
|
|
+#else
|
|
+ ei_local->rx_ring0[i].rxd_info2.LS0 = 1;
|
|
+#endif
|
|
+ ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
|
|
+ }
|
|
+ printk("\nphy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0);
|
|
+
|
|
+ regVal = sysRegRead(PDMA_GLO_CFG);
|
|
+ regVal &= 0x000000FF;
|
|
+ sysRegWrite(PDMA_GLO_CFG, regVal);
|
|
+ regVal=sysRegRead(PDMA_GLO_CFG);
|
|
+
|
|
+ sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_rx_ring0));
|
|
+ sysRegWrite(RX_MAX_CNT0, cpu_to_le32((u32) NUM_RX_DESC));
|
|
+ sysRegWrite(RX_CALC_IDX0, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_calc_idx0 = sysRegRead(RX_CALC_IDX0);
|
|
+#endif
|
|
+ sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0);
|
|
+#endif
|
|
+#if !defined (CONFIG_HW_SFQ)
|
|
+	/* Enable random early drop and set drop threshold automatically */
|
|
+ sysRegWrite(QDMA_FC_THRES, 0x174444);
|
|
+#endif
|
|
+ sysRegWrite(QDMA_HRED2, 0x0);
|
|
+ set_fe_dma_glo_cfg();
|
|
+#if defined (CONFIG_ARCH_MT7623)
|
|
+ printk("Enable QDMA TX NDP coherence check and re-read mechanism\n");
|
|
+ regVal=sysRegRead(QDMA_GLO_CFG);
|
|
+ regVal = regVal | 0x400;
|
|
+ sysRegWrite(QDMA_GLO_CFG, regVal);
|
|
+ printk("***********QDMA_GLO_CFG=%x\n", sysRegRead(QDMA_GLO_CFG));
|
|
+#endif
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+
|
|
+int sfq_prot = 0;
|
|
+int proto_id=0;
|
|
+int udp_source_port=0;
|
|
+int tcp_source_port=0;
|
|
+int ack_packt =0;
|
|
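+/* Fills the global SfqParseResult with the L2/L3/L4 headers of the outgoing
+ * packet and sets sfq_prot (1 = other IPv4, 2 = IPv4 5-tuple, 3 = IPv6
+ * 3-tuple, 4 = IPv6 5-tuple) plus the TCP/UDP source port; the TX path below
+ * uses these to fill the PROT/IPOFST/VQID descriptor fields for hardware SFQ
+ * classification. */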
+int SfqParseLayerInfo(struct sk_buff * skb)
|
|
+{
|
|
+
|
|
+ struct vlan_hdr *vh_sfq = NULL;
|
|
+ struct ethhdr *eth_sfq = NULL;
|
|
+ struct iphdr *iph_sfq = NULL;
|
|
+ struct ipv6hdr *ip6h_sfq = NULL;
|
|
+ struct tcphdr *th_sfq = NULL;
|
|
+ struct udphdr *uh_sfq = NULL;
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_TX
|
|
+ struct vlan_hdr pseudo_vhdr_sfq;
|
|
+#endif
|
|
+
|
|
+ memset(&SfqParseResult, 0, sizeof(SfqParseResult));
|
|
+
|
|
+ eth_sfq = (struct ethhdr *)skb->data;
|
|
+ memcpy(SfqParseResult.dmac, eth_sfq->h_dest, ETH_ALEN);
|
|
+ memcpy(SfqParseResult.smac, eth_sfq->h_source, ETH_ALEN);
|
|
+ SfqParseResult.eth_type = eth_sfq->h_proto;
|
|
+
|
|
+
|
|
+ if (SfqParseResult.eth_type == htons(ETH_P_8021Q)){
|
|
+ SfqParseResult.vlan1_gap = VLAN_HLEN;
|
|
+ vh_sfq = (struct vlan_hdr *)(skb->data + ETH_HLEN);
|
|
+ SfqParseResult.eth_type = vh_sfq->h_vlan_encapsulated_proto;
|
|
+ }else{
|
|
+ SfqParseResult.vlan1_gap = 0;
|
|
+ }
|
|
+
|
|
+
|
|
+
|
|
+ LAYER2_HEADER(skb) = skb->data;
|
|
+ LAYER3_HEADER(skb) = (skb->data + ETH_HLEN + (SfqParseResult.vlan1_gap));
|
|
+
|
|
+
|
|
+
|
|
+ /* set layer4 start addr */
|
|
+ if ((SfqParseResult.eth_type == htons(ETH_P_IP)) || (SfqParseResult.eth_type == htons(ETH_P_PPP_SES)
|
|
+ && SfqParseResult.ppp_tag == htons(PPP_IP))) {
|
|
+ iph_sfq = (struct iphdr *)LAYER3_HEADER(skb);
|
|
+
|
|
+ //prepare layer3/layer4 info
|
|
+ memcpy(&SfqParseResult.iph, iph_sfq, sizeof(struct iphdr));
|
|
+ if (iph_sfq->protocol == IPPROTO_TCP) {
|
|
+
|
|
+ LAYER4_HEADER(skb) = ((uint8_t *) iph_sfq + (iph_sfq->ihl * 4));
|
|
+ th_sfq = (struct tcphdr *)LAYER4_HEADER(skb);
|
|
+ memcpy(&SfqParseResult.th, th_sfq, sizeof(struct tcphdr));
|
|
+ SfqParseResult.pkt_type = IPV4_HNAPT;
|
|
+ //printk("tcp parsing\n");
|
|
+ tcp_source_port = ntohs(SfqParseResult.th.source);
|
|
+ udp_source_port = 0;
|
|
+ #if(0) //for TCP ack, test use
|
|
+ if(ntohl(SfqParseResult.iph.saddr) == 0xa0a0a04){ // tcp ack packet
|
|
+ ack_packt = 1;
|
|
+ }else {
|
|
+ ack_packt = 0;
|
|
+ }
|
|
+ #endif
|
|
+ sfq_prot = 2;//IPV4_HNAPT
|
|
+ proto_id = 1;//TCP
|
|
+ if(iph_sfq->frag_off & htons(IP_MF|IP_OFFSET)) {
|
|
+ //return 1;
|
|
+ }
|
|
+ } else if (iph_sfq->protocol == IPPROTO_UDP) {
|
|
+ LAYER4_HEADER(skb) = ((uint8_t *) iph_sfq + iph_sfq->ihl * 4);
|
|
+ uh_sfq = (struct udphdr *)LAYER4_HEADER(skb);
|
|
+ memcpy(&SfqParseResult.uh, uh_sfq, sizeof(struct udphdr));
|
|
+ SfqParseResult.pkt_type = IPV4_HNAPT;
|
|
+ udp_source_port = ntohs(SfqParseResult.uh.source);
|
|
+ tcp_source_port = 0;
|
|
+ ack_packt = 0;
|
|
+ sfq_prot = 2;//IPV4_HNAPT
|
|
+ proto_id =2;//UDP
|
|
+ if(iph_sfq->frag_off & htons(IP_MF|IP_OFFSET)) {
|
|
+ return 1;
|
|
+ }
|
|
+ }else{
|
|
+ sfq_prot = 1;
|
|
+ }
|
|
+ }else if (SfqParseResult.eth_type == htons(ETH_P_IPV6) ||
|
|
+ (SfqParseResult.eth_type == htons(ETH_P_PPP_SES) &&
|
|
+ SfqParseResult.ppp_tag == htons(PPP_IPV6))) {
|
|
+ ip6h_sfq = (struct ipv6hdr *)LAYER3_HEADER(skb);
|
|
+ memcpy(&SfqParseResult.ip6h, ip6h_sfq, sizeof(struct ipv6hdr));
|
|
+
|
|
+ if (ip6h_sfq->nexthdr == NEXTHDR_TCP) {
|
|
+ LAYER4_HEADER(skb) = ((uint8_t *) ip6h_sfq + sizeof(struct ipv6hdr));
|
|
+ th_sfq = (struct tcphdr *)LAYER4_HEADER(skb);
|
|
+ memcpy(&SfqParseResult.th, th_sfq, sizeof(struct tcphdr));
|
|
+ SfqParseResult.pkt_type = IPV6_5T_ROUTE;
|
|
+ sfq_prot = 4;//IPV6_5T
|
|
+ #if(0) //for TCP ack, test use
|
|
+ if(ntohl(SfqParseResult.ip6h.saddr.s6_addr32[3]) == 8){
|
|
+ ack_packt = 1;
|
|
+ }else {
|
|
+ ack_packt = 0;
|
|
+ }
|
|
+ #endif
|
|
+ } else if (ip6h_sfq->nexthdr == NEXTHDR_UDP) {
|
|
+ LAYER4_HEADER(skb) = ((uint8_t *) ip6h_sfq + sizeof(struct ipv6hdr));
|
|
+ uh_sfq = (struct udphdr *)LAYER4_HEADER(skb);
|
|
+ memcpy(&SfqParseResult.uh, uh_sfq, sizeof(struct udphdr));
|
|
+ SfqParseResult.pkt_type = IPV6_5T_ROUTE;
|
|
+ ack_packt = 0;
|
|
+ sfq_prot = 4;//IPV6_5T
|
|
+
|
|
+ }else{
|
|
+ sfq_prot = 3;//IPV6_3T
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
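+/* QDMA transmit flow (as implemented below): fill in the descriptor currently
+ * owned by the CPU at tx_cpu_ptr, pull a fresh descriptor from the software
+ * free list to become the new "null" tail, link it via NDP, hand the filled
+ * descriptor to hardware by clearing OWN, and kick QTX_CTX_PTR. TSO/SG
+ * packets consume one extra descriptor per MAX_TXD_LEN chunk of each
+ * fragment. */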
+inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no)
|
|
+{
|
|
+ unsigned int length=skb->len;
|
|
+ END_DEVICE* ei_local = netdev_priv(dev);
|
|
+
|
|
+ struct QDMA_txdesc *cpu_ptr;
|
|
+
|
|
+ struct QDMA_txdesc *dma_ptr __maybe_unused;
|
|
+ struct QDMA_txdesc *free_txd;
|
|
+ int ctx_offset;
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ struct iphdr *iph = NULL;
|
|
+ struct QDMA_txdesc *init_cpu_ptr;
|
|
+ struct tcphdr *th = NULL;
|
|
+ struct skb_frag_struct *frag;
|
|
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
|
|
+ unsigned int len, size, offset, frag_txd_num;
|
|
+ int init_txd_idx, i;
|
|
+#endif // CONFIG_RAETH_TSO //
|
|
+
|
|
+#if defined (CONFIG_RAETH_TSOV6)
|
|
+ struct ipv6hdr *ip6h = NULL;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ PSEUDO_ADAPTER *pAd;
|
|
+#endif
|
|
+ //cpu_ptr = PHYS_TO_VIRT(ei_local->tx_cpu_ptr);
|
|
+ //dma_ptr = PHYS_TO_VIRT(ei_local->tx_dma_ptr);
|
|
+ //ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
|
|
+ cpu_ptr = (ei_local->tx_cpu_ptr);
|
|
+ ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
|
|
+ cpu_ptr = phys_to_virt(ei_local->tx_cpu_ptr);
|
|
+ dma_ptr = phys_to_virt(ei_local->tx_dma_ptr);
|
|
+ cpu_ptr = (ei_local->txd_pool + (ctx_offset));
|
|
+ ei_local->skb_free[ctx_offset] = skb;
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ init_cpu_ptr = cpu_ptr;
|
|
+ init_txd_idx = ctx_offset;
|
|
+#endif
|
|
+
|
|
+#if !defined (CONFIG_RAETH_TSO)
|
|
+
|
|
+ //2. prepare data
|
|
+ //cpu_ptr->txd_info1.SDP = VIRT_TO_PHYS(skb->data);
|
|
+ cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
|
|
+ cpu_ptr->txd_info3.SDL = skb->len;
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ SfqParseLayerInfo(skb);
|
|
+ cpu_ptr->txd_info4.VQID0 = 1;//1:HW hash 0:CPU
|
|
+
|
|
+
|
|
+#if(0)// for tcp ack use, test use
|
|
+ if (ack_packt==1){
|
|
+ cpu_ptr->txd_info3.QID = 0x0a;
|
|
+ //cpu_ptr->txd_info3.VQID = 0;
|
|
+ }else{
|
|
+ cpu_ptr->txd_info3.QID = 0;
|
|
+ }
|
|
+#endif
|
|
+ cpu_ptr->txd_info3.PROT = sfq_prot;
|
|
+ cpu_ptr->txd_info3.IPOFST = 14 + (SfqParseResult.vlan1_gap); //no vlan
|
|
+
|
|
+#endif
|
|
+ if (gmac_no == 1) {
|
|
+ cpu_ptr->txd_info4.FPORT = 1;
|
|
+ }else {
|
|
+ cpu_ptr->txd_info4.FPORT = 2;
|
|
+ }
|
|
+
|
|
+ cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ if((lan_wan_separate==1) && (gmac_no==2)){
|
|
+ cpu_ptr->txd_info3.QID += 8;
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQUp;
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if((lan_wan_separate==1) && (gmac_no==1)){
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQDl;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+#endif //end CONFIG_PSEUDO_SUPPORT
|
|
+
|
|
+ if(dbg==1){
|
|
+ printk("M2Q_table[%d]=%d\n", skb->mark, M2Q_table[skb->mark]);
|
|
+ printk("cpu_ptr->txd_info3.QID = %d\n", cpu_ptr->txd_info3.QID);
|
|
+ }
|
|
+#if 0
|
|
+ iph = (struct iphdr *)skb_network_header(skb);
|
|
+ if (iph->tos == 0xe0)
|
|
+ cpu_ptr->txd_info3.QID = 3;
|
|
+ else if (iph->tos == 0xa0)
|
|
+ cpu_ptr->txd_info3.QID = 2;
|
|
+ else if (iph->tos == 0x20)
|
|
+ cpu_ptr->txd_info3.QID = 1;
|
|
+ else
|
|
+ cpu_ptr->txd_info3.QID = 0;
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
|
|
+ if (skb->ip_summed == CHECKSUM_PARTIAL){
|
|
+ cpu_ptr->txd_info4.TUI_CO = 7;
|
|
+ }else {
|
|
+ cpu_ptr->txd_info4.TUI_CO = 0;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_TX
|
|
+ if(vlan_tx_tag_present(skb)) {
|
|
+ cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
|
|
+ }else {
|
|
+ cpu_ptr->txd_info4.VLAN_TAG = 0;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_TX // QoS Web UI used
|
|
+
|
|
+ if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==2)){
|
|
+ cpu_ptr->txd_info3.QID += 8;
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQUp;
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==1)){
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQDl;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+#endif // CONFIG_RAETH_HW_VLAN_TX
|
|
+
|
|
+
|
|
+//no hw vlan, no GE2, web UI used
|
|
+#ifndef CONFIG_PSEUDO_SUPPORT
|
|
+#ifndef CONFIG_RAETH_HW_VLAN_TX
|
|
+ if(lan_wan_separate==1){
|
|
+ struct vlan_hdr *vh = NULL;
|
|
+ unsigned short vlanid = 0;
|
|
+ unsigned short vlan_TCI;
|
|
+ vh = (struct vlan_hdr *)(skb->data + ETH_HLEN);
|
|
+ vlan_TCI = vh->h_vlan_TCI;
|
|
+ vlanid = (vlan_TCI & VLAN_VID_MASK)>>8;
|
|
+ if(vlanid == 2)//to wan
|
|
+ {
|
|
+ cpu_ptr->txd_info3.QID += 8;
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQUp;
|
|
+ }
|
|
+#endif
|
|
+ }else if(vlanid == 1){ //to lan
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQDl;
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+#endif
|
|
+#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
|
|
+ if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
|
|
+ if(ra_sw_nat_hook_rx!= NULL){
|
|
+ cpu_ptr->txd_info4.FPORT = 4; /* PPE */
|
|
+ FOE_MAGIC_TAG(skb) = 0;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+#if 0
|
|
+ cpu_ptr->txd_info4.FPORT = 4; /* PPE */
|
|
+ cpu_ptr->txd_info4.UDF = 0x2F;
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_MIPS)
|
|
+ dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
|
|
+#else
|
|
+ dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
|
|
+#endif
|
|
+ cpu_ptr->txd_info3.SWC_bit = 1;
|
|
+
|
|
+ //3. get NULL TXD and decrease free_tx_num by 1.
|
|
+ ctx_offset = get_free_txd(&free_txd);
|
|
+ if(ctx_offset == NUM_TX_DESC) {
|
|
+ printk("get_free_txd fail\n"); // this should not happen. free_txd_num is 2 at least.
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ //4. hook new TXD in the end of queue
|
|
+ //cpu_ptr->txd_info2.NDP = VIRT_TO_PHYS(free_txd);
|
|
+ cpu_ptr->txd_info2.NDP = (free_txd);
|
|
+
|
|
+
|
|
+ //5. move CPU_PTR to new TXD
|
|
+ //ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd);
|
|
+ ei_local->tx_cpu_ptr = (free_txd);
|
|
+ cpu_ptr->txd_info3.OWN_bit = 0;
|
|
+ sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
|
|
+
|
|
+#if 0
|
|
+ printk("----------------------------------------------\n");
|
|
+ printk("txd_info1:%08X \n",*(int *)&cpu_ptr->txd_info1);
|
|
+ printk("txd_info2:%08X \n",*(int *)&cpu_ptr->txd_info2);
|
|
+ printk("txd_info3:%08X \n",*(int *)&cpu_ptr->txd_info3);
|
|
+ printk("txd_info4:%08X \n",*(int *)&cpu_ptr->txd_info4);
|
|
+#endif
|
|
+
|
|
+#else //#if !defined (CONFIG_RAETH_TSO)
|
|
+ cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
|
|
+ cpu_ptr->txd_info3.SDL = (length - skb->data_len);
|
|
+ cpu_ptr->txd_info3.LS_bit = nr_frags ? 0:1;
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ SfqParseLayerInfo(skb);
|
|
+ // printk("tcp_source_port=%d\n", tcp_source_port);
|
|
+#if(0)
|
|
+ cpu_ptr->txd_info4.VQID0 = 0;//1:HW hash 0:CPU
|
|
+ if (tcp_source_port==1000) cpu_ptr->txd_info3.VQID = 0;
|
|
+ else if (tcp_source_port==1100) cpu_ptr->txd_info3.VQID = 1;
|
|
+ else if (tcp_source_port==1200) cpu_ptr->txd_info3.VQID = 2;
|
|
+ else cpu_ptr->txd_info3.VQID = 0;
|
|
+ #else
|
|
+ cpu_ptr->txd_info4.VQID0 = 1;
|
|
+ cpu_ptr->txd_info3.PROT = sfq_prot;
|
|
+ cpu_ptr->txd_info3.IPOFST = 14 + (SfqParseResult.vlan1_gap); //no vlan
|
|
+#endif
|
|
+#endif
|
|
+ if (gmac_no == 1) {
|
|
+ cpu_ptr->txd_info4.FPORT = 1;
|
|
+ }else {
|
|
+ cpu_ptr->txd_info4.FPORT = 2;
|
|
+ }
|
|
+
|
|
+ cpu_ptr->txd_info4.TSO = 0;
|
|
+ cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT //web UI used tso
|
|
+ if((lan_wan_separate==1) && (gmac_no==2)){
|
|
+ cpu_ptr->txd_info3.QID += 8;
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if(web_sfq_enable == 1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQUp;
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if((lan_wan_separate==1) && (gmac_no==1)){
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQDl;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+#endif //CONFIG_PSEUDO_SUPPORT
|
|
+ if(dbg==1){
|
|
+ printk("M2Q_table[%d]=%d\n", skb->mark, M2Q_table[skb->mark]);
|
|
+ printk("cpu_ptr->txd_info3.QID = %d\n", cpu_ptr->txd_info3.QID);
|
|
+ }
|
|
+#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
|
|
+ if (skb->ip_summed == CHECKSUM_PARTIAL){
|
|
+ cpu_ptr->txd_info4.TUI_CO = 7;
|
|
+ }else {
|
|
+ cpu_ptr->txd_info4.TUI_CO = 0;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_TX
|
|
+ if(vlan_tx_tag_present(skb)) {
|
|
+ cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
|
|
+ }else {
|
|
+ cpu_ptr->txd_info4.VLAN_TAG = 0;
|
|
+ }
|
|
+#endif
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_TX // QoS Web UI used tso
|
|
+
|
|
+ if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==2)){
|
|
+ //cpu_ptr->txd_info3.QID += 8;
|
|
+ cpu_ptr->txd_info3.QID += 8;
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQUp;
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==1)){
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQDl;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+#endif // CONFIG_RAETH_HW_VLAN_TX
|
|
+
|
|
+
|
|
+//no hw vlan, no GE2, web UI used
|
|
+#ifndef CONFIG_PSEUDO_SUPPORT
|
|
+#ifndef CONFIG_RAETH_HW_VLAN_TX
|
|
+ if(lan_wan_separate==1){
|
|
+ struct vlan_hdr *vh = NULL;
|
|
+ unsigned short vlanid = 0;
|
|
+ unsigned short vlan_TCI;
|
|
+ vh = (struct vlan_hdr *)(skb->data + ETH_HLEN);
|
|
+ vlan_TCI = vh->h_vlan_TCI;
|
|
+ vlanid = (vlan_TCI & VLAN_VID_MASK)>>8;
|
|
+ if(vlanid == 2)//eth2.2 to wan
|
|
+ {
|
|
+ cpu_ptr->txd_info3.QID += 8;
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQUp;
|
|
+ }
|
|
+#endif
|
|
+		}else if(vlanid == 1){ // eth2.1 to lan
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQDl;
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+}
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
|
|
+ if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
|
|
+ if(ra_sw_nat_hook_rx!= NULL){
|
|
+ cpu_ptr->txd_info4.FPORT = 4; /* PPE */
|
|
+ FOE_MAGIC_TAG(skb) = 0;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ cpu_ptr->txd_info3.SWC_bit = 1;
|
|
+
|
|
+ ctx_offset = get_free_txd(&free_txd);
|
|
+ if(ctx_offset == NUM_TX_DESC) {
|
|
+ printk("get_free_txd fail\n");
|
|
+ return 0;
|
|
+ }
|
|
+ //cpu_ptr->txd_info2.NDP = VIRT_TO_PHYS(free_txd);
|
|
+ //ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd);
|
|
+ cpu_ptr->txd_info2.NDP = free_txd;
|
|
+ ei_local->tx_cpu_ptr = free_txd;
|
|
+
|
|
+
|
|
+ if(nr_frags > 0) {
|
|
+ for(i=0;i<nr_frags;i++) {
|
|
+ // 1. set or get init value for current fragment
|
|
+ offset = 0;
|
|
+ frag = &skb_shinfo(skb)->frags[i];
|
|
+ len = frag->size;
|
|
+ frag_txd_num = cal_frag_txd_num(len); // calculate the needed TXD numbers for this fragment
|
|
+			for( ; frag_txd_num > 0; frag_txd_num--){
|
|
+ // 2. size will be assigned to SDL and can't be larger than MAX_TXD_LEN
|
|
+ if(len < MAX_TXD_LEN)
|
|
+ size = len;
|
|
+ else
|
|
+ size = MAX_TXD_LEN;
|
|
+
|
|
+ //3. Update TXD info
|
|
+ cpu_ptr = (ei_local->txd_pool + (ctx_offset));
|
|
+ cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT //QoS Web UI used , nr_frags
|
|
+ if((lan_wan_separate==1) && (gmac_no==2)){
|
|
+ //cpu_ptr->txd_info3.QID += 8;
|
|
+ cpu_ptr->txd_info3.QID += 8;
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQUp;
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if((lan_wan_separate==1) && (gmac_no==1)){
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQDl;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+#endif //CONFIG_PSEUDO_SUPPORT
|
|
+
|
|
+//QoS web used, nr_frags
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_TX
|
|
+ if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==2)){
|
|
+ cpu_ptr->txd_info3.QID += 8;
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQUp;
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if((lan_wan_separate==1) && (vlan_tx_tag_get(skb)==1)){
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQDl;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+#endif // CONFIG_RAETH_HW_VLAN_TX
|
|
+//no hw vlan, no GE2, web UI used
|
|
+#ifndef CONFIG_PSEUDO_SUPPORT
|
|
+#ifndef CONFIG_RAETH_HW_VLAN_TX
|
|
+ if(lan_wan_separate==1){
|
|
+ struct vlan_hdr *vh = NULL;
|
|
+ unsigned short vlanid = 0;
|
|
+ unsigned short vlan_TCI;
|
|
+ vh = (struct vlan_hdr *)(skb->data + ETH_HLEN);
|
|
+ vlan_TCI = vh->h_vlan_TCI;
|
|
+ vlanid = (vlan_TCI & VLAN_VID_MASK)>>8;
|
|
+			if(vlanid == 2)//eth2.2 to wan
|
|
+ {
|
|
+ cpu_ptr->txd_info3.QID += 8;
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQUp;
|
|
+ }
|
|
+#endif
|
|
+			}else if(vlanid == 1){ // eth2.1 to lan
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+ if(web_sfq_enable==1 &&(skb->mark == 2)){
|
|
+ cpu_ptr->txd_info3.QID = HwSfqQDl;
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+#endif
|
|
+ if(dbg==1){
|
|
+ printk("M2Q_table[%d]=%d\n", skb->mark, M2Q_table[skb->mark]);
|
|
+ printk("cpu_ptr->txd_info3.QID = %d\n", cpu_ptr->txd_info3.QID);
|
|
+ }
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
|
|
+ cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE);
|
|
+#else
|
|
+ cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page.p, frag->page_offset + offset, size, PCI_DMA_TODEVICE);
|
|
+// printk(" frag->page = %08x. frag->page_offset = %08x. frag->size = % 08x.\n", frag->page, (frag->page_offset+offset), size);
|
|
+#endif
|
|
+ cpu_ptr->txd_info3.SDL = size;
|
|
+ if( (i==(nr_frags-1)) && (frag_txd_num == 1))
|
|
+ cpu_ptr->txd_info3.LS_bit = 1;
|
|
+ else
|
|
+ cpu_ptr->txd_info3.LS_bit = 0;
|
|
+ cpu_ptr->txd_info3.OWN_bit = 0;
|
|
+ cpu_ptr->txd_info3.SWC_bit = 1;
|
|
+ //4. Update skb_free for housekeeping
|
|
+ ei_local->skb_free[ctx_offset] = (cpu_ptr->txd_info3.LS_bit == 1)?skb:(struct sk_buff *)0xFFFFFFFF; //MAGIC ID
|
|
+
|
|
+ //5. Get next TXD
|
|
+ ctx_offset = get_free_txd(&free_txd);
|
|
+ //cpu_ptr->txd_info2.NDP = VIRT_TO_PHYS(free_txd);
|
|
+ //ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd);
|
|
+ cpu_ptr->txd_info2.NDP = free_txd;
|
|
+ ei_local->tx_cpu_ptr = free_txd;
|
|
+ //6. Update offset and len.
|
|
+ offset += size;
|
|
+ len -= size;
|
|
+ }
|
|
+ }
|
|
+ ei_local->skb_free[init_txd_idx]= (struct sk_buff *)0xFFFFFFFF; //MAGIC ID
|
|
+ }
|
|
+
|
|
+ if(skb_shinfo(skb)->gso_segs > 1) {
|
|
+
|
|
+// TsoLenUpdate(skb->len);
|
|
+
|
|
+ /* TCP over IPv4 */
|
|
+ iph = (struct iphdr *)skb_network_header(skb);
|
|
+#if defined (CONFIG_RAETH_TSOV6)
|
|
+ /* TCP over IPv6 */
|
|
+ ip6h = (struct ipv6hdr *)skb_network_header(skb);
|
|
+#endif
|
|
+ if((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
|
|
+ th = (struct tcphdr *)skb_transport_header(skb);
|
|
+#if defined (CONFIG_HW_SFQ)
|
|
+#if(0)
|
|
+ init_cpu_ptr->txd_info4.VQID0 = 0;//1:HW hash 0:CPU
|
|
+ if (tcp_source_port==1000) init_cpu_ptr->txd_info3.VQID = 0;
|
|
+ else if (tcp_source_port==1100) init_cpu_ptr->txd_info3.VQID = 1;
|
|
+ else if (tcp_source_port==1200) init_cpu_ptr->txd_info3.VQID = 2;
|
|
+ else cpu_ptr->txd_info3.VQID = 0;
|
|
+ #else
|
|
+ init_cpu_ptr->txd_info4.VQID0 = 1;
|
|
+ init_cpu_ptr->txd_info3.PROT = sfq_prot;
|
|
+ init_cpu_ptr->txd_info3.IPOFST = 14 + (SfqParseResult.vlan1_gap); //no vlan
|
|
+#endif
|
|
+#endif
|
|
+ init_cpu_ptr->txd_info4.TSO = 1;
|
|
+
|
|
+ th->check = htons(skb_shinfo(skb)->gso_size);
|
|
+#if defined (CONFIG_MIPS)
|
|
+ dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
|
|
+#else
|
|
+ dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
|
|
+#endif
|
|
+ }
|
|
+
|
|
+#if defined (CONFIG_RAETH_TSOV6)
|
|
+ /* TCP over IPv6 */
|
|
+ //ip6h = (struct ipv6hdr *)skb_network_header(skb);
|
|
+ else if ((ip6h->version == 6) && (ip6h->nexthdr == NEXTHDR_TCP)) {
|
|
+ th = (struct tcphdr *)skb_transport_header(skb);
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ init_cpu_ptr->txd_info4.TSO = 1;
|
|
+#else
|
|
+ init_cpu_ptr->txd_info4.TSO = 1;
|
|
+#endif
|
|
+ th->check = htons(skb_shinfo(skb)->gso_size);
|
|
+#if defined (CONFIG_MIPS)
|
|
+ dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
|
|
+#else
|
|
+ dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
|
|
+#endif
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+
|
|
+
|
|
+// dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
|
|
+
|
|
+ init_cpu_ptr->txd_info3.OWN_bit = 0;
|
|
+#endif // CONFIG_RAETH_TSO //
|
|
+
|
|
+ sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ if (gmac_no == 2) {
|
|
+ if (ei_local->PseudoDev != NULL) {
|
|
+ pAd = netdev_priv(ei_local->PseudoDev);
|
|
+ pAd->stat.tx_packets++;
|
|
+ pAd->stat.tx_bytes += length;
|
|
+ }
|
|
+ } else
|
|
+
|
|
+#endif
|
|
+ {
|
|
+ ei_local->stat.tx_packets++;
|
|
+ ei_local->stat.tx_bytes += skb->len;
|
|
+ }
|
|
+#ifdef CONFIG_RAETH_NAPI
|
|
+ if ( ei_local->tx_full == 1) {
|
|
+ ei_local->tx_full = 0;
|
|
+ netif_wake_queue(dev);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ return length;
|
|
+}
|
|
+
|
|
+int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no)
|
|
+{
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+ unsigned long flags;
|
|
+ unsigned int num_of_txd = 0;
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
|
|
+ struct skb_frag_struct *frag;
|
|
+#endif
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ PSEUDO_ADAPTER *pAd;
|
|
+#endif
|
|
+
|
|
+#if !defined(CONFIG_RA_NAT_NONE)
|
|
+ if(ra_sw_nat_hook_tx!= NULL)
|
|
+ {
|
|
+// spin_lock_irqsave(&ei_local->page_lock, flags);
|
|
+ if(ra_sw_nat_hook_tx(skb, gmac_no)==1){
|
|
+// spin_unlock_irqrestore(&ei_local->page_lock, flags);
|
|
+ }else{
|
|
+ kfree_skb(skb);
|
|
+// spin_unlock_irqrestore(&ei_local->page_lock, flags);
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
+
|
|
+
|
|
+ dev->trans_start = jiffies; /* save the timestamp */
|
|
+ spin_lock_irqsave(&ei_local->page_lock, flags);
|
|
+#if defined (CONFIG_MIPS)
|
|
+ dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
|
|
+#else
|
|
+ dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
|
|
+#endif
|
|
+
|
|
+
|
|
+//check free_txd_num before calling rt288_eth_send()
|
|
+
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ // num_of_txd = (nr_frags==0) ? 1 : (nr_frags + 1);
|
|
+ if(nr_frags != 0){
|
|
+ for(i=0;i<nr_frags;i++) {
|
|
+ frag = &skb_shinfo(skb)->frags[i];
|
|
+ num_of_txd += cal_frag_txd_num(frag->size);
|
|
+ }
|
|
+ }else
|
|
+ num_of_txd = 1;
|
|
+#else
|
|
+ num_of_txd = 1;
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RALINK_MT7621)
|
|
+ if((sysRegRead(0xbe00000c)&0xFFFF) == 0x0101) {
|
|
+ ei_xmit_housekeeping(0);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+
|
|
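+	/* Reserve room for the packet's descriptors plus the null tail TXD that
+	 * always stays linked at QTX_CTX_PTR (hence the "+ 1"); the extra
+	 * "!= NUM_TX_DESC" test appears to reject the not-yet-initialised state
+	 * in which no null TXD has been taken from the pool. */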
+ if ((ei_local->free_txd_num > num_of_txd + 1) && (ei_local->free_txd_num != NUM_TX_DESC))
|
|
+ {
|
|
+ rt2880_eth_send(dev, skb, gmac_no); // need to modify rt2880_eth_send() for QDMA
|
|
+ if (ei_local->free_txd_num < 3)
|
|
+ {
|
|
+#if defined (CONFIG_RAETH_STOP_RX_WHEN_TX_FULL)
|
|
+ netif_stop_queue(dev);
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ netif_stop_queue(ei_local->PseudoDev);
|
|
+#endif
|
|
+ tx_ring_full = 1;
|
|
+#endif
|
|
+ }
|
|
+ } else {
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ if (gmac_no == 2)
|
|
+ {
|
|
+ if (ei_local->PseudoDev != NULL)
|
|
+ {
|
|
+ pAd = netdev_priv(ei_local->PseudoDev);
|
|
+ pAd->stat.tx_dropped++;
|
|
+ }
|
|
+ } else
|
|
+#endif
|
|
+ ei_local->stat.tx_dropped++;
|
|
+ kfree_skb(skb);
|
|
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
|
|
+ return 0;
|
|
+ }
|
|
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
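+/* TX housekeeping for QDMA: walk the release ring from QTX_CRX_PTR towards
+ * QTX_DRX_PTR and, for every descriptor the hardware has completed (OWN set
+ * again), return it to the software free list and free its skb (the
+ * 0xFFFFFFFF marker tags scatter-gather descriptors whose skb is freed only
+ * once), wake the queue, then write back the new CRX pointer and re-enable
+ * the TX release interrupt. */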
+void ei_xmit_housekeeping(unsigned long unused)
|
|
+{
|
|
+ struct net_device *dev = dev_raether;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+#ifndef CONFIG_RAETH_NAPI
|
|
+ unsigned long reg_int_mask=0;
|
|
+#endif
|
|
+ struct QDMA_txdesc *dma_ptr = NULL;
|
|
+ struct QDMA_txdesc *cpu_ptr = NULL;
|
|
+ struct QDMA_txdesc *tmp_ptr = NULL;
|
|
+ unsigned int ctx_offset = 0;
|
|
+ unsigned int dtx_offset = 0;
|
|
+
|
|
+ cpu_ptr = sysRegRead(QTX_CRX_PTR);
|
|
+ dma_ptr = sysRegRead(QTX_DRX_PTR);
|
|
+ ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
|
|
+ dtx_offset = GET_TXD_OFFSET(&dma_ptr);
|
|
+ cpu_ptr = (ei_local->txd_pool + (ctx_offset));
|
|
+ dma_ptr = (ei_local->txd_pool + (dtx_offset));
|
|
+
|
|
+ while(cpu_ptr != dma_ptr && (cpu_ptr->txd_info3.OWN_bit == 1)) {
|
|
+ //1. keep cpu next TXD
|
|
+ tmp_ptr = cpu_ptr->txd_info2.NDP;
|
|
+ //2. release TXD
|
|
+ put_free_txd(ctx_offset);
|
|
+ //3. update ctx_offset and free skb memory
|
|
+ ctx_offset = GET_TXD_OFFSET(&tmp_ptr);
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ if(ei_local->skb_free[ctx_offset]!=(struct sk_buff *)0xFFFFFFFF) {
|
|
+ dev_kfree_skb_any(ei_local->skb_free[ctx_offset]);
|
|
+ }
|
|
+#else
|
|
+ dev_kfree_skb_any(ei_local->skb_free[ctx_offset]);
|
|
+#endif
|
|
+ ei_local->skb_free[ctx_offset] = 0;
|
|
+
|
|
+ netif_wake_queue(dev);
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ netif_wake_queue(ei_local->PseudoDev);
|
|
+#endif
|
|
+ tx_ring_full=0;
|
|
+ //4. update cpu_ptr
|
|
+ cpu_ptr = (ei_local->txd_pool + ctx_offset);
|
|
+ }
|
|
+ sysRegWrite(QTX_CRX_PTR, (ei_local->phy_txd_pool + (ctx_offset << 4)));
|
|
+#ifndef CONFIG_RAETH_NAPI
|
|
+ reg_int_mask=sysRegRead(QFE_INT_ENABLE);
|
|
+#if defined (DELAY_INT)
|
|
+ sysRegWrite(QFE_INT_ENABLE, reg_int_mask| RLS_DLY_INT);
|
|
+#else
|
|
+
|
|
+ sysRegWrite(QFE_INT_ENABLE, reg_int_mask | RLS_DONE_INT);
|
|
+#endif
|
|
+#endif //CONFIG_RAETH_NAPI//
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(ei_start_xmit);
|
|
+EXPORT_SYMBOL(ei_xmit_housekeeping);
|
|
+EXPORT_SYMBOL(fe_dma_init);
|
|
+EXPORT_SYMBOL(rt2880_eth_send);
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/raether_qdma_mt7623.c
|
|
@@ -0,0 +1,1020 @@
|
|
+#include <linux/module.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/pci.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/if_vlan.h>
|
|
+#include <linux/if_ether.h>
|
|
+#include <linux/fs.h>
|
|
+#include <asm/uaccess.h>
|
|
+#include <asm/rt2880/surfboardint.h>
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+#include <linux/tcp.h>
|
|
+#include <net/ipv6.h>
|
|
+#include <linux/ip.h>
|
|
+#include <net/ip.h>
|
|
+#include <net/tcp.h>
|
|
+#include <linux/in.h>
|
|
+#include <linux/ppp_defs.h>
|
|
+#include <linux/if_pppox.h>
|
|
+#endif
|
|
+#include <linux/delay.h>
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+#include <linux/sched.h>
|
|
+#endif
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
|
|
+#include <asm/rt2880/rt_mmap.h>
|
|
+#else
|
|
+#include <linux/libata-compat.h>
|
|
+#endif
|
|
+
|
|
+#include "ra2882ethreg.h"
|
|
+#include "raether.h"
|
|
+#include "ra_mac.h"
|
|
+#include "ra_ioctl.h"
|
|
+#include "ra_rfrw.h"
|
|
+#ifdef CONFIG_RAETH_NETLINK
|
|
+#include "ra_netlink.h"
|
|
+#endif
|
|
+#if defined (CONFIG_RAETH_QOS)
|
|
+#include "ra_qos.h"
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
|
|
+#include "../../../net/nat/hw_nat/ra_nat.h"
|
|
+#endif
|
|
+
|
|
+
|
|
+#if !defined(CONFIG_RA_NAT_NONE)
|
|
+/* bruce+
|
|
+ */
|
|
+extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);
|
|
+extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
|
|
+/* Qwert+
|
|
+ */
|
|
+#include <asm/mipsregs.h>
|
|
+extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle);
|
|
+extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle);
|
|
+#endif /* CONFIG_RA_CLASSIFIER */
|
|
+
|
|
+#if defined (CONFIG_RALINK_RT3052_MP2)
|
|
+int32_t mcast_rx(struct sk_buff * skb);
|
|
+int32_t mcast_tx(struct sk_buff * skb);
|
|
+#endif
|
|
+
|
|
+#ifdef RA_MTD_RW_BY_NUM
|
|
+int ra_mtd_read(int num, loff_t from, size_t len, u_char *buf);
|
|
+#else
|
|
+int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf);
|
|
+#endif
|
|
+
|
|
+/* gmac driver feature set config */
|
|
+#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS)
|
|
+#undef DELAY_INT
|
|
+#else
|
|
+#define DELAY_INT 1
|
|
+#endif
|
|
+
|
|
+//#define CONFIG_UNH_TEST
|
|
+/* end of config */
|
|
+
|
|
+#if defined (CONFIG_RAETH_JUMBOFRAME)
|
|
+#define MAX_RX_LENGTH 4096
|
|
+#else
|
|
+#define MAX_RX_LENGTH 1536
|
|
+#endif
|
|
+
|
|
+extern struct net_device *dev_raether;
|
|
+
|
|
+#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+static int rx_dma_owner_idx1;
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+static int rx_calc_idx1;
|
|
+#endif
|
|
+#endif
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+static int rx_calc_idx0;
|
|
+static unsigned long tx_cpu_owner_idx0=0;
|
|
+#endif
|
|
+static unsigned long tx_ring_full=0;
|
|
+
|
|
+#if defined (CONFIG_ETHTOOL) && defined (CONFIG_RAETH_ROUTER)
|
|
+#include "ra_ethtool.h"
|
|
+extern struct ethtool_ops ra_ethtool_ops;
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+extern struct ethtool_ops ra_virt_ethtool_ops;
|
|
+#endif // CONFIG_PSEUDO_SUPPORT //
|
|
+#endif // CONFIG_ETHTOOL //
|
|
+
|
|
+#ifdef CONFIG_RALINK_VISTA_BASIC
|
|
+int is_switch_175c = 1;
|
|
+#endif
|
|
+
|
|
+//skb->mark to queue mapping table
|
|
+extern unsigned int M2Q_table[64];
|
|
+
|
|
+
|
|
+#define KSEG1 0xa0000000
|
|
+#if defined (CONFIG_MIPS)
|
|
+#define PHYS_TO_VIRT(x) ((void *)((x) | KSEG1))
|
|
+#define VIRT_TO_PHYS(x) ((unsigned long)(x) & ~KSEG1)
|
|
+#else
|
|
+#define PHYS_TO_VIRT(x) phys_to_virt(x)
|
|
+#define VIRT_TO_PHYS(x) virt_to_phys(x)
|
|
+#endif
|
|
+
|
|
+
|
|
+extern void set_fe_dma_glo_cfg(void);
|
|
+
|
|
+
|
|
+/**
|
|
+ *
|
|
+ * @brief: get the TXD index from its address
|
|
+ *
|
|
+ * @param: cpu_ptr
|
|
+ *
|
|
+ * @return: TXD index
|
|
+*/
|
|
+
|
|
+static unsigned int GET_TXD_OFFSET(struct QDMA_txdesc **cpu_ptr)
|
|
+{
|
|
+ struct net_device *dev = dev_raether;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+ int ctx_offset;
|
|
+ //ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc);
|
|
+ //ctx_offset = (*cpu_ptr - ei_local->txd_pool);
|
|
+ /*kurtis*/
|
|
+ ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->phy_txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc);
|
|
+ return ctx_offset;
|
|
+}
|
|
+
|
|
+
|
|
+
|
|
+
|
|
+/**
|
|
+ * @brief calculate the number of TXDs needed for a buffer of the given size
|
|
+ *
|
|
+ * @param size
|
|
+ *
|
|
+ * @return frag_txd_num
|
|
+ */
|
|
+
|
|
+unsigned int cal_frag_txd_num(unsigned int size)
|
|
+{
|
|
+ unsigned int frag_txd_num = 0;
|
|
+ if(size == 0)
|
|
+ return 0;
|
|
+ while(size > 0){
|
|
+ if(size > MAX_TXD_LEN){
|
|
+ frag_txd_num++;
|
|
+ size -= MAX_TXD_LEN;
|
|
+ }else{
|
|
+ frag_txd_num++;
|
|
+ size = 0;
|
|
+ }
|
|
+ }
|
|
+ return frag_txd_num;
|
|
+
|
|
+}
|
|
+
|
|
+/**
|
|
+ * @brief get free TXD from TXD queue
|
|
+ *
|
|
+ * @param free_txd
|
|
+ *
|
|
+ * @return
|
|
+ */
|
|
+static int get_free_txd(struct QDMA_txdesc **free_txd)
|
|
+{
|
|
+ struct net_device *dev = dev_raether;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+ unsigned int tmp_idx;
|
|
+
|
|
+ if(ei_local->free_txd_num > 0){
|
|
+ tmp_idx = ei_local->free_txd_head;
|
|
+ ei_local->free_txd_head = ei_local->txd_pool_info[tmp_idx];
|
|
+ ei_local->free_txd_num -= 1;
|
|
+ //*free_txd = &ei_local->txd_pool[tmp_idx];
|
|
+ *free_txd = ei_local->phy_txd_pool + (sizeof(struct QDMA_txdesc) * tmp_idx);
|
|
+ return tmp_idx;
|
|
+ }else
|
|
+ return NUM_TX_DESC;
|
|
+}
|
|
+
|
|
+
|
|
+/**
|
|
+ * @brief add free TXD into TXD queue
|
|
+ *
|
|
+ * @param free_txd
|
|
+ *
|
|
+ * @return
|
|
+ */
|
|
+int put_free_txd(int free_txd_idx)
|
|
+{
|
|
+ struct net_device *dev = dev_raether;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+ ei_local->txd_pool_info[ei_local->free_txd_tail] = free_txd_idx;
|
|
+ ei_local->free_txd_tail = free_txd_idx;
|
|
+ ei_local->txd_pool_info[free_txd_idx] = NUM_TX_DESC;
|
|
+ ei_local->free_txd_num += 1;
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+/*define qdma initial alloc*/
|
|
+/**
|
|
+ * @brief
|
|
+ *
|
|
+ * @param net_dev
|
|
+ *
|
|
+ * @return 0: fail
|
|
+ * 1: success
|
|
+ */
|
|
+bool qdma_tx_desc_alloc(void)
|
|
+{
|
|
+ struct net_device *dev = dev_raether;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+ struct QDMA_txdesc *free_txd = NULL;
|
|
+ unsigned int txd_idx;
|
|
+ int i = 0;
|
|
+
|
|
+
|
|
+ ei_local->txd_pool = pci_alloc_consistent(NULL, sizeof(struct QDMA_txdesc) * NUM_TX_DESC, &ei_local->phy_txd_pool);
|
|
+ printk("txd_pool=%p phy_txd_pool=%08X\n", ei_local->txd_pool , ei_local->phy_txd_pool);
|
|
+
|
|
+ if (ei_local->txd_pool == NULL) {
|
|
+ printk("adapter->txd_pool allocation failed!\n");
|
|
+ return 0;
|
|
+ }
|
|
+ printk("ei_local->skb_free start address is 0x%p.\n", ei_local->skb_free);
|
|
+	//initialize skb_free[] and chain txd_pool_info[] into a free list
|
|
+ for ( i = 0; i < NUM_TX_DESC; i++)
|
|
+ {
|
|
+ ei_local->skb_free[i]= 0;
|
|
+ ei_local->txd_pool_info[i] = i + 1;
|
|
+ ei_local->txd_pool[i].txd_info3.LS_bit = 1;
|
|
+ ei_local->txd_pool[i].txd_info3.OWN_bit = 1;
|
|
+ }
|
|
+
|
|
+ ei_local->free_txd_head = 0;
|
|
+ ei_local->free_txd_tail = NUM_TX_DESC - 1;
|
|
+ ei_local->free_txd_num = NUM_TX_DESC;
|
|
+
|
|
+
|
|
+ //get free txd from txd pool
|
|
+ txd_idx = get_free_txd(&free_txd);
|
|
+ if( txd_idx == NUM_TX_DESC) {
|
|
+ printk("get_free_txd fail\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ //add null TXD for transmit
|
|
+
|
|
+ /*kurtis test*/
|
|
+ ei_local->tx_dma_ptr = free_txd;
|
|
+ ei_local->tx_cpu_ptr = free_txd;
|
|
+ //ei_local->tx_dma_ptr = virt_to_phys(free_txd);
|
|
+ //ei_local->tx_cpu_ptr = virt_to_phys(free_txd);
|
|
+ sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
|
|
+ sysRegWrite(QTX_DTX_PTR, ei_local->tx_dma_ptr);
|
|
+
|
|
+ printk("kurtis: free_txd = 0x%x!!!\n", free_txd);
|
|
+ printk("kurtis: ei_local->tx_dma_ptr = 0x%x!!!\n", ei_local->tx_dma_ptr);
|
|
+
|
|
+ //get free txd from txd pool
|
|
+
|
|
+ txd_idx = get_free_txd(&free_txd);
|
|
+ if( txd_idx == NUM_TX_DESC) {
|
|
+ printk("get_free_txd fail\n");
|
|
+ return 0;
|
|
+ }
|
|
+ // add null TXD for release
|
|
+ //sysRegWrite(QTX_CRX_PTR, virt_to_phys(free_txd));
|
|
+ //sysRegWrite(QTX_DRX_PTR, virt_to_phys(free_txd));
|
|
+ sysRegWrite(QTX_CRX_PTR, free_txd);
|
|
+ sysRegWrite(QTX_DRX_PTR, free_txd);
|
|
+
|
|
+ printk("free_txd: %p, ei_local->cpu_ptr: %08X\n", free_txd, ei_local->tx_cpu_ptr);
|
|
+
|
|
+ printk(" POOL HEAD_PTR | DMA_PTR | CPU_PTR \n");
|
|
+ printk("----------------+---------+--------\n");
|
|
+#if 1
|
|
+ printk(" 0x%p 0x%08X 0x%08X\n",ei_local->txd_pool,
|
|
+ ei_local->tx_dma_ptr, ei_local->tx_cpu_ptr);
|
|
+#endif
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+bool fq_qdma_init(void)
|
|
+{
|
|
+ struct QDMA_txdesc *free_head = NULL;
|
|
+ unsigned int free_head_phy;
|
|
+ unsigned int free_tail_phy;
|
|
+ unsigned int *free_page_head = NULL;
|
|
+ unsigned int free_page_head_phy;
|
|
+ int i;
|
|
+
|
|
+ free_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), &free_head_phy);
|
|
+ if (unlikely(free_head == NULL)){
|
|
+		printk(KERN_ERR "QDMA FQ descriptor not available...\n");
|
|
+ return 0;
|
|
+ }
|
|
+ memset(free_head, 0x0, sizeof(struct QDMA_txdesc) * NUM_QDMA_PAGE);
|
|
+
|
|
+ free_page_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, &free_page_head_phy);
|
|
+ if (unlikely(free_page_head == NULL)){
|
|
+		printk(KERN_ERR "QDMA FQ page buffer not available...\n");
|
|
+ return 0;
|
|
+ }
|
|
+ for (i=0; i < NUM_QDMA_PAGE; i++) {
|
|
+ free_head[i].txd_info1.SDP = (free_page_head_phy + (i * QDMA_PAGE_SIZE));
|
|
+ if(i < (NUM_QDMA_PAGE-1)){
|
|
+ free_head[i].txd_info2.NDP = (free_head_phy + ((i+1) * sizeof(struct QDMA_txdesc)));
|
|
+
|
|
+
|
|
+#if 0
|
|
+ printk("free_head_phy[%d] is 0x%x!!!\n",i, VIRT_TO_PHYS(&free_head[i]) );
|
|
+ printk("free_head[%d] is 0x%x!!!\n",i, &free_head[i] );
|
|
+ printk("free_head[%d].txd_info1.SDP is 0x%x!!!\n",i, free_head[i].txd_info1.SDP );
|
|
+ printk("free_head[%d].txd_info2.NDP is 0x%x!!!\n",i, free_head[i].txd_info2.NDP );
|
|
+#endif
|
|
+ }
|
|
+ free_head[i].txd_info3.SDL = QDMA_PAGE_SIZE;
|
|
+
|
|
+ }
|
|
+ free_tail_phy = (free_head_phy + (u32)((NUM_QDMA_PAGE-1) * sizeof(struct QDMA_txdesc)));
|
|
+
|
|
+ printk("free_head_phy is 0x%x!!!\n", free_head_phy);
|
|
+ printk("free_tail_phy is 0x%x!!!\n", free_tail_phy);
|
|
+ sysRegWrite(QDMA_FQ_HEAD, (u32)free_head_phy);
|
|
+ sysRegWrite(QDMA_FQ_TAIL, (u32)free_tail_phy);
|
|
+ sysRegWrite(QDMA_FQ_CNT, ((NUM_TX_DESC << 16) | NUM_QDMA_PAGE));
|
|
+ sysRegWrite(QDMA_FQ_BLEN, QDMA_PAGE_SIZE << 16);
|
|
+ return 1;
|
|
+}
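fq_qdma_init() builds the QDMA forwarding queue: NUM_QDMA_PAGE descriptors are chained through their NDP fields, each one points at its own QDMA_PAGE_SIZE buffer through SDP, and the head/tail physical addresses and counts are programmed into QDMA_FQ_HEAD/TAIL/CNT/BLEN. The address arithmetic used in the loop, extracted here only as an illustration:

	/* Physical address of FQ descriptor i (written into NDP of descriptor i-1). */
	static u32 fq_txd_phys(u32 free_head_phy, unsigned int i)
	{
		return free_head_phy + i * sizeof(struct QDMA_txdesc);
	}

	/* Physical address of the page buffer owned by FQ descriptor i (its SDP). */
	static u32 fq_page_phys(u32 free_page_head_phy, unsigned int i)
	{
		return free_page_head_phy + i * QDMA_PAGE_SIZE;
	}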
|
|
+
|
|
+int fe_dma_init(struct net_device *dev)
|
|
+{
|
|
+
|
|
+ int i;
|
|
+ unsigned int regVal;
|
|
+ END_DEVICE* ei_local = netdev_priv(dev);
|
|
+
|
|
+ fq_qdma_init();
|
|
+
|
|
+ while(1)
|
|
+ {
|
|
+ regVal = sysRegRead(QDMA_GLO_CFG);
|
|
+ if((regVal & RX_DMA_BUSY))
|
|
+ {
|
|
+ printk("\n RX_DMA_BUSY !!! ");
|
|
+ continue;
|
|
+ }
|
|
+ if((regVal & TX_DMA_BUSY))
|
|
+ {
|
|
+ printk("\n TX_DMA_BUSY !!! ");
|
|
+ continue;
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ /*tx desc alloc, add a NULL TXD to HW*/
|
|
+
|
|
+ qdma_tx_desc_alloc();
|
|
+
|
|
+
|
|
+ /* Initial RX Ring 0*/
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ ei_local->rx_ring0 = kmalloc(NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
|
|
+ ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0);
|
|
+#else
|
|
+ ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0);
|
|
+#endif
|
|
+ for (i = 0; i < NUM_QRX_DESC; i++) {
|
|
+ memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc));
|
|
+ ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0;
|
|
+#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
|
|
+ ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
|
|
+#else
|
|
+ ei_local->rx_ring0[i].rxd_info2.LS0 = 1;
|
|
+#endif
|
|
+ ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
|
|
+ }
|
|
+ printk("QDMA_RX:phy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0);
|
|
+
|
|
+#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+ /* Initial RX Ring 1*/
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ ei_local->rx_ring1 = kmalloc(NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
|
|
+ ei_local->phy_rx_ring1 = virt_to_phys(ei_local->rx_ring1);
|
|
+#else
|
|
+ ei_local->rx_ring1 = pci_alloc_consistent(NULL, NUM_QRX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring1);
|
|
+#endif
|
|
+ for (i = 0; i < NUM_QRX_DESC; i++) {
|
|
+ memset(&ei_local->rx_ring1[i],0,sizeof(struct PDMA_rxdesc));
|
|
+ ei_local->rx_ring1[i].rxd_info2.DDONE_bit = 0;
|
|
+#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+		ei_local->rx_ring1[i].rxd_info2.LS0 = 0;
|
|
+		ei_local->rx_ring1[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
|
|
+#else
|
|
+ ei_local->rx_ring1[i].rxd_info2.LS0 = 1;
|
|
+#endif
|
|
+ ei_local->rx_ring1[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx1_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
|
|
+ }
|
|
+ printk("\nphy_rx_ring1 = 0x%08x, rx_ring1 = 0x%p\n",ei_local->phy_rx_ring1,ei_local->rx_ring1);
|
|
+#endif
|
|
+
|
|
+ regVal = sysRegRead(QDMA_GLO_CFG);
|
|
+ regVal &= 0x000000FF;
|
|
+ sysRegWrite(QDMA_GLO_CFG, regVal);
|
|
+ regVal=sysRegRead(QDMA_GLO_CFG);
|
|
+
|
|
+ /* Tell the adapter where the TX/RX rings are located. */
|
|
+
|
|
+ sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_rx_ring0));
|
|
+ sysRegWrite(QRX_MAX_CNT_0, cpu_to_le32((u32) NUM_QRX_DESC));
|
|
+ sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_calc_idx0 = rx_dma_owner_idx0 = sysRegRead(QRX_CRX_IDX_0);
|
|
+#endif
|
|
+ sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX0);
|
|
+#if defined (CONFIG_RAETH_MULTIPLE_RX_RING)
|
|
+ sysRegWrite(QRX_BASE_PTR_1, phys_to_bus((u32) ei_local->phy_rx_ring1));
|
|
+ sysRegWrite(QRX_MAX_CNT_1, cpu_to_le32((u32) NUM_QRX_DESC));
|
|
+ sysRegWrite(QRX_CRX_IDX_1, cpu_to_le32((u32) (NUM_QRX_DESC - 1)));
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_calc_idx1 = rx_dma_owner_idx1 = sysRegRead(QRX_CRX_IDX_1);
|
|
+#endif
|
|
+ sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX1);
|
|
+#endif
|
|
+
|
|
+#if !defined (CONFIG_RAETH_QDMATX_QDMARX)
|
|
+ /* Initial PDMA RX Ring 0*/
|
|
+#ifdef CONFIG_32B_DESC
|
|
+ ei_local->rx_ring0 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL);
|
|
+ ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0);
|
|
+#else
|
|
+ ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0);
|
|
+#endif
|
|
+ for (i = 0; i < NUM_RX_DESC; i++) {
|
|
+ memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc));
|
|
+ ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0;
|
|
+#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA)
|
|
+ ei_local->rx_ring0[i].rxd_info2.LS0 = 0;
|
|
+ ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH;
|
|
+#else
|
|
+ ei_local->rx_ring0[i].rxd_info2.LS0 = 1;
|
|
+#endif
|
|
+ ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE);
|
|
+ }
|
|
+ printk("PDMA_RX:phy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0);
|
|
+
|
|
+ regVal = sysRegRead(PDMA_GLO_CFG);
|
|
+ regVal &= 0x000000FF;
|
|
+ sysRegWrite(PDMA_GLO_CFG, regVal);
|
|
+ regVal=sysRegRead(PDMA_GLO_CFG);
|
|
+
|
|
+ sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_rx_ring0));
|
|
+ sysRegWrite(RX_MAX_CNT0, cpu_to_le32((u32) NUM_RX_DESC));
|
|
+ sysRegWrite(RX_CALC_IDX0, cpu_to_le32((u32) (NUM_RX_DESC - 1)));
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ rx_calc_idx0 = sysRegRead(RX_CALC_IDX0);
|
|
+#endif
|
|
+ sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0);
|
|
+
|
|
+#endif/*kurtis*/
|
|
+	/* Enable random early drop and set drop threshold automatically */
|
|
+ sysRegWrite(QDMA_FC_THRES, 0x174444);
|
|
+ sysRegWrite(QDMA_HRED2, 0x0);
|
|
+ set_fe_dma_glo_cfg();
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no)
|
|
+{
|
|
+ unsigned int length=skb->len;
|
|
+ END_DEVICE* ei_local = netdev_priv(dev);
|
|
+
|
|
+ struct QDMA_txdesc *cpu_ptr;
|
|
+
|
|
+ struct QDMA_txdesc *dma_ptr __maybe_unused;
|
|
+ struct QDMA_txdesc *free_txd;
|
|
+ unsigned int ctx_offset = 0;
|
|
+ unsigned int dtx_offset = 0;
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ struct iphdr *iph = NULL;
|
|
+ struct QDMA_txdesc *init_cpu_ptr;
|
|
+ struct tcphdr *th = NULL;
|
|
+ struct skb_frag_struct *frag;
|
|
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
|
|
+ unsigned int len, size, offset, frag_txd_num;
|
|
+ int init_txd_idx, i;
|
|
+#endif // CONFIG_RAETH_TSO //
|
|
+
|
|
+#if defined (CONFIG_RAETH_TSOV6)
|
|
+ struct ipv6hdr *ip6h = NULL;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ PSEUDO_ADAPTER *pAd;
|
|
+#endif
|
|
+ cpu_ptr = (ei_local->tx_cpu_ptr);
|
|
+ ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
|
|
+ cpu_ptr = phys_to_virt(ei_local->tx_cpu_ptr);
|
|
+ dma_ptr = phys_to_virt(ei_local->tx_dma_ptr);
|
|
+/*kurtis test*/
|
|
+ //dma_ptr = (ei_local->tx_dma_ptr);
|
|
+
|
|
+
|
|
+ /*only modify virtual address*/
|
|
+ //cpu_ptr = (ei_local->txd_pool) + (ctx_offset * sizeof(struct QDMA_txdesc));
|
|
+ cpu_ptr = (ei_local->txd_pool + (ctx_offset));
|
|
+
|
|
+ //dtx_offset = GET_TXD_OFFSET(&dma_ptr);
|
|
+ //dma_ptr = (ei_local->txd_pool) + (dtx_offset * sizeof(struct QDMA_txdesc));
|
|
+
|
|
+ //printk("eth_send ctx_offset = 0x%x!!!\n", ctx_offset);
|
|
+ //printk("eth_send dtx_offset = 0x%x!!!\n", dtx_offset);
|
|
+ //printk("eth_send ei_local->txd_pool = 0x%x!!!\n", ei_local->txd_pool);
|
|
+ //printk("eth_send cpu_ptr = 0x%x!!!\n", cpu_ptr);
|
|
+ //printk("eth_send ctx_offset = 0x%x!!!\n", ctx_offset);
|
|
+ //printk("eth_send ei_local->skb_free[ctx_offset] = 0x%x!!!\n", skb);
|
|
+
|
|
+
|
|
+ ei_local->skb_free[ctx_offset] = skb;
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ init_cpu_ptr = cpu_ptr;
|
|
+ init_txd_idx = ctx_offset;
|
|
+#endif
|
|
+
|
|
+#if !defined (CONFIG_RAETH_TSO)
|
|
+
|
|
+ //2. prepare data
|
|
+ cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
|
|
+ cpu_ptr->txd_info3.SDL = skb->len;
|
|
+
|
|
+ if (gmac_no == 1) {
|
|
+ cpu_ptr->txd_info4.FPORT = 1;
|
|
+ }else {
|
|
+ cpu_ptr->txd_info4.FPORT = 2;
|
|
+ }
|
|
+
|
|
+
|
|
+ cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
|
|
+#if 0
|
|
+ iph = (struct iphdr *)skb_network_header(skb);
|
|
+ if (iph->tos == 0xe0)
|
|
+ cpu_ptr->txd_info3.QID = 3;
|
|
+ else if (iph->tos == 0xa0)
|
|
+ cpu_ptr->txd_info3.QID = 2;
|
|
+ else if (iph->tos == 0x20)
|
|
+ cpu_ptr->txd_info3.QID = 1;
|
|
+ else
|
|
+ cpu_ptr->txd_info3.QID = 0;
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
|
|
+ if (skb->ip_summed == CHECKSUM_PARTIAL){
|
|
+ cpu_ptr->txd_info4.TUI_CO = 7;
|
|
+ }else {
|
|
+ cpu_ptr->txd_info4.TUI_CO = 0;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_TX
|
|
+ if(vlan_tx_tag_present(skb)) {
|
|
+ cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
|
|
+ }else {
|
|
+ cpu_ptr->txd_info4.VLAN_TAG = 0;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
|
|
+ if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
|
|
+ if(ra_sw_nat_hook_rx!= NULL){
|
|
+ cpu_ptr->txd_info4.FPORT = 4; /* PPE */
|
|
+ FOE_MAGIC_TAG(skb) = 0;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+#if 0
|
|
+ cpu_ptr->txd_info4.FPORT = 4; /* PPE */
|
|
+ cpu_ptr->txd_info4.UDF = 0x2F;
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_MIPS)
|
|
+ dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
|
|
+#else
|
|
+ dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
|
|
+#endif
|
|
+ cpu_ptr->txd_info3.SWC_bit = 1;
|
|
+
|
|
+	//3. get NULL TXD and decrease free_txd_num by 1.
|
|
+ ctx_offset = get_free_txd(&free_txd);
|
|
+ if(ctx_offset == NUM_TX_DESC) {
|
|
+ printk("get_free_txd fail\n"); // this should not happen. free_txd_num is 2 at least.
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ //4. hook new TXD in the end of queue
|
|
+ //cpu_ptr->txd_info2.NDP = virt_to_phys(free_txd);
|
|
+ cpu_ptr->txd_info2.NDP = (free_txd);
|
|
+
|
|
+
|
|
+ //5. move CPU_PTR to new TXD
|
|
+ //ei_local->tx_cpu_ptr = virt_to_phys(free_txd);
|
|
+ ei_local->tx_cpu_ptr = (free_txd);
|
|
+ cpu_ptr->txd_info3.OWN_bit = 0;
|
|
+ sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
|
|
+
|
|
+#if 0
|
|
+ printk("----------------------------------------------\n");
|
|
+ printk("txd_info1:%08X \n",*(int *)&cpu_ptr->txd_info1);
|
|
+ printk("txd_info2:%08X \n",*(int *)&cpu_ptr->txd_info2);
|
|
+ printk("txd_info3:%08X \n",*(int *)&cpu_ptr->txd_info3);
|
|
+ printk("txd_info4:%08X \n",*(int *)&cpu_ptr->txd_info4);
|
|
+#endif
|
|
+
|
|
+#else //#if !defined (CONFIG_RAETH_TSO)
|
|
+ cpu_ptr->txd_info1.SDP = virt_to_phys(skb->data);
|
|
+ cpu_ptr->txd_info3.SDL = (length - skb->data_len);
|
|
+ cpu_ptr->txd_info3.LS_bit = nr_frags ? 0:1;
|
|
+ if (gmac_no == 1) {
|
|
+ cpu_ptr->txd_info4.FPORT = 1;
|
|
+ }else {
|
|
+ cpu_ptr->txd_info4.FPORT = 2;
|
|
+ }
|
|
+
|
|
+ cpu_ptr->txd_info4.TSO = 0;
|
|
+ cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
|
|
+#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628)
|
|
+ if (skb->ip_summed == CHECKSUM_PARTIAL){
|
|
+ cpu_ptr->txd_info4.TUI_CO = 7;
|
|
+ }else {
|
|
+ cpu_ptr->txd_info4.TUI_CO = 0;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_RAETH_HW_VLAN_TX
|
|
+ if(vlan_tx_tag_present(skb)) {
|
|
+ cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb);
|
|
+ }else {
|
|
+ cpu_ptr->txd_info4.VLAN_TAG = 0;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
|
|
+ if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
|
|
+ if(ra_sw_nat_hook_rx!= NULL){
|
|
+ cpu_ptr->txd_info4.FPORT = 4; /* PPE */
|
|
+ FOE_MAGIC_TAG(skb) = 0;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ cpu_ptr->txd_info3.SWC_bit = 1;
|
|
+
|
|
+ ctx_offset = get_free_txd(&free_txd);
|
|
+ if(ctx_offset == NUM_TX_DESC) {
|
|
+ printk("get_free_txd fail\n");
|
|
+ return 0;
|
|
+ }
|
|
+ //cpu_ptr->txd_info2.NDP = virt_to_phys(free_txd);
|
|
+ //ei_local->tx_cpu_ptr = virt_to_phys(free_txd);
|
|
+ cpu_ptr->txd_info2.NDP = free_txd;
|
|
+ ei_local->tx_cpu_ptr = free_txd;
|
|
+
|
|
+ if(nr_frags > 0) {
|
|
+ for(i=0;i<nr_frags;i++) {
|
|
+ // 1. set or get init value for current fragment
|
|
+ offset = 0;
|
|
+ frag = &skb_shinfo(skb)->frags[i];
|
|
+ len = frag->size;
|
|
+ frag_txd_num = cal_frag_txd_num(len); // calculate the needed TXD numbers for this fragment
|
|
+			for( ; frag_txd_num > 0; frag_txd_num--){
|
|
+ // 2. size will be assigned to SDL and can't be larger than MAX_TXD_LEN
|
|
+ if(len < MAX_TXD_LEN)
|
|
+ size = len;
|
|
+ else
|
|
+ size = MAX_TXD_LEN;
|
|
+
|
|
+ //3. Update TXD info
|
|
+ cpu_ptr = (ei_local->txd_pool + (ctx_offset));
|
|
+ cpu_ptr->txd_info3.QID = M2Q_table[skb->mark];
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
|
|
+ cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE);
|
|
+#else
|
|
+ cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page.p, frag->page_offset + offset, size, PCI_DMA_TODEVICE);
|
|
+// printk(" frag->page = %08x. frag->page_offset = %08x. frag->size = % 08x.\n", frag->page, (frag->page_offset+offset), size);
|
|
+#endif
|
|
+ cpu_ptr->txd_info3.SDL = size;
|
|
+ if( (i==(nr_frags-1)) && (frag_txd_num == 1))
|
|
+ cpu_ptr->txd_info3.LS_bit = 1;
|
|
+ else
|
|
+ cpu_ptr->txd_info3.LS_bit = 0;
|
|
+ cpu_ptr->txd_info3.OWN_bit = 0;
|
|
+ cpu_ptr->txd_info3.SWC_bit = 1;
|
|
+ //4. Update skb_free for housekeeping
|
|
+ ei_local->skb_free[ctx_offset] = (cpu_ptr->txd_info3.LS_bit == 1)?skb:(struct sk_buff *)0xFFFFFFFF; //MAGIC ID
|
|
+
|
|
+ //5. Get next TXD
|
|
+ ctx_offset = get_free_txd(&free_txd);
|
|
+ //cpu_ptr->txd_info2.NDP = virt_to_phys(free_txd);
|
|
+ //ei_local->tx_cpu_ptr = virt_to_phys(free_txd);
|
|
+ cpu_ptr->txd_info2.NDP = free_txd;
|
|
+ ei_local->tx_cpu_ptr = free_txd;
|
|
+ //6. Update offset and len.
|
|
+ offset += size;
|
|
+ len -= size;
|
|
+ }
|
|
+ }
|
|
+ ei_local->skb_free[init_txd_idx]= (struct sk_buff *)0xFFFFFFFF; //MAGIC ID
|
|
+ }
|
|
+
|
|
+ if(skb_shinfo(skb)->gso_segs > 1) {
|
|
+
|
|
+// TsoLenUpdate(skb->len);
|
|
+
|
|
+ /* TCP over IPv4 */
|
|
+ iph = (struct iphdr *)skb_network_header(skb);
|
|
+#if defined (CONFIG_RAETH_TSOV6)
|
|
+ /* TCP over IPv6 */
|
|
+ ip6h = (struct ipv6hdr *)skb_network_header(skb);
|
|
+#endif
|
|
+ if((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
|
|
+ th = (struct tcphdr *)skb_transport_header(skb);
|
|
+
|
|
+ init_cpu_ptr->txd_info4.TSO = 1;
|
|
+
|
|
+ th->check = htons(skb_shinfo(skb)->gso_size);
|
|
+#if defined (CONFIG_MIPS)
|
|
+ dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
|
|
+#else
|
|
+ dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
|
|
+#endif
|
|
+ }
|
|
+
|
|
+#if defined (CONFIG_RAETH_TSOV6)
|
|
+ /* TCP over IPv6 */
|
|
+ //ip6h = (struct ipv6hdr *)skb_network_header(skb);
|
|
+ else if ((ip6h->version == 6) && (ip6h->nexthdr == NEXTHDR_TCP)) {
|
|
+ th = (struct tcphdr *)skb_transport_header(skb);
|
|
+#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR
|
|
+ init_cpu_ptr->txd_info4.TSO = 1;
|
|
+#else
|
|
+ init_cpu_ptr->txd_info4.TSO = 1;
|
|
+#endif
|
|
+ th->check = htons(skb_shinfo(skb)->gso_size);
|
|
+#if defined (CONFIG_MIPS)
|
|
+ dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
|
|
+#else
|
|
+ dma_sync_single_for_device(NULL, virt_to_phys(th), sizeof(struct tcphdr), DMA_TO_DEVICE);
|
|
+#endif
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+
|
|
+
|
|
+// dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
|
|
+
|
|
+ init_cpu_ptr->txd_info3.OWN_bit = 0;
|
|
+#endif // CONFIG_RAETH_TSO //
|
|
+
|
|
+ sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr);
|
|
+
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ if (gmac_no == 2) {
|
|
+ if (ei_local->PseudoDev != NULL) {
|
|
+ pAd = netdev_priv(ei_local->PseudoDev);
|
|
+ pAd->stat.tx_packets++;
|
|
+ pAd->stat.tx_bytes += length;
|
|
+ }
|
|
+ } else
|
|
+
|
|
+#endif
|
|
+ {
|
|
+ ei_local->stat.tx_packets++;
|
|
+ ei_local->stat.tx_bytes += skb->len;
|
|
+ }
|
|
+#ifdef CONFIG_RAETH_NAPI
|
|
+ if ( ei_local->tx_full == 1) {
|
|
+ ei_local->tx_full = 0;
|
|
+ netif_wake_queue(dev);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ return length;
|
|
+}
|
|
+
|
|
+int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no)
|
|
+{
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+ unsigned long flags;
|
|
+ unsigned int num_of_txd = 0;
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags, i;
|
|
+ struct skb_frag_struct *frag;
|
|
+#endif
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ PSEUDO_ADAPTER *pAd;
|
|
+#endif
|
|
+
|
|
+#if !defined(CONFIG_RA_NAT_NONE)
|
|
+ if(ra_sw_nat_hook_tx!= NULL)
|
|
+ {
|
|
+// spin_lock_irqsave(&ei_local->page_lock, flags);
|
|
+ if(ra_sw_nat_hook_tx(skb, gmac_no)==1){
|
|
+// spin_unlock_irqrestore(&ei_local->page_lock, flags);
|
|
+ }else{
|
|
+ kfree_skb(skb);
|
|
+// spin_unlock_irqrestore(&ei_local->page_lock, flags);
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RALINK_MT7621) || defined(CONFIG_ARCH_MT7623)
|
|
+#define MIN_PKT_LEN 64
|
|
+ if (skb->len < MIN_PKT_LEN) {
|
|
+ if (skb_padto(skb, MIN_PKT_LEN)) {
|
|
+ printk("raeth: skb_padto failed\n");
|
|
+ return 0;
|
|
+ }
|
|
+ skb_put(skb, MIN_PKT_LEN - skb->len);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+
|
|
+ dev->trans_start = jiffies; /* save the timestamp */
|
|
+ spin_lock_irqsave(&ei_local->page_lock, flags);
|
|
+#if defined (CONFIG_MIPS)
|
|
+ dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE);
|
|
+#else
|
|
+ dma_sync_single_for_device(NULL, virt_to_phys(skb->data), skb->len, DMA_TO_DEVICE);
|
|
+#endif
|
|
+
|
|
+
|
|
+//check free_txd_num before calling rt2880_eth_send()
|
|
+
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ // num_of_txd = (nr_frags==0) ? 1 : (nr_frags + 1);
|
|
+ if(nr_frags != 0){
|
|
+ for(i=0;i<nr_frags;i++) {
|
|
+ frag = &skb_shinfo(skb)->frags[i];
|
|
+ num_of_txd += cal_frag_txd_num(frag->size);
|
|
+ }
|
|
+ }else
|
|
+ num_of_txd = 1;
|
|
+#else
|
|
+ num_of_txd = 1;
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_RALINK_MT7621)
|
|
+ if((sysRegRead(0xbe00000c)&0xFFFF) == 0x0101) {
|
|
+ ei_xmit_housekeeping(0);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ ei_xmit_housekeeping(0);
|
|
+
|
|
+ //if ((ei_local->free_txd_num > num_of_txd + 1) && (ei_local->free_txd_num != NUM_TX_DESC))
|
|
+ if ((ei_local->free_txd_num > num_of_txd + 5) && (ei_local->free_txd_num != NUM_TX_DESC))
|
|
+ {
|
|
+ rt2880_eth_send(dev, skb, gmac_no); // need to modify rt2880_eth_send() for QDMA
|
|
+ if (ei_local->free_txd_num < 3)
|
|
+ {
|
|
+#if defined (CONFIG_RAETH_STOP_RX_WHEN_TX_FULL)
|
|
+ netif_stop_queue(dev);
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ netif_stop_queue(ei_local->PseudoDev);
|
|
+#endif
|
|
+ tx_ring_full = 1;
|
|
+#endif
|
|
+ }
|
|
+ } else {
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ if (gmac_no == 2)
|
|
+ {
|
|
+ if (ei_local->PseudoDev != NULL)
|
|
+ {
|
|
+ pAd = netdev_priv(ei_local->PseudoDev);
|
|
+ pAd->stat.tx_dropped++;
|
|
+ }
|
|
+ } else
|
|
+#endif
|
|
+ ei_local->stat.tx_dropped++;
|
|
+ kfree_skb(skb);
|
|
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
|
|
+ return 0;
|
|
+ }
|
|
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void ei_xmit_housekeeping(unsigned long unused)
|
|
+{
|
|
+ struct net_device *dev = dev_raether;
|
|
+ END_DEVICE *ei_local = netdev_priv(dev);
|
|
+#ifndef CONFIG_RAETH_NAPI
|
|
+ unsigned long reg_int_mask=0;
|
|
+#endif
|
|
+ struct QDMA_txdesc *dma_ptr = NULL;
|
|
+ struct QDMA_txdesc *cpu_ptr = NULL;
|
|
+ struct QDMA_txdesc *tmp_ptr = NULL;
|
|
+ unsigned int htx_offset = 0;
|
|
+ unsigned int ctx_offset = 0;
|
|
+ unsigned int dtx_offset = 0;
|
|
+
|
|
+ //dma_ptr = phys_to_virt(sysRegRead(QTX_DRX_PTR));
|
|
+ //cpu_ptr = phys_to_virt(sysRegRead(QTX_CRX_PTR));
|
|
+ //printk("kurtis:housekeeping QTX_DRX_PTR = 0x%x!!!\n", sysRegRead(QTX_DRX_PTR));
|
|
+ //printk("kurtis:housekeeping DMA_PTR = 0x%x!!!\n", dma_ptr);
|
|
+
|
|
+ cpu_ptr = sysRegRead(QTX_CRX_PTR);
|
|
+ dma_ptr = sysRegRead(QTX_DRX_PTR);
|
|
+
|
|
+ //printk("kurtis:housekeeping QTX_CRX_PTR = 0x%x!!!\n", cpu_ptr);
|
|
+ //printk("kurtis:housekeeping QTX_DRX_PTR = 0x%x!!!\n", dma_ptr);
|
|
+ ctx_offset = GET_TXD_OFFSET(&cpu_ptr);
|
|
+ dtx_offset = GET_TXD_OFFSET(&dma_ptr);
|
|
+ htx_offset = ctx_offset;
|
|
+ cpu_ptr = (ei_local->txd_pool + (ctx_offset));
|
|
+ dma_ptr = (ei_local->txd_pool + (dtx_offset));
|
|
+
|
|
+
|
|
+ //printk("kurtis:housekeeping CPU_PTR = 0x%x!!!\n", cpu_ptr);
|
|
+ //printk("kurtis:housekeeping DMA_PTR = 0x%x!!!\n", dma_ptr);
|
|
+
|
|
+/*temp mark*/
|
|
+#if 1
|
|
+
|
|
+
|
|
+ if(cpu_ptr != dma_ptr && (cpu_ptr->txd_info3.OWN_bit == 1)) {
|
|
+ while(cpu_ptr != dma_ptr && (cpu_ptr->txd_info3.OWN_bit == 1)) {
|
|
+
|
|
+ //1. keep cpu next TXD
|
|
+ //tmp_ptr = phys_to_virt(cpu_ptr->txd_info2.NDP);
|
|
+ tmp_ptr = cpu_ptr->txd_info2.NDP;
|
|
+ htx_offset = GET_TXD_OFFSET(&tmp_ptr);
|
|
+ //printk("kurtis:housekeeping cpu_ptr->txd_info2.NDP = 0x%x!!!\n", cpu_ptr->txd_info2.NDP);
|
|
+ //printk("kurtis:housekeeping tmp_ptr = 0x%x!!!\n", tmp_ptr);
|
|
+ //printk("kurtis:housekeeping htx_offset = 0x%x!!!\n", htx_offset);
|
|
+			//2. free skb memory
|
|
+#if defined (CONFIG_RAETH_TSO)
|
|
+ if(ei_local->skb_free[htx_offset]!=(struct sk_buff *)0xFFFFFFFF) {
|
|
+ dev_kfree_skb_any(ei_local->skb_free[htx_offset]);
|
|
+ }
|
|
+#else
|
|
+ dev_kfree_skb_any(ei_local->skb_free[htx_offset]);
|
|
+#endif
|
|
+
|
|
+ //3. release TXD
|
|
+ //htx_offset = GET_TXD_OFFSET(&cpu_ptr);
|
|
+ //put_free_txd(htx_offset);
|
|
+ put_free_txd(ctx_offset);
|
|
+
|
|
+
|
|
+
|
|
+ netif_wake_queue(dev);
|
|
+#ifdef CONFIG_PSEUDO_SUPPORT
|
|
+ netif_wake_queue(ei_local->PseudoDev);
|
|
+#endif
|
|
+ tx_ring_full=0;
|
|
+
|
|
+ //4. update cpu_ptr to next ptr
|
|
+ //cpu_ptr = tmp_ptr;
|
|
+ cpu_ptr = (ei_local->txd_pool + htx_offset);
|
|
+ ctx_offset = htx_offset;
|
|
+ //cpu_ptr = (cpu_ptr + (htx_offset));
|
|
+ //printk("kurtis:housekeeping 4. update cpu_ptr = 0x%x!!!\n", cpu_ptr);
|
|
+ }
|
|
+ }
|
|
+ //sysRegWrite(QTX_CRX_PTR, virt_to_phys(cpu_ptr));
|
|
+ //sysRegWrite(QTX_CRX_PTR, cpu_ptr);
|
|
+ tmp_ptr = (ei_local->phy_txd_pool + (htx_offset << 4));
|
|
+ //printk("kurtis:housekeeping 5. update QTX_CRX_PTR = 0x%x!!!\n", tmp_ptr);
|
|
+ sysRegWrite(QTX_CRX_PTR, tmp_ptr);
|
|
+
|
|
+#endif
|
|
+
|
|
+#ifndef CONFIG_RAETH_NAPI
|
|
+ reg_int_mask=sysRegRead(QFE_INT_ENABLE);
|
|
+#if defined (DELAY_INT)
|
|
+ sysRegWrite(FE_INT_ENABLE, reg_int_mask| RLS_DLY_INT);
|
|
+#else
|
|
+
|
|
+ sysRegWrite(FE_INT_ENABLE, reg_int_mask | RLS_DONE_INT);
|
|
+#endif
|
|
+#endif //CONFIG_RAETH_NAPI//
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(ei_start_xmit);
|
|
+EXPORT_SYMBOL(ei_xmit_housekeeping);
|
|
+EXPORT_SYMBOL(fe_dma_init);
|
|
+EXPORT_SYMBOL(rt2880_eth_send);
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/smb_hook.c
|
|
@@ -0,0 +1,17 @@
|
|
+#include <linux/version.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/skbuff.h>
|
|
+
|
|
+
|
|
+int (*smb_nf_local_in_hook)(struct sk_buff *skb) = NULL;
|
|
+int (*smb_nf_pre_routing_hook)(struct sk_buff *skb) = NULL;
|
|
+int (*smb_nf_local_out_hook)(struct sk_buff *skb) = NULL;
|
|
+int (*smb_nf_post_routing_hook)(struct sk_buff *skb) = NULL;
|
|
+EXPORT_SYMBOL(smb_nf_local_in_hook);
|
|
+EXPORT_SYMBOL(smb_nf_pre_routing_hook);
|
|
+EXPORT_SYMBOL(smb_nf_local_out_hook);
|
|
+EXPORT_SYMBOL(smb_nf_post_routing_hook);
|
|
+
|
|
+
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/smb_nf.c
|
|
@@ -0,0 +1,177 @@
|
|
+#include <linux/module.h>
|
|
+#include <linux/version.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+#include <linux/inetdevice.h>
|
|
+#include <linux/tcp.h>
|
|
+#include <linux/ip.h>
|
|
+#include <net/tcp.h>
|
|
+#include <net/ip.h>
|
|
+
|
|
+extern int (*smb_nf_local_in_hook)(struct sk_buff *skb);
|
|
+extern int (*smb_nf_pre_routing_hook)(struct sk_buff *skb);
|
|
+extern int (*smb_nf_local_out_hook)(struct sk_buff *skb);
|
|
+extern int (*smb_nf_post_routing_hook)(struct sk_buff *skb);
|
|
+
|
|
+struct net_device *lan_int = NULL;
|
|
+struct in_ifaddr *lan_ifa = NULL;
|
|
+
|
|
+
|
|
+int mtk_smb_nf_local_in_hook(struct sk_buff *skb)
+{
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+
+		if (iph->protocol == IPPROTO_TCP) {
+			struct tcphdr *th = (struct tcphdr *)(((unsigned char *)iph) + iph->ihl*4);
+
+			if ((iph->daddr == lan_ifa->ifa_local)
+			    && ((th->dest == 0xbd01) || (th->dest == 0x8900)
+				|| (th->dest == 0x8a00) || (th->dest == 0x8b00)))
+				return 1;
+			else
+				return 0;
+		}
+	}
+
+	return 0;
+}
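The port constants compared against th->dest (and th->source in the hooks below) are TCP ports in network byte order as seen by a little-endian CPU: 0xbd01, 0x8900, 0x8a00 and 0x8b00 correspond to ports 445 (SMB over TCP), 137 (NetBIOS name service), 138 (NetBIOS datagram) and 139 (NetBIOS session). A portable way to express the same test, shown only as an illustration and not part of these hooks:

	static inline int is_smb_port(__be16 port)
	{
		return port == htons(445) || port == htons(137) ||
		       port == htons(138) || port == htons(139);
	}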
|
|
+
|
|
+int mtk_smb_nf_pre_routing_hook(struct sk_buff *skb)
+{
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+
+		if (iph->protocol == IPPROTO_TCP) {
+			struct tcphdr *th = (struct tcphdr *)(((unsigned char *)iph) + iph->ihl*4);
+
+			if ((iph->daddr == lan_ifa->ifa_local)
+			    && ((th->dest == 0xbd01) || (th->dest == 0x8900)
+				|| (th->dest == 0x8a00) || (th->dest == 0x8b00)))
+				return 1;
+			else
+				return 0;
+		}
+	}
+
+	return 0;
+}
|
|
+
|
|
+int mtk_smb_nf_local_out_hook(struct sk_buff *skb)
+{
+	struct iphdr *iph = ip_hdr(skb);
+
+	if (iph->protocol == IPPROTO_TCP) {
+		struct tcphdr *th = (struct tcphdr *)(((unsigned char *)iph) + iph->ihl*4);
+
+		if ((iph->saddr == lan_ifa->ifa_local)
+		    && ((th->source == 0xbd01) || (th->source == 0x8900)
+			|| (th->source == 0x8a00) || (th->source == 0x8b00)))
+			return 1;
+		else
+			return 0;
+	}
+
+	return 0;
+}
|
|
+
|
|
+int mtk_smb_nf_post_routing_hook(struct sk_buff *skb)
+{
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+
+		if (iph->protocol == IPPROTO_TCP) {
+			struct tcphdr *th = (struct tcphdr *)(((unsigned char *)iph) + iph->ihl*4);
+
+			if ((iph->saddr == lan_ifa->ifa_local)
+			    && ((th->source == 0xbd01) || (th->source == 0x8900)
+				|| (th->source == 0x8a00) || (th->source == 0x8b00)))
+				return 1;
+			else
+				return 0;
+		}
+	}
+
+	return 0;
+}
|
|
+
|
|
+int __init mtk_smb_hook_init(void)
|
|
+{
|
|
+ struct in_device *in_dev;
|
|
+ struct in_ifaddr **ifap = NULL;
|
|
+ struct in_ifaddr *ifa = NULL;
|
|
+
|
|
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
|
|
+ lan_int = dev_get_by_name(&init_net, "br0");
|
|
+#else
|
|
+ lan_int = dev_get_by_name("br0");
|
|
+#endif
|
|
+ if (lan_int)
|
|
+ in_dev = __in_dev_get_rtnl(lan_int);
|
|
+ else
|
|
+ return 0;
|
|
+
|
|
+ if (in_dev) {
|
|
+ for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
|
|
+ ifap = &ifa->ifa_next) {
|
|
+ if (!strcmp("br0", ifa->ifa_label))
|
|
+ {
|
|
+ lan_ifa = ifa;
|
|
+ break; /* found */
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ return 0;
|
|
+
|
|
+ if (lan_ifa) {
|
|
+ smb_nf_local_in_hook = mtk_smb_nf_local_in_hook;
|
|
+ smb_nf_pre_routing_hook = mtk_smb_nf_pre_routing_hook;
|
|
+ smb_nf_local_out_hook = mtk_smb_nf_local_out_hook;
|
|
+ smb_nf_post_routing_hook = mtk_smb_nf_post_routing_hook;
|
|
+ }
|
|
+
|
|
+ printk("Samba Netfilter Hook Enabled\n");
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void mtk_smb_hook_cleanup(void)
|
|
+{
|
|
+ lan_int = NULL;
|
|
+ lan_ifa = NULL;
|
|
+ smb_nf_local_in_hook = NULL;
|
|
+ smb_nf_pre_routing_hook = NULL;
|
|
+ smb_nf_local_out_hook = NULL;
|
|
+ smb_nf_post_routing_hook = NULL;
|
|
+
|
|
+ return;
|
|
+}
|
|
+
|
|
+module_init(mtk_smb_hook_init);
|
|
+module_exit(mtk_smb_hook_cleanup);
|
|
+
|
|
+MODULE_LICENSE("GPL");
|
|
--- /dev/null
|
|
+++ b/drivers/net/ethernet/raeth/sync_write.h
|
|
@@ -0,0 +1,103 @@
|
|
+#ifndef _MT_SYNC_WRITE_H
|
|
+#define _MT_SYNC_WRITE_H
|
|
+
|
|
+#if defined(__KERNEL__)
|
|
+
|
|
+#include <linux/io.h>
|
|
+#include <asm/cacheflush.h>
|
|
+//#include <asm/system.h>
|
|
+
|
|
+/*
|
|
+ * Define macros.
|
|
+ */
|
|
+
|
|
+#define mt65xx_reg_sync_writel(v, a) \
|
|
+ do { \
|
|
+ __raw_writel((v), IOMEM((a))); \
|
|
+ dsb(); \
|
|
+ } while (0)
|
|
+
|
|
+#define mt65xx_reg_sync_writew(v, a) \
|
|
+ do { \
|
|
+ __raw_writew((v), IOMEM((a))); \
|
|
+ dsb(); \
|
|
+ } while (0)
|
|
+
|
|
+#define mt65xx_reg_sync_writeb(v, a) \
|
|
+ do { \
|
|
+ __raw_writeb((v), IOMEM((a))); \
|
|
+ dsb(); \
|
|
+ } while (0)
|
|
+
|
|
+#define mt_reg_sync_writel(v, a) \
|
|
+ do { \
|
|
+ __raw_writel((v), IOMEM((a))); \
|
|
+ dsb(); \
|
|
+ } while (0)
|
|
+
|
|
+#define mt_reg_sync_writew(v, a) \
|
|
+ do { \
|
|
+ __raw_writew((v), IOMEM((a))); \
|
|
+ dsb(); \
|
|
+ } while (0)
|
|
+
|
|
+#define mt_reg_sync_writeb(v, a) \
|
|
+ do { \
|
|
+ __raw_writeb((v), IOMEM((a))); \
|
|
+ dsb(); \
|
|
+ } while (0)
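Each kernel-side macro follows the raw MMIO write with a dsb() barrier, so later code does not run ahead of the register write. A minimal usage sketch; the base pointer and the zero offset are placeholders, not real MT7623 registers:

	/* Hypothetical example, illustration only. */
	static void example_sync_write(void __iomem *regs)
	{
		mt_reg_sync_writel(0x1, regs + 0x0);	/* 32-bit write followed by dsb() */
	}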
|
|
+
|
|
+
|
|
+#else /* __KERNEL__ */
|
|
+
|
|
+#include <sys/types.h>
|
|
+#include <sys/stat.h>
|
|
+#include <fcntl.h>
|
|
+#include <unistd.h>
|
|
+#include <string.h>
|
|
+
|
|
+#define dsb() \
|
|
+ do { \
|
|
+ __asm__ __volatile__ ("dsb" : : : "memory"); \
|
|
+ } while (0)
|
|
+
|
|
+#define mt65xx_reg_sync_writel(v, a) \
|
|
+ do { \
|
|
+ *(volatile unsigned int *)(a) = (v); \
|
|
+ dsb(); \
|
|
+ } while (0)
|
|
+
|
|
+#define mt65xx_reg_sync_writew(v, a) \
|
|
+ do { \
|
|
+ *(volatile unsigned short *)(a) = (v); \
|
|
+ dsb(); \
|
|
+ } while (0)
|
|
+
|
|
+#define mt65xx_reg_sync_writeb(v, a) \
|
|
+ do { \
|
|
+ *(volatile unsigned char *)(a) = (v); \
|
|
+ dsb(); \
|
|
+ } while (0)
|
|
+
|
|
+#define mt_reg_sync_writel(v, a) \
|
|
+ do { \
|
|
+ *(volatile unsigned int *)(a) = (v); \
|
|
+ dsb(); \
|
|
+ } while (0)
|
|
+
|
|
+#define mt_reg_sync_writew(v, a) \
|
|
+ do { \
|
|
+ *(volatile unsigned short *)(a) = (v); \
|
|
+ dsb(); \
|
|
+ } while (0)
|
|
+
|
|
+#define mt_reg_sync_writeb(v, a) \
|
|
+ do { \
|
|
+ *(volatile unsigned char *)(a) = (v); \
|
|
+ dsb(); \
|
|
+ } while (0)
|
|
+
|
|
+
|
|
+#endif /* __KERNEL__ */
|
|
+
|
|
+#endif /* !_MT_SYNC_WRITE_H */
|