Mirror of https://github.com/openwrt/openwrt.git (synced 2024-12-23 07:22:33 +00:00)
Commit 5159d71983
All patches of LSDK 19.03 were ported to the OpenWrt kernel. We still use an all-in-one patch for each IP/feature for OpenWrt. The changes this patch introduces:

- Updated original IP/feature patches to LSDK 19.03.
- Added new IP/feature patches for eTSEC/PTP/TMU.
- Squashed scattered patches into IP/feature patches.
- Updated config-4.14 correspondingly.
- Refreshed all patches.

More info about LSDK and the kernel:
- https://lsdk.github.io/components.html
- https://source.codeaurora.org/external/qoriq/qoriq-components/linux

Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
From 35745905430a4c9827c235d42f3a61bef34043e8 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
Date: Fri, 19 Apr 2019 13:21:09 +0800
Subject: [PATCH] pfe-eth: support layerscape

This is an integrated patch of pfe-eth for layerscape

Signed-off-by: Akhila Kavi <akhila.kavi@nxp.com>
Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Signed-off-by: Anjaneyulu Jagarlmudi <anji.jagarlmudi@nxp.com>
Signed-off-by: Archana Madhavan <archana.madhavan@nxp.com>
Signed-off-by: Bhaskar Upadhaya <Bhaskar.Upadhaya@nxp.com>
Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Calvin Johnson <calvin.johnson@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Shreyansh Jain <shreyansh.jain@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
 .../devicetree/bindings/net/fsl_ppfe/pfe.txt  |  199 ++
 drivers/staging/fsl_ppfe/Kconfig              |   21 +
 drivers/staging/fsl_ppfe/Makefile             |   20 +
 drivers/staging/fsl_ppfe/TODO                 |    2 +
 drivers/staging/fsl_ppfe/include/pfe/cbus.h   |   78 +
 .../staging/fsl_ppfe/include/pfe/cbus/bmu.h   |   55 +
 .../fsl_ppfe/include/pfe/cbus/class_csr.h     |  289 ++
 .../fsl_ppfe/include/pfe/cbus/emac_mtip.h     |  242 ++
 .../staging/fsl_ppfe/include/pfe/cbus/gpi.h   |   86 +
 .../staging/fsl_ppfe/include/pfe/cbus/hif.h   |  100 +
 .../fsl_ppfe/include/pfe/cbus/hif_nocpy.h     |   50 +
 .../fsl_ppfe/include/pfe/cbus/tmu_csr.h       |  168 ++
 .../fsl_ppfe/include/pfe/cbus/util_csr.h      |   61 +
 drivers/staging/fsl_ppfe/include/pfe/pfe.h    |  372 +++
 drivers/staging/fsl_ppfe/pfe_cdev.c           |  258 ++
 drivers/staging/fsl_ppfe/pfe_cdev.h           |   41 +
 drivers/staging/fsl_ppfe/pfe_ctrl.c           |  226 ++
 drivers/staging/fsl_ppfe/pfe_ctrl.h           |  100 +
 drivers/staging/fsl_ppfe/pfe_debugfs.c        |   99 +
 drivers/staging/fsl_ppfe/pfe_debugfs.h        |   13 +
 drivers/staging/fsl_ppfe/pfe_eth.c            | 2554 +++++++++++++++++
 drivers/staging/fsl_ppfe/pfe_eth.h            |  175 ++
 drivers/staging/fsl_ppfe/pfe_firmware.c       |  302 ++
 drivers/staging/fsl_ppfe/pfe_firmware.h       |   20 +
 drivers/staging/fsl_ppfe/pfe_hal.c            | 1516 ++++++++++
 drivers/staging/fsl_ppfe/pfe_hif.c            | 1060 +++++++
 drivers/staging/fsl_ppfe/pfe_hif.h            |  200 ++
 drivers/staging/fsl_ppfe/pfe_hif_lib.c        |  628 ++++
 drivers/staging/fsl_ppfe/pfe_hif_lib.h        |  229 ++
 drivers/staging/fsl_ppfe/pfe_hw.c             |  164 ++
 drivers/staging/fsl_ppfe/pfe_hw.h             |   15 +
 .../staging/fsl_ppfe/pfe_ls1012a_platform.c   |  368 +++
 drivers/staging/fsl_ppfe/pfe_mod.c            |  158 +
 drivers/staging/fsl_ppfe/pfe_mod.h            |  103 +
 drivers/staging/fsl_ppfe/pfe_perfmon.h        |   26 +
 drivers/staging/fsl_ppfe/pfe_sysfs.c          |  806 ++++++
 drivers/staging/fsl_ppfe/pfe_sysfs.h          |   17 +
 37 files changed, 10821 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/net/fsl_ppfe/pfe.txt
 create mode 100644 drivers/staging/fsl_ppfe/Kconfig
 create mode 100644 drivers/staging/fsl_ppfe/Makefile
 create mode 100644 drivers/staging/fsl_ppfe/TODO
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
 create mode 100644 drivers/staging/fsl_ppfe/include/pfe/pfe.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_cdev.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_cdev.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hal.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_perfmon.h
 create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.c
 create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.h

--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl_ppfe/pfe.txt
@@ -0,0 +1,199 @@
+=============================================================================
+NXP Programmable Packet Forwarding Engine Device Bindings
+
+CONTENTS
+  - PFE Node
+  - Ethernet Node
+
+=============================================================================
+PFE Node
+
+DESCRIPTION
+
+The PFE node has all the properties associated with the Packet Forwarding
+Engine block.
+
+PROPERTIES
+
+- compatible
+  Usage: required
+  Value type: <stringlist>
+  Definition: Must include "fsl,pfe"
+
+- reg
+  Usage: required
+  Value type: <prop-encoded-array>
+  Definition: A standard property.
+      Specifies the offset of the following registers:
+      - PFE configuration registers
+      - DDR memory used by PFE
+
+- fsl,pfe-num-interfaces
+  Usage: required
+  Value type: <u32>
+  Definition: Must be present. Value can be either one or two.
+
+- interrupts
+  Usage: required
+  Value type: <prop-encoded-array>
+  Definition: Three interrupts are specified in this property.
+      - HIF interrupt
+      - HIF NO COPY interrupt
+      - Wake On LAN interrupt
+
+- interrupt-names
+  Usage: required
+  Value type: <stringlist>
+  Definition: The following strings are defined for the 3 interrupts.
+      "pfe_hif"       - HIF interrupt
+      "pfe_hif_nocpy" - HIF NO COPY interrupt
+      "pfe_wol"       - Wake On LAN interrupt
+
+- memory-region
+  Usage: required
+  Value type: <phandle>
+  Definition: phandle to a node describing reserved memory used by pfe.
+      Refer: Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+
+- fsl,pfe-scfg
+  Usage: required
+  Value type: <phandle>
+  Definition: phandle for scfg.
+
+- fsl,rcpm-wakeup
+  Usage: required
+  Value type: <phandle>
+  Definition: phandle for rcpm.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: phandle for clockgen.
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: name of the clock; "pfe" in the example below.
+
+EXAMPLE
+
+pfe: pfe@04000000 {
+	compatible = "fsl,pfe";
+	reg = <0x0 0x04000000 0x0 0xc00000>,	/* AXI 16M */
+	      <0x0 0x83400000 0x0 0xc00000>;	/* PFE DDR 12M */
+	reg-names = "pfe", "pfe-ddr";
+	fsl,pfe-num-interfaces = <0x2>;
+	interrupts = <0 172 0x4>,	/* HIF interrupt */
+		     <0 173 0x4>,	/* HIF_NOCPY interrupt */
+		     <0 174 0x4>;	/* WoL interrupt */
+	interrupt-names = "pfe_hif", "pfe_hif_nocpy", "pfe_wol";
+	memory-region = <&pfe_reserved>;
+	fsl,pfe-scfg = <&scfg 0>;
+	fsl,rcpm-wakeup = <&rcpm 0xf0000020>;
+	clocks = <&clockgen 4 0>;
+	clock-names = "pfe";
+
+	status = "okay";
+	pfe_mac0: ethernet@0 {
+	};
+
+	pfe_mac1: ethernet@1 {
+	};
+};
+
+=============================================================================
+Ethernet Node
+
+DESCRIPTION
+
+The Ethernet node has all the properties associated with a PFE interface,
+used by platforms to connect to a PHY:
+
+PROPERTIES
+
+- compatible
+  Usage: required
+  Value type: <stringlist>
+  Definition: Must include "fsl,pfe-gemac-port"
+
+- reg
+  Usage: required
+  Value type: <prop-encoded-array>
+  Definition: A standard property.
+      Specifies the gemac id of the interface.
+
+- fsl,gemac-bus-id
+  Usage: required
+  Value type: <u32>
+  Definition: Must be present. Value should be the id of the bus
+      connected to gemac.
+
+- fsl,gemac-phy-id (deprecated binding)
+  Usage: required
+  Value type: <u32>
+  Definition: This binding shouldn't be used with new platforms.
+      Must be present. Value should be the id of the phy
+      connected to gemac.
+
+- fsl,mdio-mux-val
+  Usage: required
+  Value type: <u32>
+  Definition: Must be present. Value can be either 0 or 2 or 3.
+      This value is used to configure the mux to enable mdio.
+
+- phy-mode
+  Usage: required
+  Value type: <string>
+  Definition: Must include "sgmii"
+
+- fsl,pfe-phy-if-flags (deprecated binding)
+  Usage: required
+  Value type: <u32>
+  Definition: This binding shouldn't be used with new platforms.
+      Must be present. Value should be 0 by default.
+      If there is no PHY connected, this needs to be 1.
+
+- phy-handle
+  Usage: optional
+  Value type: <phandle>
+  Definition: phandle to the PHY device connected to this device.
+
+- mdio : A required subnode which specifies the mdio bus in the PFE and is
+used as a container for phy nodes according to ../phy.txt.
+
+EXAMPLE
+
+ethernet@0 {
+	compatible = "fsl,pfe-gemac-port";
+	#address-cells = <1>;
+	#size-cells = <0>;
+	reg = <0x0>;	/* GEM_ID */
+	fsl,gemac-bus-id = <0x0>;	/* BUS_ID */
+	fsl,mdio-mux-val = <0x0>;
+	phy-mode = "sgmii";
+	phy-handle = <&sgmii_phy1>;
+};
+
+
+ethernet@1 {
+	compatible = "fsl,pfe-gemac-port";
+	#address-cells = <1>;
+	#size-cells = <0>;
+	reg = <0x1>;	/* GEM_ID */
+	fsl,gemac-bus-id = <0x1>;	/* BUS_ID */
+	fsl,mdio-mux-val = <0x0>;
+	phy-mode = "sgmii";
+	phy-handle = <&sgmii_phy2>;
+};
+
+mdio@0 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	sgmii_phy1: ethernet-phy@2 {
+		reg = <0x2>;
+	};
+
+	sgmii_phy2: ethernet-phy@1 {
+		reg = <0x1>;
+	};
+};
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/Kconfig
@@ -0,0 +1,21 @@
+#
+# Freescale Programmable Packet Forwarding Engine driver
+#
+config FSL_PPFE
+	bool "Freescale PPFE Driver"
+	select FSL_GUTS
+	default n
+	---help---
+	  Freescale LS1012A SoC has a Programmable Packet Forwarding Engine.
+	  It provides two high performance ethernet interfaces.
+	  This driver initializes, programs and controls the PPFE.
+	  Use this driver to enable network connectivity on LS1012A platforms.
+
+if FSL_PPFE
+
+config FSL_PPFE_UTIL_DISABLED
+	bool "Disable PPFE UTIL Processor Engine"
+	---help---
+	  UTIL PE has to be enabled only if required.
+
+endif # FSL_PPFE
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the Freescale PPFE driver
+#
+
+ccflags-y += -I$(src)/include -I$(src)
+
+obj-m += pfe.o
+
+pfe-y += pfe_mod.o \
+	pfe_hw.o \
+	pfe_firmware.o \
+	pfe_ctrl.o \
+	pfe_hif.o \
+	pfe_hif_lib.o \
+	pfe_eth.o \
+	pfe_sysfs.o \
+	pfe_debugfs.o \
+	pfe_ls1012a_platform.o \
+	pfe_hal.o \
+	pfe_cdev.o
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/TODO
@@ -0,0 +1,2 @@
+TODO:
+	- provide pfe pe monitoring support
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CBUS_H_
+#define _CBUS_H_
+
+#define EMAC1_BASE_ADDR		(CBUS_BASE_ADDR + 0x200000)
+#define EGPI1_BASE_ADDR		(CBUS_BASE_ADDR + 0x210000)
+#define EMAC2_BASE_ADDR		(CBUS_BASE_ADDR + 0x220000)
+#define EGPI2_BASE_ADDR		(CBUS_BASE_ADDR + 0x230000)
+#define BMU1_BASE_ADDR		(CBUS_BASE_ADDR + 0x240000)
+#define BMU2_BASE_ADDR		(CBUS_BASE_ADDR + 0x250000)
+#define ARB_BASE_ADDR		(CBUS_BASE_ADDR + 0x260000)
+#define DDR_CONFIG_BASE_ADDR	(CBUS_BASE_ADDR + 0x270000)
+#define HIF_BASE_ADDR		(CBUS_BASE_ADDR + 0x280000)
+#define HGPI_BASE_ADDR		(CBUS_BASE_ADDR + 0x290000)
+#define LMEM_BASE_ADDR		(CBUS_BASE_ADDR + 0x300000)
+#define LMEM_SIZE		0x10000
+#define LMEM_END		(LMEM_BASE_ADDR + LMEM_SIZE)
+#define TMU_CSR_BASE_ADDR	(CBUS_BASE_ADDR + 0x310000)
+#define CLASS_CSR_BASE_ADDR	(CBUS_BASE_ADDR + 0x320000)
+#define HIF_NOCPY_BASE_ADDR	(CBUS_BASE_ADDR + 0x350000)
+#define UTIL_CSR_BASE_ADDR	(CBUS_BASE_ADDR + 0x360000)
+#define CBUS_GPT_BASE_ADDR	(CBUS_BASE_ADDR + 0x370000)
+
+/*
+ * defgroup XXX_MEM_ACCESS_ADDR PE memory access through CSR
+ * XXX_MEM_ACCESS_ADDR register bit definitions.
+ */
+#define PE_MEM_ACCESS_WRITE	BIT(31)	/* Internal Memory Write. */
+#define PE_MEM_ACCESS_IMEM	BIT(15)
+#define PE_MEM_ACCESS_DMEM	BIT(16)
+
+/* Byte Enables of the Internal memory access. These are interpreted in BE */
+#define PE_MEM_ACCESS_BYTE_ENABLE(offset, size)	\
+	({ typeof(size) size_ = (size); \
+	(((BIT(size_) - 1) << (4 - (offset) - (size_))) & 0xf) << 24; })
+
+#include "cbus/emac_mtip.h"
+#include "cbus/gpi.h"
+#include "cbus/bmu.h"
+#include "cbus/hif.h"
+#include "cbus/tmu_csr.h"
+#include "cbus/class_csr.h"
+#include "cbus/hif_nocpy.h"
+#include "cbus/util_csr.h"
+
+/* PFE cores states */
+#define CORE_DISABLE	0x00000000
+#define CORE_ENABLE	0x00000001
+#define CORE_SW_RESET	0x00000002
+
+/* LMEM defines */
+#define LMEM_HDR_SIZE		0x0010
+#define LMEM_BUF_SIZE_LN2	0x7
+#define LMEM_BUF_SIZE		BIT(LMEM_BUF_SIZE_LN2)
+
+/* DDR defines */
+#define DDR_HDR_SIZE		0x0100
+#define DDR_BUF_SIZE_LN2	0xb
+#define DDR_BUF_SIZE		BIT(DDR_BUF_SIZE_LN2)
+
+#endif /* _CBUS_H_ */
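
Note: the byte-enable macro above is easiest to check with concrete numbers. Below is a stand-alone sketch (not part of the patch) that recomputes the PE memory-access command word for a 4-byte big-endian write to DMEM address 0x100; the [23:0] address field follows the TMU_MEM_ACCESS_ADDR description in tmu_csr.h, and the macro is restated without the kernel's typeof() wrapper purely for the demo.

  #include <stdint.h>
  #include <stdio.h>

  #define BIT(n)			(1u << (n))
  #define PE_MEM_ACCESS_WRITE	BIT(31)
  #define PE_MEM_ACCESS_DMEM	BIT(16)
  /* same arithmetic as the kernel macro, minus the typeof() guard */
  #define PE_MEM_ACCESS_BYTE_ENABLE(offset, size) \
  	((((BIT(size) - 1) << (4 - (offset) - (size))) & 0xf) << 24)

  int main(void)
  {
  	/* write | byte enables for all 4 lanes | DMEM select | 24-bit address */
  	uint32_t cmd = PE_MEM_ACCESS_WRITE |
  		       PE_MEM_ACCESS_BYTE_ENABLE(0, 4) |
  		       PE_MEM_ACCESS_DMEM |
  		       (0x100 & 0xffffff);

  	printf("cmd = 0x%08x\n", (unsigned int)cmd);	/* prints 0x8f010100 */
  	return 0;
  }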
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _BMU_H_
+#define _BMU_H_
+
+#define BMU_VERSION		0x000
+#define BMU_CTRL		0x004
+#define BMU_UCAST_CONFIG	0x008
+#define BMU_UCAST_BASE_ADDR	0x00c
+#define BMU_BUF_SIZE		0x010
+#define BMU_BUF_CNT		0x014
+#define BMU_THRES		0x018
+#define BMU_INT_SRC		0x020
+#define BMU_INT_ENABLE		0x024
+#define BMU_ALLOC_CTRL		0x030
+#define BMU_FREE_CTRL		0x034
+#define BMU_FREE_ERR_ADDR	0x038
+#define BMU_CURR_BUF_CNT	0x03c
+#define BMU_MCAST_CNT		0x040
+#define BMU_MCAST_ALLOC_CTRL	0x044
+#define BMU_REM_BUF_CNT		0x048
+#define BMU_LOW_WATERMARK	0x050
+#define BMU_HIGH_WATERMARK	0x054
+#define BMU_INT_MEM_ACCESS	0x100
+
+struct BMU_CFG {
+	unsigned long baseaddr;
+	u32 count;
+	u32 size;
+	u32 low_watermark;
+	u32 high_watermark;
+};
+
+#define BMU1_BUF_SIZE	LMEM_BUF_SIZE_LN2
+#define BMU2_BUF_SIZE	DDR_BUF_SIZE_LN2
+
+#define BMU2_MCAST_ALLOC_CTRL	(BMU2_BASE_ADDR + BMU_MCAST_ALLOC_CTRL)
+
+#endif /* _BMU_H_ */
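
Note: an illustrative sketch of programming one BMU instance from struct BMU_CFG using the registers above. The helper name is hypothetical (the driver's real init lives in pfe_hal.c); encoding the buffer size as ln2 is inferred from BMU1_BUF_SIZE/BMU2_BUF_SIZE being *_LN2 values, and CORE_ENABLE comes from cbus.h.

  #include <linux/io.h>
  #include <linux/log2.h>

  static void bmu_init_sketch(void __iomem *base, const struct BMU_CFG *cfg)
  {
  	writel((u32)cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
  	writel(ilog2(cfg->size), base + BMU_BUF_SIZE);	/* ln2 encoding (assumed) */
  	writel(cfg->count, base + BMU_BUF_CNT);
  	writel(cfg->low_watermark, base + BMU_LOW_WATERMARK);
  	writel(cfg->high_watermark, base + BMU_HIGH_WATERMARK);

  	writel(CORE_ENABLE, base + BMU_CTRL);	/* start the unit */
  }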
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CLASS_CSR_H_
+#define _CLASS_CSR_H_
+
+/* @file class_csr.h.
+ * class_csr - block containing all the classifier control and status
+ * registers. Mapped on CBUS and accessible from all PE's and ARM.
+ */
+#define CLASS_VERSION		(CLASS_CSR_BASE_ADDR + 0x000)
+#define CLASS_TX_CTRL		(CLASS_CSR_BASE_ADDR + 0x004)
+#define CLASS_INQ_PKTPTR	(CLASS_CSR_BASE_ADDR + 0x010)
+
+/* (ddr_hdr_size[24:16], lmem_hdr_size[5:0]) */
+#define CLASS_HDR_SIZE		(CLASS_CSR_BASE_ADDR + 0x014)
+
+/* LMEM header size for the Classifier block. Data in the LMEM
+ * is written from this offset.
+ */
+#define CLASS_HDR_SIZE_LMEM(off)	((off) & 0x3f)
+
+/* DDR header size for the Classifier block. Data in the DDR
+ * is written from this offset.
+ */
+#define CLASS_HDR_SIZE_DDR(off)		(((off) & 0x1ff) << 16)
+
+#define CLASS_PE0_QB_DM_ADDR0	(CLASS_CSR_BASE_ADDR + 0x020)
+
+/* DMEM address of first [15:0] and second [31:16] buffers on QB side. */
+#define CLASS_PE0_QB_DM_ADDR1	(CLASS_CSR_BASE_ADDR + 0x024)
+
+/* DMEM address of third [15:0] and fourth [31:16] buffers on QB side. */
+#define CLASS_PE0_RO_DM_ADDR0	(CLASS_CSR_BASE_ADDR + 0x060)
+
+/* DMEM address of first [15:0] and second [31:16] buffers on RO side. */
+#define CLASS_PE0_RO_DM_ADDR1	(CLASS_CSR_BASE_ADDR + 0x064)
+
+/* DMEM address of third [15:0] and fourth [31:16] buffers on RO side. */
+
+/* @name Class PE memory access. Allows external PE's and HOST to
+ * read/write PMEM/DMEM memory ranges for each classifier PE.
+ */
+/* {sr_pe_mem_cmd[31], csr_pe_mem_wren[27:24], csr_pe_mem_addr[23:0]},
+ * See \ref XXX_MEM_ACCESS_ADDR for details.
+ */
+#define CLASS_MEM_ACCESS_ADDR	(CLASS_CSR_BASE_ADDR + 0x100)
+
+/* Internal Memory Access Write Data [31:0] */
+#define CLASS_MEM_ACCESS_WDATA	(CLASS_CSR_BASE_ADDR + 0x104)
+
+/* Internal Memory Access Read Data [31:0] */
+#define CLASS_MEM_ACCESS_RDATA	(CLASS_CSR_BASE_ADDR + 0x108)
+#define CLASS_TM_INQ_ADDR	(CLASS_CSR_BASE_ADDR + 0x114)
+#define CLASS_PE_STATUS		(CLASS_CSR_BASE_ADDR + 0x118)
+
+#define CLASS_PHY1_RX_PKTS		(CLASS_CSR_BASE_ADDR + 0x11c)
+#define CLASS_PHY1_TX_PKTS		(CLASS_CSR_BASE_ADDR + 0x120)
+#define CLASS_PHY1_LP_FAIL_PKTS		(CLASS_CSR_BASE_ADDR + 0x124)
+#define CLASS_PHY1_INTF_FAIL_PKTS	(CLASS_CSR_BASE_ADDR + 0x128)
+#define CLASS_PHY1_INTF_MATCH_PKTS	(CLASS_CSR_BASE_ADDR + 0x12c)
+#define CLASS_PHY1_L3_FAIL_PKTS		(CLASS_CSR_BASE_ADDR + 0x130)
+#define CLASS_PHY1_V4_PKTS		(CLASS_CSR_BASE_ADDR + 0x134)
+#define CLASS_PHY1_V6_PKTS		(CLASS_CSR_BASE_ADDR + 0x138)
+#define CLASS_PHY1_CHKSUM_ERR_PKTS	(CLASS_CSR_BASE_ADDR + 0x13c)
+#define CLASS_PHY1_TTL_ERR_PKTS		(CLASS_CSR_BASE_ADDR + 0x140)
+#define CLASS_PHY2_RX_PKTS		(CLASS_CSR_BASE_ADDR + 0x144)
+#define CLASS_PHY2_TX_PKTS		(CLASS_CSR_BASE_ADDR + 0x148)
+#define CLASS_PHY2_LP_FAIL_PKTS		(CLASS_CSR_BASE_ADDR + 0x14c)
+#define CLASS_PHY2_INTF_FAIL_PKTS	(CLASS_CSR_BASE_ADDR + 0x150)
+#define CLASS_PHY2_INTF_MATCH_PKTS	(CLASS_CSR_BASE_ADDR + 0x154)
+#define CLASS_PHY2_L3_FAIL_PKTS		(CLASS_CSR_BASE_ADDR + 0x158)
+#define CLASS_PHY2_V4_PKTS		(CLASS_CSR_BASE_ADDR + 0x15c)
+#define CLASS_PHY2_V6_PKTS		(CLASS_CSR_BASE_ADDR + 0x160)
+#define CLASS_PHY2_CHKSUM_ERR_PKTS	(CLASS_CSR_BASE_ADDR + 0x164)
+#define CLASS_PHY2_TTL_ERR_PKTS		(CLASS_CSR_BASE_ADDR + 0x168)
+#define CLASS_PHY3_RX_PKTS		(CLASS_CSR_BASE_ADDR + 0x16c)
+#define CLASS_PHY3_TX_PKTS		(CLASS_CSR_BASE_ADDR + 0x170)
+#define CLASS_PHY3_LP_FAIL_PKTS		(CLASS_CSR_BASE_ADDR + 0x174)
+#define CLASS_PHY3_INTF_FAIL_PKTS	(CLASS_CSR_BASE_ADDR + 0x178)
+#define CLASS_PHY3_INTF_MATCH_PKTS	(CLASS_CSR_BASE_ADDR + 0x17c)
+#define CLASS_PHY3_L3_FAIL_PKTS		(CLASS_CSR_BASE_ADDR + 0x180)
+#define CLASS_PHY3_V4_PKTS		(CLASS_CSR_BASE_ADDR + 0x184)
+#define CLASS_PHY3_V6_PKTS		(CLASS_CSR_BASE_ADDR + 0x188)
+#define CLASS_PHY3_CHKSUM_ERR_PKTS	(CLASS_CSR_BASE_ADDR + 0x18c)
+#define CLASS_PHY3_TTL_ERR_PKTS		(CLASS_CSR_BASE_ADDR + 0x190)
+#define CLASS_PHY1_ICMP_PKTS		(CLASS_CSR_BASE_ADDR + 0x194)
+#define CLASS_PHY1_IGMP_PKTS		(CLASS_CSR_BASE_ADDR + 0x198)
+#define CLASS_PHY1_TCP_PKTS		(CLASS_CSR_BASE_ADDR + 0x19c)
+#define CLASS_PHY1_UDP_PKTS		(CLASS_CSR_BASE_ADDR + 0x1a0)
+#define CLASS_PHY2_ICMP_PKTS		(CLASS_CSR_BASE_ADDR + 0x1a4)
+#define CLASS_PHY2_IGMP_PKTS		(CLASS_CSR_BASE_ADDR + 0x1a8)
+#define CLASS_PHY2_TCP_PKTS		(CLASS_CSR_BASE_ADDR + 0x1ac)
+#define CLASS_PHY2_UDP_PKTS		(CLASS_CSR_BASE_ADDR + 0x1b0)
+#define CLASS_PHY3_ICMP_PKTS		(CLASS_CSR_BASE_ADDR + 0x1b4)
+#define CLASS_PHY3_IGMP_PKTS		(CLASS_CSR_BASE_ADDR + 0x1b8)
+#define CLASS_PHY3_TCP_PKTS		(CLASS_CSR_BASE_ADDR + 0x1bc)
+#define CLASS_PHY3_UDP_PKTS		(CLASS_CSR_BASE_ADDR + 0x1c0)
+#define CLASS_PHY4_ICMP_PKTS		(CLASS_CSR_BASE_ADDR + 0x1c4)
+#define CLASS_PHY4_IGMP_PKTS		(CLASS_CSR_BASE_ADDR + 0x1c8)
+#define CLASS_PHY4_TCP_PKTS		(CLASS_CSR_BASE_ADDR + 0x1cc)
+#define CLASS_PHY4_UDP_PKTS		(CLASS_CSR_BASE_ADDR + 0x1d0)
+#define CLASS_PHY4_RX_PKTS		(CLASS_CSR_BASE_ADDR + 0x1d4)
+#define CLASS_PHY4_TX_PKTS		(CLASS_CSR_BASE_ADDR + 0x1d8)
+#define CLASS_PHY4_LP_FAIL_PKTS		(CLASS_CSR_BASE_ADDR + 0x1dc)
+#define CLASS_PHY4_INTF_FAIL_PKTS	(CLASS_CSR_BASE_ADDR + 0x1e0)
+#define CLASS_PHY4_INTF_MATCH_PKTS	(CLASS_CSR_BASE_ADDR + 0x1e4)
+#define CLASS_PHY4_L3_FAIL_PKTS		(CLASS_CSR_BASE_ADDR + 0x1e8)
+#define CLASS_PHY4_V4_PKTS		(CLASS_CSR_BASE_ADDR + 0x1ec)
+#define CLASS_PHY4_V6_PKTS		(CLASS_CSR_BASE_ADDR + 0x1f0)
+#define CLASS_PHY4_CHKSUM_ERR_PKTS	(CLASS_CSR_BASE_ADDR + 0x1f4)
+#define CLASS_PHY4_TTL_ERR_PKTS		(CLASS_CSR_BASE_ADDR + 0x1f8)
+
+#define CLASS_PE_SYS_CLK_RATIO		(CLASS_CSR_BASE_ADDR + 0x200)
+#define CLASS_AFULL_THRES		(CLASS_CSR_BASE_ADDR + 0x204)
+#define CLASS_GAP_BETWEEN_READS		(CLASS_CSR_BASE_ADDR + 0x208)
+#define CLASS_MAX_BUF_CNT		(CLASS_CSR_BASE_ADDR + 0x20c)
+#define CLASS_TSQ_FIFO_THRES		(CLASS_CSR_BASE_ADDR + 0x210)
+#define CLASS_TSQ_MAX_CNT		(CLASS_CSR_BASE_ADDR + 0x214)
+#define CLASS_IRAM_DATA_0		(CLASS_CSR_BASE_ADDR + 0x218)
+#define CLASS_IRAM_DATA_1		(CLASS_CSR_BASE_ADDR + 0x21c)
+#define CLASS_IRAM_DATA_2		(CLASS_CSR_BASE_ADDR + 0x220)
+#define CLASS_IRAM_DATA_3		(CLASS_CSR_BASE_ADDR + 0x224)
+
+#define CLASS_BUS_ACCESS_ADDR		(CLASS_CSR_BASE_ADDR + 0x228)
+
+#define CLASS_BUS_ACCESS_WDATA		(CLASS_CSR_BASE_ADDR + 0x22c)
+#define CLASS_BUS_ACCESS_RDATA		(CLASS_CSR_BASE_ADDR + 0x230)
+
+/* (route_entry_size[9:0], route_hash_size[23:16]
+ * (this is actually ln2(size)))
+ */
+#define CLASS_ROUTE_HASH_ENTRY_SIZE	(CLASS_CSR_BASE_ADDR + 0x234)
+
+#define CLASS_ROUTE_ENTRY_SIZE(size)	 ((size) & 0x1ff)
+#define CLASS_ROUTE_HASH_SIZE(hash_bits) (((hash_bits) & 0xff) << 16)
+
+#define CLASS_ROUTE_TABLE_BASE		(CLASS_CSR_BASE_ADDR + 0x238)
+
+#define CLASS_ROUTE_MULTI		(CLASS_CSR_BASE_ADDR + 0x23c)
+#define CLASS_SMEM_OFFSET		(CLASS_CSR_BASE_ADDR + 0x240)
+#define CLASS_LMEM_BUF_SIZE		(CLASS_CSR_BASE_ADDR + 0x244)
+#define CLASS_VLAN_ID			(CLASS_CSR_BASE_ADDR + 0x248)
+#define CLASS_BMU1_BUF_FREE		(CLASS_CSR_BASE_ADDR + 0x24c)
+#define CLASS_USE_TMU_INQ		(CLASS_CSR_BASE_ADDR + 0x250)
+#define CLASS_VLAN_ID1			(CLASS_CSR_BASE_ADDR + 0x254)
+
+#define CLASS_BUS_ACCESS_BASE		(CLASS_CSR_BASE_ADDR + 0x258)
+#define CLASS_BUS_ACCESS_BASE_MASK	(0xFF000000)
+/* bit 31:24 of PE peripheral address are stored in CLASS_BUS_ACCESS_BASE */
+
+#define CLASS_HIF_PARSE			(CLASS_CSR_BASE_ADDR + 0x25c)
+
+#define CLASS_HOST_PE0_GP		(CLASS_CSR_BASE_ADDR + 0x260)
+#define CLASS_PE0_GP			(CLASS_CSR_BASE_ADDR + 0x264)
+#define CLASS_HOST_PE1_GP		(CLASS_CSR_BASE_ADDR + 0x268)
+#define CLASS_PE1_GP			(CLASS_CSR_BASE_ADDR + 0x26c)
+#define CLASS_HOST_PE2_GP		(CLASS_CSR_BASE_ADDR + 0x270)
+#define CLASS_PE2_GP			(CLASS_CSR_BASE_ADDR + 0x274)
+#define CLASS_HOST_PE3_GP		(CLASS_CSR_BASE_ADDR + 0x278)
+#define CLASS_PE3_GP			(CLASS_CSR_BASE_ADDR + 0x27c)
+#define CLASS_HOST_PE4_GP		(CLASS_CSR_BASE_ADDR + 0x280)
+#define CLASS_PE4_GP			(CLASS_CSR_BASE_ADDR + 0x284)
+#define CLASS_HOST_PE5_GP		(CLASS_CSR_BASE_ADDR + 0x288)
+#define CLASS_PE5_GP			(CLASS_CSR_BASE_ADDR + 0x28c)
+
+#define CLASS_PE_INT_SRC		(CLASS_CSR_BASE_ADDR + 0x290)
+#define CLASS_PE_INT_ENABLE		(CLASS_CSR_BASE_ADDR + 0x294)
+
+#define CLASS_TPID0_TPID1		(CLASS_CSR_BASE_ADDR + 0x298)
+#define CLASS_TPID2			(CLASS_CSR_BASE_ADDR + 0x29c)
+
+#define CLASS_L4_CHKSUM_ADDR		(CLASS_CSR_BASE_ADDR + 0x2a0)
+
+#define CLASS_PE0_DEBUG			(CLASS_CSR_BASE_ADDR + 0x2a4)
+#define CLASS_PE1_DEBUG			(CLASS_CSR_BASE_ADDR + 0x2a8)
+#define CLASS_PE2_DEBUG			(CLASS_CSR_BASE_ADDR + 0x2ac)
+#define CLASS_PE3_DEBUG			(CLASS_CSR_BASE_ADDR + 0x2b0)
+#define CLASS_PE4_DEBUG			(CLASS_CSR_BASE_ADDR + 0x2b4)
+#define CLASS_PE5_DEBUG			(CLASS_CSR_BASE_ADDR + 0x2b8)
+
+#define CLASS_STATE			(CLASS_CSR_BASE_ADDR + 0x2bc)
+
+/* CLASS defines */
+#define CLASS_PBUF_SIZE			0x100	/* Fixed by hardware */
+#define CLASS_PBUF_HEADER_OFFSET	0x80	/* Can be configured */
+
+/* Can be configured */
+#define CLASS_PBUF0_BASE_ADDR		0x000
+/* Can be configured */
+#define CLASS_PBUF1_BASE_ADDR		(CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_SIZE)
+/* Can be configured */
+#define CLASS_PBUF2_BASE_ADDR		(CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_SIZE)
+/* Can be configured */
+#define CLASS_PBUF3_BASE_ADDR		(CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_SIZE)
+
+#define CLASS_PBUF0_HEADER_BASE_ADDR	(CLASS_PBUF0_BASE_ADDR + \
+					 CLASS_PBUF_HEADER_OFFSET)
+#define CLASS_PBUF1_HEADER_BASE_ADDR	(CLASS_PBUF1_BASE_ADDR + \
+					 CLASS_PBUF_HEADER_OFFSET)
+#define CLASS_PBUF2_HEADER_BASE_ADDR	(CLASS_PBUF2_BASE_ADDR + \
+					 CLASS_PBUF_HEADER_OFFSET)
+#define CLASS_PBUF3_HEADER_BASE_ADDR	(CLASS_PBUF3_BASE_ADDR + \
+					 CLASS_PBUF_HEADER_OFFSET)
+
+#define CLASS_PE0_RO_DM_ADDR0_VAL	((CLASS_PBUF1_BASE_ADDR << 16) | \
+					 CLASS_PBUF0_BASE_ADDR)
+#define CLASS_PE0_RO_DM_ADDR1_VAL	((CLASS_PBUF3_BASE_ADDR << 16) | \
+					 CLASS_PBUF2_BASE_ADDR)
+
+#define CLASS_PE0_QB_DM_ADDR0_VAL	((CLASS_PBUF1_HEADER_BASE_ADDR << 16) |\
+					 CLASS_PBUF0_HEADER_BASE_ADDR)
+#define CLASS_PE0_QB_DM_ADDR1_VAL	((CLASS_PBUF3_HEADER_BASE_ADDR << 16) |\
+					 CLASS_PBUF2_HEADER_BASE_ADDR)
+
+#define CLASS_ROUTE_SIZE		128
+#define CLASS_MAX_ROUTE_SIZE		256
+#define CLASS_ROUTE_HASH_BITS		20
+#define CLASS_ROUTE_HASH_MASK		(BIT(CLASS_ROUTE_HASH_BITS) - 1)
+
+/* Can be configured */
+#define CLASS_ROUTE0_BASE_ADDR	0x400
+/* Can be configured */
+#define CLASS_ROUTE1_BASE_ADDR	(CLASS_ROUTE0_BASE_ADDR + CLASS_ROUTE_SIZE)
+/* Can be configured */
+#define CLASS_ROUTE2_BASE_ADDR	(CLASS_ROUTE1_BASE_ADDR + CLASS_ROUTE_SIZE)
+/* Can be configured */
+#define CLASS_ROUTE3_BASE_ADDR	(CLASS_ROUTE2_BASE_ADDR + CLASS_ROUTE_SIZE)
+
+#define CLASS_SA_SIZE			128
+#define CLASS_IPSEC_SA0_BASE_ADDR	0x600
+/* not used */
+#define CLASS_IPSEC_SA1_BASE_ADDR	(CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE)
+/* not used */
+#define CLASS_IPSEC_SA2_BASE_ADDR	(CLASS_IPSEC_SA1_BASE_ADDR + CLASS_SA_SIZE)
+/* not used */
+#define CLASS_IPSEC_SA3_BASE_ADDR	(CLASS_IPSEC_SA2_BASE_ADDR + CLASS_SA_SIZE)
+
+/* generic purpose free dmem buffer, last portion of 2K dmem pbuf */
+#define CLASS_GP_DMEM_BUF_SIZE	(2048 - (CLASS_PBUF_SIZE * 4) - \
+				 (CLASS_ROUTE_SIZE * 4) - (CLASS_SA_SIZE))
+#define CLASS_GP_DMEM_BUF	((void *)(CLASS_IPSEC_SA0_BASE_ADDR + \
+				 CLASS_SA_SIZE))
+
+#define TWO_LEVEL_ROUTE		BIT(0)
+#define PHYNO_IN_HASH		BIT(1)
+#define HW_ROUTE_FETCH		BIT(3)
+#define HW_BRIDGE_FETCH		BIT(5)
+#define IP_ALIGNED		BIT(6)
+#define ARC_HIT_CHECK_EN	BIT(7)
+#define CLASS_TOE		BIT(11)
+#define HASH_NORMAL		(0 << 12)
+#define HASH_CRC_PORT		BIT(12)
+#define HASH_CRC_IP		(2 << 12)
+#define HASH_CRC_PORT_IP	(3 << 12)
+#define QB2BUS_LE		BIT(15)
+
+#define TCP_CHKSUM_DROP		BIT(0)
+#define UDP_CHKSUM_DROP		BIT(1)
+#define IPV4_CHKSUM_DROP	BIT(9)
+
+/* CLASS_HIF_PARSE bits */
+#define HIF_PKT_CLASS_EN	BIT(0)
+#define HIF_PKT_OFFSET(ofst)	(((ofst) & 0xF) << 1)
+
+struct class_cfg {
+	u32 toe_mode;
+	unsigned long route_table_baseaddr;
+	u32 route_table_hash_bits;
+	u32 pe_sys_clk_ratio;
+	u32 resume;
+};
+
+#endif /* _CLASS_CSR_H_ */
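
Note: a quick worked example of the CLASS_HDR_SIZE field macros above, using the LMEM_HDR_SIZE (0x10) and DDR_HDR_SIZE (0x100) values from cbus.h. This is an illustrative sketch, not the patch's code; in the driver, CLASS_CSR_BASE_ADDR resolves to the ioremapped CBUS base, so writel() can target the register macro directly (the real programming is done in pfe_hal.c).

  /* 0x10 into bits [5:0], 0x100 into bits [24:16] -> 0x01000010 */
  u32 hdr_size = CLASS_HDR_SIZE_LMEM(LMEM_HDR_SIZE) |
		 CLASS_HDR_SIZE_DDR(DDR_HDR_SIZE);

  writel(hdr_size, CLASS_HDR_SIZE);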
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _EMAC_H_
+#define _EMAC_H_
+
+#include <linux/ethtool.h>
+
+#define EMAC_IEVENT_REG		0x004
+#define EMAC_IMASK_REG		0x008
+#define EMAC_R_DES_ACTIVE_REG	0x010
+#define EMAC_X_DES_ACTIVE_REG	0x014
+#define EMAC_ECNTRL_REG		0x024
+#define EMAC_MII_DATA_REG	0x040
+#define EMAC_MII_CTRL_REG	0x044
+#define EMAC_MIB_CTRL_STS_REG	0x064
+#define EMAC_RCNTRL_REG		0x084
+#define EMAC_TCNTRL_REG		0x0C4
+#define EMAC_PHY_ADDR_LOW	0x0E4
+#define EMAC_PHY_ADDR_HIGH	0x0E8
+#define EMAC_GAUR		0x120
+#define EMAC_GALR		0x124
+#define EMAC_TFWR_STR_FWD	0x144
+#define EMAC_RX_SECTION_FULL	0x190
+#define EMAC_RX_SECTION_EMPTY	0x194
+#define EMAC_TX_SECTION_EMPTY	0x1A0
+#define EMAC_TRUNC_FL		0x1B0
+
+#define RMON_T_DROP		0x200	/* Count of frames not counted correctly */
+#define RMON_T_PACKETS		0x204	/* RMON TX packet count */
+#define RMON_T_BC_PKT		0x208	/* RMON TX broadcast pkts */
+#define RMON_T_MC_PKT		0x20c	/* RMON TX multicast pkts */
+#define RMON_T_CRC_ALIGN	0x210	/* RMON TX pkts with CRC align err */
+#define RMON_T_UNDERSIZE	0x214	/* RMON TX pkts < 64 bytes, good CRC */
+#define RMON_T_OVERSIZE		0x218	/* RMON TX pkts > MAX_FL bytes good CRC */
+#define RMON_T_FRAG		0x21c	/* RMON TX pkts < 64 bytes, bad CRC */
+#define RMON_T_JAB		0x220	/* RMON TX pkts > MAX_FL bytes, bad CRC */
+#define RMON_T_COL		0x224	/* RMON TX collision count */
+#define RMON_T_P64		0x228	/* RMON TX 64 byte pkts */
+#define RMON_T_P65TO127		0x22c	/* RMON TX 65 to 127 byte pkts */
+#define RMON_T_P128TO255	0x230	/* RMON TX 128 to 255 byte pkts */
+#define RMON_T_P256TO511	0x234	/* RMON TX 256 to 511 byte pkts */
+#define RMON_T_P512TO1023	0x238	/* RMON TX 512 to 1023 byte pkts */
+#define RMON_T_P1024TO2047	0x23c	/* RMON TX 1024 to 2047 byte pkts */
+#define RMON_T_P_GTE2048	0x240	/* RMON TX pkts > 2048 bytes */
+#define RMON_T_OCTETS		0x244	/* RMON TX octets */
+#define IEEE_T_DROP		0x248	/* Count of frames not counted correctly */
+#define IEEE_T_FRAME_OK		0x24c	/* Frames tx'd OK */
+#define IEEE_T_1COL		0x250	/* Frames tx'd with single collision */
+#define IEEE_T_MCOL		0x254	/* Frames tx'd with multiple collision */
+#define IEEE_T_DEF		0x258	/* Frames tx'd after deferral delay */
+#define IEEE_T_LCOL		0x25c	/* Frames tx'd with late collision */
+#define IEEE_T_EXCOL		0x260	/* Frames tx'd with excessive collisions */
+#define IEEE_T_MACERR		0x264	/* Frames tx'd with TX FIFO underrun */
+#define IEEE_T_CSERR		0x268	/* Frames tx'd with carrier sense err */
+#define IEEE_T_SQE		0x26c	/* Frames tx'd with SQE err */
+#define IEEE_T_FDXFC		0x270	/* Flow control pause frames tx'd */
+#define IEEE_T_OCTETS_OK	0x274	/* Octet count for frames tx'd w/o err */
+#define RMON_R_PACKETS		0x284	/* RMON RX packet count */
+#define RMON_R_BC_PKT		0x288	/* RMON RX broadcast pkts */
+#define RMON_R_MC_PKT		0x28c	/* RMON RX multicast pkts */
+#define RMON_R_CRC_ALIGN	0x290	/* RMON RX pkts with CRC alignment err */
+#define RMON_R_UNDERSIZE	0x294	/* RMON RX pkts < 64 bytes, good CRC */
+#define RMON_R_OVERSIZE		0x298	/* RMON RX pkts > MAX_FL bytes good CRC */
+#define RMON_R_FRAG		0x29c	/* RMON RX pkts < 64 bytes, bad CRC */
+#define RMON_R_JAB		0x2a0	/* RMON RX pkts > MAX_FL bytes, bad CRC */
+#define RMON_R_RESVD_O		0x2a4	/* Reserved */
+#define RMON_R_P64		0x2a8	/* RMON RX 64 byte pkts */
+#define RMON_R_P65TO127		0x2ac	/* RMON RX 65 to 127 byte pkts */
+#define RMON_R_P128TO255	0x2b0	/* RMON RX 128 to 255 byte pkts */
+#define RMON_R_P256TO511	0x2b4	/* RMON RX 256 to 511 byte pkts */
+#define RMON_R_P512TO1023	0x2b8	/* RMON RX 512 to 1023 byte pkts */
+#define RMON_R_P1024TO2047	0x2bc	/* RMON RX 1024 to 2047 byte pkts */
+#define RMON_R_P_GTE2048	0x2c0	/* RMON RX pkts > 2048 bytes */
+#define RMON_R_OCTETS		0x2c4	/* RMON RX octets */
+#define IEEE_R_DROP		0x2c8	/* Count frames not counted correctly */
+#define IEEE_R_FRAME_OK		0x2cc	/* Frames rx'd OK */
+#define IEEE_R_CRC		0x2d0	/* Frames rx'd with CRC err */
+#define IEEE_R_ALIGN		0x2d4	/* Frames rx'd with alignment err */
+#define IEEE_R_MACERR		0x2d8	/* Receive FIFO overflow count */
+#define IEEE_R_FDXFC		0x2dc	/* Flow control pause frames rx'd */
+#define IEEE_R_OCTETS_OK	0x2e0	/* Octet cnt for frames rx'd w/o err */
+
+#define EMAC_SMAC_0_0		0x500	/* Supplemental MAC Address 0 (RW). */
+#define EMAC_SMAC_0_1		0x504	/* Supplemental MAC Address 0 (RW). */
+
+/* GEMAC definitions and settings */
+
+#define EMAC_PORT_0	0
+#define EMAC_PORT_1	1
+
+/* GEMAC Bit definitions */
+#define EMAC_IEVENT_HBERR	0x80000000
+#define EMAC_IEVENT_BABR	0x40000000
+#define EMAC_IEVENT_BABT	0x20000000
+#define EMAC_IEVENT_GRA		0x10000000
+#define EMAC_IEVENT_TXF		0x08000000
+#define EMAC_IEVENT_TXB		0x04000000
+#define EMAC_IEVENT_RXF		0x02000000
+#define EMAC_IEVENT_RXB		0x01000000
+#define EMAC_IEVENT_MII		0x00800000
+#define EMAC_IEVENT_EBERR	0x00400000
+#define EMAC_IEVENT_LC		0x00200000
+#define EMAC_IEVENT_RL		0x00100000
+#define EMAC_IEVENT_UN		0x00080000
+
+#define EMAC_IMASK_HBERR	0x80000000
+#define EMAC_IMASK_BABR		0x40000000
+#define EMAC_IMASKT_BABT	0x20000000
+#define EMAC_IMASK_GRA		0x10000000
+#define EMAC_IMASKT_TXF		0x08000000
+#define EMAC_IMASK_TXB		0x04000000
+#define EMAC_IMASKT_RXF		0x02000000
+#define EMAC_IMASK_RXB		0x01000000
+#define EMAC_IMASK_MII		0x00800000
+#define EMAC_IMASK_EBERR	0x00400000
+#define EMAC_IMASK_LC		0x00200000
+#define EMAC_IMASKT_RL		0x00100000
+#define EMAC_IMASK_UN		0x00080000
+
+#define EMAC_RCNTRL_MAX_FL_SHIFT	16
+#define EMAC_RCNTRL_LOOP	0x00000001
+#define EMAC_RCNTRL_DRT		0x00000002
+#define EMAC_RCNTRL_MII_MODE	0x00000004
+#define EMAC_RCNTRL_PROM	0x00000008
+#define EMAC_RCNTRL_BC_REJ	0x00000010
+#define EMAC_RCNTRL_FCE		0x00000020
+#define EMAC_RCNTRL_RGMII	0x00000040
+#define EMAC_RCNTRL_SGMII	0x00000080
+#define EMAC_RCNTRL_RMII	0x00000100
+#define EMAC_RCNTRL_RMII_10T	0x00000200
+#define EMAC_RCNTRL_CRC_FWD	0x00004000
+
+#define EMAC_TCNTRL_GTS		0x00000001
+#define EMAC_TCNTRL_HBC		0x00000002
+#define EMAC_TCNTRL_FDEN	0x00000004
+#define EMAC_TCNTRL_TFC_PAUSE	0x00000008
+#define EMAC_TCNTRL_RFC_PAUSE	0x00000010
+
+#define EMAC_ECNTRL_RESET	0x00000001	/* reset the EMAC */
+#define EMAC_ECNTRL_ETHER_EN	0x00000002	/* enable the EMAC */
+#define EMAC_ECNTRL_MAGIC_ENA	0x00000004
+#define EMAC_ECNTRL_SLEEP	0x00000008
+#define EMAC_ECNTRL_SPEED	0x00000020
+#define EMAC_ECNTRL_DBSWAP	0x00000100
+
+#define EMAC_X_WMRK_STRFWD	0x00000100
+
+#define EMAC_X_DES_ACTIVE_TDAR	0x01000000
+#define EMAC_R_DES_ACTIVE_RDAR	0x01000000
+
+#define EMAC_RX_SECTION_EMPTY_V	0x00010006
+/*
+ * The possible operating speeds of the MAC, currently supporting 10, 100 and
+ * 1000Mb modes.
+ */
+enum mac_speed {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS};
+
+/* MII-related definitions */
+#define EMAC_MII_DATA_ST	0x40000000	/* Start of frame delimiter */
+#define EMAC_MII_DATA_OP_RD	0x20000000	/* Perform a read operation */
+#define EMAC_MII_DATA_OP_CL45_RD 0x30000000	/* Perform a read operation */
+#define EMAC_MII_DATA_OP_WR	0x10000000	/* Perform a write operation */
+#define EMAC_MII_DATA_OP_CL45_WR 0x10000000	/* Perform a write operation */
+#define EMAC_MII_DATA_PA_MSK	0x0f800000	/* PHY Address field mask */
+#define EMAC_MII_DATA_RA_MSK	0x007c0000	/* PHY Register field mask */
+#define EMAC_MII_DATA_TA	0x00020000	/* Turnaround */
+#define EMAC_MII_DATA_DATAMSK	0x0000ffff	/* PHY data field */
+
+#define EMAC_MII_DATA_RA_SHIFT	18	/* MII Register address bits */
+#define EMAC_MII_DATA_RA_MASK	0x1F	/* MII Register address mask */
+#define EMAC_MII_DATA_PA_SHIFT	23	/* MII PHY address bits */
+#define EMAC_MII_DATA_PA_MASK	0x1F	/* MII PHY address mask */
+
+#define EMAC_MII_DATA_RA(v)	(((v) & EMAC_MII_DATA_RA_MASK) << \
+				 EMAC_MII_DATA_RA_SHIFT)
+#define EMAC_MII_DATA_PA(v)	(((v) & EMAC_MII_DATA_RA_MASK) << \
+				 EMAC_MII_DATA_PA_SHIFT)
+#define EMAC_MII_DATA(v)	((v) & 0xffff)
+
+#define EMAC_MII_SPEED_SHIFT	1
+#define EMAC_HOLDTIME_SHIFT	8
+#define EMAC_HOLDTIME_MASK	0x7
+#define EMAC_HOLDTIME(v)	(((v) & EMAC_HOLDTIME_MASK) << \
+				 EMAC_HOLDTIME_SHIFT)
+
+/*
+ * The Address organisation for the MAC device. All addresses are split into
+ * two 32-bit register fields. The first one (bottom) is the lower 32-bits of
+ * the address and the other field holds the high-order bits - this may be
+ * 16-bits in the case of MAC addresses, or 32-bits for the hash address.
+ * In terms of memory storage, the first item (bottom) is assumed to be at a
+ * lower address location than 'top'. i.e. top should be at address location of
+ * 'bottom' + 4 bytes.
+ */
+struct pfe_mac_addr {
+	u32 bottom;	/* Lower 32-bits of address. */
+	u32 top;	/* Upper 32-bits of address. */
+};
+
+/*
+ * The following is the organisation of the address filters section of the MAC
+ * registers. The Cadence MAC contains four possible specific address match
+ * addresses, if an incoming frame corresponds to any one of these four
+ * addresses then the frame will be copied to memory.
+ * It is not necessary for all four of the address match registers to be
+ * programmed, this is application dependent.
+ */
+struct spec_addr {
+	struct pfe_mac_addr one;	/* Specific address register 1. */
+	struct pfe_mac_addr two;	/* Specific address register 2. */
+	struct pfe_mac_addr three;	/* Specific address register 3. */
+	struct pfe_mac_addr four;	/* Specific address register 4. */
+};
+
+struct gemac_cfg {
+	u32 mode;
+	u32 speed;
+	u32 duplex;
+};
+
+/* EMAC Hash size */
+#define EMAC_HASH_REG_BITS	64
+
+#define EMAC_SPEC_ADDR_MAX	4
+
+#endif /* _EMAC_H_ */
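
Note: an illustrative Clause-22 MDIO read composed from the EMAC_MII_* fields above. The helper below is a sketch, not the driver's implementation (the real MDIO ops live in pfe_eth.c), and the busy-wait is deliberately simplified. Incidentally, EMAC_MII_DATA_PA() masks with RA_MASK rather than PA_MASK, which is harmless since both masks are 0x1F.

  static u16 pfe_mdio_read_sketch(void __iomem *emac, int phy, int reg)
  {
  	u32 cmd = EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD | EMAC_MII_DATA_TA |
  		  EMAC_MII_DATA_PA(phy) | EMAC_MII_DATA_RA(reg);

  	writel(cmd, emac + EMAC_MII_DATA_REG);

  	/* wait for the MII event, then acknowledge it */
  	while (!(readl(emac + EMAC_IEVENT_REG) & EMAC_IEVENT_MII))
  		cpu_relax();
  	writel(EMAC_IEVENT_MII, emac + EMAC_IEVENT_REG);

  	return EMAC_MII_DATA(readl(emac + EMAC_MII_DATA_REG));
  }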
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _GPI_H_
+#define _GPI_H_
+
+#define GPI_VERSION		0x00
+#define GPI_CTRL		0x04
+#define GPI_RX_CONFIG		0x08
+#define GPI_HDR_SIZE		0x0c
+#define GPI_BUF_SIZE		0x10
+#define GPI_LMEM_ALLOC_ADDR	0x14
+#define GPI_LMEM_FREE_ADDR	0x18
+#define GPI_DDR_ALLOC_ADDR	0x1c
+#define GPI_DDR_FREE_ADDR	0x20
+#define GPI_CLASS_ADDR		0x24
+#define GPI_DRX_FIFO		0x28
+#define GPI_TRX_FIFO		0x2c
+#define GPI_INQ_PKTPTR		0x30
+#define GPI_DDR_DATA_OFFSET	0x34
+#define GPI_LMEM_DATA_OFFSET	0x38
+#define GPI_TMLF_TX		0x4c
+#define GPI_DTX_ASEQ		0x50
+#define GPI_FIFO_STATUS		0x54
+#define GPI_FIFO_DEBUG		0x58
+#define GPI_TX_PAUSE_TIME	0x5c
+#define GPI_LMEM_SEC_BUF_DATA_OFFSET	0x60
+#define GPI_DDR_SEC_BUF_DATA_OFFSET	0x64
+#define GPI_TOE_CHKSUM_EN	0x68
+#define GPI_OVERRUN_DROPCNT	0x6c
+#define GPI_CSR_MTIP_PAUSE_REG	0x74
+#define GPI_CSR_MTIP_PAUSE_QUANTUM	0x78
+#define GPI_CSR_RX_CNT		0x7c
+#define GPI_CSR_TX_CNT		0x80
+#define GPI_CSR_DEBUG1		0x84
+#define GPI_CSR_DEBUG2		0x88
+
+struct gpi_cfg {
+	u32 lmem_rtry_cnt;
+	u32 tmlf_txthres;
+	u32 aseq_len;
+	u32 mtip_pause_reg;
+};
+
+/* GPI common defines */
+#define GPI_LMEM_BUF_EN	0x1
+#define GPI_DDR_BUF_EN	0x1
+
+/* EGPI 1 defines */
+#define EGPI1_LMEM_RTRY_CNT	0x40
+#define EGPI1_TMLF_TXTHRES	0xBC
+#define EGPI1_ASEQ_LEN		0x50
+
+/* EGPI 2 defines */
+#define EGPI2_LMEM_RTRY_CNT	0x40
+#define EGPI2_TMLF_TXTHRES	0xBC
+#define EGPI2_ASEQ_LEN		0x40
+
+/* EGPI 3 defines */
+#define EGPI3_LMEM_RTRY_CNT	0x40
+#define EGPI3_TMLF_TXTHRES	0xBC
+#define EGPI3_ASEQ_LEN		0x40
+
+/* HGPI defines */
+#define HGPI_LMEM_RTRY_CNT	0x40
+#define HGPI_TMLF_TXTHRES	0xBC
+#define HGPI_ASEQ_LEN		0x40
+
+#define EGPI_PAUSE_TIME		0x000007D0
+#define EGPI_PAUSE_ENABLE	0x40000000
+#endif /* _GPI_H_ */
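
Note: a sketch of how the per-GPI tuning values above would populate struct gpi_cfg for EGPI1. The mtip_pause_reg composition (the enable bit OR'd with the 0x7D0 pause time) is an assumption for illustration, not taken from this patch.

  static const struct gpi_cfg egpi1_cfg = {
  	.lmem_rtry_cnt	= EGPI1_LMEM_RTRY_CNT,
  	.tmlf_txthres	= EGPI1_TMLF_TXTHRES,
  	.aseq_len	= EGPI1_ASEQ_LEN,
  	.mtip_pause_reg	= EGPI_PAUSE_ENABLE | EGPI_PAUSE_TIME,	/* assumed */
  };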
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HIF_H_
+#define _HIF_H_
+
+/* @file hif.h.
+ * hif - PFE hif block control and status registers.
+ * Mapped on CBUS and accessible from all PE's and ARM.
+ */
+#define HIF_VERSION		(HIF_BASE_ADDR + 0x00)
+#define HIF_TX_CTRL		(HIF_BASE_ADDR + 0x04)
+#define HIF_TX_CURR_BD_ADDR	(HIF_BASE_ADDR + 0x08)
+#define HIF_TX_ALLOC		(HIF_BASE_ADDR + 0x0c)
+#define HIF_TX_BDP_ADDR		(HIF_BASE_ADDR + 0x10)
+#define HIF_TX_STATUS		(HIF_BASE_ADDR + 0x14)
+#define HIF_RX_CTRL		(HIF_BASE_ADDR + 0x20)
+#define HIF_RX_BDP_ADDR		(HIF_BASE_ADDR + 0x24)
+#define HIF_RX_STATUS		(HIF_BASE_ADDR + 0x30)
+#define HIF_INT_SRC		(HIF_BASE_ADDR + 0x34)
+#define HIF_INT_ENABLE		(HIF_BASE_ADDR + 0x38)
+#define HIF_POLL_CTRL		(HIF_BASE_ADDR + 0x3c)
+#define HIF_RX_CURR_BD_ADDR	(HIF_BASE_ADDR + 0x40)
+#define HIF_RX_ALLOC		(HIF_BASE_ADDR + 0x44)
+#define HIF_TX_DMA_STATUS	(HIF_BASE_ADDR + 0x48)
+#define HIF_RX_DMA_STATUS	(HIF_BASE_ADDR + 0x4c)
+#define HIF_INT_COAL		(HIF_BASE_ADDR + 0x50)
+
+/* HIF_INT_SRC/ HIF_INT_ENABLE control bits */
+#define HIF_INT			BIT(0)
+#define HIF_RXBD_INT		BIT(1)
+#define HIF_RXPKT_INT		BIT(2)
+#define HIF_TXBD_INT		BIT(3)
+#define HIF_TXPKT_INT		BIT(4)
+
+/* HIF_TX_CTRL bits */
+#define HIF_CTRL_DMA_EN			BIT(0)
+#define HIF_CTRL_BDP_POLL_CTRL_EN	BIT(1)
+#define HIF_CTRL_BDP_CH_START_WSTB	BIT(2)
+
+/* HIF_RX_STATUS bits */
+#define BDP_CSR_RX_DMA_ACTV	BIT(16)
+
+/* HIF_INT_ENABLE bits */
+#define HIF_INT_EN		BIT(0)
+#define HIF_RXBD_INT_EN		BIT(1)
+#define HIF_RXPKT_INT_EN	BIT(2)
+#define HIF_TXBD_INT_EN		BIT(3)
+#define HIF_TXPKT_INT_EN	BIT(4)
+
+/* HIF_POLL_CTRL bits */
+#define HIF_RX_POLL_CTRL_CYCLE	0x0400
+#define HIF_TX_POLL_CTRL_CYCLE	0x0400
+
+/* HIF_INT_COAL bits */
+#define HIF_INT_COAL_ENABLE	BIT(31)
+
+/* Buffer descriptor control bits */
+#define BD_CTRL_BUFLEN_MASK	0x3fff
+#define BD_BUF_LEN(x)		((x) & BD_CTRL_BUFLEN_MASK)
+#define BD_CTRL_CBD_INT_EN	BIT(16)
+#define BD_CTRL_PKT_INT_EN	BIT(17)
+#define BD_CTRL_LIFM		BIT(18)
+#define BD_CTRL_LAST_BD		BIT(19)
+#define BD_CTRL_DIR		BIT(20)
+#define BD_CTRL_LMEM_CPY	BIT(21)	/* Valid only for HIF_NOCPY */
+#define BD_CTRL_PKT_XFER	BIT(24)
+#define BD_CTRL_DESC_EN		BIT(31)
+#define BD_CTRL_PARSE_DISABLE	BIT(25)
+#define BD_CTRL_BRFETCH_DISABLE	BIT(26)
+#define BD_CTRL_RTFETCH_DISABLE	BIT(27)
+
+/* Buffer descriptor status bits */
+#define BD_STATUS_CONN_ID(x)	((x) & 0xffff)
+#define BD_STATUS_DIR_PROC_ID	BIT(16)
+#define BD_STATUS_CONN_ID_EN	BIT(17)
+#define BD_STATUS_PE2PROC_ID(x)	(((x) & 7) << 18)
+#define BD_STATUS_LE_DATA	BIT(21)
+#define BD_STATUS_CHKSUM_EN	BIT(22)
+
+/* HIF Buffer descriptor status bits */
+#define DIR_PROC_ID		BIT(16)
+#define PROC_ID(id)		((id) << 18)
+
+#endif /* _HIF_H_ */
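
Note: how a HIF TX buffer-descriptor control word is typically composed from the BD_CTRL_* bits above. This is a sketch under the assumption of a single-buffer frame; the driver's actual descriptor handling is in pfe_hif.c, and the helper name is hypothetical.

  static u32 hif_tx_bd_ctrl_sketch(unsigned int len, bool last_bd_in_ring)
  {
  	u32 ctrl = BD_CTRL_DESC_EN |	/* hand the BD to hardware */
  		   BD_CTRL_LIFM |	/* last buffer of this frame */
  		   BD_CTRL_PKT_INT_EN |	/* raise HIF_TXPKT_INT on completion */
  		   BD_BUF_LEN(len);	/* buffer length, bits [13:0] */

  	if (last_bd_in_ring)
  		ctrl |= BD_CTRL_LAST_BD;	/* wrap back to the ring head */

  	return ctrl;
  }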
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HIF_NOCPY_H_
+#define _HIF_NOCPY_H_
+
+#define HIF_NOCPY_VERSION		(HIF_NOCPY_BASE_ADDR + 0x00)
+#define HIF_NOCPY_TX_CTRL		(HIF_NOCPY_BASE_ADDR + 0x04)
+#define HIF_NOCPY_TX_CURR_BD_ADDR	(HIF_NOCPY_BASE_ADDR + 0x08)
+#define HIF_NOCPY_TX_ALLOC		(HIF_NOCPY_BASE_ADDR + 0x0c)
+#define HIF_NOCPY_TX_BDP_ADDR		(HIF_NOCPY_BASE_ADDR + 0x10)
+#define HIF_NOCPY_TX_STATUS		(HIF_NOCPY_BASE_ADDR + 0x14)
+#define HIF_NOCPY_RX_CTRL		(HIF_NOCPY_BASE_ADDR + 0x20)
+#define HIF_NOCPY_RX_BDP_ADDR		(HIF_NOCPY_BASE_ADDR + 0x24)
+#define HIF_NOCPY_RX_STATUS		(HIF_NOCPY_BASE_ADDR + 0x30)
+#define HIF_NOCPY_INT_SRC		(HIF_NOCPY_BASE_ADDR + 0x34)
+#define HIF_NOCPY_INT_ENABLE		(HIF_NOCPY_BASE_ADDR + 0x38)
+#define HIF_NOCPY_POLL_CTRL		(HIF_NOCPY_BASE_ADDR + 0x3c)
+#define HIF_NOCPY_RX_CURR_BD_ADDR	(HIF_NOCPY_BASE_ADDR + 0x40)
+#define HIF_NOCPY_RX_ALLOC		(HIF_NOCPY_BASE_ADDR + 0x44)
+#define HIF_NOCPY_TX_DMA_STATUS		(HIF_NOCPY_BASE_ADDR + 0x48)
+#define HIF_NOCPY_RX_DMA_STATUS		(HIF_NOCPY_BASE_ADDR + 0x4c)
+#define HIF_NOCPY_RX_INQ0_PKTPTR	(HIF_NOCPY_BASE_ADDR + 0x50)
+#define HIF_NOCPY_RX_INQ1_PKTPTR	(HIF_NOCPY_BASE_ADDR + 0x54)
+#define HIF_NOCPY_TX_PORT_NO		(HIF_NOCPY_BASE_ADDR + 0x60)
+#define HIF_NOCPY_LMEM_ALLOC_ADDR	(HIF_NOCPY_BASE_ADDR + 0x64)
+#define HIF_NOCPY_CLASS_ADDR		(HIF_NOCPY_BASE_ADDR + 0x68)
+#define HIF_NOCPY_TMU_PORT0_ADDR	(HIF_NOCPY_BASE_ADDR + 0x70)
+#define HIF_NOCPY_TMU_PORT1_ADDR	(HIF_NOCPY_BASE_ADDR + 0x74)
+#define HIF_NOCPY_TMU_PORT2_ADDR	(HIF_NOCPY_BASE_ADDR + 0x7c)
+#define HIF_NOCPY_TMU_PORT3_ADDR	(HIF_NOCPY_BASE_ADDR + 0x80)
+#define HIF_NOCPY_TMU_PORT4_ADDR	(HIF_NOCPY_BASE_ADDR + 0x84)
+#define HIF_NOCPY_INT_COAL		(HIF_NOCPY_BASE_ADDR + 0x90)
+
+#endif /* _HIF_NOCPY_H_ */
|
|
--- /dev/null
|
|
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
|
|
@@ -0,0 +1,168 @@
|
|
+/*
|
|
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
|
|
+ * Copyright 2017 NXP
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TMU_CSR_H_
+#define _TMU_CSR_H_
+
+#define TMU_VERSION	(TMU_CSR_BASE_ADDR + 0x000)
+#define TMU_INQ_WATERMARK	(TMU_CSR_BASE_ADDR + 0x004)
+#define TMU_PHY_INQ_PKTPTR	(TMU_CSR_BASE_ADDR + 0x008)
+#define TMU_PHY_INQ_PKTINFO	(TMU_CSR_BASE_ADDR + 0x00c)
+#define TMU_PHY_INQ_FIFO_CNT	(TMU_CSR_BASE_ADDR + 0x010)
+#define TMU_SYS_GENERIC_CONTROL	(TMU_CSR_BASE_ADDR + 0x014)
+#define TMU_SYS_GENERIC_STATUS	(TMU_CSR_BASE_ADDR + 0x018)
+#define TMU_SYS_GEN_CON0	(TMU_CSR_BASE_ADDR + 0x01c)
+#define TMU_SYS_GEN_CON1	(TMU_CSR_BASE_ADDR + 0x020)
+#define TMU_SYS_GEN_CON2	(TMU_CSR_BASE_ADDR + 0x024)
+#define TMU_SYS_GEN_CON3	(TMU_CSR_BASE_ADDR + 0x028)
+#define TMU_SYS_GEN_CON4	(TMU_CSR_BASE_ADDR + 0x02c)
+#define TMU_TEQ_DISABLE_DROPCHK	(TMU_CSR_BASE_ADDR + 0x030)
+#define TMU_TEQ_CTRL	(TMU_CSR_BASE_ADDR + 0x034)
+#define TMU_TEQ_QCFG	(TMU_CSR_BASE_ADDR + 0x038)
+#define TMU_TEQ_DROP_STAT	(TMU_CSR_BASE_ADDR + 0x03c)
+#define TMU_TEQ_QAVG	(TMU_CSR_BASE_ADDR + 0x040)
+#define TMU_TEQ_WREG_PROB	(TMU_CSR_BASE_ADDR + 0x044)
+#define TMU_TEQ_TRANS_STAT	(TMU_CSR_BASE_ADDR + 0x048)
+#define TMU_TEQ_HW_PROB_CFG0	(TMU_CSR_BASE_ADDR + 0x04c)
+#define TMU_TEQ_HW_PROB_CFG1	(TMU_CSR_BASE_ADDR + 0x050)
+#define TMU_TEQ_HW_PROB_CFG2	(TMU_CSR_BASE_ADDR + 0x054)
+#define TMU_TEQ_HW_PROB_CFG3	(TMU_CSR_BASE_ADDR + 0x058)
+#define TMU_TEQ_HW_PROB_CFG4	(TMU_CSR_BASE_ADDR + 0x05c)
+#define TMU_TEQ_HW_PROB_CFG5	(TMU_CSR_BASE_ADDR + 0x060)
+#define TMU_TEQ_HW_PROB_CFG6	(TMU_CSR_BASE_ADDR + 0x064)
+#define TMU_TEQ_HW_PROB_CFG7	(TMU_CSR_BASE_ADDR + 0x068)
+#define TMU_TEQ_HW_PROB_CFG8	(TMU_CSR_BASE_ADDR + 0x06c)
+#define TMU_TEQ_HW_PROB_CFG9	(TMU_CSR_BASE_ADDR + 0x070)
+#define TMU_TEQ_HW_PROB_CFG10	(TMU_CSR_BASE_ADDR + 0x074)
+#define TMU_TEQ_HW_PROB_CFG11	(TMU_CSR_BASE_ADDR + 0x078)
+#define TMU_TEQ_HW_PROB_CFG12	(TMU_CSR_BASE_ADDR + 0x07c)
+#define TMU_TEQ_HW_PROB_CFG13	(TMU_CSR_BASE_ADDR + 0x080)
+#define TMU_TEQ_HW_PROB_CFG14	(TMU_CSR_BASE_ADDR + 0x084)
+#define TMU_TEQ_HW_PROB_CFG15	(TMU_CSR_BASE_ADDR + 0x088)
+#define TMU_TEQ_HW_PROB_CFG16	(TMU_CSR_BASE_ADDR + 0x08c)
+#define TMU_TEQ_HW_PROB_CFG17	(TMU_CSR_BASE_ADDR + 0x090)
+#define TMU_TEQ_HW_PROB_CFG18	(TMU_CSR_BASE_ADDR + 0x094)
+#define TMU_TEQ_HW_PROB_CFG19	(TMU_CSR_BASE_ADDR + 0x098)
+#define TMU_TEQ_HW_PROB_CFG20	(TMU_CSR_BASE_ADDR + 0x09c)
+#define TMU_TEQ_HW_PROB_CFG21	(TMU_CSR_BASE_ADDR + 0x0a0)
+#define TMU_TEQ_HW_PROB_CFG22	(TMU_CSR_BASE_ADDR + 0x0a4)
+#define TMU_TEQ_HW_PROB_CFG23	(TMU_CSR_BASE_ADDR + 0x0a8)
+#define TMU_TEQ_HW_PROB_CFG24	(TMU_CSR_BASE_ADDR + 0x0ac)
+#define TMU_TEQ_HW_PROB_CFG25	(TMU_CSR_BASE_ADDR + 0x0b0)
+#define TMU_TDQ_IIFG_CFG	(TMU_CSR_BASE_ADDR + 0x0b4)
+/* [9:0] Scheduler Enable for each of the scheduler in the TDQ.
+ * This is a global Enable for all schedulers in PHY0
+ */
+#define TMU_TDQ0_SCH_CTRL	(TMU_CSR_BASE_ADDR + 0x0b8)
+
+#define TMU_LLM_CTRL	(TMU_CSR_BASE_ADDR + 0x0bc)
+#define TMU_LLM_BASE_ADDR	(TMU_CSR_BASE_ADDR + 0x0c0)
+#define TMU_LLM_QUE_LEN	(TMU_CSR_BASE_ADDR + 0x0c4)
+#define TMU_LLM_QUE_HEADPTR	(TMU_CSR_BASE_ADDR + 0x0c8)
+#define TMU_LLM_QUE_TAILPTR	(TMU_CSR_BASE_ADDR + 0x0cc)
+#define TMU_LLM_QUE_DROPCNT	(TMU_CSR_BASE_ADDR + 0x0d0)
+#define TMU_INT_EN	(TMU_CSR_BASE_ADDR + 0x0d4)
+#define TMU_INT_SRC	(TMU_CSR_BASE_ADDR + 0x0d8)
+#define TMU_INQ_STAT	(TMU_CSR_BASE_ADDR + 0x0dc)
+#define TMU_CTRL	(TMU_CSR_BASE_ADDR + 0x0e0)
+
+/* [31]    Mem Access Command: 0 = Internal Memory Read, 1 = Internal
+ *         Memory Write.
+ * [27:24] Byte Enables of the Internal memory access.
+ * [23:0]  Address of the internal memory. This address is used to access
+ *         both the PM and DM of all the PE's.
+ */
+#define TMU_MEM_ACCESS_ADDR	(TMU_CSR_BASE_ADDR + 0x0e4)
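[Editor's note] The layout described above maps naturally onto a single command word. As a hedged illustration (the helper below is hypothetical and not part of the patch):

	/* Hypothetical helper: compose a TMU internal-memory write command
	 * word per the layout above; bit 31 selects write, [27:24] carry
	 * the byte enables and [23:0] the internal address.
	 */
	#define TMU_MEM_WRITE_CMD(addr, be) \
		(BIT(31) | (((u32)(be) & 0xf) << 24) | ((addr) & 0xffffff))

	/* A full 4-byte write at internal address 0x100 would then be:
	 *   writel(val, TMU_MEM_ACCESS_WDATA);
	 *   writel(TMU_MEM_WRITE_CMD(0x100, 0xf), TMU_MEM_ACCESS_ADDR);
	 */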
+
+/* Internal Memory Access Write Data */
+#define TMU_MEM_ACCESS_WDATA	(TMU_CSR_BASE_ADDR + 0x0e8)
+/* Internal Memory Access Read Data. The commands are blocked
+ * at the mem_access only
+ */
+#define TMU_MEM_ACCESS_RDATA	(TMU_CSR_BASE_ADDR + 0x0ec)
+
+/* [31:0] PHY0 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY0_INQ_ADDR	(TMU_CSR_BASE_ADDR + 0x0f0)
+/* [31:0] PHY1 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY1_INQ_ADDR	(TMU_CSR_BASE_ADDR + 0x0f4)
+/* [31:0] PHY2 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY2_INQ_ADDR	(TMU_CSR_BASE_ADDR + 0x0f8)
+/* [31:0] PHY3 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY3_INQ_ADDR	(TMU_CSR_BASE_ADDR + 0x0fc)
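[Editor's note] Per the comments above, each TMU_PHYn_INQ_ADDR must be loaded with another block's input-queue packet-pointer register as seen from the PE address map. A minimal sketch, with EGPI1_BASE_ADDR and GPI_INQ_PKTPTR assumed from the cbus/gpi headers added earlier in this patch:

	/* Sketch only: route TMU PHY0 output into EGPI1's input queue,
	 * translated into the PE's view of the cbus via the pfe.h macro.
	 */
	writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR + GPI_INQ_PKTPTR),
	       TMU_PHY0_INQ_ADDR);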
+#define TMU_BMU_INQ_ADDR	(TMU_CSR_BASE_ADDR + 0x100)
+#define TMU_TX_CTRL	(TMU_CSR_BASE_ADDR + 0x104)
+
+#define TMU_BUS_ACCESS_WDATA	(TMU_CSR_BASE_ADDR + 0x108)
+#define TMU_BUS_ACCESS	(TMU_CSR_BASE_ADDR + 0x10c)
+#define TMU_BUS_ACCESS_RDATA	(TMU_CSR_BASE_ADDR + 0x110)
+
+#define TMU_PE_SYS_CLK_RATIO	(TMU_CSR_BASE_ADDR + 0x114)
+#define TMU_PE_STATUS	(TMU_CSR_BASE_ADDR + 0x118)
+#define TMU_TEQ_MAX_THRESHOLD	(TMU_CSR_BASE_ADDR + 0x11c)
+/* [31:0] PHY4 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY4_INQ_ADDR	(TMU_CSR_BASE_ADDR + 0x134)
+/* [9:0] Scheduler Enable for each of the scheduler in the TDQ.
+ * This is a global Enable for all schedulers in PHY1
+ */
+#define TMU_TDQ1_SCH_CTRL	(TMU_CSR_BASE_ADDR + 0x138)
+/* [9:0] Scheduler Enable for each of the scheduler in the TDQ.
+ * This is a global Enable for all schedulers in PHY2
+ */
+#define TMU_TDQ2_SCH_CTRL	(TMU_CSR_BASE_ADDR + 0x13c)
+/* [9:0] Scheduler Enable for each of the scheduler in the TDQ.
+ * This is a global Enable for all schedulers in PHY3
+ */
+#define TMU_TDQ3_SCH_CTRL	(TMU_CSR_BASE_ADDR + 0x140)
+#define TMU_BMU_BUF_SIZE	(TMU_CSR_BASE_ADDR + 0x144)
+/* [31:0] PHY5 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY5_INQ_ADDR	(TMU_CSR_BASE_ADDR + 0x148)
+
+#define SW_RESET	BIT(0)	/* Global software reset */
+#define INQ_RESET	BIT(2)
+#define TEQ_RESET	BIT(3)
+#define TDQ_RESET	BIT(4)
+#define PE_RESET	BIT(5)
+#define MEM_INIT	BIT(6)
+#define MEM_INIT_DONE	BIT(7)
+#define LLM_INIT	BIT(8)
+#define LLM_INIT_DONE	BIT(9)
+#define ECC_MEM_INIT_DONE	BIT(10)
+
+struct tmu_cfg {
+	u32 pe_sys_clk_ratio;
+	unsigned long llm_base_addr;
+	u32 llm_queue_len;
+};
+
+/* Not HW related for pfe_ctrl / pfe common defines */
+#define DEFAULT_MAX_QDEPTH	80
+#define DEFAULT_Q0_QDEPTH	511 /* We keep one large queue for host tx qos */
+#define DEFAULT_TMU3_QDEPTH	127
+
+#endif /* _TMU_CSR_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _UTIL_CSR_H_
+#define _UTIL_CSR_H_
+
+#define UTIL_VERSION	(UTIL_CSR_BASE_ADDR + 0x000)
+#define UTIL_TX_CTRL	(UTIL_CSR_BASE_ADDR + 0x004)
+#define UTIL_INQ_PKTPTR	(UTIL_CSR_BASE_ADDR + 0x010)
+
+#define UTIL_HDR_SIZE	(UTIL_CSR_BASE_ADDR + 0x014)
+
+#define UTIL_PE0_QB_DM_ADDR0	(UTIL_CSR_BASE_ADDR + 0x020)
+#define UTIL_PE0_QB_DM_ADDR1	(UTIL_CSR_BASE_ADDR + 0x024)
+#define UTIL_PE0_RO_DM_ADDR0	(UTIL_CSR_BASE_ADDR + 0x060)
+#define UTIL_PE0_RO_DM_ADDR1	(UTIL_CSR_BASE_ADDR + 0x064)
+
+#define UTIL_MEM_ACCESS_ADDR	(UTIL_CSR_BASE_ADDR + 0x100)
+#define UTIL_MEM_ACCESS_WDATA	(UTIL_CSR_BASE_ADDR + 0x104)
+#define UTIL_MEM_ACCESS_RDATA	(UTIL_CSR_BASE_ADDR + 0x108)
+
+#define UTIL_TM_INQ_ADDR	(UTIL_CSR_BASE_ADDR + 0x114)
+#define UTIL_PE_STATUS	(UTIL_CSR_BASE_ADDR + 0x118)
+
+#define UTIL_PE_SYS_CLK_RATIO	(UTIL_CSR_BASE_ADDR + 0x200)
+#define UTIL_AFULL_THRES	(UTIL_CSR_BASE_ADDR + 0x204)
+#define UTIL_GAP_BETWEEN_READS	(UTIL_CSR_BASE_ADDR + 0x208)
+#define UTIL_MAX_BUF_CNT	(UTIL_CSR_BASE_ADDR + 0x20c)
+#define UTIL_TSQ_FIFO_THRES	(UTIL_CSR_BASE_ADDR + 0x210)
+#define UTIL_TSQ_MAX_CNT	(UTIL_CSR_BASE_ADDR + 0x214)
+#define UTIL_IRAM_DATA_0	(UTIL_CSR_BASE_ADDR + 0x218)
+#define UTIL_IRAM_DATA_1	(UTIL_CSR_BASE_ADDR + 0x21c)
+#define UTIL_IRAM_DATA_2	(UTIL_CSR_BASE_ADDR + 0x220)
+#define UTIL_IRAM_DATA_3	(UTIL_CSR_BASE_ADDR + 0x224)
+
+#define UTIL_BUS_ACCESS_ADDR	(UTIL_CSR_BASE_ADDR + 0x228)
+#define UTIL_BUS_ACCESS_WDATA	(UTIL_CSR_BASE_ADDR + 0x22c)
+#define UTIL_BUS_ACCESS_RDATA	(UTIL_CSR_BASE_ADDR + 0x230)
+
+#define UTIL_INQ_AFULL_THRES	(UTIL_CSR_BASE_ADDR + 0x234)
+
+struct util_cfg {
+	u32 pe_sys_clk_ratio;
+};
+
+#endif /* _UTIL_CSR_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/pfe.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _PFE_H_
+#define _PFE_H_
+
+#include "cbus.h"
+
+#define CLASS_DMEM_BASE_ADDR(i)	(0x00000000 | ((i) << 20))
+/*
+ * Only valid for mem access register interface
+ */
+#define CLASS_IMEM_BASE_ADDR(i)	(0x00000000 | ((i) << 20))
+#define CLASS_DMEM_SIZE	0x00002000
+#define CLASS_IMEM_SIZE	0x00008000
+
+#define TMU_DMEM_BASE_ADDR(i)	(0x00000000 + ((i) << 20))
+/*
+ * Only valid for mem access register interface
+ */
+#define TMU_IMEM_BASE_ADDR(i)	(0x00000000 + ((i) << 20))
+#define TMU_DMEM_SIZE	0x00000800
+#define TMU_IMEM_SIZE	0x00002000
+
+#define UTIL_DMEM_BASE_ADDR	0x00000000
+#define UTIL_DMEM_SIZE	0x00002000
+
+#define PE_LMEM_BASE_ADDR	0xc3010000
+#define PE_LMEM_SIZE	0x8000
+#define PE_LMEM_END	(PE_LMEM_BASE_ADDR + PE_LMEM_SIZE)
+
+#define DMEM_BASE_ADDR	0x00000000
+#define DMEM_SIZE	0x2000	/* TMU has less... */
+#define DMEM_END	(DMEM_BASE_ADDR + DMEM_SIZE)
+
+#define PMEM_BASE_ADDR	0x00010000
+#define PMEM_SIZE	0x8000	/* TMU has less... */
+#define PMEM_END	(PMEM_BASE_ADDR + PMEM_SIZE)
+
+/* These check memory ranges from PE point of view/memory map */
+#define IS_DMEM(addr, len) \
+	({ typeof(addr) addr_ = (addr); \
+	((unsigned long)(addr_) >= DMEM_BASE_ADDR) && \
+	(((unsigned long)(addr_) + (len)) <= DMEM_END); })
+
+#define IS_PMEM(addr, len) \
+	({ typeof(addr) addr_ = (addr); \
+	((unsigned long)(addr_) >= PMEM_BASE_ADDR) && \
+	(((unsigned long)(addr_) + (len)) <= PMEM_END); })
+
+#define IS_PE_LMEM(addr, len) \
+	({ typeof(addr) addr_ = (addr); \
+	((unsigned long)(addr_) >= \
+	PE_LMEM_BASE_ADDR) && \
+	(((unsigned long)(addr_) + \
+	(len)) <= PE_LMEM_END); })
+
+#define IS_PFE_LMEM(addr, len) \
+	({ typeof(addr) addr_ = (addr); \
+	((unsigned long)(addr_) >= \
+	CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR)) && \
+	(((unsigned long)(addr_) + (len)) <= \
+	CBUS_VIRT_TO_PFE(LMEM_END)); })
+
+#define __IS_PHYS_DDR(addr, len) \
+	({ typeof(addr) addr_ = (addr); \
+	((unsigned long)(addr_) >= \
+	DDR_PHYS_BASE_ADDR) && \
+	(((unsigned long)(addr_) + (len)) <= \
+	DDR_PHYS_END); })
+
+#define IS_PHYS_DDR(addr, len)	__IS_PHYS_DDR(DDR_PFE_TO_PHYS(addr), len)
+
+/*
+ * If using a run-time virtual address for the cbus base address use this code
+ */
+extern void *cbus_base_addr;
+extern void *ddr_base_addr;
+extern unsigned long ddr_phys_base_addr;
+extern unsigned int ddr_size;
+
+#define CBUS_BASE_ADDR	cbus_base_addr
+#define DDR_PHYS_BASE_ADDR	ddr_phys_base_addr
+#define DDR_BASE_ADDR	ddr_base_addr
+#define DDR_SIZE	ddr_size
+
+#define DDR_PHYS_END	(DDR_PHYS_BASE_ADDR + DDR_SIZE)
+
+#define LS1012A_PFE_RESET_WA	/*
+				 * PFE doesn't have a global reset and re-init
+				 * should take care of a few things to make PFE
+				 * functional after reset
+				 */
+#define PFE_CBUS_PHYS_BASE_ADDR	0xc0000000	/* CBUS physical base address
+						 * as seen by PE's.
+						 */
+/* CBUS physical base address as seen by PE's. */
+#define PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE	0xc0000000
+
+#define DDR_PHYS_TO_PFE(p)	(((unsigned long int)(p)) & 0x7FFFFFFF)
+#define DDR_PFE_TO_PHYS(p)	(((unsigned long int)(p)) | 0x80000000)
+#define CBUS_PHYS_TO_PFE(p)	(((p) - PFE_CBUS_PHYS_BASE_ADDR) + \
+				PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE)
+/* Translates to PFE address map */
+
+#define DDR_PHYS_TO_VIRT(p)	(((p) - DDR_PHYS_BASE_ADDR) + DDR_BASE_ADDR)
+#define DDR_VIRT_TO_PHYS(v)	(((v) - DDR_BASE_ADDR) + DDR_PHYS_BASE_ADDR)
+#define DDR_VIRT_TO_PFE(p)	(DDR_PHYS_TO_PFE(DDR_VIRT_TO_PHYS(p)))
+
+#define CBUS_VIRT_TO_PFE(v)	(((v) - CBUS_BASE_ADDR) + \
+				PFE_CBUS_PHYS_BASE_ADDR)
+#define CBUS_PFE_TO_VIRT(p)	(((unsigned long int)(p) - \
+				PFE_CBUS_PHYS_BASE_ADDR) + CBUS_BASE_ADDR)
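[Editor's note] Taken together, these macros move pointers between the three views of the same memory: kernel virtual, bus physical, and the PE's 32-bit map. An illustrative round trip, assuming buf is a kernel-virtual pointer inside the mapped PFE DDR window:

	/* Hand a kernel-virtual DDR buffer to a PE, then recover the
	 * physical address from the PE-view pointer (sketch only).
	 */
	unsigned long pe_addr = DDR_VIRT_TO_PFE(buf);	/* virt -> PE view */
	unsigned long phys = DDR_PFE_TO_PHYS(pe_addr);	/* PE view -> phys */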
+
+/* The below part of the code is used in QOS control driver from host */
+#define TMU_APB_BASE_ADDR	0xc1000000	/* TMU base address seen by
+						 * pe's
+						 */
+
+enum {
+	CLASS0_ID = 0,
+	CLASS1_ID,
+	CLASS2_ID,
+	CLASS3_ID,
+	CLASS4_ID,
+	CLASS5_ID,
+	TMU0_ID,
+	TMU1_ID,
+	TMU2_ID,
+	TMU3_ID,
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	UTIL_ID,
+#endif
+	MAX_PE
+};
+
+#define CLASS_MASK	(BIT(CLASS0_ID) | BIT(CLASS1_ID) |\
+			BIT(CLASS2_ID) | BIT(CLASS3_ID) |\
+			BIT(CLASS4_ID) | BIT(CLASS5_ID))
+#define CLASS_MAX_ID	CLASS5_ID
+
+#define TMU_MASK	(BIT(TMU0_ID) | BIT(TMU1_ID) |\
+			BIT(TMU3_ID))
+
+#define TMU_MAX_ID	TMU3_ID
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+#define UTIL_MASK	BIT(UTIL_ID)
+#endif
+
+struct pe_status {
+	u32 cpu_state;
+	u32 activity_counter;
+	u32 rx;
+	union {
+		u32 tx;
+		u32 tmu_qstatus;
+	};
+	u32 drop;
+#if defined(CFG_PE_DEBUG)
+	u32 debug_indicator;
+	u32 debug[16];
+#endif
+} __aligned(16);
+
+struct pe_sync_mailbox {
+	u32 stop;
+	u32 stopped;
+};
+
+/* Drop counter definitions */
+
+#define CLASS_NUM_DROP_COUNTERS	13
+#define UTIL_NUM_DROP_COUNTERS	8
+
+/* PE information.
+ * Structure containing PE's specific information. It is used to create
+ * generic C functions common to all PE's.
+ * Before using the library functions this structure needs to be initialized
+ * with the different registers virtual addresses
+ * (according to the ARM MMU mapping). The default initialization supports a
+ * virtual == physical mapping.
+ */
+struct pe_info {
+	u32 dmem_base_addr;	/* PE's dmem base address */
+	u32 pmem_base_addr;	/* PE's pmem base address */
+	u32 pmem_size;		/* PE's pmem size */
+
+	void *mem_access_wdata;	/* PE's _MEM_ACCESS_WDATA register
+				 * address
+				 */
+	void *mem_access_addr;	/* PE's _MEM_ACCESS_ADDR register
+				 * address
+				 */
+	void *mem_access_rdata;	/* PE's _MEM_ACCESS_RDATA register
+				 * address
+				 */
+};
+
+void pe_lmem_read(u32 *dst, u32 len, u32 offset);
+void pe_lmem_write(u32 *src, u32 len, u32 offset);
+
+void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
+void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
+
+u32 pe_pmem_read(int id, u32 addr, u8 size);
+
+void pe_dmem_write(int id, u32 val, u32 addr, u8 size);
+u32 pe_dmem_read(int id, u32 addr, u8 size);
+void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len);
+void class_pe_lmem_memset(u32 dst, int val, unsigned int len);
+void class_bus_write(u32 val, u32 addr, u8 size);
+u32 class_bus_read(u32 addr, u8 size);
+
+#define class_bus_readl(addr)	class_bus_read(addr, 4)
+#define class_bus_readw(addr)	class_bus_read(addr, 2)
+#define class_bus_readb(addr)	class_bus_read(addr, 1)
+
+#define class_bus_writel(val, addr)	class_bus_write(val, addr, 4)
+#define class_bus_writew(val, addr)	class_bus_write(val, addr, 2)
+#define class_bus_writeb(val, addr)	class_bus_write(val, addr, 1)
+
+#define pe_dmem_readl(id, addr)	pe_dmem_read(id, addr, 4)
+#define pe_dmem_readw(id, addr)	pe_dmem_read(id, addr, 2)
+#define pe_dmem_readb(id, addr)	pe_dmem_read(id, addr, 1)
+
+#define pe_dmem_writel(id, val, addr)	pe_dmem_write(id, val, addr, 4)
+#define pe_dmem_writew(id, val, addr)	pe_dmem_write(id, val, addr, 2)
+#define pe_dmem_writeb(id, val, addr)	pe_dmem_write(id, val, addr, 1)
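[Editor's note] These sized accessors are the host's window into per-PE data memory. A minimal sketch of how they combine with the PE IDs above and the mailbox offsets from pfe_ctrl.h later in this patch, mirroring the idiom of pe_sync_stop() in pfe_ctrl.c:

	/* Signal CLASS PE 0 to stop via its sync mailbox, then check that
	 * the request is visible in PE DMEM (illustrative only; values
	 * are big-endian on the PE side).
	 */
	pe_dmem_writel(CLASS0_ID, cpu_to_be32(0x1), CLASS_DM_SYNC_MBOX);
	if (pe_dmem_readl(CLASS0_ID, CLASS_DM_SYNC_MBOX) & cpu_to_be32(0x1))
		;	/* stop request landed */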
+
+/*int pe_load_elf_section(int id, const void *data, elf32_shdr *shdr); */
+int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
+			struct device *dev);
+
+void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
+		  unsigned int ddr_size);
+void bmu_init(void *base, struct BMU_CFG *cfg);
+void bmu_reset(void *base);
+void bmu_enable(void *base);
+void bmu_disable(void *base);
+void bmu_set_config(void *base, struct BMU_CFG *cfg);
+
+/*
+ * An enumerated type for loopback values. This can be one of three values:
+ * no loopback (normal operation), local loopback through the internal
+ * loopback module of the MAC, or PHY loopback through the external PHY.
+ */
+#ifndef __MAC_LOOP_ENUM__
+#define __MAC_LOOP_ENUM__
+enum mac_loop {LB_NONE, LB_EXT, LB_LOCAL};
+#endif
+
+void gemac_init(void *base, void *config);
+void gemac_disable_rx_checksum_offload(void *base);
+void gemac_enable_rx_checksum_offload(void *base);
+void gemac_set_speed(void *base, enum mac_speed gem_speed);
+void gemac_set_duplex(void *base, int duplex);
+void gemac_set_mode(void *base, int mode);
+void gemac_enable(void *base);
+void gemac_tx_disable(void *base);
+void gemac_tx_enable(void *base);
+void gemac_disable(void *base);
+void gemac_reset(void *base);
+void gemac_set_address(void *base, struct spec_addr *addr);
+struct spec_addr gemac_get_address(void *base);
+void gemac_set_loop(void *base, enum mac_loop gem_loop);
+void gemac_set_laddr1(void *base, struct pfe_mac_addr *address);
+void gemac_set_laddr2(void *base, struct pfe_mac_addr *address);
+void gemac_set_laddr3(void *base, struct pfe_mac_addr *address);
+void gemac_set_laddr4(void *base, struct pfe_mac_addr *address);
+void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
+		      unsigned int entry_index);
+void gemac_clear_laddr1(void *base);
+void gemac_clear_laddr2(void *base);
+void gemac_clear_laddr3(void *base);
+void gemac_clear_laddr4(void *base);
+void gemac_clear_laddrN(void *base, unsigned int entry_index);
+struct pfe_mac_addr gemac_get_hash(void *base);
+void gemac_set_hash(void *base, struct pfe_mac_addr *hash);
+struct pfe_mac_addr gem_get_laddr1(void *base);
+struct pfe_mac_addr gem_get_laddr2(void *base);
+struct pfe_mac_addr gem_get_laddr3(void *base);
+struct pfe_mac_addr gem_get_laddr4(void *base);
+struct pfe_mac_addr gem_get_laddrN(void *base, unsigned int entry_index);
+void gemac_set_config(void *base, struct gemac_cfg *cfg);
+void gemac_allow_broadcast(void *base);
+void gemac_no_broadcast(void *base);
+void gemac_enable_1536_rx(void *base);
+void gemac_disable_1536_rx(void *base);
+void gemac_set_rx_max_fl(void *base, int mtu);
+void gemac_enable_rx_jmb(void *base);
+void gemac_disable_rx_jmb(void *base);
+void gemac_enable_stacked_vlan(void *base);
+void gemac_disable_stacked_vlan(void *base);
+void gemac_enable_pause_rx(void *base);
+void gemac_disable_pause_rx(void *base);
+void gemac_enable_copy_all(void *base);
+void gemac_disable_copy_all(void *base);
+void gemac_set_bus_width(void *base, int width);
+void gemac_set_wol(void *base, u32 wol_conf);
+
+void gpi_init(void *base, struct gpi_cfg *cfg);
+void gpi_reset(void *base);
+void gpi_enable(void *base);
+void gpi_disable(void *base);
+void gpi_set_config(void *base, struct gpi_cfg *cfg);
+
+void class_init(struct class_cfg *cfg);
+void class_reset(void);
+void class_enable(void);
+void class_disable(void);
+void class_set_config(struct class_cfg *cfg);
+
+void tmu_reset(void);
+void tmu_init(struct tmu_cfg *cfg);
+void tmu_enable(u32 pe_mask);
+void tmu_disable(u32 pe_mask);
+u32  tmu_qstatus(u32 if_id);
+u32  tmu_pkts_processed(u32 if_id);
+
+void util_init(struct util_cfg *cfg);
+void util_reset(void);
+void util_enable(void);
+void util_disable(void);
+
+void hif_init(void);
+void hif_tx_enable(void);
+void hif_tx_disable(void);
+void hif_rx_enable(void);
+void hif_rx_disable(void);
+
+/* Get Chip Revision level
+ *
+ */
+static inline unsigned int CHIP_REVISION(void)
+{
+	/* For LS1012A, always return 1 */
+	return 1;
+}
+
+/* Start HIF rx DMA
+ *
+ */
+static inline void hif_rx_dma_start(void)
+{
+	writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_RX_CTRL);
+}
+
+/* Start HIF tx DMA
+ *
+ */
+static inline void hif_tx_dma_start(void)
+{
+	writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_TX_CTRL);
+}
+
+#endif /* _PFE_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_cdev.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ */
+
+/* @pfe_cdev.c.
+ * Dummy device representing the PFE userspace (US) driver.
+ * - used for interacting with the kernel layer for link status
+ */
+
+#include <linux/eventfd.h>
+#include <linux/irqreturn.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+
+#include "pfe_cdev.h"
+#include "pfe_mod.h"
+
+static int pfe_majno;
+static struct class *pfe_char_class;
+static struct device *pfe_char_dev;
+struct eventfd_ctx *g_trigger;
+
+struct pfe_shared_info link_states[PFE_CDEV_ETH_COUNT];
+
+static int pfe_cdev_open(struct inode *inp, struct file *fp)
+{
+	pr_debug("PFE CDEV device opened.\n");
+	return 0;
+}
+
+static ssize_t pfe_cdev_read(struct file *fp, char *buf,
+			     size_t len, loff_t *off)
+{
+	int ret = 0;
+
+	pr_info("PFE CDEV attempting to copy (%lu) bytes to user.\n",
+		sizeof(link_states));
+
+	pr_debug("Dump link_state on screen before copy_to_user\n");
+	for (; ret < PFE_CDEV_ETH_COUNT; ret++) {
+		pr_debug("%u %u", link_states[ret].phy_id,
+			 link_states[ret].state);
+		pr_debug("\n");
+	}
+
+	/* Copy to user the value in buffer sized len */
+	ret = copy_to_user(buf, &link_states, sizeof(link_states));
+	if (ret != 0) {
+		pr_err("Failed to send (%d) bytes of (%lu) requested.\n",
+		       ret, len);
+		return -EFAULT;
+	}
+
+	/* offset is set back to 0 as there is no contextual reading offset */
+	*off = 0;
+	pr_debug("Read of (%lu) bytes performed.\n", sizeof(link_states));
+
+	return sizeof(link_states);
+}
+
+/**
+ * This function is for getting some commands from user through non-IOCTL
+ * channel. It can be used to configure the device.
+ * TODO: To be filled in future, if duplex communication with user
+ * space is required.
+ */
+static ssize_t pfe_cdev_write(struct file *fp, const char *buf,
+			      size_t len, loff_t *off)
+{
+	pr_info("PFE CDEV Write operation not supported!\n");
+
+	return -EFAULT;
+}
+
+static int pfe_cdev_release(struct inode *inp, struct file *fp)
+{
+	if (g_trigger) {
+		free_irq(pfe->hif_irq, g_trigger);
+		eventfd_ctx_put(g_trigger);
+		g_trigger = NULL;
+	}
+
+	pr_info("PFE_CDEV: Device successfully closed\n");
+	return 0;
+}
+
+/*
+ * hif_us_isr-
+ * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
+ */
+static irqreturn_t hif_us_isr(int irq, void *arg)
+{
+	struct eventfd_ctx *trigger = (struct eventfd_ctx *)arg;
+	int int_status;
+	int int_enable_mask;
+
+	/* Read hif interrupt source register */
+	int_status = readl_relaxed(HIF_INT_SRC);
+	int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
+
+	if ((int_status & HIF_INT) == 0)
+		return IRQ_NONE;
+
+	if (int_status & HIF_RXPKT_INT) {
+		int_enable_mask &= ~(HIF_RXPKT_INT);
+		/* Disable interrupts, they will be enabled after
+		 * they are serviced
+		 */
+		writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
+
+		eventfd_signal(trigger, 1);
+	}
+
+	return IRQ_HANDLED;
+}
+
+#define PFE_INTR_COAL_USECS	100
+static long pfe_cdev_ioctl(struct file *fp, unsigned int cmd,
+			   unsigned long arg)
+{
+	int ret = -EFAULT;
+	int __user *argp = (int __user *)arg;
+
+	pr_debug("PFE CDEV IOCTL Called with cmd=(%u)\n", cmd);
+
+	switch (cmd) {
+	case PFE_CDEV_ETH0_STATE_GET:
+		/* Return an unsigned int (link state) for ETH0 */
+		*argp = link_states[0].state;
+		pr_debug("Returning state=%d for ETH0\n", *argp);
+		ret = 0;
+		break;
+	case PFE_CDEV_ETH1_STATE_GET:
+		/* Return an unsigned int (link state) for ETH1 */
+		*argp = link_states[1].state;
+		pr_debug("Returning state=%d for ETH1\n", *argp);
+		ret = 0;
+		break;
+	case PFE_CDEV_HIF_INTR_EN:
+		/* Return success/failure */
+		g_trigger = eventfd_ctx_fdget(*argp);
+		if (IS_ERR(g_trigger))
+			return PTR_ERR(g_trigger);
+		ret = request_irq(pfe->hif_irq, hif_us_isr, 0, "pfe_hif",
+				  g_trigger);
+		if (ret) {
+			pr_err("%s: failed to get the hif IRQ = %d\n",
+			       __func__, pfe->hif_irq);
+			eventfd_ctx_put(g_trigger);
+			g_trigger = NULL;
+			break;
+		}
+		writel((PFE_INTR_COAL_USECS * (pfe->ctrl.sys_clk / 1000)) |
+		       HIF_INT_COAL_ENABLE, HIF_INT_COAL);
+
+		pr_debug("request_irq for hif interrupt: %d\n", pfe->hif_irq);
+		ret = 0;
+		break;
+	default:
+		pr_info("Unsupported cmd (%d) for PFE CDEV.\n", cmd);
+		break;
+	}
+
+	return ret;
+}
+
+static unsigned int pfe_cdev_poll(struct file *fp,
+				  struct poll_table_struct *wait)
+{
+	pr_info("PFE CDEV poll method not supported\n");
+	return 0;
+}
+
+static const struct file_operations pfe_cdev_fops = {
+	.open = pfe_cdev_open,
+	.read = pfe_cdev_read,
+	.write = pfe_cdev_write,
+	.release = pfe_cdev_release,
+	.unlocked_ioctl = pfe_cdev_ioctl,
+	.poll = pfe_cdev_poll,
+};
+
+int pfe_cdev_init(void)
+{
+	int ret;
+
+	pr_debug("PFE CDEV initialization begin\n");
+
+	/* Register the major number for the device */
+	pfe_majno = register_chrdev(0, PFE_CDEV_NAME, &pfe_cdev_fops);
+	if (pfe_majno < 0) {
+		pr_err("Unable to register PFE CDEV. PFE CDEV not available\n");
+		ret = pfe_majno;
+		goto cleanup;
+	}
+
+	pr_debug("PFE CDEV assigned major number: %d\n", pfe_majno);
+
+	/* Register the class for the device */
+	pfe_char_class = class_create(THIS_MODULE, PFE_CLASS_NAME);
+	if (IS_ERR(pfe_char_class)) {
+		pr_err(
+		"Failed to init class for PFE CDEV. PFE CDEV not available.\n");
+		goto cleanup;
+	}
+
+	pr_debug("PFE CDEV Class created successfully.\n");
+
+	/* Create the device without any parent and without any callback data */
+	pfe_char_dev = device_create(pfe_char_class, NULL,
+				     MKDEV(pfe_majno, 0), NULL,
+				     PFE_CDEV_NAME);
+	if (IS_ERR(pfe_char_dev)) {
+		pr_err("Unable to create PFE CDEV device. PFE CDEV not available.\n");
+		ret = PTR_ERR(pfe_char_dev);
+		goto cleanup;
+	}
+
+	/* Information structure being shared with the userspace */
+	memset(link_states, 0, sizeof(struct pfe_shared_info) *
+	       PFE_CDEV_ETH_COUNT);
+
+	pr_info("PFE CDEV created: %s\n", PFE_CDEV_NAME);
+
+	ret = 0;
+	return ret;
+
+cleanup:
+	if (!IS_ERR(pfe_char_class))
+		class_destroy(pfe_char_class);
+
+	if (pfe_majno > 0)
+		unregister_chrdev(pfe_majno, PFE_CDEV_NAME);
+
+	ret = -EFAULT;
+	return ret;
+}
+
+void pfe_cdev_exit(void)
+{
+	if (!IS_ERR(pfe_char_dev))
+		device_destroy(pfe_char_class, MKDEV(pfe_majno, 0));
+
+	if (!IS_ERR(pfe_char_class)) {
+		class_unregister(pfe_char_class);
+		class_destroy(pfe_char_class);
+	}
+
+	if (pfe_majno > 0)
+		unregister_chrdev(pfe_majno, PFE_CDEV_NAME);
+
+	/* reset the variables */
+	pfe_majno = 0;
+	pfe_char_class = NULL;
+	pfe_char_dev = NULL;
+
+	pr_info("PFE CDEV Removed.\n");
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_cdev.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018 NXP
+ */
+
+#ifndef _PFE_CDEV_H_
+#define _PFE_CDEV_H_
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+
+#define PFE_CDEV_NAME "pfe_us_cdev"
+#define PFE_CLASS_NAME "ppfe_us"
+
+/* Extracted from ls1012a_pfe_platform_data, there are 3 interfaces which are
+ * supported by the PFE driver. Should be updated if the number of eth devices
+ * changes.
+ */
+#define PFE_CDEV_ETH_COUNT	3
+
+struct pfe_shared_info {
+	uint32_t phy_id;	/* Link phy ID */
+	uint8_t state;		/* Has either 0 or 1 */
+};
+
+extern struct pfe_shared_info link_states[PFE_CDEV_ETH_COUNT];
+
+/* IOCTL Commands */
+#define PFE_CDEV_ETH0_STATE_GET	_IOR('R', 0, int)
+#define PFE_CDEV_ETH1_STATE_GET	_IOR('R', 1, int)
+#define PFE_CDEV_HIF_INTR_EN	_IOWR('R', 2, int)
+
+int pfe_cdev_init(void);
+void pfe_cdev_exit(void);
+
+#endif /* _PFE_CDEV_H_ */
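[Editor's note] For context, a minimal userspace sketch of how this interface is meant to be driven, assuming the device node created by pfe_cdev_init() appears as /dev/pfe_us_cdev (hypothetical snippet, not part of the patch; error handling elided):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "pfe_cdev.h"	/* ioctl codes and struct pfe_shared_info */

	struct pfe_shared_info states[PFE_CDEV_ETH_COUNT];
	int state, fd = open("/dev/pfe_us_cdev", O_RDONLY);

	ioctl(fd, PFE_CDEV_ETH0_STATE_GET, &state);	/* 0 = down, 1 = up */
	read(fd, states, sizeof(states));		/* snapshot of all links */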
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_ctrl.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+
+#include "pfe_mod.h"
+#include "pfe_ctrl.h"
+
+#define TIMEOUT_MS	1000
+
+int relax(unsigned long end)
+{
+	if (time_after(jiffies, end)) {
+		if (time_after(jiffies, end + (TIMEOUT_MS * HZ) / 1000))
+			return -1;
+
+		if (need_resched())
+			schedule();
+	}
+
+	return 0;
+}
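[Editor's note] relax() implements the cooperative polling pattern used by pe_sync_stop() and pe_reset_all() below: spin until a condition holds, yield the CPU when needed, and give up TIMEOUT_MS past the grace deadline. A minimal sketch of the idiom (poll_condition is hypothetical):

	unsigned long end = jiffies + 2;	/* short grace period */

	while (!poll_condition()) {		/* hypothetical condition */
		if (relax(end) < 0)		/* timed out past TIMEOUT_MS */
			return -EIO;
	}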
+
+void pfe_ctrl_suspend(struct pfe_ctrl *ctrl)
+{
+	int id;
+
+	mutex_lock(&ctrl->mutex);
+
+	for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
+		pe_dmem_write(id, cpu_to_be32(0x1), CLASS_DM_RESUME, 4);
+
+	for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
+		if (id == TMU2_ID)
+			continue;
+		pe_dmem_write(id, cpu_to_be32(0x1), TMU_DM_RESUME, 4);
+	}
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	pe_dmem_write(UTIL_ID, cpu_to_be32(0x1), UTIL_DM_RESUME, 4);
+#endif
+	mutex_unlock(&ctrl->mutex);
+}
+
+void pfe_ctrl_resume(struct pfe_ctrl *ctrl)
+{
+	int pe_mask = CLASS_MASK | TMU_MASK;
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	pe_mask |= UTIL_MASK;
+#endif
+	mutex_lock(&ctrl->mutex);
+	pe_start(&pfe->ctrl, pe_mask);
+	mutex_unlock(&ctrl->mutex);
+}
+
+/* PE sync stop.
+ * Stops packet processing for a list of PE's (specified using a bitmask).
+ * The caller must hold ctrl->mutex.
+ *
+ * @param ctrl		Control context
+ * @param pe_mask	Mask of PE id's to stop
+ *
+ */
+int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask)
+{
+	struct pe_sync_mailbox *mbox;
+	int pe_stopped = 0;
+	unsigned long end = jiffies + 2;
+	int i;
+
+	pe_mask &= 0x2FF;	/* Exclude Util + TMU2 */
+
+	for (i = 0; i < MAX_PE; i++)
+		if (pe_mask & (1 << i)) {
+			mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+
+			pe_dmem_write(i, cpu_to_be32(0x1), (unsigned
+					long)&mbox->stop, 4);
+		}
+
+	while (pe_stopped != pe_mask) {
+		for (i = 0; i < MAX_PE; i++)
+			if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
+				mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+
+				if (pe_dmem_read(i, (unsigned
+					long)&mbox->stopped, 4) &
+					cpu_to_be32(0x1))
+					pe_stopped |= (1 << i);
+			}
+
+		if (relax(end) < 0)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
+
+	for (i = 0; i < MAX_PE; i++)
+		if (pe_mask & (1 << i)) {
+			mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+
+			pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
+					long)&mbox->stop, 4);
+		}
+
+	return -EIO;
+}
+
+/* PE start.
+ * Starts packet processing for a list of PE's (specified using a bitmask).
+ * The caller must hold ctrl->mutex.
+ *
+ * @param ctrl		Control context
+ * @param pe_mask	Mask of PE id's to start
+ *
+ */
+void pe_start(struct pfe_ctrl *ctrl, int pe_mask)
+{
+	struct pe_sync_mailbox *mbox;
+	int i;
+
+	for (i = 0; i < MAX_PE; i++)
+		if (pe_mask & (1 << i)) {
+			mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+
+			pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
+					long)&mbox->stop, 4);
+		}
+}
+
+/* This function will ensure all PEs are put in to idle state */
+int pe_reset_all(struct pfe_ctrl *ctrl)
+{
+	struct pe_sync_mailbox *mbox;
+	int pe_stopped = 0;
+	unsigned long end = jiffies + 2;
+	int i;
+	int pe_mask = CLASS_MASK | TMU_MASK;
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	pe_mask |= UTIL_MASK;
+#endif
+
+	for (i = 0; i < MAX_PE; i++)
+		if (pe_mask & (1 << i)) {
+			mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+
+			pe_dmem_write(i, cpu_to_be32(0x2), (unsigned
+					long)&mbox->stop, 4);
+		}
+
+	while (pe_stopped != pe_mask) {
+		for (i = 0; i < MAX_PE; i++)
+			if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
+				mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+
+				if (pe_dmem_read(i, (unsigned long)
+					&mbox->stopped, 4) &
+					cpu_to_be32(0x1))
+					pe_stopped |= (1 << i);
+			}
+
+		if (relax(end) < 0)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
+	return -EIO;
+}
+
+int pfe_ctrl_init(struct pfe *pfe)
+{
+	struct pfe_ctrl *ctrl = &pfe->ctrl;
+	int id;
+
+	pr_info("%s\n", __func__);
+
+	mutex_init(&ctrl->mutex);
+	spin_lock_init(&ctrl->lock);
+
+	for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
+		ctrl->sync_mailbox_baseaddr[id] = CLASS_DM_SYNC_MBOX;
+		ctrl->msg_mailbox_baseaddr[id] = CLASS_DM_MSG_MBOX;
+	}
+
+	for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
+		if (id == TMU2_ID)
+			continue;
+		ctrl->sync_mailbox_baseaddr[id] = TMU_DM_SYNC_MBOX;
+		ctrl->msg_mailbox_baseaddr[id] = TMU_DM_MSG_MBOX;
+	}
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	ctrl->sync_mailbox_baseaddr[UTIL_ID] = UTIL_DM_SYNC_MBOX;
+	ctrl->msg_mailbox_baseaddr[UTIL_ID] = UTIL_DM_MSG_MBOX;
+#endif
+
+	ctrl->hash_array_baseaddr = pfe->ddr_baseaddr + ROUTE_TABLE_BASEADDR;
+	ctrl->hash_array_phys_baseaddr = pfe->ddr_phys_baseaddr +
+						ROUTE_TABLE_BASEADDR;
+
+	ctrl->dev = pfe->dev;
+
+	pr_info("%s finished\n", __func__);
+
+	return 0;
+}
+
+void pfe_ctrl_exit(struct pfe *pfe)
+{
+	pr_info("%s\n", __func__);
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_ctrl.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_CTRL_H_
+#define _PFE_CTRL_H_
+
+#include <linux/dmapool.h>
+
+#include "pfe_mod.h"
+#include "pfe/pfe.h"
+
+#define DMA_BUF_SIZE_128	0x80	/* enough for 1 conntrack */
+#define DMA_BUF_SIZE_256	0x100
+/* enough for 2 conntracks, 1 bridge entry or 1 multicast entry */
+#define DMA_BUF_SIZE_512	0x200
+/* 512bytes dma allocated buffers used by rtp relay feature */
+#define DMA_BUF_MIN_ALIGNMENT	8
+#define DMA_BUF_BOUNDARY	(4 * 1024)
+/* bursts cannot cross a 4k boundary */
+
+#define CMD_TX_ENABLE	0x0501
+#define CMD_TX_DISABLE	0x0502
+
+#define CMD_RX_LRO	0x0011
+#define CMD_PKTCAP_ENABLE	0x0d01
+#define CMD_QM_EXPT_RATE	0x020c
+
+#define CLASS_DM_SH_STATIC	(0x800)
+#define CLASS_DM_CPU_TICKS	(CLASS_DM_SH_STATIC)
+#define CLASS_DM_SYNC_MBOX	(0x808)
+#define CLASS_DM_MSG_MBOX	(0x810)
+#define CLASS_DM_DROP_CNTR	(0x820)
+#define CLASS_DM_RESUME	(0x854)
+#define CLASS_DM_PESTATUS	(0x860)
+
+#define TMU_DM_SH_STATIC	(0x80)
+#define TMU_DM_CPU_TICKS	(TMU_DM_SH_STATIC)
+#define TMU_DM_SYNC_MBOX	(0x88)
+#define TMU_DM_MSG_MBOX	(0x90)
+#define TMU_DM_RESUME	(0xA0)
+#define TMU_DM_PESTATUS	(0xB0)
+#define TMU_DM_CONTEXT	(0x300)
+#define TMU_DM_TX_TRANS	(0x480)
+
+#define UTIL_DM_SH_STATIC	(0x0)
+#define UTIL_DM_CPU_TICKS	(UTIL_DM_SH_STATIC)
+#define UTIL_DM_SYNC_MBOX	(0x8)
+#define UTIL_DM_MSG_MBOX	(0x10)
+#define UTIL_DM_DROP_CNTR	(0x20)
+#define UTIL_DM_RESUME	(0x40)
+#define UTIL_DM_PESTATUS	(0x50)
+
+struct pfe_ctrl {
+	struct mutex mutex; /* to serialize pfe control access */
+	spinlock_t lock;
+
+	void *dma_pool;
+	void *dma_pool_512;
+	void *dma_pool_128;
+
+	struct device *dev;
+
+	void *hash_array_baseaddr;		/*
+						 * Virtual base address of
+						 * the conntrack hash array
+						 */
+	unsigned long hash_array_phys_baseaddr; /*
+						 * Physical base address of
+						 * the conntrack hash array
+						 */
+
+	int (*event_cb)(u16, u16, u16*);
+
+	unsigned long sync_mailbox_baseaddr[MAX_PE];	/*
+							 * Sync mailbox PFE
+							 * internal address,
+							 * initialized
+							 * when parsing elf images
+							 */
+	unsigned long msg_mailbox_baseaddr[MAX_PE];	/*
+							 * Msg mailbox PFE internal
+							 * address, initialized
+							 * when parsing elf images
+							 */
+	unsigned int sys_clk;	/* AXI clock value, in KHz */
+};
+
+int pfe_ctrl_init(struct pfe *pfe);
+void pfe_ctrl_exit(struct pfe *pfe);
+int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask);
+void pe_start(struct pfe_ctrl *ctrl, int pe_mask);
+int pe_reset_all(struct pfe_ctrl *ctrl);
+void pfe_ctrl_suspend(struct pfe_ctrl *ctrl);
+void pfe_ctrl_resume(struct pfe_ctrl *ctrl);
+int relax(unsigned long end);
+
+#endif /* _PFE_CTRL_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_debugfs.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+
+#include "pfe_mod.h"
+
+static int dmem_show(struct seq_file *s, void *unused)
+{
+	u32 dmem_addr, val;
+	int id = (long int)s->private;
+	int i;
+
+	for (dmem_addr = 0; dmem_addr < CLASS_DMEM_SIZE; dmem_addr += 8 * 4) {
+		seq_printf(s, "%04x:", dmem_addr);
+
+		for (i = 0; i < 8; i++) {
+			val = pe_dmem_read(id, dmem_addr + i * 4, 4);
+			seq_printf(s, " %02x %02x %02x %02x", val & 0xff,
+				   (val >> 8) & 0xff, (val >> 16) & 0xff,
+				   (val >> 24) & 0xff);
+		}
+
+		seq_puts(s, "\n");
+	}
+
+	return 0;
+}
+
+static int dmem_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dmem_show, inode->i_private);
+}
+
+static const struct file_operations dmem_fops = {
+	.open		= dmem_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
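[Editor's note] Once the files below are registered, each pe<N>_dmem node can be read from userspace with standard tools (for example, cat /sys/kernel/debug/pfe/pe0_dmem); each output line is a DMEM offset followed by 32 bytes of that PE's data memory, dumped byte by byte.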
+
+int pfe_debugfs_init(struct pfe *pfe)
+{
+	struct dentry *d;
+
+	pr_info("%s\n", __func__);
+
+	pfe->dentry = debugfs_create_dir("pfe", NULL);
+	if (IS_ERR_OR_NULL(pfe->dentry))
+		goto err_dir;
+
+	d = debugfs_create_file("pe0_dmem", 0444, pfe->dentry, (void *)0,
+				&dmem_fops);
+	if (IS_ERR_OR_NULL(d))
+		goto err_pe;
+
+	d = debugfs_create_file("pe1_dmem", 0444, pfe->dentry, (void *)1,
+				&dmem_fops);
+	if (IS_ERR_OR_NULL(d))
+		goto err_pe;
+
+	d = debugfs_create_file("pe2_dmem", 0444, pfe->dentry, (void *)2,
+				&dmem_fops);
+	if (IS_ERR_OR_NULL(d))
+		goto err_pe;
+
+	d = debugfs_create_file("pe3_dmem", 0444, pfe->dentry, (void *)3,
+				&dmem_fops);
+	if (IS_ERR_OR_NULL(d))
+		goto err_pe;
+
+	d = debugfs_create_file("pe4_dmem", 0444, pfe->dentry, (void *)4,
+				&dmem_fops);
+	if (IS_ERR_OR_NULL(d))
+		goto err_pe;
+
+	d = debugfs_create_file("pe5_dmem", 0444, pfe->dentry, (void *)5,
+				&dmem_fops);
+	if (IS_ERR_OR_NULL(d))
+		goto err_pe;
+
+	return 0;
+
+err_pe:
+	debugfs_remove_recursive(pfe->dentry);
+
+err_dir:
+	return -1;
+}
+
+void pfe_debugfs_exit(struct pfe *pfe)
+{
+	debugfs_remove_recursive(pfe->dentry);
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_debugfs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_DEBUGFS_H_
+#define _PFE_DEBUGFS_H_
+
+int pfe_debugfs_init(struct pfe *pfe);
+void pfe_debugfs_exit(struct pfe *pfe);
+
+#endif /* _PFE_DEBUGFS_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_eth.c
@@ -0,0 +1,2554 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+/* @pfe_eth.c.
+ * Ethernet driver to handle the exception path for PFE.
+ * - uses HIF functions to send/receive packets.
+ * - uses ctrl function to start/stop interfaces.
+ * - uses direct register accesses to control phy operation.
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+
+#include <net/ip.h>
+#include <net/sock.h>
+
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/fsl/guts.h>
+
+#if defined(CONFIG_NF_CONNTRACK_MARK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+#include "pfe_mod.h"
+#include "pfe_eth.h"
+#include "pfe_cdev.h"
+
+#define LS1012A_REV_1_0		0x87040010
+
+bool pfe_use_old_dts_phy;
+bool pfe_errata_a010897;
+
+static void *cbus_emac_base[3];
+static void *cbus_gpi_base[3];
+
+/* Forward Declaration */
+static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv);
+static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv);
+static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
+				from_tx, int n_desc);
+
+/* MDIO registers */
+#define MDIO_SGMII_CR			0x00
+#define MDIO_SGMII_SR			0x01
+#define MDIO_SGMII_DEV_ABIL_SGMII	0x04
+#define MDIO_SGMII_LINK_TMR_L		0x12
+#define MDIO_SGMII_LINK_TMR_H		0x13
+#define MDIO_SGMII_IF_MODE		0x14
+
+/* SGMII Control defines */
+#define SGMII_CR_RST			0x8000
+#define SGMII_CR_AN_EN			0x1000
+#define SGMII_CR_RESTART_AN		0x0200
+#define SGMII_CR_FD			0x0100
+#define SGMII_CR_SPEED_SEL1_1G		0x0040
+#define SGMII_CR_DEF_VAL		(SGMII_CR_AN_EN | SGMII_CR_FD | \
+					 SGMII_CR_SPEED_SEL1_1G)
+
+/* SGMII IF Mode */
+#define SGMII_DUPLEX_HALF		0x10
+#define SGMII_SPEED_10MBPS		0x00
+#define SGMII_SPEED_100MBPS		0x04
+#define SGMII_SPEED_1GBPS		0x08
+#define SGMII_USE_SGMII_AN		0x02
+#define SGMII_EN			0x01
+
+/* SGMII Device Ability for SGMII */
+#define SGMII_DEV_ABIL_ACK		0x4000
+#define SGMII_DEV_ABIL_EEE_CLK_STP_EN	0x0100
+#define SGMII_DEV_ABIL_SGMII		0x0001
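[Editor's note] These bits are typically combined when programming the SGMII PCS over MDIO. An illustrative composition of a 1 Gbps, SGMII-autoneg IF_MODE value; how it is written depends on the platform's MDIO accessor, and the helper name below is hypothetical:

	/* 1 Gbps, SGMII autoneg, PCS enabled (illustrative only). */
	u16 if_mode = SGMII_EN | SGMII_USE_SGMII_AN | SGMII_SPEED_1GBPS;

	mdio_pcs_write(bus, MDIO_SGMII_IF_MODE, if_mode);	/* hypothetical */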
+
+unsigned int gemac_regs[] = {
+	0x0004, /* Interrupt event */
+	0x0008, /* Interrupt mask */
+	0x0024, /* Ethernet control */
+	0x0064, /* MIB Control/Status */
+	0x0084, /* Receive control/status */
+	0x00C4, /* Transmit control */
+	0x00E4, /* Physical address low */
+	0x00E8, /* Physical address high */
+	0x0144, /* Transmit FIFO Watermark and Store and Forward Control */
+	0x0190, /* Receive FIFO Section Full Threshold */
+	0x01A0, /* Transmit FIFO Section Empty Threshold */
+	0x01B0, /* Frame Truncation Length */
+};
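[Editor's note] This offset table is what pfe_eth_gemac_reglen() and pfe_eth_gemac_get_regs() further down expose through ethtool's register-dump interface, so a snapshot of these MTIP GEMAC registers can be pulled from userspace with ethtool -d on the interface.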
+
+/********************************************************************/
+/* SYSFS INTERFACE */
+/********************************************************************/
+
+#ifdef PFE_ETH_NAPI_STATS
+/*
+ * pfe_eth_show_napi_stats
+ */
+static ssize_t pfe_eth_show_napi_stats(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	ssize_t len = 0;
+
+	len += sprintf(buf + len, "sched:  %u\n",
+			priv->napi_counters[NAPI_SCHED_COUNT]);
+	len += sprintf(buf + len, "poll:   %u\n",
+			priv->napi_counters[NAPI_POLL_COUNT]);
+	len += sprintf(buf + len, "packet: %u\n",
+			priv->napi_counters[NAPI_PACKET_COUNT]);
+	len += sprintf(buf + len, "budget: %u\n",
+			priv->napi_counters[NAPI_FULL_BUDGET_COUNT]);
+	len += sprintf(buf + len, "desc:   %u\n",
+			priv->napi_counters[NAPI_DESC_COUNT]);
+
+	return len;
+}
+
+/*
+ * pfe_eth_set_napi_stats
+ */
+static ssize_t pfe_eth_set_napi_stats(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+
+	memset(priv->napi_counters, 0, sizeof(priv->napi_counters));
+
+	return count;
+}
+#endif
+#ifdef PFE_ETH_TX_STATS
+/* pfe_eth_show_tx_stats
+ *
+ */
+static ssize_t pfe_eth_show_tx_stats(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	ssize_t len = 0;
+	int i;
+
+	len += sprintf(buf + len, "TX queues stats:\n");
+
+	for (i = 0; i < emac_txq_cnt; i++) {
+		struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
+									i);
+
+		len += sprintf(buf + len, "\n");
+		__netif_tx_lock_bh(tx_queue);
+
+		hif_tx_lock(&pfe->hif);
+		len += sprintf(buf + len,
+				"Queue %2d :  credits               = %10d\n"
+				, i, hif_lib_tx_credit_avail(pfe, priv->id, i));
+		len += sprintf(buf + len,
+				"            tx packets            = %10d\n"
+				, pfe->tmu_credit.tx_packets[priv->id][i]);
+		hif_tx_unlock(&pfe->hif);
+
+		/* Don't output additional stats if queue never used */
+		if (!pfe->tmu_credit.tx_packets[priv->id][i])
+			goto skip;
+
+		len += sprintf(buf + len,
+				"            clean_fail            = %10d\n"
+				, priv->clean_fail[i]);
+		len += sprintf(buf + len,
+				"            stop_queue            = %10d\n"
+				, priv->stop_queue_total[i]);
+		len += sprintf(buf + len,
+				"            stop_queue_hif        = %10d\n"
+				, priv->stop_queue_hif[i]);
+		len += sprintf(buf + len,
+				"            stop_queue_hif_client = %10d\n"
+				, priv->stop_queue_hif_client[i]);
+		len += sprintf(buf + len,
+				"            stop_queue_credit     = %10d\n"
+				, priv->stop_queue_credit[i]);
+skip:
+		__netif_tx_unlock_bh(tx_queue);
+	}
+	return len;
+}
+
+/* pfe_eth_set_tx_stats
+ *
+ */
+static ssize_t pfe_eth_set_tx_stats(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	int i;
+
+	for (i = 0; i < emac_txq_cnt; i++) {
+		struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
+									i);
+
+		__netif_tx_lock_bh(tx_queue);
+		priv->clean_fail[i] = 0;
+		priv->stop_queue_total[i] = 0;
+		priv->stop_queue_hif[i] = 0;
+		priv->stop_queue_hif_client[i] = 0;
+		priv->stop_queue_credit[i] = 0;
+		__netif_tx_unlock_bh(tx_queue);
+	}
+
+	return count;
+}
+#endif
+/* pfe_eth_show_txavail
+ *
+ */
+static ssize_t pfe_eth_show_txavail(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < emac_txq_cnt; i++) {
+		struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
+									i);
+
+		__netif_tx_lock_bh(tx_queue);
+
+		len += sprintf(buf + len, "%d",
+				hif_lib_tx_avail(&priv->client, i));
+
+		__netif_tx_unlock_bh(tx_queue);
+
+		if (i == (emac_txq_cnt - 1))
+			len += sprintf(buf + len, "\n");
+		else
+			len += sprintf(buf + len, " ");
+	}
+
+	return len;
+}
+
+/* pfe_eth_show_default_priority
+ *
+ */
+static ssize_t pfe_eth_show_default_priority(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	rc = sprintf(buf, "%d\n", priv->default_priority);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return rc;
+}
+
+/* pfe_eth_set_default_priority
+ *
+ */
+
+static ssize_t pfe_eth_set_default_priority(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	unsigned long priority, flags;
+
+	if (kstrtoul(buf, 0, &priority))
+		return -EINVAL;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->default_priority = priority;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return count;
+}
+
+static DEVICE_ATTR(txavail, 0444, pfe_eth_show_txavail, NULL);
+static DEVICE_ATTR(default_priority, 0644, pfe_eth_show_default_priority,
+			pfe_eth_set_default_priority);
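[Editor's note] Once pfe_eth_sysfs_init() below creates them, these attributes live under the netdev's sysfs directory, so the default priority can be inspected or changed at runtime (for example via /sys/class/net/<iface>/default_priority), and txavail can be read the same way.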
+
+#ifdef PFE_ETH_NAPI_STATS
+static DEVICE_ATTR(napi_stats, 0644, pfe_eth_show_napi_stats,
+			pfe_eth_set_napi_stats);
+#endif
+
+#ifdef PFE_ETH_TX_STATS
+static DEVICE_ATTR(tx_stats, 0644, pfe_eth_show_tx_stats,
+			pfe_eth_set_tx_stats);
+#endif
+
+/*
+ * pfe_eth_sysfs_init
+ *
+ */
+static int pfe_eth_sysfs_init(struct net_device *ndev)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+	int err;
+
+	/* Initialize the default values */
+
+	/*
+	 * By default, packets without conntrack will use this default low
+	 * priority queue
+	 */
+	priv->default_priority = 0;
+
+	/* Create our sysfs files */
+	err = device_create_file(&ndev->dev, &dev_attr_default_priority);
+	if (err) {
+		netdev_err(ndev,
+			   "failed to create default_priority sysfs files\n");
+		goto err_priority;
+	}
+
+	err = device_create_file(&ndev->dev, &dev_attr_txavail);
+	if (err) {
+		netdev_err(ndev,
+			   "failed to create txavail sysfs file\n");
+		goto err_txavail;
+	}
+
+#ifdef PFE_ETH_NAPI_STATS
+	err = device_create_file(&ndev->dev, &dev_attr_napi_stats);
+	if (err) {
+		netdev_err(ndev, "failed to create napi stats sysfs files\n");
+		goto err_napi;
+	}
+#endif
+
+#ifdef PFE_ETH_TX_STATS
+	err = device_create_file(&ndev->dev, &dev_attr_tx_stats);
+	if (err) {
+		netdev_err(ndev, "failed to create tx stats sysfs files\n");
+		goto err_tx;
+	}
+#endif
+
+	return 0;
+
+#ifdef PFE_ETH_TX_STATS
+err_tx:
+#endif
+#ifdef PFE_ETH_NAPI_STATS
+	device_remove_file(&ndev->dev, &dev_attr_napi_stats);
+
+err_napi:
+#endif
+	device_remove_file(&ndev->dev, &dev_attr_txavail);
+
+err_txavail:
+	device_remove_file(&ndev->dev, &dev_attr_default_priority);
+
+err_priority:
+	return -1;
+}
+
+/* pfe_eth_sysfs_exit
+ *
+ */
+void pfe_eth_sysfs_exit(struct net_device *ndev)
+{
+#ifdef PFE_ETH_TX_STATS
+	device_remove_file(&ndev->dev, &dev_attr_tx_stats);
+#endif
+
+#ifdef PFE_ETH_NAPI_STATS
+	device_remove_file(&ndev->dev, &dev_attr_napi_stats);
+#endif
+	device_remove_file(&ndev->dev, &dev_attr_txavail);
+	device_remove_file(&ndev->dev, &dev_attr_default_priority);
+}
+
+/*************************************************************************/
+/* ETHTOOL INTERFACE */
+/*************************************************************************/
+
+/* MTIP GEMAC */
+static const struct fec_stat {
+	char name[ETH_GSTRING_LEN];
+	u16 offset;
+} fec_stats[] = {
+	/* RMON TX */
+	{ "tx_dropped", RMON_T_DROP },
+	{ "tx_packets", RMON_T_PACKETS },
+	{ "tx_broadcast", RMON_T_BC_PKT },
+	{ "tx_multicast", RMON_T_MC_PKT },
+	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
+	{ "tx_undersize", RMON_T_UNDERSIZE },
+	{ "tx_oversize", RMON_T_OVERSIZE },
+	{ "tx_fragment", RMON_T_FRAG },
+	{ "tx_jabber", RMON_T_JAB },
+	{ "tx_collision", RMON_T_COL },
+	{ "tx_64byte", RMON_T_P64 },
+	{ "tx_65to127byte", RMON_T_P65TO127 },
+	{ "tx_128to255byte", RMON_T_P128TO255 },
+	{ "tx_256to511byte", RMON_T_P256TO511 },
+	{ "tx_512to1023byte", RMON_T_P512TO1023 },
+	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
+	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
+	{ "tx_octets", RMON_T_OCTETS },
+
+	/* IEEE TX */
+	{ "IEEE_tx_drop", IEEE_T_DROP },
+	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
+	{ "IEEE_tx_1col", IEEE_T_1COL },
+	{ "IEEE_tx_mcol", IEEE_T_MCOL },
+	{ "IEEE_tx_def", IEEE_T_DEF },
+	{ "IEEE_tx_lcol", IEEE_T_LCOL },
+	{ "IEEE_tx_excol", IEEE_T_EXCOL },
+	{ "IEEE_tx_macerr", IEEE_T_MACERR },
+	{ "IEEE_tx_cserr", IEEE_T_CSERR },
+	{ "IEEE_tx_sqe", IEEE_T_SQE },
+	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
+	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
+
+	/* RMON RX */
+	{ "rx_packets", RMON_R_PACKETS },
+	{ "rx_broadcast", RMON_R_BC_PKT },
+	{ "rx_multicast", RMON_R_MC_PKT },
+	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
+	{ "rx_undersize", RMON_R_UNDERSIZE },
+	{ "rx_oversize", RMON_R_OVERSIZE },
+	{ "rx_fragment", RMON_R_FRAG },
+	{ "rx_jabber", RMON_R_JAB },
+	{ "rx_64byte", RMON_R_P64 },
+	{ "rx_65to127byte", RMON_R_P65TO127 },
+	{ "rx_128to255byte", RMON_R_P128TO255 },
+	{ "rx_256to511byte", RMON_R_P256TO511 },
+	{ "rx_512to1023byte", RMON_R_P512TO1023 },
+	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
+	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
+	{ "rx_octets", RMON_R_OCTETS },
+
+	/* IEEE RX */
+	{ "IEEE_rx_drop", IEEE_R_DROP },
+	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
+	{ "IEEE_rx_crc", IEEE_R_CRC },
+	{ "IEEE_rx_align", IEEE_R_ALIGN },
+	{ "IEEE_rx_macerr", IEEE_R_MACERR },
+	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
+	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
+};
|
|
+
+static void pfe_eth_fill_stats(struct net_device *ndev, struct ethtool_stats
+				*stats, u64 *data)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+		data[i] = readl(priv->EMAC_baseaddr + fec_stats[i].offset);
+}
+
+static void pfe_eth_gstrings(struct net_device *netdev,
+			     u32 stringset, u8 *data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       fec_stats[i].name, ETH_GSTRING_LEN);
+		break;
+	}
+}
+
+static int pfe_eth_stats_count(struct net_device *ndev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(fec_stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/*
+ * pfe_eth_gemac_reglen - Return the length of the register structure.
+ *
+ */
+static int pfe_eth_gemac_reglen(struct net_device *ndev)
+{
+	pr_info("%s()\n", __func__);
+	return (sizeof(gemac_regs) / sizeof(u32));
+}
+
+/*
+ * pfe_eth_gemac_get_regs - Return the gemac register structure.
+ *
+ */
+static void pfe_eth_gemac_get_regs(struct net_device *ndev, struct ethtool_regs
+					*regs, void *regbuf)
+{
+	int i;
+
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+	u32 *buf = (u32 *)regbuf;
+
+	pr_info("%s()\n", __func__);
+	for (i = 0; i < sizeof(gemac_regs) / sizeof(u32); i++)
+		buf[i] = readl(priv->EMAC_baseaddr + gemac_regs[i]);
+}
+
+/*
+ * pfe_eth_set_wol - Set the magic packet option, in WoL register.
+ *
+ */
+static int pfe_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EOPNOTSUPP;
+
+	/* for MTIP we store wol->wolopts */
+	priv->wol = wol->wolopts;
+
+	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
+
+	return 0;
+}
+
+/*
+ *
+ * pfe_eth_get_wol - Get the WoL options.
+ *
+ */
+static void pfe_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo
+				*wol)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+	wol->supported = WAKE_MAGIC;
+	wol->wolopts = 0;
+
+	if (priv->wol & WAKE_MAGIC)
+		wol->wolopts = WAKE_MAGIC;
+
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+/*
+ * pfe_eth_get_drvinfo - Fills in the drvinfo structure with some basic info
+ *
+ */
+static void pfe_eth_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo
+				*drvinfo)
+{
+	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+	strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
+}
+
+/*
+ * pfe_eth_set_settings - Used to send commands to PHY.
+ *
+ */
+static int pfe_eth_set_settings(struct net_device *ndev,
+				const struct ethtool_link_ksettings *cmd)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+	struct phy_device *phydev = priv->phydev;
+
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_ethtool_ksettings_set(phydev, cmd);
+}
+ * pfe_eth_get_settings - Return the current settings in the
+ * ethtool_link_ksettings structure.
+ *
+ */
+static int pfe_eth_get_settings(struct net_device *ndev,
+				struct ethtool_link_ksettings *cmd)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+	struct phy_device *phydev = priv->phydev;
+
+	if (!phydev)
+		return -ENODEV;
+
+	phy_ethtool_ksettings_get(phydev, cmd);
+
+	return 0;
+}
+
+/*
+ * pfe_eth_get_msglevel - Gets the debug message mask.
+ *
+ */
+static uint32_t pfe_eth_get_msglevel(struct net_device *ndev)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+	return priv->msg_enable;
+}
+
+/*
+ * pfe_eth_set_msglevel - Sets the debug message mask.
+ *
+ */
+static void pfe_eth_set_msglevel(struct net_device *ndev, uint32_t data)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+	priv->msg_enable = data;
+}
+
+#define HIF_RX_COAL_MAX_CLKS		(~(1 << 31))
+#define HIF_RX_COAL_CLKS_PER_USEC	(pfe->ctrl.sys_clk / 1000)
+#define HIF_RX_COAL_MAX_USECS		(HIF_RX_COAL_MAX_CLKS	/ \
+						HIF_RX_COAL_CLKS_PER_USEC)
+
+/*
+ * pfe_eth_set_coalesce - Sets rx interrupt coalescing timer.
+ *
+ */
+static int pfe_eth_set_coalesce(struct net_device *ndev,
+				struct ethtool_coalesce *ec)
+{
+	if (ec->rx_coalesce_usecs > HIF_RX_COAL_MAX_USECS)
+		return -EINVAL;
+
+	if (!ec->rx_coalesce_usecs) {
+		writel(0, HIF_INT_COAL);
+		return 0;
+	}
+
+	writel((ec->rx_coalesce_usecs * HIF_RX_COAL_CLKS_PER_USEC) |
+			HIF_INT_COAL_ENABLE, HIF_INT_COAL);
+
+	return 0;
+}
+
+/*
+ * pfe_eth_get_coalesce - Gets rx interrupt coalescing timer value.
+ *
+ */
+static int pfe_eth_get_coalesce(struct net_device *ndev,
+				struct ethtool_coalesce *ec)
+{
+	int reg_val = readl(HIF_INT_COAL);
+
+	if (reg_val & HIF_INT_COAL_ENABLE)
+		ec->rx_coalesce_usecs = (reg_val & HIF_RX_COAL_MAX_CLKS) /
+						HIF_RX_COAL_CLKS_PER_USEC;
+	else
+		ec->rx_coalesce_usecs = 0;
+
+	return 0;
+}
+
+/*
+ * pfe_eth_set_pauseparam - Sets pause parameters
+ *
+ */
+static int pfe_eth_set_pauseparam(struct net_device *ndev,
+				  struct ethtool_pauseparam *epause)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+	if (epause->tx_pause != epause->rx_pause) {
+		netdev_info(ndev,
+			    "hardware only support enable/disable both tx and rx\n");
+		return -EINVAL;
+	}
+
+	priv->pause_flag = 0;
+	priv->pause_flag |= epause->rx_pause ? PFE_PAUSE_FLAG_ENABLE : 0;
+	priv->pause_flag |= epause->autoneg ? PFE_PAUSE_FLAG_AUTONEG : 0;
+
+	if (epause->rx_pause || epause->autoneg) {
+		gemac_enable_pause_rx(priv->EMAC_baseaddr);
+		writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) |
+					EGPI_PAUSE_ENABLE),
+				priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
+		if (priv->phydev) {
+			priv->phydev->supported |= ADVERTISED_Pause |
+							ADVERTISED_Asym_Pause;
+			priv->phydev->advertising |= ADVERTISED_Pause |
+							ADVERTISED_Asym_Pause;
+		}
+	} else {
+		gemac_disable_pause_rx(priv->EMAC_baseaddr);
+		writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) &
+					~EGPI_PAUSE_ENABLE),
+				priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
+		if (priv->phydev) {
+			priv->phydev->supported &= ~(ADVERTISED_Pause |
+							ADVERTISED_Asym_Pause);
+			priv->phydev->advertising &= ~(ADVERTISED_Pause |
+							ADVERTISED_Asym_Pause);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * pfe_eth_get_pauseparam - Gets pause parameters
+ *
+ */
+static void pfe_eth_get_pauseparam(struct net_device *ndev,
+				   struct ethtool_pauseparam *epause)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+	epause->autoneg = (priv->pause_flag & PFE_PAUSE_FLAG_AUTONEG) != 0;
+	epause->tx_pause = (priv->pause_flag & PFE_PAUSE_FLAG_ENABLE) != 0;
+	epause->rx_pause = epause->tx_pause;
+}
+
+/*
+ * pfe_eth_get_hash
+ */
+#define PFE_HASH_BITS	6		/* #bits in hash */
+#define CRC32_POLY	0xEDB88320
+
+static int pfe_eth_get_hash(u8 *addr)
+{
+	unsigned int i, bit, data, crc, hash;
+
+	/* calculate crc32 value of mac address */
+	crc = 0xffffffff;
+
+	for (i = 0; i < 6; i++) {
+		data = addr[i];
+		for (bit = 0; bit < 8; bit++, data >>= 1) {
+			crc = (crc >> 1) ^
+				(((crc ^ data) & 1) ? CRC32_POLY : 0);
+		}
+	}
+
+	/*
+	 * only upper 6 bits (PFE_HASH_BITS) are used
+	 * which point to specific bit in the hash registers
+	 */
+	hash = (crc >> (32 - PFE_HASH_BITS)) & 0x3f;
+
+	return hash;
+}
+
+const struct ethtool_ops pfe_ethtool_ops = {
+	.get_drvinfo = pfe_eth_get_drvinfo,
+	.get_regs_len = pfe_eth_gemac_reglen,
+	.get_regs = pfe_eth_gemac_get_regs,
+	.get_link = ethtool_op_get_link,
+	.get_wol  = pfe_eth_get_wol,
+	.set_wol  = pfe_eth_set_wol,
+	.set_pauseparam = pfe_eth_set_pauseparam,
+	.get_pauseparam = pfe_eth_get_pauseparam,
+	.get_strings = pfe_eth_gstrings,
+	.get_sset_count = pfe_eth_stats_count,
+	.get_ethtool_stats = pfe_eth_fill_stats,
+	.get_msglevel = pfe_eth_get_msglevel,
+	.set_msglevel = pfe_eth_set_msglevel,
+	.set_coalesce = pfe_eth_set_coalesce,
+	.get_coalesce = pfe_eth_get_coalesce,
+	.get_link_ksettings = pfe_eth_get_settings,
+	.set_link_ksettings = pfe_eth_set_settings,
+};
+
+/* pfe_eth_mdio_reset
+ */
+int pfe_eth_mdio_reset(struct mii_bus *bus)
+{
+	struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
+	u32 phy_speed;
+
+	mutex_lock(&bus->mdio_lock);
+
+	/*
+	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
+	 *
+	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
+	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.
+	 */
+	phy_speed = (DIV_ROUND_UP((pfe->ctrl.sys_clk * 1000), 4000000)
+		     << EMAC_MII_SPEED_SHIFT);
+	phy_speed |= EMAC_HOLDTIME(0x5);
+	__raw_writel(phy_speed, priv->mdio_base + EMAC_MII_CTRL_REG);
+
+	mutex_unlock(&bus->mdio_lock);
+
+	return 0;
+}
+
+/* pfe_eth_mdio_timeout
+ *
+ */
+static int pfe_eth_mdio_timeout(struct pfe_mdio_priv_s *priv, int timeout)
+{
+	while (!(__raw_readl(priv->mdio_base + EMAC_IEVENT_REG) &
+			EMAC_IEVENT_MII)) {
+		if (timeout-- <= 0)
+			return -1;
+		usleep_range(10, 20);
+	}
+	__raw_writel(EMAC_IEVENT_MII, priv->mdio_base + EMAC_IEVENT_REG);
+	return 0;
+}
+
+static int pfe_eth_mdio_mux(u8 muxval)
+{
+	struct i2c_adapter *a;
+	struct i2c_msg msg;
+	unsigned char buf[2];
+	int ret;
+
+	a = i2c_get_adapter(0);
+	if (!a)
+		return -ENODEV;
+
+	/* write the mux value to register 0x54 of the device at i2c 0x66 */
+	buf[0] = 0x54;			/* reg number */
+	buf[1] = (muxval << 6) | 0x3;	/* data */
+	msg.addr = 0x66;
+	msg.buf = buf;
+	msg.len = 2;
+	msg.flags = 0;
+	ret = i2c_transfer(a, &msg, 1);
+	i2c_put_adapter(a);
+	if (ret != 1)
+		return -ENODEV;
+	return 0;
+}
+
+static int pfe_eth_mdio_write_addr(struct mii_bus *bus, int mii_id,
+				   int dev_addr, int regnum)
+{
+	struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
+
+	__raw_writel(EMAC_MII_DATA_PA(mii_id) |
+		     EMAC_MII_DATA_RA(dev_addr) |
+		     EMAC_MII_DATA_TA | EMAC_MII_DATA(regnum),
+		     priv->mdio_base + EMAC_MII_DATA_REG);
+
+	if (pfe_eth_mdio_timeout(priv, EMAC_MDIO_TIMEOUT)) {
+		dev_err(&bus->dev, "phy MDIO address write timeout\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+			      u16 value)
+{
+	struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
+
+	/* To access external PHYs on QDS board, the mux needs configuring */
+	if ((mii_id) && (pfe->mdio_muxval[mii_id]))
+		pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
+
+	if (regnum & MII_ADDR_C45) {
+		pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
+					regnum & 0xffff);
+		__raw_writel(EMAC_MII_DATA_OP_CL45_WR |
+			     EMAC_MII_DATA_PA(mii_id) |
+			     EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
+			     EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
+			     priv->mdio_base + EMAC_MII_DATA_REG);
+	} else {
+		/* start a write op */
+		__raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_WR |
+			     EMAC_MII_DATA_PA(mii_id) |
+			     EMAC_MII_DATA_RA(regnum) |
+			     EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
+			     priv->mdio_base + EMAC_MII_DATA_REG);
+	}
+
+	if (pfe_eth_mdio_timeout(priv, EMAC_MDIO_TIMEOUT)) {
+		dev_err(&bus->dev, "%s: phy MDIO write timeout\n", __func__);
+		return -1;
+	}
+	return 0;
+}
+
+static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+	struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
+	u16 value = 0;
+
+	/* To access external PHYs on QDS board, the mux needs configuring */
+	if ((mii_id) && (pfe->mdio_muxval[mii_id]))
+		pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
+
+	if (regnum & MII_ADDR_C45) {
+		pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
+					regnum & 0xffff);
+		__raw_writel(EMAC_MII_DATA_OP_CL45_RD |
+			     EMAC_MII_DATA_PA(mii_id) |
+			     EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
+			     EMAC_MII_DATA_TA,
+			     priv->mdio_base + EMAC_MII_DATA_REG);
+	} else {
+		/* start a read op */
+		__raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
+			     EMAC_MII_DATA_PA(mii_id) |
+			     EMAC_MII_DATA_RA(regnum) |
+			     EMAC_MII_DATA_TA, priv->mdio_base +
+			     EMAC_MII_DATA_REG);
+	}
+
+	if (pfe_eth_mdio_timeout(priv, EMAC_MDIO_TIMEOUT)) {
+		dev_err(&bus->dev, "%s: phy MDIO read timeout\n", __func__);
+		return -1;
+	}
+
+	value = EMAC_MII_DATA(__raw_readl(priv->mdio_base +
+					  EMAC_MII_DATA_REG));
+	return value;
+}
+
+static int pfe_eth_mdio_init(struct pfe *pfe,
+			     struct ls1012a_pfe_platform_data *pfe_info,
+			     int ii)
+{
+	struct pfe_mdio_priv_s *priv = NULL;
+	struct ls1012a_mdio_platform_data *mdio_info;
+	struct mii_bus *bus;
+	struct device_node *mdio_node;
+	int rc = 0;
+
+	mdio_info = (struct ls1012a_mdio_platform_data *)
+			pfe_info->ls1012a_mdio_pdata;
+	mdio_info->id = ii;
+
+	bus = mdiobus_alloc_size(sizeof(struct pfe_mdio_priv_s));
+	if (!bus) {
+		pr_err("mdiobus_alloc() failed\n");
+		rc = -ENOMEM;
+		goto err_mdioalloc;
+	}
+
+	bus->name = "ls1012a MDIO Bus";
+	snprintf(bus->id, MII_BUS_ID_SIZE, "ls1012a-%x", mdio_info->id);
+
+	bus->read = &pfe_eth_mdio_read;
+	bus->write = &pfe_eth_mdio_write;
+	bus->reset = &pfe_eth_mdio_reset;
+	bus->parent = pfe->dev;
+	bus->phy_mask = mdio_info->phy_mask;
+	bus->irq[0] = mdio_info->irq[0];
+	priv = bus->priv;
+	priv->mdio_base = cbus_emac_base[ii];
+
+	priv->mdc_div = mdio_info->mdc_div;
+	if (!priv->mdc_div)
+		priv->mdc_div = 64;
+	dev_info(bus->parent, "%s: mdc_div: %d, phy_mask: %x\n",
+		 __func__, priv->mdc_div, bus->phy_mask);
+
+	mdio_node = of_get_child_by_name(pfe->dev->of_node, "mdio");
+	if ((mdio_info->id == 0) && mdio_node) {
+		rc = of_mdiobus_register(bus, mdio_node);
+		of_node_put(mdio_node);
+	} else {
+		rc = mdiobus_register(bus);
+	}
+
+	if (rc) {
+		dev_err(bus->parent, "mdiobus_register(%s) failed\n",
+			bus->name);
+		goto err_mdioregister;
+	}
+
+	priv->mii_bus = bus;
+	pfe->mdio.mdio_priv[ii] = priv;
+
+	pfe_eth_mdio_reset(bus);
+
+	return 0;
+
+err_mdioregister:
+	mdiobus_free(bus);
+err_mdioalloc:
+	return rc;
+}
+
+/* pfe_eth_mdio_exit
+ */
+static void pfe_eth_mdio_exit(struct pfe *pfe,
+			      int ii)
+{
+	struct pfe_mdio_priv_s *mdio_priv = pfe->mdio.mdio_priv[ii];
+	struct mii_bus *bus = mdio_priv->mii_bus;
+
+	if (!bus)
+		return;
+	mdiobus_unregister(bus);
+	mdiobus_free(bus);
+}
+
+/* pfe_get_phydev_speed
+ */
+static int pfe_get_phydev_speed(struct phy_device *phydev)
+{
+	switch (phydev->speed) {
+	case 10:
+		return SPEED_10M;
+	case 100:
+		return SPEED_100M;
+	case 1000:
+	default:
+		return SPEED_1000M;
+	}
+}
+
+/* pfe_set_rgmii_speed
+ */
+#define RGMIIPCR	0x434
+/* RGMIIPCR bit definitions */
+#define SCFG_RGMIIPCR_EN_AUTO		(0x00000008)
+#define SCFG_RGMIIPCR_SETSP_1000M	(0x00000004)
+#define SCFG_RGMIIPCR_SETSP_100M	(0x00000000)
+#define SCFG_RGMIIPCR_SETSP_10M		(0x00000002)
+#define SCFG_RGMIIPCR_SETFD		(0x00000001)
+
+#define MDIOSELCR	0x484
+#define MDIOSEL_SERDES	0x0
+#define MDIOSEL_EXTPHY	0x80000000
+
+static void pfe_set_rgmii_speed(struct phy_device *phydev)
+{
+	u32 rgmii_pcr;
+
+	regmap_read(pfe->scfg, RGMIIPCR, &rgmii_pcr);
+	rgmii_pcr &= ~(SCFG_RGMIIPCR_SETSP_1000M | SCFG_RGMIIPCR_SETSP_10M);
+
+	switch (phydev->speed) {
+	case 10:
+		rgmii_pcr |= SCFG_RGMIIPCR_SETSP_10M;
+		break;
+	case 1000:
+		rgmii_pcr |= SCFG_RGMIIPCR_SETSP_1000M;
+		break;
+	case 100:
+	default:
+		/* Default is 100M */
+		break;
+	}
+	regmap_write(pfe->scfg, RGMIIPCR, rgmii_pcr);
+}
+
+/* pfe_get_phydev_duplex
+ */
+static int pfe_get_phydev_duplex(struct phy_device *phydev)
+{
+	/*return (phydev->duplex == DUPLEX_HALF) ? DUP_HALF:DUP_FULL ; */
+	return DUPLEX_FULL;
+}
+
+/* pfe_eth_adjust_link
+ */
+static void pfe_eth_adjust_link(struct net_device *ndev)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+	unsigned long flags;
+	struct phy_device *phydev = priv->phydev;
+	int new_state = 0;
+
+	netif_info(priv, drv, ndev, "%s\n", __func__);
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	if (phydev->link) {
+		/*
+		 * Now we make sure that we can be in full duplex mode.
+		 * If not, we operate in half-duplex mode.
+		 */
+		if (phydev->duplex != priv->oldduplex) {
+			new_state = 1;
+			gemac_set_duplex(priv->EMAC_baseaddr,
+					 pfe_get_phydev_duplex(phydev));
+			priv->oldduplex = phydev->duplex;
+		}
+
+		if (phydev->speed != priv->oldspeed) {
+			new_state = 1;
+			gemac_set_speed(priv->EMAC_baseaddr,
+					pfe_get_phydev_speed(phydev));
+			if (priv->einfo->mii_config ==
+					PHY_INTERFACE_MODE_RGMII_TXID)
+				pfe_set_rgmii_speed(phydev);
+			priv->oldspeed = phydev->speed;
+		}
+
+		if (!priv->oldlink) {
+			new_state = 1;
+			priv->oldlink = 1;
+		}
+
+	} else if (priv->oldlink) {
+		new_state = 1;
+		priv->oldlink = 0;
+		priv->oldspeed = 0;
+		priv->oldduplex = -1;
+	}
+
+	if (new_state && netif_msg_link(priv))
+		phy_print_status(phydev);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* Now, dump the details to the cdev.
+	 * XXX: Locking would be required? (uniprocess arch)
+	 * Or, maybe move it in spinlock above
+	 */
+	if (us && priv->einfo->gem_id < PFE_CDEV_ETH_COUNT) {
+		pr_debug("Changing link state from (%u) to (%u) for ID=(%u)\n",
+			 link_states[priv->einfo->gem_id].state,
+			 phydev->link,
+			 priv->einfo->gem_id);
+		link_states[priv->einfo->gem_id].phy_id = priv->einfo->gem_id;
+		link_states[priv->einfo->gem_id].state = phydev->link;
+	}
+}
+
+/* pfe_phy_exit
+ */
+static void pfe_phy_exit(struct net_device *ndev)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+	netif_info(priv, drv, ndev, "%s\n", __func__);
+
+	phy_disconnect(priv->phydev);
+	priv->phydev = NULL;
+}
+
+/* pfe_eth_stop
+ */
+static void pfe_eth_stop(struct net_device *ndev, int wake)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+	netif_info(priv, drv, ndev, "%s\n", __func__);
+
+	if (wake) {
+		gemac_tx_disable(priv->EMAC_baseaddr);
+	} else {
+		gemac_disable(priv->EMAC_baseaddr);
+		gpi_disable(priv->GPI_baseaddr);
+
+		if (priv->phydev)
+			phy_stop(priv->phydev);
+	}
+}
+
+/* pfe_eth_start
+ */
+static int pfe_eth_start(struct pfe_eth_priv_s *priv)
+{
+	netif_info(priv, drv, priv->ndev, "%s\n", __func__);
+
+	if (priv->phydev)
+		phy_start(priv->phydev);
+
+	gpi_enable(priv->GPI_baseaddr);
+	gemac_enable(priv->EMAC_baseaddr);
+
+	return 0;
+}
+
+/*
+ * Configure the on-chip SerDes through MDIO
+ */
+static void ls1012a_configure_serdes(struct net_device *ndev)
+{
+	struct pfe_eth_priv_s *eth_priv = netdev_priv(ndev);
+	struct pfe_mdio_priv_s *mdio_priv = pfe->mdio.mdio_priv[eth_priv->id];
+	int sgmii_2500 = 0;
+	struct mii_bus *bus = mdio_priv->mii_bus;
+	u16 value = 0;
+
+	if (eth_priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII)
+		sgmii_2500 = 1;
+
+	netif_info(eth_priv, drv, ndev, "%s\n", __func__);
+	/* PCS configuration done with corresponding GEMAC */
+
+	pfe_eth_mdio_read(bus, 0, MDIO_SGMII_CR);
+	pfe_eth_mdio_read(bus, 0, MDIO_SGMII_SR);
+
+	pfe_eth_mdio_write(bus, 0, MDIO_SGMII_CR, SGMII_CR_RST);
+
+	if (sgmii_2500) {
+		pfe_eth_mdio_write(bus, 0, MDIO_SGMII_IF_MODE, SGMII_SPEED_1GBPS
+				   | SGMII_EN);
+		pfe_eth_mdio_write(bus, 0, MDIO_SGMII_DEV_ABIL_SGMII,
+				   SGMII_DEV_ABIL_ACK | SGMII_DEV_ABIL_SGMII);
+		pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_L, 0xa120);
+		pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_H, 0x7);
+		/* Autonegotiation needs to be disabled for 2.5G SGMII mode */
+		value = SGMII_CR_FD | SGMII_CR_SPEED_SEL1_1G;
+		pfe_eth_mdio_write(bus, 0, MDIO_SGMII_CR, value);
+	} else {
+		pfe_eth_mdio_write(bus, 0, MDIO_SGMII_IF_MODE,
+				   SGMII_SPEED_1GBPS
+				   | SGMII_USE_SGMII_AN
+				   | SGMII_EN);
+		pfe_eth_mdio_write(bus, 0, MDIO_SGMII_DEV_ABIL_SGMII,
+				   SGMII_DEV_ABIL_EEE_CLK_STP_EN
+				   | 0xa0
+				   | SGMII_DEV_ABIL_SGMII);
+		pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_L, 0x400);
+		pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_H, 0x0);
+		value = SGMII_CR_AN_EN | SGMII_CR_FD | SGMII_CR_SPEED_SEL1_1G;
+		pfe_eth_mdio_write(bus, 0, MDIO_SGMII_CR, value);
+	}
+}
+
+/*
+ * pfe_phy_init
+ *
+ */
+static int pfe_phy_init(struct net_device *ndev)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+	struct phy_device *phydev;
+	char phy_id[MII_BUS_ID_SIZE + 3];
+	char bus_id[MII_BUS_ID_SIZE];
+	phy_interface_t interface;
+
+	priv->oldlink = 0;
+	priv->oldspeed = 0;
+	priv->oldduplex = -1;
+
+	snprintf(bus_id, MII_BUS_ID_SIZE, "ls1012a-%d", 0);
+	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+		 priv->einfo->phy_id);
+	netif_info(priv, drv, ndev, "%s: %s\n", __func__, phy_id);
+	interface = priv->einfo->mii_config;
+	if ((interface == PHY_INTERFACE_MODE_SGMII) ||
+	    (interface == PHY_INTERFACE_MODE_2500SGMII)) {
+		/* Configure SGMII PCS */
+		if (pfe->scfg) {
+			/* Config MDIO from serdes */
+			regmap_write(pfe->scfg, MDIOSELCR, MDIOSEL_SERDES);
+		}
+		ls1012a_configure_serdes(ndev);
+	}
+
+	if (pfe->scfg) {
+		/* Config MDIO from PAD */
+		regmap_write(pfe->scfg, MDIOSELCR, MDIOSEL_EXTPHY);
+	}
+
+	priv->oldlink = 0;
+	priv->oldspeed = 0;
+	priv->oldduplex = -1;
+	pr_info("%s interface %x\n", __func__, interface);
+
+	if (priv->phy_node) {
+		phydev = of_phy_connect(ndev, priv->phy_node,
+					pfe_eth_adjust_link, 0,
+					priv->einfo->mii_config);
+		if (!(phydev)) {
+			netdev_err(ndev, "Unable to connect to phy\n");
+			return -ENODEV;
+		}
+
+	} else {
+		phydev = phy_connect(ndev, phy_id,
+				     &pfe_eth_adjust_link, interface);
+		if (IS_ERR(phydev)) {
+			netdev_err(ndev, "Unable to connect to phy\n");
+			return PTR_ERR(phydev);
+		}
+	}
+
+	priv->phydev = phydev;
+	phydev->irq = PHY_POLL;
+
+	return 0;
+}
+
+/* pfe_gemac_init
+ */
+static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
+{
+	struct gemac_cfg cfg;
+
+	netif_info(priv, ifup, priv->ndev, "%s\n", __func__);
+
+	cfg.speed = SPEED_1000M;
+	cfg.duplex = DUPLEX_FULL;
+
+	gemac_set_config(priv->EMAC_baseaddr, &cfg);
+	gemac_allow_broadcast(priv->EMAC_baseaddr);
+	gemac_enable_1536_rx(priv->EMAC_baseaddr);
+	gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
+	gemac_enable_pause_rx(priv->EMAC_baseaddr);
+	gemac_set_bus_width(priv->EMAC_baseaddr, 64);
+
+	/* GEM will perform checksum verifications */
+	if (priv->ndev->features & NETIF_F_RXCSUM)
+		gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
+	else
+		gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
+
+	return 0;
+}
+
+/* pfe_eth_event_handler
+ */
+static int pfe_eth_event_handler(void *data, int event, int qno)
+{
+	struct pfe_eth_priv_s *priv = data;
+
+	switch (event) {
+	case EVENT_RX_PKT_IND:
+
+		if (qno == 0) {
+			if (napi_schedule_prep(&priv->high_napi)) {
+				netif_info(priv, intr, priv->ndev,
+					   "%s: schedule high prio poll\n"
+					   , __func__);
+
+#ifdef PFE_ETH_NAPI_STATS
+				priv->napi_counters[NAPI_SCHED_COUNT]++;
+#endif
+
+				__napi_schedule(&priv->high_napi);
+			}
+		} else if (qno == 1) {
+			if (napi_schedule_prep(&priv->low_napi)) {
+				netif_info(priv, intr, priv->ndev,
+					   "%s: schedule low prio poll\n"
+					   , __func__);
+
+#ifdef PFE_ETH_NAPI_STATS
+				priv->napi_counters[NAPI_SCHED_COUNT]++;
+#endif
+				__napi_schedule(&priv->low_napi);
+			}
+		} else if (qno == 2) {
+			if (napi_schedule_prep(&priv->lro_napi)) {
+				netif_info(priv, intr, priv->ndev,
+					   "%s: schedule lro prio poll\n"
+					   , __func__);
+
+#ifdef PFE_ETH_NAPI_STATS
+				priv->napi_counters[NAPI_SCHED_COUNT]++;
+#endif
+				__napi_schedule(&priv->lro_napi);
+			}
+		}
+
+		break;
+
+	case EVENT_TXDONE_IND:
+		pfe_eth_flush_tx(priv);
+		hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
+		break;
+	case EVENT_HIGH_RX_WM:
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int pfe_eth_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+	ndev->mtu = new_mtu;
+	new_mtu += ETH_HLEN + ETH_FCS_LEN;
+	gemac_set_rx_max_fl(priv->EMAC_baseaddr, new_mtu);
+
+	return 0;
+}
+
+/* pfe_eth_open
+ */
+static int pfe_eth_open(struct net_device *ndev)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+	struct hif_client_s *client;
+	int rc;
+
+	netif_info(priv, ifup, ndev, "%s\n", __func__);
+
+	/* Register client driver with HIF */
+	client = &priv->client;
+	memset(client, 0, sizeof(*client));
+	client->id = PFE_CL_GEM0 + priv->id;
+	client->tx_qn = emac_txq_cnt;
+	client->rx_qn = EMAC_RXQ_CNT;
+	client->priv = priv;
+	client->pfe = priv->pfe;
+	client->event_handler = pfe_eth_event_handler;
+
+	client->tx_qsize = EMAC_TXQ_DEPTH;
+	client->rx_qsize = EMAC_RXQ_DEPTH;
+
+	rc = hif_lib_client_register(client);
+	if (rc) {
+		netdev_err(ndev, "%s: hif_lib_client_register(%d) failed\n",
+			   __func__, client->id);
+		goto err0;
+	}
+
+	netif_info(priv, drv, ndev, "%s: registered client: %p\n", __func__,
+		   client);
+
+	pfe_gemac_init(priv);
+
+	if (!is_valid_ether_addr(ndev->dev_addr)) {
+		netdev_err(ndev, "%s: invalid MAC address\n", __func__);
+		rc = -EADDRNOTAVAIL;
+		goto err1;
+	}
+
+	gemac_set_laddrN(priv->EMAC_baseaddr,
+			 (struct pfe_mac_addr *)ndev->dev_addr, 1);
+
+	napi_enable(&priv->high_napi);
+	napi_enable(&priv->low_napi);
+	napi_enable(&priv->lro_napi);
+
+	rc = pfe_eth_start(priv);
+
+	netif_tx_wake_all_queues(ndev);
+
+	return rc;
+
+err1:
+	hif_lib_client_unregister(&priv->client);
+
+err0:
+	return rc;
+}
+
+/*
+ * pfe_eth_shutdown
+ */
+int pfe_eth_shutdown(struct net_device *ndev, int wake)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+	int i, qstatus;
+	unsigned long next_poll = jiffies + 1, end = jiffies +
+				  (TX_POLL_TIMEOUT_MS * HZ) / 1000;
+	int tx_pkts, prv_tx_pkts;
+
+	netif_info(priv, ifdown, ndev, "%s\n", __func__);
+
+	for (i = 0; i < emac_txq_cnt; i++)
+		hrtimer_cancel(&priv->fast_tx_timeout[i].timer);
+
+	netif_tx_stop_all_queues(ndev);
+
+	do {
+		tx_pkts = 0;
+		pfe_eth_flush_tx(priv);
+
+		for (i = 0; i < emac_txq_cnt; i++)
+			tx_pkts += hif_lib_tx_pending(&priv->client, i);
+
+		if (tx_pkts) {
+			/* Don't wait forever, break if we cross the max
+			 * timeout
+			 */
+			if (time_after(jiffies, end)) {
+				pr_err(
+					"(%s)Tx is not complete after %dmsec\n",
+					ndev->name, TX_POLL_TIMEOUT_MS);
+				break;
+			}
+
+			pr_info("%s : (%s) Waiting for tx packets to free. Pending tx pkts = %d.\n"
+				, __func__, ndev->name, tx_pkts);
+			if (need_resched())
+				schedule();
+		}
+
+	} while (tx_pkts);
+
+	end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
+
+	prv_tx_pkts = tmu_pkts_processed(priv->id);
+	/*
+	 * Wait till the TMU transmits all pending packets.
+	 * Poll tmu_qstatus and the packets processed by the TMU every 10ms.
+	 * Consider the TMU busy if we see a TMU queue pending or any packets
+	 * processed by the TMU.
+	 */
+	while (1) {
+		if (time_after(jiffies, next_poll)) {
+			tx_pkts = tmu_pkts_processed(priv->id);
+			qstatus = tmu_qstatus(priv->id) & 0x7ffff;
+
+			if (!qstatus && (tx_pkts == prv_tx_pkts))
+				break;
+			/* Don't wait forever, break if we cross max
+			 * timeout(TX_POLL_TIMEOUT_MS)
+			 */
+			if (time_after(jiffies, end)) {
+				pr_err("TMU%d is busy after %dmsec\n",
+				       priv->id, TX_POLL_TIMEOUT_MS);
+				break;
+			}
+			prv_tx_pkts = tx_pkts;
+			next_poll++;
+		}
+		if (need_resched())
+			schedule();
+	}
+	/* Wait a little longer for any in-flight packets to finish */
+	next_poll = jiffies + 1;
+	while (1) {
+		if (time_after(jiffies, next_poll))
+			break;
+		if (need_resched())
+			schedule();
+	}
+
+	pfe_eth_stop(ndev, wake);
+
+	napi_disable(&priv->lro_napi);
+	napi_disable(&priv->low_napi);
+	napi_disable(&priv->high_napi);
+
+	hif_lib_client_unregister(&priv->client);
+
+	return 0;
+}
+
+/* pfe_eth_close
+ *
+ */
+static int pfe_eth_close(struct net_device *ndev)
+{
+	pfe_eth_shutdown(ndev, 0);
+
+	return 0;
+}
+
+/* pfe_eth_suspend
+ *
+ * return value: 1 if the netdevice is configured to wake up the system,
+ * 0 otherwise
+ */
+int pfe_eth_suspend(struct net_device *ndev)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+	int retval = 0;
+
+	if (priv->wol) {
+		gemac_set_wol(priv->EMAC_baseaddr, priv->wol);
+		retval = 1;
+	}
+	pfe_eth_shutdown(ndev, priv->wol);
+
+	return retval;
+}
+
+/* pfe_eth_resume
+ *
+ */
+int pfe_eth_resume(struct net_device *ndev)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+	if (priv->wol)
+		gemac_set_wol(priv->EMAC_baseaddr, 0);
+	gemac_tx_enable(priv->EMAC_baseaddr);
+
+	return pfe_eth_open(ndev);
+}
+
+/* pfe_eth_get_queuenum
+ */
+static int pfe_eth_get_queuenum(struct pfe_eth_priv_s *priv, struct sk_buff
+				*skb)
+{
+	int queuenum = 0;
+	unsigned long flags;
+
+	/* Get the Fast Path queue number */
+	/*
+	 * Use conntrack mark (if conntrack exists), then packet mark (if any),
+	 * then fall back to the default
+	 */
+#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
+	if (skb->_nfct) {
+		enum ip_conntrack_info cinfo;
+		struct nf_conn *ct;
+
+		ct = nf_ct_get(skb, &cinfo);
+
+		if (ct) {
+			u32 connmark;
+
+			connmark = ct->mark;
+
+			if ((connmark & 0x80000000) && priv->id != 0)
+				connmark >>= 16;
+
+			queuenum = connmark & EMAC_QUEUENUM_MASK;
+		}
+	} else {/* continued after #endif ... */
+#endif
+		if (skb->mark) {
+			queuenum = skb->mark & EMAC_QUEUENUM_MASK;
+		} else {
+			spin_lock_irqsave(&priv->lock, flags);
+			queuenum = priv->default_priority & EMAC_QUEUENUM_MASK;
+			spin_unlock_irqrestore(&priv->lock, flags);
+		}
+#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
+	}
+#endif
+	return queuenum;
+}
+
+/* pfe_eth_might_stop_tx
+ *
+ */
+static int pfe_eth_might_stop_tx(struct pfe_eth_priv_s *priv, int queuenum,
+				 struct netdev_queue *tx_queue,
+				 unsigned int n_desc,
+				 unsigned int n_segs)
+{
+	ktime_t kt;
+	int tried = 0;
+
+try_again:
+	if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc) ||
+		     (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) ||
+		     (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) {
+		if (!tried) {
+			__hif_lib_update_credit(&priv->client, queuenum);
+			tried = 1;
+			goto try_again;
+		}
+#ifdef PFE_ETH_TX_STATS
+		if (__hif_tx_avail(&pfe->hif) < n_desc) {
+			priv->stop_queue_hif[queuenum]++;
+		} else if (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) {
+			priv->stop_queue_hif_client[queuenum]++;
+		} else if (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) <
+			n_segs) {
+			priv->stop_queue_credit[queuenum]++;
+		}
+		priv->stop_queue_total[queuenum]++;
+#endif
+		netif_tx_stop_queue(tx_queue);
+
+		kt = ktime_set(0, LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS *
+				NSEC_PER_MSEC);
+		hrtimer_start(&priv->fast_tx_timeout[queuenum].timer, kt,
+			      HRTIMER_MODE_REL);
+		return -1;
+	} else {
+		return 0;
+	}
+}
+
+#define SA_MAX_OP 2
+/* pfe_hif_send_packet
+ *
+ * At this level if TX fails we drop the packet
+ */
+static void pfe_hif_send_packet(struct sk_buff *skb, struct pfe_eth_priv_s
+				*priv, int queuenum)
+{
+	struct skb_shared_info *sh = skb_shinfo(skb);
+	unsigned int nr_frags;
+	u32 ctrl = 0;
+
+	netif_info(priv, tx_queued, priv->ndev, "%s\n", __func__);
+
+	if (skb_is_gso(skb)) {
+		priv->stats.tx_dropped++;
+		return;
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		ctrl = HIF_CTRL_TX_CHECKSUM;
+
+	nr_frags = sh->nr_frags;
+
+	if (nr_frags) {
+		skb_frag_t *f;
+		int i;
+
+		__hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
+				   skb_headlen(skb), ctrl, HIF_FIRST_BUFFER,
+				   skb);
+
+		for (i = 0; i < nr_frags - 1; i++) {
+			f = &sh->frags[i];
+			__hif_lib_xmit_pkt(&priv->client, queuenum,
+					   skb_frag_address(f),
+					   skb_frag_size(f),
+					   0x0, 0x0, skb);
+		}
+
+		f = &sh->frags[i];
+
+		__hif_lib_xmit_pkt(&priv->client, queuenum,
+				   skb_frag_address(f), skb_frag_size(f),
+				   0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
+				   skb);
+
+		netif_info(priv, tx_queued, priv->ndev,
+			   "%s: pkt sent successfully skb:%p nr_frags:%d len:%d\n",
+			   __func__, skb, nr_frags, skb->len);
+	} else {
+		__hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
+				   skb->len, ctrl, HIF_FIRST_BUFFER |
+				   HIF_LAST_BUFFER | HIF_DATA_VALID,
+				   skb);
+		netif_info(priv, tx_queued, priv->ndev,
+			   "%s: pkt sent successfully skb:%p len:%d\n",
+			   __func__, skb, skb->len);
+	}
+	hif_tx_dma_start();
+	priv->stats.tx_packets++;
+	priv->stats.tx_bytes += skb->len;
+	hif_lib_tx_credit_use(pfe, priv->id, queuenum, 1);
+}
+
+/* pfe_eth_flush_txQ
+ */
+static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
+				from_tx, int n_desc)
+{
+	struct sk_buff *skb;
+	struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
+								tx_q_num);
+	unsigned int flags;
+
+	netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
+
+	if (!from_tx)
+		__netif_tx_lock_bh(tx_queue);
+
+	/* Clean HIF and client queue */
+	while ((skb = hif_lib_tx_get_next_complete(&priv->client,
+						   tx_q_num, &flags,
+						   HIF_TX_DESC_NT))) {
+		if (flags & HIF_DATA_VALID)
+			dev_kfree_skb_any(skb);
+	}
+	if (!from_tx)
+		__netif_tx_unlock_bh(tx_queue);
+}
+
+/* pfe_eth_flush_tx
+ */
+static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
+{
+	int ii;
+
+	netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
+
+	for (ii = 0; ii < emac_txq_cnt; ii++) {
+		pfe_eth_flush_txQ(priv, ii, 0, 0);
+		__hif_lib_update_credit(&priv->client, ii);
+	}
+}
+
+void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int
+				*n_segs)
+{
+	struct skb_shared_info *sh = skb_shinfo(skb);
+
+	/* Scattered data */
+	if (sh->nr_frags) {
+		*n_desc = sh->nr_frags + 1;
+		*n_segs = 1;
+	/* Regular case */
+	} else {
+		*n_desc = 1;
+		*n_segs = 1;
+	}
+}
+
+/* pfe_eth_send_packet
+ */
+static int pfe_eth_send_packet(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+	int tx_q_num = skb_get_queue_mapping(skb);
+	int n_desc, n_segs;
+	struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
+								tx_q_num);
+
+	netif_info(priv, tx_queued, ndev, "%s\n", __func__);
+
+	if ((!skb_is_gso(skb)) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ +
+			sizeof(unsigned long)))) {
+		netif_warn(priv, tx_err, priv->ndev, "%s: copying skb\n",
+			   __func__);
+
+		if (pskb_expand_head(skb, (PFE_PKT_HEADER_SZ + sizeof(unsigned
+					long)), 0, GFP_ATOMIC)) {
+			/* No need to re-transmit, no way to recover */
+			kfree_skb(skb);
+			priv->stats.tx_dropped++;
+			return NETDEV_TX_OK;
+		}
+	}
+
+	pfe_tx_get_req_desc(skb, &n_desc, &n_segs);
+
+	hif_tx_lock(&pfe->hif);
+	if (unlikely(pfe_eth_might_stop_tx(priv, tx_q_num, tx_queue, n_desc,
+					   n_segs))) {
+#ifdef PFE_ETH_TX_STATS
+		if (priv->was_stopped[tx_q_num]) {
+			priv->clean_fail[tx_q_num]++;
+			priv->was_stopped[tx_q_num] = 0;
+		}
+#endif
+		hif_tx_unlock(&pfe->hif);
+		return NETDEV_TX_BUSY;
+	}
+
+	pfe_hif_send_packet(skb, priv, tx_q_num);
+
+	hif_tx_unlock(&pfe->hif);
+
+	tx_queue->trans_start = jiffies;
+
+#ifdef PFE_ETH_TX_STATS
+	priv->was_stopped[tx_q_num] = 0;
+#endif
+
+	return NETDEV_TX_OK;
+}
+
+/* pfe_eth_select_queue
+ *
+ */
+static u16 pfe_eth_select_queue(struct net_device *ndev, struct sk_buff *skb,
+				void *accel_priv,
+				select_queue_fallback_t fallback)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+	return pfe_eth_get_queuenum(priv, skb);
+}
+
+/* pfe_eth_get_stats
+ */
+static struct net_device_stats *pfe_eth_get_stats(struct net_device *ndev)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+	netif_info(priv, drv, ndev, "%s\n", __func__);
+
+	return &priv->stats;
+}
+
+/* pfe_eth_set_mac_address
+ */
+static int pfe_eth_set_mac_address(struct net_device *ndev, void *addr)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+	struct sockaddr *sa = addr;
+
+	netif_info(priv, drv, ndev, "%s\n", __func__);
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
+
+	gemac_set_laddrN(priv->EMAC_baseaddr,
+			 (struct pfe_mac_addr *)ndev->dev_addr, 1);
+
+	return 0;
+}
+
+/* pfe_eth_enet_addr_byte_mac
+ */
+int pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
+			       struct pfe_mac_addr *enet_addr)
+{
+	if (!enet_byte_addr || !enet_addr) {
+		return -1;
+
+	} else {
+		enet_addr->bottom = enet_byte_addr[0] |
+			(enet_byte_addr[1] << 8) |
+			(enet_byte_addr[2] << 16) |
+			(enet_byte_addr[3] << 24);
+		enet_addr->top = enet_byte_addr[4] |
+			(enet_byte_addr[5] << 8);
+		return 0;
+	}
+}
+
+/* pfe_eth_set_multi
+ */
+static void pfe_eth_set_multi(struct net_device *ndev)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+	struct pfe_mac_addr hash_addr; /* hash register structure */
+	/* specific mac address register structure */
+	struct pfe_mac_addr spec_addr;
+	int result; /* index into hash register to set */
+	int uc_count = 0;
+	struct netdev_hw_addr *ha;
+
+	if (ndev->flags & IFF_PROMISC) {
+		netif_info(priv, drv, ndev, "entering promiscuous mode\n");
+
+		priv->promisc = 1;
+		gemac_enable_copy_all(priv->EMAC_baseaddr);
+	} else {
+		priv->promisc = 0;
+		gemac_disable_copy_all(priv->EMAC_baseaddr);
+	}
+
+	/* Enable broadcast frame reception if required. */
+	if (ndev->flags & IFF_BROADCAST) {
+		gemac_allow_broadcast(priv->EMAC_baseaddr);
+	} else {
+		netif_info(priv, drv, ndev,
+			   "disabling broadcast frame reception\n");
+
+		gemac_no_broadcast(priv->EMAC_baseaddr);
+	}
+
+	if (ndev->flags & IFF_ALLMULTI) {
+		/* Set the hash to rx all multicast frames */
+		hash_addr.bottom = 0xFFFFFFFF;
+		hash_addr.top = 0xFFFFFFFF;
+		gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
+		netdev_for_each_uc_addr(ha, ndev) {
+			if (uc_count >= MAX_UC_SPEC_ADDR_REG)
+				break;
+			pfe_eth_enet_addr_byte_mac(ha->addr, &spec_addr);
+			gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr,
+					 uc_count + 2);
+			uc_count++;
+		}
+	} else if ((netdev_mc_count(ndev) > 0) || (netdev_uc_count(ndev))) {
+		u8 *addr;
+
+		hash_addr.bottom = 0;
+		hash_addr.top = 0;
+
+		netdev_for_each_mc_addr(ha, ndev) {
+			addr = ha->addr;
+
+			netif_info(priv, drv, ndev,
+				   "adding multicast address %X:%X:%X:%X:%X:%X to gem filter\n",
+				   addr[0], addr[1], addr[2],
+				   addr[3], addr[4], addr[5]);
+
+			result = pfe_eth_get_hash(addr);
+
+			if (result < EMAC_HASH_REG_BITS) {
+				if (result < 32)
+					hash_addr.bottom |= (1 << result);
+				else
+					hash_addr.top |= (1 << (result - 32));
+			} else {
+				break;
+			}
+		}
+
+		uc_count = -1;
+		netdev_for_each_uc_addr(ha, ndev) {
+			addr = ha->addr;
+
+			if (++uc_count < MAX_UC_SPEC_ADDR_REG) {
+				netdev_info(ndev,
+					    "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
+					    addr[0], addr[1], addr[2],
+					    addr[3], addr[4], addr[5]);
+				pfe_eth_enet_addr_byte_mac(addr, &spec_addr);
+				gemac_set_laddrN(priv->EMAC_baseaddr,
+						 &spec_addr, uc_count + 2);
+			} else {
+				netif_info(priv, drv, ndev,
+					   "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem hash\n",
+					   addr[0], addr[1], addr[2],
+					   addr[3], addr[4], addr[5]);
+
+				result = pfe_eth_get_hash(addr);
+				if (result >= EMAC_HASH_REG_BITS) {
+					break;
+
+				} else {
+					if (result < 32)
+						hash_addr.bottom |= (1 <<
+								result);
+					else
+						hash_addr.top |= (1 <<
+								(result - 32));
+				}
+			}
+		}
+
+		gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
+	}
+
+	if (!(netdev_uc_count(ndev) >= MAX_UC_SPEC_ADDR_REG)) {
+		/*
+		 * Check if there are any specific address HW registers that
+		 * need to be flushed
+		 */
+		for (uc_count = netdev_uc_count(ndev); uc_count <
+			MAX_UC_SPEC_ADDR_REG; uc_count++)
+			gemac_clear_laddrN(priv->EMAC_baseaddr, uc_count + 2);
+	}
+
+	if (ndev->flags & IFF_LOOPBACK)
+		gemac_set_loop(priv->EMAC_baseaddr, LB_LOCAL);
+}
+
+/* pfe_eth_set_features
+ */
+static int pfe_eth_set_features(struct net_device *ndev, netdev_features_t
+				features)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+	int rc = 0;
+
+	if (features & NETIF_F_RXCSUM)
+		gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
+	else
+		gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
+	return rc;
+}
+
+/* pfe_eth_fast_tx_timeout
+ */
+static enum hrtimer_restart pfe_eth_fast_tx_timeout(struct hrtimer *timer)
+{
+	struct pfe_eth_fast_timer *fast_tx_timeout = container_of(timer, struct
+							pfe_eth_fast_timer,
+							timer);
+	struct pfe_eth_priv_s *priv = container_of(fast_tx_timeout->base,
+							struct pfe_eth_priv_s,
+							fast_tx_timeout);
+	struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
+						fast_tx_timeout->queuenum);
+
+	if (netif_tx_queue_stopped(tx_queue)) {
+#ifdef PFE_ETH_TX_STATS
+		priv->was_stopped[fast_tx_timeout->queuenum] = 1;
+#endif
+		netif_tx_wake_queue(tx_queue);
+	}
+
+	return HRTIMER_NORESTART;
+}
+
+/* pfe_eth_fast_tx_timeout_init
+ */
+static void pfe_eth_fast_tx_timeout_init(struct pfe_eth_priv_s *priv)
+{
+	int i;
+
+	for (i = 0; i < emac_txq_cnt; i++) {
+		priv->fast_tx_timeout[i].queuenum = i;
+		hrtimer_init(&priv->fast_tx_timeout[i].timer, CLOCK_MONOTONIC,
+			     HRTIMER_MODE_REL);
+		priv->fast_tx_timeout[i].timer.function =
+				pfe_eth_fast_tx_timeout;
+		priv->fast_tx_timeout[i].base = priv->fast_tx_timeout;
+	}
+}
+
+static struct sk_buff *pfe_eth_rx_skb(struct net_device *ndev,
+				      struct pfe_eth_priv_s *priv,
+				      unsigned int qno)
+{
+	void *buf_addr;
+	unsigned int rx_ctrl;
+	unsigned int desc_ctrl = 0;
+	struct hif_ipsec_hdr *ipsec_hdr = NULL;
+	struct sk_buff *skb;
+	struct sk_buff *skb_frag, *skb_frag_last = NULL;
+	int length = 0, offset;
+
+	skb = priv->skb_inflight[qno];
+
+	if (skb) {
+		skb_frag_last = skb_shinfo(skb)->frag_list;
+		if (skb_frag_last) {
+			while (skb_frag_last->next)
+				skb_frag_last = skb_frag_last->next;
+		}
+	}
+
+	while (!(desc_ctrl & CL_DESC_LAST)) {
+		buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length,
+					       &offset, &rx_ctrl, &desc_ctrl,
+					       (void **)&ipsec_hdr);
+		if (!buf_addr)
+			goto incomplete;
+
+#ifdef PFE_ETH_NAPI_STATS
+		priv->napi_counters[NAPI_DESC_COUNT]++;
+#endif
+
+		/* First frag */
+		if (desc_ctrl & CL_DESC_FIRST) {
+			skb = build_skb(buf_addr, 0);
+			if (unlikely(!skb))
+				goto pkt_drop;
+
+			skb_reserve(skb, offset);
+			skb_put(skb, length);
+			skb->dev = ndev;
+
+			if ((ndev->features & NETIF_F_RXCSUM) && (rx_ctrl &
+					HIF_CTRL_RX_CHECKSUMMED))
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+			else
+				skb_checksum_none_assert(skb);
+
+		} else {
+			/* Next frags */
+			if (unlikely(!skb)) {
+				pr_err("%s: NULL skb_inflight\n",
+				       __func__);
+				goto pkt_drop;
+			}
+
+			skb_frag = build_skb(buf_addr, 0);
+
+			if (unlikely(!skb_frag)) {
+				kfree(buf_addr);
+				goto pkt_drop;
+			}
+
+			skb_reserve(skb_frag, offset);
+			skb_put(skb_frag, length);
+
+			skb_frag->dev = ndev;
+
+			if (skb_shinfo(skb)->frag_list)
+				skb_frag_last->next = skb_frag;
+			else
+				skb_shinfo(skb)->frag_list = skb_frag;
+
+			skb->truesize += skb_frag->truesize;
+			skb->data_len += length;
+			skb->len += length;
+			skb_frag_last = skb_frag;
+		}
+	}
+
+	priv->skb_inflight[qno] = NULL;
+	return skb;
+
+incomplete:
+	priv->skb_inflight[qno] = skb;
+	return NULL;
+
+pkt_drop:
+	priv->skb_inflight[qno] = NULL;
+
+	if (skb)
+		kfree_skb(skb);
+	else
+		kfree(buf_addr);
+
+	priv->stats.rx_errors++;
+
+	return NULL;
+}
+
+/* pfe_eth_poll
+ */
+static int pfe_eth_poll(struct pfe_eth_priv_s *priv, struct napi_struct *napi,
+			unsigned int qno, int budget)
+{
+	struct net_device *ndev = priv->ndev;
+	struct sk_buff *skb;
+	int work_done = 0;
+	unsigned int len;
+
+	netif_info(priv, intr, priv->ndev, "%s\n", __func__);
+
+#ifdef PFE_ETH_NAPI_STATS
+	priv->napi_counters[NAPI_POLL_COUNT]++;
+#endif
+
+	do {
+		skb = pfe_eth_rx_skb(ndev, priv, qno);
+
+		if (!skb)
+			break;
+
+		len = skb->len;
+
+		/* Packet will be processed */
+		skb->protocol = eth_type_trans(skb, ndev);
+
+		netif_receive_skb(skb);
+
+		priv->stats.rx_packets++;
+		priv->stats.rx_bytes += len;
+
+		work_done++;
+
+#ifdef PFE_ETH_NAPI_STATS
+		priv->napi_counters[NAPI_PACKET_COUNT]++;
+#endif
+
+	} while (work_done < budget);
+
+	/*
+	 * If no Rx receive nor cleanup work was done, exit polling mode.
+	 * No more netif_running(dev) check is required here, as this is
+	 * checked in net/core/dev.c (2.6.33.5 kernel specific).
+	 */
+	if (work_done < budget) {
+		napi_complete(napi);
+
+		hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND,
+					    qno);
+	}
+#ifdef PFE_ETH_NAPI_STATS
+	else
+		priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
+#endif
+
+	return work_done;
+}
+
+/*
+ * pfe_eth_lro_poll
+ */
+static int pfe_eth_lro_poll(struct napi_struct *napi, int budget)
+{
+	struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
+							lro_napi);
+
+	netif_info(priv, intr, priv->ndev, "%s\n", __func__);
+
+	return pfe_eth_poll(priv, napi, 2, budget);
+}
+
+/* pfe_eth_low_poll
+ */
+static int pfe_eth_low_poll(struct napi_struct *napi, int budget)
+{
+	struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
+							low_napi);
+
+	netif_info(priv, intr, priv->ndev, "%s\n", __func__);
+
+	return pfe_eth_poll(priv, napi, 1, budget);
+}
+
+/* pfe_eth_high_poll
+ */
+static int pfe_eth_high_poll(struct napi_struct *napi, int budget)
+{
+	struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
+							high_napi);
+
+	netif_info(priv, intr, priv->ndev, "%s\n", __func__);
+
+	return pfe_eth_poll(priv, napi, 0, budget);
+}
+
+static const struct net_device_ops pfe_netdev_ops = {
+	.ndo_open = pfe_eth_open,
+	.ndo_stop = pfe_eth_close,
+	.ndo_start_xmit = pfe_eth_send_packet,
+	.ndo_select_queue = pfe_eth_select_queue,
+	.ndo_set_rx_mode = pfe_eth_set_multi,
+	.ndo_set_mac_address = pfe_eth_set_mac_address,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_change_mtu = pfe_eth_change_mtu,
+	.ndo_get_stats = pfe_eth_get_stats,
+	.ndo_set_features = pfe_eth_set_features,
+};
+
+/* pfe_eth_init_one
+ */
+static int pfe_eth_init_one(struct pfe *pfe,
+			    struct ls1012a_pfe_platform_data *pfe_info,
+			    int id)
+{
+	struct net_device *ndev = NULL;
+	struct pfe_eth_priv_s *priv = NULL;
+	struct ls1012a_eth_platform_data *einfo;
+	int err;
+
+	einfo = (struct ls1012a_eth_platform_data *)
+				pfe_info->ls1012a_eth_pdata;
+
+	/* einfo should never be NULL, but no harm in having this check */
+	if (!einfo) {
+		pr_err(
+			"%s: pfe missing additional gemacs platform data\n"
+			, __func__);
+		err = -ENODEV;
+		goto err0;
+	}
+
+	if (us)
+		emac_txq_cnt = EMAC_TXQ_CNT;
+	/* Create an ethernet device instance */
+	ndev = alloc_etherdev_mq(sizeof(*priv), emac_txq_cnt);
+
+	if (!ndev) {
+		pr_err("%s: gemac %d device allocation failed\n",
+		       __func__, einfo[id].gem_id);
+		err = -ENOMEM;
+		goto err0;
+	}
+
+	priv = netdev_priv(ndev);
+	priv->ndev = ndev;
+	priv->id = einfo[id].gem_id;
+	priv->pfe = pfe;
+	priv->phy_node = einfo[id].phy_node;
+
+	SET_NETDEV_DEV(priv->ndev, priv->pfe->dev);
+
+	pfe->eth.eth_priv[id] = priv;
+
+	/* Set the info in the priv to the current info */
+	priv->einfo = &einfo[id];
+	priv->EMAC_baseaddr = cbus_emac_base[id];
+	priv->GPI_baseaddr = cbus_gpi_base[id];
+
+	spin_lock_init(&priv->lock);
+
+	pfe_eth_fast_tx_timeout_init(priv);
+
+	/* Copy the station address into the dev structure */
+	memcpy(ndev->dev_addr, einfo[id].mac_addr, ETH_ALEN);
+
+	if (us)
+		goto phy_init;
+
+	ndev->mtu = 1500;
+
+	/* Set MTU limits */
+	ndev->min_mtu = ETH_MIN_MTU;
+
+/*
+ * Jumbo frames are not supported on LS1012A rev-1.0.
+ * So max mtu should be restricted to supported frame length.
+ */
+	if (pfe_errata_a010897)
+		ndev->max_mtu = JUMBO_FRAME_SIZE_V1 - ETH_HLEN - ETH_FCS_LEN;
+	else
+		ndev->max_mtu = JUMBO_FRAME_SIZE_V2 - ETH_HLEN - ETH_FCS_LEN;
+
+	/* Enable after checksum offload is validated */
+	ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+		NETIF_F_IPV6_CSUM | NETIF_F_SG;
+
+	/* enabled by default */
+	ndev->features = ndev->hw_features;
+
+	priv->usr_features = ndev->features;
+
+	ndev->netdev_ops = &pfe_netdev_ops;
+
+	ndev->ethtool_ops = &pfe_ethtool_ops;
+
+	/* Enable basic messages by default */
+	priv->msg_enable = NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK |
+				NETIF_MSG_PROBE;
+
+	netif_napi_add(ndev, &priv->low_napi, pfe_eth_low_poll,
+		       HIF_RX_POLL_WEIGHT - 16);
+	netif_napi_add(ndev, &priv->high_napi, pfe_eth_high_poll,
+		       HIF_RX_POLL_WEIGHT - 16);
+	netif_napi_add(ndev, &priv->lro_napi, pfe_eth_lro_poll,
+		       HIF_RX_POLL_WEIGHT - 16);
+
+	err = register_netdev(ndev);
+	if (err) {
+		netdev_err(ndev, "register_netdev() failed\n");
+		goto err1;
+	}
+
+	if ((!(pfe_use_old_dts_phy) && !(priv->phy_node)) ||
+	    ((pfe_use_old_dts_phy) &&
+	      (priv->einfo->phy_flags & GEMAC_NO_PHY))) {
+		pr_info("%s: No PHY or fixed-link\n", __func__);
+		goto skip_phy_init;
+	}
+
+phy_init:
+	device_init_wakeup(&ndev->dev, WAKE_MAGIC);
+
+	err = pfe_phy_init(ndev);
+	if (err) {
+		netdev_err(ndev, "%s: pfe_phy_init() failed\n",
+			   __func__);
+		goto err2;
+	}
+
+	if (us) {
+		if (priv->phydev)
+			phy_start(priv->phydev);
+		return 0;
+	}
+
+	netif_carrier_on(ndev);
+
+skip_phy_init:
+	/* Create all the sysfs files */
+	if (pfe_eth_sysfs_init(ndev))
+		goto err3;
+
+	netif_info(priv, probe, ndev, "%s: created interface, baseaddr: %p\n",
+		   __func__, priv->EMAC_baseaddr);
+
+	return 0;
+
+err3:
+	pfe_phy_exit(priv->ndev);
+err2:
+	if (us)
+		goto err1;
+	unregister_netdev(ndev);
+err1:
+	free_netdev(priv->ndev);
+err0:
+	return err;
+}
+
+/* pfe_eth_init
+ */
+int pfe_eth_init(struct pfe *pfe)
+{
+	int ii = 0;
+	int err;
+	struct ls1012a_pfe_platform_data *pfe_info;
+
+	pr_info("%s\n", __func__);
+
+	cbus_emac_base[0] = EMAC1_BASE_ADDR;
+	cbus_emac_base[1] = EMAC2_BASE_ADDR;
+
+	cbus_gpi_base[0] = EGPI1_BASE_ADDR;
+	cbus_gpi_base[1] = EGPI2_BASE_ADDR;
+
+	pfe_info = (struct ls1012a_pfe_platform_data *)
+					pfe->dev->platform_data;
+	if (!pfe_info) {
+		pr_err("%s: pfe missing additional platform data\n", __func__);
+		err = -ENODEV;
+		goto err_pdata;
+	}
+
+	for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
+		err = pfe_eth_mdio_init(pfe, pfe_info, ii);
+		if (err) {
+			pr_err("%s: pfe_eth_mdio_init() failed\n", __func__);
+			goto err_mdio_init;
+		}
+	}
+
+	if (fsl_guts_get_svr() == LS1012A_REV_1_0)
+		pfe_errata_a010897 = true;
+	else
+		pfe_errata_a010897 = false;
+
+	for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
+		err = pfe_eth_init_one(pfe, pfe_info, ii);
+		if (err)
+			goto err_eth_init;
+	}
+
+	return 0;
+
+err_eth_init:
+	while (ii--) {
+		pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
+		pfe_eth_mdio_exit(pfe, ii);
+	}
+
+err_mdio_init:
+err_pdata:
+	return err;
+}
+
+/* pfe_eth_exit_one
+ */
+static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv)
+{
+	netif_info(priv, probe, priv->ndev, "%s\n", __func__);
+
+	if (!us)
+		pfe_eth_sysfs_exit(priv->ndev);
+
+	if ((!(pfe_use_old_dts_phy) && !(priv->phy_node)) ||
+	    ((pfe_use_old_dts_phy) &&
+	      (priv->einfo->phy_flags & GEMAC_NO_PHY))) {
+		pr_info("%s: No PHY or fixed-link\n", __func__);
+		goto skip_phy_exit;
+	}
+
+	pfe_phy_exit(priv->ndev);
+
+skip_phy_exit:
+	if (!us)
+		unregister_netdev(priv->ndev);
+
+	free_netdev(priv->ndev);
+}
+
+/* pfe_eth_exit
+ */
+void pfe_eth_exit(struct pfe *pfe)
+{
+	int ii;
+
+	pr_info("%s\n", __func__);
+
+	for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
+		pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
+
+	for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
+		pfe_eth_mdio_exit(pfe, ii);
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_eth.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_ETH_H_
+#define _PFE_ETH_H_
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+
+#define PFE_ETH_NAPI_STATS
+#define PFE_ETH_TX_STATS
+
+#define PFE_ETH_FRAGS_MAX (65536 / HIF_RX_PKT_MIN_SIZE)
+#define LRO_LEN_COUNT_MAX 32
+#define LRO_NB_COUNT_MAX 32
+
+#define PFE_PAUSE_FLAG_ENABLE 1
+#define PFE_PAUSE_FLAG_AUTONEG 2
+
+/* GEMAC configured by SW */
+/* GEMAC configured by phy lines (not for MII/GMII) */
+
+#define GEMAC_SW_FULL_DUPLEX BIT(9)
+#define GEMAC_SW_SPEED_10M (0 << 12)
+#define GEMAC_SW_SPEED_100M BIT(12)
+#define GEMAC_SW_SPEED_1G (2 << 12)
+
+#define GEMAC_NO_PHY BIT(0)
+
+struct ls1012a_eth_platform_data {
+	/* board specific information */
+	u32 mii_config;
+	u32 phy_flags;
+	u32 gem_id;
+	u32 phy_id;
+	u32 mdio_muxval;
+	u8 mac_addr[ETH_ALEN];
+	struct device_node *phy_node;
+};
+
+struct ls1012a_mdio_platform_data {
+	int id;
+	int irq[32];
+	u32 phy_mask;
+	int mdc_div;
+};
+
+struct ls1012a_pfe_platform_data {
+	struct ls1012a_eth_platform_data ls1012a_eth_pdata[3];
+	struct ls1012a_mdio_platform_data ls1012a_mdio_pdata[3];
+};
+
+#define NUM_GEMAC_SUPPORT 2
+#define DRV_NAME "pfe-eth"
+#define DRV_VERSION "1.0"
+
+#define LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS 3
+#define TX_POLL_TIMEOUT_MS 1000
+
+#define EMAC_TXQ_CNT 16
+#define EMAC_TXQ_DEPTH (HIF_TX_DESC_NT)
+
+#define JUMBO_FRAME_SIZE_V1 1900
+#define JUMBO_FRAME_SIZE_V2 10258
+/*
+ * Client Tx queue threshold, for txQ flush condition.
+ * It must be smaller than the queue size (in case we ever change it in the
+ * future).
+ */
+#define HIF_CL_TX_FLUSH_MARK 32
+
+/*
+ * Max number of TX resources (HIF descriptors or skbs) that will be released
+ * in a single go during batch recycling.
+ * Should be lower than the flush mark so the SW can provide the HW with a
+ * continuous stream of packets instead of bursts.
+ */
+#define TX_FREE_MAX_COUNT 16
+#define EMAC_RXQ_CNT 3
+#define EMAC_RXQ_DEPTH HIF_RX_DESC_NT
+/* make sure clients can receive a full burst of packets */
+#define EMAC_RMON_TXBYTES_POS 0x00
+#define EMAC_RMON_RXBYTES_POS 0x14
+
+#define EMAC_QUEUENUM_MASK (emac_txq_cnt - 1)
+#define EMAC_MDIO_TIMEOUT 1000
+#define MAX_UC_SPEC_ADDR_REG 31
+
+struct pfe_eth_fast_timer {
+	int queuenum;
+	struct hrtimer timer;
+	void *base;
+};
+
+struct pfe_eth_priv_s {
+	struct pfe *pfe;
+	struct hif_client_s client;
+	struct napi_struct lro_napi;
+	struct napi_struct low_napi;
+	struct napi_struct high_napi;
+	int low_tmu_q;
+	int high_tmu_q;
+	struct net_device_stats stats;
+	struct net_device *ndev;
+	int id;
+	int promisc;
+	unsigned int msg_enable;
+	unsigned int usr_features;
+
+	spinlock_t lock; /* protect member variables */
+	unsigned int event_status;
+	int irq;
+	void *EMAC_baseaddr;
+	void *GPI_baseaddr;
+	/* PHY stuff */
+	struct phy_device *phydev;
+	int oldspeed;
+	int oldduplex;
+	int oldlink;
+	struct device_node *phy_node;
+	struct clk *gemtx_clk;
+	int wol;
+	int pause_flag;
+
+	int default_priority;
+	struct pfe_eth_fast_timer fast_tx_timeout[EMAC_TXQ_CNT];
+
+	struct ls1012a_eth_platform_data *einfo;
+	struct sk_buff *skb_inflight[EMAC_RXQ_CNT + 6];
+
+#ifdef PFE_ETH_TX_STATS
+	unsigned int stop_queue_total[EMAC_TXQ_CNT];
+	unsigned int stop_queue_hif[EMAC_TXQ_CNT];
+	unsigned int stop_queue_hif_client[EMAC_TXQ_CNT];
+	unsigned int stop_queue_credit[EMAC_TXQ_CNT];
+	unsigned int clean_fail[EMAC_TXQ_CNT];
+	unsigned int was_stopped[EMAC_TXQ_CNT];
+#endif
+
+#ifdef PFE_ETH_NAPI_STATS
+	unsigned int napi_counters[NAPI_MAX_COUNT];
+#endif
+	unsigned int frags_inflight[EMAC_RXQ_CNT + 6];
+};
+
+struct pfe_eth {
+	struct pfe_eth_priv_s *eth_priv[3];
+};
+
+struct pfe_mdio_priv_s {
+	void __iomem *mdio_base;
+	int mdc_div;
+	struct mii_bus *mii_bus;
+};
+
+struct pfe_mdio {
+	struct pfe_mdio_priv_s *mdio_priv[3];
+};
+
+int pfe_eth_init(struct pfe *pfe);
+void pfe_eth_exit(struct pfe *pfe);
+int pfe_eth_suspend(struct net_device *dev);
+int pfe_eth_resume(struct net_device *dev);
+int pfe_eth_mdio_reset(struct mii_bus *bus);
+
+#endif /* _PFE_ETH_H_ */
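
The queue macros above assume the runtime variable emac_txq_cnt is a power of two, so EMAC_QUEUENUM_MASK can reduce any skb queue mapping to a valid hardware TX queue index. A minimal sketch of that reduction, illustrative only (the real transmit path lives in pfe_eth.c):

static inline int pfe_eth_select_txq(struct sk_buff *skb)
{
	/* Works only because emac_txq_cnt is a power of two */
	return skb_get_queue_mapping(skb) & EMAC_QUEUENUM_MASK;
}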
|
|
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_firmware.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+/*
+ * @file
+ * Contains all the functions to handle parsing and loading of PE firmware
+ * files.
+ */
+#include <linux/firmware.h>
+
+#include "pfe_mod.h"
+#include "pfe_firmware.h"
+#include "pfe/pfe.h"
+
+static struct elf32_shdr *get_elf_section_header(const struct firmware *fw,
+						 const char *section)
+{
+	struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
+	struct elf32_shdr *shdr;
+	struct elf32_shdr *shdr_shstr;
+	Elf32_Off e_shoff = be32_to_cpu(elf_hdr->e_shoff);
+	Elf32_Half e_shentsize = be16_to_cpu(elf_hdr->e_shentsize);
+	Elf32_Half e_shnum = be16_to_cpu(elf_hdr->e_shnum);
+	Elf32_Half e_shstrndx = be16_to_cpu(elf_hdr->e_shstrndx);
+	Elf32_Off shstr_offset;
+	Elf32_Word sh_name;
+	const char *name;
+	int i;
+
+	/* Section header strings */
+	shdr_shstr = (struct elf32_shdr *)(fw->data + e_shoff + e_shstrndx *
+					e_shentsize);
+	shstr_offset = be32_to_cpu(shdr_shstr->sh_offset);
+
+	for (i = 0; i < e_shnum; i++) {
+		shdr = (struct elf32_shdr *)(fw->data + e_shoff
+					     + i * e_shentsize);
+
+		sh_name = be32_to_cpu(shdr->sh_name);
+
+		name = (const char *)(fw->data + shstr_offset + sh_name);
+
+		if (!strcmp(name, section))
+			return shdr;
+	}
+
+	pr_err("%s: didn't find section %s\n", __func__, section);
+
+	return NULL;
+}
+
+#if defined(CFG_DIAGS)
+static int pfe_get_diags_info(const struct firmware *fw, struct pfe_diags_info
+				*diags_info)
+{
+	struct elf32_shdr *shdr;
+	unsigned long offset, size;
+
+	shdr = get_elf_section_header(fw, ".pfe_diags_str");
+	if (shdr) {
+		offset = be32_to_cpu(shdr->sh_offset);
+		size = be32_to_cpu(shdr->sh_size);
+		diags_info->diags_str_base = be32_to_cpu(shdr->sh_addr);
+		diags_info->diags_str_size = size;
+		diags_info->diags_str_array = kmalloc(size, GFP_KERNEL);
+		memcpy(diags_info->diags_str_array, fw->data + offset, size);
+
+		return 0;
+	} else {
+		return -1;
+	}
+}
+#endif
|
|
+
+static void pfe_check_version_info(const struct firmware *fw)
+{
+	/*static char *version = NULL;*/
+	static char *version;
+
+	struct elf32_shdr *shdr = get_elf_section_header(fw, ".version");
+
+	if (shdr) {
+		if (!version) {
+			/*
+			 * this is the first fw we load, use its version
+			 * string as reference (whatever it is)
+			 */
+			version = (char *)(fw->data +
+					be32_to_cpu(shdr->sh_offset));
+
+			pr_info("PFE binary version: %s\n", version);
+		} else {
+			/*
+			 * already have loaded at least one firmware, check
+			 * sequence can start now
+			 */
+			if (strcmp(version, (char *)(fw->data +
+				be32_to_cpu(shdr->sh_offset)))) {
+				pr_info(
+				"WARNING: PFE firmware binaries from incompatible version\n");
+			}
+		}
+	} else {
+		/*
+		 * version cannot be verified, a potential issue that should
+		 * be reported
+		 */
+		pr_info(
+		"WARNING: PFE firmware binary version cannot be verified\n");
+	}
+}
|
|
+
+/* PFE elf firmware loader.
+ * Loads an elf firmware image into a list of PE's (specified using a bitmask)
+ *
+ * @param pe_mask	Mask of PE id's to load firmware to
+ * @param fw		Pointer to the firmware image
+ *
+ * @return		0 on success, a negative value on error
+ *
+ */
+int pfe_load_elf(int pe_mask, const struct firmware *fw, struct pfe *pfe)
+{
+	struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
+	Elf32_Half sections = be16_to_cpu(elf_hdr->e_shnum);
+	struct elf32_shdr *shdr = (struct elf32_shdr *)(fw->data +
+					be32_to_cpu(elf_hdr->e_shoff));
+	int id, section;
+	int rc;
+
+	pr_info("%s\n", __func__);
+
+	/* Some sanity checks */
+	if (strncmp(&elf_hdr->e_ident[EI_MAG0], ELFMAG, SELFMAG)) {
+		pr_err("%s: incorrect elf magic number\n", __func__);
+		return -EINVAL;
+	}
+
+	if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
+		pr_err("%s: incorrect elf class(%x)\n", __func__,
+		       elf_hdr->e_ident[EI_CLASS]);
+		return -EINVAL;
+	}
+
+	if (elf_hdr->e_ident[EI_DATA] != ELFDATA2MSB) {
+		pr_err("%s: incorrect elf data(%x)\n", __func__,
+		       elf_hdr->e_ident[EI_DATA]);
+		return -EINVAL;
+	}
+
+	if (be16_to_cpu(elf_hdr->e_type) != ET_EXEC) {
+		pr_err("%s: incorrect elf file type(%x)\n", __func__,
+		       be16_to_cpu(elf_hdr->e_type));
+		return -EINVAL;
+	}
+
+	for (section = 0; section < sections; section++, shdr++) {
+		if (!(be32_to_cpu(shdr->sh_flags) & (SHF_WRITE | SHF_ALLOC |
+			SHF_EXECINSTR)))
+			continue;
+
+		for (id = 0; id < MAX_PE; id++)
+			if (pe_mask & (1 << id)) {
+				rc = pe_load_elf_section(id, fw->data, shdr,
+							 pfe->dev);
+				if (rc < 0)
+					goto err;
+			}
+	}
+
+	pfe_check_version_info(fw);
+
+	return 0;
+
+err:
+	return rc;
+}
|
|
+
+/* PFE firmware initialization.
+ * Loads different firmware files from filesystem.
+ * Initializes PE IMEM/DMEM and UTIL-PE DDR
+ * Initializes control path symbol addresses (by looking them up in the elf
+ * firmware files)
+ * Takes PE's out of reset
+ *
+ * @return	0 on success, a negative value on error
+ *
+ */
+int pfe_firmware_init(struct pfe *pfe)
+{
+	const struct firmware *class_fw, *tmu_fw;
+	int rc = 0;
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	const char *util_fw_name;
+	const struct firmware *util_fw;
+#endif
+
+	pr_info("%s\n", __func__);
+
+	if (request_firmware(&class_fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) {
+		pr_err("%s: request firmware %s failed\n", __func__,
+		       CLASS_FIRMWARE_FILENAME);
+		rc = -ETIMEDOUT;
+		goto err0;
+	}
+
+	if (request_firmware(&tmu_fw, TMU_FIRMWARE_FILENAME, pfe->dev)) {
+		pr_err("%s: request firmware %s failed\n", __func__,
+		       TMU_FIRMWARE_FILENAME);
+		rc = -ETIMEDOUT;
+		goto err1;
+	}
|
|
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	util_fw_name = UTIL_FIRMWARE_FILENAME;
+
+	if (request_firmware(&util_fw, util_fw_name, pfe->dev)) {
+		pr_err("%s: request firmware %s failed\n", __func__,
+		       util_fw_name);
+		rc = -ETIMEDOUT;
+		goto err2;
+	}
+#endif
+	rc = pfe_load_elf(CLASS_MASK, class_fw, pfe);
+	if (rc < 0) {
+		pr_err("%s: class firmware load failed\n", __func__);
+		goto err3;
+	}
+
+#if defined(CFG_DIAGS)
+	rc = pfe_get_diags_info(class_fw, &pfe->diags.class_diags_info);
+	if (rc < 0) {
+		pr_warn(
+			"PFE diags won't be available for class PEs\n");
+		rc = 0;
+	}
+#endif
+
+	rc = pfe_load_elf(TMU_MASK, tmu_fw, pfe);
+	if (rc < 0) {
+		pr_err("%s: tmu firmware load failed\n", __func__);
+		goto err3;
+	}
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	rc = pfe_load_elf(UTIL_MASK, util_fw, pfe);
+	if (rc < 0) {
+		pr_err("%s: util firmware load failed\n", __func__);
+		goto err3;
+	}
+
+#if defined(CFG_DIAGS)
+	rc = pfe_get_diags_info(util_fw, &pfe->diags.util_diags_info);
+	if (rc < 0) {
+		pr_warn(
+			"PFE diags won't be available for util PE\n");
+		rc = 0;
+	}
+#endif
+
+	util_enable();
+#endif
+
+	tmu_enable(0xf);
+	class_enable();
+
+err3:
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	release_firmware(util_fw);
+
+err2:
+#endif
+	release_firmware(tmu_fw);
+
+err1:
+	release_firmware(class_fw);
+
+err0:
+	return rc;
+}
+
+/* PFE firmware cleanup
+ * Puts PE's in reset
+ *
+ *
+ */
+void pfe_firmware_exit(struct pfe *pfe)
+{
+	pr_info("%s\n", __func__);
+
+	if (pe_reset_all(&pfe->ctrl) != 0)
+		pr_err("Error: Failed to stop PEs, PFE reload may not work correctly\n");
+
+	class_disable();
+	tmu_disable(0xf);
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	util_disable();
+#endif
+}
|
|
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_firmware.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_FIRMWARE_H_
+#define _PFE_FIRMWARE_H_
+
+#define CLASS_FIRMWARE_FILENAME "ppfe_class_ls1012a.elf"
+#define TMU_FIRMWARE_FILENAME "ppfe_tmu_ls1012a.elf"
+
+#define PFE_FW_CHECK_PASS 0
+#define PFE_FW_CHECK_FAIL 1
+#define NUM_PFE_FW 3
+
+int pfe_firmware_init(struct pfe *pfe);
+void pfe_firmware_exit(struct pfe *pfe);
+
+#endif /* _PFE_FIRMWARE_H_ */
|
|
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hal.c
@@ -0,0 +1,1516 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include "pfe_mod.h"
+#include "pfe/pfe.h"
+
+/* A-010897: Jumbo frame is not supported */
+extern bool pfe_errata_a010897;
+
+#define PFE_RCR_MAX_FL_MASK	0xC000FFFF
+
+void *cbus_base_addr;
+void *ddr_base_addr;
+unsigned long ddr_phys_base_addr;
+unsigned int ddr_size;
+
+static struct pe_info pe[MAX_PE];
+
+/* Initializes the PFE library.
+ * Must be called before using any of the library functions.
+ *
+ * @param[in] cbus_base		CBUS virtual base address (as mapped in
+ *				the host CPU address space)
+ * @param[in] ddr_base		PFE DDR range virtual base address (as
+ *				mapped in the host CPU address space)
+ * @param[in] ddr_phys_base	PFE DDR range physical base address (as
+ *				mapped in platform)
+ * @param[in] size		PFE DDR range size (as defined by the host
+ *				software)
+ */
+void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
+		  unsigned int size)
+{
+	cbus_base_addr = cbus_base;
+	ddr_base_addr = ddr_base;
+	ddr_phys_base_addr = ddr_phys_base;
+	ddr_size = size;
+
+	pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0);
+	pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0);
+	pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE;
+	pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+	pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+	pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+	pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1);
+	pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1);
+	pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE;
+	pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+	pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+	pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+	pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2);
+	pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2);
+	pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE;
+	pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+	pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+	pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+	pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3);
+	pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3);
+	pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE;
+	pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+	pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+	pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+	pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4);
+	pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4);
+	pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE;
+	pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+	pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+	pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+	pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5);
+	pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5);
+	pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE;
+	pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+	pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+	pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+	pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0);
+	pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0);
+	pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE;
+	pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
+	pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
+	pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
+
+	pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1);
+	pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1);
+	pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE;
+	pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
+	pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
+	pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
+
+	pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3);
+	pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3);
+	pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE;
+	pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
+	pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
+	pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR;
+	pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA;
+	pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR;
+	pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA;
+#endif
+}
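
For reference, a hedged sketch of how a platform probe might hand its mapped regions to pfe_lib_init(); the cbus_va/ddr_va/ddr_pa/ddr_len names here are placeholders, not the driver's own (the real call is made from the LS1012A platform code):

	void __iomem *cbus_va = ioremap(cbus_pa, cbus_len);
	void *ddr_va = ioremap(ddr_pa, ddr_len);

	/* Later pe_, class_ and tmu_ helpers rely on these globals */
	pfe_lib_init(cbus_va, ddr_va, ddr_pa, ddr_len);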
|
|
+
+/* Writes a buffer to PE internal memory from the host
+ * through indirect access registers.
+ *
+ * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
+ *				..., UTIL_ID)
+ * @param[in] src		Buffer source address
+ * @param[in] mem_access_addr	PE memory destination address (must be 32bit
+ *				aligned)
+ * @param[in] len		Number of bytes to copy
+ */
+void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src, unsigned
+int len)
+{
+	u32 offset = 0, val, addr;
+	unsigned int len32 = len >> 2;
+	int i;
+
+	addr = mem_access_addr | PE_MEM_ACCESS_WRITE |
+		PE_MEM_ACCESS_BYTE_ENABLE(0, 4);
+
+	for (i = 0; i < len32; i++, offset += 4, src += 4) {
+		val = *(u32 *)src;
+		writel(cpu_to_be32(val), pe[id].mem_access_wdata);
+		writel(addr + offset, pe[id].mem_access_addr);
+	}
+
+	len = (len & 0x3);
+	if (len) {
+		val = 0;
+
+		addr = (mem_access_addr | PE_MEM_ACCESS_WRITE |
+			PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;
+
+		for (i = 0; i < len; i++, src++)
+			val |= (*(u8 *)src) << (8 * i);
+
+		writel(cpu_to_be32(val), pe[id].mem_access_wdata);
+		writel(addr, pe[id].mem_access_addr);
+	}
+}
+
+/* Writes a buffer to PE internal data memory (DMEM) from the host
+ * through indirect access registers.
+ * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
+ *				..., UTIL_ID)
+ * @param[in] src		Buffer source address
+ * @param[in] dst		DMEM destination address (must be 32bit
+ *				aligned)
+ * @param[in] len		Number of bytes to copy
+ */
+void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
+{
+	pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst |
+				PE_MEM_ACCESS_DMEM, src, len);
+}
+
+/* Writes a buffer to PE internal program memory (PMEM) from the host
+ * through indirect access registers.
+ * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
+ *				..., TMU3_ID)
+ * @param[in] src		Buffer source address
+ * @param[in] dst		PMEM destination address (must be 32bit
+ *				aligned)
+ * @param[in] len		Number of bytes to copy
+ */
+void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
+{
+	pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size
+				- 1)) | PE_MEM_ACCESS_IMEM, src, len);
+}
+
+/* Reads PE internal program memory (IMEM) from the host
+ * through indirect access registers.
+ * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
+ *				..., TMU3_ID)
+ * @param[in] addr		PMEM read address (must be aligned on size)
+ * @param[in] size		Number of bytes to read (maximum 4, must not
+ *				cross 32bit boundaries)
+ * @return			the data read (in PE endianness, i.e BE).
+ */
+u32 pe_pmem_read(int id, u32 addr, u8 size)
+{
+	u32 offset = addr & 0x3;
+	u32 mask = 0xffffffff >> ((4 - size) << 3);
+	u32 val;
+
+	addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1))
+		| PE_MEM_ACCESS_IMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
+
+	writel(addr, pe[id].mem_access_addr);
+	val = be32_to_cpu(readl(pe[id].mem_access_rdata));
+
+	return (val >> (offset << 3)) & mask;
+}
+
+/* Writes PE internal data memory (DMEM) from the host
+ * through indirect access registers.
+ * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
+ *				..., UTIL_ID)
+ * @param[in] addr		DMEM write address (must be aligned on size)
+ * @param[in] val		Value to write (in PE endianness, i.e BE)
+ * @param[in] size		Number of bytes to write (maximum 4, must not
+ *				cross 32bit boundaries)
+ */
+void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
+{
+	u32 offset = addr & 0x3;
+
+	addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE |
+		PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
+
+	/* Indirect access interface is byte swapping data being written */
+	writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
+	writel(addr, pe[id].mem_access_addr);
+}
+
+/* Reads PE internal data memory (DMEM) from the host
+ * through indirect access registers.
+ * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
+ *				..., UTIL_ID)
+ * @param[in] addr		DMEM read address (must be aligned on size)
+ * @param[in] size		Number of bytes to read (maximum 4, must not
+ *				cross 32bit boundaries)
+ * @return			the data read (in PE endianness, i.e BE).
+ */
+u32 pe_dmem_read(int id, u32 addr, u8 size)
+{
+	u32 offset = addr & 0x3;
+	u32 mask = 0xffffffff >> ((4 - size) << 3);
+	u32 val;
+
+	addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_DMEM |
+		PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
+
+	writel(addr, pe[id].mem_access_addr);
+
+	/* Indirect access interface is byte swapping data being read */
+	val = be32_to_cpu(readl(pe[id].mem_access_rdata));
+
+	return (val >> (offset << 3)) & mask;
+}
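
Taken together, the two DMEM accessors support simple read-modify-write of PE state from the host. A small illustrative sketch, consistent with the endianness noted in the comments above (the DMEM offset used here is made up, not a real firmware symbol):

	u32 v;

	/* Read a 32-bit word from CLASS0 DMEM; convert BE result to host order */
	v = be32_to_cpu(pe_dmem_read(CLASS0_ID, 0x100 /* hypothetical */, 4));
	v |= 0x1;
	/* Write it back; pe_dmem_write() expects the value in PE (BE) order */
	pe_dmem_write(CLASS0_ID, cpu_to_be32(v), 0x100, 4);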
|
|
+
+/* This function is used to write to CLASS internal bus peripherals (ccu,
+ * pe-lem) from the host
+ * through indirect access registers.
+ * @param[in]	val	value to write
+ * @param[in]	addr	Address to write to (must be aligned on size)
+ * @param[in]	size	Number of bytes to write (1, 2 or 4)
+ *
+ */
+void class_bus_write(u32 val, u32 addr, u8 size)
+{
+	u32 offset = addr & 0x3;
+
+	writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
+
+	addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE |
+		(size << 24);
+
+	writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
+	writel(addr, CLASS_BUS_ACCESS_ADDR);
+}
+
+/* Reads from CLASS internal bus peripherals (ccu, pe-lem) from the host
+ * through indirect access registers.
+ * @param[in] addr	Address to read from (must be aligned on size)
+ * @param[in] size	Number of bytes to read (1, 2 or 4)
+ * @return		the read data
+ *
+ */
+u32 class_bus_read(u32 addr, u8 size)
+{
+	u32 offset = addr & 0x3;
+	u32 mask = 0xffffffff >> ((4 - size) << 3);
+	u32 val;
+
+	writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
+
+	addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);
+
+	writel(addr, CLASS_BUS_ACCESS_ADDR);
+	val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));
+
+	return (val >> (offset << 3)) & mask;
+}
+
+/* Writes data to the cluster memory (PE_LMEM)
+ * @param[in] dst	PE LMEM destination address (must be 32bit aligned)
+ * @param[in] src	Buffer source address
+ * @param[in] len	Number of bytes to copy
+ */
+void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len)
+{
+	u32 len32 = len >> 2;
+	int i;
+
+	for (i = 0; i < len32; i++, src += 4, dst += 4)
+		class_bus_write(*(u32 *)src, dst, 4);
+
+	if (len & 0x2) {
+		class_bus_write(*(u16 *)src, dst, 2);
+		src += 2;
+		dst += 2;
+	}
+
+	if (len & 0x1) {
+		class_bus_write(*(u8 *)src, dst, 1);
+		src++;
+		dst++;
+	}
+}
+
+/* Writes value to the cluster memory (PE_LMEM)
+ * @param[in] dst	PE LMEM destination address (must be 32bit aligned)
+ * @param[in] val	Value to write
+ * @param[in] len	Number of bytes to write
+ */
+void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
+{
+	u32 len32 = len >> 2;
+	int i;
+
+	val = val | (val << 8) | (val << 16) | (val << 24);
+
+	for (i = 0; i < len32; i++, dst += 4)
+		class_bus_write(val, dst, 4);
+
+	if (len & 0x2) {
+		class_bus_write(val, dst, 2);
+		dst += 2;
+	}
+
+	if (len & 0x1) {
+		class_bus_write(val, dst, 1);
+		dst++;
+	}
+}
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+
+/* Writes UTIL program memory (DDR) from the host.
+ *
+ * @param[in] addr	Address to write (virtual, must be aligned on size)
+ * @param[in] val	Value to write (in PE endianness, i.e BE)
+ * @param[in] size	Number of bytes to write (2 or 4)
+ */
+static void util_pmem_write(u32 val, void *addr, u8 size)
+{
+	void *addr64 = (void *)((unsigned long)addr & ~0x7);
+	unsigned long off = 8 - ((unsigned long)addr & 0x7) - size;
+
+	/*
+	 * IMEM should be loaded as a 64bit swapped value in a 64bit aligned
+	 * location
+	 */
+	if (size == 4)
+		writel(be32_to_cpu(val), addr64 + off);
+	else
+		writew(be16_to_cpu((u16)val), addr64 + off);
+}
+
+/* Writes a buffer to UTIL program memory (DDR) from the host.
+ *
+ * @param[in] dst	Address to write (virtual, must be at least 16bit
+ *			aligned)
+ * @param[in] src	Buffer to write (in PE endianness, i.e BE, must have
+ *			same alignment as dst)
+ * @param[in] len	Number of bytes to write (must be at least 16bit
+ *			aligned)
+ */
+static void util_pmem_memcpy(void *dst, const void *src, unsigned int len)
+{
+	unsigned int len32;
+	int i;
+
+	if ((unsigned long)src & 0x2) {
+		util_pmem_write(*(u16 *)src, dst, 2);
+		src += 2;
+		dst += 2;
+		len -= 2;
+	}
+
+	len32 = len >> 2;
+
+	for (i = 0; i < len32; i++, dst += 4, src += 4)
+		util_pmem_write(*(u32 *)src, dst, 4);
+
+	if (len & 0x2)
+		util_pmem_write(*(u16 *)src, dst, len & 0x2);
+}
+#endif
+
+/* Loads an elf section into pmem
+ * Code needs to be at least 16bit aligned and only PROGBITS sections are
+ * supported
+ *
+ * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID, ...,
+ *			TMU3_ID)
+ * @param[in] data	pointer to the elf firmware
+ * @param[in] shdr	pointer to the elf section header
+ *
+ */
+static int pe_load_pmem_section(int id, const void *data,
+				struct elf32_shdr *shdr)
+{
+	u32 offset = be32_to_cpu(shdr->sh_offset);
+	u32 addr = be32_to_cpu(shdr->sh_addr);
+	u32 size = be32_to_cpu(shdr->sh_size);
+	u32 type = be32_to_cpu(shdr->sh_type);
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	if (id == UTIL_ID) {
+		pr_err("%s: unsupported pmem section for UTIL\n",
+		       __func__);
+		return -EINVAL;
+	}
+#endif
+
+	if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
+		pr_err(
+			"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
+			, __func__, addr, (unsigned long)data + offset);
+
+		return -EINVAL;
+	}
+
+	if (addr & 0x1) {
+		pr_err("%s: load address(%x) is not 16bit aligned\n",
+		       __func__, addr);
+		return -EINVAL;
+	}
+
+	if (size & 0x1) {
+		pr_err("%s: load size(%x) is not 16bit aligned\n",
+		       __func__, size);
+		return -EINVAL;
+	}
+
+	switch (type) {
+	case SHT_PROGBITS:
+		pe_pmem_memcpy_to32(id, addr, data + offset, size);
+
+		break;
+
+	default:
+		pr_err("%s: unsupported section type(%x)\n", __func__,
+		       type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Loads an elf section into dmem
+ * Data needs to be at least 32bit aligned, NOBITS sections are correctly
+ * initialized to 0
+ *
+ * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID,
+ *			..., UTIL_ID)
+ * @param[in] data	pointer to the elf firmware
+ * @param[in] shdr	pointer to the elf section header
+ *
+ */
+static int pe_load_dmem_section(int id, const void *data,
+				struct elf32_shdr *shdr)
+{
+	u32 offset = be32_to_cpu(shdr->sh_offset);
+	u32 addr = be32_to_cpu(shdr->sh_addr);
+	u32 size = be32_to_cpu(shdr->sh_size);
+	u32 type = be32_to_cpu(shdr->sh_type);
+	u32 size32 = size >> 2;
+	int i;
+
+	if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
+		pr_err(
+			"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
+			__func__, addr, (unsigned long)data + offset);
+
+		return -EINVAL;
+	}
+
+	if (addr & 0x3) {
+		pr_err("%s: load address(%x) is not 32bit aligned\n",
+		       __func__, addr);
+		return -EINVAL;
+	}
+
+	switch (type) {
+	case SHT_PROGBITS:
+		pe_dmem_memcpy_to32(id, addr, data + offset, size);
+		break;
+
+	case SHT_NOBITS:
+		for (i = 0; i < size32; i++, addr += 4)
+			pe_dmem_write(id, 0, addr, 4);
+
+		if (size & 0x3)
+			pe_dmem_write(id, 0, addr, size & 0x3);
+
+		break;
+
+	default:
+		pr_err("%s: unsupported section type(%x)\n", __func__,
+		       type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Loads an elf section into DDR
+ * Data needs to be at least 32bit aligned, NOBITS sections are correctly
+ * initialized to 0
+ *
+ * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID,
+ *			..., UTIL_ID)
+ * @param[in] data	pointer to the elf firmware
+ * @param[in] shdr	pointer to the elf section header
+ *
+ */
+static int pe_load_ddr_section(int id, const void *data,
+			       struct elf32_shdr *shdr,
+			       struct device *dev) {
+	u32 offset = be32_to_cpu(shdr->sh_offset);
+	u32 addr = be32_to_cpu(shdr->sh_addr);
+	u32 size = be32_to_cpu(shdr->sh_size);
+	u32 type = be32_to_cpu(shdr->sh_type);
+	u32 flags = be32_to_cpu(shdr->sh_flags);
+
+	switch (type) {
+	case SHT_PROGBITS:
+		if (flags & SHF_EXECINSTR) {
+			if (id <= CLASS_MAX_ID) {
+				/* DO the loading only once in DDR */
+				if (id == CLASS0_ID) {
+					pr_err(
+						"%s: load address(%x) and elf file address(%lx) rcvd\n",
+						__func__, addr,
+						(unsigned long)data + offset);
+					if (((unsigned long)(data + offset)
+						& 0x3) != (addr & 0x3)) {
+						pr_err(
+							"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
+							, __func__, addr,
+							(unsigned long)data + offset);
+
+						return -EINVAL;
+					}
+
+					if (addr & 0x1) {
+						pr_err(
+							"%s: load address(%x) is not 16bit aligned\n"
+							, __func__, addr);
+						return -EINVAL;
+					}
+
+					if (size & 0x1) {
+						pr_err(
+							"%s: load length(%x) is not 16bit aligned\n"
+							, __func__, size);
+						return -EINVAL;
+					}
+					memcpy(DDR_PHYS_TO_VIRT(
+						DDR_PFE_TO_PHYS(addr)),
+						data + offset, size);
+				}
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+			} else if (id == UTIL_ID) {
+				if (((unsigned long)(data + offset) & 0x3)
+					!= (addr & 0x3)) {
+					pr_err(
+						"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
+						, __func__, addr,
+						(unsigned long)data + offset);
+
+					return -EINVAL;
+				}
+
+				if (addr & 0x1) {
+					pr_err(
+						"%s: load address(%x) is not 16bit aligned\n"
+						, __func__, addr);
+					return -EINVAL;
+				}
+
+				if (size & 0x1) {
+					pr_err(
+						"%s: load length(%x) is not 16bit aligned\n"
+						, __func__, size);
+					return -EINVAL;
+				}
+
+				util_pmem_memcpy(DDR_PHYS_TO_VIRT(
+							DDR_PFE_TO_PHYS(addr)),
+							data + offset, size);
+			}
+#endif
+			} else {
+				pr_err(
+					"%s: unsupported ddr section type(%x) for PE(%d)\n"
+						, __func__, type, id);
+				return -EINVAL;
+			}
+
+		} else {
+			memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data
+				+ offset, size);
+		}
+
+		break;
+
+	case SHT_NOBITS:
+		memset(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), 0, size);
+
+		break;
+
+	default:
+		pr_err("%s: unsupported section type(%x)\n", __func__,
+		       type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Loads an elf section into pe lmem
+ * Data needs to be at least 32bit aligned, NOBITS sections are correctly
+ * initialized to 0
+ *
+ * @param[in] id	PE identification (CLASS0_ID,..., CLASS5_ID)
+ * @param[in] data	pointer to the elf firmware
+ * @param[in] shdr	pointer to the elf section header
+ *
+ */
+static int pe_load_pe_lmem_section(int id, const void *data,
+				   struct elf32_shdr *shdr)
+{
+	u32 offset = be32_to_cpu(shdr->sh_offset);
+	u32 addr = be32_to_cpu(shdr->sh_addr);
+	u32 size = be32_to_cpu(shdr->sh_size);
+	u32 type = be32_to_cpu(shdr->sh_type);
+
+	if (id > CLASS_MAX_ID) {
+		pr_err(
+			"%s: unsupported pe-lmem section type(%x) for PE(%d)\n",
+			__func__, type, id);
+		return -EINVAL;
+	}
+
+	if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
+		pr_err(
+			"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
+			__func__, addr, (unsigned long)data + offset);
+
+		return -EINVAL;
+	}
+
+	if (addr & 0x3) {
+		pr_err("%s: load address(%x) is not 32bit aligned\n",
+		       __func__, addr);
+		return -EINVAL;
+	}
+
+	switch (type) {
+	case SHT_PROGBITS:
+		class_pe_lmem_memcpy_to32(addr, data + offset, size);
+		break;
+
+	case SHT_NOBITS:
+		class_pe_lmem_memset(addr, 0, size);
+		break;
+
+	default:
+		pr_err("%s: unsupported section type(%x)\n", __func__,
+		       type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
|
|
+/* Loads an elf section into a PE
+ * For now only supports loading a section to dmem (all PE's), pmem (class and
+ * tmu PE's),
+ * or DDR (util PE code)
+ *
+ * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID,
+ *			..., UTIL_ID)
+ * @param[in] data	pointer to the elf firmware
+ * @param[in] shdr	pointer to the elf section header
+ *
+ */
+int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
+			struct device *dev) {
+	u32 addr = be32_to_cpu(shdr->sh_addr);
+	u32 size = be32_to_cpu(shdr->sh_size);
+
+	if (IS_DMEM(addr, size))
+		return pe_load_dmem_section(id, data, shdr);
+	else if (IS_PMEM(addr, size))
+		return pe_load_pmem_section(id, data, shdr);
+	else if (IS_PFE_LMEM(addr, size))
+		return 0;
+	else if (IS_PHYS_DDR(addr, size))
+		return pe_load_ddr_section(id, data, shdr, dev);
+	else if (IS_PE_LMEM(addr, size))
+		return pe_load_pe_lmem_section(id, data, shdr);
+
+	pr_err("%s: unsupported memory range(%x)\n", __func__,
+	       addr);
+	return 0;
+}
|
|
+
+/**************************** BMU ***************************/
+
+/* Initializes a BMU block.
+ * @param[in] base	BMU block base address
+ * @param[in] cfg	BMU configuration
+ */
+void bmu_init(void *base, struct BMU_CFG *cfg)
+{
+	bmu_disable(base);
+
+	bmu_set_config(base, cfg);
+
+	bmu_reset(base);
+}
+
+/* Resets a BMU block.
+ * @param[in] base	BMU block base address
+ */
+void bmu_reset(void *base)
+{
+	writel(CORE_SW_RESET, base + BMU_CTRL);
+
+	/* Wait for self clear */
+	while (readl(base + BMU_CTRL) & CORE_SW_RESET)
+		;
+}
+
+/* Enables a BMU block.
+ * @param[in] base	BMU block base address
+ */
+void bmu_enable(void *base)
+{
+	writel(CORE_ENABLE, base + BMU_CTRL);
+}
+
+/* Disables a BMU block.
+ * @param[in] base	BMU block base address
+ */
+void bmu_disable(void *base)
+{
+	writel(CORE_DISABLE, base + BMU_CTRL);
+}
+
+/* Sets the configuration of a BMU block.
+ * @param[in] base	BMU block base address
+ * @param[in] cfg	BMU configuration
+ */
+void bmu_set_config(void *base, struct BMU_CFG *cfg)
+{
+	writel(cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
+	writel(cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
+	writel(cfg->size & 0xffff, base + BMU_BUF_SIZE);
+
+	/* Interrupts are never used */
+	writel(cfg->low_watermark, base + BMU_LOW_WATERMARK);
+	writel(cfg->high_watermark, base + BMU_HIGH_WATERMARK);
+	writel(0x0, base + BMU_INT_ENABLE);
+}
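
A hedged example of bringing up a BMU with the helpers above; the field values are placeholders (the real ones come from the PFE HW setup code), but the field names match what bmu_set_config() consumes:

	struct BMU_CFG cfg = {
		.baseaddr = pool_pfe_addr,	/* hypothetical buffer pool address */
		.count = 1024,			/* placeholder; encoding is hardware-defined */
		.size = 11,			/* placeholder; encoding is hardware-defined */
		.low_watermark = 10,
		.high_watermark = 15,
	};

	bmu_init(BMU1_BASE_ADDR, &cfg);		/* disable + configure + reset */
	bmu_enable(BMU1_BASE_ADDR);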
|
|
+
+/**************************** MTIP GEMAC ***************************/
+
+/* Enable Rx Checksum Engine. With this enabled, frames with bad IP,
+ * TCP or UDP checksums are discarded
+ *
+ * @param[in] base	GEMAC base address.
+ */
+void gemac_enable_rx_checksum_offload(void *base)
+{
+	/* No configuration found for this on the MTIP GEMAC */
+}
+
+/* Disable Rx Checksum Engine.
+ *
+ * @param[in] base	GEMAC base address.
+ */
+void gemac_disable_rx_checksum_offload(void *base)
+{
+	/* No configuration found for this on the MTIP GEMAC */
+}
+
|
|
+/* GEMAC set speed.
+ * @param[in] base	GEMAC base address
+ * @param[in] gem_speed	GEMAC speed (10, 100 or 1000 Mbps)
+ */
+void gemac_set_speed(void *base, enum mac_speed gem_speed)
+{
+	u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED;
+	u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T;
+
+	switch (gem_speed) {
+	case SPEED_10M:
+		rcr |= EMAC_RCNTRL_RMII_10T;
+		break;
+
+	case SPEED_1000M:
+		ecr |= EMAC_ECNTRL_SPEED;
+		break;
+
+	case SPEED_100M:
+	default:
+		/* already in 100M mode */
+		break;
+	}
+	writel(ecr, (base + EMAC_ECNTRL_REG));
+	writel(rcr, (base + EMAC_RCNTRL_REG));
+}
+
+/* GEMAC set duplex.
+ * @param[in] base	GEMAC base address
+ * @param[in] duplex	GEMAC duplex mode (Full, Half)
+ */
+void gemac_set_duplex(void *base, int duplex)
+{
+	if (duplex == DUPLEX_HALF) {
+		writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base
+			+ EMAC_TCNTRL_REG);
+		writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base
+			+ EMAC_RCNTRL_REG));
+	} else {
+		writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base
+			+ EMAC_TCNTRL_REG);
+		writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base
+			+ EMAC_RCNTRL_REG));
+	}
+}
|
|
+
+/* GEMAC set mode.
+ * @param[in] base	GEMAC base address
+ * @param[in] mode	GEMAC operation mode (MII, RMII, RGMII, SGMII)
+ */
+void gemac_set_mode(void *base, int mode)
+{
+	u32 val = readl(base + EMAC_RCNTRL_REG);
+
+	/* Remove loopback */
+	val &= ~EMAC_RCNTRL_LOOP;
+
+	/* Enable flow control and MII mode and terminate received CRC */
+	val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE | EMAC_RCNTRL_CRC_FWD);
+
+	writel(val, base + EMAC_RCNTRL_REG);
+}
+
+/* GEMAC enable function.
+ * @param[in] base	GEMAC base address
+ */
+void gemac_enable(void *base)
+{
+	writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base +
+		EMAC_ECNTRL_REG);
+}
+
+/* GEMAC disable function.
+ * @param[in] base	GEMAC base address
+ */
+void gemac_disable(void *base)
+{
+	writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base +
+		EMAC_ECNTRL_REG);
+}
+
+/* GEMAC TX disable function.
+ * @param[in] base	GEMAC base address
+ */
+void gemac_tx_disable(void *base)
+{
+	writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base +
+		EMAC_TCNTRL_REG);
+}
+
+void gemac_tx_enable(void *base)
+{
+	writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_GTS, base +
+		EMAC_TCNTRL_REG);
+}
+
+/* Sets the hash register of the MAC.
+ * This register is used for matching unicast and multicast frames.
+ *
+ * @param[in] base	GEMAC base address.
+ * @param[in] hash	64-bit hash to be configured.
+ */
+void gemac_set_hash(void *base, struct pfe_mac_addr *hash)
+{
+	writel(hash->bottom, base + EMAC_GALR);
+	writel(hash->top, base + EMAC_GAUR);
+}
+
+void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
+		      unsigned int entry_index)
+{
+	if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
+		return;
+
+	entry_index = entry_index - 1;
+	if (entry_index < 1) {
+		writel(htonl(address->bottom), base + EMAC_PHY_ADDR_LOW);
+		writel((htonl(address->top) | 0x8808), base +
+			EMAC_PHY_ADDR_HIGH);
+	} else {
+		writel(htonl(address->bottom), base + ((entry_index - 1) * 8)
+			+ EMAC_SMAC_0_0);
+		writel((htonl(address->top) | 0x8808), base + ((entry_index -
+			1) * 8) + EMAC_SMAC_0_1);
+	}
+}
+
+void gemac_clear_laddrN(void *base, unsigned int entry_index)
+{
+	if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
+		return;
+
+	entry_index = entry_index - 1;
+	if (entry_index < 1) {
+		writel(0, base + EMAC_PHY_ADDR_LOW);
+		writel(0, base + EMAC_PHY_ADDR_HIGH);
+	} else {
+		writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
+		writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
+	}
+}
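
How the six MAC bytes are packed into struct pfe_mac_addr is dictated by the MTIP register layout; the sketch below assumes bottom carries the first four bytes and top the last two (an assumption; verify against the callers in pfe_eth.c before relying on it):

	struct pfe_mac_addr addr;
	const u8 *mac = ndev->dev_addr;

	/* Assumed packing: bytes 0-3 in bottom, bytes 4-5 in top */
	addr.bottom = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
	addr.top = mac[4] | (mac[5] << 8);

	gemac_set_laddrN(priv->EMAC_baseaddr, &addr, 1);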
|
|
+
+/* Set the loopback mode of the MAC. This can be either no loopback for
+ * normal operation, local loopback through MAC internal loopback module or PHY
+ * loopback for external loopback through a PHY. This asserts the external
+ * loop pin.
+ *
+ * @param[in] base	GEMAC base address.
+ * @param[in] gem_loop	Loopback mode to be enabled. LB_LOCAL - MAC
+ *			Loopback,
+ *			LB_EXT - PHY Loopback.
+ */
+void gemac_set_loop(void *base, enum mac_loop gem_loop)
+{
+	pr_info("%s()\n", __func__);
+	writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base +
+		EMAC_RCNTRL_REG));
+}
+
+/* GEMAC promiscuous mode: allow all frames
+ * @param[in] base	GEMAC base address
+ */
+void gemac_enable_copy_all(void *base)
+{
+	writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base +
+		EMAC_RCNTRL_REG));
+}
+
+/* GEMAC disable promiscuous mode
+ * @param[in] base	GEMAC base address
+ */
+void gemac_disable_copy_all(void *base)
+{
+	writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base +
+		EMAC_RCNTRL_REG));
+}
+
+/* GEMAC allow broadcast function.
+ * @param[in] base	GEMAC base address
+ */
+void gemac_allow_broadcast(void *base)
+{
+	writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base +
+		EMAC_RCNTRL_REG);
+}
+
+/* GEMAC no broadcast function.
+ * @param[in] base	GEMAC base address
+ */
+void gemac_no_broadcast(void *base)
+{
+	writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base +
+		EMAC_RCNTRL_REG);
+}
+
+/* GEMAC enable 1536 rx function.
+ * @param[in] base	GEMAC base address
+ */
+void gemac_enable_1536_rx(void *base)
+{
+	/* Set 1536 as Maximum frame length */
+	writel((readl(base + EMAC_RCNTRL_REG) & PFE_RCR_MAX_FL_MASK)
+		| (1536 << 16), base + EMAC_RCNTRL_REG);
+}
+
+/* GEMAC set rx Max frame length.
+ * @param[in] base	GEMAC base address
+ * @param[in] mtu	new mtu
+ */
+void gemac_set_rx_max_fl(void *base, int mtu)
+{
+	/* Set mtu as Maximum frame length */
+	writel((readl(base + EMAC_RCNTRL_REG) & PFE_RCR_MAX_FL_MASK)
+		| (mtu << 16), base + EMAC_RCNTRL_REG);
+}
+
+/* GEMAC enable stacked vlan function.
+ * @param[in] base	GEMAC base address
+ */
+void gemac_enable_stacked_vlan(void *base)
+{
+	/* MTIP doesn't support stacked vlan */
+}
+
+/* GEMAC enable pause rx function.
+ * @param[in] base	GEMAC base address
+ */
+void gemac_enable_pause_rx(void *base)
+{
+	writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE,
+	       base + EMAC_RCNTRL_REG);
+}
+
+/* GEMAC disable pause rx function.
+ * @param[in] base	GEMAC base address
+ */
+void gemac_disable_pause_rx(void *base)
+{
+	writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE,
+	       base + EMAC_RCNTRL_REG);
+}
+
+/* GEMAC enable pause tx function.
+ * @param[in] base	GEMAC base address
+ */
+void gemac_enable_pause_tx(void *base)
+{
+	writel(EMAC_RX_SECTION_EMPTY_V, base + EMAC_RX_SECTION_EMPTY);
+}
+
+/* GEMAC disable pause tx function.
+ * @param[in] base	GEMAC base address
+ */
+void gemac_disable_pause_tx(void *base)
+{
+	writel(0x0, base + EMAC_RX_SECTION_EMPTY);
+}
+
+/* GEMAC wol configuration
+ * @param[in] base	GEMAC base address
+ * @param[in] wol_conf	WoL register configuration
+ */
+void gemac_set_wol(void *base, u32 wol_conf)
+{
+	u32 val = readl(base + EMAC_ECNTRL_REG);
+
+	if (wol_conf)
+		val |= (EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
+	else
+		val &= ~(EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
+	writel(val, base + EMAC_ECNTRL_REG);
+}
+
+/* Sets Gemac bus width
+ * @param[in] base	GEMAC base address
+ * @param[in] width	gemac bus width to be set possible values are 32/64/128
+ */
+void gemac_set_bus_width(void *base, int width)
+{
+}
+
+/* Sets Gemac configuration.
+ * @param[in] base	GEMAC base address
+ * @param[in] cfg	GEMAC configuration
+ */
+void gemac_set_config(void *base, struct gemac_cfg *cfg)
+{
+	/*GEMAC config taken from VLSI */
+	writel(0x00000004, base + EMAC_TFWR_STR_FWD);
+	writel(0x00000005, base + EMAC_RX_SECTION_FULL);
+
+	if (pfe_errata_a010897)
+		writel(0x0000076c, base + EMAC_TRUNC_FL);
+	else
+		writel(0x00003fff, base + EMAC_TRUNC_FL);
+
+	writel(0x00000030, base + EMAC_TX_SECTION_EMPTY);
+	writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG);
+
+	gemac_set_mode(base, cfg->mode);
+
+	gemac_set_speed(base, cfg->speed);
+
+	gemac_set_duplex(base, cfg->duplex);
+}
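
gemac_set_config() expects a struct gemac_cfg carrying the mode, speed and duplex fields used above. A minimal bring-up sketch under those assumptions (the mode value is a placeholder):

	struct gemac_cfg cfg = {
		.mode = GMII,		/* placeholder enum value */
		.speed = SPEED_1000M,
		.duplex = DUPLEX_FULL,
	};

	gemac_set_config(priv->EMAC_baseaddr, &cfg);
	gemac_allow_broadcast(priv->EMAC_baseaddr);
	gemac_enable(priv->EMAC_baseaddr);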
|
|
+
+/**************************** GPI ***************************/
+
+/* Initializes a GPI block.
+ * @param[in] base	GPI base address
+ * @param[in] cfg	GPI configuration
+ */
+void gpi_init(void *base, struct gpi_cfg *cfg)
+{
+	gpi_reset(base);
+
+	gpi_disable(base);
+
+	gpi_set_config(base, cfg);
+}
+
+/* Resets a GPI block.
+ * @param[in] base	GPI base address
+ */
+void gpi_reset(void *base)
+{
+	writel(CORE_SW_RESET, base + GPI_CTRL);
+}
+
+/* Enables a GPI block.
+ * @param[in] base	GPI base address
+ */
+void gpi_enable(void *base)
+{
+	writel(CORE_ENABLE, base + GPI_CTRL);
+}
+
+/* Disables a GPI block.
+ * @param[in] base	GPI base address
+ */
+void gpi_disable(void *base)
+{
+	writel(CORE_DISABLE, base + GPI_CTRL);
+}
+
+/* Sets the configuration of a GPI block.
+ * @param[in] base	GPI base address
+ * @param[in] cfg	GPI configuration
+ */
+void gpi_set_config(void *base, struct gpi_cfg *cfg)
+{
+	writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL),	base
+		+ GPI_LMEM_ALLOC_ADDR);
+	writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL),	base
+		+ GPI_LMEM_FREE_ADDR);
+	writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL),	base
+		+ GPI_DDR_ALLOC_ADDR);
+	writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),	base
+		+ GPI_DDR_FREE_ADDR);
+	writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
+	writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
+	writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
+	writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
+	writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
+	writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE);
+	writel((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE);
+
+	writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
+		GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
+	writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
+	writel(cfg->aseq_len, base + GPI_DTX_ASEQ);
+	writel(1, base + GPI_TOE_CHKSUM_EN);
+
+	if (cfg->mtip_pause_reg) {
+		writel(cfg->mtip_pause_reg, base + GPI_CSR_MTIP_PAUSE_REG);
+		writel(EGPI_PAUSE_TIME, base + GPI_TX_PAUSE_TIME);
+	}
+}
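
The GPI configuration structure consumed above carries the LMEM retry count, TMLF TX threshold, DMA assist sequence length and an optional MTIP pause register; a sketch with placeholder values:

	struct gpi_cfg cfg = {
		.lmem_rtry_cnt = 5,	/* placeholder */
		.tmlf_txthres = 0xBC,	/* placeholder */
		.aseq_len = 0x40,	/* placeholder */
		.mtip_pause_reg = 0,	/* 0: leave the pause wiring unset */
	};

	gpi_init(EGPI1_BASE_ADDR, &cfg);	/* reset + disable + configure */
	gpi_enable(EGPI1_BASE_ADDR);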
|
|
+
+/**************************** CLASSIFIER ***************************/
+
+/* Initializes CLASSIFIER block.
+ * @param[in] cfg	CLASSIFIER configuration
+ */
+void class_init(struct class_cfg *cfg)
+{
+	class_reset();
+
+	class_disable();
+
+	class_set_config(cfg);
+}
+
+/* Resets CLASSIFIER block.
+ *
+ */
+void class_reset(void)
+{
+	writel(CORE_SW_RESET, CLASS_TX_CTRL);
+}
+
+/* Enables all CLASS-PE's cores.
+ *
+ */
+void class_enable(void)
+{
+	writel(CORE_ENABLE, CLASS_TX_CTRL);
+}
+
+/* Disables all CLASS-PE's cores.
+ *
+ */
+void class_disable(void)
+{
+	writel(CORE_DISABLE, CLASS_TX_CTRL);
+}
+
+/*
+ * Sets the configuration of the CLASSIFIER block.
+ * @param[in] cfg	CLASSIFIER configuration
+ */
+void class_set_config(struct class_cfg *cfg)
+{
+	u32 val;
+
+	/* Initialize route table */
+	if (!cfg->resume)
+		memset(DDR_PHYS_TO_VIRT(cfg->route_table_baseaddr), 0, (1 <<
+			cfg->route_table_hash_bits) * CLASS_ROUTE_SIZE);
+
+#if !defined(LS1012A_PFE_RESET_WA)
+	writel(cfg->pe_sys_clk_ratio, CLASS_PE_SYS_CLK_RATIO);
+#endif
+
+	writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, CLASS_HDR_SIZE);
+	writel(LMEM_BUF_SIZE, CLASS_LMEM_BUF_SIZE);
+	writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
+		CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits),
+		CLASS_ROUTE_HASH_ENTRY_SIZE);
+	writel(HIF_PKT_CLASS_EN | HIF_PKT_OFFSET(sizeof(struct hif_hdr)),
+	       CLASS_HIF_PARSE);
+
+	val = HASH_CRC_PORT_IP | QB2BUS_LE;
+
+#if defined(CONFIG_IP_ALIGNED)
+	val |= IP_ALIGNED;
+#endif
+
+	/*
+	 * Class PE packet steering will only work if TOE mode, bridge fetch or
+	 * route fetch are enabled (see class/qb_fet.v). Route fetch would
+	 * trigger additional memory copies (likely from DDR because of hash
+	 * table size, which cannot be reduced because PE software still
+	 * relies on hash value computed in HW), so when not in TOE mode we
+	 * simply enable HW bridge fetch even though we don't use it.
+	 */
+	if (cfg->toe_mode)
+		val |= CLASS_TOE;
+	else
+		val |= HW_BRIDGE_FETCH;
+
+	writel(val, CLASS_ROUTE_MULTI);
+
+	writel(DDR_PHYS_TO_PFE(cfg->route_table_baseaddr),
+	       CLASS_ROUTE_TABLE_BASE);
+	writel(CLASS_PE0_RO_DM_ADDR0_VAL, CLASS_PE0_RO_DM_ADDR0);
+	writel(CLASS_PE0_RO_DM_ADDR1_VAL, CLASS_PE0_RO_DM_ADDR1);
+	writel(CLASS_PE0_QB_DM_ADDR0_VAL, CLASS_PE0_QB_DM_ADDR0);
+	writel(CLASS_PE0_QB_DM_ADDR1_VAL, CLASS_PE0_QB_DM_ADDR1);
+	writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), CLASS_TM_INQ_ADDR);
+
+	writel(23, CLASS_AFULL_THRES);
+	writel(23, CLASS_TSQ_FIFO_THRES);
+
+	writel(24, CLASS_MAX_BUF_CNT);
+	writel(24, CLASS_TSQ_MAX_CNT);
+}
|
|
+
|
|
+/**************************** TMU ***************************/
|
|
+
|
|
+void tmu_reset(void)
|
|
+{
|
|
+ writel(SW_RESET, TMU_CTRL);
|
|
+}
|
|
+
|
|
+/* Initializes TMU block.
|
|
+ * @param[in] cfg TMU configuration
|
|
+ */
|
|
+void tmu_init(struct tmu_cfg *cfg)
|
|
+{
|
|
+ int q, phyno;
|
|
+
|
|
+ tmu_disable(0xF);
|
|
+ mdelay(10);
|
|
+
|
|
+#if !defined(LS1012A_PFE_RESET_WA)
|
|
+ /* keep in soft reset */
|
|
+ writel(SW_RESET, TMU_CTRL);
|
|
+#endif
|
|
+ writel(0x3, TMU_SYS_GENERIC_CONTROL);
|
|
+ writel(750, TMU_INQ_WATERMARK);
|
|
+ writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR +
|
|
+ GPI_INQ_PKTPTR), TMU_PHY0_INQ_ADDR);
|
|
+ writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR +
|
|
+ GPI_INQ_PKTPTR), TMU_PHY1_INQ_ADDR);
|
|
+ writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR +
|
|
+ GPI_INQ_PKTPTR), TMU_PHY3_INQ_ADDR);
|
|
+ writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
|
|
+ writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
|
|
+ writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),
|
|
+ TMU_BMU_INQ_ADDR);
|
|
+
|
|
+ writel(0x3FF, TMU_TDQ0_SCH_CTRL); /*
|
|
+ * enabling all 10
|
|
+ * schedulers [9:0] of each TDQ
|
|
+ */
|
|
+ writel(0x3FF, TMU_TDQ1_SCH_CTRL);
|
|
+ writel(0x3FF, TMU_TDQ3_SCH_CTRL);
|
|
+
|
|
+#if !defined(LS1012A_PFE_RESET_WA)
|
|
+ writel(cfg->pe_sys_clk_ratio, TMU_PE_SYS_CLK_RATIO);
|
|
+#endif
|
|
+
|
|
+#if !defined(LS1012A_PFE_RESET_WA)
|
|
+ writel(DDR_PHYS_TO_PFE(cfg->llm_base_addr), TMU_LLM_BASE_ADDR);
|
|
+ /* Extra packet pointers will be stored from this address onwards */
|
|
+
|
|
+ writel(cfg->llm_queue_len, TMU_LLM_QUE_LEN);
|
|
+ writel(5, TMU_TDQ_IIFG_CFG);
|
|
+ writel(DDR_BUF_SIZE, TMU_BMU_BUF_SIZE);
|
|
+
|
|
+ writel(0x0, TMU_CTRL);
|
|
+
|
|
+ /* MEM init */
|
|
+ pr_info("%s: mem init\n", __func__);
|
|
+ writel(MEM_INIT, TMU_CTRL);
|
|
+
|
|
+ while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
|
|
+ ;
|
|
+
|
|
+ /* LLM init */
|
|
+ pr_info("%s: lmem init\n", __func__);
|
|
+ writel(LLM_INIT, TMU_CTRL);
|
|
+
|
|
+ while (!(readl(TMU_CTRL) & LLM_INIT_DONE))
|
|
+ ;
|
|
+#endif
|
|
+ /* set up each queue for tail drop */
|
|
+ for (phyno = 0; phyno < 4; phyno++) {
|
|
+ if (phyno == 2)
|
|
+ continue;
|
|
+ for (q = 0; q < 16; q++) {
|
|
+ u32 qdepth;
|
|
+
|
|
+ writel((phyno << 8) | q, TMU_TEQ_CTRL);
|
|
+ writel(1 << 22, TMU_TEQ_QCFG); /*Enable tail drop */
|
|
+
|
|
+ if (phyno == 3)
|
|
+ qdepth = DEFAULT_TMU3_QDEPTH;
|
|
+ else
|
|
+ qdepth = (q == 0) ? DEFAULT_Q0_QDEPTH :
|
|
+ DEFAULT_MAX_QDEPTH;
|
|
+
|
|
+ /* LOG: 68855 */
|
|
+ /*
|
|
+ * The following is a workaround for the reordered
|
|
+ * packet and BMU2 buffer leakage issue.
|
|
+ */
|
|
+ if (CHIP_REVISION() == 0)
|
|
+ qdepth = 31;
|
|
+
|
|
+ writel(qdepth << 18, TMU_TEQ_HW_PROB_CFG2);
|
|
+ writel(qdepth >> 14, TMU_TEQ_HW_PROB_CFG3);
|
|
+ }
|
|
+ }
|
|
+
|
|
+#ifdef CFG_LRO
|
|
+ /* Set TMU-3 queue 5 (LRO) in no-drop mode */
|
|
+ writel((3 << 8) | TMU_QUEUE_LRO, TMU_TEQ_CTRL);
|
|
+ writel(0, TMU_TEQ_QCFG);
|
|
+#endif
|
|
+
|
|
+ writel(0x05, TMU_TEQ_DISABLE_DROPCHK);
|
|
+
|
|
+ writel(0x0, TMU_CTRL);
|
|
+}
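+
+/*
+ * Note on the tail-drop depth writes above: judging by the shift pair
+ * (qdepth << 18 into TMU_TEQ_HW_PROB_CFG2, qdepth >> 14 into
+ * TMU_TEQ_HW_PROB_CFG3), the queue depth field starts at bit 18 of CFG2
+ * and spills its upper bits into CFG3, so both registers must be written
+ * for every (phyno, q) pair selected through TMU_TEQ_CTRL.
+ */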
+
+/* Enables TMU-PE cores.
+ * @param[in] pe_mask	TMU PE mask
+ */
+void tmu_enable(u32 pe_mask)
+{
+	writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
+}
+
+/* Disables TMU cores.
+ * @param[in] pe_mask	TMU PE mask
+ */
+void tmu_disable(u32 pe_mask)
+{
+	writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
+}
+
+/* This will return the tmu queue status
+ * @param[in] if_id	gem interface id or TMU index
+ * @return		returns the bit mask of busy queues, zero means all
+ *			queues are empty
+ */
+u32 tmu_qstatus(u32 if_id)
+{
+	return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
+		offsetof(struct pe_status, tmu_qstatus), 4));
+}
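+
+/*
+ * Illustrative use (not part of this patch): since tmu_qstatus() returns
+ * a bitmask of busy queues, a caller can poll it to wait for a GEMAC's
+ * queues to drain before taking the interface down, e.g.:
+ *
+ *	while (tmu_qstatus(if_id) != 0)
+ *		usleep_range(10, 20);
+ */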
+
+u32 tmu_pkts_processed(u32 if_id)
+{
+	return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
+		offsetof(struct pe_status, rx), 4));
+}
+
+/**************************** UTIL ***************************/
+
+/* Resets UTIL block.
+ */
+void util_reset(void)
+{
+	writel(CORE_SW_RESET, UTIL_TX_CTRL);
+}
+
+/* Initializes UTIL block.
+ * @param[in] cfg	UTIL configuration
+ */
+void util_init(struct util_cfg *cfg)
+{
+	writel(cfg->pe_sys_clk_ratio, UTIL_PE_SYS_CLK_RATIO);
+}
+
+/* Enables UTIL-PE core.
+ *
+ */
+void util_enable(void)
+{
+	writel(CORE_ENABLE, UTIL_TX_CTRL);
+}
+
+/* Disables UTIL-PE core.
+ *
+ */
+void util_disable(void)
+{
+	writel(CORE_DISABLE, UTIL_TX_CTRL);
+}
+
+/**************************** HIF ***************************/
+/* Initializes HIF copy block.
+ *
+ */
+void hif_init(void)
+{
+	/* Initialize HIF registers */
+	writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE,
+	       HIF_POLL_CTRL);
+}
+
+/* Enable hif tx DMA and interrupt
+ *
+ */
+void hif_tx_enable(void)
+{
+	writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
+	writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN),
+	       HIF_INT_ENABLE);
+}
+
+/* Disable hif tx DMA and interrupt
+ *
+ */
+void hif_tx_disable(void)
+{
+	u32 hif_int;
+
+	writel(0, HIF_TX_CTRL);
+
+	hif_int = readl(HIF_INT_ENABLE);
+	hif_int &= ~HIF_TXPKT_INT_EN;
+	writel(hif_int, HIF_INT_ENABLE);
+}
+
+/* Enable hif rx DMA and interrupt
+ *
+ */
+void hif_rx_enable(void)
+{
+	hif_rx_dma_start();
+	writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN),
+	       HIF_INT_ENABLE);
+}
+
+/* Disable hif rx DMA and interrupt
+ *
+ */
+void hif_rx_disable(void)
+{
+	u32 hif_int;
+
+	writel(0, HIF_RX_CTRL);
+
+	hif_int = readl(HIF_INT_ENABLE);
+	hif_int &= ~HIF_RXPKT_INT_EN;
+	writel(hif_int, HIF_INT_ENABLE);
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hif.c
@@ -0,0 +1,1060 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+
+#include <linux/io.h>
+#include <asm/irq.h>
+
+#include "pfe_mod.h"
+
+#define HIF_INT_MASK	(HIF_INT | HIF_RXPKT_INT | HIF_TXPKT_INT)
+
+unsigned char napi_first_batch;
+
+static void pfe_tx_do_cleanup(unsigned long data);
+
+static int pfe_hif_alloc_descr(struct pfe_hif *hif)
+{
+	void *addr;
+	dma_addr_t dma_addr;
+	int err = 0;
+
+	pr_info("%s\n", __func__);
+	addr = dma_alloc_coherent(pfe->dev,
+				  HIF_RX_DESC_NT * sizeof(struct hif_desc) +
+				  HIF_TX_DESC_NT * sizeof(struct hif_desc),
+				  &dma_addr, GFP_KERNEL);
+
+	if (!addr) {
+		pr_err("%s: Could not allocate buffer descriptors!\n",
+		       __func__);
+		err = -ENOMEM;
+		goto err0;
+	}
+
+	hif->descr_baseaddr_p = dma_addr;
+	hif->descr_baseaddr_v = addr;
+	hif->rx_ring_size = HIF_RX_DESC_NT;
+	hif->tx_ring_size = HIF_TX_DESC_NT;
+
+	return 0;
+
+err0:
+	return err;
+}
+
+#if defined(LS1012A_PFE_RESET_WA)
+static void pfe_hif_disable_rx_desc(struct pfe_hif *hif)
+{
+	int ii;
+	struct hif_desc *desc = hif->rx_base;
+
+	/* Mark all descriptors as LAST_BD */
+	for (ii = 0; ii < hif->rx_ring_size; ii++) {
+		desc->ctrl |= BD_CTRL_LAST_BD;
+		desc++;
+	}
+}
+
+struct class_rx_hdr_t {
+	u32 next_ptr;	/* ptr to the start of the first DDR buffer */
+	u16 length;	/* total packet length */
+	u16 phyno;	/* input physical port number */
+	u32 status;	/* gemac status bits */
+	u32 status2;	/* reserved for software usage */
+};
+
+/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
+ * except overflow
+ */
+#define STATUS_BAD_FRAME_ERR		BIT(16)
+#define STATUS_LENGTH_ERR		BIT(17)
+#define STATUS_CRC_ERR			BIT(18)
+#define STATUS_TOO_SHORT_ERR		BIT(19)
+#define STATUS_TOO_LONG_ERR		BIT(20)
+#define STATUS_CODE_ERR			BIT(21)
+#define STATUS_MC_HASH_MATCH		BIT(22)
+#define STATUS_CUMULATIVE_ARC_HIT	BIT(23)
+#define STATUS_UNICAST_HASH_MATCH	BIT(24)
+#define STATUS_IP_CHECKSUM_CORRECT	BIT(25)
+#define STATUS_TCP_CHECKSUM_CORRECT	BIT(26)
+#define STATUS_UDP_CHECKSUM_CORRECT	BIT(27)
+#define STATUS_OVERFLOW_ERR		BIT(28) /* GPI error */
+#define MIN_PKT_SIZE			64
+
+static inline void copy_to_lmem(u32 *dst, u32 *src, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i += sizeof(u32)) {
+		*dst = htonl(*src);
+		dst++; src++;
+	}
+}
+
+static void send_dummy_pkt_to_hif(void)
+{
+	void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
+	u32 physaddr;
+	struct class_rx_hdr_t local_hdr;
+	static u32 dummy_pkt[] =  {
+		0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
+		0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
+		0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
+		0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };
+
+	ddr_ptr = (void *)((u64)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL));
+	if (!ddr_ptr)
+		return;
+
+	lmem_ptr = (void *)((u64)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL));
+	if (!lmem_ptr)
+		return;
+
+	pr_info("Sending a dummy pkt to HIF %p %p\n", ddr_ptr, lmem_ptr);
+	physaddr = (u32)DDR_VIRT_TO_PFE(ddr_ptr);
+
+	lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long int)lmem_ptr);
+
+	local_hdr.phyno = htons(0); /* RX_PHY_0 */
+	local_hdr.length = htons(MIN_PKT_SIZE);
+
+	local_hdr.next_ptr = htonl((u32)physaddr);
+	/* Mark checksum as correct */
+	local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
+				STATUS_UDP_CHECKSUM_CORRECT |
+				STATUS_TCP_CHECKSUM_CORRECT |
+				STATUS_UNICAST_HASH_MATCH |
+				STATUS_CUMULATIVE_ARC_HIT));
+	copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
+		     sizeof(local_hdr));
+
+	copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
+		     0x40);
+
+	writel((unsigned long int)lmem_ptr, CLASS_INQ_PKTPTR);
+}
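+
+/*
+ * The dummy packet above is the LS1012A reset workaround: a BMU2 (DDR)
+ * buffer and a BMU1 (LMEM) buffer are allocated straight from the
+ * hardware allocators, a fake class_rx_hdr_t with "checksum correct"
+ * status bits is written in front of a canned 64-byte ARP-like frame,
+ * and the LMEM pointer is pushed into CLASS_INQ_PKTPTR so the class PE
+ * forwards one packet to HIF. pfe_hif_rx_idle() below uses this to kick
+ * the HIF Rx BDP until it reports idle.
+ */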
+
+void pfe_hif_rx_idle(struct pfe_hif *hif)
+{
+	int hif_stop_loop = 10;
+	u32 rx_status;
+
+	pfe_hif_disable_rx_desc(hif);
+	pr_info("Bringing hif to idle state...");
+	writel(0, HIF_INT_ENABLE);
+	/* If HIF Rx BDP is busy send a dummy packet */
+	do {
+		rx_status = readl(HIF_RX_STATUS);
+		if (rx_status & BDP_CSR_RX_DMA_ACTV)
+			send_dummy_pkt_to_hif();
+
+		usleep_range(100, 150);
+	} while (--hif_stop_loop);
+
+	if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
+		pr_info("Failed\n");
+	else
+		pr_info("Done\n");
+}
+#endif
+
+static void pfe_hif_free_descr(struct pfe_hif *hif)
+{
+	pr_info("%s\n", __func__);
+
+	dma_free_coherent(pfe->dev,
+			  hif->rx_ring_size * sizeof(struct hif_desc) +
+			  hif->tx_ring_size * sizeof(struct hif_desc),
+			  hif->descr_baseaddr_v, hif->descr_baseaddr_p);
+}
+
+void pfe_hif_desc_dump(struct pfe_hif *hif)
+{
+	struct hif_desc *desc;
+	unsigned long desc_p;
+	int ii = 0;
+
+	pr_info("%s\n", __func__);
+
+	desc = hif->rx_base;
+	desc_p = (u32)((u64)desc - (u64)hif->descr_baseaddr_v +
+			hif->descr_baseaddr_p);
+
+	pr_info("HIF Rx desc base %p physical %x\n", desc, (u32)desc_p);
+	for (ii = 0; ii < hif->rx_ring_size; ii++) {
+		pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
+			readl(&desc->status), readl(&desc->ctrl),
+			readl(&desc->data), readl(&desc->next));
+		desc++;
+	}
+
+	desc = hif->tx_base;
+	desc_p = ((u64)desc - (u64)hif->descr_baseaddr_v +
+			hif->descr_baseaddr_p);
+
+	pr_info("HIF Tx desc base %p physical %x\n", desc, (u32)desc_p);
+	for (ii = 0; ii < hif->tx_ring_size; ii++) {
+		pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
+			readl(&desc->status), readl(&desc->ctrl),
+			readl(&desc->data), readl(&desc->next));
+		desc++;
+	}
+}
+
+/* pfe_hif_release_buffers */
+static void pfe_hif_release_buffers(struct pfe_hif *hif)
+{
+	struct hif_desc *desc;
+	int i = 0;
+
+	hif->rx_base = hif->descr_baseaddr_v;
+
+	pr_info("%s\n", __func__);
+
+	/* Free Rx buffers */
+	desc = hif->rx_base;
+	for (i = 0; i < hif->rx_ring_size; i++) {
+		if (readl(&desc->data)) {
+			if ((i < hif->shm->rx_buf_pool_cnt) &&
+			    (!hif->shm->rx_buf_pool[i])) {
+				dma_unmap_single(hif->dev,
+						 DDR_PFE_TO_PHYS(
+						 readl(&desc->data)),
+						 hif->rx_buf_len[i],
+						 DMA_FROM_DEVICE);
+				hif->shm->rx_buf_pool[i] = hif->rx_buf_addr[i];
+			} else {
+				pr_err("%s: buffer pool already full\n",
+				       __func__);
+			}
+		}
+
+		writel(0, &desc->data);
+		writel(0, &desc->status);
+		writel(0, &desc->ctrl);
+		desc++;
+	}
+}
+
+/*
+ * pfe_hif_init_buffers
+ * This function initializes the HIF Rx/Tx ring descriptors and
+ * initializes the Rx queue with buffers.
+ */
+static int pfe_hif_init_buffers(struct pfe_hif *hif)
+{
+	struct hif_desc *desc, *first_desc_p;
+	u32 data;
+	int i = 0;
+
+	pr_info("%s\n", __func__);
+
+	/* Check enough Rx buffers are available in the shared memory */
+	if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
+		return -ENOMEM;
+
+	hif->rx_base = hif->descr_baseaddr_v;
+	memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
+
+	/* Initialize Rx descriptors */
+	desc = hif->rx_base;
+	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
+
+	for (i = 0; i < hif->rx_ring_size; i++) {
+		/* Initialize Rx buffers from the shared memory */
+
+		data = (u32)dma_map_single(hif->dev, hif->shm->rx_buf_pool[i],
+					   pfe_pkt_size, DMA_FROM_DEVICE);
+		hif->rx_buf_addr[i] = hif->shm->rx_buf_pool[i];
+		hif->rx_buf_len[i] = pfe_pkt_size;
+		hif->shm->rx_buf_pool[i] = NULL;
+
+		if (likely(dma_mapping_error(hif->dev, data) == 0)) {
+			writel(DDR_PHYS_TO_PFE(data), &desc->data);
+		} else {
+			pr_err("%s : low on mem\n", __func__);
+
+			goto err;
+		}
+
+		writel(0, &desc->status);
+
+		/*
+		 * Ensure everything else is written to DDR before
+		 * writing bd->ctrl
+		 */
+		wmb();
+
+		writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
+			| BD_CTRL_DIR | BD_CTRL_DESC_EN
+			| BD_BUF_LEN(pfe_pkt_size)), &desc->ctrl);
+
+		/* Chain descriptors */
+		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
+		desc++;
+	}
+
+	/* Overwrite last descriptor to chain it to the first one */
+	desc--;
+	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
+
+	hif->rxtoclean_index = 0;
+
+	/* Initialize Rx buffer descriptor ring base address */
+	writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
+
+	hif->tx_base = hif->rx_base + hif->rx_ring_size;
+	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
+			hif->rx_ring_size;
+	memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
+
+	/* Initialize Tx descriptors */
+	desc = hif->tx_base;
+
+	for (i = 0; i < hif->tx_ring_size; i++) {
+		/* Chain descriptors */
+		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
+		writel(0, &desc->ctrl);
+		desc++;
+	}
+
+	/* Overwrite last descriptor to chain it to the first one */
+	desc--;
+	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
+	hif->txavail = hif->tx_ring_size;
+	hif->txtosend = 0;
+	hif->txtoclean = 0;
+	hif->txtoflush = 0;
+
+	/* Initialize Tx buffer descriptor ring base address */
+	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
+
+	return 0;
+
+err:
+	pfe_hif_release_buffers(hif);
+	return -ENOMEM;
+}
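+
+/*
+ * Ring layout note: pfe_hif_alloc_descr() gets one coherent block that
+ * holds the Rx descriptors followed by the Tx descriptors; the loops
+ * above chain each ring circularly through PFE bus addresses and hand
+ * the base of each ring to HIF_RX_BDP_ADDR/HIF_TX_BDP_ADDR. The
+ * "& (ring_size - 1)" index arithmetic used throughout relies on
+ * HIF_RX_DESC_NT and HIF_TX_DESC_NT being powers of two.
+ */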
+
+/*
+ * pfe_hif_client_register
+ *
+ * This function is used to register a client driver with the HIF driver.
+ *
+ * Return value:
+ * 0 - on successful registration
+ */
+static int pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
+				   struct hif_client_shm *client_shm)
+{
+	struct hif_client *client = &hif->client[client_id];
+	u32 i, cnt;
+	struct rx_queue_desc *rx_qbase;
+	struct tx_queue_desc *tx_qbase;
+	struct hif_rx_queue *rx_queue;
+	struct hif_tx_queue *tx_queue;
+	int err = 0;
+
+	pr_info("%s\n", __func__);
+
+	spin_lock_bh(&hif->tx_lock);
+
+	if (test_bit(client_id, &hif->shm->g_client_status[0])) {
+		pr_err("%s: client %d already registered\n",
+		       __func__, client_id);
+		err = -1;
+		goto unlock;
+	}
+
+	memset(client, 0, sizeof(struct hif_client));
+
+	/* Initialize client Rx queues baseaddr, size */
+
+	cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
+	/* Check if client is requesting more queues than supported */
+	if (cnt > HIF_CLIENT_QUEUES_MAX)
+		cnt = HIF_CLIENT_QUEUES_MAX;
+
+	client->rx_qn = cnt;
+	rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
+	for (i = 0; i < cnt; i++) {
+		rx_queue = &client->rx_q[i];
+		rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
+		rx_queue->size = client_shm->rx_qsize;
+		rx_queue->write_idx = 0;
+	}
+
+	/* Initialize client Tx queues baseaddr, size */
+	cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
+
+	/* Check if client is requesting more queues than supported */
+	if (cnt > HIF_CLIENT_QUEUES_MAX)
+		cnt = HIF_CLIENT_QUEUES_MAX;
+
+	client->tx_qn = cnt;
+	tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
+	for (i = 0; i < cnt; i++) {
+		tx_queue = &client->tx_q[i];
+		tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
+		tx_queue->size = client_shm->tx_qsize;
+		tx_queue->ack_idx = 0;
+	}
+
+	set_bit(client_id, &hif->shm->g_client_status[0]);
+
+unlock:
+	spin_unlock_bh(&hif->tx_lock);
+
+	return err;
+}
+
+/*
+ * pfe_hif_client_unregister
+ *
+ * This function is used to unregister a client from the HIF driver.
+ *
+ */
+static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
+{
+	pr_info("%s\n", __func__);
+
+	/*
+	 * Mark client as no longer available (which prevents further packet
+	 * receive for this client)
+	 */
+	spin_lock_bh(&hif->tx_lock);
+
+	if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
+		pr_err("%s: client %d not registered\n", __func__,
+		       client_id);
+
+		spin_unlock_bh(&hif->tx_lock);
+		return;
+	}
+
+	clear_bit(client_id, &hif->shm->g_client_status[0]);
+
+	spin_unlock_bh(&hif->tx_lock);
+}
+
+/*
+ * client_put_rxpacket-
+ * This function puts the Rx pkt in the given client Rx queue.
+ * It actually swaps the Rx pkt into the client Rx descriptor buffer
+ * and returns the free buffer from it.
+ *
+ * If the function returns NULL it means the client Rx queue is full and
+ * the packet couldn't be sent to the client queue.
+ */
+static void *client_put_rxpacket(struct hif_rx_queue *queue, void *pkt, u32 len,
+				 u32 flags, u32 client_ctrl, u32 *rem_len)
+{
+	void *free_pkt = NULL;
+	struct rx_queue_desc *desc = queue->base + queue->write_idx;
+
+	if (readl(&desc->ctrl) & CL_DESC_OWN) {
+		if (page_mode) {
+			int rem_page_size = PAGE_SIZE -
+					PRESENT_OFST_IN_PAGE(pkt);
+			int cur_pkt_size = ROUND_MIN_RX_SIZE(len +
+					pfe_pkt_headroom);
+			*rem_len = (rem_page_size - cur_pkt_size);
+			if (*rem_len) {
+				free_pkt = pkt + cur_pkt_size;
+				get_page(virt_to_page(free_pkt));
+			} else {
+				free_pkt = (void *)__get_free_page(GFP_ATOMIC |
+						GFP_DMA_PFE);
+				*rem_len = pfe_pkt_size;
+			}
+		} else {
+			free_pkt = kmalloc(PFE_BUF_SIZE, GFP_ATOMIC |
+					   GFP_DMA_PFE);
+			*rem_len = PFE_BUF_SIZE - pfe_pkt_headroom;
+		}
+
+		if (free_pkt) {
+			desc->data = pkt;
+			desc->client_ctrl = client_ctrl;
+			/*
+			 * Ensure everything else is written to DDR before
+			 * writing bd->ctrl
+			 */
+			smp_wmb();
+			writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
+			queue->write_idx = (queue->write_idx + 1)
+					   & (queue->size - 1);
+
+			free_pkt += pfe_pkt_headroom;
+		}
+	}
+
+	return free_pkt;
+}
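+
+/*
+ * Buffer-swap note: a client Rx slot is free when CL_DESC_OWN is set.
+ * The function above hands the received buffer to the client and
+ * returns a replacement buffer for the HIF ring: in page_mode it carves
+ * the next ROUND_MIN_RX_SIZE() chunk out of the same page (taking a
+ * page reference) and only allocates a fresh page once the current one
+ * is used up; otherwise it kmalloc()s a full PFE_BUF_SIZE buffer. The
+ * returned pointer is advanced past pfe_pkt_headroom.
+ */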
+
+/*
+ * pfe_hif_rx_process-
+ * This function does pfe hif rx queue processing.
+ * Dequeue packet from Rx queue and send it to corresponding client queue
+ */
+static int pfe_hif_rx_process(struct pfe_hif *hif, int budget)
+{
+	struct hif_desc *desc;
+	struct hif_hdr *pkt_hdr;
+	struct __hif_hdr hif_hdr;
+	void *free_buf;
+	int rtc, len, rx_processed = 0;
+	struct __hif_desc local_desc;
+	int flags;
+	unsigned int desc_p;
+	unsigned int buf_size = 0;
+
+	spin_lock_bh(&hif->lock);
+
+	rtc = hif->rxtoclean_index;
+
+	while (rx_processed < budget) {
+		desc = hif->rx_base + rtc;
+
+		__memcpy12(&local_desc, desc);
+
+		/* ACK pending Rx interrupt */
+		if (local_desc.ctrl & BD_CTRL_DESC_EN) {
+			writel(HIF_INT | HIF_RXPKT_INT, HIF_INT_SRC);
+
+			if (rx_processed == 0) {
+				if (napi_first_batch == 1) {
+					desc_p = hif->descr_baseaddr_p +
+					((unsigned long int)(desc) -
+					(unsigned long
+					int)hif->descr_baseaddr_v);
+					napi_first_batch = 0;
+				}
+			}
+
+			__memcpy12(&local_desc, desc);
+
+			if (local_desc.ctrl & BD_CTRL_DESC_EN)
+				break;
+		}
+
+		napi_first_batch = 0;
+
+#ifdef HIF_NAPI_STATS
+		hif->napi_counters[NAPI_DESC_COUNT]++;
+#endif
+		len = BD_BUF_LEN(local_desc.ctrl);
+		dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
+				 hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
+
+		pkt_hdr = (struct hif_hdr *)hif->rx_buf_addr[rtc];
+
+		/* Track last HIF header received */
+		if (!hif->started) {
+			hif->started = 1;
+
+			__memcpy8(&hif_hdr, pkt_hdr);
+
+			hif->qno = hif_hdr.hdr.q_num;
+			hif->client_id = hif_hdr.hdr.client_id;
+			hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
+						hif_hdr.hdr.client_ctrl;
+			flags = CL_DESC_FIRST;
+
+		} else {
+			flags = 0;
+		}
+
+		if (local_desc.ctrl & BD_CTRL_LIFM)
+			flags |= CL_DESC_LAST;
+
+		/* Check for a valid client id that is still registered */
+		if ((hif->client_id >= HIF_CLIENTS_MAX) ||
+		    !(test_bit(hif->client_id,
+			&hif->shm->g_client_status[0]))) {
+			printk_ratelimited("%s: packet with invalid client id %d q_num %d\n",
+					   __func__,
+					   hif->client_id,
+					   hif->qno);
+
+			free_buf = pkt_hdr;
+
+			goto pkt_drop;
+		}
+
+		/* Check for a valid queue number */
+		if (hif->client[hif->client_id].rx_qn <= hif->qno) {
+			pr_info("%s: packet with invalid queue: %d\n",
+				__func__, hif->qno);
+			hif->qno = 0;
+		}
+
+		free_buf =
+		client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
+				    (void *)pkt_hdr, len, flags,
+				    hif->client_ctrl, &buf_size);
+
+		hif_lib_indicate_client(hif->client_id, EVENT_RX_PKT_IND,
+					hif->qno);
+
+		if (unlikely(!free_buf)) {
+#ifdef HIF_NAPI_STATS
+			hif->napi_counters[NAPI_CLIENT_FULL_COUNT]++;
+#endif
+			/*
+			 * If we want to keep in polling mode to retry later,
+			 * we need to tell napi that we consumed
+			 * the full budget or we will hit a livelock scenario.
+			 * The core code keeps this napi instance
+			 * at the head of the list and none of the other
+			 * instances get to run
+			 */
+			rx_processed = budget;
+
+			if (flags & CL_DESC_FIRST)
+				hif->started = 0;
+
+			break;
+		}
+
+pkt_drop:
+		/* Fill free buffer in the descriptor */
+		hif->rx_buf_addr[rtc] = free_buf;
+		hif->rx_buf_len[rtc] = min(pfe_pkt_size, buf_size);
+		writel((DDR_PHYS_TO_PFE
+			((u32)dma_map_single(hif->dev,
+			free_buf, hif->rx_buf_len[rtc], DMA_FROM_DEVICE))),
+			&desc->data);
+		/*
+		 * Ensure everything else is written to DDR before
+		 * writing bd->ctrl
+		 */
+		wmb();
+		writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
+			BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
+			&desc->ctrl);
+
+		rtc = (rtc + 1) & (hif->rx_ring_size - 1);
+
+		if (local_desc.ctrl & BD_CTRL_LIFM) {
+			if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) {
+				rx_processed++;
+
+#ifdef HIF_NAPI_STATS
+				hif->napi_counters[NAPI_PACKET_COUNT]++;
+#endif
+			}
+			hif->started = 0;
+		}
+	}
+
+	hif->rxtoclean_index = rtc;
+	spin_unlock_bh(&hif->lock);
+
+	/* we made some progress, re-start rx dma in case it stopped */
+	hif_rx_dma_start();
+
+	return rx_processed;
+}
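+
+/*
+ * Note on the double __memcpy12() above: the descriptor is re-read
+ * after acking HIF_RXPKT_INT because the DMA may have completed and
+ * cleared BD_CTRL_DESC_EN between the first snapshot and the ack; only
+ * if the enable bit is still set on the second read is the ring truly
+ * empty and the loop stops.
+ */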
+
+/*
+ * client_ack_txpacket-
+ * This function acks the Tx packet in the given client Tx queue by
+ * resetting the ownership bit in the descriptor.
+ */
+static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
+			       unsigned int q_no)
+{
+	struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
+	struct tx_queue_desc *desc = queue->base + queue->ack_idx;
+
+	if (readl(&desc->ctrl) & CL_DESC_OWN) {
+		writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
+		queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);
+
+		return 0;
+
+	} else {
+		/* This should not happen */
+		pr_err("%s: %d %d %d %d %d %p %d\n", __func__,
+		       hif->txtosend, hif->txtoclean, hif->txavail,
+		       client_id, q_no, queue, queue->ack_idx);
+		WARN(1, "%s: doesn't own this descriptor", __func__);
+		return 1;
+	}
+}
+
+void __hif_tx_done_process(struct pfe_hif *hif, int count)
+{
+	struct hif_desc *desc;
+	struct hif_desc_sw *desc_sw;
+	int ttc, tx_avl;
+	int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
+
+	ttc = hif->txtoclean;
+	tx_avl = hif->txavail;
+
+	while ((tx_avl < hif->tx_ring_size) && count--) {
+		desc = hif->tx_base + ttc;
+
+		if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
+			break;
+
+		desc_sw = &hif->tx_sw_queue[ttc];
+
+		if (desc_sw->data) {
+			dma_unmap_single(hif->dev, desc_sw->data,
+					 desc_sw->len, DMA_TO_DEVICE);
+		}
+
+		if (desc_sw->client_id >= HIF_CLIENTS_MAX)
+			pr_err("Invalid cl id %d\n", desc_sw->client_id);
+
+		pkts_done[desc_sw->client_id]++;
+
+		client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
+
+		ttc = (ttc + 1) & (hif->tx_ring_size - 1);
+		tx_avl++;
+	}
+
+	if (pkts_done[0])
+		hif_lib_indicate_client(0, EVENT_TXDONE_IND, 0);
+	if (pkts_done[1])
+		hif_lib_indicate_client(1, EVENT_TXDONE_IND, 0);
+
+	hif->txtoclean = ttc;
+	hif->txavail = tx_avl;
+
+	if (!count) {
+		tasklet_schedule(&hif->tx_cleanup_tasklet);
+	} else {
+		/* Enable Tx done interrupt */
+		writel(readl_relaxed(HIF_INT_ENABLE) | HIF_TXPKT_INT,
+		       HIF_INT_ENABLE);
+	}
+}
+
+static void pfe_tx_do_cleanup(unsigned long data)
+{
+	struct pfe_hif *hif = (struct pfe_hif *)data;
+
+	writel(HIF_INT | HIF_TXPKT_INT, HIF_INT_SRC);
+
+	hif_tx_done_process(hif, 64);
+}
+
+/*
+ * __hif_xmit_pkt -
+ * This function puts one packet in the HIF Tx queue
+ */
+void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
+			q_no, void *data, u32 len, unsigned int flags)
+{
+	struct hif_desc *desc;
+	struct hif_desc_sw *desc_sw;
+
+	desc = hif->tx_base + hif->txtosend;
+	desc_sw = &hif->tx_sw_queue[hif->txtosend];
+
+	desc_sw->len = len;
+	desc_sw->client_id = client_id;
+	desc_sw->q_no = q_no;
+	desc_sw->flags = flags;
+
+	if (flags & HIF_DONT_DMA_MAP) {
+		desc_sw->data = 0;
+		writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
+	} else {
+		desc_sw->data = dma_map_single(hif->dev, data, len,
+					       DMA_TO_DEVICE);
+		writel((u32)DDR_PHYS_TO_PFE(desc_sw->data), &desc->data);
+	}
+
+	hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
+	hif->txavail--;
+
+	if ((!((flags & HIF_DATA_VALID) && (flags &
+	      HIF_LAST_BUFFER))))
+		goto skip_tx;
+
+	/*
+	 * Ensure everything else is written to DDR before
+	 * writing bd->ctrl
+	 */
+	wmb();
+
+	do {
+		desc_sw = &hif->tx_sw_queue[hif->txtoflush];
+		desc = hif->tx_base + hif->txtoflush;
+
+		if (desc_sw->flags & HIF_LAST_BUFFER) {
+			writel((BD_CTRL_LIFM |
+			       BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
+			       | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
+			       BD_CTRL_PKT_INT_EN | BD_BUF_LEN(desc_sw->len)),
+			       &desc->ctrl);
+		} else {
+			writel((BD_CTRL_DESC_EN |
+				BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
+		}
+		hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
+	} while (hif->txtoflush != hif->txtosend);
+
+skip_tx:
+	return;
+}
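+
+/*
+ * Deferred-enable note: __hif_xmit_pkt() fills descriptors for every
+ * fragment but leaves BD_CTRL_DESC_EN clear until a complete packet
+ * (HIF_DATA_VALID | HIF_LAST_BUFFER) has been queued; the flush loop
+ * then walks txtoflush..txtosend, enabling the BDs and marking the last
+ * fragment with BD_CTRL_LIFM plus a Tx-done interrupt, so the HIF DMA
+ * never sees a half-built packet.
+ */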
+
+static irqreturn_t wol_isr(int irq, void *dev_id)
+{
+	pr_info("WoL\n");
+	gemac_set_wol(EMAC1_BASE_ADDR, 0);
+	gemac_set_wol(EMAC2_BASE_ADDR, 0);
+	return IRQ_HANDLED;
+}
+
+/*
+ * hif_isr-
+ * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
+ */
+static irqreturn_t hif_isr(int irq, void *dev_id)
+{
+	struct pfe_hif *hif = (struct pfe_hif *)dev_id;
+	int int_status;
+	int int_enable_mask;
+
+	/* Read hif interrupt source register */
+	int_status = readl_relaxed(HIF_INT_SRC);
+	int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
+
+	if ((int_status & HIF_INT) == 0)
+		return IRQ_NONE;
+
+	int_status &= ~(HIF_INT);
+
+	if (int_status & HIF_RXPKT_INT) {
+		int_status &= ~(HIF_RXPKT_INT);
+		int_enable_mask &= ~(HIF_RXPKT_INT);
+
+		napi_first_batch = 1;
+
+		if (napi_schedule_prep(&hif->napi)) {
+#ifdef HIF_NAPI_STATS
+			hif->napi_counters[NAPI_SCHED_COUNT]++;
+#endif
+			__napi_schedule(&hif->napi);
+		}
+	}
+
+	if (int_status & HIF_TXPKT_INT) {
+		int_status &= ~(HIF_TXPKT_INT);
+		int_enable_mask &= ~(HIF_TXPKT_INT);
+		/* Schedule tx cleanup tasklet */
+		tasklet_schedule(&hif->tx_cleanup_tasklet);
+	}
+
+	/* Disable interrupts, they will be enabled after they are serviced */
+	writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
+
+	if (int_status) {
+		pr_info("%s : Invalid interrupt : %d\n", __func__,
+			int_status);
+		writel(int_status, HIF_INT_SRC);
+	}
+
+	return IRQ_HANDLED;
+}
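+
+/*
+ * Interrupt scheme: hif_isr() acks HIF_INT, masks the Rx/Tx packet
+ * sources it is about to service, then defers the real work, Rx to NAPI
+ * (pfe_hif_rx_poll) and Tx completion to the cleanup tasklet. Each
+ * deferred path re-enables its own interrupt bit once it runs out of
+ * work (see pfe_hif_rx_poll() and __hif_tx_done_process()).
+ */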
+
+void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2)
+{
+	unsigned int client_id = data1;
+
+	if (client_id >= HIF_CLIENTS_MAX) {
+		pr_err("%s: client id %d out of bounds\n", __func__,
+		       client_id);
+		return;
+	}
+
+	switch (req) {
+	case REQUEST_CL_REGISTER:
+		/* Request to register a client */
+		pr_info("%s: register client_id %d\n",
+			__func__, client_id);
+		pfe_hif_client_register(hif, client_id, (struct
+			hif_client_shm *)&hif->shm->client[client_id]);
+		break;
+
+	case REQUEST_CL_UNREGISTER:
+		pr_info("%s: unregister client_id %d\n",
+			__func__, client_id);
+
+		/* Request to unregister a client */
+		pfe_hif_client_unregister(hif, client_id);
+
+		break;
+
+	default:
+		pr_err("%s: unsupported request %d\n",
+		       __func__, req);
+		break;
+	}
+
+	/*
+	 * Process client Tx queues
+	 * Currently we don't have checking for tx pending
+	 */
+}
+
+/*
+ * pfe_hif_rx_poll
+ * This function is the NAPI poll function to process HIF Rx queue.
+ */
+static int pfe_hif_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct pfe_hif *hif = container_of(napi, struct pfe_hif, napi);
+	int work_done;
+
+#ifdef HIF_NAPI_STATS
+	hif->napi_counters[NAPI_POLL_COUNT]++;
+#endif
+
+	work_done = pfe_hif_rx_process(hif, budget);
+
+	if (work_done < budget) {
+		napi_complete(napi);
+		writel(readl_relaxed(HIF_INT_ENABLE) | HIF_RXPKT_INT,
+		       HIF_INT_ENABLE);
+	}
+#ifdef HIF_NAPI_STATS
+	else
+		hif->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
+#endif
+
+	return work_done;
+}
+
+/*
+ * pfe_hif_init
+ * This function initializes the base addresses, irq, etc.
+ */
+int pfe_hif_init(struct pfe *pfe)
+{
+	struct pfe_hif *hif = &pfe->hif;
+	int err;
+
+	pr_info("%s\n", __func__);
+
+	hif->dev = pfe->dev;
+	hif->irq = pfe->hif_irq;
+
+	err = pfe_hif_alloc_descr(hif);
+	if (err)
+		goto err0;
+
+	if (pfe_hif_init_buffers(hif)) {
+		pr_err("%s: Could not initialize buffer descriptors\n",
+		       __func__);
+		err = -ENOMEM;
+		goto err1;
+	}
+
+	/* Initialize NAPI for Rx processing */
+	init_dummy_netdev(&hif->dummy_dev);
+	netif_napi_add(&hif->dummy_dev, &hif->napi, pfe_hif_rx_poll,
+		       HIF_RX_POLL_WEIGHT);
+	napi_enable(&hif->napi);
+
+	spin_lock_init(&hif->tx_lock);
+	spin_lock_init(&hif->lock);
+
+	hif_init();
+	hif_rx_enable();
+	hif_tx_enable();
+
+	/* Enable all HIF interrupts */
+	writel(HIF_INT_MASK, HIF_INT_ENABLE);
+
+	gpi_enable(HGPI_BASE_ADDR);
+
+	err = request_irq(hif->irq, hif_isr, 0, "pfe_hif", hif);
+	if (err) {
+		pr_err("%s: failed to get the hif IRQ = %d\n",
+		       __func__, hif->irq);
+		goto err1;
+	}
+
+	err = request_irq(pfe->wol_irq, wol_isr, 0, "pfe_wol", pfe);
+	if (err) {
+		pr_err("%s: failed to get the wol IRQ = %d\n",
+		       __func__, pfe->wol_irq);
+		goto err1;
+	}
+
+	tasklet_init(&hif->tx_cleanup_tasklet,
+		     (void(*)(unsigned long))pfe_tx_do_cleanup,
+		     (unsigned long)hif);
+
+	return 0;
+err1:
+	pfe_hif_free_descr(hif);
+err0:
+	return err;
+}
+
+/* pfe_hif_exit- */
+void pfe_hif_exit(struct pfe *pfe)
+{
+	struct pfe_hif *hif = &pfe->hif;
+
+	pr_info("%s\n", __func__);
+
+	tasklet_kill(&hif->tx_cleanup_tasklet);
+
+	spin_lock_bh(&hif->lock);
+	hif->shm->g_client_status[0] = 0;
+	/* Make sure all clients are disabled */
+	hif->shm->g_client_status[1] = 0;
+
+	spin_unlock_bh(&hif->lock);
+
+	/* Disable Rx/Tx */
+	gpi_disable(HGPI_BASE_ADDR);
+	hif_rx_disable();
+	hif_tx_disable();
+
+	napi_disable(&hif->napi);
+	netif_napi_del(&hif->napi);
+
+	free_irq(pfe->wol_irq, pfe);
+	free_irq(hif->irq, hif);
+
+	pfe_hif_release_buffers(hif);
+	pfe_hif_free_descr(hif);
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hif.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_HIF_H_
+#define _PFE_HIF_H_
+
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+
+#define HIF_NAPI_STATS
+
+#define HIF_CLIENT_QUEUES_MAX	16
+#define HIF_RX_POLL_WEIGHT	64
+
+#define HIF_RX_PKT_MIN_SIZE	0x800 /* 2KB */
+#define HIF_RX_PKT_MIN_SIZE_MASK	~(HIF_RX_PKT_MIN_SIZE - 1)
+#define ROUND_MIN_RX_SIZE(_sz)	(((_sz) + (HIF_RX_PKT_MIN_SIZE - 1)) \
+				 & HIF_RX_PKT_MIN_SIZE_MASK)
+#define PRESENT_OFST_IN_PAGE(_buf)	(((unsigned long int)(_buf) & (PAGE_SIZE \
+					- 1)) & HIF_RX_PKT_MIN_SIZE_MASK)
+
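+/*
+ * Worked example (assuming PAGE_SIZE 4096): with HIF_RX_PKT_MIN_SIZE
+ * 0x800, ROUND_MIN_RX_SIZE(0x654) = (0x654 + 0x7ff) & ~0x7ff = 0x800,
+ * i.e. buffer lengths are rounded up to 2KB slots, and
+ * PRESENT_OFST_IN_PAGE() yields the 2KB-aligned offset of a buffer
+ * within its page (0x0 or 0x800), which client_put_rxpacket() uses to
+ * work out how much of the page is still free.
+ */
+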
+enum {
+	NAPI_SCHED_COUNT = 0,
+	NAPI_POLL_COUNT,
+	NAPI_PACKET_COUNT,
+	NAPI_DESC_COUNT,
+	NAPI_FULL_BUDGET_COUNT,
+	NAPI_CLIENT_FULL_COUNT,
+	NAPI_MAX_COUNT
+};
+
+/*
+ * HIF_TX_DESC_NT value should always be greater than 4,
+ * otherwise HIF_TX_POLL_MARK will become zero.
+ */
+#define HIF_RX_DESC_NT		256
+#define HIF_TX_DESC_NT		2048
+
+#define HIF_FIRST_BUFFER	BIT(0)
+#define HIF_LAST_BUFFER		BIT(1)
+#define HIF_DONT_DMA_MAP	BIT(2)
+#define HIF_DATA_VALID		BIT(3)
+#define HIF_TSO			BIT(4)
+
+enum {
+	PFE_CL_GEM0 = 0,
+	PFE_CL_GEM1,
+	HIF_CLIENTS_MAX
+};
+
+/* structure to store client queue info */
+struct hif_rx_queue {
+	struct rx_queue_desc *base;
+	u32 size;
+	u32 write_idx;
+};
+
+struct hif_tx_queue {
+	struct tx_queue_desc *base;
+	u32 size;
+	u32 ack_idx;
+};
+
+/* Structure to store the client info */
+struct hif_client {
+	int rx_qn;
+	struct hif_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
+	int tx_qn;
+	struct hif_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
+};
+
+/* HIF hardware buffer descriptor */
+struct hif_desc {
+	u32 ctrl;
+	u32 status;
+	u32 data;
+	u32 next;
+};
+
+struct __hif_desc {
+	u32 ctrl;
+	u32 status;
+	u32 data;
+};
+
+struct hif_desc_sw {
+	dma_addr_t data;
+	u16 len;
+	u8 client_id;
+	u8 q_no;
+	u16 flags;
+};
+
+struct hif_hdr {
+	u8 client_id;
+	u8 q_num;
+	u16 client_ctrl;
+	u16 client_ctrl1;
+};
+
+struct __hif_hdr {
+	union {
+		struct hif_hdr hdr;
+		u32 word[2];
+	};
+};
+
+struct hif_ipsec_hdr {
+	u16 sa_handle[2];
+} __packed;
+
+/* HIF_CTRL_TX... defines */
+#define HIF_CTRL_TX_CHECKSUM		BIT(2)
+
+/* HIF_CTRL_RX... defines */
+#define HIF_CTRL_RX_OFFSET_OFST		(24)
+#define HIF_CTRL_RX_CHECKSUMMED		BIT(2)
+#define HIF_CTRL_RX_CONTINUED		BIT(1)
+
+struct pfe_hif {
+	/* To store registered clients in hif layer */
+	struct hif_client client[HIF_CLIENTS_MAX];
+	struct hif_shm *shm;
+	int irq;
+
+	void *descr_baseaddr_v;
+	unsigned long descr_baseaddr_p;
+
+	struct hif_desc *rx_base;
+	u32 rx_ring_size;
+	u32 rxtoclean_index;
+	void *rx_buf_addr[HIF_RX_DESC_NT];
+	int rx_buf_len[HIF_RX_DESC_NT];
+	unsigned int qno;
+	unsigned int client_id;
+	unsigned int client_ctrl;
+	unsigned int started;
+
+	struct hif_desc *tx_base;
+	u32 tx_ring_size;
+	u32 txtosend;
+	u32 txtoclean;
+	u32 txavail;
+	u32 txtoflush;
+	struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT];
+
+/* tx_lock synchronizes hif packet tx as well as pfe_hif structure access */
+	spinlock_t tx_lock;
+/* lock synchronizes hif rx queue processing */
+	spinlock_t lock;
+	struct net_device dummy_dev;
+	struct napi_struct napi;
+	struct device *dev;
+
+#ifdef HIF_NAPI_STATS
+	unsigned int napi_counters[NAPI_MAX_COUNT];
+#endif
+	struct tasklet_struct tx_cleanup_tasklet;
+};
+
+void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
+			q_no, void *data, u32 len, unsigned int flags);
+int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no,
+		 void *data, unsigned int len);
+void __hif_tx_done_process(struct pfe_hif *hif, int count);
+void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
+				data2);
+int pfe_hif_init(struct pfe *pfe);
+void pfe_hif_exit(struct pfe *pfe);
+void pfe_hif_rx_idle(struct pfe_hif *hif);
+static inline void hif_tx_done_process(struct pfe_hif *hif, int count)
+{
+	spin_lock_bh(&hif->tx_lock);
+	__hif_tx_done_process(hif, count);
+	spin_unlock_bh(&hif->tx_lock);
+}
+
+static inline void hif_tx_lock(struct pfe_hif *hif)
+{
+	spin_lock_bh(&hif->tx_lock);
+}
+
+static inline void hif_tx_unlock(struct pfe_hif *hif)
+{
+	spin_unlock_bh(&hif->tx_lock);
+}
+
+static inline int __hif_tx_avail(struct pfe_hif *hif)
+{
+	return hif->txavail;
+}
+
+#define __memcpy8(dst, src)		memcpy(dst, src, 8)
+#define __memcpy12(dst, src)		memcpy(dst, src, 12)
+#define __memcpy(dst, src, len)		memcpy(dst, src, len)
+
+#endif /* _PFE_HIF_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/moduleparam.h>
+#include <linux/cpu.h>
+
+#include "pfe_mod.h"
+#include "pfe_hif.h"
+#include "pfe_hif_lib.h"
+
+unsigned int lro_mode;
+unsigned int page_mode;
+unsigned int tx_qos = 1;
+module_param(tx_qos, uint, 0444);
+MODULE_PARM_DESC(tx_qos, "0: disable,\n"
+	"1: enable (default), guarantee no packet drop at TMU level\n");
+unsigned int pfe_pkt_size;
+unsigned int pfe_pkt_headroom;
+unsigned int emac_txq_cnt;
+
+/*
+ * @pfe_hal_lib.c.
+ * Common functions used by HIF client drivers
+ */
+
+/* HIF shared memory Global variable */
+struct hif_shm ghif_shm;
+
+/* Cleanup the HIF shared memory, release HIF rx_buffer_pool.
+ * This function should be called after pfe_hif_exit
+ *
+ * @param[in] hif_shm		Shared memory address location in DDR
+ */
+static void pfe_hif_shm_clean(struct hif_shm *hif_shm)
+{
+	int i;
+	void *pkt;
+
+	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+		pkt = hif_shm->rx_buf_pool[i];
+		if (pkt) {
+			hif_shm->rx_buf_pool[i] = NULL;
+			pkt -= pfe_pkt_headroom;
+
+			if (page_mode)
+				put_page(virt_to_page(pkt));
+			else
+				kfree(pkt);
+		}
+	}
+}
+
+/* Initialize shared memory used between HIF driver and clients,
+ * allocate rx_buffer_pool required for HIF Rx descriptors.
+ * This function should be called before initializing HIF driver.
+ *
+ * @param[in] hif_shm		Shared memory address location in DDR
+ * @return	0 - on success, <0 on failure to initialize
+ */
+static int pfe_hif_shm_init(struct hif_shm *hif_shm)
+{
+	int i;
+	void *pkt;
+
+	memset(hif_shm, 0, sizeof(struct hif_shm));
+	hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
+
+	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+		if (page_mode) {
+			pkt = (void *)__get_free_page(GFP_KERNEL |
+				GFP_DMA_PFE);
+		} else {
+			pkt = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE);
+		}
+
+		if (pkt)
+			hif_shm->rx_buf_pool[i] = pkt + pfe_pkt_headroom;
+		else
+			goto err0;
+	}
+
+	return 0;
+
+err0:
+	pr_err("%s Low memory\n", __func__);
+	pfe_hif_shm_clean(hif_shm);
+	return -ENOMEM;
+}
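+
+/*
+ * Headroom convention: every pointer stored in rx_buf_pool[] is offset
+ * by pfe_pkt_headroom past the start of the underlying page/kmalloc
+ * area, which is why pfe_hif_shm_clean() subtracts pfe_pkt_headroom
+ * again before handing the buffer back to put_page()/kfree().
+ */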
+
+/* This function sends an indication to the HIF driver
+ *
+ * @param[in] hif	hif context
+ */
+static void hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int
+					data2)
+{
+	hif_process_client_req(hif, req, data1, data2);
+}
+
+void hif_lib_indicate_client(int client_id, int event_type, int qno)
+{
+	struct hif_client_s *client = pfe->hif_client[client_id];
+
+	if (!client || (event_type >= HIF_EVENT_MAX) || (qno >=
+		HIF_CLIENT_QUEUES_MAX))
+		return;
+
+	if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
+		client->event_handler(client->priv, event_type, qno);
+}
+
+/* This function releases Rx queue descriptors memory and pre-filled buffers
+ *
+ * @param[in] client	hif_client context
+ */
+static void hif_lib_client_release_rx_buffers(struct hif_client_s *client)
+{
+	struct rx_queue_desc *desc;
+	int qno, ii;
+	void *buf;
+
+	for (qno = 0; qno < client->rx_qn; qno++) {
+		desc = client->rx_q[qno].base;
+
+		for (ii = 0; ii < client->rx_q[qno].size; ii++) {
+			buf = (void *)desc->data;
+			if (buf) {
+				buf -= pfe_pkt_headroom;
+
+				if (page_mode)
+					free_page((unsigned long)buf);
+				else
+					kfree(buf);
+
+				desc->ctrl = 0;
+			}
+
+			desc++;
+		}
+	}
+
+	kfree(client->rx_qbase);
+}
+
+/* This function allocates memory for the rxq descriptors and pre-fills rx
+ * queues with buffers.
+ * @param[in] client	client context
+ * @param[in] q_size	size of the rxQ, all queues are of same size
+ */
+static int hif_lib_client_init_rx_buffers(struct hif_client_s *client, int
+						q_size)
+{
+	struct rx_queue_desc *desc;
+	struct hif_client_rx_queue *queue;
+	int ii, qno;
+
+	/* Allocate memory for the client queues */
+	client->rx_qbase = kzalloc(client->rx_qn * q_size * sizeof(struct
+				rx_queue_desc), GFP_KERNEL);
+	if (!client->rx_qbase)
+		goto err;
+
+	for (qno = 0; qno < client->rx_qn; qno++) {
+		queue = &client->rx_q[qno];
+
+		queue->base = client->rx_qbase + qno * q_size * sizeof(struct
+				rx_queue_desc);
+		queue->size = q_size;
+		queue->read_idx = 0;
+		queue->write_idx = 0;
+
+		pr_debug("rx queue: %d, base: %p, size: %d\n", qno,
+			 queue->base, queue->size);
+	}
+
+	for (qno = 0; qno < client->rx_qn; qno++) {
+		queue = &client->rx_q[qno];
+		desc = queue->base;
+
+		for (ii = 0; ii < queue->size; ii++) {
+			desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) |
+					CL_DESC_OWN;
+			desc++;
+		}
+	}
+
+	return 0;
+
+err:
+	return 1;
+}
+
+static void hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
+{
+	pr_debug("%s\n", __func__);
+
+	/*
+	 * Check if there are any pending packets. Clients must flush the tx
+	 * queues before unregistering, by calling
+	 * hif_lib_tx_get_next_complete().
+	 *
+	 * HIF no longer calls the event handler since we are no longer
+	 * registered.
+	 */
+	if (queue->tx_pending)
+		pr_err("%s: pending transmit packets\n", __func__);
+}
+
+static void hif_lib_client_release_tx_buffers(struct hif_client_s *client)
+{
+	int qno;
+
+	pr_debug("%s\n", __func__);
+
+	for (qno = 0; qno < client->tx_qn; qno++)
+		hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
+
+	kfree(client->tx_qbase);
+}
+
+static int hif_lib_client_init_tx_buffers(struct hif_client_s *client, int
+						q_size)
+{
+	struct hif_client_tx_queue *queue;
+	int qno;
+
+	client->tx_qbase = kzalloc(client->tx_qn * q_size * sizeof(struct
+					tx_queue_desc), GFP_KERNEL);
+	if (!client->tx_qbase)
+		return 1;
+
+	for (qno = 0; qno < client->tx_qn; qno++) {
+		queue = &client->tx_q[qno];
+
+		queue->base = client->tx_qbase + qno * q_size * sizeof(struct
+				tx_queue_desc);
+		queue->size = q_size;
+		queue->read_idx = 0;
+		queue->write_idx = 0;
+		queue->tx_pending = 0;
+		queue->nocpy_flag = 0;
+		queue->prev_tmu_tx_pkts = 0;
+		queue->done_tmu_tx_pkts = 0;
+
+		pr_debug("tx queue: %d, base: %p, size: %d\n", qno,
+			 queue->base, queue->size);
+	}
+
+	return 0;
+}
+
+static int hif_lib_event_dummy(void *priv, int event_type, int qno)
+{
+	return 0;
+}
+
+int hif_lib_client_register(struct hif_client_s *client)
+{
+	struct hif_shm *hif_shm;
+	struct hif_client_shm *client_shm;
+	int err, i;
+
+	pr_debug("%s\n", __func__);
+
+	/* Allocate memory before taking the spinlock */
+	if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
+		err = -ENOMEM;
+		goto err_rx;
+	}
+
+	if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
+		err = -ENOMEM;
+		goto err_tx;
+	}
+
+	spin_lock_bh(&pfe->hif.lock);
+	if (!(client->pfe) || (client->id >= HIF_CLIENTS_MAX) ||
+	    (pfe->hif_client[client->id])) {
+		err = -EINVAL;
+		goto err;
+	}
+
+	hif_shm = client->pfe->hif.shm;
+
+	if (!client->event_handler)
+		client->event_handler = hif_lib_event_dummy;
+
+	/* Initialize client specific shared memory */
+	client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
+	client_shm->rx_qbase = (unsigned long int)client->rx_qbase;
+	client_shm->rx_qsize = client->rx_qsize;
+	client_shm->tx_qbase = (unsigned long int)client->tx_qbase;
+	client_shm->tx_qsize = client->tx_qsize;
+	client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
+				(client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
+	/* spin_lock_init(&client->rx_lock); */
+
+	/* By default all events are unmasked */
+	for (i = 0; i < HIF_EVENT_MAX; i++)
+		client->queue_mask[i] = 0;
+
+	/* Indicate to HIF driver */
+	hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_REGISTER, client->id, 0);
+
+	pr_debug("%s: client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d\n",
+		 __func__, client, client->id, client->tx_qsize,
+		 client->rx_qsize);
+
+	client->cpu_id = -1;
+
+	pfe->hif_client[client->id] = client;
+	spin_unlock_bh(&pfe->hif.lock);
+
+	return 0;
+
+err:
+	spin_unlock_bh(&pfe->hif.lock);
+	hif_lib_client_release_tx_buffers(client);
+
+err_tx:
+	hif_lib_client_release_rx_buffers(client);
+
+err_rx:
+	return err;
+}
+
+int hif_lib_client_unregister(struct hif_client_s *client)
+{
+	struct pfe *pfe = client->pfe;
+	u32 client_id = client->id;
+
+	pr_info(
+	"%s : client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d\n",
+	__func__, client, client->id, client->tx_qsize,
+	client->rx_qsize);
+
+	spin_lock_bh(&pfe->hif.lock);
+	hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
+
+	hif_lib_client_release_tx_buffers(client);
+	hif_lib_client_release_rx_buffers(client);
+	pfe->hif_client[client_id] = NULL;
+	spin_unlock_bh(&pfe->hif.lock);
+
+	return 0;
+}
+
+int hif_lib_event_handler_start(struct hif_client_s *client, int event,
+				int qno)
+{
+	struct hif_client_rx_queue *queue = &client->rx_q[qno];
+	struct rx_queue_desc *desc = queue->base + queue->read_idx;
+
+	if ((event >= HIF_EVENT_MAX) || (qno >= HIF_CLIENT_QUEUES_MAX)) {
+		pr_debug("%s: Unsupported event : %d  queue number : %d\n",
+			 __func__, event, qno);
+		return -1;
+	}
+
+	test_and_clear_bit(qno, &client->queue_mask[event]);
+
+	switch (event) {
+	case EVENT_RX_PKT_IND:
+		if (!(desc->ctrl & CL_DESC_OWN))
+			hif_lib_indicate_client(client->id,
+						EVENT_RX_PKT_IND, qno);
+		break;
+
+	case EVENT_HIGH_RX_WM:
+	case EVENT_TXDONE_IND:
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * This function gets one packet from the specified client queue.
+ * It also refills the rx buffer.
+ */
+void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
+				*ofst, unsigned int *rx_ctrl,
+				unsigned int *desc_ctrl, void **priv_data)
+{
+	struct hif_client_rx_queue *queue = &client->rx_q[qno];
+	struct rx_queue_desc *desc;
+	void *pkt = NULL;
+
+	/*
+	 * The following lock protects rx queue access from
+	 * hif_lib_event_handler_start.
+	 * In general this lock is not required, because hif_lib_xmit_pkt and
+	 * hif_lib_event_handler_start are called from napi poll, which is
+	 * not re-entrant. But if some client uses them in a different way,
+	 * this lock is required.
+	 */
+	/* spin_lock_irqsave(&client->rx_lock, flags); */
+	desc = queue->base + queue->read_idx;
+	if (!(desc->ctrl & CL_DESC_OWN)) {
+		pkt = desc->data - pfe_pkt_headroom;
+
+		*rx_ctrl = desc->client_ctrl;
+		*desc_ctrl = desc->ctrl;
+
+		if (desc->ctrl & CL_DESC_FIRST) {
+			u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST;
+
+			if (size) {
+				size += PFE_PARSE_INFO_SIZE;
+				*len = CL_DESC_BUF_LEN(desc->ctrl) -
+						PFE_PKT_HEADER_SZ - size;
+				*ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ
+						+ size;
+				*priv_data = desc->data + PFE_PKT_HEADER_SZ;
+			} else {
+				*len = CL_DESC_BUF_LEN(desc->ctrl) -
+					PFE_PKT_HEADER_SZ - PFE_PARSE_INFO_SIZE;
+				*ofst = pfe_pkt_headroom
+					+ PFE_PKT_HEADER_SZ
+					+ PFE_PARSE_INFO_SIZE;
+				*priv_data = NULL;
+			}
+
+		} else {
+			*len = CL_DESC_BUF_LEN(desc->ctrl);
+			*ofst = pfe_pkt_headroom;
+		}
+
+		/*
+		 * Needed so we don't free a buffer/page
+		 * twice on module_exit
+		 */
+		desc->data = NULL;
+
+		/*
+		 * Ensure everything else is written to DDR before
+		 * writing bd->ctrl
+		 */
+		smp_wmb();
+
+		desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
+		queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
+	}
+
+	/* spin_unlock_irqrestore(&client->rx_lock, flags); */
+	return pkt;
+}
+
+static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
+					client_id, unsigned int qno,
+					u32 client_ctrl)
+{
+	/* Optimize the write since the destination may be non-cacheable */
+	if (!((unsigned long)pkt_hdr & 0x3)) {
+		((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
+					client_id;
+	} else {
+		((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
+		((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
+	}
+}
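+
+/*
+ * The HIF header written above packs, in its first 32 bits: client_id
+ * in byte 0, queue number in byte 1 and client_ctrl in the upper 16
+ * bits. When pkt_hdr is 4-byte aligned a single 32-bit store is used;
+ * otherwise the same layout is written as two 16-bit halves.
+ */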
+
+/* This function puts the given packet in the specific client queue */
+void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
+				*data, unsigned int len, u32 client_ctrl,
+				unsigned int flags, void *client_data)
+{
+	struct hif_client_tx_queue *queue = &client->tx_q[qno];
+	struct tx_queue_desc *desc = queue->base + queue->write_idx;
+
+	/* First buffer */
+	if (flags & HIF_FIRST_BUFFER) {
+		data -= sizeof(struct hif_hdr);
+		len += sizeof(struct hif_hdr);
+
+		hif_hdr_write(data, client->id, qno, client_ctrl);
+	}
+
+	desc->data = client_data;
+	desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
+
+	__hif_xmit_pkt(&pfe->hif, client->id, qno, data, len, flags);
+
+	queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);
+	queue->tx_pending++;
+	queue->jiffies_last_packet = jiffies;
+}
+
+void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
+				   unsigned int *flags, int count)
+{
+	struct hif_client_tx_queue *queue = &client->tx_q[qno];
+	struct tx_queue_desc *desc = queue->base + queue->read_idx;
+
+	pr_debug("%s: qno : %d rd_indx: %d pending:%d\n", __func__, qno,
+		 queue->read_idx, queue->tx_pending);
+
+	if (!queue->tx_pending)
+		return NULL;
+
+	if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
+		u32 tmu_tx_pkts = be32_to_cpu(pe_dmem_read(TMU0_ID +
+			client->id, TMU_DM_TX_TRANS, 4));
+
+		if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
+			queue->done_tmu_tx_pkts = UINT_MAX -
+				queue->prev_tmu_tx_pkts + tmu_tx_pkts;
+		else
+			queue->done_tmu_tx_pkts = tmu_tx_pkts -
+						queue->prev_tmu_tx_pkts;
+
+		queue->prev_tmu_tx_pkts = tmu_tx_pkts;
+
+		if (!queue->done_tmu_tx_pkts)
+			return NULL;
+	}
+
+	if (desc->ctrl & CL_DESC_OWN)
+		return NULL;
+
+	queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
+	queue->tx_pending--;
+
+	*flags = CL_DESC_GET_FLAGS(desc->ctrl);
+
+	if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
+		queue->done_tmu_tx_pkts--;
+
+	return desc->data;
+}
+
+static void hif_lib_tmu_credit_init(struct pfe *pfe)
+{
+	int i, q;
+
+	for (i = 0; i < NUM_GEMAC_SUPPORT; i++)
+		for (q = 0; q < emac_txq_cnt; q++) {
+			pfe->tmu_credit.tx_credit_max[i][q] = (q == 0) ?
+				DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH;
+			pfe->tmu_credit.tx_credit[i][q] =
+				pfe->tmu_credit.tx_credit_max[i][q];
+		}
+}
+
+/* __hif_lib_update_credit
+ *
+ * @param[in] client	hif client context
+ * @param[in] queue	queue number in match with TMU
+ */
+void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue)
+{
+	unsigned int tmu_tx_packets, tmp;
+
+	if (tx_qos) {
+		tmu_tx_packets = be32_to_cpu(pe_dmem_read(TMU0_ID +
+			client->id, (TMU_DM_TX_TRANS + (queue * 4)), 4));
+
+		/* tx_packets counter overflowed */
+		if (tmu_tx_packets >
+		    pfe->tmu_credit.tx_packets[client->id][queue]) {
+			tmp = UINT_MAX - tmu_tx_packets +
+				pfe->tmu_credit.tx_packets[client->id][queue];
+
+			pfe->tmu_credit.tx_credit[client->id][queue] =
+			pfe->tmu_credit.tx_credit_max[client->id][queue] - tmp;
+		} else {
+			/* TMU tx <= pfe_eth tx, normal case or both counters
+			 * overflowed since last time
+			 */
+			pfe->tmu_credit.tx_credit[client->id][queue] =
+			pfe->tmu_credit.tx_credit_max[client->id][queue] -
+			(pfe->tmu_credit.tx_packets[client->id][queue] -
+			tmu_tx_packets);
+		}
+	}
+}
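+
+/*
+ * Credit arithmetic example (illustrative values only): tx_packets
+ * appears to count frames the driver handed to the TMU and
+ * tmu_tx_packets frames the TMU has actually sent. If tx_packets has
+ * wrapped to 5 while the TMU counter still reads 4294967290, the first
+ * branch computes the in-flight count as
+ * UINT_MAX - 4294967290 + 5 = 10, so with tx_credit_max = 490 the
+ * queue is left with 480 credits; the else branch is the unwrapped
+ * case, tx_credit_max - (tx_packets - tmu_tx_packets).
+ */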
|
|
+
|
|
+int pfe_hif_lib_init(struct pfe *pfe)
|
|
+{
|
|
+ int rc;
|
|
+
|
|
+ pr_info("%s\n", __func__);
|
|
+
|
|
+ if (lro_mode) {
|
|
+ page_mode = 1;
|
|
+ pfe_pkt_size = min(PAGE_SIZE, MAX_PFE_PKT_SIZE);
|
|
+ pfe_pkt_headroom = 0;
|
|
+ } else {
|
|
+ page_mode = 0;
|
|
+ pfe_pkt_size = PFE_PKT_SIZE;
|
|
+ pfe_pkt_headroom = PFE_PKT_HEADROOM;
|
|
+ }
|
|
+
|
|
+ if (tx_qos)
|
|
+ emac_txq_cnt = EMAC_TXQ_CNT / 2;
|
|
+ else
|
|
+ emac_txq_cnt = EMAC_TXQ_CNT;
|
|
+
|
|
+ hif_lib_tmu_credit_init(pfe);
|
|
+ pfe->hif.shm = &ghif_shm;
|
|
+ rc = pfe_hif_shm_init(pfe->hif.shm);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+void pfe_hif_lib_exit(struct pfe *pfe)
|
|
+{
|
|
+ pr_info("%s\n", __func__);
|
|
+
|
|
+ pfe_hif_shm_clean(pfe->hif.shm);
|
|
+}
|
|
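The reclaim path above pairs with the enqueue path: a client queues buffers through __hif_lib_xmit_pkt() while hif_lib_tx_avail() reports room, then drains completions with hif_lib_tx_get_next_complete() from its tx-done context. A minimal sketch of such a drain loop, using only symbols from this patch except for the hypothetical free_buffer() helper:

static void client_drain_tx_done(struct hif_client_s *client, int qno)
{
	unsigned int flags;
	void *buf;

	/* stop as soon as the next descriptor is still HIF-owned */
	while ((buf = hif_lib_tx_get_next_complete(client, qno,
						   &flags, 1))) {
		/* completion accounting is per whole packet */
		if (flags & HIF_LAST_BUFFER)
			free_buffer(buf);	/* hypothetical helper */
	}
}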
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_HIF_LIB_H_
+#define _PFE_HIF_LIB_H_
+
+#include "pfe_hif.h"
+
+#define HIF_CL_REQ_TIMEOUT	10
+#define GFP_DMA_PFE		0
+#define PFE_PARSE_INFO_SIZE	16
+
+enum {
+	REQUEST_CL_REGISTER = 0,
+	REQUEST_CL_UNREGISTER,
+	HIF_REQUEST_MAX
+};
+
+enum {
+	/* Event to indicate the client rx queue reached its watermark level */
+	EVENT_HIGH_RX_WM = 0,
+	/* Event to indicate a packet was received for the client */
+	EVENT_RX_PKT_IND,
+	/* Event to indicate packet tx done for the client */
+	EVENT_TXDONE_IND,
+	HIF_EVENT_MAX
+};
+
+/* structure to store client queue info */
+struct hif_client_rx_queue {
+	struct rx_queue_desc *base;
+	u32 size;
+	u32 read_idx;
+	u32 write_idx;
+};
+
+struct hif_client_tx_queue {
+	struct tx_queue_desc *base;
+	u32 size;
+	u32 read_idx;
+	u32 write_idx;
+	u32 tx_pending;
+	unsigned long jiffies_last_packet;
+	u32 nocpy_flag;
+	u32 prev_tmu_tx_pkts;
+	u32 done_tmu_tx_pkts;
+};
+
+struct hif_client_s {
+	int id;
+	int tx_qn;
+	int rx_qn;
+	void *rx_qbase;
+	void *tx_qbase;
+	int tx_qsize;
+	int rx_qsize;
+	int cpu_id;
+	struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
+	struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
+	int (*event_handler)(void *priv, int event, int data);
+	unsigned long queue_mask[HIF_EVENT_MAX];
+	struct pfe *pfe;
+	void *priv;
+};
+
+/*
+ * Client specific shared memory
+ * It contains number of Rx/Tx queues, base addresses and queue sizes
+ */
+struct hif_client_shm {
+	u32 ctrl; /* 0-7: number of Rx queues, 8-15: number of tx queues */
+	unsigned long rx_qbase; /* Rx queue base address */
+	u32 rx_qsize; /* each Rx queue size, all Rx queues are of same size */
+	unsigned long tx_qbase; /* Tx queue base address */
+	u32 tx_qsize; /* each Tx queue size, all Tx queues are of same size */
+};
+
+/* Client shared memory ctrl bit description */
+#define CLIENT_CTRL_RX_Q_CNT_OFST	0
+#define CLIENT_CTRL_TX_Q_CNT_OFST	8
+#define CLIENT_CTRL_RX_Q_CNT(ctrl)	(((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) \
+						& 0xFF)
+#define CLIENT_CTRL_TX_Q_CNT(ctrl)	(((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) \
+						& 0xFF)
+
+/*
+ * Shared memory used to communicate between HIF driver and host/client drivers
+ * Before starting the hif driver rx_buf_pool and rx_buf_pool_cnt should be
+ * initialized with host buffers and buffers count in the pool.
+ * rx_buf_pool_cnt should be >= HIF_RX_DESC_NT.
+ *
+ */
+struct hif_shm {
+	u32 rx_buf_pool_cnt; /* Number of rx buffers available */
+	/* Rx buffers required to initialize HIF rx descriptors */
+	void *rx_buf_pool[HIF_RX_DESC_NT];
+	unsigned long g_client_status[2]; /* Global client status bit mask */
+	/* Client specific shared memory */
+	struct hif_client_shm client[HIF_CLIENTS_MAX];
+};
+
+#define CL_DESC_OWN	BIT(31)
+/* This sets ownership to the HIF driver */
+#define CL_DESC_LAST	BIT(30)
+/* This indicates last packet for multi buffers handling */
+#define CL_DESC_FIRST	BIT(29)
+/* This indicates first packet for multi buffers handling */
+
+#define CL_DESC_BUF_LEN(x)	((x) & 0xFFFF)
+#define CL_DESC_FLAGS(x)	(((x) & 0xF) << 16)
+#define CL_DESC_GET_FLAGS(x)	(((x) >> 16) & 0xF)
+
+struct rx_queue_desc {
+	void *data;
+	u32 ctrl; /* 0-15bit len, 16-19bit flags, 31bit owner */
+	u32 client_ctrl;
+};
+
+struct tx_queue_desc {
+	void *data;
+	u32 ctrl; /* 0-15bit len, 16-19bit flags, 31bit owner */
+};
+
+/* HIF Rx is not working properly for 2-byte aligned buffers and
+ * the IP header should be 4-byte aligned for better performance.
+ * "ip_header = 64 + 6(hif_header) + 14 (MAC Header)" will be 4-byte aligned.
+ */
+#define PFE_PKT_HEADER_SZ	sizeof(struct hif_hdr)
+/* must be big enough for headroom, pkt size and skb shared info */
+#define PFE_BUF_SIZE		2048
+#define PFE_PKT_HEADROOM	128
+
+#define SKB_SHARED_INFO_SIZE	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define PFE_PKT_SIZE		(PFE_BUF_SIZE - PFE_PKT_HEADROOM \
+				 - SKB_SHARED_INFO_SIZE)
+#define MAX_L2_HDR_SIZE		14	/* Not correct for VLAN/PPPoE */
+#define MAX_L3_HDR_SIZE		20	/* Not correct for IPv6 */
+#define MAX_L4_HDR_SIZE		60	/* TCP with maximum options */
+#define MAX_HDR_SIZE		(MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE \
+				 + MAX_L4_HDR_SIZE)
+/* Used in page mode to clamp packet size to the maximum supported by the hif
+ * hw interface (<16KiB)
+ */
+#define MAX_PFE_PKT_SIZE	16380UL
+
+extern unsigned int pfe_pkt_size;
+extern unsigned int pfe_pkt_headroom;
+extern unsigned int page_mode;
+extern unsigned int lro_mode;
+extern unsigned int tx_qos;
+extern unsigned int emac_txq_cnt;
+
+int pfe_hif_lib_init(struct pfe *pfe);
+void pfe_hif_lib_exit(struct pfe *pfe);
+int hif_lib_client_register(struct hif_client_s *client);
+int hif_lib_client_unregister(struct hif_client_s *client);
+void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
+				*data, unsigned int len, u32 client_ctrl,
+				unsigned int flags, void *client_data);
+int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data,
+		     unsigned int len, u32 client_ctrl, void *client_data);
+void hif_lib_indicate_client(int cl_id, int event, int data);
+int hif_lib_event_handler_start(struct hif_client_s *client, int event, int
+					data);
+int hif_lib_tmu_queue_start(struct hif_client_s *client, int qno);
+int hif_lib_tmu_queue_stop(struct hif_client_s *client, int qno);
+void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
+				   unsigned int *flags, int count);
+void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
+				*ofst, unsigned int *rx_ctrl,
+				unsigned int *desc_ctrl, void **priv_data);
+void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue);
+void hif_lib_set_rx_cpu_affinity(struct hif_client_s *client, int cpu_id);
+void hif_lib_set_tx_queue_nocpy(struct hif_client_s *client, int qno, int
+					enable);
+static inline int hif_lib_tx_avail(struct hif_client_s *client, unsigned int
+					qno)
+{
+	struct hif_client_tx_queue *queue = &client->tx_q[qno];
+
+	return (queue->size - queue->tx_pending);
+}
+
+static inline int hif_lib_get_tx_wr_index(struct hif_client_s *client, unsigned
+						int qno)
+{
+	struct hif_client_tx_queue *queue = &client->tx_q[qno];
+
+	return queue->write_idx;
+}
+
+static inline int hif_lib_tx_pending(struct hif_client_s *client, unsigned int
+					qno)
+{
+	struct hif_client_tx_queue *queue = &client->tx_q[qno];
+
+	return queue->tx_pending;
+}
+
+#define hif_lib_tx_credit_avail(pfe, id, qno) \
+	((pfe)->tmu_credit.tx_credit[id][qno])
+
+#define hif_lib_tx_credit_max(pfe, id, qno) \
+	((pfe)->tmu_credit.tx_credit_max[id][qno])
+
+/*
+ * Claim tx credit and account packets handed to the TMU (tx_qos only)
+ */
+#define hif_lib_tx_credit_use(pfe, id, qno, credit) \
+	({ typeof(pfe) pfe_ = pfe; \
+	typeof(id) id_ = id; \
+	typeof(qno) qno_ = qno; \
+	typeof(credit) credit_ = credit; \
+	do { \
+		if (tx_qos) { \
+			(pfe_)->tmu_credit.tx_credit[id_][qno_]\
+				-= credit_; \
+			(pfe_)->tmu_credit.tx_packets[id_][qno_]\
+				+= credit_; \
+		} \
+	} while (0); \
+	})
+
+#endif /* _PFE_HIF_LIB_H_ */
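The CL_DESC_* macros above pack three fields into the 32-bit ctrl word: length in bits 0-15, flags in bits 16-19 and the ownership bit in bit 31. A short illustrative sketch of building and decoding such a word (the flag value and length are arbitrary; HIF_FIRST_BUFFER comes from pfe_hif.h):

static void cl_desc_demo(void)
{
	/* HIF-owned descriptor: first buffer of a 1514-byte frame */
	u32 ctrl = CL_DESC_OWN | CL_DESC_FLAGS(HIF_FIRST_BUFFER) |
		   CL_DESC_BUF_LEN(1514);

	/* once the HIF clears bit 31, the fields can be read back */
	if (!(ctrl & CL_DESC_OWN))
		pr_info("flags=%x len=%u\n", CL_DESC_GET_FLAGS(ctrl),
			CL_DESC_BUF_LEN(ctrl));
}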
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hw.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include "pfe_mod.h"
+#include "pfe_hw.h"
+
+/* Functions to handle most of pfe hw register initialization */
+int pfe_hw_init(struct pfe *pfe, int resume)
+{
+	struct class_cfg class_cfg = {
+		.pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
+		.route_table_baseaddr = pfe->ddr_phys_baseaddr +
+					ROUTE_TABLE_BASEADDR,
+		.route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
+	};
+
+	struct tmu_cfg tmu_cfg = {
+		.pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
+		.llm_base_addr = pfe->ddr_phys_baseaddr + TMU_LLM_BASEADDR,
+		.llm_queue_len = TMU_LLM_QUEUE_LEN,
+	};
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	struct util_cfg util_cfg = {
+		.pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
+	};
+#endif
+
+	struct BMU_CFG bmu1_cfg = {
+		.baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR +
+						BMU1_LMEM_BASEADDR),
+		.count = BMU1_BUF_COUNT,
+		.size = BMU1_BUF_SIZE,
+		.low_watermark = 10,
+		.high_watermark = 15,
+	};
+
+	struct BMU_CFG bmu2_cfg = {
+		.baseaddr = DDR_PHYS_TO_PFE(pfe->ddr_phys_baseaddr +
+						BMU2_DDR_BASEADDR),
+		.count = BMU2_BUF_COUNT,
+		.size = BMU2_BUF_SIZE,
+		.low_watermark = 250,
+		.high_watermark = 253,
+	};
+
+	struct gpi_cfg egpi1_cfg = {
+		.lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
+		.tmlf_txthres = EGPI1_TMLF_TXTHRES,
+		.aseq_len = EGPI1_ASEQ_LEN,
+		.mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC1_BASE_ADDR +
+						   EMAC_TCNTRL_REG),
+	};
+
+	struct gpi_cfg egpi2_cfg = {
+		.lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
+		.tmlf_txthres = EGPI2_TMLF_TXTHRES,
+		.aseq_len = EGPI2_ASEQ_LEN,
+		.mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC2_BASE_ADDR +
+						   EMAC_TCNTRL_REG),
+	};
+
+	struct gpi_cfg hgpi_cfg = {
+		.lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
+		.tmlf_txthres = HGPI_TMLF_TXTHRES,
+		.aseq_len = HGPI_ASEQ_LEN,
+		.mtip_pause_reg = 0,
+	};
+
+	pr_info("%s\n", __func__);
+
+#if !defined(LS1012A_PFE_RESET_WA)
+	/* LS1012A needs this to make PE work correctly */
+	writel(0x3, CLASS_PE_SYS_CLK_RATIO);
+	writel(0x3, TMU_PE_SYS_CLK_RATIO);
+	writel(0x3, UTIL_PE_SYS_CLK_RATIO);
+	usleep_range(10, 20);
+#endif
+
+	pr_info("CLASS version: %x\n", readl(CLASS_VERSION));
+	pr_info("TMU version: %x\n", readl(TMU_VERSION));
+
+	pr_info("BMU1 version: %x\n", readl(BMU1_BASE_ADDR +
+					    BMU_VERSION));
+	pr_info("BMU2 version: %x\n", readl(BMU2_BASE_ADDR +
+					    BMU_VERSION));
+
+	pr_info("EGPI1 version: %x\n", readl(EGPI1_BASE_ADDR +
+					     GPI_VERSION));
+	pr_info("EGPI2 version: %x\n", readl(EGPI2_BASE_ADDR +
+					     GPI_VERSION));
+	pr_info("HGPI version: %x\n", readl(HGPI_BASE_ADDR +
+					    GPI_VERSION));
+
+	pr_info("HIF version: %x\n", readl(HIF_VERSION));
+	pr_info("HIF NOCPY version: %x\n", readl(HIF_NOCPY_VERSION));
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	pr_info("UTIL version: %x\n", readl(UTIL_VERSION));
+#endif
+	while (!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE))
+		;
+
+	hif_rx_disable();
+	hif_tx_disable();
+
+	bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
+
+	pr_info("bmu_init(1) done\n");
+
+	bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
+
+	pr_info("bmu_init(2) done\n");
+
+	class_cfg.resume = resume ? 1 : 0;
+
+	class_init(&class_cfg);
+
+	pr_info("class_init() done\n");
+
+	tmu_init(&tmu_cfg);
+
+	pr_info("tmu_init() done\n");
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	util_init(&util_cfg);
+
+	pr_info("util_init() done\n");
+#endif
+	gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
+
+	pr_info("gpi_init(1) done\n");
+
+	gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
+
+	pr_info("gpi_init(2) done\n");
+
+	gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
+
+	pr_info("gpi_init(hif) done\n");
+
+	bmu_enable(BMU1_BASE_ADDR);
+
+	pr_info("bmu_enable(1) done\n");
+
+	bmu_enable(BMU2_BASE_ADDR);
+
+	pr_info("bmu_enable(2) done\n");
+
+	return 0;
+}
+
+void pfe_hw_exit(struct pfe *pfe)
+{
+	pr_info("%s\n", __func__);
+
+	bmu_disable(BMU1_BASE_ADDR);
+	bmu_reset(BMU1_BASE_ADDR);
+
+	bmu_disable(BMU2_BASE_ADDR);
+	bmu_reset(BMU2_BASE_ADDR);
+}
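The ECC_MEM_INIT_DONE poll in pfe_hw_init() spins forever if the bit never sets (for example when the PFE clocks are misconfigured), hanging the probe. A bounded variant is sketched below as a drop-in for that loop, reusing the same readl()/TMU_CTRL symbols; the iteration limit and sleep window are assumptions, not values from this patch:

	int timeout = 1000;

	/* wait for TMU ECC memory init, but give up eventually */
	while (!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE)) {
		if (--timeout == 0)
			return -ETIMEDOUT;
		usleep_range(10, 20);
	}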
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hw.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_HW_H_
+#define _PFE_HW_H_
+
+#define PE_SYS_CLK_RATIO	1	/* SYS/AXI = 250MHz, HFE = 500MHz */
+
+int pfe_hw_init(struct pfe *pfe, int resume);
+void pfe_hw_exit(struct pfe *pfe);
+
+#endif /* _PFE_HW_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pfe_mod.h"
+
+extern bool pfe_use_old_dts_phy;
+struct ls1012a_pfe_platform_data pfe_platform_data;
+
+static int pfe_get_gemac_if_properties(struct device_node *gem,
+				       int port,
+				       struct ls1012a_pfe_platform_data *pdata)
+{
+	struct device_node *phy_node = NULL;
+	int size;
+	int phy_id = 0;
+	const u32 *addr;
+	const void *mac_addr;
+
+	addr = of_get_property(gem, "reg", &size);
+	port = be32_to_cpup(addr);
+
+	pdata->ls1012a_eth_pdata[port].gem_id = port;
+
+	mac_addr = of_get_mac_address(gem);
+	if (mac_addr) {
+		memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
+		       ETH_ALEN);
+	}
+
+	phy_node = of_parse_phandle(gem, "phy-handle", 0);
+	pdata->ls1012a_eth_pdata[port].phy_node = phy_node;
+	if (phy_node) {
+		pfe_use_old_dts_phy = false;
+		goto process_phynode;
+	} else if (of_phy_is_fixed_link(gem)) {
+		pfe_use_old_dts_phy = false;
+		if (of_phy_register_fixed_link(gem) < 0) {
+			pr_err("broken fixed-link specification\n");
+			goto err;
+		}
+		phy_node = of_node_get(gem);
+		pdata->ls1012a_eth_pdata[port].phy_node = phy_node;
+	} else if (of_get_property(gem, "fsl,pfe-phy-if-flags", &size)) {
+		pfe_use_old_dts_phy = true;
+		/* Use old dts properties for phy handling */
+		addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size);
+		pdata->ls1012a_eth_pdata[port].phy_flags = be32_to_cpup(addr);
+
+		addr = of_get_property(gem, "fsl,gemac-phy-id", &size);
+		if (!addr) {
+			pr_err("%s:%d Invalid gemac-phy-id....\n", __func__,
+			       __LINE__);
+		} else {
+			phy_id = be32_to_cpup(addr);
+			pdata->ls1012a_eth_pdata[port].phy_id = phy_id;
+			pdata->ls1012a_mdio_pdata[0].phy_mask &= ~(1 << phy_id);
+		}
+
+		/* If the interface has no PHY, skip the mdio properties */
+		if (pdata->ls1012a_eth_pdata[port].phy_flags & GEMAC_NO_PHY)
+			goto done;
+
+	} else {
+		pr_info("%s: No PHY or fixed-link\n", __func__);
+		return 0;
+	}
+
+process_phynode:
+	pdata->ls1012a_eth_pdata[port].mii_config = of_get_phy_mode(gem);
+	if ((pdata->ls1012a_eth_pdata[port].mii_config) < 0)
+		pr_err("%s:%d Incorrect Phy mode....\n", __func__,
+		       __LINE__);
+
+	addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
+	if (!addr) {
+		pr_err("%s: Invalid mdio-mux-val....\n", __func__);
+	} else {
+		phy_id = be32_to_cpup(addr);
+		pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
+	}
+
+	if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
+		pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
+			pdata->ls1012a_eth_pdata[port].mdio_muxval;
+
+	pdata->ls1012a_mdio_pdata[port].irq[0] = PHY_POLL;
+
+done:
+	return 0;
+
+err:
+	return -1;
+}
+
+/*
+ * pfe_platform_probe -
+ */
+static int pfe_platform_probe(struct platform_device *pdev)
+{
+	struct resource res;
+	int ii, rc, interface_count = 0, size = 0;
+	const u32 *prop;
+	struct device_node *np, *gem = NULL;
+	struct clk *pfe_clk;
+
+	np = pdev->dev.of_node;
+
+	if (!np) {
+		pr_err("Invalid device node\n");
+		return -EINVAL;
+	}
+
+	pfe = kzalloc(sizeof(*pfe), GFP_KERNEL);
+	if (!pfe) {
+		rc = -ENOMEM;
+		goto err_alloc;
+	}
+
+	platform_set_drvdata(pdev, pfe);
+
+	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
+	if (of_address_to_resource(np, 1, &res)) {
+		rc = -ENOMEM;
+		pr_err("failed to get ddr resource\n");
+		goto err_ddr;
+	}
+
+	pfe->ddr_phys_baseaddr = res.start;
+	pfe->ddr_size = resource_size(&res);
+	pfe->ddr_baseaddr = phys_to_virt(res.start);
+
+	pfe->scfg =
+		syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+						"fsl,pfe-scfg");
+	if (IS_ERR(pfe->scfg)) {
+		dev_err(&pdev->dev, "No syscfg phandle specified\n");
+		rc = PTR_ERR(pfe->scfg);
+		goto err_axi;
+	}
+
+	pfe->cbus_baseaddr = of_iomap(np, 0);
+	if (!pfe->cbus_baseaddr) {
+		rc = -ENOMEM;
+		pr_err("failed to get axi resource\n");
+		goto err_axi;
+	}
+
+	pfe->hif_irq = platform_get_irq(pdev, 0);
+	if (pfe->hif_irq < 0) {
+		pr_err("platform_get_irq for hif failed\n");
+		rc = pfe->hif_irq;
+		goto err_hif_irq;
+	}
+
+	pfe->wol_irq = platform_get_irq(pdev, 2);
+	if (pfe->wol_irq < 0) {
+		pr_err("platform_get_irq for WoL failed\n");
+		rc = pfe->wol_irq;
+		goto err_hif_irq;
+	}
+
+	/* Read interface count */
+	prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
+	if (!prop) {
+		pr_err("Failed to read number of interfaces\n");
+		rc = -ENXIO;
+		goto err_prop;
+	}
+
+	interface_count = be32_to_cpup(prop);
+	if (interface_count <= 0) {
+		pr_err("No ethernet interface count : %d\n",
+		       interface_count);
+		rc = -ENXIO;
+		goto err_prop;
+	}
+
+	pfe_platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
+
+	for (ii = 0; ii < interface_count; ii++) {
+		gem = of_get_next_child(np, gem);
+		if (gem)
+			pfe_get_gemac_if_properties(gem, ii,
+						    &pfe_platform_data);
+		else
+			pr_err("Unable to find interface %d\n", ii);
+
+	}
+
+	pfe->dev = &pdev->dev;
+
+	pfe->dev->platform_data = &pfe_platform_data;
+
+	/* declare WoL capabilities */
+	device_init_wakeup(&pdev->dev, true);
+
+	/* find the clocks */
+	pfe_clk = devm_clk_get(pfe->dev, "pfe");
+	if (IS_ERR(pfe_clk)) {
+		rc = PTR_ERR(pfe_clk);
+		goto err_hif_irq;
+	}
+
+	/* PFE clock is (platform clock / 2) */
+	/* save sys_clk value as KHz */
+	pfe->ctrl.sys_clk = clk_get_rate(pfe_clk) / (2 * 1000);
+
+	rc = pfe_probe(pfe);
+	if (rc < 0)
+		goto err_probe;
+
+	return 0;
+
+err_probe:
+err_prop:
+err_hif_irq:
+	iounmap(pfe->cbus_baseaddr);
+
+err_axi:
+err_ddr:
+	platform_set_drvdata(pdev, NULL);
+
+	kfree(pfe);
+
+err_alloc:
+	return rc;
+}
+
+/*
+ * pfe_platform_remove -
+ */
+static int pfe_platform_remove(struct platform_device *pdev)
+{
+	struct pfe *pfe = platform_get_drvdata(pdev);
+	int rc;
+
+	pr_info("%s\n", __func__);
+
+	rc = pfe_remove(pfe);
+
+	iounmap(pfe->cbus_baseaddr);
+
+	platform_set_drvdata(pdev, NULL);
+
+	kfree(pfe);
+
+	return rc;
+}
+
+#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+static int pfe_platform_suspend(struct device *dev)
+{
+	struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
+	struct net_device *netdev;
+	int i;
+
+	pfe->wake = 0;
+
+	for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
+		netdev = pfe->eth.eth_priv[i]->ndev;
+
+		netif_device_detach(netdev);
+
+		if (netif_running(netdev))
+			if (pfe_eth_suspend(netdev))
+				pfe->wake = 1;
+	}
+
+	/* Shutdown PFE only if we're not waking up the system */
+	if (!pfe->wake) {
+#if defined(LS1012A_PFE_RESET_WA)
+		pfe_hif_rx_idle(&pfe->hif);
+#endif
+		pfe_ctrl_suspend(&pfe->ctrl);
+		pfe_firmware_exit(pfe);
+
+		pfe_hif_exit(pfe);
+		pfe_hif_lib_exit(pfe);
+
+		pfe_hw_exit(pfe);
+	}
+
+	return 0;
+}
+
+static int pfe_platform_resume(struct device *dev)
+{
+	struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
+	struct net_device *netdev;
+	int i;
+
+	if (!pfe->wake) {
+		pfe_hw_init(pfe, 1);
+		pfe_hif_lib_init(pfe);
+		pfe_hif_init(pfe);
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+		util_enable();
+#endif
+		tmu_enable(0xf);
+		class_enable();
+		pfe_ctrl_resume(&pfe->ctrl);
+	}
+
+	for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
+		netdev = pfe->eth.eth_priv[i]->ndev;
+
+		if (pfe->mdio.mdio_priv[i]->mii_bus)
+			pfe_eth_mdio_reset(pfe->mdio.mdio_priv[i]->mii_bus);
+
+		if (netif_running(netdev))
+			pfe_eth_resume(netdev);
+
+		netif_device_attach(netdev);
+	}
+	return 0;
+}
+#else
+#define pfe_platform_suspend NULL
+#define pfe_platform_resume NULL
+#endif
+
+static const struct dev_pm_ops pfe_platform_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pfe_platform_suspend, pfe_platform_resume)
+};
+#endif
+
+static const struct of_device_id pfe_match[] = {
+	{
+		.compatible = "fsl,pfe",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, pfe_match);
+
+static struct platform_driver pfe_platform_driver = {
+	.probe = pfe_platform_probe,
+	.remove = pfe_platform_remove,
+	.driver = {
+		.name = "pfe",
+		.of_match_table = pfe_match,
+#ifdef CONFIG_PM
+		.pm = &pfe_platform_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(pfe_platform_driver);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PFE Ethernet driver");
+MODULE_AUTHOR("NXP DNCPE");
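The property reads in pfe_get_gemac_if_properties() use the open-coded of_get_property()/be32_to_cpup() pair. The same reads could lean on the of_property_read_u32() helper, which folds the NULL check and the endianness conversion together; a sketch (not part of the patch) for the mdio-mux value, reusing the gem/pdata/port names from the function above:

	u32 mux_val;

	if (of_property_read_u32(gem, "fsl,mdio-mux-val", &mux_val))
		pr_err("%s: Invalid mdio-mux-val\n", __func__);
	else
		pdata->ls1012a_eth_pdata[port].mdio_muxval = mux_val;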
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_mod.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include <linux/dma-mapping.h>
+#include "pfe_mod.h"
+#include "pfe_cdev.h"
+
+unsigned int us;
+module_param(us, uint, 0444);
+MODULE_PARM_DESC(us, "0: module enabled for kernel networking (DEFAULT)\n"
+			"1: module enabled for userspace networking\n");
+struct pfe *pfe;
+
+/*
+ * pfe_probe -
+ */
+int pfe_probe(struct pfe *pfe)
+{
+	int rc;
+
+	if (pfe->ddr_size < DDR_MAX_SIZE) {
+		pr_err("%s: required DDR memory (%x) exceeds platform DDR memory (%x)\n",
+		       __func__, (unsigned int)DDR_MAX_SIZE, pfe->ddr_size);
+		rc = -ENOMEM;
+		goto err_hw;
+	}
+
+	if (((int)(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR) &
+	    (8 * SZ_1M - 1)) != 0) {
+		pr_err("%s: BMU2 base address (0x%x) must be aligned on 8MB boundary\n",
+		       __func__, (int)pfe->ddr_phys_baseaddr +
+			BMU2_DDR_BASEADDR);
+		rc = -ENOMEM;
+		goto err_hw;
+	}
+
+	pr_info("cbus_baseaddr: %lx, ddr_baseaddr: %lx, ddr_phys_baseaddr: %lx, ddr_size: %x\n",
+		(unsigned long)pfe->cbus_baseaddr,
+		(unsigned long)pfe->ddr_baseaddr,
+		pfe->ddr_phys_baseaddr, pfe->ddr_size);
+
+	pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr,
+		     pfe->ddr_phys_baseaddr, pfe->ddr_size);
+
+	rc = pfe_hw_init(pfe, 0);
+	if (rc < 0)
+		goto err_hw;
+
+	if (us)
+		goto firmware_init;
+
+	rc = pfe_hif_lib_init(pfe);
+	if (rc < 0)
+		goto err_hif_lib;
+
+	rc = pfe_hif_init(pfe);
+	if (rc < 0)
+		goto err_hif;
+
+firmware_init:
+	rc = pfe_firmware_init(pfe);
+	if (rc < 0)
+		goto err_firmware;
+
+	rc = pfe_ctrl_init(pfe);
+	if (rc < 0)
+		goto err_ctrl;
+
+	rc = pfe_eth_init(pfe);
+	if (rc < 0)
+		goto err_eth;
+
+	rc = pfe_sysfs_init(pfe);
+	if (rc < 0)
+		goto err_sysfs;
+
+	rc = pfe_debugfs_init(pfe);
+	if (rc < 0)
+		goto err_debugfs;
+
+	if (us) {
+		/* Creating a character device */
+		rc = pfe_cdev_init();
+		if (rc < 0)
+			goto err_cdev;
+	}
+
+	return 0;
+
+err_cdev:
+	pfe_debugfs_exit(pfe);
+
+err_debugfs:
+	pfe_sysfs_exit(pfe);
+
+err_sysfs:
+	pfe_eth_exit(pfe);
+
+err_eth:
+	pfe_ctrl_exit(pfe);
+
+err_ctrl:
+	pfe_firmware_exit(pfe);
+
+err_firmware:
+	if (us)
+		goto err_hif_lib;
+
+	pfe_hif_exit(pfe);
+
+err_hif:
+	pfe_hif_lib_exit(pfe);
+
+err_hif_lib:
+	pfe_hw_exit(pfe);
+
+err_hw:
+	return rc;
+}
+
+/*
+ * pfe_remove -
+ */
+int pfe_remove(struct pfe *pfe)
+{
+	pr_info("%s\n", __func__);
+
+	if (us)
+		pfe_cdev_exit();
+
+	pfe_debugfs_exit(pfe);
+
+	pfe_sysfs_exit(pfe);
+
+	pfe_eth_exit(pfe);
+
+	pfe_ctrl_exit(pfe);
+
+#if defined(LS1012A_PFE_RESET_WA)
+	pfe_hif_rx_idle(&pfe->hif);
+#endif
+	pfe_firmware_exit(pfe);
+
+	if (us)
+		goto hw_exit;
+
+	pfe_hif_exit(pfe);
+
+	pfe_hif_lib_exit(pfe);
+
+hw_exit:
+	pfe_hw_exit(pfe);
+
+	return 0;
+}
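The BMU2 alignment check in pfe_probe() truncates the physical address to int before masking, which works on LS1012A but reads oddly on a 64-bit SoC. The kernel's IS_ALIGNED() helper expresses the same 8 MB requirement without the narrowing cast; a sketch of the equivalent test, with the same symbols as above:

	if (!IS_ALIGNED(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR,
			8 * SZ_1M)) {
		rc = -ENOMEM;
		goto err_hw;
	}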
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_mod.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_MOD_H_
+#define _PFE_MOD_H_
+
+#include <linux/device.h>
+#include <linux/elf.h>
+
+extern unsigned int us;
+
+struct pfe;
+
+#include "pfe_hw.h"
+#include "pfe_firmware.h"
+#include "pfe_ctrl.h"
+#include "pfe_hif.h"
+#include "pfe_hif_lib.h"
+#include "pfe_eth.h"
+#include "pfe_sysfs.h"
+#include "pfe_perfmon.h"
+#include "pfe_debugfs.h"
+
+#define PHYID_MAX_VAL 32
+
+struct pfe_tmu_credit {
+	/* Number of allowed TX packet in-flight, matches TMU queue size */
+	unsigned int tx_credit[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
+	unsigned int tx_credit_max[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
+	unsigned int tx_packets[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
+};
+
+struct pfe {
+	struct regmap *scfg;
+	unsigned long ddr_phys_baseaddr;
+	void *ddr_baseaddr;
+	unsigned int ddr_size;
+	void *cbus_baseaddr;
+	void *apb_baseaddr;
+	unsigned long iram_phys_baseaddr;
+	void *iram_baseaddr;
+	unsigned long ipsec_phys_baseaddr;
+	void *ipsec_baseaddr;
+	int hif_irq;
+	int wol_irq;
+	int hif_client_irq;
+	struct device *dev;
+	struct dentry *dentry;
+	struct pfe_ctrl ctrl;
+	struct pfe_hif hif;
+	struct pfe_eth eth;
+	struct pfe_mdio mdio;
+	struct hif_client_s *hif_client[HIF_CLIENTS_MAX];
+#if defined(CFG_DIAGS)
+	struct pfe_diags diags;
+#endif
+	struct pfe_tmu_credit tmu_credit;
+	struct pfe_cpumon cpumon;
+	struct pfe_memmon memmon;
+	int wake;
+	int mdio_muxval[PHYID_MAX_VAL];
+	struct clk *hfe_clock;
+};
+
+extern struct pfe *pfe;
+
+int pfe_probe(struct pfe *pfe);
+int pfe_remove(struct pfe *pfe);
+
+/* DDR Mapping in reserved memory */
+#define ROUTE_TABLE_BASEADDR	0
+#define ROUTE_TABLE_HASH_BITS	15	/* 32K entries */
+#define ROUTE_TABLE_SIZE	((1 << ROUTE_TABLE_HASH_BITS) \
+				 * CLASS_ROUTE_SIZE)
+#define BMU2_DDR_BASEADDR	(ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE)
+#define BMU2_BUF_COUNT		(4096 - 256)
+/* This is to get a total DDR size of 12MiB */
+#define BMU2_DDR_SIZE		(DDR_BUF_SIZE * BMU2_BUF_COUNT)
+#define UTIL_CODE_BASEADDR	(BMU2_DDR_BASEADDR + BMU2_DDR_SIZE)
+#define UTIL_CODE_SIZE		(128 * SZ_1K)
+#define UTIL_DDR_DATA_BASEADDR	(UTIL_CODE_BASEADDR + UTIL_CODE_SIZE)
+#define UTIL_DDR_DATA_SIZE	(64 * SZ_1K)
+#define CLASS_DDR_DATA_BASEADDR	(UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE)
+#define CLASS_DDR_DATA_SIZE	(32 * SZ_1K)
+#define TMU_DDR_DATA_BASEADDR	(CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE)
+#define TMU_DDR_DATA_SIZE	(32 * SZ_1K)
+#define TMU_LLM_BASEADDR	(TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE)
+#define TMU_LLM_QUEUE_LEN	(8 * 512)
+/* Must be power of two and at least 16 * 8 = 128 bytes */
+#define TMU_LLM_SIZE		(4 * 16 * TMU_LLM_QUEUE_LEN)
+/* (4 TMU's x 16 queues x queue_len) */
+
+#define DDR_MAX_SIZE		(TMU_LLM_BASEADDR + TMU_LLM_SIZE)
+
+/* LMEM Mapping */
+#define BMU1_LMEM_BASEADDR	0
+#define BMU1_BUF_COUNT		256
+#define BMU1_LMEM_SIZE		(LMEM_BUF_SIZE * BMU1_BUF_COUNT)
+
+#endif /* _PFE_MOD_H_ */
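The "12MiB" figure in the BMU2 comment above can be checked against the map, assuming the values of CLASS_ROUTE_SIZE (128 bytes) and DDR_BUF_SIZE (2048 bytes) defined elsewhere in this patch:

	route table:	(1 << 15) * 128		=    4 MiB
	BMU2 buffers:	(4096 - 256) * 2048	=  7.5 MiB
	util code/data + class/tmu data:
			128K + 64K + 32K + 32K	=  256 KiB
	TMU LLM:	4 * 16 * (8 * 512)	=  256 KiB
	DDR_MAX_SIZE				=   12 MiB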
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_perfmon.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_PERFMON_H_
+#define _PFE_PERFMON_H_
+
+#include "pfe/pfe.h"
+
+#define CT_CPUMON_INTERVAL	(1 * TIMER_TICKS_PER_SEC)
+
+struct pfe_cpumon {
+	u32 cpu_usage_pct[MAX_PE];
+	u32 class_usage_pct;
+};
+
+struct pfe_memmon {
+	u32 kernel_memory_allocated;
+};
+
+int pfe_perfmon_init(struct pfe *pfe);
+void pfe_perfmon_exit(struct pfe *pfe);
+
+#endif /* _PFE_PERFMON_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_sysfs.c
@@ -0,0 +1,810 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "pfe_mod.h"
+
+#define PE_EXCEPTION_DUMP_ADDRESS 0x1fa8
+#define NUM_QUEUES	16
+
+static char register_name[20][5] = {
+	"EPC", "ECAS", "EID", "ED",
+	"r0", "r1", "r2", "r3",
+	"r4", "r5", "r6", "r7",
+	"r8", "r9", "r10", "r11",
+	"r12", "r13", "r14", "r15",
+};
+
+static char exception_name[14][20] = {
+	"Reset",
+	"HardwareFailure",
+	"NMI",
+	"InstBreakpoint",
+	"DataBreakpoint",
+	"Unsupported",
+	"PrivilegeViolation",
+	"InstBusError",
+	"DataBusError",
+	"AlignmentError",
+	"ArithmeticError",
+	"SystemCall",
+	"MemoryManagement",
+	"Interrupt",
+};
+
+static unsigned long class_do_clear;
+static unsigned long tmu_do_clear;
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+static unsigned long util_do_clear;
+#endif
+
+static ssize_t display_pe_status(char *buf, int id, u32 dmem_addr, unsigned long
+					do_clear)
+{
+	ssize_t len = 0;
+	u32 val;
+	char statebuf[5];
+	struct pfe_cpumon *cpumon = &pfe->cpumon;
+	u32 debug_indicator;
+	u32 debug[20];
+
+	*(u32 *)statebuf = pe_dmem_read(id, dmem_addr, 4);
+	dmem_addr += 4;
+
+	statebuf[4] = '\0';
+	len += sprintf(buf + len, "state=%4s ", statebuf);
+
+	val = pe_dmem_read(id, dmem_addr, 4);
+	dmem_addr += 4;
+	len += sprintf(buf + len, "ctr=%08x ", cpu_to_be32(val));
+
+	val = pe_dmem_read(id, dmem_addr, 4);
+	if (do_clear && val)
+		pe_dmem_write(id, 0, dmem_addr, 4);
+	dmem_addr += 4;
+	len += sprintf(buf + len, "rx=%u ", cpu_to_be32(val));
+
+	val = pe_dmem_read(id, dmem_addr, 4);
+	if (do_clear && val)
+		pe_dmem_write(id, 0, dmem_addr, 4);
+	dmem_addr += 4;
+	if (id >= TMU0_ID && id <= TMU_MAX_ID)
+		len += sprintf(buf + len, "qstatus=%x", cpu_to_be32(val));
+	else
+		len += sprintf(buf + len, "tx=%u", cpu_to_be32(val));
+
+	val = pe_dmem_read(id, dmem_addr, 4);
+	if (do_clear && val)
+		pe_dmem_write(id, 0, dmem_addr, 4);
+	dmem_addr += 4;
+	if (val)
+		len += sprintf(buf + len, " drop=%u", cpu_to_be32(val));
+
+	len += sprintf(buf + len, " load=%d%%", cpumon->cpu_usage_pct[id]);
+
+	len += sprintf(buf + len, "\n");
+
+	debug_indicator = pe_dmem_read(id, dmem_addr, 4);
+	dmem_addr += 4;
+	if (!strncmp((char *)&debug_indicator, "DBUG", 4)) {
+		int j, last = 0;
+
+		for (j = 0; j < 16; j++) {
+			debug[j] = pe_dmem_read(id, dmem_addr, 4);
+			if (debug[j]) {
+				if (do_clear)
+					pe_dmem_write(id, 0, dmem_addr, 4);
+				last = j + 1;
+			}
+			dmem_addr += 4;
+		}
+		for (j = 0; j < last; j++) {
+			len += sprintf(buf + len, "%08x%s",
+				       cpu_to_be32(debug[j]),
+				       (j & 0x7) == 0x7 || j == last - 1 ? "\n" : " ");
+		}
+	}
+
+	if (!strncmp(statebuf, "DEAD", 4)) {
+		u32 i, dump = PE_EXCEPTION_DUMP_ADDRESS;
+
+		len += sprintf(buf + len, "Exception details:\n");
+		for (i = 0; i < 20; i++) {
+			debug[i] = pe_dmem_read(id, dump, 4);
+			dump += 4;
+			if (i == 2)
+				len += sprintf(buf + len, "%4s = %08x (=%s) ",
+					       register_name[i], cpu_to_be32(debug[i]),
+					       exception_name[min((u32)
+					       cpu_to_be32(debug[i]), (u32)13)]);
+			else
+				len += sprintf(buf + len, "%4s = %08x%s",
+					       register_name[i], cpu_to_be32(debug[i]),
+					       (i & 0x3) == 0x3 || i == 19 ? "\n" : " ");
+		}
+	}
+
+	return len;
+}
+
+static ssize_t class_phy_stats(char *buf, int phy)
+{
+	ssize_t len = 0;
+	int off1 = phy * 0x28;
+	int off2 = phy * 0x10;
+
+	if (phy == 3)
+		off1 = CLASS_PHY4_RX_PKTS - CLASS_PHY1_RX_PKTS;
+
+	len += sprintf(buf + len, "phy: %d\n", phy);
+	len += sprintf(buf + len,
+		       " rx: %10u, tx: %10u, intf: %10u, ipv4: %10u, ipv6: %10u\n",
+		       readl(CLASS_PHY1_RX_PKTS + off1),
+		       readl(CLASS_PHY1_TX_PKTS + off1),
+		       readl(CLASS_PHY1_INTF_MATCH_PKTS + off1),
+		       readl(CLASS_PHY1_V4_PKTS + off1),
+		       readl(CLASS_PHY1_V6_PKTS + off1));
+
+	len += sprintf(buf + len,
+		       " icmp: %10u, igmp: %10u, tcp: %10u, udp: %10u\n",
+		       readl(CLASS_PHY1_ICMP_PKTS + off2),
+		       readl(CLASS_PHY1_IGMP_PKTS + off2),
+		       readl(CLASS_PHY1_TCP_PKTS + off2),
+		       readl(CLASS_PHY1_UDP_PKTS + off2));
+
+	len += sprintf(buf + len, " err\n");
+	len += sprintf(buf + len,
+		       " lp: %10u, intf: %10u, l3: %10u, chksum: %10u, ttl: %10u\n",
+		       readl(CLASS_PHY1_LP_FAIL_PKTS + off1),
+		       readl(CLASS_PHY1_INTF_FAIL_PKTS + off1),
+		       readl(CLASS_PHY1_L3_FAIL_PKTS + off1),
+		       readl(CLASS_PHY1_CHKSUM_ERR_PKTS + off1),
+		       readl(CLASS_PHY1_TTL_ERR_PKTS + off1));
+
+	return len;
+}
+
+/* qm_read_drop_stat
+ * This function is used to read the drop statistics from the TMU
+ * hw drop counter. Since the hw counter is always cleared after
+ * reading, this function maintains the previous drop count, and
+ * adds the new value to it. That value can be retrieved by
+ * passing a pointer to it with the total_drops arg.
+ *
+ * @param tmu		TMU number (0 - 3)
+ * @param queue		queue number (0 - 15)
+ * @param total_drops	pointer to location to store total drops (or NULL)
+ * @param do_reset	if TRUE, clear total drops after updating
+ */
+u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset)
+{
+	static u32 qtotal[TMU_MAX_ID + 1][NUM_QUEUES];
+	u32 val;
+
+	writel((tmu << 8) | queue, TMU_TEQ_CTRL);
+	writel((tmu << 8) | queue, TMU_LLM_CTRL);
+	val = readl(TMU_TEQ_DROP_STAT);
+	qtotal[tmu][queue] += val;
+	if (total_drops)
+		*total_drops = qtotal[tmu][queue];
+	if (do_reset)
+		qtotal[tmu][queue] = 0;
+	return val;
+}
+
+static ssize_t tmu_queue_stats(char *buf, int tmu, int queue)
+{
+	ssize_t len = 0;
+	u32 drops;
+
+	len += sprintf(buf + len, "%d-%02d, ", tmu, queue);
+
+	drops = qm_read_drop_stat(tmu, queue, NULL, 0);
+
+	/* Select queue */
+	writel((tmu << 8) | queue, TMU_TEQ_CTRL);
+	writel((tmu << 8) | queue, TMU_LLM_CTRL);
+
+	len += sprintf(buf + len,
+		       "(teq) drop: %10u, tx: %10u (llm) head: %08x, tail: %08x, drop: %10u\n",
+		       drops, readl(TMU_TEQ_TRANS_STAT),
+		       readl(TMU_LLM_QUE_HEADPTR), readl(TMU_LLM_QUE_TAILPTR),
+		       readl(TMU_LLM_QUE_DROPCNT));
+
+	return len;
+}
+
+static ssize_t tmu_queues(char *buf, int tmu)
+{
+	ssize_t len = 0;
+	int queue;
+
+	for (queue = 0; queue < 16; queue++)
+		len += tmu_queue_stats(buf + len, tmu, queue);
+
+	return len;
+}
+
+static ssize_t block_version(char *buf, void *addr)
+{
+	ssize_t len = 0;
+	u32 val;
+
+	val = readl(addr);
+	len += sprintf(buf + len, "revision: %x, version: %x, id: %x\n",
+		       (val >> 24) & 0xff, (val >> 16) & 0xff, val & 0xffff);
+
+	return len;
+}
+
+static ssize_t bmu(char *buf, int id, void *base)
+{
+	ssize_t len = 0;
+
+	len += sprintf(buf + len, "%s: %d\n ", __func__, id);
+
+	len += block_version(buf + len, base + BMU_VERSION);
+
+	len += sprintf(buf + len, " buf size: %x\n", (1 << readl(base +
+			BMU_BUF_SIZE)));
+	len += sprintf(buf + len, " buf count: %x\n", readl(base +
+			BMU_BUF_CNT));
+	len += sprintf(buf + len, " buf rem: %x\n", readl(base +
+			BMU_REM_BUF_CNT));
+	len += sprintf(buf + len, " buf curr: %x\n", readl(base +
+			BMU_CURR_BUF_CNT));
+	len += sprintf(buf + len, " free err: %x\n", readl(base +
+			BMU_FREE_ERR_ADDR));
+
+	return len;
+}
+
+static ssize_t gpi(char *buf, int id, void *base)
+{
+	ssize_t len = 0;
+	u32 val;
+
+	len += sprintf(buf + len, "%s%d:\n ", __func__, id);
+	len += block_version(buf + len, base + GPI_VERSION);
+
+	len += sprintf(buf + len, " tx under stick: %x\n", readl(base +
+			GPI_FIFO_STATUS));
+	val = readl(base + GPI_FIFO_DEBUG);
+	len += sprintf(buf + len, " tx pkts: %x\n", (val >> 23) &
+			0x3f);
+	len += sprintf(buf + len, " rx pkts: %x\n", (val >> 18) &
+			0x3f);
+	len += sprintf(buf + len, " tx bytes: %x\n", (val >> 9) &
+			0x1ff);
+	len += sprintf(buf + len, " rx bytes: %x\n", (val >> 0) &
+			0x1ff);
+	len += sprintf(buf + len, " overrun: %x\n", readl(base +
+			GPI_OVERRUN_DROPCNT));
+
+	return len;
+}
+
+static ssize_t pfe_set_class(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	if (kstrtoul(buf, 0, &class_do_clear))
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t pfe_show_class(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	ssize_t len = 0;
+	int id;
+	u32 val;
+	struct pfe_cpumon *cpumon = &pfe->cpumon;
+
+	len += block_version(buf + len, CLASS_VERSION);
+
+	for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
+		len += sprintf(buf + len, "%d: ", id - CLASS0_ID);
+
+		val = readl(CLASS_PE0_DEBUG + id * 4);
+		len += sprintf(buf + len, "pc=1%04x ", val & 0xffff);
+
+		len += display_pe_status(buf + len, id, CLASS_DM_PESTATUS,
+					 class_do_clear);
+	}
+	len += sprintf(buf + len, "aggregate load=%d%%\n\n",
+		       cpumon->class_usage_pct);
+
+	len += sprintf(buf + len, "pe status: 0x%x\n",
+		       readl(CLASS_PE_STATUS));
+	len += sprintf(buf + len, "max buf cnt: 0x%x afull thres: 0x%x\n",
+		       readl(CLASS_MAX_BUF_CNT), readl(CLASS_AFULL_THRES));
+	len += sprintf(buf + len, "tsq max cnt: 0x%x tsq fifo thres: 0x%x\n",
+		       readl(CLASS_TSQ_MAX_CNT), readl(CLASS_TSQ_FIFO_THRES));
+	len += sprintf(buf + len, "state: 0x%x\n", readl(CLASS_STATE));
+
+	len += class_phy_stats(buf + len, 0);
+	len += class_phy_stats(buf + len, 1);
+	len += class_phy_stats(buf + len, 2);
+	len += class_phy_stats(buf + len, 3);
+
+	return len;
+}
+
+static ssize_t pfe_set_tmu(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	if (kstrtoul(buf, 0, &tmu_do_clear))
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t pfe_show_tmu(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	ssize_t len = 0;
+	int id;
+	u32 val;
+
+	len += block_version(buf + len, TMU_VERSION);
+
+	for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
+		if (id == TMU2_ID)
+			continue;
+		len += sprintf(buf + len, "%d: ", id - TMU0_ID);
+
+		len += display_pe_status(buf + len, id, TMU_DM_PESTATUS,
+					 tmu_do_clear);
+	}
+
+	len += sprintf(buf + len, "pe status: %x\n", readl(TMU_PE_STATUS));
+	len += sprintf(buf + len, "inq fifo cnt: %x\n",
+		       readl(TMU_PHY_INQ_FIFO_CNT));
+	val = readl(TMU_INQ_STAT);
+	len += sprintf(buf + len, "inq wr ptr: %x\n", val & 0x3ff);
+	len += sprintf(buf + len, "inq rd ptr: %x\n", val >> 10);
+
+	return len;
+}
+
+static unsigned long drops_do_clear;
+static u32 class_drop_counter[CLASS_NUM_DROP_COUNTERS];
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+static u32 util_drop_counter[UTIL_NUM_DROP_COUNTERS];
+#endif
+
+char *class_drop_description[CLASS_NUM_DROP_COUNTERS] = {
+	"ICC",
+	"Host Pkt Error",
+	"Rx Error",
+	"IPsec Outbound",
+	"IPsec Inbound",
+	"EXPT IPsec Error",
+	"Reassembly",
+	"Fragmenter",
+	"NAT-T",
+	"Socket",
+	"Multicast",
+	"NAT-PT",
+	"Tx Disabled",
+};
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+char *util_drop_description[UTIL_NUM_DROP_COUNTERS] = {
+	"IPsec Outbound",
+	"IPsec Inbound",
+	"IPsec Rate Limiter",
+	"Fragmenter",
+	"Socket",
+	"Tx Disabled",
+	"Rx Error",
+};
+#endif
+
+static ssize_t pfe_set_drops(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	if (kstrtoul(buf, 0, &drops_do_clear))
+		return -EINVAL;
+	return count;
+}
+
+static u32 tmu_drops[4][16];
+static ssize_t pfe_show_drops(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	ssize_t len = 0;
+	int id, dropnum;
+	int tmu, queue;
+	u32 val;
+	u32 dmem_addr;
+	int num_class_drops = 0, num_tmu_drops = 0, num_util_drops = 0;
+	struct pfe_ctrl *ctrl = &pfe->ctrl;
+
+	memset(class_drop_counter, 0, sizeof(class_drop_counter));
+	for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
+		if (drops_do_clear)
+			pe_sync_stop(ctrl, (1 << id));
+		for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
+		     dropnum++) {
+			dmem_addr = CLASS_DM_DROP_CNTR;
+			val = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
+			class_drop_counter[dropnum] += val;
+			num_class_drops += val;
+			if (drops_do_clear)
+				pe_dmem_write(id, 0, dmem_addr, 4);
+		}
+		if (drops_do_clear)
+			pe_start(ctrl, (1 << id));
+	}
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	if (drops_do_clear)
+		pe_sync_stop(ctrl, (1 << UTIL_ID));
+	for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
+		dmem_addr = UTIL_DM_DROP_CNTR;
+		val = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
+		util_drop_counter[dropnum] = val;
+		num_util_drops += val;
+		if (drops_do_clear)
+			pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
+	}
+	if (drops_do_clear)
+		pe_start(ctrl, (1 << UTIL_ID));
+#endif
+	for (tmu = 0; tmu < 4; tmu++) {
+		for (queue = 0; queue < 16; queue++) {
+			qm_read_drop_stat(tmu, queue, &tmu_drops[tmu][queue],
+					  drops_do_clear);
+			num_tmu_drops += tmu_drops[tmu][queue];
+		}
+	}
+
+	if (num_class_drops == 0 && num_util_drops == 0 && num_tmu_drops == 0)
+		len += sprintf(buf + len, "No PE drops\n\n");
+
+	if (num_class_drops > 0) {
+		len += sprintf(buf + len, "Class PE drops --\n");
+		for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
+		     dropnum++) {
+			if (class_drop_counter[dropnum] > 0)
+				len += sprintf(buf + len, " %s: %d\n",
+					       class_drop_description[dropnum],
+					       class_drop_counter[dropnum]);
+		}
+		len += sprintf(buf + len, "\n");
+	}
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	if (num_util_drops > 0) {
+		len += sprintf(buf + len, "Util PE drops --\n");
+		for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
+			if (util_drop_counter[dropnum] > 0)
+				len += sprintf(buf + len, " %s: %d\n",
+					       util_drop_description[dropnum],
+					       util_drop_counter[dropnum]);
+		}
+		len += sprintf(buf + len, "\n");
+	}
+#endif
+	if (num_tmu_drops > 0) {
+		len += sprintf(buf + len, "TMU drops --\n");
+		for (tmu = 0; tmu < 4; tmu++) {
+			for (queue = 0; queue < 16; queue++) {
+				if (tmu_drops[tmu][queue] > 0)
+					len += sprintf(buf + len,
+						       " TMU%d-Q%d: %d\n"
+						       , tmu, queue, tmu_drops[tmu][queue]);
+			}
+		}
+		len += sprintf(buf + len, "\n");
+	}
+
+	return len;
+}
+
+static ssize_t pfe_show_tmu0_queues(struct device *dev, struct device_attribute
+					*attr, char *buf)
+{
+	return tmu_queues(buf, 0);
+}
+
+static ssize_t pfe_show_tmu1_queues(struct device *dev, struct device_attribute
+					*attr, char *buf)
+{
+	return tmu_queues(buf, 1);
+}
+
+static ssize_t pfe_show_tmu2_queues(struct device *dev, struct device_attribute
+					*attr, char *buf)
+{
+	return tmu_queues(buf, 2);
+}
+
+static ssize_t pfe_show_tmu3_queues(struct device *dev, struct device_attribute
+					*attr, char *buf)
+{
+	return tmu_queues(buf, 3);
+}
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+static ssize_t pfe_set_util(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	if (kstrtoul(buf, 0, &util_do_clear))
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t pfe_show_util(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	ssize_t len = 0;
+	struct pfe_ctrl *ctrl = &pfe->ctrl;
+
+	len += block_version(buf + len, UTIL_VERSION);
+
+	pe_sync_stop(ctrl, (1 << UTIL_ID));
+	len += display_pe_status(buf + len, UTIL_ID, UTIL_DM_PESTATUS,
+				 util_do_clear);
+	pe_start(ctrl, (1 << UTIL_ID));
+
+	len += sprintf(buf + len, "pe status: %x\n", readl(UTIL_PE_STATUS));
+	len += sprintf(buf + len, "max buf cnt: %x\n",
+		       readl(UTIL_MAX_BUF_CNT));
+	len += sprintf(buf + len, "tsq max cnt: %x\n",
+		       readl(UTIL_TSQ_MAX_CNT));
+
+	return len;
+}
+#endif
+
+static ssize_t pfe_show_bmu(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	ssize_t len = 0;
+
+	len += bmu(buf + len, 1, BMU1_BASE_ADDR);
+	len += bmu(buf + len, 2, BMU2_BASE_ADDR);
+
+	return len;
+}
+
+static ssize_t pfe_show_hif(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	ssize_t len = 0;
+
+	len += sprintf(buf + len, "hif:\n ");
+	len += block_version(buf + len, HIF_VERSION);
+
+	len += sprintf(buf + len, " tx curr bd: %x\n",
+		       readl(HIF_TX_CURR_BD_ADDR));
+	len += sprintf(buf + len, " tx status: %x\n",
+		       readl(HIF_TX_STATUS));
+	len += sprintf(buf + len, " tx dma status: %x\n",
+		       readl(HIF_TX_DMA_STATUS));
+
+	len += sprintf(buf + len, " rx curr bd: %x\n",
+		       readl(HIF_RX_CURR_BD_ADDR));
+	len += sprintf(buf + len, " rx status: %x\n",
+		       readl(HIF_RX_STATUS));
+	len += sprintf(buf + len, " rx dma status: %x\n",
+		       readl(HIF_RX_DMA_STATUS));
+
+	len += sprintf(buf + len, "hif nocopy:\n ");
+	len += block_version(buf + len, HIF_NOCPY_VERSION);
+
+	len += sprintf(buf + len, " tx curr bd: %x\n",
+		       readl(HIF_NOCPY_TX_CURR_BD_ADDR));
+	len += sprintf(buf + len, " tx status: %x\n",
+		       readl(HIF_NOCPY_TX_STATUS));
+	len += sprintf(buf + len, " tx dma status: %x\n",
+		       readl(HIF_NOCPY_TX_DMA_STATUS));
+
+	len += sprintf(buf + len, " rx curr bd: %x\n",
+		       readl(HIF_NOCPY_RX_CURR_BD_ADDR));
+	len += sprintf(buf + len, " rx status: %x\n",
+		       readl(HIF_NOCPY_RX_STATUS));
+	len += sprintf(buf + len, " rx dma status: %x\n",
+		       readl(HIF_NOCPY_RX_DMA_STATUS));
+
+	return len;
+}
+
+static ssize_t pfe_show_gpi(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	ssize_t len = 0;
+
+	len += gpi(buf + len, 0, EGPI1_BASE_ADDR);
+	len += gpi(buf + len, 1, EGPI2_BASE_ADDR);
+	len += gpi(buf + len, 3, HGPI_BASE_ADDR);
+
+	return len;
+}
+
+static ssize_t pfe_show_pfemem(struct device *dev, struct device_attribute
+				*attr, char *buf)
+{
+	ssize_t len = 0;
+	struct pfe_memmon *memmon = &pfe->memmon;
+
+	len += sprintf(buf + len, "Kernel Memory: %d Bytes (%d KB)\n",
+		       memmon->kernel_memory_allocated,
+		       (memmon->kernel_memory_allocated + 1023) / 1024);
+
+	return len;
+}
+
+#ifdef HIF_NAPI_STATS
+static ssize_t pfe_show_hif_napi_stats(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pfe *pfe = platform_get_drvdata(pdev);
+	ssize_t len = 0;
+
+	len += sprintf(buf + len, "sched: %u\n",
+		       pfe->hif.napi_counters[NAPI_SCHED_COUNT]);
+	len += sprintf(buf + len, "poll: %u\n",
+		       pfe->hif.napi_counters[NAPI_POLL_COUNT]);
+	len += sprintf(buf + len, "packet: %u\n",
+		       pfe->hif.napi_counters[NAPI_PACKET_COUNT]);
+	len += sprintf(buf + len, "budget: %u\n",
+		       pfe->hif.napi_counters[NAPI_FULL_BUDGET_COUNT]);
+	len += sprintf(buf + len, "desc: %u\n",
+		       pfe->hif.napi_counters[NAPI_DESC_COUNT]);
+	len += sprintf(buf + len, "full: %u\n",
+		       pfe->hif.napi_counters[NAPI_CLIENT_FULL_COUNT]);
+
+	return len;
+}
+
+static ssize_t pfe_set_hif_napi_stats(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pfe *pfe = platform_get_drvdata(pdev);
+
+	memset(pfe->hif.napi_counters, 0, sizeof(pfe->hif.napi_counters));
+
+	return count;
+}
+
+static DEVICE_ATTR(hif_napi_stats, 0644, pfe_show_hif_napi_stats,
+		   pfe_set_hif_napi_stats);
+#endif
+
+static DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class);
+static DEVICE_ATTR(tmu, 0644, pfe_show_tmu, pfe_set_tmu);
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+static DEVICE_ATTR(util, 0644, pfe_show_util, pfe_set_util);
+#endif
+static DEVICE_ATTR(bmu, 0444, pfe_show_bmu, NULL);
+static DEVICE_ATTR(hif, 0444, pfe_show_hif, NULL);
+static DEVICE_ATTR(gpi, 0444, pfe_show_gpi, NULL);
+static DEVICE_ATTR(drops, 0644, pfe_show_drops, pfe_set_drops);
+static DEVICE_ATTR(tmu0_queues, 0444, pfe_show_tmu0_queues, NULL);
+static DEVICE_ATTR(tmu1_queues, 0444, pfe_show_tmu1_queues, NULL);
+static DEVICE_ATTR(tmu2_queues, 0444, pfe_show_tmu2_queues, NULL);
+static DEVICE_ATTR(tmu3_queues, 0444, pfe_show_tmu3_queues, NULL);
+static DEVICE_ATTR(pfemem, 0444, pfe_show_pfemem, NULL);
+
+int pfe_sysfs_init(struct pfe *pfe)
+{
+	if (device_create_file(pfe->dev, &dev_attr_class))
+		goto err_class;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu))
+		goto err_tmu;
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	if (device_create_file(pfe->dev, &dev_attr_util))
+		goto err_util;
+#endif
+
+	if (device_create_file(pfe->dev, &dev_attr_bmu))
+		goto err_bmu;
+
+	if (device_create_file(pfe->dev, &dev_attr_hif))
+		goto err_hif;
+
+	if (device_create_file(pfe->dev, &dev_attr_gpi))
+		goto err_gpi;
+
+	if (device_create_file(pfe->dev, &dev_attr_drops))
+		goto err_drops;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu0_queues))
+		goto err_tmu0_queues;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu1_queues))
+		goto err_tmu1_queues;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu2_queues))
+		goto err_tmu2_queues;
+
+	if (device_create_file(pfe->dev, &dev_attr_tmu3_queues))
+		goto err_tmu3_queues;
+
+	if (device_create_file(pfe->dev, &dev_attr_pfemem))
+		goto err_pfemem;
+
+#ifdef HIF_NAPI_STATS
+	if (device_create_file(pfe->dev, &dev_attr_hif_napi_stats))
+		goto err_hif_napi_stats;
+#endif
+
+	return 0;
+
+#ifdef HIF_NAPI_STATS
+err_hif_napi_stats:
+	device_remove_file(pfe->dev, &dev_attr_pfemem);
+#endif
+
+err_pfemem:
+	device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
+
+err_tmu3_queues:
+	device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
+
+err_tmu2_queues:
+	device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
+
+err_tmu1_queues:
+	device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
+
+err_tmu0_queues:
+	device_remove_file(pfe->dev, &dev_attr_drops);
+
+err_drops:
+	device_remove_file(pfe->dev, &dev_attr_gpi);
+
+err_gpi:
+	device_remove_file(pfe->dev, &dev_attr_hif);
+
+err_hif:
+	device_remove_file(pfe->dev, &dev_attr_bmu);
+
+err_bmu:
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	device_remove_file(pfe->dev, &dev_attr_util);
+
+err_util:
+#endif
+	device_remove_file(pfe->dev, &dev_attr_tmu);
+
+err_tmu:
+	device_remove_file(pfe->dev, &dev_attr_class);
+
+err_class:
+	return -1;
+}
+
+void pfe_sysfs_exit(struct pfe *pfe)
+{
+#ifdef HIF_NAPI_STATS
+	device_remove_file(pfe->dev, &dev_attr_hif_napi_stats);
+#endif
+	device_remove_file(pfe->dev, &dev_attr_pfemem);
+	device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
+	device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
+	device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
+	device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
+	device_remove_file(pfe->dev, &dev_attr_drops);
+	device_remove_file(pfe->dev, &dev_attr_gpi);
+	device_remove_file(pfe->dev, &dev_attr_hif);
+	device_remove_file(pfe->dev, &dev_attr_bmu);
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+	device_remove_file(pfe->dev, &dev_attr_util);
+#endif
+	device_remove_file(pfe->dev, &dev_attr_tmu);
+	device_remove_file(pfe->dev, &dev_attr_class);
+}
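qm_read_drop_stat() above layers a software accumulator over a clear-on-read hardware counter, so a caller can take either the per-read delta (the return value) or the running total (via total_drops). A sketch of both uses, with the TMU and queue numbers chosen arbitrarily:

static void sample_tmu0_q5_drops(void)
{
	u32 delta, total;

	/* delta since the previous read; total keeps accumulating */
	delta = qm_read_drop_stat(0, 5, &total, 0);
	pr_info("q5: +%u drops (total %u)\n", delta, total);

	/* read once more, then reset the running total */
	delta = qm_read_drop_stat(0, 5, &total, 1);
	pr_info("q5: +%u drops (total %u, now reset)\n", delta, total);
}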
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_sysfs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_SYSFS_H_
+#define _PFE_SYSFS_H_
+
+#include <linux/proc_fs.h>
+
+u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset);
+
+int pfe_sysfs_init(struct pfe *pfe);
+void pfe_sysfs_exit(struct pfe *pfe);
+
+#endif /* _PFE_SYSFS_H_ */