The existing software receive and transmit path does not actually work.
This replaces it with a basic working implementation.
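
In short, the working implementation copies TX data into driver-owned
DMA buffers tracked in a physical-to-virtual hash table (TXOUT only
returns a buffer long after the device has consumed it), byteswaps
descriptors only on big-endian hosts, allocates the descriptor rings
coherently, acknowledges processed TXOUT/RXOUT descriptors via the UMT
ACCUM_SUB registers instead of the unused cumulative counters, and
releases all rings, buffers and IRQs again on driver unload.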
--- a/dcdp/atm_tc.c
+++ b/dcdp/atm_tc.c
@@ -603,7 +603,11 @@ static void atm_aca_init(struct atm_priv
cfg = &priv->tc_priv->cfg;

txin = &param.aca_txin;
+#if defined(__LITTLE_ENDIAN)
+ txin->byteswap = 0;
+#else
txin->byteswap = 1;
+#endif
txin->hd_size_in_dw = cfg->txin.soc_desc_dwsz;
txin->pd_desc_base = SB_XBAR_ADDR(__ACA_TX_IN_PD_LIST_BASE);
txin->pd_desc_num = __ACA_TX_IN_PD_LIST_NUM;
@@ -625,7 +629,11 @@ static void atm_aca_init(struct atm_priv
txin->soc_cmlt_cnt_addr);

txout = &param.aca_txout;
+#if defined(__LITTLE_ENDIAN)
+ txout->byteswap = 0;
+#else
txout->byteswap = 1;
+#endif
txout->hd_size_in_dw = cfg->txout.soc_desc_dwsz;
txout->pd_desc_base = SB_XBAR_ADDR(__ACA_TX_OUT_PD_LIST_BASE);
txout->pd_desc_num = __ACA_TX_OUT_PD_LIST_NUM;
@@ -647,7 +655,11 @@ static void atm_aca_init(struct atm_priv
txout->soc_cmlt_cnt_addr);

rxout = &param.aca_rxout;
+#if defined(__LITTLE_ENDIAN)
+ rxout->byteswap = 0;
+#else
rxout->byteswap = 1;
+#endif
rxout->hd_size_in_dw = cfg->rxout.soc_desc_dwsz;
rxout->pd_desc_base = SB_XBAR_ADDR(__ACA_RX_OUT_PD_LIST_BASE);
rxout->pd_desc_num = __ACA_RX_OUT_PD_LIST_NUM;
@@ -669,7 +681,11 @@ static void atm_aca_init(struct atm_priv
rxout->soc_cmlt_cnt_addr);

rxin = &param.aca_rxin;
+#if defined(__LITTLE_ENDIAN)
+ rxin->byteswap = 0;
+#else
rxin->byteswap = 1;
+#endif
rxin->hd_size_in_dw = cfg->rxin.soc_desc_dwsz;
rxin->pd_desc_base = SB_XBAR_ADDR(__RX_IN_PD_DES_LIST_BASE);
rxin->pd_desc_num = __ACA_RX_IN_PD_LIST_NUM;
@@ -1261,7 +1277,7 @@ static int ppe_ioctl(struct atm_dev *dev
static int ppe_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
int ret, qid, mpoa_pt, mpoa_type, vid;
- unsigned int prio, conn;
+ unsigned int prio, conn, len;
struct atm_priv *priv;

if (!vcc) {
@@ -1327,12 +1343,14 @@ static int ppe_send(struct atm_vcc *vcc,
tc_dbg(priv->tc_priv, MSG_TX, "vid: 0x%x, qid: 0x%x\n",
vid, qid);

+ len = skb->len;
+
if (priv->tc_priv->tc_ops.send(NULL,
skb, qid, ATM_SL_PKT) == 0) {
priv->stats.aal5_tx_pkts++;
- priv->stats.aal5_tx_bytes += skb->len;
+ priv->stats.aal5_tx_bytes += len;
priv->conn[conn].stats.aal5_tx_pkts++;
- priv->conn[conn].stats.aal5_tx_bytes += skb->len;
+ priv->conn[conn].stats.aal5_tx_bytes += len;
priv->conn[conn].prio_tx_packets[prio]++;
} else {
tc_dbg(priv->tc_priv, MSG_TX, "ATM: TX fail\n");
--- a/dcdp/ptm_tc.c
+++ b/dcdp/ptm_tc.c
@@ -506,6 +506,7 @@ static int ptm_xmit(struct sk_buff *skb,
struct ptm_priv *ptm_tc = netdev_priv(dev);
int qid;
enum tc_pkt_type type;
+ unsigned int len;

if (!showtime_stat(ptm_tc->tc_priv))
goto PTM_XMIT_DROP;
@@ -519,11 +520,13 @@ static int ptm_xmit(struct sk_buff *skb,
type = ptm_tc->tc_priv->tc_mode == TC_PTM_BND_MODE
? PTM_BOND_PKT : PTM_SL_PKT;

+ len = skb->len;
+
if (ptm_tc->tc_priv->tc_ops.send(dev, skb, qid, type) < 0)
ptm_tc->stats64.tx_dropped++;
else {
ptm_tc->stats64.tx_packets++;
- ptm_tc->stats64.tx_bytes += skb->len;
+ ptm_tc->stats64.tx_bytes += len;
}

return 0;
@@ -640,7 +643,7 @@ static int ptm_dev_init(struct tc_priv *
const char macaddr[ETH_ALEN]
= {0xAC, 0x9A, 0x96, 0x11, 0x22, 0x33};

- dev = alloc_netdev_mq(sizeof(*ptm_tc), "dsl%d", NET_NAME_ENUM, ptm_setup, 4);
+ dev = alloc_netdev(sizeof(*ptm_tc), "dsl%d", NET_NAME_ENUM, ptm_setup);
if (!dev) {
tc_dbg(tc_priv, MSG_INIT, "Cannot alloc net device\n");
return -ENOMEM;
@@ -2337,7 +2340,11 @@ static void ptm_aca_init(struct ptm_ep_p
cfg = &priv->tc_priv->cfg;

txin = &param.aca_txin;
+#if defined(__LITTLE_ENDIAN)
+ txin->byteswap = 0;
+#else
txin->byteswap = 1;
+#endif
txin->hd_size_in_dw = cfg->txin.soc_desc_dwsz;
txin->pd_desc_base = SB_XBAR_ADDR(__ACA_TX_IN_PD_LIST_BASE);
txin->pd_desc_num = __ACA_TX_IN_PD_LIST_NUM;
@@ -2360,7 +2367,11 @@ static void ptm_aca_init(struct ptm_ep_p
txin->soc_cmlt_cnt_addr);

txout = &param.aca_txout;
+#if defined(__LITTLE_ENDIAN)
+ txout->byteswap = 0;
+#else
txout->byteswap = 1;
+#endif
txout->hd_size_in_dw = cfg->txout.soc_desc_dwsz;
if (priv->tc_priv->param.cdma_desc_loc == LOC_IN_FPI)
txout->pd_desc_base = sb_r32(__TX_OUT_SHADOW_PTR) - phybase;
@@ -2386,7 +2397,11 @@ static void ptm_aca_init(struct ptm_ep_p
txout->soc_cmlt_cnt_addr);

rxout = &param.aca_rxout;
+#if defined(__LITTLE_ENDIAN)
+ rxout->byteswap = 0;
+#else
rxout->byteswap = 1;
+#endif
rxout->hd_size_in_dw = cfg->rxout.soc_desc_dwsz;
if (priv->tc_priv->param.cdma_desc_loc == LOC_IN_FPI)
rxout->pd_desc_base = sb_r32(__RX_OUT_SHADOW_PTR) - phybase;
@@ -2412,7 +2427,11 @@ static void ptm_aca_init(struct ptm_ep_p
rxout->soc_cmlt_cnt_addr);

rxin = &param.aca_rxin;
+#if defined(__LITTLE_ENDIAN)
+ rxin->byteswap = 0;
+#else
rxin->byteswap = 1;
+#endif
rxin->hd_size_in_dw = cfg->rxin.soc_desc_dwsz;
rxin->pd_desc_base = SB_XBAR_ADDR(__RX_IN_PD_DES_LIST_BASE);
rxin->pd_desc_num = __ACA_RX_IN_PD_LIST_NUM;
--- a/dcdp/platform/sw_plat.c
+++ b/dcdp/platform/sw_plat.c
@@ -36,10 +36,13 @@
#include <linux/printk.h>
#include <linux/etherdevice.h>
#include <linux/workqueue.h>
-#include "inc/dsl_tc.h"
+#include "../inc/dsl_tc.h"

#include "../inc/tc_main.h"
#include "../inc/reg_addr.h"
+#include "../inc/tc_common.h"
+
+#include "../inc/fw/vrx518_addr_def.h"


#define PMAC_SIZE 8
@@ -70,7 +73,7 @@ enum {
#define TXIN_DNUM 128
#define TXOUT_DNUM 128
#define RXOUT_DNUM 1024
-#define RXIN_DNUM 1024
+#define RXIN_DNUM 0

#define TXIN_CHECK_NUM 32

@@ -80,22 +83,32 @@ struct aca_ring {
void *umt_dst;
u32 umt_phydst;
u32 dnum;
+ u32 dsize;
int idx; /* SoC RX/TX index */
- int cnt;
- void *cnt_addr;
- u32 cnt_phyaddr;
int ep_dev_idx;
};

+struct tx_list_item {
+ size_t len;
+ void *buf;
+ dma_addr_t phyaddr;
+};
+
+struct tx_list {
+ struct tx_list_item *data;
+ u32 dnum;
+};
+
struct aca_ring_grp {
struct aca_ring rxin;
struct aca_ring txin;
struct aca_ring rxout;
struct aca_ring txout;
+ struct tx_list txlist;
};

-#if 1
-struct dma_desc {
+#if defined(__LITTLE_ENDIAN)
+struct dma_tx_desc {
/* DW 0 */
u32 qid;
/* DW 1 */
@@ -112,8 +125,26 @@ struct dma_desc {
u32 c:1;
u32 own:1;
}__packed;
+
+struct dma_rx_desc {
+ /* DW 0 */
+ u32 qid;
+ /* DW 1 */
+ u32 res2;
+ /* DW 2 */
+ u32 data_len:16;
+ u32 res0:7;
+ u32 byte_off:3;
+ u32 res1:2;
+ u32 eop:1;
+ u32 sop:1;
+ u32 c:1;
+ u32 own:1;
+ /* DW 3 */
+ u32 data_ptr;
+}__packed;
#else
-struct dma_desc {
+struct dma_tx_desc {
/* DW 0 */
u32 qid;
/* DW 1 */
@@ -131,14 +162,25 @@ struct dma_desc {
u32 data_len:16;
}__packed;

+struct dma_rx_desc {
+ /* DW 0 */
+ u32 qid;
+ /* DW 1 */
+ u32 res;
+ /* DW 2 */
+ u32 own:1;
+ u32 c:1;
+ u32 sop:1;
+ u32 eop:1;
+ u32 res1:2;
+ u32 byte_off:3;
+ u32 res0:7;
+ u32 data_len:16;
+ /* DW 3 */
+ u32 data_ptr;
+}__packed;
#endif

-struct plat_dma {
- u32 chan; /* CHAN IID */
- u32 dma_chan; /* CONTROLLER/PORT/CHAN ID */
- u32 ds_dnum; /* DS descriptor number */
-};
-
struct plat_umt {
u32 id;
u32 cbm_id;
@@ -152,28 +194,28 @@ struct tc_req {
enum dsl_tc_mode tc_mode;
};

-#if 0
-struct tc_coc {
- enum ltq_cpufreq_state coc_stat;
- struct tasklet_struct coc_task;
+struct mem_map_entry {
+ dma_addr_t phyaddr;
+ void *mem;
+ size_t size;
+ struct hlist_node node;
};
-#endif

struct plat_priv {
struct tc_priv *tc_priv;
struct plat_umt umt[EP_MAX_NUM];
- struct plat_dma dma[EP_MAX_NUM];
struct ltq_mei_atm_showtime_info dsl_ops;
struct tc_req req_work;
struct aca_ring_grp soc_rings;
- /* struct tc_coc coc;*/
+ struct net_device *netdev;
+ DECLARE_HASHTABLE(mem_map, 8);
};

static struct plat_priv *g_plat_priv;
struct tasklet_struct txout_task;
struct tasklet_struct rxout_task;

-static void txout_action(struct tc_priv *priv, struct aca_ring *txout);
+static DEFINE_SPINLOCK(tx_spinlock);

void *ppa_callback_get(e_ltq_mei_cb_type type)
{
@@ -259,122 +301,65 @@ static inline struct tc_priv *plat_to_tc
return g_plat_priv->tc_priv;
}

-static int32_t plat_rx(struct net_device *rxdev, struct net_device *txdev,
- struct sk_buff *skb, int32_t len)
-{
- int32_t err;
- struct tc_priv *tc_priv = plat_to_tcpriv();
-
- if (unlikely(!rxdev)) {
- if (txdev != NULL)
- tc_dbg(tc_priv, MSG_RX,
- "Recv undelivered packet from DP lib\n");
- else
- tc_dbg(tc_priv, MSG_RX, "Recv unknown packet\n");
- err = -ENODEV;
- goto err1;
- }
-
- tc_priv->tc_ops.recv(rxdev, skb);
- return 0;
-
-err1:
- dev_kfree_skb_any(skb);
-
- return err;
-}
-
-#if 0
-static int32_t plat_get_subifid(struct net_device *dev, struct sk_buff *skb,
- void *subif_data, uint8_t dst_mac[MAX_ETH_ALEN],
- dp_subif_t *subif, uint32_t flags)
-{
- int qid;
- struct tc_priv *priv = plat_to_tcpriv();
-
- qid = priv->tc_ops.get_qid(dev, skb, subif_data, flags);
- if (qid < 0)
- return qid;
- else
- subif->subif = qid;
-
- return 0;
-}
-#endif
-
-#if 0
-static void plat_coc_tasklet(unsigned long arg)
-{
- /* change state to D0 */
- if (g_plat_priv->coc.coc_stat == LTQ_CPUFREQ_PS_D0)
- return;
-
- g_plat_priv->coc.coc_stat = LTQ_CPUFREQ_PS_D0;
-}
-
-static void plat_coc_req(void)
-{
- tasklet_schedule(&g_plat_priv->coc.coc_task);
-}
+static void *plat_mem_alloc(size_t size, enum tc_dir dir, u32 *phyaddr);
+static void *plat_mem_virt(u32 phyaddr);
+static void plat_mem_free(u32 phyaddr, enum tc_dir dir);

+static void txlist_free(struct tx_list *list);

-static int32_t plat_coc_stat(enum ltq_cpufreq_state new_state,
- enum ltq_cpufreq_state old_state, uint32_t flags)
+static int txlist_init(struct tx_list *list, u32 dnum)
{
- struct tc_priv *priv = plat_to_tcpriv();
- tc_dbg(priv, MSG_COC,
- "COC current state: %d, new state: %d, old state: %d\n",
- g_plat_priv->coc.coc_stat, new_state, old_state);
+ struct tx_list_item *item;
+ int i;

- if (g_plat_priv->coc.coc_stat != new_state) {
- g_plat_priv->coc.coc_stat = new_state;
+ list->dnum = dnum;

- if (new_state == LTQ_CPUFREQ_PS_D3) {
- /* Enable interrupt for DS packet */
- priv->tc_ops.irq_on(MBOX_PKT_RX);
- } else {
- /* Disable interrupt for DS packet */
- priv->tc_ops.irq_off(MBOX_PKT_RX);
+ list->data = kcalloc(dnum, sizeof(struct tx_list_item), GFP_KERNEL);
+ if (!list->data) {
+ pr_err("Failed to allocate TX list!\n");
+ goto err;
+ }
+
+ for (i = 0; i < list->dnum; i++) {
+ item = &list->data[i];
+
+ // use plat_mem_alloc as these buffers will be mixed with buffers allocated in ptm_tc.c / atm_tc.c
+ item->buf = plat_mem_alloc(DMA_PACKET_SZ, US_DIR, &item->phyaddr);
+ if (!item->buf) {
+ pr_err("Failed to allocate TX buffer!\n");
+ goto err;
}
}

return 0;
-}
-#endif
-
-static inline int ring_dist(int idx1, int idx2, int size)
-{
- if (idx1 >= idx2)
- return (idx1 - idx2);
- else
- return (idx1 + size - idx2);
-}

-static inline int __ring_full(int idx, int cnt, u32 dnum)
-{
- if (ring_dist(idx, cnt, dnum) < dnum - 1)
- return 0;
- else
- return 1;
+err:
+ txlist_free(list);
+ return -1;
}

-static inline int ring_full(struct aca_ring *ring)
+static void txlist_free(struct tx_list *list)
{
- if (!__ring_full(ring->idx, ring->cnt, ring->dnum))
- return 0;
+ struct tx_list_item *item;
+ int i;

- /* if ring full, update cumulative counter and check again */
- ring->cnt = readl(ring->cnt_addr) % ring->dnum;
+ if (list->data) {
+ for (i = 0; i < list->dnum; i++) {
+ item = &list->data[i];
+
+ if (item->buf) {
+ // use plat_mem_free as these buffers are mixed with buffers allocated in ptm_tc.c / atm_tc.c
+ plat_mem_free(item->phyaddr, US_DIR);
+ }
+ }
+ }

- return __ring_full(ring->idx, ring->cnt, ring->dnum);
+ kfree(list->data);
}

-#define ring_idx_inc(ring, idx) \
- do { ring->idx = (ring->idx + 1) % ring->dnum; } while (0);
-
-static inline void ring_cnt_update(struct aca_ring *ring)
+static inline void ring_idx_inc(struct aca_ring *ring)
{
- ring->cnt = readl(ring->cnt_addr) % ring->dnum;
+ ring->idx = (ring->idx + 1) % ring->dnum;
}

static struct sk_buff *txin_skb_prepare(struct sk_buff *skb)
@@ -399,252 +384,220 @@ static struct sk_buff *txin_skb_prepare(
return nskb;
}

-static int ring_mmap(void *mem, int size,
- enum dma_data_direction dir, u32 *addr)
-{
- struct device *pdev;
- dma_addr_t phy_addr;
- struct tc_priv *priv;
- u32 addr1;
-
- priv = g_plat_priv->tc_priv;
- pdev = priv->ep_dev[0].dev;
-
- phy_addr = dma_map_single(pdev, mem, size, dir);
- if (unlikely(dma_mapping_error(pdev, phy_addr))) {
- tc_err(priv, MSG_INIT,
- "DMA address mapping error: buf: 0x%x, size: %d, dir: %d\n",
- (u32)mem, size, dir);
- return -ENOMEM;
- }
- dma_unmap_single(pdev, phy_addr, size, dir);
-
- pr_info("vaddr: 0x%x, phyaddr: 0x%lx\n", (u32)mem, phy_addr);
- addr1 = (u32)phy_addr;
-
- if (addr)
- *addr = addr1;
-
- return 0;
-}
-
-static void txin_action(struct tc_priv *priv, struct aca_ring *txin,
+static int txin_action(struct tc_priv *priv, struct aca_ring *txin,
struct sk_buff *skb, int qid, enum tc_pkt_type type)
{
- struct dma_desc *desc, desc1;
- u32 phyaddr, *dst, *src;
- int i;
+ struct device *pdev = priv->ep_dev[0].dev;
+ struct aca_ring *txout = &g_plat_priv->soc_rings.txout;
+ struct tx_list *txlist = &g_plat_priv->soc_rings.txlist;
+ struct dma_tx_desc *desc;
+ struct tx_list_item *txlist_item;
+ unsigned long flags;
+
+ if (!g_plat_priv->netdev) {
+ spin_lock_irqsave(&tx_spinlock, flags);
+ }

- if (ring_full(txin)) {
- tc_dbg(priv, MSG_TX,
- "TXIN Ring Full!: idx: %d, cnt: %d\n",
- txin->idx, txin->cnt);
+ if ((txin->idx + 2) % txin->dnum == txout->idx) {
+ if (g_plat_priv->netdev) {
+ netif_stop_queue(g_plat_priv->netdev);
+ }
+ } else if ((txin->idx + 1) % txin->dnum == txout->idx) {
+ tc_err(priv, MSG_TX, "TXIN ring full: txin: %d, txout: %d\n",
+ txin->idx, txout->idx);
goto err1;
}

+ desc = (struct dma_tx_desc *)txin->dbase_mem;
+ desc += txin->idx;
+
+ txlist_item = &txlist->data[txin->idx];
+
skb = txin_skb_prepare(skb);
if (!skb)
- return;
+ goto err2;

- if (ring_mmap(skb->data, skb->len, DMA_TO_DEVICE, &phyaddr) < 0) {
- tc_err(priv, MSG_TX, "TXIN data mmap failed: 0x%x\n",
- (unsigned int)skb->data);
- goto err1;
- }
+ /*
+ * Copy the data to a buffer in the driver. This is necessary because there doesn't seem to be a timely signal
+ * from the device when it has consumed a buffer, which would allow to safely free it. The data_ptr is only
+ * returned in TXOUT after another fixed number of packets (depending on the size of internal buffers) has been
+ * transmitted, which may not happen in the near future. Making a copy allows to free the SKB here.
+ */
+ memcpy(txlist_item->buf, skb->data, skb->len);

- /* init a new descriptor for the new skb */
- desc = (struct dma_desc *)txin->dbase_mem;
- desc += txin->idx;
+ dma_sync_single_range_for_device(pdev, txlist_item->phyaddr, 0, skb->len, DMA_TO_DEVICE);

- memset(desc, 0, sizeof(*desc));
- memset(&desc1, 0, sizeof(desc1));
- desc1.own = 1;
- desc1.c = 1;
- desc1.sop = 1;
- desc1.eop = 1;
- desc1.byte_off = phyaddr & 0x7;
- desc1.data_len = skb->len;
-
- desc1.data_ptr = phyaddr & (~(0x7));
- desc1.qid = qid;
-
- dst = (u32 *)desc;
- src = (u32 *)&desc1;
- for (i = 0; i < DW_SZ(desc1); i++)
- dst[i] = cpu_to_be32(src[i]);
-
- pr_info("txin idx: %d\n", txin->idx);
- pr_info("descriptor dst val:(DW0-DW3): 0x%x, 0x%x, 0x%x, 0x%x\n",
- dst[0], dst[1], dst[2], dst[3]);
- pr_info("descriptor src val: (DW0-DW3): 0x%x, 0x%x, 0x%x, 0x%x\n",
- src[0], src[1], src[2], src[3]);
-
- if (ring_mmap(desc, sizeof(*desc), DMA_TO_DEVICE, NULL) < 0) {
- tc_err(priv, MSG_TX, "TXIN descriptor mmap failed: 0x%x\n",
- (unsigned int)desc);
+ // this should never happen, the buffers are already aligned by kmalloc
+ if (WARN_ON((txlist_item->phyaddr & 0x7) != 0))
goto err1;
+
+ if (g_plat_priv->netdev) {
+ netdev_sent_queue(g_plat_priv->netdev, skb->len);
}
+ txlist_item->len = skb->len;
+
+ memset(desc, 0, sizeof(*desc));

- ring_idx_inc(txin, idx);
+ desc->data_ptr = txlist_item->phyaddr;
+ desc->byte_off = 0;
+ desc->data_len = skb->len;
+ desc->qid = qid;
+
+ desc->sop = 1;
+ desc->eop = 1;
+ desc->c = 0;
+ desc->own = 1;
+
+ dev_consume_skb_any(skb);
+
+ ring_idx_inc(txin);

/* update TXIN UMT by 1 */
writel(1, txin->umt_dst);
- pr_info("TXIN send txin packet 1 packet\n");

- /* Free skb */
- dev_kfree_skb_any(skb);
+ if (!g_plat_priv->netdev) {
+ spin_unlock_irqrestore(&tx_spinlock, flags);
+ }

- /* check txout for testing*/
- //txout_action(plat_to_tcpriv(), &g_plat_priv->soc_rings.txout);
- return;
+ return 0;

err1:
- //skb->delay_free = 0;
dev_kfree_skb_any(skb);
+
+err2:
+ if (!g_plat_priv->netdev) {
+ spin_unlock_irqrestore(&tx_spinlock, flags);
+ }
+
+ return -1;
}

static void txout_action(struct tc_priv *priv, struct aca_ring *txout)
{
- int i, cnt;
- struct dma_desc *desc;
- u32 ptr;
- void *mem;
-
- ring_cnt_update(txout);
- cnt = ring_dist(txout->idx, txout->cnt, txout->dnum);
+ struct aca_ring *txin = &g_plat_priv->soc_rings.txin;
+ struct tx_list *txlist = &g_plat_priv->soc_rings.txlist;
+ struct tx_list_item *txlist_item;
+ int i, cnt, bytes;
+ u32 *desc;
+ unsigned long flags;
+
+ cnt = 0;
+ bytes = 0;
+
+ if (g_plat_priv->netdev) {
+ netif_tx_lock(g_plat_priv->netdev);
+ } else {
+ spin_lock_irqsave(&tx_spinlock, flags);
+ }

- for (i = 0; i < cnt; i++) {
+ for (i = 0; i < txout->dnum; i++) {
desc = txout->dbase_mem;
desc += txout->idx;
- /* read from memory */
- if (ring_mmap(desc, sizeof(*desc), DMA_FROM_DEVICE, NULL) < 0) {
- tc_err(priv, MSG_TX,
- "map TXOUT DMA descriptor failed\n");
- continue;
+
+ // *desc seems to be a pointer to a QoSQ buffer or the data_ptr of some previously sent packet
+ if (*desc == 0) {
+ break;
}
- ptr = desc->data_ptr + desc->byte_off;
- mem = (void * __force)__va(ptr);
- kfree(mem);
- ring_idx_inc(txout, idx);
- }

- if (cnt)
- writel(cnt, txout->umt_dst);
- pr_info("TXOUT received %d descriptors\n", cnt);
-}
+ if (txout->idx == txin->idx) {
+ tc_err(priv, MSG_TX, "TXOUT unexpected non-zero descriptor: txin: %d, txout: %d\n",
+ txin->idx, txout->idx);
+ break;
+ }

-static void rxin_action(struct tc_priv *priv,
- struct aca_ring *rxin, int size, int cnt)
-{
- int i, dist;
- struct dma_desc *desc;
- void *data_ptr;
- u32 phyaddr;
-
- if (ring_full(rxin)) {
- tc_dbg(priv, MSG_RX,
- "RXIN Ring Full!: idx: %d, cnt: %d\n",
- rxin->idx, rxin->cnt);
- return;
- }
+ txlist_item = &txlist->data[txout->idx];

- dist = ring_dist(rxin->idx, rxin->cnt, rxin->dnum);
- if (cnt > dist) {
- WARN_ONCE(1, "RXIN NO enough room for free buffers: free: %d, room: %d\n",
- cnt, dist);
- cnt = dist;
+ cnt++;
+ bytes += txlist_item->len;
+
+ /*
+ * Reuse the returned buffer. The previous buffer should still be referenced by another descriptor.
+ * When the driver is unloaded, all buffers in the txlist as well as those referenced by the
+ * descriptors managed in ptm_tc.c or atm_tc.c will be freed.
+ */
+ txlist_item->buf = plat_mem_virt(*desc);
+ txlist_item->phyaddr = *desc;
+
+ *desc = 0;
+
+ ring_idx_inc(txout);
}

- for (i = 0; i < cnt; i++) {
- data_ptr = kmalloc(size, GFP_ATOMIC);
- if (!data_ptr) {
- tc_err(priv, MSG_RX,
- "RXIN kmalloc data buffer failed: %d\n", size);
- goto err1;
- }
+ if (cnt) {
+ writel(cnt, txout->umt_dst+0x28); // TXOUT_HD_ACCUM_SUB instead of TXOUT_HD_ACCUM_ADD

- if (ring_mmap(data_ptr, size, DMA_FROM_DEVICE, &phyaddr) < 0) {
- tc_err(priv, MSG_RX,
- "RXIN kmalloc data buffer failed: %d\n", size);
- goto err2;
+ if (g_plat_priv->netdev) {
+ netdev_completed_queue(g_plat_priv->netdev, cnt, bytes);
}
+ }

- desc = (struct dma_desc *)rxin->dbase_mem;
- desc += rxin->idx;
- memset(desc, 0, sizeof(*desc));
-
- desc->data_len = size;
- desc->byte_off = phyaddr & 0x7;
- desc->eop = 1;
- desc->sop = 1;
- desc->own = 1;
-
- desc->data_ptr = phyaddr;
-
-
- if (ring_mmap(desc, sizeof(*desc), DMA_TO_DEVICE, NULL) < 0) {
- tc_err(priv, MSG_RX, "RXIN descriptor mmap failed: 0x%x\n",
- (unsigned int)desc);
- goto err2;
- }
-
- ring_idx_inc(rxin, idx);
+ if (g_plat_priv->netdev) {
+ netif_tx_unlock(g_plat_priv->netdev);
+ } else {
+ spin_unlock_irqrestore(&tx_spinlock, flags);
}

- /* update RXIN UMT*/
- writel(i, rxin->umt_dst);
- pr_info("rxin refill %d descriptors\n", i);
- return;
+ if (cnt && g_plat_priv->netdev && netif_queue_stopped(g_plat_priv->netdev)) {
+ netif_wake_queue(g_plat_priv->netdev);
+ }
+}

-err2:
- kfree(data_ptr);
-err1:
- if (i)
- writel(i, rxin->umt_dst);
- return;
+static void rxin_action(struct tc_priv *priv,
+ struct aca_ring *rxin, int size, int cnt)
+{
+ /* update RXIN UMT*/
+ writel(cnt, rxin->umt_dst);
}

static int rxout_action(struct tc_priv *priv, struct aca_ring *rxout)
{
+ struct device *pdev = priv->ep_dev[0].dev;
int i, cnt;
- struct dma_desc *desc;
- u32 ptr;
- void *mem;
+ struct dma_rx_desc *desc;
+ dma_addr_t phyaddr;
+ void *ptr, *dst;
+ size_t len;
struct sk_buff *skb;

- ring_cnt_update(rxout);
- cnt = ring_dist(rxout->idx, rxout->cnt, rxout->dnum);
-
- for (i = 0; i < cnt; i++) {
+ cnt = 0;
+ for (i = 0; i < rxout->dnum; i++) {
desc = rxout->dbase_mem;
desc += rxout->idx;

- /* read from memory */
- if (ring_mmap(desc, sizeof(*desc), DMA_FROM_DEVICE, NULL) < 0) {
- tc_err(priv, MSG_RX,
- "map RXOUT DMA descriptor failed\n");
- continue;
+ if (!desc->own) {
+ break;
}
- ptr = desc->data_ptr + desc->byte_off;
- mem = __va(ptr);
- skb = build_skb(mem, 0);
- if (!skb) {
- tc_err(priv, MSG_RX,
- "RXOUT build skb failed\n");
- kfree(mem);
- continue;
+
+ // this seems to be a pointer to a DS PKT buffer
+ phyaddr = desc->data_ptr + desc->byte_off;
+ ptr = plat_mem_virt(phyaddr);
+
+ len = desc->data_len;
+
+ dma_sync_single_range_for_cpu(pdev, phyaddr, 0, len, DMA_FROM_DEVICE);
+
+ skb = netdev_alloc_skb(g_plat_priv->netdev, len);
+ if (unlikely(!skb)) {
+ tc_err(priv, MSG_RX, "RXOUT SKB allocation failed\n");
+ break;
}
- priv->tc_ops.recv(NULL, skb);
- ring_idx_inc(rxout, idx);
+
+ dst = skb_put(skb, len);
+ memcpy(dst, ptr, len);
+
+ priv->tc_ops.recv(g_plat_priv->netdev, skb);
+
+ desc->own = 0;
+
+ cnt++;
+ ring_idx_inc(rxout);
}

if (!cnt)
- tc_err(priv, MSG_RX, "RXOUT dummy interrupt: dbase: 0x%x, idx: %d, cnt: %d\n",
- (unsigned int)rxout->dbase_mem, rxout->idx, rxout->cnt);
+ tc_err(priv, MSG_RX, "RXOUT spurious interrupt\n");
else
- writel(cnt, rxout->umt_dst);
+ writel(cnt, rxout->umt_dst+0x28); // RXOUT_HD_ACCUM_SUB instead of RXOUT_HD_ACCUM_ADD

- pr_info("txout received %d packets\n", cnt);
return cnt;
}

@@ -669,7 +622,6 @@ static void plat_rxout_tasklet(unsigned
struct aca_ring *rxin = &priv->soc_rings.rxin;
struct dc_ep_dev *ep_dev = &tcpriv->ep_dev[rxout->ep_dev_idx];
int cnt;
-

cnt = rxout_action(tcpriv, rxout);
if (cnt)
@@ -687,68 +639,144 @@ static int plat_send(struct net_device *
{
struct plat_priv *priv = g_plat_priv;
struct aca_ring *txin = &priv->soc_rings.txin;
+ int res;

- txin_action(priv->tc_priv, txin, skb, qid, type);
+ res = txin_action(priv->tc_priv, txin, skb, qid, type);

- return 0;
+ return res;
+}
+
+static void plat_mem_init(void)
+{
+ struct plat_priv *priv = g_plat_priv;
+
+ hash_init(priv->mem_map);
}

/* return virtual address */
-static void *plat_mem_alloc(size_t size, enum tc_dir dir)
+static void *plat_mem_alloc(size_t size, enum tc_dir dir, u32 *phyaddr)
{
- return kmalloc(size, GFP_KERNEL);
+ struct plat_priv *priv = g_plat_priv;
+ struct tc_priv *tcpriv = priv->tc_priv;
+ struct device *pdev = tcpriv->ep_dev[0].dev;
+ enum dma_data_direction dma_dir;
+ struct mem_map_entry *entry;
+
+ entry = kzalloc(sizeof(struct mem_map_entry), GFP_KERNEL);
+ if (!entry)
+ goto err;
+
+ entry->size = size;
+
+ entry->mem = kmalloc(size, GFP_KERNEL);
+ if (!entry->mem)
+ goto err_alloc;
+
+ dma_dir = (dir == DS_DIR) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ entry->phyaddr = dma_map_single(pdev, entry->mem, entry->size, dma_dir);
+ if (unlikely(dma_mapping_error(pdev, entry->phyaddr))) {
+ tc_err(priv, MSG_INIT,
+ "plat_mem_alloc: DMA mapping error: buf: 0x%x, size: %d, dir: %d\n",
+ (u32)entry->mem, size, dir);
+
+ goto err_map;
+ }
+
+ hash_add(g_plat_priv->mem_map, &entry->node, entry->phyaddr);
+
+ *phyaddr = entry->phyaddr;
+ return entry->mem;
+
+err_map:
+ kfree(entry->mem);
+
+err_alloc:
+ kfree(entry);
+
+err:
+ return NULL;
}

-static void plat_mem_free(u32 phy_addr, enum tc_dir dir)
+static void *plat_mem_virt(u32 phyaddr)
{
- void *mem;
+ struct mem_map_entry *entry;
+
+ hash_for_each_possible(g_plat_priv->mem_map, entry, node, phyaddr)
+ if (entry->phyaddr == phyaddr)
+ return entry->mem;
+
+ WARN_ON(1);
+ return NULL;
+}
+
+static struct mem_map_entry *plat_mem_entry(u32 phyaddr)
+{
+ struct mem_map_entry *entry;
+
+ hash_for_each_possible(g_plat_priv->mem_map, entry, node, phyaddr)
+ if (entry->phyaddr == phyaddr)
+ return entry;

- mem = (void * __force)__va(phy_addr);
- kfree(mem);
+ return NULL;
}

-static void aca_soc_ring_init(struct tc_priv *priv,
- struct aca_ring *ring, u32 dnum, u32 dsize)
+static void plat_mem_free(u32 phyaddr, enum tc_dir dir)
{
+ struct tc_priv *priv = g_plat_priv->tc_priv;
+ struct device *pdev = priv->ep_dev[0].dev;
+ enum dma_data_direction dma_dir;
+ struct mem_map_entry *entry;
+
+ entry = plat_mem_entry(phyaddr);
+ if (WARN_ON(!entry))
+ return;
+
+ hash_del(&entry->node);
+
+ dma_dir = (dir == DS_DIR) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ dma_unmap_single(pdev, entry->phyaddr, entry->size, dma_dir);
+
+ kfree(entry->mem);
+
+ kfree(entry);
+}
+
+static int ring_init(struct tc_priv *priv, struct aca_ring *ring, u32 dnum, u32 dsize)
+{
+ struct device *pdev = priv->ep_dev[0].dev;
int size;
- struct device *pdev;

memset(ring, 0, sizeof(*ring));
ring->dnum = dnum;
+ ring->dsize = dsize;
+
+ if (ring->dnum == 0) {
+ return 0;
+ }
+
size = dsize * dnum;
- pdev = priv->ep_dev[0].dev;

- ring->dbase_mem = kmalloc(size, GFP_KERNEL);
+ ring->dbase_mem = dma_alloc_coherent(pdev, size, &(ring->dbase_phymem), GFP_KERNEL);
if (!ring->dbase_mem) {
- tc_err(priv, MSG_INIT, "Allocate SoC Ring fail: %d\n", dnum);
- return;
+ tc_err(priv, MSG_INIT, "Ring allocation failed: %d\n", dnum);
+ return -1;
}

- ring_mmap(ring->dbase_mem, size, DMA_FROM_DEVICE, &(ring->dbase_phymem));
- tc_dbg(priv, MSG_INIT, "ring: membase: 0x%x, phybase: 0x%x, dnum: %d\n",
- (u32)ring->dbase_mem, ring->dbase_phymem, ring->dnum);
-
- size = sizeof(u32);
- ring->cnt_addr = kzalloc(size, GFP_KERNEL);
- if (!ring->cnt_addr) {
- tc_err(priv, MSG_INIT, "Allocate cumulative counter fail!\n");
- return;
- }
+ return 0;
+}

- ring_mmap(ring->cnt_addr, size, DMA_TO_DEVICE, &(ring->cnt_phyaddr));
- tc_dbg(priv, MSG_INIT, "ring: cumulative cnt addr: 0x%x, phy address: 0x%x\n",
- (u32)ring->cnt_addr, ring->cnt_phyaddr);
+#define ring_dnum(tcpriv, name1, name2) ((!tcpriv->param.name1##_dnum) ? name2##_DNUM : tcpriv->param.name1##_dnum)

- return;
-}
+static void ring_free(struct tc_priv *priv, struct aca_ring *ring)
+{
+ struct device *pdev = priv->ep_dev[0].dev;

-#define ring_init(tcpriv, ring, name1, name2, num, size) \
-{ \
- if (!tcpriv->param.name1##_dnum) \
- num = name2##_DNUM; \
- else \
- num = tcpriv->param.name1##_dnum; \
- aca_soc_ring_init(tcpriv, ring, num, size); \
+ if (ring->dnum == 0) {
+ return;
+ }
+
+ dma_free_coherent(pdev, ring->dsize * ring->dnum, ring->dbase_mem, ring->dbase_phymem);
}

static irqreturn_t aca_rx_irq_handler(int irq, void *dev_id)
@@ -777,39 +805,55 @@ static irqreturn_t aca_tx_irq_handler(in
return IRQ_HANDLED;
}

-static void irq_init(struct tc_priv *priv, const char *dev_name)
+static void plat_irq_init(struct tc_priv *priv, const char *dev_name)
{
int ret;
int i;
- char name[IFNAMSIZ];
+ //char name[IFNAMSIZ];

for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
- sprintf(name, "%s%d", dev_name, i);
+ //snprintf(name, sizeof(name), "aca-rxo%d", i);

ret = devm_request_irq(priv->ep_dev[i].dev, priv->ep_dev[i].aca_rx_irq,
- aca_rx_irq_handler, 0, name, &priv->ep_dev[i]);
+ aca_rx_irq_handler, 0, "aca-rxo", &priv->ep_dev[i]);

if (ret) {
tc_err(priv, MSG_INIT,
"ACA RX IRQ request Fail!: irq: %d, ep_id: %d\n",
priv->ep_dev[i].aca_rx_irq, i);
//return;
- }
+ }
+
+ //snprintf(name, sizeof(name), "aca-txo%d", i);

ret = devm_request_irq(priv->ep_dev[i].dev, priv->ep_dev[i].aca_tx_irq,
- aca_tx_irq_handler, 0, name, &priv->ep_dev[i]);
+ aca_tx_irq_handler, 0, "aca-txo", &priv->ep_dev[i]);

if (ret) {
tc_err(priv, MSG_INIT,
"ACA TX IRQ request Fail!: irq: %d, ep_id: %d\n",
priv->ep_dev[i].aca_tx_irq, i);
//return;
- }
+ }
}

return;
}

+static void plat_irq_free(struct tc_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
+
+ /* Unregister RX irq handler */
+ devm_free_irq(priv->ep_dev[i].dev, priv->ep_dev[i].aca_rx_irq, &priv->ep_dev[i]);
+
+ /* Unregister TX irq handler */
+ devm_free_irq(priv->ep_dev[i].dev, priv->ep_dev[i].aca_tx_irq, &priv->ep_dev[i]);
+ }
+}
+
/**
* Decide txin/rxout queue size
* Create a tx/rx queue
@@ -819,29 +863,68 @@ static int plat_dp_init(struct plat_priv
struct tc_priv *tcpriv;
struct aca_ring_grp *soc_rings;
struct aca_ring *ring;
- int size;
u32 dnum;
+ int i;
+ int ret = 0;

tcpriv = priv->tc_priv;

- size = sizeof(struct dma_desc);
+ plat_mem_init();
+
soc_rings = &priv->soc_rings;

/* txin ring */
ring = &soc_rings->txin;
- ring_init(tcpriv, ring, txin, TXIN, dnum, size);
+ dnum = ring_dnum(tcpriv, txin, TXIN);
+ ret = txlist_init(&soc_rings->txlist, dnum);
+ if (ret < 0)
+ goto err5;
+ ret = ring_init(tcpriv, ring, dnum, sizeof(struct dma_tx_desc));
+ if (ret < 0)
+ goto err4;

/* txout ring */
ring = &soc_rings->txout;
- ring_init(tcpriv, ring, txout, TXOUT, dnum, size);
+ dnum = ring_dnum(tcpriv, txout, TXOUT);
+ ret = ring_init(tcpriv, ring, dnum, sizeof(u32));
+ if (ret < 0)
+ goto err3;
+
/* rxin ring */
ring = &soc_rings->rxin;
- ring_init(tcpriv, ring, rxin, RXIN, dnum, size);
+ dnum = ring_dnum(tcpriv, rxin, RXIN);
+ ret = ring_init(tcpriv, ring, dnum, sizeof(struct dma_rx_desc));
+ if (ret < 0)
+ goto err2;
+
/* rxout ring */
ring = &soc_rings->rxout;
- ring_init(tcpriv, ring, rxout, RXOUT, dnum, size);
+ dnum = ring_dnum(tcpriv, rxout, RXOUT);
+ ret = ring_init(tcpriv, ring, dnum, sizeof(struct dma_rx_desc));
+ if (ret < 0)
+ goto err1;
+
+ for (i = 0; i < EP_MAX_NUM && i < tcpriv->ep_num; i++) {
+
+ /* Enable RX interrupt */
+ tcpriv->ep_dev[i].hw_ops->icu_en(&tcpriv->ep_dev[i], ACA_HOSTIF_RX);
+
+ /* Enable TX interrupt */
+ tcpriv->ep_dev[i].hw_ops->icu_en(&tcpriv->ep_dev[i], ACA_HOSTIF_TX);
+ }

return 0;
+
+err1:
+ ring_free(tcpriv, &soc_rings->rxin);
+err2:
+ ring_free(tcpriv, &soc_rings->txout);
+err3:
+ ring_free(tcpriv, &soc_rings->txin);
+err4:
+ txlist_free(&soc_rings->txlist);
+err5:
+ return ret;
}

/**
@@ -850,6 +933,26 @@ static int plat_dp_init(struct plat_priv
*/
static void plat_dp_exit(struct plat_priv *priv)
{
+ struct tc_priv *tcpriv = priv->tc_priv;
+ struct aca_ring_grp *soc_rings = &priv->soc_rings;
+ int i;
+
+ for (i = 0; i < EP_MAX_NUM && i < tcpriv->ep_num; i++) {
+
+ /* Disable RX interrupt */
+ tcpriv->ep_dev[i].hw_ops->icu_mask(&tcpriv->ep_dev[i], ACA_HOSTIF_RX);
+
+ /* Disable TX interrupt */
+ tcpriv->ep_dev[i].hw_ops->icu_mask(&tcpriv->ep_dev[i], ACA_HOSTIF_TX);
+ }
+
+ ring_free(tcpriv, &soc_rings->txin);
+ ring_free(tcpriv, &soc_rings->txout);
+ ring_free(tcpriv, &soc_rings->rxin);
+ ring_free(tcpriv, &soc_rings->rxout);
+
+ txlist_free(&soc_rings->txlist);
+
return;
}

@@ -858,45 +961,45 @@ static int plat_soc_cfg_get(struct soc_c
struct plat_priv *priv = g_plat_priv;

/* TXIN */
- cfg->txin_dbase = priv->soc_rings.txin.dbase_phymem;
- cfg->txin_dnum = priv->soc_rings.txin.dnum;
- cfg->txin_desc_dwsz = DW_SZ(struct dma_desc);
- cfg->txin_cnt_phyaddr = priv->soc_rings.txin.cnt_phyaddr;
+ cfg->txin.soc_phydbase = priv->soc_rings.txin.dbase_phymem;
+ cfg->txin.soc_dnum = priv->soc_rings.txin.dnum;
+ cfg->txin.soc_desc_dwsz = DW_SZ(struct dma_tx_desc);
/* TXOUT */
- cfg->txout_dbase = priv->soc_rings.txout.dbase_phymem;
- cfg->txout_dnum = priv->soc_rings.txout.dnum;
- cfg->txout_desc_dwsz = DW_SZ(struct dma_desc);
- cfg->txout_cnt_phyaddr = priv->soc_rings.txout.cnt_phyaddr;
+ cfg->txout.soc_phydbase = priv->soc_rings.txout.dbase_phymem;
+ cfg->txout.soc_dnum = priv->soc_rings.txout.dnum;
+ cfg->txout.soc_desc_dwsz = DW_SZ(u32);
/* RXOUT */
- cfg->rxout_dbase = priv->soc_rings.rxout.dbase_phymem;
- cfg->rxout_dnum = priv->soc_rings.rxout.dnum;
- cfg->rxout_desc_dwsz = DW_SZ(struct dma_desc);
- cfg->rxout_cnt_phyaddr = priv->soc_rings.rxout.cnt_phyaddr;
+ cfg->rxout.soc_phydbase = priv->soc_rings.rxout.dbase_phymem;
+ cfg->rxout.soc_dnum = priv->soc_rings.rxout.dnum;
+ cfg->rxout.soc_desc_dwsz = DW_SZ(struct dma_rx_desc);
/* RXIN */
- cfg->rxin_dbase = priv->soc_rings.rxin.dbase_phymem;
- cfg->rxin_dnum = priv->soc_rings.rxin.dnum;
- cfg->rxin_desc_dwsz = DW_SZ(struct dma_desc);
- cfg->rxin_cnt_phyaddr = priv->soc_rings.rxin.cnt_phyaddr;
+ cfg->rxin.soc_phydbase = priv->soc_rings.rxin.dbase_phymem;
+ cfg->rxin.soc_dnum = priv->soc_rings.rxin.dnum;
+ cfg->rxin.soc_desc_dwsz = DW_SZ(struct dma_rx_desc);

tc_info(priv->tc_priv, MSG_INIT,
"id: %d, txin(0x%x: %d, 0x%x), txout(0x%x: %d, 0x%x), rxin(0x%x: %d, 0x%x), rxout(0x%x: %d, 0x%x)\n",
- id, cfg->txin_dbase, cfg->txin_dnum, cfg->txin_cnt_phyaddr,
- cfg->txout_dbase, cfg->txout_dnum, cfg->txout_cnt_phyaddr,
- cfg->rxin_dbase, cfg->rxout_dnum, cfg->rxin_cnt_phyaddr,
- cfg->rxout_dbase, cfg->rxout_dnum, cfg->rxout_cnt_phyaddr);
+ id, cfg->txin.soc_phydbase, cfg->txin.soc_dnum, cfg->txin.soc_cnt_phyaddr,
+ cfg->txout.soc_phydbase, cfg->txout.soc_dnum, cfg->txout.soc_cnt_phyaddr,
+ cfg->rxin.soc_phydbase, cfg->rxin.soc_dnum, cfg->rxin.soc_cnt_phyaddr,
+ cfg->rxout.soc_phydbase, cfg->rxout.soc_dnum, cfg->rxout.soc_cnt_phyaddr);

return 0;
}

-static int plat_open(struct net_device *pdev, char *dev_name,
- int *subif, int flag)
+static int plat_open(struct net_device *pdev, const char *dev_name,
+ int id, int flag)
{
+ g_plat_priv->netdev = pdev;
+
return 0;
}

-static void plat_close(struct net_device *pdev, char *dev_name,
- int subif, int flag)
+static void plat_close(struct net_device *pdev, const char *dev_name,
+ int flag)
{
+ g_plat_priv->netdev = NULL;
+
return;
}

@@ -971,7 +1074,6 @@ static void plat_disable_us(int en)
static int plat_get_mib(struct net_device *pdev,
struct rtnl_link_stats64 *stat)
{
- pr_info("%s is not supported\n", __func__);
return -ENOTSUPP;
}

@@ -1181,8 +1283,8 @@ int platform_init(struct tc_priv *tc_pri
INIT_WORK(&priv->req_work.work, plat_tc_req_workqueue);
tasklet_init(&txout_task, plat_txout_tasklet, 0);
tasklet_init(&rxout_task, plat_rxout_tasklet, 0);
- irq_init(tc_priv, drv_name);
- //tasklet_init(&priv->coc.coc_task, plat_coc_tasklet, 0);
+ plat_irq_init(tc_priv, drv_name);
+
plat_tc_ops_setup(tc_priv);
plat_dsl_ops_setup();

@@ -1201,8 +1303,15 @@ void platform_dsl_exit(void)

void platform_exit(void)
{
- //tasklet_kill(&g_plat_priv->coc.coc_task);
+ struct tc_priv *tcpriv = plat_to_tcpriv();
+
+ tasklet_kill(&txout_task);
+ tasklet_kill(&rxout_task);
+
+ plat_irq_free(tcpriv);
+
plat_dp_exit(g_plat_priv);
+
g_plat_priv = NULL;
}