xref: /dpdk/drivers/net/dpaa/dpaa_ethdev.c (revision 37f9b54bd3cf5d32baa34284fd35e917c6b6aee6)
/*-
 *   BSD LICENSE
 *
 *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_config.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <dpaa_mempool.h>

#include <dpaa_ethdev.h>
#include <dpaa_rxtx.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <fsl_fman.h>

/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;

static int
dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

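/*
 * dev_start: switch the Tx burst handler from the drop-all stub installed at
 * init time to the real dpaa_eth_queue_tx routine, then enable Rx on the
 * FMAN interface so frames start arriving on the Rx frame queues.
 */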
static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Change tx callback to the real one */
	dev->tx_pkt_burst = dpaa_eth_queue_tx;
	fman_if_enable_rx(dpaa_intf->fif);

	return 0;
}

static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_disable_rx(dpaa_intf->fif);
	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
}

static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_stop(dev);
}

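/*
 * Rx queue setup. The first call for a given mempool also programs the FMAN
 * port: internal context (IC) parameters, the frame descriptor offset (mbuf
 * headroom plus the hardware annotation reserve) and the buffer pool id and
 * size taken from the offloaded mempool. The per-queue work is limited to
 * pointing rx_queues[queue_idx] at the FQ created in dpaa_dev_init().
 */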
static
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc __rte_unused,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_rxconf *rx_conf __rte_unused,
			    struct rte_mempool *mp)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);

	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
		struct fman_if_ic_params icp;
		uint32_t fd_offset;
		uint32_t bp_size;

		if (!mp->pool_data) {
			DPAA_PMD_ERR("Not an offloaded buffer pool!");
			return -1;
		}
		dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

		memset(&icp, 0, sizeof(icp));
		/* Set ICEOF to the default value, which is 0 */
		icp.iciof = DEFAULT_ICIOF;
		icp.iceof = DEFAULT_RX_ICEOF;
		icp.icsz = DEFAULT_ICSZ;
		fman_if_set_ic_params(dpaa_intf->fif, &icp);

		fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
		fman_if_set_fdoff(dpaa_intf->fif, fd_offset);

		/* Buffer pool size should be equal to the dataroom size */
		bp_size = rte_pktmbuf_data_room_size(mp);
		fman_if_set_bp(dpaa_intf->fif, mp->size,
			       dpaa_intf->bp_info->bpid, bp_size);
		dpaa_intf->valid = 1;
		DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
			      dpaa_intf->name, fd_offset,
			      fman_if_get_fdoff(dpaa_intf->fif));
	}
	dev->data->rx_queues[queue_idx] = &dpaa_intf->rx_queues[queue_idx];

	return 0;
}

static
void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

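/*
 * Tx queue setup. The Tx FQs (one per lcore) are already created in
 * dpaa_dev_init(); here the requested queue index is simply mapped onto the
 * corresponding pre-initialised FQ.
 */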
static
int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc __rte_unused,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
	dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
	return 0;
}

static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static struct eth_dev_ops dpaa_devops = {
	.dev_configure		  = dpaa_eth_dev_configure,
	.dev_start		  = dpaa_eth_dev_start,
	.dev_stop		  = dpaa_eth_dev_stop,
	.dev_close		  = dpaa_eth_dev_close,

	.rx_queue_setup		  = dpaa_eth_rx_queue_setup,
	.tx_queue_setup		  = dpaa_eth_tx_queue_setup,
	.rx_queue_release	  = dpaa_eth_rx_queue_release,
	.tx_queue_release	  = dpaa_eth_tx_queue_release,
};

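/*
 * Rx FQs are created on fixed, reserved FQIDs with QMAN_FQ_FLAG_NO_ENQUEUE
 * (frames are enqueued by the FMAN port, not by software) and are configured
 * for annotation/data/context stashing plus tail drop at CONG_THRESHOLD_RX_Q.
 */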
/* Initialise an Rx FQ */
static int dpaa_rx_queue_init(struct qman_fq *fq,
			      uint32_t fqid)
{
	struct qm_mcc_initfq opts;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = qman_reserve_fqid(fqid);
	if (ret) {
		DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
			     fqid, ret);
		return -EINVAL;
	}

	DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
	if (ret) {
		DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
			     fqid, ret);
		return ret;
	}

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA;

	opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
	opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
			   QM_FQCTRL_PREFERINCACHE;
	opts.fqd.context_a.stashing.exclusive = 0;
	opts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH;
	opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
	opts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;

	/* Enable tail drop */
	opts.we_mask = opts.we_mask | QM_INITFQ_WE_TDTHRESH;
	opts.fqd.fq_ctrl = opts.fqd.fq_ctrl | QM_FQCTRL_TDE;
	qm_fqd_taildrop_set(&opts.fqd.td, CONG_THRESHOLD_RX_Q, 1);

	ret = qman_init_fq(fq, 0, &opts);
	if (ret)
		DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
	return ret;
}

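/*
 * Tx FQs use dynamically allocated FQIDs and are targeted at the FMAN Tx
 * channel (a direct-connect portal). context_a is set up for operation
 * without Tx confirmation, using the FMAN buffer deallocation masks.
 */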
/* Initialise a Tx FQ */
static int dpaa_tx_queue_init(struct qman_fq *fq,
			      struct fman_if *fman_intf)
{
	struct qm_mcc_initfq opts;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
			     QMAN_FQ_FLAG_TO_DCPORTAL, fq);
	if (ret) {
		DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
		return ret;
	}
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
	opts.fqd.dest.channel = fman_intf->tx_channel_id;
	opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
	opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
	opts.fqd.context_b = 0;
	/* no tx-confirmation */
	opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
	opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
	DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (ret)
		DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
	return ret;
}

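/*
 * Per-port initialisation: the number of Rx FQs is taken from the
 * DPAA_NUM_RX_QUEUES environment variable (default
 * DPAA_DEFAULT_NUM_PCD_QUEUES), with FQIDs derived as
 * DPAA_PCD_FQID_START + ifid * DPAA_PCD_FQID_MULTIPLIER + queue index.
 * One Tx FQ is created per lcore. The port is left with Rx disabled until
 * dev_start() is called.
 */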
/* Initialise a network interface */
static int
dpaa_dev_init(struct rte_eth_dev *eth_dev)
{
	int num_cores, num_rx_fqs, fqid;
	int loop, ret = 0;
	int dev_id;
	struct rte_dpaa_device *dpaa_device;
	struct dpaa_if *dpaa_intf;
	struct fm_eth_port_cfg *cfg;
	struct fman_if *fman_intf;
	struct fman_if_bpool *bp, *tmp_bp;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
	dev_id = dpaa_device->id.dev_id;
	dpaa_intf = eth_dev->data->dev_private;
	cfg = &dpaa_netcfg->port_cfg[dev_id];
	fman_intf = cfg->fman_if;

	dpaa_intf->name = dpaa_device->name;

	/* Save fman_if & cfg in the interface structure */
	dpaa_intf->fif = fman_intf;
	dpaa_intf->ifid = dev_id;
	dpaa_intf->cfg = cfg;

	/* Initialize Rx FQs */
	if (getenv("DPAA_NUM_RX_QUEUES"))
		num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
	else
		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;

	/* Each device cannot have more than DPAA_PCD_FQID_MULTIPLIER Rx
	 * queues.
	 */
	if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
		DPAA_PMD_ERR("Invalid number of RX queues\n");
		return -EINVAL;
	}

	dpaa_intf->rx_queues = rte_zmalloc(NULL,
		sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
	if (!dpaa_intf->rx_queues)
		return -ENOMEM;

	for (loop = 0; loop < num_rx_fqs; loop++) {
		fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
			DPAA_PCD_FQID_MULTIPLIER + loop;
		ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], fqid);
		if (ret)
			return ret;
		dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_rx_queues = num_rx_fqs;

	/* Initialise Tx FQs. Have as many Tx FQs as there are cores */
	num_cores = rte_lcore_count();
	dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
		num_cores, MAX_CACHELINE);
	if (!dpaa_intf->tx_queues)
		return -ENOMEM;

	for (loop = 0; loop < num_cores; loop++) {
		ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
					 fman_intf);
		if (ret)
			return ret;
		dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_tx_queues = num_cores;

	DPAA_PMD_DEBUG("All frame queues created");

	/* Reset the bpool list; bpools are initialized dynamically */
	list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
		list_del(&bp->node);
		rte_free(bp);
	}

	/* Populate ethdev structure */
	eth_dev->dev_ops = &dpaa_devops;
	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
	eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
		ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
			     "store MAC addresses",
			     ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
		rte_free(dpaa_intf->rx_queues);
		rte_free(dpaa_intf->tx_queues);
		dpaa_intf->rx_queues = NULL;
		dpaa_intf->tx_queues = NULL;
		dpaa_intf->nb_rx_queues = 0;
		dpaa_intf->nb_tx_queues = 0;
		return -ENOMEM;
	}

	/* Copy the primary MAC address */
	ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);

	RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
		dpaa_device->name,
		fman_intf->mac_addr.addr_bytes[0],
		fman_intf->mac_addr.addr_bytes[1],
		fman_intf->mac_addr.addr_bytes[2],
		fman_intf->mac_addr.addr_bytes[3],
		fman_intf->mac_addr.addr_bytes[4],
		fman_intf->mac_addr.addr_bytes[5]);

	/* Disable Rx and discard errored frames */
	fman_if_discard_rx_errors(fman_intf);
	fman_if_disable_rx(fman_intf);
	/* Disable promiscuous mode */
	fman_if_promiscuous_disable(fman_intf);
	/* Disable multicast */
	fman_if_reset_mcast_filter_table(fman_intf);
	/* Reset interface statistics */
	fman_if_stats_reset(fman_intf);

	return 0;
}

static int
dpaa_dev_uninit(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (!dpaa_intf) {
		DPAA_PMD_WARN("Already closed or not started");
		return -1;
	}

	dpaa_eth_dev_close(dev);

	/* release configuration memory */
	if (dpaa_intf->fc_conf)
		rte_free(dpaa_intf->fc_conf);

	rte_free(dpaa_intf->rx_queues);
	dpaa_intf->rx_queues = NULL;

	rte_free(dpaa_intf->tx_queues);
	dpaa_intf->tx_queues = NULL;

	/* free memory for storing MAC addresses */
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	return 0;
}

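/*
 * Bus probe: QMAN and BMAN are loaded once per process, the calling thread's
 * portal is initialised, and an ethdev with a private dpaa_if is allocated
 * and handed to dpaa_dev_init().
 */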
static int
rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
	       struct rte_dpaa_device *dpaa_dev)
{
	int diag;
	int ret;
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	/* In case of a secondary process, the device is already configured
	 * and no further action is required, except portal initialization
	 * and verifying secondary attachment to the port name.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
		if (!eth_dev)
			return -ENOMEM;
		return 0;
	}

	if (!is_global_init) {
		/* One time load of Qman/Bman drivers */
		ret = qman_global_init();
		if (ret) {
			DPAA_PMD_ERR("QMAN initialization failed: %d",
				     ret);
			return ret;
		}
		ret = bman_global_init();
		if (ret) {
			DPAA_PMD_ERR("BMAN initialization failed: %d",
				     ret);
			return ret;
		}

		is_global_init = 1;
	}

	ret = rte_dpaa_portal_init((void *)1);
	if (ret) {
		DPAA_PMD_ERR("Unable to initialize portal");
		return ret;
	}

	eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
	if (eth_dev == NULL)
		return -ENOMEM;

	eth_dev->data->dev_private = rte_zmalloc(
					"ethdev private structure",
					sizeof(struct dpaa_if),
					RTE_CACHE_LINE_SIZE);
	if (!eth_dev->data->dev_private) {
		DPAA_PMD_ERR("Cannot allocate memzone for port data");
		rte_eth_dev_release_port(eth_dev);
		return -ENOMEM;
	}

	eth_dev->device = &dpaa_dev->device;
	eth_dev->device->driver = &dpaa_drv->driver;
	dpaa_dev->eth_dev = eth_dev;

	/* Invoke PMD device initialization function */
	diag = dpaa_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	eth_dev = dpaa_dev->eth_dev;
	dpaa_dev_uninit(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa_driver rte_dpaa_pmd = {
	.drv_type = FSL_DPAA_ETH,
	.probe = rte_dpaa_probe,
	.remove = rte_dpaa_remove,
};

RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);