xref: /dpdk/drivers/net/ionic/ionic_lif.c (revision 90fa040a20e79e1ca96bcbb1bfae3af371a095f1)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2022 Advanced Micro Devices, Inc.
3  */
4 
5 #include <rte_malloc.h>
6 #include <ethdev_driver.h>
7 
8 #include "ionic.h"
9 #include "ionic_logs.h"
10 #include "ionic_lif.h"
11 #include "ionic_ethdev.h"
12 #include "ionic_rx_filter.h"
13 #include "ionic_rxtx.h"
14 
/* queuetype support level */
/*
 * Highest queue-type version this driver supports, per queue type.
 * Passed to the firmware by ionic_lif_queue_identify() below.
 */
static const uint8_t ionic_qtype_vers[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 2,   /* 0 = Base version with CQ+SG support
				      * 1 =       ... with EQ
				      * 2 =       ... with CMB
				      */
	[IONIC_QTYPE_TXQ]     = 3,   /* 0 = Base version with CQ+SG support
				      * 1 =   ... with Tx SG version 1
				      * 2 =       ... with EQ
				      * 3 =       ... with CMB
				      */
};
29 
30 static int ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr);
31 static int ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr);
32 
33 static int
34 ionic_qcq_disable(struct ionic_qcq *qcq)
35 {
36 	struct ionic_queue *q = &qcq->q;
37 	struct ionic_lif *lif = qcq->lif;
38 	struct ionic_admin_ctx ctx = {
39 		.pending_work = true,
40 		.cmd.q_control = {
41 			.opcode = IONIC_CMD_Q_CONTROL,
42 			.type = q->type,
43 			.index = rte_cpu_to_le_32(q->index),
44 			.oper = IONIC_Q_DISABLE,
45 		},
46 	};
47 
48 	return ionic_adminq_post_wait(lif, &ctx);
49 }
50 
51 void
52 ionic_lif_stop(struct ionic_lif *lif)
53 {
54 	uint32_t i;
55 
56 	IONIC_PRINT_CALL();
57 
58 	lif->state &= ~IONIC_LIF_F_UP;
59 
60 	for (i = 0; i < lif->nrxqcqs; i++) {
61 		struct ionic_rx_qcq *rxq = lif->rxqcqs[i];
62 		if (rxq->flags & IONIC_QCQ_F_INITED)
63 			(void)ionic_dev_rx_queue_stop(lif->eth_dev, i);
64 	}
65 
66 	for (i = 0; i < lif->ntxqcqs; i++) {
67 		struct ionic_tx_qcq *txq = lif->txqcqs[i];
68 		if (txq->flags & IONIC_QCQ_F_INITED)
69 			(void)ionic_dev_tx_queue_stop(lif->eth_dev, i);
70 	}
71 }
72 
73 void
74 ionic_lif_reset(struct ionic_lif *lif)
75 {
76 	struct ionic_dev *idev = &lif->adapter->idev;
77 	int err;
78 
79 	IONIC_PRINT_CALL();
80 
81 	ionic_dev_cmd_lif_reset(idev);
82 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
83 	if (err)
84 		IONIC_PRINT(WARNING, "Failed to reset %s", lif->name);
85 }
86 
87 static void
88 ionic_lif_get_abs_stats(const struct ionic_lif *lif, struct rte_eth_stats *stats)
89 {
90 	struct ionic_lif_stats *ls = &lif->info->stats;
91 	uint32_t i;
92 	uint32_t num_rx_q_counters = RTE_MIN(lif->nrxqcqs, (uint32_t)
93 			RTE_ETHDEV_QUEUE_STAT_CNTRS);
94 	uint32_t num_tx_q_counters = RTE_MIN(lif->ntxqcqs, (uint32_t)
95 			RTE_ETHDEV_QUEUE_STAT_CNTRS);
96 
97 	memset(stats, 0, sizeof(*stats));
98 
99 	if (ls == NULL) {
100 		IONIC_PRINT(DEBUG, "Stats on port %u not yet initialized",
101 			lif->port_id);
102 		return;
103 	}
104 
105 	/* RX */
106 
107 	stats->ipackets = ls->rx_ucast_packets +
108 		ls->rx_mcast_packets +
109 		ls->rx_bcast_packets;
110 
111 	stats->ibytes = ls->rx_ucast_bytes +
112 		ls->rx_mcast_bytes +
113 		ls->rx_bcast_bytes;
114 
115 	for (i = 0; i < lif->nrxqcqs; i++) {
116 		struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats;
117 		stats->ierrors +=
118 			rx_stats->bad_cq_status +
119 			rx_stats->bad_len;
120 	}
121 
122 	stats->imissed +=
123 		ls->rx_ucast_drop_packets +
124 		ls->rx_mcast_drop_packets +
125 		ls->rx_bcast_drop_packets;
126 
127 	stats->ierrors +=
128 		ls->rx_dma_error +
129 		ls->rx_desc_fetch_error +
130 		ls->rx_desc_data_error;
131 
132 	for (i = 0; i < num_rx_q_counters; i++) {
133 		struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats;
134 		stats->q_ipackets[i] = rx_stats->packets;
135 		stats->q_ibytes[i] = rx_stats->bytes;
136 		stats->q_errors[i] =
137 			rx_stats->bad_cq_status +
138 			rx_stats->bad_len;
139 	}
140 
141 	/* TX */
142 
143 	stats->opackets = ls->tx_ucast_packets +
144 		ls->tx_mcast_packets +
145 		ls->tx_bcast_packets;
146 
147 	stats->obytes = ls->tx_ucast_bytes +
148 		ls->tx_mcast_bytes +
149 		ls->tx_bcast_bytes;
150 
151 	for (i = 0; i < lif->ntxqcqs; i++) {
152 		struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats;
153 		stats->oerrors += tx_stats->drop;
154 	}
155 
156 	stats->oerrors +=
157 		ls->tx_ucast_drop_packets +
158 		ls->tx_mcast_drop_packets +
159 		ls->tx_bcast_drop_packets;
160 
161 	stats->oerrors +=
162 		ls->tx_dma_error +
163 		ls->tx_queue_disabled +
164 		ls->tx_desc_fetch_error +
165 		ls->tx_desc_data_error;
166 
167 	for (i = 0; i < num_tx_q_counters; i++) {
168 		struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats;
169 		stats->q_opackets[i] = tx_stats->packets;
170 		stats->q_obytes[i] = tx_stats->bytes;
171 	}
172 }
173 
174 void
175 ionic_lif_get_stats(const struct ionic_lif *lif,
176 		struct rte_eth_stats *stats)
177 {
178 	ionic_lif_get_abs_stats(lif, stats);
179 
180 	stats->ipackets  -= lif->stats_base.ipackets;
181 	stats->opackets  -= lif->stats_base.opackets;
182 	stats->ibytes    -= lif->stats_base.ibytes;
183 	stats->obytes    -= lif->stats_base.obytes;
184 	stats->imissed   -= lif->stats_base.imissed;
185 	stats->ierrors   -= lif->stats_base.ierrors;
186 	stats->oerrors   -= lif->stats_base.oerrors;
187 	stats->rx_nombuf -= lif->stats_base.rx_nombuf;
188 }
189 
190 void
191 ionic_lif_reset_stats(struct ionic_lif *lif)
192 {
193 	uint32_t i;
194 
195 	for (i = 0; i < lif->nrxqcqs; i++) {
196 		memset(&lif->rxqcqs[i]->stats, 0,
197 			sizeof(struct ionic_rx_stats));
198 		memset(&lif->txqcqs[i]->stats, 0,
199 			sizeof(struct ionic_tx_stats));
200 	}
201 
202 	ionic_lif_get_abs_stats(lif, &lif->stats_base);
203 }
204 
205 void
206 ionic_lif_get_hw_stats(struct ionic_lif *lif, struct ionic_lif_stats *stats)
207 {
208 	uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t);
209 	uint64_t *stats64 = (uint64_t *)stats;
210 	uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats;
211 	uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base;
212 
213 	for (i = 0; i < count; i++)
214 		stats64[i] = lif_stats64[i] - lif_stats64_base[i];
215 }
216 
217 void
218 ionic_lif_reset_hw_stats(struct ionic_lif *lif)
219 {
220 	uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t);
221 	uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats;
222 	uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base;
223 
224 	for (i = 0; i < count; i++)
225 		lif_stats64_base[i] = lif_stats64[i];
226 }
227 
228 static int
229 ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr)
230 {
231 	struct ionic_admin_ctx ctx = {
232 		.pending_work = true,
233 		.cmd.rx_filter_add = {
234 			.opcode = IONIC_CMD_RX_FILTER_ADD,
235 			.match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_MAC),
236 		},
237 	};
238 	int err;
239 
240 	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, RTE_ETHER_ADDR_LEN);
241 
242 	err = ionic_adminq_post_wait(lif, &ctx);
243 	if (err)
244 		return err;
245 
246 	IONIC_PRINT(INFO, "rx_filter add (id %d)",
247 		rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id));
248 
249 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx);
250 }
251 
252 static int
253 ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr)
254 {
255 	struct ionic_admin_ctx ctx = {
256 		.pending_work = true,
257 		.cmd.rx_filter_del = {
258 			.opcode = IONIC_CMD_RX_FILTER_DEL,
259 		},
260 	};
261 	struct ionic_rx_filter *f;
262 	int err;
263 
264 	IONIC_PRINT_CALL();
265 
266 	rte_spinlock_lock(&lif->rx_filters.lock);
267 
268 	f = ionic_rx_filter_by_addr(lif, addr);
269 	if (!f) {
270 		rte_spinlock_unlock(&lif->rx_filters.lock);
271 		return -ENOENT;
272 	}
273 
274 	ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id);
275 	ionic_rx_filter_free(f);
276 
277 	rte_spinlock_unlock(&lif->rx_filters.lock);
278 
279 	err = ionic_adminq_post_wait(lif, &ctx);
280 	if (err)
281 		return err;
282 
283 	IONIC_PRINT(INFO, "rx_filter del (id %d)",
284 		rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id));
285 
286 	return 0;
287 }
288 
289 int
290 ionic_dev_add_mac(struct rte_eth_dev *eth_dev,
291 		struct rte_ether_addr *mac_addr,
292 		uint32_t index __rte_unused, uint32_t pool __rte_unused)
293 {
294 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
295 
296 	IONIC_PRINT_CALL();
297 
298 	return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr);
299 }
300 
301 void
302 ionic_dev_remove_mac(struct rte_eth_dev *eth_dev, uint32_t index)
303 {
304 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
305 	struct ionic_adapter *adapter = lif->adapter;
306 	struct rte_ether_addr *mac_addr;
307 
308 	IONIC_PRINT_CALL();
309 
310 	if (index >= adapter->max_mac_addrs) {
311 		IONIC_PRINT(WARNING,
312 			"Index %u is above MAC filter limit %u",
313 			index, adapter->max_mac_addrs);
314 		return;
315 	}
316 
317 	mac_addr = &eth_dev->data->mac_addrs[index];
318 
319 	if (!rte_is_valid_assigned_ether_addr(mac_addr))
320 		return;
321 
322 	ionic_lif_addr_del(lif, (const uint8_t *)mac_addr);
323 }
324 
325 int
326 ionic_dev_set_mac(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
327 {
328 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
329 
330 	IONIC_PRINT_CALL();
331 
332 	if (mac_addr == NULL) {
333 		IONIC_PRINT(NOTICE, "New mac is null");
334 		return -1;
335 	}
336 
337 	if (!rte_is_zero_ether_addr((struct rte_ether_addr *)lif->mac_addr)) {
338 		IONIC_PRINT(INFO, "Deleting mac addr %pM",
339 			lif->mac_addr);
340 		ionic_lif_addr_del(lif, lif->mac_addr);
341 		memset(lif->mac_addr, 0, RTE_ETHER_ADDR_LEN);
342 	}
343 
344 	IONIC_PRINT(INFO, "Updating mac addr");
345 
346 	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)lif->mac_addr);
347 
348 	return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr);
349 }
350 
351 static int
352 ionic_vlan_rx_add_vid(struct ionic_lif *lif, uint16_t vid)
353 {
354 	struct ionic_admin_ctx ctx = {
355 		.pending_work = true,
356 		.cmd.rx_filter_add = {
357 			.opcode = IONIC_CMD_RX_FILTER_ADD,
358 			.match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_VLAN),
359 			.vlan.vlan = rte_cpu_to_le_16(vid),
360 		},
361 	};
362 	int err;
363 
364 	err = ionic_adminq_post_wait(lif, &ctx);
365 	if (err)
366 		return err;
367 
368 	IONIC_PRINT(INFO, "rx_filter add VLAN %d (id %d)", vid,
369 		rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id));
370 
371 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx);
372 }
373 
374 static int
375 ionic_vlan_rx_kill_vid(struct ionic_lif *lif, uint16_t vid)
376 {
377 	struct ionic_admin_ctx ctx = {
378 		.pending_work = true,
379 		.cmd.rx_filter_del = {
380 			.opcode = IONIC_CMD_RX_FILTER_DEL,
381 		},
382 	};
383 	struct ionic_rx_filter *f;
384 	int err;
385 
386 	IONIC_PRINT_CALL();
387 
388 	rte_spinlock_lock(&lif->rx_filters.lock);
389 
390 	f = ionic_rx_filter_by_vlan(lif, vid);
391 	if (!f) {
392 		rte_spinlock_unlock(&lif->rx_filters.lock);
393 		return -ENOENT;
394 	}
395 
396 	ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id);
397 	ionic_rx_filter_free(f);
398 	rte_spinlock_unlock(&lif->rx_filters.lock);
399 
400 	err = ionic_adminq_post_wait(lif, &ctx);
401 	if (err)
402 		return err;
403 
404 	IONIC_PRINT(INFO, "rx_filter del VLAN %d (id %d)", vid,
405 		rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id));
406 
407 	return 0;
408 }
409 
/* ethdev vlan_filter_set callback: add or remove the filter for vlan_id. */
int
ionic_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id,
		int on)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);

	return on ? ionic_vlan_rx_add_vid(lif, vlan_id) :
		    ionic_vlan_rx_kill_vid(lif, vlan_id);
}
424 
425 static void
426 ionic_lif_rx_mode(struct ionic_lif *lif, uint32_t rx_mode)
427 {
428 	struct ionic_admin_ctx ctx = {
429 		.pending_work = true,
430 		.cmd.rx_mode_set = {
431 			.opcode = IONIC_CMD_RX_MODE_SET,
432 			.rx_mode = rte_cpu_to_le_16(rx_mode),
433 		},
434 	};
435 	int err;
436 
437 	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
438 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_UNICAST");
439 	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
440 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_MULTICAST");
441 	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
442 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_BROADCAST");
443 	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
444 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_PROMISC");
445 	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
446 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_ALLMULTI");
447 
448 	err = ionic_adminq_post_wait(lif, &ctx);
449 	if (err)
450 		IONIC_PRINT(ERR, "Failure setting RX mode");
451 }
452 
453 static void
454 ionic_set_rx_mode(struct ionic_lif *lif, uint32_t rx_mode)
455 {
456 	if (lif->rx_mode != rx_mode) {
457 		lif->rx_mode = rx_mode;
458 		ionic_lif_rx_mode(lif, rx_mode);
459 	}
460 }
461 
462 int
463 ionic_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
464 {
465 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
466 	uint32_t rx_mode = lif->rx_mode;
467 
468 	IONIC_PRINT_CALL();
469 
470 	rx_mode |= IONIC_RX_MODE_F_PROMISC;
471 
472 	ionic_set_rx_mode(lif, rx_mode);
473 
474 	return 0;
475 }
476 
477 int
478 ionic_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
479 {
480 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
481 	uint32_t rx_mode = lif->rx_mode;
482 
483 	rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
484 
485 	ionic_set_rx_mode(lif, rx_mode);
486 
487 	return 0;
488 }
489 
490 int
491 ionic_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
492 {
493 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
494 	uint32_t rx_mode = lif->rx_mode;
495 
496 	rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
497 
498 	ionic_set_rx_mode(lif, rx_mode);
499 
500 	return 0;
501 }
502 
503 int
504 ionic_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
505 {
506 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
507 	uint32_t rx_mode = lif->rx_mode;
508 
509 	rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
510 
511 	ionic_set_rx_mode(lif, rx_mode);
512 
513 	return 0;
514 }
515 
516 int
517 ionic_lif_change_mtu(struct ionic_lif *lif, uint32_t new_mtu)
518 {
519 	struct ionic_admin_ctx ctx = {
520 		.pending_work = true,
521 		.cmd.lif_setattr = {
522 			.opcode = IONIC_CMD_LIF_SETATTR,
523 			.attr = IONIC_LIF_ATTR_MTU,
524 			.mtu = rte_cpu_to_le_32(new_mtu),
525 		},
526 	};
527 
528 	return ionic_adminq_post_wait(lif, &ctx);
529 }
530 
531 int
532 ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
533 {
534 	struct ionic_adapter *adapter = lif->adapter;
535 	struct ionic_dev *idev = &adapter->idev;
536 	unsigned long index;
537 
538 	/*
539 	 * Note: interrupt handler is called for index = 0 only
540 	 * (we use interrupts for the notifyq only anyway,
541 	 * which has index = 0)
542 	 */
543 
544 	for (index = 0; index < adapter->nintrs; index++)
545 		if (!adapter->intrs[index])
546 			break;
547 
548 	if (index == adapter->nintrs)
549 		return -ENOSPC;
550 
551 	adapter->intrs[index] = true;
552 
553 	ionic_intr_init(idev, intr, index);
554 
555 	return 0;
556 }
557 
558 static int
559 ionic_qcq_alloc(struct ionic_lif *lif,
560 		uint8_t type,
561 		size_t struct_size,
562 		uint32_t socket_id,
563 		uint32_t index,
564 		const char *type_name,
565 		uint16_t flags,
566 		uint16_t num_descs,
567 		uint16_t num_segs,
568 		uint16_t desc_size,
569 		uint16_t cq_desc_size,
570 		uint16_t sg_desc_size,
571 		struct ionic_qcq **qcq)
572 {
573 	struct ionic_qcq *new;
574 	uint32_t q_size, cq_size, sg_size, total_size;
575 	void *q_base, *cmb_q_base, *cq_base, *sg_base;
576 	rte_iova_t q_base_pa = 0;
577 	rte_iova_t cq_base_pa = 0;
578 	rte_iova_t sg_base_pa = 0;
579 	rte_iova_t cmb_q_base_pa = 0;
580 	size_t page_size = rte_mem_page_size();
581 	int err;
582 
583 	*qcq = NULL;
584 
585 	q_size  = num_descs * desc_size;
586 	cq_size = num_descs * cq_desc_size;
587 	sg_size = num_descs * sg_desc_size;
588 
589 	total_size = RTE_ALIGN(q_size, page_size) +
590 			RTE_ALIGN(cq_size, page_size);
591 	/*
592 	 * Note: aligning q_size/cq_size is not enough due to cq_base address
593 	 * aligning as q_base could be not aligned to the page.
594 	 * Adding page_size.
595 	 */
596 	total_size += page_size;
597 
598 	if (flags & IONIC_QCQ_F_SG) {
599 		total_size += RTE_ALIGN(sg_size, page_size);
600 		total_size += page_size;
601 	}
602 
603 	new = rte_zmalloc_socket("ionic", struct_size,
604 				RTE_CACHE_LINE_SIZE, socket_id);
605 	if (!new) {
606 		IONIC_PRINT(ERR, "Cannot allocate queue structure");
607 		return -ENOMEM;
608 	}
609 
610 	new->lif = lif;
611 
612 	/* Most queue types will store 1 ptr per descriptor */
613 	new->q.info = rte_calloc_socket("ionic",
614 				(uint64_t)num_descs * num_segs,
615 				sizeof(void *), page_size, socket_id);
616 	if (!new->q.info) {
617 		IONIC_PRINT(ERR, "Cannot allocate queue info");
618 		err = -ENOMEM;
619 		goto err_out_free_qcq;
620 	}
621 
622 	new->q.num_segs = num_segs;
623 	new->q.type = type;
624 
625 	err = ionic_q_init(&new->q, index, num_descs);
626 	if (err) {
627 		IONIC_PRINT(ERR, "Queue initialization failed");
628 		goto err_out_free_info;
629 	}
630 
631 	err = ionic_cq_init(&new->cq, num_descs);
632 	if (err) {
633 		IONIC_PRINT(ERR, "Completion queue initialization failed");
634 		goto err_out_free_info;
635 	}
636 
637 	new->base_z = rte_eth_dma_zone_reserve(lif->eth_dev,
638 		type_name, index /* queue_idx */,
639 		total_size, IONIC_ALIGN, socket_id);
640 
641 	if (!new->base_z) {
642 		IONIC_PRINT(ERR, "Cannot reserve queue DMA memory");
643 		err = -ENOMEM;
644 		goto err_out_free_info;
645 	}
646 
647 	new->base = new->base_z->addr;
648 	new->base_pa = new->base_z->iova;
649 
650 	q_base = new->base;
651 	q_base_pa = new->base_pa;
652 
653 	cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size, page_size);
654 	cq_base_pa = RTE_ALIGN(q_base_pa + q_size, page_size);
655 
656 	if (flags & IONIC_QCQ_F_SG) {
657 		sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size,
658 				page_size);
659 		sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size, page_size);
660 		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
661 	}
662 
663 	if (flags & IONIC_QCQ_F_CMB) {
664 		/* alloc descriptor ring from nic memory */
665 		if (lif->adapter->cmb_offset + q_size >
666 				lif->adapter->bars.bar[2].len) {
667 			IONIC_PRINT(ERR, "Cannot reserve queue from NIC mem");
668 			return -ENOMEM;
669 		}
670 		cmb_q_base = (void *)
671 			((uintptr_t)lif->adapter->bars.bar[2].vaddr +
672 			 (uintptr_t)lif->adapter->cmb_offset);
673 		/* CMB PA is a relative address */
674 		cmb_q_base_pa = lif->adapter->cmb_offset;
675 		lif->adapter->cmb_offset += q_size;
676 	} else {
677 		cmb_q_base = NULL;
678 		cmb_q_base_pa = 0;
679 	}
680 
681 	IONIC_PRINT(DEBUG, "Q-Base-PA = %#jx CQ-Base-PA = %#jx "
682 		"SG-base-PA = %#jx",
683 		q_base_pa, cq_base_pa, sg_base_pa);
684 
685 	ionic_q_map(&new->q, q_base, q_base_pa, cmb_q_base, cmb_q_base_pa);
686 	ionic_cq_map(&new->cq, cq_base, cq_base_pa);
687 
688 	*qcq = new;
689 
690 	return 0;
691 
692 err_out_free_info:
693 	rte_free(new->q.info);
694 err_out_free_qcq:
695 	rte_free(new);
696 
697 	return err;
698 }
699 
700 void
701 ionic_qcq_free(struct ionic_qcq *qcq)
702 {
703 	if (qcq->base_z) {
704 		qcq->base = NULL;
705 		qcq->base_pa = 0;
706 		rte_memzone_free(qcq->base_z);
707 		qcq->base_z = NULL;
708 	}
709 
710 	if (qcq->q.info) {
711 		rte_free(qcq->q.info);
712 		qcq->q.info = NULL;
713 	}
714 
715 	rte_free(qcq);
716 }
717 
/*
 * Build the 64-bit mbuf "rearm" word used for the first (header)
 * segment of a received packet: refcnt 1, standard headroom, one
 * segment, this lif's port id.  Presumably consumed by the Rx fill
 * path to initialize mbufs with a single store -- see ionic_rxtx.
 */
static uint64_t
ionic_rx_rearm_data(struct ionic_lif *lif)
{
	struct rte_mbuf rxm;

	memset(&rxm, 0, sizeof(rxm));

	rte_mbuf_refcnt_set(&rxm, 1);
	rxm.data_off = RTE_PKTMBUF_HEADROOM;
	rxm.nb_segs = 1;
	rxm.port = lif->port_id;

	/* Keep the field stores above from being reordered past the read */
	rte_compiler_barrier();

	RTE_BUILD_BUG_ON(sizeof(rxm.rearm_data[0]) != sizeof(uint64_t));
	return rxm.rearm_data[0];
}
735 
/*
 * Build the 64-bit mbuf "rearm" word for chained (non-first) Rx
 * segments.  Identical to ionic_rx_rearm_data() except data_off is 0,
 * since only the first segment reserves headroom.
 */
static uint64_t
ionic_rx_seg_rearm_data(struct ionic_lif *lif)
{
	struct rte_mbuf rxm;

	memset(&rxm, 0, sizeof(rxm));

	rte_mbuf_refcnt_set(&rxm, 1);
	rxm.data_off = 0;  /* no headroom */
	rxm.nb_segs = 1;
	rxm.port = lif->port_id;

	/* Keep the field stores above from being reordered past the read */
	rte_compiler_barrier();

	RTE_BUILD_BUG_ON(sizeof(rxm.rearm_data[0]) != sizeof(uint64_t));
	return rxm.rearm_data[0];
}
753 
/*
 * Allocate and configure the receive qcq for queue 'index'.
 * Selects SG/CMB flags from lif state, validates that the mempool's
 * segment size can hold a max-MTU frame within the firmware's SG
 * limit, then delegates the ring allocation to ionic_qcq_alloc().
 * On success stores the queue in lif->rxqcqs[index] and *rxq_out.
 */
int
ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
		uint16_t nrxq_descs, struct rte_mempool *mb_pool,
		struct ionic_rx_qcq **rxq_out)
{
	struct ionic_rx_qcq *rxq;
	uint16_t flags = 0, seg_size, hdr_seg_size, max_segs, max_segs_fw = 1;
	uint32_t max_mtu;
	int err;

	/* Place descriptor rings in controller memory when requested */
	if (lif->state & IONIC_LIF_F_Q_IN_CMB)
		flags |= IONIC_QCQ_F_CMB;

	seg_size = rte_pktmbuf_data_room_size(mb_pool);

	/* The first mbuf needs to leave headroom */
	hdr_seg_size = seg_size - RTE_PKTMBUF_HEADROOM;

	max_mtu = rte_le_to_cpu_32(lif->adapter->ident.lif.eth.max_mtu);

	/* If mbufs are too small to hold received packets, enable SG */
	if (max_mtu > hdr_seg_size &&
	    !(lif->features & IONIC_ETH_HW_RX_SG)) {
		IONIC_PRINT(NOTICE, "Enabling RX_OFFLOAD_SCATTER");
		lif->eth_dev->data->dev_conf.rxmode.offloads |=
			RTE_ETH_RX_OFFLOAD_SCATTER;
		/* NOTE(review): presumably sets IONIC_ETH_HW_RX_SG in
		 * lif->features, which the next check depends on -- confirm
		 */
		ionic_lif_configure_rx_sg_offload(lif);
	}

	if (lif->features & IONIC_ETH_HW_RX_SG) {
		flags |= IONIC_QCQ_F_SG;
		max_segs_fw = IONIC_RX_MAX_SG_ELEMS + 1;
	}

	/*
	 * Calculate how many fragment pointers might be stored in queue.
	 * This is the worst-case number, so that there's enough room in
	 * the info array.
	 */
	max_segs = 1 + (max_mtu + RTE_PKTMBUF_HEADROOM - 1) / seg_size;

	IONIC_PRINT(DEBUG, "rxq %u max_mtu %u seg_size %u max_segs %u",
		index, max_mtu, seg_size, max_segs);
	if (max_segs > max_segs_fw) {
		IONIC_PRINT(ERR, "Rx mbuf size insufficient (%d > %d avail)",
			max_segs, max_segs_fw);
		return -EINVAL;
	}

	err = ionic_qcq_alloc(lif,
		IONIC_QTYPE_RXQ,
		sizeof(struct ionic_rx_qcq),
		socket_id,
		index,
		"rx",
		flags,
		nrxq_descs,
		max_segs,
		sizeof(struct ionic_rxq_desc),
		sizeof(struct ionic_rxq_comp),
		sizeof(struct ionic_rxq_sg_desc),
		(struct ionic_qcq **)&rxq);
	if (err)
		return err;

	rxq->flags = flags;
	rxq->seg_size = seg_size;
	rxq->hdr_seg_size = hdr_seg_size;
	/* Precomputed mbuf rearm words (see ionic_rx_rearm_data above) */
	rxq->rearm_data = ionic_rx_rearm_data(lif);
	rxq->rearm_seg_data = ionic_rx_seg_rearm_data(lif);

	lif->rxqcqs[index] = rxq;
	*rxq_out = rxq;

	return 0;
}
830 
831 int
832 ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
833 		uint16_t ntxq_descs, struct ionic_tx_qcq **txq_out)
834 {
835 	struct ionic_tx_qcq *txq;
836 	uint16_t flags = 0, num_segs_fw = 1;
837 	int err;
838 
839 	if (lif->features & IONIC_ETH_HW_TX_SG) {
840 		flags |= IONIC_QCQ_F_SG;
841 		num_segs_fw = IONIC_TX_MAX_SG_ELEMS_V1 + 1;
842 	}
843 	if (lif->state & IONIC_LIF_F_Q_IN_CMB)
844 		flags |= IONIC_QCQ_F_CMB;
845 
846 	IONIC_PRINT(DEBUG, "txq %u num_segs %u", index, num_segs_fw);
847 
848 	err = ionic_qcq_alloc(lif,
849 		IONIC_QTYPE_TXQ,
850 		sizeof(struct ionic_tx_qcq),
851 		socket_id,
852 		index,
853 		"tx",
854 		flags,
855 		ntxq_descs,
856 		num_segs_fw,
857 		sizeof(struct ionic_txq_desc),
858 		sizeof(struct ionic_txq_comp),
859 		sizeof(struct ionic_txq_sg_desc_v1),
860 		(struct ionic_qcq **)&txq);
861 	if (err)
862 		return err;
863 
864 	txq->flags = flags;
865 	txq->num_segs_fw = num_segs_fw;
866 
867 	lif->txqcqs[index] = txq;
868 	*txq_out = txq;
869 
870 	return 0;
871 }
872 
873 static int
874 ionic_admin_qcq_alloc(struct ionic_lif *lif)
875 {
876 	uint16_t flags = 0;
877 	int err;
878 
879 	err = ionic_qcq_alloc(lif,
880 		IONIC_QTYPE_ADMINQ,
881 		sizeof(struct ionic_admin_qcq),
882 		rte_socket_id(),
883 		0,
884 		"admin",
885 		flags,
886 		IONIC_ADMINQ_LENGTH,
887 		1,
888 		sizeof(struct ionic_admin_cmd),
889 		sizeof(struct ionic_admin_comp),
890 		0,
891 		(struct ionic_qcq **)&lif->adminqcq);
892 	if (err)
893 		return err;
894 
895 	return 0;
896 }
897 
898 static int
899 ionic_notify_qcq_alloc(struct ionic_lif *lif)
900 {
901 	struct ionic_notify_qcq *nqcq;
902 	struct ionic_dev *idev = &lif->adapter->idev;
903 	uint16_t flags = 0;
904 	int err;
905 
906 	err = ionic_qcq_alloc(lif,
907 		IONIC_QTYPE_NOTIFYQ,
908 		sizeof(struct ionic_notify_qcq),
909 		rte_socket_id(),
910 		0,
911 		"notify",
912 		flags,
913 		IONIC_NOTIFYQ_LENGTH,
914 		1,
915 		sizeof(struct ionic_notifyq_cmd),
916 		sizeof(union ionic_notifyq_comp),
917 		0,
918 		(struct ionic_qcq **)&nqcq);
919 	if (err)
920 		return err;
921 
922 	err = ionic_intr_alloc(lif, &nqcq->intr);
923 	if (err) {
924 		ionic_qcq_free(&nqcq->qcq);
925 		return err;
926 	}
927 
928 	ionic_intr_mask_assert(idev->intr_ctrl, nqcq->intr.index,
929 		IONIC_INTR_MASK_SET);
930 
931 	lif->notifyqcq = nqcq;
932 
933 	return 0;
934 }
935 
936 static void
937 ionic_lif_queue_identify(struct ionic_lif *lif)
938 {
939 	struct ionic_adapter *adapter = lif->adapter;
940 	struct ionic_dev *idev = &adapter->idev;
941 	union ionic_q_identity *q_ident = &adapter->ident.txq;
942 	uint32_t q_words = RTE_DIM(q_ident->words);
943 	uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data);
944 	uint32_t i, nwords, qtype;
945 	int err;
946 
947 	for (qtype = 0; qtype < RTE_DIM(ionic_qtype_vers); qtype++) {
948 		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
949 
950 		/* Filter out the types this driver knows about */
951 		switch (qtype) {
952 		case IONIC_QTYPE_ADMINQ:
953 		case IONIC_QTYPE_NOTIFYQ:
954 		case IONIC_QTYPE_RXQ:
955 		case IONIC_QTYPE_TXQ:
956 			break;
957 		default:
958 			continue;
959 		}
960 
961 		memset(qti, 0, sizeof(*qti));
962 
963 		ionic_dev_cmd_queue_identify(idev, IONIC_LIF_TYPE_CLASSIC,
964 			qtype, ionic_qtype_vers[qtype]);
965 		err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
966 		if (err == -EINVAL) {
967 			IONIC_PRINT(ERR, "qtype %d not supported\n", qtype);
968 			continue;
969 		} else if (err == -EIO) {
970 			IONIC_PRINT(ERR, "q_ident failed, older FW\n");
971 			return;
972 		} else if (err) {
973 			IONIC_PRINT(ERR, "q_ident failed, qtype %d: %d\n",
974 				qtype, err);
975 			return;
976 		}
977 
978 		nwords = RTE_MIN(q_words, cmd_words);
979 		for (i = 0; i < nwords; i++)
980 			q_ident->words[i] = ioread32(&idev->dev_cmd->data[i]);
981 
982 		qti->version   = q_ident->version;
983 		qti->supported = q_ident->supported;
984 		qti->features  = rte_le_to_cpu_64(q_ident->features);
985 		qti->desc_sz   = rte_le_to_cpu_16(q_ident->desc_sz);
986 		qti->comp_sz   = rte_le_to_cpu_16(q_ident->comp_sz);
987 		qti->sg_desc_sz   = rte_le_to_cpu_16(q_ident->sg_desc_sz);
988 		qti->max_sg_elems = rte_le_to_cpu_16(q_ident->max_sg_elems);
989 		qti->sg_desc_stride =
990 			rte_le_to_cpu_16(q_ident->sg_desc_stride);
991 
992 		IONIC_PRINT(DEBUG, " qtype[%d].version = %d",
993 			qtype, qti->version);
994 		IONIC_PRINT(DEBUG, " qtype[%d].supported = %#x",
995 			qtype, qti->supported);
996 		IONIC_PRINT(DEBUG, " qtype[%d].features = %#jx",
997 			qtype, qti->features);
998 		IONIC_PRINT(DEBUG, " qtype[%d].desc_sz = %d",
999 			qtype, qti->desc_sz);
1000 		IONIC_PRINT(DEBUG, " qtype[%d].comp_sz = %d",
1001 			qtype, qti->comp_sz);
1002 		IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_sz = %d",
1003 			qtype, qti->sg_desc_sz);
1004 		IONIC_PRINT(DEBUG, " qtype[%d].max_sg_elems = %d",
1005 			qtype, qti->max_sg_elems);
1006 		IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_stride = %d",
1007 			qtype, qti->sg_desc_stride);
1008 	}
1009 }
1010 
/*
 * One-time allocation of lif resources: queue pointer arrays, the
 * notify and admin queues, and the DMA-able lif info region.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): on a mid-function failure, allocations made earlier in
 * this function (txqcqs/rxqcqs arrays, notify queue) are not freed
 * here -- presumably the caller invokes ionic_lif_free() on error,
 * which handles each of them; confirm against the caller.
 */
int
ionic_lif_alloc(struct ionic_lif *lif)
{
	struct ionic_adapter *adapter = lif->adapter;
	uint32_t socket_id = rte_socket_id();
	int err;

	/*
	 * lif->name was zeroed on allocation.
	 * Copy (sizeof() - 1) bytes to ensure that it is NULL terminated.
	 */
	memcpy(lif->name, lif->eth_dev->data->name, sizeof(lif->name) - 1);

	IONIC_PRINT(DEBUG, "LIF: %s", lif->name);

	/* Query per-queue-type firmware capabilities first */
	ionic_lif_queue_identify(lif);

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version < 1) {
		IONIC_PRINT(ERR, "FW too old, please upgrade");
		return -ENXIO;
	}

	/* Queues-in-CMB needs bar 2 plus new enough rxq/txq versions */
	if (adapter->q_in_cmb) {
		if (adapter->bars.num_bars >= 3 &&
		    lif->qtype_info[IONIC_QTYPE_RXQ].version >= 2 &&
		    lif->qtype_info[IONIC_QTYPE_TXQ].version >= 3) {
			IONIC_PRINT(INFO, "%s enabled on %s",
				PMD_IONIC_CMB_KVARG, lif->name);
			lif->state |= IONIC_LIF_F_Q_IN_CMB;
		} else {
			IONIC_PRINT(ERR, "%s not supported on %s, disabled",
				PMD_IONIC_CMB_KVARG, lif->name);
		}
	}

	IONIC_PRINT(DEBUG, "Allocating Lif Info");

	rte_spinlock_init(&lif->adminq_lock);
	rte_spinlock_init(&lif->adminq_service_lock);

	lif->kern_dbpage = adapter->idev.db_pages;
	if (!lif->kern_dbpage) {
		IONIC_PRINT(ERR, "Cannot map dbpage, aborting");
		return -ENOMEM;
	}

	/* Pointer arrays are sized for the adapter maximums */
	lif->txqcqs = rte_calloc_socket("ionic",
				adapter->max_ntxqs_per_lif,
				sizeof(*lif->txqcqs),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (!lif->txqcqs) {
		IONIC_PRINT(ERR, "Cannot allocate tx queues array");
		return -ENOMEM;
	}

	lif->rxqcqs = rte_calloc_socket("ionic",
				adapter->max_nrxqs_per_lif,
				sizeof(*lif->rxqcqs),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (!lif->rxqcqs) {
		IONIC_PRINT(ERR, "Cannot allocate rx queues array");
		return -ENOMEM;
	}

	IONIC_PRINT(DEBUG, "Allocating Notify Queue");

	err = ionic_notify_qcq_alloc(lif);
	if (err) {
		IONIC_PRINT(ERR, "Cannot allocate notify queue");
		return err;
	}

	IONIC_PRINT(DEBUG, "Allocating Admin Queue");

	err = ionic_admin_qcq_alloc(lif);
	if (err) {
		IONIC_PRINT(ERR, "Cannot allocate admin queue");
		return err;
	}

	IONIC_PRINT(DEBUG, "Allocating Lif Info");

	/* Page-align the DMA-visible info region */
	lif->info_sz = RTE_ALIGN(sizeof(*lif->info), rte_mem_page_size());

	lif->info_z = rte_eth_dma_zone_reserve(lif->eth_dev,
		"lif_info", 0 /* queue_idx*/,
		lif->info_sz, IONIC_ALIGN, socket_id);
	if (!lif->info_z) {
		IONIC_PRINT(ERR, "Cannot allocate lif info memory");
		return -ENOMEM;
	}

	lif->info = lif->info_z->addr;
	lif->info_pa = lif->info_z->iova;

	return 0;
}
1108 
1109 void
1110 ionic_lif_free(struct ionic_lif *lif)
1111 {
1112 	if (lif->notifyqcq) {
1113 		ionic_qcq_free(&lif->notifyqcq->qcq);
1114 		lif->notifyqcq = NULL;
1115 	}
1116 
1117 	if (lif->adminqcq) {
1118 		ionic_qcq_free(&lif->adminqcq->qcq);
1119 		lif->adminqcq = NULL;
1120 	}
1121 
1122 	if (lif->txqcqs) {
1123 		rte_free(lif->txqcqs);
1124 		lif->txqcqs = NULL;
1125 	}
1126 
1127 	if (lif->rxqcqs) {
1128 		rte_free(lif->rxqcqs);
1129 		lif->rxqcqs = NULL;
1130 	}
1131 
1132 	if (lif->info) {
1133 		rte_memzone_free(lif->info_z);
1134 		lif->info = NULL;
1135 	}
1136 }
1137 
1138 void
1139 ionic_lif_free_queues(struct ionic_lif *lif)
1140 {
1141 	uint32_t i;
1142 
1143 	for (i = 0; i < lif->ntxqcqs; i++) {
1144 		ionic_dev_tx_queue_release(lif->eth_dev, i);
1145 		lif->eth_dev->data->tx_queues[i] = NULL;
1146 	}
1147 	for (i = 0; i < lif->nrxqcqs; i++) {
1148 		ionic_dev_rx_queue_release(lif->eth_dev, i);
1149 		lif->eth_dev->data->rx_queues[i] = NULL;
1150 	}
1151 }
1152 
1153 int
1154 ionic_lif_rss_config(struct ionic_lif *lif,
1155 		const uint16_t types, const uint8_t *key, const uint32_t *indir)
1156 {
1157 	struct ionic_adapter *adapter = lif->adapter;
1158 	struct ionic_admin_ctx ctx = {
1159 		.pending_work = true,
1160 		.cmd.lif_setattr = {
1161 			.opcode = IONIC_CMD_LIF_SETATTR,
1162 			.attr = IONIC_LIF_ATTR_RSS,
1163 			.rss.types = rte_cpu_to_le_16(types),
1164 			.rss.addr = rte_cpu_to_le_64(lif->rss_ind_tbl_pa),
1165 		},
1166 	};
1167 	unsigned int i;
1168 	uint16_t tbl_sz =
1169 		rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz);
1170 
1171 	IONIC_PRINT_CALL();
1172 
1173 	lif->rss_types = types;
1174 
1175 	if (key)
1176 		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1177 
1178 	if (indir)
1179 		for (i = 0; i < tbl_sz; i++)
1180 			lif->rss_ind_tbl[i] = indir[i];
1181 
1182 	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1183 	       IONIC_RSS_HASH_KEY_SIZE);
1184 
1185 	return ionic_adminq_post_wait(lif, &ctx);
1186 }
1187 
1188 static int
1189 ionic_lif_rss_setup(struct ionic_lif *lif)
1190 {
1191 	struct ionic_adapter *adapter = lif->adapter;
1192 	static const uint8_t toeplitz_symmetric_key[] = {
1193 		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1194 		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1195 		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1196 		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1197 		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1198 	};
1199 	uint32_t i;
1200 	uint16_t tbl_sz =
1201 		rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz);
1202 
1203 	IONIC_PRINT_CALL();
1204 
1205 	if (!lif->rss_ind_tbl_z) {
1206 		lif->rss_ind_tbl_z = rte_eth_dma_zone_reserve(lif->eth_dev,
1207 					"rss_ind_tbl", 0 /* queue_idx */,
1208 					sizeof(*lif->rss_ind_tbl) * tbl_sz,
1209 					IONIC_ALIGN, rte_socket_id());
1210 		if (!lif->rss_ind_tbl_z) {
1211 			IONIC_PRINT(ERR, "OOM");
1212 			return -ENOMEM;
1213 		}
1214 
1215 		lif->rss_ind_tbl = lif->rss_ind_tbl_z->addr;
1216 		lif->rss_ind_tbl_pa = lif->rss_ind_tbl_z->iova;
1217 	}
1218 
1219 	if (lif->rss_ind_tbl_nrxqcqs != lif->nrxqcqs) {
1220 		lif->rss_ind_tbl_nrxqcqs = lif->nrxqcqs;
1221 
1222 		/* Fill indirection table with 'default' values */
1223 		for (i = 0; i < tbl_sz; i++)
1224 			lif->rss_ind_tbl[i] = i % lif->nrxqcqs;
1225 	}
1226 
1227 	return ionic_lif_rss_config(lif, IONIC_RSS_OFFLOAD_ALL,
1228 			toeplitz_symmetric_key, NULL);
1229 }
1230 
1231 static void
1232 ionic_lif_rss_teardown(struct ionic_lif *lif)
1233 {
1234 	if (!lif->rss_ind_tbl)
1235 		return;
1236 
1237 	if (lif->rss_ind_tbl_z) {
1238 		/* Disable RSS on the NIC */
1239 		ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1240 
1241 		lif->rss_ind_tbl = NULL;
1242 		lif->rss_ind_tbl_pa = 0;
1243 		rte_memzone_free(lif->rss_ind_tbl_z);
1244 		lif->rss_ind_tbl_z = NULL;
1245 	}
1246 }
1247 
/* Tell the FW to stop servicing the Tx queue, then mark it uninitialized.
 * The disable result is ignored; teardown proceeds regardless.
 */
void
ionic_lif_txq_deinit(struct ionic_tx_qcq *txq)
{
	ionic_qcq_disable(&txq->qcq);

	txq->flags &= ~IONIC_QCQ_F_INITED;
}
1255 
/* Tell the FW to stop servicing the Rx queue, then mark it uninitialized.
 * The disable result is ignored; teardown proceeds regardless.
 */
void
ionic_lif_rxq_deinit(struct ionic_rx_qcq *rxq)
{
	ionic_qcq_disable(&rxq->qcq);

	rxq->flags &= ~IONIC_QCQ_F_INITED;
}
1263 
/* No FW command is needed here; simply mark the adminq uninitialized */
static void
ionic_lif_adminq_deinit(struct ionic_lif *lif)
{
	lif->adminqcq->flags &= ~IONIC_QCQ_F_INITED;
}
1269 
/* Quiesce the notify queue: mask its interrupt and clear the INITED flag.
 * Safe to call more than once; exits early if never initialized.
 */
static void
ionic_lif_notifyq_deinit(struct ionic_lif *lif)
{
	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
	struct ionic_dev *idev = &lif->adapter->idev;

	if (!(nqcq->flags & IONIC_QCQ_F_INITED))
		return;

	/* Mask the interrupt so no further notifications are delivered */
	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_SET);

	nqcq->flags &= ~IONIC_QCQ_F_INITED;
}
1284 
1285 /* This acts like ionic_napi */
1286 int
1287 ionic_qcq_service(struct ionic_qcq *qcq, int budget, ionic_cq_cb cb,
1288 		void *cb_arg)
1289 {
1290 	struct ionic_cq *cq = &qcq->cq;
1291 	uint32_t work_done;
1292 
1293 	work_done = ionic_cq_service(cq, budget, cb, cb_arg);
1294 
1295 	return work_done;
1296 }
1297 
1298 static void
1299 ionic_link_status_check(struct ionic_lif *lif)
1300 {
1301 	struct ionic_adapter *adapter = lif->adapter;
1302 	bool link_up;
1303 
1304 	lif->state &= ~IONIC_LIF_F_LINK_CHECK_NEEDED;
1305 
1306 	if (!lif->info)
1307 		return;
1308 
1309 	link_up = (lif->info->status.link_status == IONIC_PORT_OPER_STATUS_UP);
1310 
1311 	if ((link_up  && adapter->link_up) ||
1312 	    (!link_up && !adapter->link_up))
1313 		return;
1314 
1315 	if (link_up) {
1316 		adapter->link_speed =
1317 			rte_le_to_cpu_32(lif->info->status.link_speed);
1318 		IONIC_PRINT(DEBUG, "Link up - %d Gbps",
1319 			adapter->link_speed);
1320 	} else {
1321 		IONIC_PRINT(DEBUG, "Link down");
1322 	}
1323 
1324 	adapter->link_up = link_up;
1325 	ionic_dev_link_update(lif->eth_dev, 0);
1326 }
1327 
1328 static void
1329 ionic_lif_handle_fw_down(struct ionic_lif *lif)
1330 {
1331 	if (lif->state & IONIC_LIF_F_FW_RESET)
1332 		return;
1333 
1334 	lif->state |= IONIC_LIF_F_FW_RESET;
1335 
1336 	if (lif->state & IONIC_LIF_F_UP) {
1337 		IONIC_PRINT(NOTICE,
1338 			"Surprise FW stop, stopping %s\n", lif->name);
1339 		ionic_lif_stop(lif);
1340 	}
1341 
1342 	IONIC_PRINT(NOTICE, "FW down, %s stopped", lif->name);
1343 }
1344 
1345 static bool
1346 ionic_notifyq_cb(struct ionic_cq *cq, uint16_t cq_desc_index, void *cb_arg)
1347 {
1348 	union ionic_notifyq_comp *cq_desc_base = cq->base;
1349 	union ionic_notifyq_comp *cq_desc = &cq_desc_base[cq_desc_index];
1350 	struct ionic_lif *lif = cb_arg;
1351 
1352 	IONIC_PRINT(DEBUG, "Notifyq callback eid = %jd ecode = %d",
1353 		cq_desc->event.eid, cq_desc->event.ecode);
1354 
1355 	/* Have we run out of new completions to process? */
1356 	if (!(cq_desc->event.eid > lif->last_eid))
1357 		return false;
1358 
1359 	lif->last_eid = cq_desc->event.eid;
1360 
1361 	switch (cq_desc->event.ecode) {
1362 	case IONIC_EVENT_LINK_CHANGE:
1363 		IONIC_PRINT(DEBUG,
1364 			"Notifyq IONIC_EVENT_LINK_CHANGE %s "
1365 			"eid=%jd link_status=%d link_speed=%d",
1366 			lif->name,
1367 			cq_desc->event.eid,
1368 			cq_desc->link_change.link_status,
1369 			cq_desc->link_change.link_speed);
1370 
1371 		lif->state |= IONIC_LIF_F_LINK_CHECK_NEEDED;
1372 		break;
1373 
1374 	case IONIC_EVENT_RESET:
1375 		IONIC_PRINT(NOTICE,
1376 			"Notifyq IONIC_EVENT_RESET %s "
1377 			"eid=%jd, reset_code=%d state=%d",
1378 			lif->name,
1379 			cq_desc->event.eid,
1380 			cq_desc->reset.reset_code,
1381 			cq_desc->reset.state);
1382 		ionic_lif_handle_fw_down(lif);
1383 		break;
1384 
1385 	default:
1386 		IONIC_PRINT(WARNING, "Notifyq bad event ecode=%d eid=%jd",
1387 			cq_desc->event.ecode, cq_desc->event.eid);
1388 		break;
1389 	}
1390 
1391 	return true;
1392 }
1393 
1394 int
1395 ionic_notifyq_handler(struct ionic_lif *lif, int budget)
1396 {
1397 	struct ionic_dev *idev = &lif->adapter->idev;
1398 	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
1399 	uint32_t work_done;
1400 
1401 	if (!(nqcq->flags & IONIC_QCQ_F_INITED)) {
1402 		IONIC_PRINT(DEBUG, "Notifyq not yet initialized");
1403 		return -1;
1404 	}
1405 
1406 	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
1407 		IONIC_INTR_MASK_SET);
1408 
1409 	work_done = ionic_qcq_service(&nqcq->qcq, budget,
1410 				ionic_notifyq_cb, lif);
1411 
1412 	if (lif->state & IONIC_LIF_F_LINK_CHECK_NEEDED)
1413 		ionic_link_status_check(lif);
1414 
1415 	ionic_intr_credits(idev->intr_ctrl, nqcq->intr.index,
1416 		work_done, IONIC_INTR_CRED_RESET_COALESCE);
1417 
1418 	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
1419 		IONIC_INTR_MASK_CLEAR);
1420 
1421 	return 0;
1422 }
1423 
1424 static int
1425 ionic_lif_adminq_init(struct ionic_lif *lif)
1426 {
1427 	struct ionic_dev *idev = &lif->adapter->idev;
1428 	struct ionic_admin_qcq *aqcq = lif->adminqcq;
1429 	struct ionic_queue *q = &aqcq->qcq.q;
1430 	struct ionic_q_init_comp comp;
1431 	uint32_t retries = 5;
1432 	int err;
1433 
1434 retry_adminq_init:
1435 	ionic_dev_cmd_adminq_init(idev, &aqcq->qcq);
1436 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
1437 	if (err == -EAGAIN && retries > 0) {
1438 		retries--;
1439 		rte_delay_us_block(IONIC_DEVCMD_RETRY_WAIT_US);
1440 		goto retry_adminq_init;
1441 	}
1442 	if (err)
1443 		return err;
1444 
1445 	ionic_dev_cmd_comp(idev, &comp);
1446 
1447 	q->hw_type = comp.hw_type;
1448 	q->hw_index = rte_le_to_cpu_32(comp.hw_index);
1449 	q->db = ionic_db_map(lif, q);
1450 
1451 	IONIC_PRINT(DEBUG, "adminq->hw_type %d", q->hw_type);
1452 	IONIC_PRINT(DEBUG, "adminq->hw_index %d", q->hw_index);
1453 	IONIC_PRINT(DEBUG, "adminq->db %p", q->db);
1454 
1455 	aqcq->flags |= IONIC_QCQ_F_INITED;
1456 
1457 	return 0;
1458 }
1459 
/*
 * Initialize the notify queue via an adminq Q_INIT command and unmask
 * its interrupt (when the device supports interrupts).
 * Returns 0 on success or a negative adminq error.
 */
static int
ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
	struct ionic_queue *q = &nqcq->qcq.q;
	uint16_t flags = IONIC_QINIT_F_ENA;
	int err;

	/* Q_INIT command; ver comes from the negotiated qtype support level */
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = rte_cpu_to_le_32(q->index),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.ring_base = rte_cpu_to_le_64(q->base_pa),
		}
	};

	/* Only enable an interrupt if the device supports them */
	if (lif->adapter->intf->configure_intr != NULL) {
		flags |= IONIC_QINIT_F_IRQ;
		ctx.cmd.q_init.intr_index = rte_cpu_to_le_16(nqcq->intr.index);
	}
	ctx.cmd.q_init.flags = rte_cpu_to_le_16(flags);

	IONIC_PRINT(DEBUG, "notifyq_init.index %d", q->index);
	IONIC_PRINT(DEBUG, "notifyq_init.ring_base 0x%" PRIx64 "", q->base_pa);
	IONIC_PRINT(DEBUG, "notifyq_init.ring_size %d",
		ctx.cmd.q_init.ring_size);
	IONIC_PRINT(DEBUG, "notifyq_init.ver %u", ctx.cmd.q_init.ver);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* Record the FW-assigned identity; the notifyq has no doorbell */
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
	q->db = NULL;

	IONIC_PRINT(DEBUG, "notifyq->hw_type %d", q->hw_type);
	IONIC_PRINT(DEBUG, "notifyq->hw_index %d", q->hw_index);
	IONIC_PRINT(DEBUG, "notifyq->db %p", q->db);

	/* Unmask the interrupt so notifications can be delivered */
	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_CLEAR);

	nqcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
1514 
/*
 * Request the feature set in lif->features from the FW. The FW replies
 * with the subset it actually enabled; lif->hw_features caches the
 * intersection of requested and granted bits.
 * Returns 0 on success or a negative adminq error.
 */
int
ionic_lif_set_features(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_FEATURES,
			.features = rte_cpu_to_le_64(lif->features),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* AND the (little-endian) requested and granted masks, then
	 * convert once to host order.
	 */
	lif->hw_features = rte_le_to_cpu_64(ctx.cmd.lif_setattr.features &
						ctx.comp.lif_setattr.features);

	/* Log each negotiated feature for debugging */
	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_TX_TAG");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_STRIP");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_FILTER");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_HASH");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_SG");
	if (lif->hw_features & IONIC_ETH_HW_RX_SG)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_SG");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_CSUM");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_CSUM");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPV6");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_ECN");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE_CSUM");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP4");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP6");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP_CSUM");

	return 0;
}
1572 
1573 int
1574 ionic_lif_txq_init(struct ionic_tx_qcq *txq)
1575 {
1576 	struct ionic_qcq *qcq = &txq->qcq;
1577 	struct ionic_queue *q = &qcq->q;
1578 	struct ionic_lif *lif = qcq->lif;
1579 	struct ionic_cq *cq = &qcq->cq;
1580 	struct ionic_admin_ctx ctx = {
1581 		.pending_work = true,
1582 		.cmd.q_init = {
1583 			.opcode = IONIC_CMD_Q_INIT,
1584 			.type = q->type,
1585 			.ver = lif->qtype_info[q->type].version,
1586 			.index = rte_cpu_to_le_32(q->index),
1587 			.flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA),
1588 			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
1589 			.ring_size = rte_log2_u32(q->num_descs),
1590 			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
1591 			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
1592 		},
1593 	};
1594 	int err;
1595 
1596 	if (txq->flags & IONIC_QCQ_F_SG)
1597 		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);
1598 	if (txq->flags & IONIC_QCQ_F_CMB) {
1599 		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);
1600 		ctx.cmd.q_init.ring_base = rte_cpu_to_le_64(q->cmb_base_pa);
1601 	} else {
1602 		ctx.cmd.q_init.ring_base = rte_cpu_to_le_64(q->base_pa);
1603 	}
1604 
1605 	IONIC_PRINT(DEBUG, "txq_init.index %d", q->index);
1606 	IONIC_PRINT(DEBUG, "txq_init.ring_base 0x%" PRIx64 "", q->base_pa);
1607 	IONIC_PRINT(DEBUG, "txq_init.ring_size %d",
1608 		ctx.cmd.q_init.ring_size);
1609 	IONIC_PRINT(DEBUG, "txq_init.ver %u", ctx.cmd.q_init.ver);
1610 
1611 	ionic_q_reset(q);
1612 	ionic_cq_reset(cq);
1613 
1614 	err = ionic_adminq_post_wait(lif, &ctx);
1615 	if (err)
1616 		return err;
1617 
1618 	q->hw_type = ctx.comp.q_init.hw_type;
1619 	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
1620 	q->db = ionic_db_map(lif, q);
1621 
1622 	IONIC_PRINT(DEBUG, "txq->hw_type %d", q->hw_type);
1623 	IONIC_PRINT(DEBUG, "txq->hw_index %d", q->hw_index);
1624 	IONIC_PRINT(DEBUG, "txq->db %p", q->db);
1625 
1626 	txq->flags |= IONIC_QCQ_F_INITED;
1627 
1628 	return 0;
1629 }
1630 
1631 int
1632 ionic_lif_rxq_init(struct ionic_rx_qcq *rxq)
1633 {
1634 	struct ionic_qcq *qcq = &rxq->qcq;
1635 	struct ionic_queue *q = &qcq->q;
1636 	struct ionic_lif *lif = qcq->lif;
1637 	struct ionic_cq *cq = &qcq->cq;
1638 	struct ionic_admin_ctx ctx = {
1639 		.pending_work = true,
1640 		.cmd.q_init = {
1641 			.opcode = IONIC_CMD_Q_INIT,
1642 			.type = q->type,
1643 			.ver = lif->qtype_info[q->type].version,
1644 			.index = rte_cpu_to_le_32(q->index),
1645 			.flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA),
1646 			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
1647 			.ring_size = rte_log2_u32(q->num_descs),
1648 			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
1649 			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
1650 		},
1651 	};
1652 	int err;
1653 
1654 	if (rxq->flags & IONIC_QCQ_F_SG)
1655 		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);
1656 	if (rxq->flags & IONIC_QCQ_F_CMB) {
1657 		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);
1658 		ctx.cmd.q_init.ring_base = rte_cpu_to_le_64(q->cmb_base_pa);
1659 	} else {
1660 		ctx.cmd.q_init.ring_base = rte_cpu_to_le_64(q->base_pa);
1661 	}
1662 
1663 	IONIC_PRINT(DEBUG, "rxq_init.index %d", q->index);
1664 	IONIC_PRINT(DEBUG, "rxq_init.ring_base 0x%" PRIx64 "", q->base_pa);
1665 	IONIC_PRINT(DEBUG, "rxq_init.ring_size %d",
1666 		ctx.cmd.q_init.ring_size);
1667 	IONIC_PRINT(DEBUG, "rxq_init.ver %u", ctx.cmd.q_init.ver);
1668 
1669 	ionic_q_reset(q);
1670 	ionic_cq_reset(cq);
1671 
1672 	err = ionic_adminq_post_wait(lif, &ctx);
1673 	if (err)
1674 		return err;
1675 
1676 	q->hw_type = ctx.comp.q_init.hw_type;
1677 	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
1678 	q->db = ionic_db_map(lif, q);
1679 
1680 	rxq->flags |= IONIC_QCQ_F_INITED;
1681 
1682 	IONIC_PRINT(DEBUG, "rxq->hw_type %d", q->hw_type);
1683 	IONIC_PRINT(DEBUG, "rxq->hw_index %d", q->hw_index);
1684 	IONIC_PRINT(DEBUG, "rxq->db %p", q->db);
1685 
1686 	return 0;
1687 }
1688 
1689 static int
1690 ionic_station_set(struct ionic_lif *lif)
1691 {
1692 	struct ionic_admin_ctx ctx = {
1693 		.pending_work = true,
1694 		.cmd.lif_getattr = {
1695 			.opcode = IONIC_CMD_LIF_GETATTR,
1696 			.attr = IONIC_LIF_ATTR_MAC,
1697 		},
1698 	};
1699 	int err;
1700 
1701 	IONIC_PRINT_CALL();
1702 
1703 	err = ionic_adminq_post_wait(lif, &ctx);
1704 	if (err)
1705 		return err;
1706 
1707 	memcpy(lif->mac_addr, ctx.comp.lif_getattr.mac, RTE_ETHER_ADDR_LEN);
1708 
1709 	return 0;
1710 }
1711 
/* Push the lif name to the FW for identification in its logs/tools */
static void
ionic_lif_set_name(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	/*
	 * Copy at most sizeof(name) - 1 bytes; the designated initializer
	 * zeroed the struct, so the name is always NUL-terminated.
	 */
	memcpy(ctx.cmd.lif_setattr.name, lif->name,
		sizeof(ctx.cmd.lif_setattr.name) - 1);

	/* Best-effort: the return value is intentionally ignored */
	ionic_adminq_post_wait(lif, &ctx);
}
1728 
/*
 * Bring up the lif: LIF_INIT devcmd (with retries), then adminq,
 * notifyq, initial features, rx filters, station MAC, and name.
 * On failure, unwinds via the goto chain in reverse order.
 * Returns 0 on success or a negative error.
 */
int
ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_lif_init_comp comp;
	uint32_t retries = 5;
	int err;

	memset(&lif->stats_base, 0, sizeof(lif->stats_base));

	/* Issue LIF_INIT, retrying on -EAGAIN with a short delay */
retry_lif_init:
	ionic_dev_cmd_lif_init(idev, lif->info_pa);
	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
	if (err == -EAGAIN && retries > 0) {
		retries--;
		rte_delay_us_block(IONIC_DEVCMD_RETRY_WAIT_US);
		goto retry_lif_init;
	}
	if (err)
		return err;

	ionic_dev_cmd_comp(idev, &comp);

	lif->hw_index = rte_cpu_to_le_16(comp.hw_index);

	err = ionic_lif_adminq_init(lif);
	if (err)
		return err;

	err = ionic_lif_notifyq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	/*
	 * Configure initial feature set
	 * This will be updated later by the dev_configure() step
	 */
	lif->features = IONIC_ETH_HW_RX_HASH | IONIC_ETH_HW_VLAN_RX_FILTER;

	err = ionic_lif_set_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	err = ionic_rx_filters_init(lif);
	if (err)
		goto err_out_notifyq_deinit;

	err = ionic_station_set(lif);
	if (err)
		goto err_out_rx_filter_deinit;

	ionic_lif_set_name(lif);

	lif->state |= IONIC_LIF_F_INITED;

	return 0;

err_out_rx_filter_deinit:
	ionic_rx_filters_deinit(lif);

err_out_notifyq_deinit:
	ionic_lif_notifyq_deinit(lif);

err_out_adminq_deinit:
	ionic_lif_adminq_deinit(lif);

	return err;
}
1797 
/*
 * Tear down everything ionic_lif_init() set up, in reverse order.
 * Safe to call when the lif was never initialized.
 */
void
ionic_lif_deinit(struct ionic_lif *lif)
{
	if (!(lif->state & IONIC_LIF_F_INITED))
		return;

	ionic_rx_filters_deinit(lif);
	ionic_lif_rss_teardown(lif);
	ionic_lif_notifyq_deinit(lif);
	ionic_lif_adminq_deinit(lif);

	lif->state &= ~IONIC_LIF_F_INITED;
}
1811 
1812 void
1813 ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)
1814 {
1815 	struct rte_eth_dev *eth_dev = lif->eth_dev;
1816 	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1817 
1818 	/*
1819 	 * IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so
1820 	 * set RTE_ETH_RX_OFFLOAD_VLAN_FILTER and ignore RTE_ETH_VLAN_FILTER_MASK
1821 	 */
1822 	rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
1823 
1824 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1825 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1826 			lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
1827 		else
1828 			lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
1829 	}
1830 }
1831 
1832 void
1833 ionic_lif_configure_rx_sg_offload(struct ionic_lif *lif)
1834 {
1835 	struct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode;
1836 
1837 	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
1838 		lif->features |= IONIC_ETH_HW_RX_SG;
1839 		lif->eth_dev->data->scattered_rx = 1;
1840 	} else {
1841 		lif->features &= ~IONIC_ETH_HW_RX_SG;
1842 		lif->eth_dev->data->scattered_rx = 0;
1843 	}
1844 }
1845 
/*
 * Translate the ethdev configuration (queue counts, Rx/Tx offloads)
 * into lif queue counts and IONIC_ETH_HW_* feature bits.
 * Called from the ethdev dev_configure path.
 */
void
ionic_lif_configure(struct ionic_lif *lif)
{
	struct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode;
	struct rte_eth_txmode *txmode = &lif->eth_dev->data->dev_conf.txmode;
	struct ionic_identity *ident = &lif->adapter->ident;
	union ionic_lif_config *cfg = &ident->lif.eth.config;
	uint32_t ntxqs_per_lif =
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
	uint32_t nrxqs_per_lif =
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]);
	uint32_t nrxqs = lif->eth_dev->data->nb_rx_queues;
	uint32_t ntxqs = lif->eth_dev->data->nb_tx_queues;

	lif->port_id = lif->eth_dev->data->port_id;

	IONIC_PRINT(DEBUG, "Configuring LIF on port %u",
		lif->port_id);

	/* Clamp the FW-advertised queue counts to what the app requested */
	if (nrxqs > 0)
		nrxqs_per_lif = RTE_MIN(nrxqs_per_lif, nrxqs);

	if (ntxqs > 0)
		ntxqs_per_lif = RTE_MIN(ntxqs_per_lif, ntxqs);

	lif->nrxqcqs = nrxqs_per_lif;
	lif->ntxqcqs = ntxqs_per_lif;

	/* Update the LIF configuration based on the eth_dev */

	/*
	 * NB: While it is true that RSS_HASH is always enabled on ionic,
	 *     setting this flag unconditionally causes problems in DTS.
	 * rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
	 */

	/* RX per-port */

	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM ||
	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM ||
	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
		lif->features |= IONIC_ETH_HW_RX_CSUM;
	else
		lif->features &= ~IONIC_ETH_HW_RX_CSUM;

	/*
	 * NB: RX_SG may be enabled later during rx_queue_setup() if
	 * required by the mbuf/mtu configuration
	 */
	ionic_lif_configure_rx_sg_offload(lif);

	/* Covers VLAN_STRIP */
	ionic_lif_configure_vlan_offload(lif, RTE_ETH_VLAN_STRIP_MASK);

	/* TX per-port */

	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
		lif->features |= IONIC_ETH_HW_TX_CSUM;
	else
		lif->features &= ~IONIC_ETH_HW_TX_CSUM;

	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
		lif->features |= IONIC_ETH_HW_VLAN_TX_TAG;
	else
		lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG;

	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
		lif->features |= IONIC_ETH_HW_TX_SG;
	else
		lif->features &= ~IONIC_ETH_HW_TX_SG;

	/* TCP TSO implies the IPv6 and ECN variants as well */
	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
		lif->features |= IONIC_ETH_HW_TSO;
		lif->features |= IONIC_ETH_HW_TSO_IPV6;
		lif->features |= IONIC_ETH_HW_TSO_ECN;
	} else {
		lif->features &= ~IONIC_ETH_HW_TSO;
		lif->features &= ~IONIC_ETH_HW_TSO_IPV6;
		lif->features &= ~IONIC_ETH_HW_TSO_ECN;
	}
}
1931 
1932 int
1933 ionic_lif_start(struct ionic_lif *lif)
1934 {
1935 	uint32_t rx_mode;
1936 	uint32_t i;
1937 	int err;
1938 
1939 	err = ionic_lif_rss_setup(lif);
1940 	if (err)
1941 		return err;
1942 
1943 	if (!lif->rx_mode) {
1944 		IONIC_PRINT(DEBUG, "Setting RX mode on %s",
1945 			lif->name);
1946 
1947 		rx_mode  = IONIC_RX_MODE_F_UNICAST;
1948 		rx_mode |= IONIC_RX_MODE_F_MULTICAST;
1949 		rx_mode |= IONIC_RX_MODE_F_BROADCAST;
1950 
1951 		ionic_set_rx_mode(lif, rx_mode);
1952 	}
1953 
1954 	IONIC_PRINT(DEBUG, "Starting %u RX queues and %u TX queues "
1955 		"on port %u",
1956 		lif->nrxqcqs, lif->ntxqcqs, lif->port_id);
1957 
1958 	for (i = 0; i < lif->nrxqcqs; i++) {
1959 		struct ionic_rx_qcq *rxq = lif->rxqcqs[i];
1960 		if (!(rxq->flags & IONIC_QCQ_F_DEFERRED)) {
1961 			err = ionic_dev_rx_queue_start(lif->eth_dev, i);
1962 
1963 			if (err)
1964 				return err;
1965 		}
1966 	}
1967 
1968 	for (i = 0; i < lif->ntxqcqs; i++) {
1969 		struct ionic_tx_qcq *txq = lif->txqcqs[i];
1970 		if (!(txq->flags & IONIC_QCQ_F_DEFERRED)) {
1971 			err = ionic_dev_tx_queue_start(lif->eth_dev, i);
1972 
1973 			if (err)
1974 				return err;
1975 		}
1976 	}
1977 
1978 	/* Carrier ON here */
1979 	lif->state |= IONIC_LIF_F_UP;
1980 
1981 	ionic_link_status_check(lif);
1982 
1983 	return 0;
1984 }
1985 
1986 int
1987 ionic_lif_identify(struct ionic_adapter *adapter)
1988 {
1989 	struct ionic_dev *idev = &adapter->idev;
1990 	struct ionic_identity *ident = &adapter->ident;
1991 	union ionic_lif_config *cfg = &ident->lif.eth.config;
1992 	uint32_t lif_words = RTE_DIM(ident->lif.words);
1993 	uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data);
1994 	uint32_t i, nwords;
1995 	int err;
1996 
1997 	ionic_dev_cmd_lif_identify(idev, IONIC_LIF_TYPE_CLASSIC,
1998 		IONIC_IDENTITY_VERSION_1);
1999 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
2000 	if (err)
2001 		return (err);
2002 
2003 	nwords = RTE_MIN(lif_words, cmd_words);
2004 	for (i = 0; i < nwords; i++)
2005 		ident->lif.words[i] = ioread32(&idev->dev_cmd->data[i]);
2006 
2007 	IONIC_PRINT(INFO, "capabilities 0x%" PRIx64 " ",
2008 		rte_le_to_cpu_64(ident->lif.capabilities));
2009 
2010 	IONIC_PRINT(INFO, "eth.max_ucast_filters 0x%" PRIx32 " ",
2011 		rte_le_to_cpu_32(ident->lif.eth.max_ucast_filters));
2012 	IONIC_PRINT(INFO, "eth.max_mcast_filters 0x%" PRIx32 " ",
2013 		rte_le_to_cpu_32(ident->lif.eth.max_mcast_filters));
2014 
2015 	IONIC_PRINT(INFO, "eth.features 0x%" PRIx64 " ",
2016 		rte_le_to_cpu_64(cfg->features));
2017 	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_ADMINQ] 0x%" PRIx32 " ",
2018 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_ADMINQ]));
2019 	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] 0x%" PRIx32 " ",
2020 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_NOTIFYQ]));
2021 	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_RXQ] 0x%" PRIx32 " ",
2022 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]));
2023 	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_TXQ] 0x%" PRIx32 " ",
2024 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]));
2025 
2026 	return 0;
2027 }
2028 
2029 int
2030 ionic_lifs_size(struct ionic_adapter *adapter)
2031 {
2032 	struct ionic_identity *ident = &adapter->ident;
2033 	union ionic_lif_config *cfg = &ident->lif.eth.config;
2034 	uint32_t nintrs, dev_nintrs = rte_le_to_cpu_32(ident->dev.nintrs);
2035 
2036 	adapter->max_ntxqs_per_lif =
2037 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
2038 	adapter->max_nrxqs_per_lif =
2039 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]);
2040 
2041 	nintrs = 1 /* notifyq */;
2042 
2043 	if (nintrs > dev_nintrs) {
2044 		IONIC_PRINT(ERR,
2045 			"At most %d intr supported, minimum req'd is %u",
2046 			dev_nintrs, nintrs);
2047 		return -ENOSPC;
2048 	}
2049 
2050 	adapter->nintrs = nintrs;
2051 
2052 	return 0;
2053 }
2054