xref: /dpdk/drivers/net/ionic/ionic_lif.c (revision 1a9afd1f45070c9cf31dc0eae9c9de83c40369f3)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2022 Advanced Micro Devices, Inc.
3  */
4 
5 #include <rte_malloc.h>
6 #include <ethdev_driver.h>
7 
8 #include "ionic.h"
9 #include "ionic_logs.h"
10 #include "ionic_lif.h"
11 #include "ionic_ethdev.h"
12 #include "ionic_rx_filter.h"
13 #include "ionic_rxtx.h"
14 
15 /* queuetype support level */
static const uint8_t ionic_qtype_vers[IONIC_QTYPE_MAX] = {
	/* Per-queue-type version this driver requests from the firmware
	 * via queue identify (see ionic_lif_queue_identify()).
	 */
	[IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 2,   /* 0 = Base version with CQ+SG support
				      * 1 =       ... with EQ
				      * 2 =       ... with CMB
				      */
	[IONIC_QTYPE_TXQ]     = 3,   /* 0 = Base version with CQ+SG support
				      * 1 =   ... with Tx SG version 1
				      * 2 =       ... with EQ
				      * 3 =       ... with CMB
				      */
};
29 
30 static int ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr);
31 static int ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr);
32 
33 static int
34 ionic_qcq_disable(struct ionic_qcq *qcq)
35 {
36 	struct ionic_queue *q = &qcq->q;
37 	struct ionic_lif *lif = qcq->lif;
38 	struct ionic_admin_ctx ctx = {
39 		.pending_work = true,
40 		.cmd.q_control = {
41 			.opcode = IONIC_CMD_Q_CONTROL,
42 			.type = q->type,
43 			.index = rte_cpu_to_le_32(q->index),
44 			.oper = IONIC_Q_DISABLE,
45 		},
46 	};
47 
48 	return ionic_adminq_post_wait(lif, &ctx);
49 }
50 
51 void
52 ionic_lif_stop(struct ionic_lif *lif)
53 {
54 	uint32_t i;
55 
56 	IONIC_PRINT_CALL();
57 
58 	lif->state &= ~IONIC_LIF_F_UP;
59 
60 	for (i = 0; i < lif->nrxqcqs; i++) {
61 		struct ionic_rx_qcq *rxq = lif->rxqcqs[i];
62 		if (rxq->flags & IONIC_QCQ_F_INITED)
63 			(void)ionic_dev_rx_queue_stop(lif->eth_dev, i);
64 	}
65 
66 	for (i = 0; i < lif->ntxqcqs; i++) {
67 		struct ionic_tx_qcq *txq = lif->txqcqs[i];
68 		if (txq->flags & IONIC_QCQ_F_INITED)
69 			(void)ionic_dev_tx_queue_stop(lif->eth_dev, i);
70 	}
71 }
72 
73 void
74 ionic_lif_reset(struct ionic_lif *lif)
75 {
76 	struct ionic_dev *idev = &lif->adapter->idev;
77 	int err;
78 
79 	IONIC_PRINT_CALL();
80 
81 	ionic_dev_cmd_lif_reset(idev);
82 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
83 	if (err)
84 		IONIC_PRINT(WARNING, "Failed to reset %s", lif->name);
85 }
86 
87 static void
88 ionic_lif_get_abs_stats(const struct ionic_lif *lif, struct rte_eth_stats *stats)
89 {
90 	struct ionic_lif_stats *ls = &lif->info->stats;
91 	uint32_t i;
92 	uint32_t num_rx_q_counters = RTE_MIN(lif->nrxqcqs, (uint32_t)
93 			RTE_ETHDEV_QUEUE_STAT_CNTRS);
94 	uint32_t num_tx_q_counters = RTE_MIN(lif->ntxqcqs, (uint32_t)
95 			RTE_ETHDEV_QUEUE_STAT_CNTRS);
96 
97 	memset(stats, 0, sizeof(*stats));
98 
99 	if (ls == NULL) {
100 		IONIC_PRINT(DEBUG, "Stats on port %u not yet initialized",
101 			lif->port_id);
102 		return;
103 	}
104 
105 	/* RX */
106 
107 	stats->ipackets = ls->rx_ucast_packets +
108 		ls->rx_mcast_packets +
109 		ls->rx_bcast_packets;
110 
111 	stats->ibytes = ls->rx_ucast_bytes +
112 		ls->rx_mcast_bytes +
113 		ls->rx_bcast_bytes;
114 
115 	for (i = 0; i < lif->nrxqcqs; i++) {
116 		struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats;
117 		stats->ierrors +=
118 			rx_stats->bad_cq_status +
119 			rx_stats->bad_len;
120 	}
121 
122 	stats->imissed +=
123 		ls->rx_ucast_drop_packets +
124 		ls->rx_mcast_drop_packets +
125 		ls->rx_bcast_drop_packets;
126 
127 	stats->ierrors +=
128 		ls->rx_dma_error +
129 		ls->rx_desc_fetch_error +
130 		ls->rx_desc_data_error;
131 
132 	for (i = 0; i < num_rx_q_counters; i++) {
133 		struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats;
134 		stats->q_ipackets[i] = rx_stats->packets;
135 		stats->q_ibytes[i] = rx_stats->bytes;
136 		stats->q_errors[i] =
137 			rx_stats->bad_cq_status +
138 			rx_stats->bad_len;
139 	}
140 
141 	/* TX */
142 
143 	stats->opackets = ls->tx_ucast_packets +
144 		ls->tx_mcast_packets +
145 		ls->tx_bcast_packets;
146 
147 	stats->obytes = ls->tx_ucast_bytes +
148 		ls->tx_mcast_bytes +
149 		ls->tx_bcast_bytes;
150 
151 	for (i = 0; i < lif->ntxqcqs; i++) {
152 		struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats;
153 		stats->oerrors += tx_stats->drop;
154 	}
155 
156 	stats->oerrors +=
157 		ls->tx_ucast_drop_packets +
158 		ls->tx_mcast_drop_packets +
159 		ls->tx_bcast_drop_packets;
160 
161 	stats->oerrors +=
162 		ls->tx_dma_error +
163 		ls->tx_queue_disabled +
164 		ls->tx_desc_fetch_error +
165 		ls->tx_desc_data_error;
166 
167 	for (i = 0; i < num_tx_q_counters; i++) {
168 		struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats;
169 		stats->q_opackets[i] = tx_stats->packets;
170 		stats->q_obytes[i] = tx_stats->bytes;
171 	}
172 }
173 
174 void
175 ionic_lif_get_stats(const struct ionic_lif *lif,
176 		struct rte_eth_stats *stats)
177 {
178 	ionic_lif_get_abs_stats(lif, stats);
179 
180 	stats->ipackets  -= lif->stats_base.ipackets;
181 	stats->opackets  -= lif->stats_base.opackets;
182 	stats->ibytes    -= lif->stats_base.ibytes;
183 	stats->obytes    -= lif->stats_base.obytes;
184 	stats->imissed   -= lif->stats_base.imissed;
185 	stats->ierrors   -= lif->stats_base.ierrors;
186 	stats->oerrors   -= lif->stats_base.oerrors;
187 	stats->rx_nombuf -= lif->stats_base.rx_nombuf;
188 }
189 
190 void
191 ionic_lif_reset_stats(struct ionic_lif *lif)
192 {
193 	uint32_t i;
194 
195 	for (i = 0; i < lif->nrxqcqs; i++) {
196 		memset(&lif->rxqcqs[i]->stats, 0,
197 			sizeof(struct ionic_rx_stats));
198 		memset(&lif->txqcqs[i]->stats, 0,
199 			sizeof(struct ionic_tx_stats));
200 	}
201 
202 	ionic_lif_get_abs_stats(lif, &lif->stats_base);
203 }
204 
205 void
206 ionic_lif_get_hw_stats(struct ionic_lif *lif, struct ionic_lif_stats *stats)
207 {
208 	uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t);
209 	uint64_t *stats64 = (uint64_t *)stats;
210 	uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats;
211 	uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base;
212 
213 	for (i = 0; i < count; i++)
214 		stats64[i] = lif_stats64[i] - lif_stats64_base[i];
215 }
216 
217 void
218 ionic_lif_reset_hw_stats(struct ionic_lif *lif)
219 {
220 	uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t);
221 	uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats;
222 	uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base;
223 
224 	for (i = 0; i < count; i++)
225 		lif_stats64_base[i] = lif_stats64[i];
226 }
227 
228 static int
229 ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr)
230 {
231 	struct ionic_admin_ctx ctx = {
232 		.pending_work = true,
233 		.cmd.rx_filter_add = {
234 			.opcode = IONIC_CMD_RX_FILTER_ADD,
235 			.match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_MAC),
236 		},
237 	};
238 	int err;
239 
240 	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, RTE_ETHER_ADDR_LEN);
241 
242 	err = ionic_adminq_post_wait(lif, &ctx);
243 	if (err)
244 		return err;
245 
246 	IONIC_PRINT(INFO, "rx_filter add (id %d)",
247 		rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id));
248 
249 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx);
250 }
251 
252 static int
253 ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr)
254 {
255 	struct ionic_admin_ctx ctx = {
256 		.pending_work = true,
257 		.cmd.rx_filter_del = {
258 			.opcode = IONIC_CMD_RX_FILTER_DEL,
259 		},
260 	};
261 	struct ionic_rx_filter *f;
262 	int err;
263 
264 	IONIC_PRINT_CALL();
265 
266 	rte_spinlock_lock(&lif->rx_filters.lock);
267 
268 	f = ionic_rx_filter_by_addr(lif, addr);
269 	if (!f) {
270 		rte_spinlock_unlock(&lif->rx_filters.lock);
271 		return -ENOENT;
272 	}
273 
274 	ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id);
275 	ionic_rx_filter_free(f);
276 
277 	rte_spinlock_unlock(&lif->rx_filters.lock);
278 
279 	err = ionic_adminq_post_wait(lif, &ctx);
280 	if (err)
281 		return err;
282 
283 	IONIC_PRINT(INFO, "rx_filter del (id %d)",
284 		rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id));
285 
286 	return 0;
287 }
288 
289 int
290 ionic_dev_add_mac(struct rte_eth_dev *eth_dev,
291 		struct rte_ether_addr *mac_addr,
292 		uint32_t index __rte_unused, uint32_t pool __rte_unused)
293 {
294 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
295 
296 	IONIC_PRINT_CALL();
297 
298 	return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr);
299 }
300 
301 void
302 ionic_dev_remove_mac(struct rte_eth_dev *eth_dev, uint32_t index)
303 {
304 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
305 	struct ionic_adapter *adapter = lif->adapter;
306 	struct rte_ether_addr *mac_addr;
307 
308 	IONIC_PRINT_CALL();
309 
310 	if (index >= adapter->max_mac_addrs) {
311 		IONIC_PRINT(WARNING,
312 			"Index %u is above MAC filter limit %u",
313 			index, adapter->max_mac_addrs);
314 		return;
315 	}
316 
317 	mac_addr = &eth_dev->data->mac_addrs[index];
318 
319 	if (!rte_is_valid_assigned_ether_addr(mac_addr))
320 		return;
321 
322 	ionic_lif_addr_del(lif, (const uint8_t *)mac_addr);
323 }
324 
325 int
326 ionic_dev_set_mac(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
327 {
328 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
329 
330 	IONIC_PRINT_CALL();
331 
332 	if (mac_addr == NULL) {
333 		IONIC_PRINT(NOTICE, "New mac is null");
334 		return -1;
335 	}
336 
337 	if (!rte_is_zero_ether_addr((struct rte_ether_addr *)lif->mac_addr)) {
338 		IONIC_PRINT(INFO, "Deleting mac addr %pM",
339 			lif->mac_addr);
340 		ionic_lif_addr_del(lif, lif->mac_addr);
341 		memset(lif->mac_addr, 0, RTE_ETHER_ADDR_LEN);
342 	}
343 
344 	IONIC_PRINT(INFO, "Updating mac addr");
345 
346 	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)lif->mac_addr);
347 
348 	return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr);
349 }
350 
351 static int
352 ionic_vlan_rx_add_vid(struct ionic_lif *lif, uint16_t vid)
353 {
354 	struct ionic_admin_ctx ctx = {
355 		.pending_work = true,
356 		.cmd.rx_filter_add = {
357 			.opcode = IONIC_CMD_RX_FILTER_ADD,
358 			.match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_VLAN),
359 			.vlan.vlan = rte_cpu_to_le_16(vid),
360 		},
361 	};
362 	int err;
363 
364 	err = ionic_adminq_post_wait(lif, &ctx);
365 	if (err)
366 		return err;
367 
368 	IONIC_PRINT(INFO, "rx_filter add VLAN %d (id %d)", vid,
369 		rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id));
370 
371 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx);
372 }
373 
374 static int
375 ionic_vlan_rx_kill_vid(struct ionic_lif *lif, uint16_t vid)
376 {
377 	struct ionic_admin_ctx ctx = {
378 		.pending_work = true,
379 		.cmd.rx_filter_del = {
380 			.opcode = IONIC_CMD_RX_FILTER_DEL,
381 		},
382 	};
383 	struct ionic_rx_filter *f;
384 	int err;
385 
386 	IONIC_PRINT_CALL();
387 
388 	rte_spinlock_lock(&lif->rx_filters.lock);
389 
390 	f = ionic_rx_filter_by_vlan(lif, vid);
391 	if (!f) {
392 		rte_spinlock_unlock(&lif->rx_filters.lock);
393 		return -ENOENT;
394 	}
395 
396 	ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id);
397 	ionic_rx_filter_free(f);
398 	rte_spinlock_unlock(&lif->rx_filters.lock);
399 
400 	err = ionic_adminq_post_wait(lif, &ctx);
401 	if (err)
402 		return err;
403 
404 	IONIC_PRINT(INFO, "rx_filter del VLAN %d (id %d)", vid,
405 		rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id));
406 
407 	return 0;
408 }
409 
410 int
411 ionic_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id,
412 		int on)
413 {
414 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
415 	int err;
416 
417 	if (on)
418 		err = ionic_vlan_rx_add_vid(lif, vlan_id);
419 	else
420 		err = ionic_vlan_rx_kill_vid(lif, vlan_id);
421 
422 	return err;
423 }
424 
425 static void
426 ionic_lif_rx_mode(struct ionic_lif *lif, uint32_t rx_mode)
427 {
428 	struct ionic_admin_ctx ctx = {
429 		.pending_work = true,
430 		.cmd.rx_mode_set = {
431 			.opcode = IONIC_CMD_RX_MODE_SET,
432 			.rx_mode = rte_cpu_to_le_16(rx_mode),
433 		},
434 	};
435 	int err;
436 
437 	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
438 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_UNICAST");
439 	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
440 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_MULTICAST");
441 	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
442 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_BROADCAST");
443 	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
444 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_PROMISC");
445 	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
446 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_ALLMULTI");
447 
448 	err = ionic_adminq_post_wait(lif, &ctx);
449 	if (err)
450 		IONIC_PRINT(ERR, "Failure setting RX mode");
451 }
452 
453 static void
454 ionic_set_rx_mode(struct ionic_lif *lif, uint32_t rx_mode)
455 {
456 	if (lif->rx_mode != rx_mode) {
457 		lif->rx_mode = rx_mode;
458 		ionic_lif_rx_mode(lif, rx_mode);
459 	}
460 }
461 
462 int
463 ionic_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
464 {
465 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
466 	uint32_t rx_mode = lif->rx_mode;
467 
468 	IONIC_PRINT_CALL();
469 
470 	rx_mode |= IONIC_RX_MODE_F_PROMISC;
471 
472 	ionic_set_rx_mode(lif, rx_mode);
473 
474 	return 0;
475 }
476 
477 int
478 ionic_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
479 {
480 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
481 	uint32_t rx_mode = lif->rx_mode;
482 
483 	rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
484 
485 	ionic_set_rx_mode(lif, rx_mode);
486 
487 	return 0;
488 }
489 
490 int
491 ionic_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
492 {
493 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
494 	uint32_t rx_mode = lif->rx_mode;
495 
496 	rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
497 
498 	ionic_set_rx_mode(lif, rx_mode);
499 
500 	return 0;
501 }
502 
503 int
504 ionic_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
505 {
506 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
507 	uint32_t rx_mode = lif->rx_mode;
508 
509 	rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
510 
511 	ionic_set_rx_mode(lif, rx_mode);
512 
513 	return 0;
514 }
515 
516 int
517 ionic_lif_change_mtu(struct ionic_lif *lif, uint32_t new_mtu)
518 {
519 	struct ionic_admin_ctx ctx = {
520 		.pending_work = true,
521 		.cmd.lif_setattr = {
522 			.opcode = IONIC_CMD_LIF_SETATTR,
523 			.attr = IONIC_LIF_ATTR_MTU,
524 			.mtu = rte_cpu_to_le_32(new_mtu),
525 		},
526 	};
527 
528 	return ionic_adminq_post_wait(lif, &ctx);
529 }
530 
531 int
532 ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
533 {
534 	struct ionic_adapter *adapter = lif->adapter;
535 	struct ionic_dev *idev = &adapter->idev;
536 	unsigned long index;
537 
538 	/*
539 	 * Note: interrupt handler is called for index = 0 only
540 	 * (we use interrupts for the notifyq only anyway,
541 	 * which has index = 0)
542 	 */
543 
544 	for (index = 0; index < adapter->nintrs; index++)
545 		if (!adapter->intrs[index])
546 			break;
547 
548 	if (index == adapter->nintrs)
549 		return -ENOSPC;
550 
551 	adapter->intrs[index] = true;
552 
553 	ionic_intr_init(idev, intr, index);
554 
555 	return 0;
556 }
557 
558 static int
559 ionic_qcq_alloc(struct ionic_lif *lif,
560 		uint8_t type,
561 		size_t struct_size,
562 		uint32_t socket_id,
563 		uint32_t index,
564 		const char *type_name,
565 		uint16_t flags,
566 		uint16_t num_descs,
567 		uint16_t num_segs,
568 		uint16_t desc_size,
569 		uint16_t cq_desc_size,
570 		uint16_t sg_desc_size,
571 		struct ionic_qcq **qcq)
572 {
573 	struct ionic_qcq *new;
574 	uint32_t q_size, cq_size, sg_size, total_size;
575 	void *q_base, *cmb_q_base, *cq_base, *sg_base;
576 	rte_iova_t q_base_pa = 0;
577 	rte_iova_t cq_base_pa = 0;
578 	rte_iova_t sg_base_pa = 0;
579 	rte_iova_t cmb_q_base_pa = 0;
580 	size_t page_size = rte_mem_page_size();
581 	int err;
582 
583 	*qcq = NULL;
584 
585 	q_size  = num_descs * desc_size;
586 	cq_size = num_descs * cq_desc_size;
587 	sg_size = num_descs * sg_desc_size;
588 
589 	total_size = RTE_ALIGN(q_size, page_size) +
590 			RTE_ALIGN(cq_size, page_size);
591 	/*
592 	 * Note: aligning q_size/cq_size is not enough due to cq_base address
593 	 * aligning as q_base could be not aligned to the page.
594 	 * Adding page_size.
595 	 */
596 	total_size += page_size;
597 
598 	if (flags & IONIC_QCQ_F_SG) {
599 		total_size += RTE_ALIGN(sg_size, page_size);
600 		total_size += page_size;
601 	}
602 
603 	new = rte_zmalloc_socket("ionic", struct_size,
604 				RTE_CACHE_LINE_SIZE, socket_id);
605 	if (!new) {
606 		IONIC_PRINT(ERR, "Cannot allocate queue structure");
607 		return -ENOMEM;
608 	}
609 
610 	new->lif = lif;
611 
612 	/* Most queue types will store 1 ptr per descriptor */
613 	new->q.info = rte_calloc_socket("ionic",
614 				(uint64_t)num_descs * num_segs,
615 				sizeof(void *), page_size, socket_id);
616 	if (!new->q.info) {
617 		IONIC_PRINT(ERR, "Cannot allocate queue info");
618 		err = -ENOMEM;
619 		goto err_out_free_qcq;
620 	}
621 
622 	new->q.num_segs = num_segs;
623 	new->q.type = type;
624 
625 	err = ionic_q_init(&new->q, index, num_descs);
626 	if (err) {
627 		IONIC_PRINT(ERR, "Queue initialization failed");
628 		goto err_out_free_info;
629 	}
630 
631 	err = ionic_cq_init(&new->cq, num_descs);
632 	if (err) {
633 		IONIC_PRINT(ERR, "Completion queue initialization failed");
634 		goto err_out_free_info;
635 	}
636 
637 	new->base_z = rte_eth_dma_zone_reserve(lif->eth_dev,
638 		type_name, index /* queue_idx */,
639 		total_size, IONIC_ALIGN, socket_id);
640 
641 	if (!new->base_z) {
642 		IONIC_PRINT(ERR, "Cannot reserve queue DMA memory");
643 		err = -ENOMEM;
644 		goto err_out_free_info;
645 	}
646 
647 	new->base = new->base_z->addr;
648 	new->base_pa = new->base_z->iova;
649 
650 	q_base = new->base;
651 	q_base_pa = new->base_pa;
652 
653 	cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size, page_size);
654 	cq_base_pa = RTE_ALIGN(q_base_pa + q_size, page_size);
655 
656 	if (flags & IONIC_QCQ_F_SG) {
657 		sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size,
658 				page_size);
659 		sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size, page_size);
660 		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
661 	}
662 
663 	if (flags & IONIC_QCQ_F_CMB) {
664 		/* alloc descriptor ring from nic memory */
665 		if (lif->adapter->cmb_offset + q_size >
666 				lif->adapter->bars.bar[2].len) {
667 			IONIC_PRINT(ERR, "Cannot reserve queue from NIC mem");
668 			return -ENOMEM;
669 		}
670 		cmb_q_base = (void *)
671 			((uintptr_t)lif->adapter->bars.bar[2].vaddr +
672 			 (uintptr_t)lif->adapter->cmb_offset);
673 		/* CMB PA is a relative address */
674 		cmb_q_base_pa = lif->adapter->cmb_offset;
675 		lif->adapter->cmb_offset += q_size;
676 	} else {
677 		cmb_q_base = NULL;
678 		cmb_q_base_pa = 0;
679 	}
680 
681 	IONIC_PRINT(DEBUG, "Q-Base-PA = %#jx CQ-Base-PA = %#jx "
682 		"SG-base-PA = %#jx",
683 		q_base_pa, cq_base_pa, sg_base_pa);
684 
685 	ionic_q_map(&new->q, q_base, q_base_pa, cmb_q_base, cmb_q_base_pa);
686 	ionic_cq_map(&new->cq, cq_base, cq_base_pa);
687 
688 	*qcq = new;
689 
690 	return 0;
691 
692 err_out_free_info:
693 	rte_free(new->q.info);
694 err_out_free_qcq:
695 	rte_free(new);
696 
697 	return err;
698 }
699 
700 void
701 ionic_qcq_free(struct ionic_qcq *qcq)
702 {
703 	if (qcq->base_z) {
704 		qcq->base = NULL;
705 		qcq->base_pa = 0;
706 		rte_memzone_free(qcq->base_z);
707 		qcq->base_z = NULL;
708 	}
709 
710 	if (qcq->q.info) {
711 		rte_free(qcq->q.info);
712 		qcq->q.info = NULL;
713 	}
714 
715 	rte_free(qcq);
716 }
717 
/*
 * Build a template mbuf with headroom and capture its 64-bit
 * rearm_data word, stored in rxq->rearm_data for the first segment
 * of received packets (presumably consumed by the Rx fill path —
 * not visible in this file).
 */
static uint64_t
ionic_rx_rearm_data(struct ionic_lif *lif)
{
	struct rte_mbuf rxm;

	/* memset (not = {0}) also zeroes padding inside rearm_data */
	memset(&rxm, 0, sizeof(rxm));

	rte_mbuf_refcnt_set(&rxm, 1);
	rxm.data_off = RTE_PKTMBUF_HEADROOM;
	rxm.nb_segs = 1;
	rxm.port = lif->port_id;

	/* Keep the compiler from reordering the stores past the read */
	rte_compiler_barrier();

	RTE_BUILD_BUG_ON(sizeof(rxm.rearm_data[0]) != sizeof(uint64_t));
	return rxm.rearm_data[0];
}
735 
/*
 * Same as ionic_rx_rearm_data() but with data_off = 0: chained
 * (non-first) Rx segments carry no headroom. Stored in
 * rxq->rearm_seg_data.
 */
static uint64_t
ionic_rx_seg_rearm_data(struct ionic_lif *lif)
{
	struct rte_mbuf rxm;

	/* memset (not = {0}) also zeroes padding inside rearm_data */
	memset(&rxm, 0, sizeof(rxm));

	rte_mbuf_refcnt_set(&rxm, 1);
	rxm.data_off = 0;  /* no headroom */
	rxm.nb_segs = 1;
	rxm.port = lif->port_id;

	/* Keep the compiler from reordering the stores past the read */
	rte_compiler_barrier();

	RTE_BUILD_BUG_ON(sizeof(rxm.rearm_data[0]) != sizeof(uint64_t));
	return rxm.rearm_data[0];
}
753 
/*
 * Allocate one Rx queue/CQ pair.
 *
 * Sizes the queue for the worst-case number of mbuf segments per
 * packet given the mempool's data room and the device's max MTU,
 * force-enabling scatter (RX_OFFLOAD_SCATTER) when a max-MTU frame
 * cannot fit in a single mbuf. Fails with -EINVAL if even the
 * firmware's SG limit cannot cover the worst case.
 *
 * On success stores the queue in lif->rxqcqs[index] and *rxq_out.
 */
int
ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
		uint16_t nrxq_descs, struct rte_mempool *mb_pool,
		struct ionic_rx_qcq **rxq_out)
{
	struct ionic_rx_qcq *rxq;
	uint16_t flags = 0, seg_size, hdr_seg_size, max_segs, max_segs_fw = 1;
	uint32_t max_mtu;
	int err;

	if (lif->state & IONIC_LIF_F_Q_IN_CMB)
		flags |= IONIC_QCQ_F_CMB;

	seg_size = rte_pktmbuf_data_room_size(mb_pool);

	/* The first mbuf needs to leave headroom */
	hdr_seg_size = seg_size - RTE_PKTMBUF_HEADROOM;

	max_mtu = rte_le_to_cpu_32(lif->adapter->ident.lif.eth.max_mtu);

	/* If mbufs are too small to hold received packets, enable SG */
	if (max_mtu > hdr_seg_size &&
	    !(lif->features & IONIC_ETH_HW_RX_SG)) {
		IONIC_PRINT(NOTICE, "Enabling RX_OFFLOAD_SCATTER");
		lif->eth_dev->data->dev_conf.rxmode.offloads |=
			RTE_ETH_RX_OFFLOAD_SCATTER;
		ionic_lif_configure_rx_sg_offload(lif);
	}

	/* SG may have just been enabled above, so re-check the feature bit */
	if (lif->features & IONIC_ETH_HW_RX_SG) {
		flags |= IONIC_QCQ_F_SG;
		max_segs_fw = IONIC_RX_MAX_SG_ELEMS + 1;
	}

	/*
	 * Calculate how many fragment pointers might be stored in queue.
	 * This is the worst-case number, so that there's enough room in
	 * the info array.
	 */
	max_segs = 1 + (max_mtu + RTE_PKTMBUF_HEADROOM - 1) / seg_size;

	IONIC_PRINT(DEBUG, "rxq %u max_mtu %u seg_size %u max_segs %u",
		index, max_mtu, seg_size, max_segs);
	if (max_segs > max_segs_fw) {
		IONIC_PRINT(ERR, "Rx mbuf size insufficient (%d > %d avail)",
			max_segs, max_segs_fw);
		return -EINVAL;
	}

	err = ionic_qcq_alloc(lif,
		IONIC_QTYPE_RXQ,
		sizeof(struct ionic_rx_qcq),
		socket_id,
		index,
		"rx",
		flags,
		nrxq_descs,
		max_segs,
		sizeof(struct ionic_rxq_desc),
		sizeof(struct ionic_rxq_comp),
		sizeof(struct ionic_rxq_sg_desc),
		(struct ionic_qcq **)&rxq);
	if (err)
		return err;

	rxq->flags = flags;
	rxq->seg_size = seg_size;
	rxq->hdr_seg_size = hdr_seg_size;
	/* Precomputed mbuf templates for first and chained segments */
	rxq->rearm_data = ionic_rx_rearm_data(lif);
	rxq->rearm_seg_data = ionic_rx_seg_rearm_data(lif);

	lif->rxqcqs[index] = rxq;
	*rxq_out = rxq;

	return 0;
}
830 
831 int
832 ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
833 		uint16_t ntxq_descs, struct ionic_tx_qcq **txq_out)
834 {
835 	struct ionic_tx_qcq *txq;
836 	uint16_t flags = 0, num_segs_fw = 1;
837 	int err;
838 
839 	if (lif->features & IONIC_ETH_HW_TX_SG) {
840 		flags |= IONIC_QCQ_F_SG;
841 		num_segs_fw = IONIC_TX_MAX_SG_ELEMS_V1 + 1;
842 	}
843 	if (lif->state & IONIC_LIF_F_Q_IN_CMB)
844 		flags |= IONIC_QCQ_F_CMB;
845 
846 	IONIC_PRINT(DEBUG, "txq %u num_segs %u", index, num_segs_fw);
847 
848 	err = ionic_qcq_alloc(lif,
849 		IONIC_QTYPE_TXQ,
850 		sizeof(struct ionic_tx_qcq),
851 		socket_id,
852 		index,
853 		"tx",
854 		flags,
855 		ntxq_descs,
856 		num_segs_fw,
857 		sizeof(struct ionic_txq_desc),
858 		sizeof(struct ionic_txq_comp),
859 		sizeof(struct ionic_txq_sg_desc_v1),
860 		(struct ionic_qcq **)&txq);
861 	if (err)
862 		return err;
863 
864 	txq->flags = flags;
865 	txq->num_segs_fw = num_segs_fw;
866 
867 	lif->txqcqs[index] = txq;
868 	*txq_out = txq;
869 
870 	return 0;
871 }
872 
873 static int
874 ionic_admin_qcq_alloc(struct ionic_lif *lif)
875 {
876 	uint16_t flags = 0;
877 	int err;
878 
879 	err = ionic_qcq_alloc(lif,
880 		IONIC_QTYPE_ADMINQ,
881 		sizeof(struct ionic_admin_qcq),
882 		rte_socket_id(),
883 		0,
884 		"admin",
885 		flags,
886 		IONIC_ADMINQ_LENGTH,
887 		1,
888 		sizeof(struct ionic_admin_cmd),
889 		sizeof(struct ionic_admin_comp),
890 		0,
891 		(struct ionic_qcq **)&lif->adminqcq);
892 	if (err)
893 		return err;
894 
895 	return 0;
896 }
897 
898 static int
899 ionic_notify_qcq_alloc(struct ionic_lif *lif)
900 {
901 	struct ionic_notify_qcq *nqcq;
902 	struct ionic_dev *idev = &lif->adapter->idev;
903 	uint16_t flags = 0;
904 	int err;
905 
906 	err = ionic_qcq_alloc(lif,
907 		IONIC_QTYPE_NOTIFYQ,
908 		sizeof(struct ionic_notify_qcq),
909 		rte_socket_id(),
910 		0,
911 		"notify",
912 		flags,
913 		IONIC_NOTIFYQ_LENGTH,
914 		1,
915 		sizeof(struct ionic_notifyq_cmd),
916 		sizeof(union ionic_notifyq_comp),
917 		0,
918 		(struct ionic_qcq **)&nqcq);
919 	if (err)
920 		return err;
921 
922 	err = ionic_intr_alloc(lif, &nqcq->intr);
923 	if (err) {
924 		ionic_qcq_free(&nqcq->qcq);
925 		return err;
926 	}
927 
928 	ionic_intr_mask_assert(idev->intr_ctrl, nqcq->intr.index,
929 		IONIC_INTR_MASK_SET);
930 
931 	lif->notifyqcq = nqcq;
932 
933 	return 0;
934 }
935 
936 static void
937 ionic_lif_queue_identify(struct ionic_lif *lif)
938 {
939 	struct ionic_adapter *adapter = lif->adapter;
940 	struct ionic_dev *idev = &adapter->idev;
941 	union ionic_q_identity *q_ident = &adapter->ident.txq;
942 	uint32_t q_words = RTE_DIM(q_ident->words);
943 	uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data);
944 	uint32_t i, nwords, qtype;
945 	int err;
946 
947 	for (qtype = 0; qtype < RTE_DIM(ionic_qtype_vers); qtype++) {
948 		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
949 
950 		/* Filter out the types this driver knows about */
951 		switch (qtype) {
952 		case IONIC_QTYPE_ADMINQ:
953 		case IONIC_QTYPE_NOTIFYQ:
954 		case IONIC_QTYPE_RXQ:
955 		case IONIC_QTYPE_TXQ:
956 			break;
957 		default:
958 			continue;
959 		}
960 
961 		memset(qti, 0, sizeof(*qti));
962 
963 		ionic_dev_cmd_queue_identify(idev, IONIC_LIF_TYPE_CLASSIC,
964 			qtype, ionic_qtype_vers[qtype]);
965 		err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
966 		if (err == -EINVAL) {
967 			IONIC_PRINT(ERR, "qtype %d not supported\n", qtype);
968 			continue;
969 		} else if (err == -EIO) {
970 			IONIC_PRINT(ERR, "q_ident failed, older FW\n");
971 			return;
972 		} else if (err) {
973 			IONIC_PRINT(ERR, "q_ident failed, qtype %d: %d\n",
974 				qtype, err);
975 			return;
976 		}
977 
978 		nwords = RTE_MIN(q_words, cmd_words);
979 		for (i = 0; i < nwords; i++)
980 			q_ident->words[i] = ioread32(&idev->dev_cmd->data[i]);
981 
982 		qti->version   = q_ident->version;
983 		qti->supported = q_ident->supported;
984 		qti->features  = rte_le_to_cpu_64(q_ident->features);
985 		qti->desc_sz   = rte_le_to_cpu_16(q_ident->desc_sz);
986 		qti->comp_sz   = rte_le_to_cpu_16(q_ident->comp_sz);
987 		qti->sg_desc_sz   = rte_le_to_cpu_16(q_ident->sg_desc_sz);
988 		qti->max_sg_elems = rte_le_to_cpu_16(q_ident->max_sg_elems);
989 		qti->sg_desc_stride =
990 			rte_le_to_cpu_16(q_ident->sg_desc_stride);
991 
992 		IONIC_PRINT(DEBUG, " qtype[%d].version = %d",
993 			qtype, qti->version);
994 		IONIC_PRINT(DEBUG, " qtype[%d].supported = %#x",
995 			qtype, qti->supported);
996 		IONIC_PRINT(DEBUG, " qtype[%d].features = %#jx",
997 			qtype, qti->features);
998 		IONIC_PRINT(DEBUG, " qtype[%d].desc_sz = %d",
999 			qtype, qti->desc_sz);
1000 		IONIC_PRINT(DEBUG, " qtype[%d].comp_sz = %d",
1001 			qtype, qti->comp_sz);
1002 		IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_sz = %d",
1003 			qtype, qti->sg_desc_sz);
1004 		IONIC_PRINT(DEBUG, " qtype[%d].max_sg_elems = %d",
1005 			qtype, qti->max_sg_elems);
1006 		IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_stride = %d",
1007 			qtype, qti->sg_desc_stride);
1008 	}
1009 }
1010 
/*
 * Allocate all per-LIF resources: queue arrays, notify and admin
 * queues, and the DMA-able LIF info area. Also negotiates queue-type
 * versions with the firmware and decides whether descriptor rings may
 * live in controller memory (CMB).
 *
 * NOTE(review): error paths here return without unwinding earlier
 * allocations — presumably the caller invokes ionic_lif_free() on
 * failure; confirm the caller contract.
 */
int
ionic_lif_alloc(struct ionic_lif *lif)
{
	struct ionic_adapter *adapter = lif->adapter;
	uint32_t socket_id = rte_socket_id();
	int err;

	/*
	 * lif->name was zeroed on allocation.
	 * Copy (sizeof() - 1) bytes to ensure that it is NULL terminated.
	 */
	memcpy(lif->name, lif->eth_dev->data->name, sizeof(lif->name) - 1);

	IONIC_PRINT(DEBUG, "LIF: %s", lif->name);

	/* Fill lif->qtype_info[] from firmware queue identify */
	ionic_lif_queue_identify(lif);

	/* Tx SG v1 (TXQ version >= 1) is required by this driver */
	if (lif->qtype_info[IONIC_QTYPE_TXQ].version < 1) {
		IONIC_PRINT(ERR, "FW too old, please upgrade");
		return -ENXIO;
	}

	/* CMB rings need BAR 2 plus RXQ v2 / TXQ v3 support */
	if (adapter->q_in_cmb) {
		if (adapter->bars.num_bars >= 3 &&
		    lif->qtype_info[IONIC_QTYPE_RXQ].version >= 2 &&
		    lif->qtype_info[IONIC_QTYPE_TXQ].version >= 3) {
			IONIC_PRINT(INFO, "%s enabled on %s",
				PMD_IONIC_CMB_KVARG, lif->name);
			lif->state |= IONIC_LIF_F_Q_IN_CMB;
		} else {
			IONIC_PRINT(ERR, "%s not supported on %s, disabled",
				PMD_IONIC_CMB_KVARG, lif->name);
		}
	}

	/* NOTE(review): misleading message — the lif info area is
	 * actually allocated further below; this section sets up locks
	 * and the doorbell page.
	 */
	IONIC_PRINT(DEBUG, "Allocating Lif Info");

	rte_spinlock_init(&lif->adminq_lock);
	rte_spinlock_init(&lif->adminq_service_lock);

	lif->kern_dbpage = adapter->idev.db_pages;
	if (!lif->kern_dbpage) {
		IONIC_PRINT(ERR, "Cannot map dbpage, aborting");
		return -ENOMEM;
	}

	lif->txqcqs = rte_calloc_socket("ionic",
				adapter->max_ntxqs_per_lif,
				sizeof(*lif->txqcqs),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (!lif->txqcqs) {
		IONIC_PRINT(ERR, "Cannot allocate tx queues array");
		return -ENOMEM;
	}

	lif->rxqcqs = rte_calloc_socket("ionic",
				adapter->max_nrxqs_per_lif,
				sizeof(*lif->rxqcqs),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (!lif->rxqcqs) {
		IONIC_PRINT(ERR, "Cannot allocate rx queues array");
		return -ENOMEM;
	}

	IONIC_PRINT(DEBUG, "Allocating Notify Queue");

	err = ionic_notify_qcq_alloc(lif);
	if (err) {
		IONIC_PRINT(ERR, "Cannot allocate notify queue");
		return err;
	}

	IONIC_PRINT(DEBUG, "Allocating Admin Queue");

	err = ionic_admin_qcq_alloc(lif);
	if (err) {
		IONIC_PRINT(ERR, "Cannot allocate admin queue");
		return err;
	}

	IONIC_PRINT(DEBUG, "Allocating Lif Info");

	/* DMA-able info area shared with the device, page-aligned */
	lif->info_sz = RTE_ALIGN(sizeof(*lif->info), rte_mem_page_size());

	lif->info_z = rte_eth_dma_zone_reserve(lif->eth_dev,
		"lif_info", 0 /* queue_idx*/,
		lif->info_sz, IONIC_ALIGN, socket_id);
	if (!lif->info_z) {
		IONIC_PRINT(ERR, "Cannot allocate lif info memory");
		return -ENOMEM;
	}

	lif->info = lif->info_z->addr;
	lif->info_pa = lif->info_z->iova;

	return 0;
}
1108 
1109 void
1110 ionic_lif_free(struct ionic_lif *lif)
1111 {
1112 	if (lif->notifyqcq) {
1113 		ionic_qcq_free(&lif->notifyqcq->qcq);
1114 		lif->notifyqcq = NULL;
1115 	}
1116 
1117 	if (lif->adminqcq) {
1118 		ionic_qcq_free(&lif->adminqcq->qcq);
1119 		lif->adminqcq = NULL;
1120 	}
1121 
1122 	if (lif->txqcqs) {
1123 		rte_free(lif->txqcqs);
1124 		lif->txqcqs = NULL;
1125 	}
1126 
1127 	if (lif->rxqcqs) {
1128 		rte_free(lif->rxqcqs);
1129 		lif->rxqcqs = NULL;
1130 	}
1131 
1132 	if (lif->info) {
1133 		rte_memzone_free(lif->info_z);
1134 		lif->info = NULL;
1135 	}
1136 }
1137 
1138 void
1139 ionic_lif_free_queues(struct ionic_lif *lif)
1140 {
1141 	uint32_t i;
1142 
1143 	for (i = 0; i < lif->ntxqcqs; i++) {
1144 		ionic_dev_tx_queue_release(lif->eth_dev, i);
1145 		lif->eth_dev->data->tx_queues[i] = NULL;
1146 	}
1147 	for (i = 0; i < lif->nrxqcqs; i++) {
1148 		ionic_dev_rx_queue_release(lif->eth_dev, i);
1149 		lif->eth_dev->data->rx_queues[i] = NULL;
1150 	}
1151 }
1152 
/*
 * Program the LIF's RSS settings (hash types, hash key, and the DMA
 * address of the indirection table) via a LIF_SETATTR admin command.
 *
 * @types: bitmask of RSS hash types to enable
 * @key:   new hash key, or NULL to reuse the cached lif->rss_hash_key
 * @indir: new indirection table contents, or NULL to keep the current
 *         contents of lif->rss_ind_tbl
 *
 * Returns the status of ionic_adminq_post_wait().
 */
int
ionic_lif_rss_config(struct ionic_lif *lif,
		const uint16_t types, const uint8_t *key, const uint32_t *indir)
{
	struct ionic_adapter *adapter = lif->adapter;
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.types = rte_cpu_to_le_16(types),
			/* Device reads the indirection table via DMA */
			.rss.addr = rte_cpu_to_le_64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i;
	uint16_t tbl_sz =
		rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz);

	IONIC_PRINT_CALL();

	lif->rss_types = types;

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir)
		/* Element-wise copy: indir entries may be wider than the
		 * table's element type, so memcpy() would not be equivalent
		 */
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}
1187 
/*
 * Allocate (once) and populate the RSS indirection table, then push
 * the default RSS configuration to the device.
 */
static int
ionic_lif_rss_setup(struct ionic_lif *lif)
{
	struct ionic_adapter *adapter = lif->adapter;
	/* Repeating 0x6D5A pattern: the "symmetric" Toeplitz hash key */
	static const uint8_t toeplitz_symmetric_key[] = {
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
	};
	uint32_t i;
	uint16_t tbl_sz =
		rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz);

	IONIC_PRINT_CALL();

	/* Reserve the DMA-able indirection table only on first use */
	if (!lif->rss_ind_tbl_z) {
		lif->rss_ind_tbl_z = rte_eth_dma_zone_reserve(lif->eth_dev,
					"rss_ind_tbl", 0 /* queue_idx */,
					sizeof(*lif->rss_ind_tbl) * tbl_sz,
					IONIC_ALIGN, rte_socket_id());
		if (!lif->rss_ind_tbl_z) {
			IONIC_PRINT(ERR, "OOM");
			return -ENOMEM;
		}

		lif->rss_ind_tbl = lif->rss_ind_tbl_z->addr;
		lif->rss_ind_tbl_pa = lif->rss_ind_tbl_z->iova;
	}

	/* Repopulate only when the Rx queue count changed since last setup */
	if (lif->rss_ind_tbl_nrxqcqs != lif->nrxqcqs) {
		lif->rss_ind_tbl_nrxqcqs = lif->nrxqcqs;

		/* Fill indirection table with 'default' values */
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = i % lif->nrxqcqs;
	}

	return ionic_lif_rss_config(lif, IONIC_RSS_OFFLOAD_ALL,
			toeplitz_symmetric_key, NULL);
}
1230 
1231 static void
1232 ionic_lif_rss_teardown(struct ionic_lif *lif)
1233 {
1234 	if (lif->rss_ind_tbl) {
1235 		lif->rss_ind_tbl = NULL;
1236 		lif->rss_ind_tbl_pa = 0;
1237 		rte_memzone_free(lif->rss_ind_tbl_z);
1238 		lif->rss_ind_tbl_z = NULL;
1239 	}
1240 }
1241 
1242 void
1243 ionic_lif_txq_deinit(struct ionic_tx_qcq *txq)
1244 {
1245 	ionic_qcq_disable(&txq->qcq);
1246 
1247 	txq->flags &= ~IONIC_QCQ_F_INITED;
1248 }
1249 
1250 void
1251 ionic_lif_rxq_deinit(struct ionic_rx_qcq *rxq)
1252 {
1253 	ionic_qcq_disable(&rxq->qcq);
1254 
1255 	rxq->flags &= ~IONIC_QCQ_F_INITED;
1256 }
1257 
/*
 * Mark the adminq as uninitialized.  Only the driver-side flag is
 * cleared; no device command is issued here.
 */
static void
ionic_lif_adminq_deinit(struct ionic_lif *lif)
{
	lif->adminqcq->flags &= ~IONIC_QCQ_F_INITED;
}
1263 
1264 static void
1265 ionic_lif_notifyq_deinit(struct ionic_lif *lif)
1266 {
1267 	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
1268 	struct ionic_dev *idev = &lif->adapter->idev;
1269 
1270 	if (!(nqcq->flags & IONIC_QCQ_F_INITED))
1271 		return;
1272 
1273 	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
1274 		IONIC_INTR_MASK_SET);
1275 
1276 	nqcq->flags &= ~IONIC_QCQ_F_INITED;
1277 }
1278 
1279 /* This acts like ionic_napi */
1280 int
1281 ionic_qcq_service(struct ionic_qcq *qcq, int budget, ionic_cq_cb cb,
1282 		void *cb_arg)
1283 {
1284 	struct ionic_cq *cq = &qcq->cq;
1285 	uint32_t work_done;
1286 
1287 	work_done = ionic_cq_service(cq, budget, cb, cb_arg);
1288 
1289 	return work_done;
1290 }
1291 
1292 static void
1293 ionic_link_status_check(struct ionic_lif *lif)
1294 {
1295 	struct ionic_adapter *adapter = lif->adapter;
1296 	bool link_up;
1297 
1298 	lif->state &= ~IONIC_LIF_F_LINK_CHECK_NEEDED;
1299 
1300 	if (!lif->info)
1301 		return;
1302 
1303 	link_up = (lif->info->status.link_status == IONIC_PORT_OPER_STATUS_UP);
1304 
1305 	if ((link_up  && adapter->link_up) ||
1306 	    (!link_up && !adapter->link_up))
1307 		return;
1308 
1309 	if (link_up) {
1310 		adapter->link_speed =
1311 			rte_le_to_cpu_32(lif->info->status.link_speed);
1312 		IONIC_PRINT(DEBUG, "Link up - %d Gbps",
1313 			adapter->link_speed);
1314 	} else {
1315 		IONIC_PRINT(DEBUG, "Link down");
1316 	}
1317 
1318 	adapter->link_up = link_up;
1319 	ionic_dev_link_update(lif->eth_dev, 0);
1320 }
1321 
1322 static void
1323 ionic_lif_handle_fw_down(struct ionic_lif *lif)
1324 {
1325 	if (lif->state & IONIC_LIF_F_FW_RESET)
1326 		return;
1327 
1328 	lif->state |= IONIC_LIF_F_FW_RESET;
1329 
1330 	if (lif->state & IONIC_LIF_F_UP) {
1331 		IONIC_PRINT(NOTICE,
1332 			"Surprise FW stop, stopping %s\n", lif->name);
1333 		ionic_lif_stop(lif);
1334 	}
1335 
1336 	IONIC_PRINT(NOTICE, "FW down, %s stopped", lif->name);
1337 }
1338 
/*
 * Process one notifyq completion descriptor (called from
 * ionic_cq_service() via ionic_qcq_service()).
 * Returns true if the descriptor carried a new event, false once the
 * queue has run dry (eid not newer than the last one seen).
 */
static bool
ionic_notifyq_cb(struct ionic_cq *cq, uint16_t cq_desc_index, void *cb_arg)
{
	union ionic_notifyq_comp *cq_desc_base = cq->base;
	union ionic_notifyq_comp *cq_desc = &cq_desc_base[cq_desc_index];
	struct ionic_lif *lif = cb_arg;

	IONIC_PRINT(DEBUG, "Notifyq callback eid = %jd ecode = %d",
		cq_desc->event.eid, cq_desc->event.ecode);

	/* Have we run out of new completions to process? */
	if (!(cq_desc->event.eid > lif->last_eid))
		return false;

	lif->last_eid = cq_desc->event.eid;

	switch (cq_desc->event.ecode) {
	case IONIC_EVENT_LINK_CHANGE:
		IONIC_PRINT(DEBUG,
			"Notifyq IONIC_EVENT_LINK_CHANGE %s "
			"eid=%jd link_status=%d link_speed=%d",
			lif->name,
			cq_desc->event.eid,
			cq_desc->link_change.link_status,
			cq_desc->link_change.link_speed);

		/* Defer the link update; it is handled by the caller */
		lif->state |= IONIC_LIF_F_LINK_CHECK_NEEDED;
		break;

	case IONIC_EVENT_RESET:
		IONIC_PRINT(NOTICE,
			"Notifyq IONIC_EVENT_RESET %s "
			"eid=%jd, reset_code=%d state=%d",
			lif->name,
			cq_desc->event.eid,
			cq_desc->reset.reset_code,
			cq_desc->reset.state);
		/* FW is going away; stop the port if it is up */
		ionic_lif_handle_fw_down(lif);
		break;

	default:
		IONIC_PRINT(WARNING, "Notifyq bad event ecode=%d eid=%jd",
			cq_desc->event.ecode, cq_desc->event.eid);
		break;
	}

	return true;
}
1387 
/*
 * Service up to 'budget' pending notifyq events.  The notifyq
 * interrupt is masked while events are processed, credits are returned
 * for the completions consumed, and the mask is cleared last.
 * Returns 0, or -1 if the notifyq has not been initialized yet.
 */
int
ionic_notifyq_handler(struct ionic_lif *lif, int budget)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
	uint32_t work_done;

	if (!(nqcq->flags & IONIC_QCQ_F_INITED)) {
		IONIC_PRINT(DEBUG, "Notifyq not yet initialized");
		return -1;
	}

	/* Mask while processing to avoid re-entry on new events */
	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_SET);

	work_done = ionic_qcq_service(&nqcq->qcq, budget,
				ionic_notifyq_cb, lif);

	/* Link-change events are only flagged in the cb; act on them here */
	if (lif->state & IONIC_LIF_F_LINK_CHECK_NEEDED)
		ionic_link_status_check(lif);

	ionic_intr_credits(idev->intr_ctrl, nqcq->intr.index,
		work_done, IONIC_INTR_CRED_RESET_COALESCE);

	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_CLEAR);

	return 0;
}
1417 
1418 static int
1419 ionic_lif_adminq_init(struct ionic_lif *lif)
1420 {
1421 	struct ionic_dev *idev = &lif->adapter->idev;
1422 	struct ionic_admin_qcq *aqcq = lif->adminqcq;
1423 	struct ionic_queue *q = &aqcq->qcq.q;
1424 	struct ionic_q_init_comp comp;
1425 	uint32_t retries = 5;
1426 	int err;
1427 
1428 retry_adminq_init:
1429 	ionic_dev_cmd_adminq_init(idev, &aqcq->qcq);
1430 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
1431 	if (err == -EAGAIN && retries > 0) {
1432 		retries--;
1433 		rte_delay_us_block(IONIC_DEVCMD_RETRY_WAIT_US);
1434 		goto retry_adminq_init;
1435 	}
1436 	if (err)
1437 		return err;
1438 
1439 	ionic_dev_cmd_comp(idev, &comp);
1440 
1441 	q->hw_type = comp.hw_type;
1442 	q->hw_index = rte_le_to_cpu_32(comp.hw_index);
1443 	q->db = ionic_db_map(lif, q);
1444 
1445 	IONIC_PRINT(DEBUG, "adminq->hw_type %d", q->hw_type);
1446 	IONIC_PRINT(DEBUG, "adminq->hw_index %d", q->hw_index);
1447 	IONIC_PRINT(DEBUG, "adminq->db %p", q->db);
1448 
1449 	aqcq->flags |= IONIC_QCQ_F_INITED;
1450 
1451 	return 0;
1452 }
1453 
/*
 * Initialize the notify queue via a Q_INIT admin command, optionally
 * binding it to an interrupt, then unmask that interrupt and mark the
 * qcq INITED.  The notifyq has no doorbell (device-driven only).
 */
static int
ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
	struct ionic_queue *q = &nqcq->qcq.q;
	uint16_t flags = IONIC_QINIT_F_ENA;
	int err;

	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = rte_cpu_to_le_32(q->index),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.ring_base = rte_cpu_to_le_64(q->base_pa),
		}
	};

	/* Only enable an interrupt if the device supports them */
	if (lif->adapter->intf->configure_intr != NULL) {
		flags |= IONIC_QINIT_F_IRQ;
		ctx.cmd.q_init.intr_index = rte_cpu_to_le_16(nqcq->intr.index);
	}
	ctx.cmd.q_init.flags = rte_cpu_to_le_16(flags);

	IONIC_PRINT(DEBUG, "notifyq_init.index %d", q->index);
	IONIC_PRINT(DEBUG, "notifyq_init.ring_base 0x%" PRIx64 "", q->base_pa);
	IONIC_PRINT(DEBUG, "notifyq_init.ring_size %d",
		ctx.cmd.q_init.ring_size);
	IONIC_PRINT(DEBUG, "notifyq_init.ver %u", ctx.cmd.q_init.ver);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
	/* No doorbell: the host never posts descriptors to the notifyq */
	q->db = NULL;

	IONIC_PRINT(DEBUG, "notifyq->hw_type %d", q->hw_type);
	IONIC_PRINT(DEBUG, "notifyq->hw_index %d", q->hw_index);
	IONIC_PRINT(DEBUG, "notifyq->db %p", q->db);

	/* Start taking event interrupts */
	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_CLEAR);

	nqcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
1508 
1509 int
1510 ionic_lif_set_features(struct ionic_lif *lif)
1511 {
1512 	struct ionic_admin_ctx ctx = {
1513 		.pending_work = true,
1514 		.cmd.lif_setattr = {
1515 			.opcode = IONIC_CMD_LIF_SETATTR,
1516 			.attr = IONIC_LIF_ATTR_FEATURES,
1517 			.features = rte_cpu_to_le_64(lif->features),
1518 		},
1519 	};
1520 	int err;
1521 
1522 	err = ionic_adminq_post_wait(lif, &ctx);
1523 	if (err)
1524 		return err;
1525 
1526 	lif->hw_features = rte_le_to_cpu_64(ctx.cmd.lif_setattr.features &
1527 						ctx.comp.lif_setattr.features);
1528 
1529 	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1530 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_TX_TAG");
1531 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1532 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_STRIP");
1533 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1534 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_FILTER");
1535 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1536 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_HASH");
1537 	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1538 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_SG");
1539 	if (lif->hw_features & IONIC_ETH_HW_RX_SG)
1540 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_SG");
1541 	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1542 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_CSUM");
1543 	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1544 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_CSUM");
1545 	if (lif->hw_features & IONIC_ETH_HW_TSO)
1546 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO");
1547 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1548 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPV6");
1549 	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1550 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_ECN");
1551 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1552 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE");
1553 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1554 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE_CSUM");
1555 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1556 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP4");
1557 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1558 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP6");
1559 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1560 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP");
1561 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1562 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP_CSUM");
1563 
1564 	return 0;
1565 }
1566 
/*
 * Initialize a Tx queue on the device via a Q_INIT admin command,
 * then record the HW identity, map the doorbell, and mark the qcq
 * INITED.  Handles the SG and CMB (controller memory buffer) variants.
 */
int
ionic_lif_txq_init(struct ionic_tx_qcq *txq)
{
	struct ionic_qcq *qcq = &txq->qcq;
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = qcq->lif;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = rte_cpu_to_le_32(q->index),
			.flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
		},
	};
	int err;

	if (txq->flags & IONIC_QCQ_F_SG)
		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);
	if (txq->flags & IONIC_QCQ_F_CMB) {
		/* CMB queues use the device-resident ring instead */
		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);
		ctx.cmd.q_init.ring_base = rte_cpu_to_le_64(q->cmb_base_pa);
	} else {
		ctx.cmd.q_init.ring_base = rte_cpu_to_le_64(q->base_pa);
	}

	IONIC_PRINT(DEBUG, "txq_init.index %d", q->index);
	IONIC_PRINT(DEBUG, "txq_init.ring_base 0x%" PRIx64 "", q->base_pa);
	IONIC_PRINT(DEBUG, "txq_init.ring_size %d",
		ctx.cmd.q_init.ring_size);
	IONIC_PRINT(DEBUG, "txq_init.ver %u", ctx.cmd.q_init.ver);

	/* Reset driver-side ring state before the device (re)starts it */
	ionic_q_reset(q);
	ionic_cq_reset(cq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
	q->db = ionic_db_map(lif, q);

	IONIC_PRINT(DEBUG, "txq->hw_type %d", q->hw_type);
	IONIC_PRINT(DEBUG, "txq->hw_index %d", q->hw_index);
	IONIC_PRINT(DEBUG, "txq->db %p", q->db);

	txq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
1624 
/*
 * Initialize an Rx queue on the device via a Q_INIT admin command,
 * then record the HW identity, map the doorbell, and mark the qcq
 * INITED.  Handles the SG and CMB (controller memory buffer) variants.
 */
int
ionic_lif_rxq_init(struct ionic_rx_qcq *rxq)
{
	struct ionic_qcq *qcq = &rxq->qcq;
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = qcq->lif;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = rte_cpu_to_le_32(q->index),
			.flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
		},
	};
	int err;

	if (rxq->flags & IONIC_QCQ_F_SG)
		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);
	if (rxq->flags & IONIC_QCQ_F_CMB) {
		/* CMB queues use the device-resident ring instead */
		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);
		ctx.cmd.q_init.ring_base = rte_cpu_to_le_64(q->cmb_base_pa);
	} else {
		ctx.cmd.q_init.ring_base = rte_cpu_to_le_64(q->base_pa);
	}

	IONIC_PRINT(DEBUG, "rxq_init.index %d", q->index);
	IONIC_PRINT(DEBUG, "rxq_init.ring_base 0x%" PRIx64 "", q->base_pa);
	IONIC_PRINT(DEBUG, "rxq_init.ring_size %d",
		ctx.cmd.q_init.ring_size);
	IONIC_PRINT(DEBUG, "rxq_init.ver %u", ctx.cmd.q_init.ver);

	/* Reset driver-side ring state before the device (re)starts it */
	ionic_q_reset(q);
	ionic_cq_reset(cq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
	q->db = ionic_db_map(lif, q);

	rxq->flags |= IONIC_QCQ_F_INITED;

	IONIC_PRINT(DEBUG, "rxq->hw_type %d", q->hw_type);
	IONIC_PRINT(DEBUG, "rxq->hw_index %d", q->hw_index);
	IONIC_PRINT(DEBUG, "rxq->db %p", q->db);

	return 0;
}
1682 
1683 static int
1684 ionic_station_set(struct ionic_lif *lif)
1685 {
1686 	struct ionic_admin_ctx ctx = {
1687 		.pending_work = true,
1688 		.cmd.lif_getattr = {
1689 			.opcode = IONIC_CMD_LIF_GETATTR,
1690 			.attr = IONIC_LIF_ATTR_MAC,
1691 		},
1692 	};
1693 	int err;
1694 
1695 	IONIC_PRINT_CALL();
1696 
1697 	err = ionic_adminq_post_wait(lif, &ctx);
1698 	if (err)
1699 		return err;
1700 
1701 	memcpy(lif->mac_addr, ctx.comp.lif_getattr.mac, RTE_ETHER_ADDR_LEN);
1702 
1703 	return 0;
1704 }
1705 
/*
 * Push lif->name to the FW via LIF_SETATTR.  Best-effort: the admin
 * command status is ignored.
 */
static void
ionic_lif_set_name(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	/* Copy at most size-1 bytes; the designated initializer above
	 * zeroed ctx, so the last byte stays 0 (NUL terminator).
	 */
	memcpy(ctx.cmd.lif_setattr.name, lif->name,
		sizeof(ctx.cmd.lif_setattr.name) - 1);

	ionic_adminq_post_wait(lif, &ctx);
}
1722 
/*
 * Bring up the LIF: issue LIF_INIT (with retries while the FW is
 * busy), initialize the admin and notify queues, apply the initial
 * feature set, init the Rx filter table, read the station MAC, and
 * set the LIF name.  On failure the steps are unwound in reverse
 * order via the goto chain.
 */
int
ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_lif_init_comp comp;
	uint32_t retries = 5;
	int err;

	memset(&lif->stats_base, 0, sizeof(lif->stats_base));

retry_lif_init:
	ionic_dev_cmd_lif_init(idev, lif->info_pa);
	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
	if (err == -EAGAIN && retries > 0) {
		retries--;
		rte_delay_us_block(IONIC_DEVCMD_RETRY_WAIT_US);
		goto retry_lif_init;
	}
	if (err)
		return err;

	ionic_dev_cmd_comp(idev, &comp);

	/* NOTE(review): comp.hw_index comes from the device; the adminq
	 * path uses rte_le_to_cpu_*() for the same purpose.  Both macros
	 * perform the identical byte swap on BE hosts, so behavior is
	 * unchanged, but rte_le_to_cpu_16() would state the intent --
	 * confirm before changing.
	 */
	lif->hw_index = rte_cpu_to_le_16(comp.hw_index);

	err = ionic_lif_adminq_init(lif);
	if (err)
		return err;

	err = ionic_lif_notifyq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	/*
	 * Configure initial feature set
	 * This will be updated later by the dev_configure() step
	 */
	lif->features = IONIC_ETH_HW_RX_HASH | IONIC_ETH_HW_VLAN_RX_FILTER;

	err = ionic_lif_set_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	err = ionic_rx_filters_init(lif);
	if (err)
		goto err_out_notifyq_deinit;

	err = ionic_station_set(lif);
	if (err)
		goto err_out_rx_filter_deinit;

	ionic_lif_set_name(lif);

	lif->state |= IONIC_LIF_F_INITED;

	return 0;

err_out_rx_filter_deinit:
	ionic_rx_filters_deinit(lif);

err_out_notifyq_deinit:
	ionic_lif_notifyq_deinit(lif);

err_out_adminq_deinit:
	ionic_lif_adminq_deinit(lif);

	return err;
}
1791 
/*
 * Tear down state created by ionic_lif_init(): Rx filters, RSS
 * resources, and the notify and admin queues.  No-op unless the LIF
 * was marked INITED.
 */
void
ionic_lif_deinit(struct ionic_lif *lif)
{
	if (!(lif->state & IONIC_LIF_F_INITED))
		return;

	ionic_rx_filters_deinit(lif);
	ionic_lif_rss_teardown(lif);
	ionic_lif_notifyq_deinit(lif);
	ionic_lif_adminq_deinit(lif);

	lif->state &= ~IONIC_LIF_F_INITED;
}
1805 
1806 void
1807 ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)
1808 {
1809 	struct rte_eth_dev *eth_dev = lif->eth_dev;
1810 	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1811 
1812 	/*
1813 	 * IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so
1814 	 * set RTE_ETH_RX_OFFLOAD_VLAN_FILTER and ignore RTE_ETH_VLAN_FILTER_MASK
1815 	 */
1816 	rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
1817 
1818 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1819 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1820 			lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
1821 		else
1822 			lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
1823 	}
1824 }
1825 
1826 void
1827 ionic_lif_configure_rx_sg_offload(struct ionic_lif *lif)
1828 {
1829 	struct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode;
1830 
1831 	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
1832 		lif->features |= IONIC_ETH_HW_RX_SG;
1833 		lif->eth_dev->data->scattered_rx = 1;
1834 	} else {
1835 		lif->features &= ~IONIC_ETH_HW_RX_SG;
1836 		lif->eth_dev->data->scattered_rx = 0;
1837 	}
1838 }
1839 
/*
 * Apply the ethdev configuration to the LIF: clamp the queue counts
 * to what the device identity allows, then translate the Rx/Tx
 * offload flags into lif->features bits.
 */
void
ionic_lif_configure(struct ionic_lif *lif)
{
	struct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode;
	struct rte_eth_txmode *txmode = &lif->eth_dev->data->dev_conf.txmode;
	struct ionic_identity *ident = &lif->adapter->ident;
	union ionic_lif_config *cfg = &ident->lif.eth.config;
	uint32_t ntxqs_per_lif =
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
	uint32_t nrxqs_per_lif =
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]);
	uint32_t nrxqs = lif->eth_dev->data->nb_rx_queues;
	uint32_t ntxqs = lif->eth_dev->data->nb_tx_queues;

	lif->port_id = lif->eth_dev->data->port_id;

	IONIC_PRINT(DEBUG, "Configuring LIF on port %u",
		lif->port_id);

	/* Use the smaller of the app's request and the device limit */
	if (nrxqs > 0)
		nrxqs_per_lif = RTE_MIN(nrxqs_per_lif, nrxqs);

	if (ntxqs > 0)
		ntxqs_per_lif = RTE_MIN(ntxqs_per_lif, ntxqs);

	lif->nrxqcqs = nrxqs_per_lif;
	lif->ntxqcqs = ntxqs_per_lif;

	/* Update the LIF configuration based on the eth_dev */

	/*
	 * NB: While it is true that RSS_HASH is always enabled on ionic,
	 *     setting this flag unconditionally causes problems in DTS.
	 * rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
	 */

	/* RX per-port */

	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM ||
	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM ||
	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
		lif->features |= IONIC_ETH_HW_RX_CSUM;
	else
		lif->features &= ~IONIC_ETH_HW_RX_CSUM;

	/*
	 * NB: RX_SG may be enabled later during rx_queue_setup() if
	 * required by the mbuf/mtu configuration
	 */
	ionic_lif_configure_rx_sg_offload(lif);

	/* Covers VLAN_STRIP */
	ionic_lif_configure_vlan_offload(lif, RTE_ETH_VLAN_STRIP_MASK);

	/* TX per-port */

	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
		lif->features |= IONIC_ETH_HW_TX_CSUM;
	else
		lif->features &= ~IONIC_ETH_HW_TX_CSUM;

	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
		lif->features |= IONIC_ETH_HW_VLAN_TX_TAG;
	else
		lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG;

	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
		lif->features |= IONIC_ETH_HW_TX_SG;
	else
		lif->features &= ~IONIC_ETH_HW_TX_SG;

	/* TCP TSO implies the IPv6 and ECN TSO variants as well */
	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
		lif->features |= IONIC_ETH_HW_TSO;
		lif->features |= IONIC_ETH_HW_TSO_IPV6;
		lif->features |= IONIC_ETH_HW_TSO_ECN;
	} else {
		lif->features &= ~IONIC_ETH_HW_TSO;
		lif->features &= ~IONIC_ETH_HW_TSO_IPV6;
		lif->features &= ~IONIC_ETH_HW_TSO_ECN;
	}
}
1925 
/*
 * Start the LIF: set up RSS, apply a default Rx mode if none is set,
 * start all non-deferred Rx and Tx queues, mark the LIF up, and do an
 * initial link-status check.  Returns 0 or the first error hit.
 */
int
ionic_lif_start(struct ionic_lif *lif)
{
	uint32_t rx_mode;
	uint32_t i;
	int err;

	err = ionic_lif_rss_setup(lif);
	if (err)
		return err;

	/* Only apply the default Rx mode if none has been set yet */
	if (!lif->rx_mode) {
		IONIC_PRINT(DEBUG, "Setting RX mode on %s",
			lif->name);

		rx_mode  = IONIC_RX_MODE_F_UNICAST;
		rx_mode |= IONIC_RX_MODE_F_MULTICAST;
		rx_mode |= IONIC_RX_MODE_F_BROADCAST;

		ionic_set_rx_mode(lif, rx_mode);
	}

	IONIC_PRINT(DEBUG, "Starting %u RX queues and %u TX queues "
		"on port %u",
		lif->nrxqcqs, lif->ntxqcqs, lif->port_id);

	/* Deferred queues are started individually by the application */
	for (i = 0; i < lif->nrxqcqs; i++) {
		struct ionic_rx_qcq *rxq = lif->rxqcqs[i];
		if (!(rxq->flags & IONIC_QCQ_F_DEFERRED)) {
			err = ionic_dev_rx_queue_start(lif->eth_dev, i);

			if (err)
				return err;
		}
	}

	for (i = 0; i < lif->ntxqcqs; i++) {
		struct ionic_tx_qcq *txq = lif->txqcqs[i];
		if (!(txq->flags & IONIC_QCQ_F_DEFERRED)) {
			err = ionic_dev_tx_queue_start(lif->eth_dev, i);

			if (err)
				return err;
		}
	}

	/* Carrier ON here */
	lif->state |= IONIC_LIF_F_UP;

	ionic_link_status_check(lif);

	return 0;
}
1979 
/*
 * Run the LIF_IDENTIFY dev-cmd and copy the identity words out of the
 * dev-cmd data registers into adapter->ident.lif, then log the
 * capabilities and per-type queue counts.
 */
int
ionic_lif_identify(struct ionic_adapter *adapter)
{
	struct ionic_dev *idev = &adapter->idev;
	struct ionic_identity *ident = &adapter->ident;
	union ionic_lif_config *cfg = &ident->lif.eth.config;
	uint32_t lif_words = RTE_DIM(ident->lif.words);
	uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data);
	uint32_t i, nwords;
	int err;

	ionic_dev_cmd_lif_identify(idev, IONIC_LIF_TYPE_CLASSIC,
		IONIC_IDENTITY_VERSION_1);
	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
	if (err)
		return (err);

	/* Copy no more words than either side can hold */
	nwords = RTE_MIN(lif_words, cmd_words);
	for (i = 0; i < nwords; i++)
		ident->lif.words[i] = ioread32(&idev->dev_cmd->data[i]);

	IONIC_PRINT(INFO, "capabilities 0x%" PRIx64 " ",
		rte_le_to_cpu_64(ident->lif.capabilities));

	IONIC_PRINT(INFO, "eth.max_ucast_filters 0x%" PRIx32 " ",
		rte_le_to_cpu_32(ident->lif.eth.max_ucast_filters));
	IONIC_PRINT(INFO, "eth.max_mcast_filters 0x%" PRIx32 " ",
		rte_le_to_cpu_32(ident->lif.eth.max_mcast_filters));

	IONIC_PRINT(INFO, "eth.features 0x%" PRIx64 " ",
		rte_le_to_cpu_64(cfg->features));
	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_ADMINQ] 0x%" PRIx32 " ",
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_ADMINQ]));
	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] 0x%" PRIx32 " ",
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_NOTIFYQ]));
	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_RXQ] 0x%" PRIx32 " ",
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]));
	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_TXQ] 0x%" PRIx32 " ",
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]));

	return 0;
}
2022 
2023 int
2024 ionic_lifs_size(struct ionic_adapter *adapter)
2025 {
2026 	struct ionic_identity *ident = &adapter->ident;
2027 	union ionic_lif_config *cfg = &ident->lif.eth.config;
2028 	uint32_t nintrs, dev_nintrs = rte_le_to_cpu_32(ident->dev.nintrs);
2029 
2030 	adapter->max_ntxqs_per_lif =
2031 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
2032 	adapter->max_nrxqs_per_lif =
2033 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]);
2034 
2035 	nintrs = 1 /* notifyq */;
2036 
2037 	if (nintrs > dev_nintrs) {
2038 		IONIC_PRINT(ERR,
2039 			"At most %d intr supported, minimum req'd is %u",
2040 			dev_nintrs, nintrs);
2041 		return -ENOSPC;
2042 	}
2043 
2044 	adapter->nintrs = nintrs;
2045 
2046 	return 0;
2047 }
2048