xref: /dpdk/drivers/net/ionic/ionic_lif.c (revision 51de3175e352da37438b1afa08ae4255e9cdc130)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2022 Advanced Micro Devices, Inc.
3  */
4 
5 #include <rte_malloc.h>
6 #include <ethdev_driver.h>
7 
8 #include "ionic.h"
9 #include "ionic_logs.h"
10 #include "ionic_lif.h"
11 #include "ionic_ethdev.h"
12 #include "ionic_rx_filter.h"
13 #include "ionic_rxtx.h"
14 
15 /* queuetype support level */
16 static const uint8_t ionic_qtype_vers[IONIC_QTYPE_MAX] = {
17 	[IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
18 	[IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
19 	[IONIC_QTYPE_RXQ]     = 2,   /* 0 = Base version with CQ+SG support
20 				      * 1 =       ... with EQ
21 				      * 2 =       ... with CMB
22 				      */
23 	[IONIC_QTYPE_TXQ]     = 3,   /* 0 = Base version with CQ+SG support
24 				      * 1 =   ... with Tx SG version 1
25 				      * 2 =       ... with EQ
26 				      * 3 =       ... with CMB
27 				      */
28 };
29 
30 static int ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr);
31 static int ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr);
32 
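/*
 * Post an adminq Q_CONTROL(DISABLE) command for the given queue and wait
 * for its completion.
 */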
33 static int
34 ionic_qcq_disable(struct ionic_qcq *qcq)
35 {
36 	struct ionic_queue *q = &qcq->q;
37 	struct ionic_lif *lif = qcq->lif;
38 	struct ionic_admin_ctx ctx = {
39 		.pending_work = true,
40 		.cmd.q_control = {
41 			.opcode = IONIC_CMD_Q_CONTROL,
42 			.type = q->type,
43 			.index = rte_cpu_to_le_32(q->index),
44 			.oper = IONIC_Q_DISABLE,
45 		},
46 	};
47 
48 	return ionic_adminq_post_wait(lif, &ctx);
49 }
50 
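/*
 * Stop all initialized RX and TX queues and mark the LIF as down.
 */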
51 void
52 ionic_lif_stop(struct ionic_lif *lif)
53 {
54 	uint32_t i;
55 
56 	IONIC_PRINT_CALL();
57 
58 	lif->state &= ~IONIC_LIF_F_UP;
59 
60 	for (i = 0; i < lif->nrxqcqs; i++) {
61 		struct ionic_rx_qcq *rxq = lif->rxqcqs[i];
62 		if (rxq->flags & IONIC_QCQ_F_INITED)
63 			(void)ionic_dev_rx_queue_stop(lif->eth_dev, i);
64 	}
65 
66 	for (i = 0; i < lif->ntxqcqs; i++) {
67 		struct ionic_tx_qcq *txq = lif->txqcqs[i];
68 		if (txq->flags & IONIC_QCQ_F_INITED)
69 			(void)ionic_dev_tx_queue_stop(lif->eth_dev, i);
70 	}
71 }
72 
73 void
74 ionic_lif_reset(struct ionic_lif *lif)
75 {
76 	struct ionic_dev *idev = &lif->adapter->idev;
77 	int err;
78 
79 	IONIC_PRINT_CALL();
80 
81 	ionic_dev_cmd_lif_reset(idev);
82 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
83 	if (err)
84 		IONIC_PRINT(WARNING, "Failed to reset %s", lif->name);
85 }
86 
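/*
 * Gather absolute (never-reset) counters from the firmware LIF stats and
 * the per-queue software stats. ionic_lif_get_stats() subtracts the base
 * snapshot saved by ionic_lif_reset_stats() to report relative values.
 */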
87 static void
88 ionic_lif_get_abs_stats(const struct ionic_lif *lif, struct rte_eth_stats *stats)
89 {
90 	struct ionic_lif_stats *ls = &lif->info->stats;
91 	uint32_t i;
92 	uint32_t num_rx_q_counters = RTE_MIN(lif->nrxqcqs, (uint32_t)
93 			RTE_ETHDEV_QUEUE_STAT_CNTRS);
94 	uint32_t num_tx_q_counters = RTE_MIN(lif->ntxqcqs, (uint32_t)
95 			RTE_ETHDEV_QUEUE_STAT_CNTRS);
96 
97 	memset(stats, 0, sizeof(*stats));
98 
99 	if (lif->info == NULL) {
100 		IONIC_PRINT(DEBUG, "Stats on port %u not yet initialized",
101 			lif->port_id);
102 		return;
103 	}
104 
105 	/* RX */
106 
107 	stats->ipackets = ls->rx_ucast_packets +
108 		ls->rx_mcast_packets +
109 		ls->rx_bcast_packets;
110 
111 	stats->ibytes = ls->rx_ucast_bytes +
112 		ls->rx_mcast_bytes +
113 		ls->rx_bcast_bytes;
114 
115 	for (i = 0; i < lif->nrxqcqs; i++) {
116 		struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats;
117 		stats->ierrors +=
118 			rx_stats->bad_cq_status +
119 			rx_stats->bad_len;
120 	}
121 
122 	stats->imissed +=
123 		ls->rx_ucast_drop_packets +
124 		ls->rx_mcast_drop_packets +
125 		ls->rx_bcast_drop_packets;
126 
127 	stats->ierrors +=
128 		ls->rx_dma_error +
129 		ls->rx_desc_fetch_error +
130 		ls->rx_desc_data_error;
131 
132 	for (i = 0; i < num_rx_q_counters; i++) {
133 		struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats;
134 		stats->q_ipackets[i] = rx_stats->packets;
135 		stats->q_ibytes[i] = rx_stats->bytes;
136 		stats->q_errors[i] =
137 			rx_stats->bad_cq_status +
138 			rx_stats->bad_len;
139 	}
140 
141 	/* TX */
142 
143 	stats->opackets = ls->tx_ucast_packets +
144 		ls->tx_mcast_packets +
145 		ls->tx_bcast_packets;
146 
147 	stats->obytes = ls->tx_ucast_bytes +
148 		ls->tx_mcast_bytes +
149 		ls->tx_bcast_bytes;
150 
151 	for (i = 0; i < lif->ntxqcqs; i++) {
152 		struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats;
153 		stats->oerrors += tx_stats->drop;
154 	}
155 
156 	stats->oerrors +=
157 		ls->tx_ucast_drop_packets +
158 		ls->tx_mcast_drop_packets +
159 		ls->tx_bcast_drop_packets;
160 
161 	stats->oerrors +=
162 		ls->tx_dma_error +
163 		ls->tx_queue_disabled +
164 		ls->tx_desc_fetch_error +
165 		ls->tx_desc_data_error;
166 
167 	for (i = 0; i < num_tx_q_counters; i++) {
168 		struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats;
169 		stats->q_opackets[i] = tx_stats->packets;
170 		stats->q_obytes[i] = tx_stats->bytes;
171 	}
172 }
173 
174 void
175 ionic_lif_get_stats(const struct ionic_lif *lif,
176 		struct rte_eth_stats *stats)
177 {
178 	ionic_lif_get_abs_stats(lif, stats);
179 
180 	stats->ipackets  -= lif->stats_base.ipackets;
181 	stats->opackets  -= lif->stats_base.opackets;
182 	stats->ibytes    -= lif->stats_base.ibytes;
183 	stats->obytes    -= lif->stats_base.obytes;
184 	stats->imissed   -= lif->stats_base.imissed;
185 	stats->ierrors   -= lif->stats_base.ierrors;
186 	stats->oerrors   -= lif->stats_base.oerrors;
187 	stats->rx_nombuf -= lif->stats_base.rx_nombuf;
188 }
189 
190 void
191 ionic_lif_reset_stats(struct ionic_lif *lif)
192 {
193 	uint32_t i;
194 
195 	for (i = 0; i < lif->nrxqcqs; i++) {
196 		memset(&lif->rxqcqs[i]->stats, 0,
197 			sizeof(struct ionic_rx_stats));
198 		memset(&lif->txqcqs[i]->stats, 0,
199 			sizeof(struct ionic_tx_stats));
200 	}
201 
202 	ionic_lif_get_abs_stats(lif, &lif->stats_base);
203 }
204 
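/*
 * HW stats are exported as deltas: struct ionic_lif_stats is treated as a
 * flat array of 64-bit counters and compared word by word against the base
 * snapshot saved by ionic_lif_reset_hw_stats().
 */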
205 void
206 ionic_lif_get_hw_stats(struct ionic_lif *lif, struct ionic_lif_stats *stats)
207 {
208 	uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t);
209 	uint64_t *stats64 = (uint64_t *)stats;
210 	uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats;
211 	uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base;
212 
213 	for (i = 0; i < count; i++)
214 		stats64[i] = lif_stats64[i] - lif_stats64_base[i];
215 }
216 
217 void
218 ionic_lif_reset_hw_stats(struct ionic_lif *lif)
219 {
220 	uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t);
221 	uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats;
222 	uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base;
223 
224 	for (i = 0; i < count; i++)
225 		lif_stats64_base[i] = lif_stats64[i];
226 }
227 
228 static int
229 ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr)
230 {
231 	struct ionic_admin_ctx ctx = {
232 		.pending_work = true,
233 		.cmd.rx_filter_add = {
234 			.opcode = IONIC_CMD_RX_FILTER_ADD,
235 			.match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_MAC),
236 		},
237 	};
238 	int err;
239 
240 	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, RTE_ETHER_ADDR_LEN);
241 
242 	err = ionic_adminq_post_wait(lif, &ctx);
243 	if (err)
244 		return err;
245 
246 	IONIC_PRINT(INFO, "rx_filter add (id %d)",
247 		rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id));
248 
249 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx);
250 }
251 
252 static int
253 ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr)
254 {
255 	struct ionic_admin_ctx ctx = {
256 		.pending_work = true,
257 		.cmd.rx_filter_del = {
258 			.opcode = IONIC_CMD_RX_FILTER_DEL,
259 		},
260 	};
261 	struct ionic_rx_filter *f;
262 	int err;
263 
264 	IONIC_PRINT_CALL();
265 
266 	rte_spinlock_lock(&lif->rx_filters.lock);
267 
268 	f = ionic_rx_filter_by_addr(lif, addr);
269 	if (!f) {
270 		rte_spinlock_unlock(&lif->rx_filters.lock);
271 		return -ENOENT;
272 	}
273 
274 	ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id);
275 	ionic_rx_filter_free(f);
276 
277 	rte_spinlock_unlock(&lif->rx_filters.lock);
278 
279 	err = ionic_adminq_post_wait(lif, &ctx);
280 	if (err)
281 		return err;
282 
283 	IONIC_PRINT(INFO, "rx_filter del (id %d)",
284 		rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id));
285 
286 	return 0;
287 }
288 
289 int
290 ionic_dev_add_mac(struct rte_eth_dev *eth_dev,
291 		struct rte_ether_addr *mac_addr,
292 		uint32_t index __rte_unused, uint32_t pool __rte_unused)
293 {
294 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
295 
296 	IONIC_PRINT_CALL();
297 
298 	return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr);
299 }
300 
301 void
302 ionic_dev_remove_mac(struct rte_eth_dev *eth_dev, uint32_t index)
303 {
304 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
305 	struct ionic_adapter *adapter = lif->adapter;
306 	struct rte_ether_addr *mac_addr;
307 
308 	IONIC_PRINT_CALL();
309 
310 	if (index >= adapter->max_mac_addrs) {
311 		IONIC_PRINT(WARNING,
312 			"Index %u is above MAC filter limit %u",
313 			index, adapter->max_mac_addrs);
314 		return;
315 	}
316 
317 	mac_addr = &eth_dev->data->mac_addrs[index];
318 
319 	if (!rte_is_valid_assigned_ether_addr(mac_addr))
320 		return;
321 
322 	ionic_lif_addr_del(lif, (const uint8_t *)mac_addr);
323 }
324 
325 int
326 ionic_dev_set_mac(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
327 {
328 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
329 
330 	IONIC_PRINT_CALL();
331 
332 	if (mac_addr == NULL) {
333 		IONIC_PRINT(NOTICE, "New mac is null");
334 		return -1;
335 	}
336 
337 	if (!rte_is_zero_ether_addr((struct rte_ether_addr *)lif->mac_addr)) {
338 		IONIC_PRINT(INFO, "Deleting mac addr " RTE_ETHER_ADDR_PRT_FMT,
339 			RTE_ETHER_ADDR_BYTES((struct rte_ether_addr *)lif->mac_addr));
340 		ionic_lif_addr_del(lif, lif->mac_addr);
341 		memset(lif->mac_addr, 0, RTE_ETHER_ADDR_LEN);
342 	}
343 
344 	IONIC_PRINT(INFO, "Updating mac addr");
345 
346 	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)lif->mac_addr);
347 
348 	return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr);
349 }
350 
351 static int
352 ionic_vlan_rx_add_vid(struct ionic_lif *lif, uint16_t vid)
353 {
354 	struct ionic_admin_ctx ctx = {
355 		.pending_work = true,
356 		.cmd.rx_filter_add = {
357 			.opcode = IONIC_CMD_RX_FILTER_ADD,
358 			.match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_VLAN),
359 			.vlan.vlan = rte_cpu_to_le_16(vid),
360 		},
361 	};
362 	int err;
363 
364 	err = ionic_adminq_post_wait(lif, &ctx);
365 	if (err)
366 		return err;
367 
368 	IONIC_PRINT(INFO, "rx_filter add VLAN %d (id %d)", vid,
369 		rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id));
370 
371 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx);
372 }
373 
374 static int
375 ionic_vlan_rx_kill_vid(struct ionic_lif *lif, uint16_t vid)
376 {
377 	struct ionic_admin_ctx ctx = {
378 		.pending_work = true,
379 		.cmd.rx_filter_del = {
380 			.opcode = IONIC_CMD_RX_FILTER_DEL,
381 		},
382 	};
383 	struct ionic_rx_filter *f;
384 	int err;
385 
386 	IONIC_PRINT_CALL();
387 
388 	rte_spinlock_lock(&lif->rx_filters.lock);
389 
390 	f = ionic_rx_filter_by_vlan(lif, vid);
391 	if (!f) {
392 		rte_spinlock_unlock(&lif->rx_filters.lock);
393 		return -ENOENT;
394 	}
395 
396 	ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id);
397 	ionic_rx_filter_free(f);
398 	rte_spinlock_unlock(&lif->rx_filters.lock);
399 
400 	err = ionic_adminq_post_wait(lif, &ctx);
401 	if (err)
402 		return err;
403 
404 	IONIC_PRINT(INFO, "rx_filter del VLAN %d (id %d)", vid,
405 		rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id));
406 
407 	return 0;
408 }
409 
410 int
411 ionic_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id,
412 		int on)
413 {
414 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
415 	int err;
416 
417 	if (on)
418 		err = ionic_vlan_rx_add_vid(lif, vlan_id);
419 	else
420 		err = ionic_vlan_rx_kill_vid(lif, vlan_id);
421 
422 	return err;
423 }
424 
425 static void
426 ionic_lif_rx_mode(struct ionic_lif *lif, uint32_t rx_mode)
427 {
428 	struct ionic_admin_ctx ctx = {
429 		.pending_work = true,
430 		.cmd.rx_mode_set = {
431 			.opcode = IONIC_CMD_RX_MODE_SET,
432 			.rx_mode = rte_cpu_to_le_16(rx_mode),
433 		},
434 	};
435 	int err;
436 
437 	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
438 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_UNICAST");
439 	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
440 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_MULTICAST");
441 	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
442 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_BROADCAST");
443 	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
444 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_PROMISC");
445 	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
446 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_ALLMULTI");
447 
448 	err = ionic_adminq_post_wait(lif, &ctx);
449 	if (err)
450 		IONIC_PRINT(ERR, "Failure setting RX mode");
451 }
452 
453 static void
454 ionic_set_rx_mode(struct ionic_lif *lif, uint32_t rx_mode)
455 {
456 	if (lif->rx_mode != rx_mode) {
457 		lif->rx_mode = rx_mode;
458 		ionic_lif_rx_mode(lif, rx_mode);
459 	}
460 }
461 
462 int
463 ionic_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
464 {
465 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
466 	uint32_t rx_mode = lif->rx_mode;
467 
468 	IONIC_PRINT_CALL();
469 
470 	rx_mode |= IONIC_RX_MODE_F_PROMISC;
471 
472 	ionic_set_rx_mode(lif, rx_mode);
473 
474 	return 0;
475 }
476 
477 int
478 ionic_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
479 {
480 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
481 	uint32_t rx_mode = lif->rx_mode;
482 
483 	rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
484 
485 	ionic_set_rx_mode(lif, rx_mode);
486 
487 	return 0;
488 }
489 
490 int
491 ionic_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
492 {
493 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
494 	uint32_t rx_mode = lif->rx_mode;
495 
496 	rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
497 
498 	ionic_set_rx_mode(lif, rx_mode);
499 
500 	return 0;
501 }
502 
503 int
504 ionic_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
505 {
506 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
507 	uint32_t rx_mode = lif->rx_mode;
508 
509 	rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
510 
511 	ionic_set_rx_mode(lif, rx_mode);
512 
513 	return 0;
514 }
515 
516 int
517 ionic_lif_change_mtu(struct ionic_lif *lif, uint32_t new_mtu)
518 {
519 	struct ionic_admin_ctx ctx = {
520 		.pending_work = true,
521 		.cmd.lif_setattr = {
522 			.opcode = IONIC_CMD_LIF_SETATTR,
523 			.attr = IONIC_LIF_ATTR_MTU,
524 			.mtu = rte_cpu_to_le_32(new_mtu),
525 		},
526 	};
527 
528 	return ionic_adminq_post_wait(lif, &ctx);
529 }
530 
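/*
 * Claim the first free interrupt index on the adapter and bind it to intr.
 */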
531 int
532 ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
533 {
534 	struct ionic_adapter *adapter = lif->adapter;
535 	struct ionic_dev *idev = &adapter->idev;
536 	unsigned long index;
537 
538 	/*
539 	 * Note: interrupt handler is called for index = 0 only
540 	 * (we use interrupts for the notifyq only anyway,
541 	 * which has index = 0)
542 	 */
543 
544 	for (index = 0; index < adapter->nintrs; index++)
545 		if (!adapter->intrs[index])
546 			break;
547 
548 	if (index == adapter->nintrs)
549 		return -ENOSPC;
550 
551 	adapter->intrs[index] = true;
552 
553 	ionic_intr_init(idev, intr, index);
554 
555 	return 0;
556 }
557 
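/*
 * Allocate a queue/completion-queue pair. The rings share a single DMA
 * memzone laid out as [q ring | cq ring | sg ring], with the CQ and SG
 * rings aligned up to a page boundary. When the queue is placed in
 * controller memory (IONIC_QCQ_F_CMB), only the main descriptor ring is
 * carved out of BAR 2; the CQ and SG rings stay in host memory.
 */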
558 static int
559 ionic_qcq_alloc(struct ionic_lif *lif,
560 		uint8_t type,
561 		size_t struct_size,
562 		uint32_t socket_id,
563 		uint32_t index,
564 		const char *type_name,
565 		uint16_t flags,
566 		uint16_t num_descs,
567 		uint16_t num_segs,
568 		uint16_t desc_size,
569 		uint16_t cq_desc_size,
570 		uint16_t sg_desc_size,
571 		struct ionic_qcq **qcq)
572 {
573 	struct ionic_qcq *new;
574 	uint32_t q_size, cq_size, sg_size, total_size;
575 	void *q_base, *cq_base, *sg_base;
576 	rte_iova_t q_base_pa = 0;
577 	rte_iova_t cq_base_pa = 0;
578 	rte_iova_t sg_base_pa = 0;
579 	size_t page_size = rte_mem_page_size();
580 	int err;
581 
582 	*qcq = NULL;
583 
584 	q_size  = num_descs * desc_size;
585 	cq_size = num_descs * cq_desc_size;
586 	sg_size = num_descs * sg_desc_size;
587 
588 	total_size = RTE_ALIGN(q_size, page_size) +
589 			RTE_ALIGN(cq_size, page_size);
590 	/*
591 	 * Note: aligning q_size/cq_size is not enough: q_base itself may not
592 	 * be page aligned, so aligning cq_base up can consume up to one more
593 	 * page. Reserve an extra page_size to cover it.
594 	 */
595 	total_size += page_size;
596 
597 	if (flags & IONIC_QCQ_F_SG) {
598 		total_size += RTE_ALIGN(sg_size, page_size);
599 		total_size += page_size;
600 	}
601 
602 	new = rte_zmalloc_socket("ionic", struct_size,
603 				RTE_CACHE_LINE_SIZE, socket_id);
604 	if (!new) {
605 		IONIC_PRINT(ERR, "Cannot allocate queue structure");
606 		return -ENOMEM;
607 	}
608 
609 	new->lif = lif;
610 
611 	/* Most queue types will store 1 ptr per descriptor */
612 	new->q.info = rte_calloc_socket("ionic",
613 				(uint64_t)num_descs * num_segs,
614 				sizeof(void *), page_size, socket_id);
615 	if (!new->q.info) {
616 		IONIC_PRINT(ERR, "Cannot allocate queue info");
617 		err = -ENOMEM;
618 		goto err_out_free_qcq;
619 	}
620 
621 	new->q.num_segs = num_segs;
622 	new->q.type = type;
623 
624 	err = ionic_q_init(&new->q, index, num_descs);
625 	if (err) {
626 		IONIC_PRINT(ERR, "Queue initialization failed");
627 		goto err_out_free_info;
628 	}
629 
630 	err = ionic_cq_init(&new->cq, num_descs);
631 	if (err) {
632 		IONIC_PRINT(ERR, "Completion queue initialization failed");
633 		goto err_out_free_info;
634 	}
635 
636 	new->base_z = rte_eth_dma_zone_reserve(lif->eth_dev,
637 		type_name, index /* queue_idx */,
638 		total_size, IONIC_ALIGN, socket_id);
639 
640 	if (!new->base_z) {
641 		IONIC_PRINT(ERR, "Cannot reserve queue DMA memory");
642 		err = -ENOMEM;
643 		goto err_out_free_info;
644 	}
645 
646 	new->base = new->base_z->addr;
647 	new->base_pa = new->base_z->iova;
648 
649 	q_base = new->base;
650 	q_base_pa = new->base_pa;
651 
652 	cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size, page_size);
653 	cq_base_pa = RTE_ALIGN(q_base_pa + q_size, page_size);
654 
655 	if (flags & IONIC_QCQ_F_SG) {
656 		sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size,
657 				page_size);
658 		sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size, page_size);
659 		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
660 	}
661 
662 	if (flags & IONIC_QCQ_F_CMB) {
663 		/* alloc descriptor ring from nic memory */
664 		if (lif->adapter->cmb_offset + q_size >
665 				lif->adapter->bars.bar[2].len) {
666 			IONIC_PRINT(ERR, "Cannot reserve queue from NIC mem");
667 			return -ENOMEM;
668 		}
669 		q_base = (void *)
670 			((uintptr_t)lif->adapter->bars.bar[2].vaddr +
671 			 (uintptr_t)lif->adapter->cmb_offset);
672 		/* CMB PA is a relative address */
673 		q_base_pa = lif->adapter->cmb_offset;
674 		lif->adapter->cmb_offset += q_size;
675 	}
676 
677 	IONIC_PRINT(DEBUG, "Q-Base-PA = %#jx CQ-Base-PA = %#jx "
678 		"SG-base-PA = %#jx",
679 		q_base_pa, cq_base_pa, sg_base_pa);
680 
681 	ionic_q_map(&new->q, q_base, q_base_pa);
682 	ionic_cq_map(&new->cq, cq_base, cq_base_pa);
683 
684 	*qcq = new;
685 
686 	return 0;
687 
688 err_out_free_info:
689 	rte_free(new->q.info);
690 err_out_free_qcq:
691 	rte_free(new);
692 
693 	return err;
694 }
695 
696 void
697 ionic_qcq_free(struct ionic_qcq *qcq)
698 {
699 	if (qcq->base_z) {
700 		qcq->base = NULL;
701 		qcq->base_pa = 0;
702 		rte_memzone_free(qcq->base_z);
703 		qcq->base_z = NULL;
704 	}
705 
706 	if (qcq->q.info) {
707 		rte_free(qcq->q.info);
708 		qcq->q.info = NULL;
709 	}
710 
711 	rte_free(qcq);
712 }
713 
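/*
 * Precompute the 64-bit mbuf rearm word (refcnt, nb_segs, port, data_off)
 * so the RX fill path can initialize those fields with a single store.
 */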
714 static uint64_t
715 ionic_rx_rearm_data(struct ionic_lif *lif)
716 {
717 	struct rte_mbuf rxm;
718 
719 	memset(&rxm, 0, sizeof(rxm));
720 
721 	rte_mbuf_refcnt_set(&rxm, 1);
722 	rxm.data_off = RTE_PKTMBUF_HEADROOM;
723 	rxm.nb_segs = 1;
724 	rxm.port = lif->port_id;
725 
726 	rte_compiler_barrier();
727 
728 	RTE_BUILD_BUG_ON(sizeof(rxm.rearm_data[0]) != sizeof(uint64_t));
729 	return rxm.rearm_data[0];
730 }
731 
732 static uint64_t
733 ionic_rx_seg_rearm_data(struct ionic_lif *lif)
734 {
735 	struct rte_mbuf rxm;
736 
737 	memset(&rxm, 0, sizeof(rxm));
738 
739 	rte_mbuf_refcnt_set(&rxm, 1);
740 	rxm.data_off = 0;  /* no headroom */
741 	rxm.nb_segs = 1;
742 	rxm.port = lif->port_id;
743 
744 	rte_compiler_barrier();
745 
746 	RTE_BUILD_BUG_ON(sizeof(rxm.rearm_data[0]) != sizeof(uint64_t));
747 	return rxm.rearm_data[0];
748 }
749 
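/*
 * Allocate an RX queue pair. If one mbuf cannot hold a max-MTU frame,
 * scattered RX is enabled, and the worst-case number of segments per
 * packet is checked against the firmware's SG descriptor limit.
 */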
750 int
751 ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
752 		uint16_t nrxq_descs, struct rte_mempool *mb_pool,
753 		struct ionic_rx_qcq **rxq_out)
754 {
755 	struct ionic_rx_qcq *rxq;
756 	uint16_t flags = 0, seg_size, hdr_seg_size, max_segs, max_segs_fw = 1;
757 	uint32_t max_mtu;
758 	int err;
759 
760 	if (lif->state & IONIC_LIF_F_Q_IN_CMB)
761 		flags |= IONIC_QCQ_F_CMB;
762 
763 	seg_size = rte_pktmbuf_data_room_size(mb_pool);
764 
765 	/* The first mbuf needs to leave headroom */
766 	hdr_seg_size = seg_size - RTE_PKTMBUF_HEADROOM;
767 
768 	max_mtu = rte_le_to_cpu_32(lif->adapter->ident.lif.eth.max_mtu);
769 
770 	/* If mbufs are too small to hold received packets, enable SG */
771 	if (max_mtu > hdr_seg_size &&
772 	    !(lif->features & IONIC_ETH_HW_RX_SG)) {
773 		IONIC_PRINT(NOTICE, "Enabling RX_OFFLOAD_SCATTER");
774 		lif->eth_dev->data->dev_conf.rxmode.offloads |=
775 			RTE_ETH_RX_OFFLOAD_SCATTER;
776 		ionic_lif_configure_rx_sg_offload(lif);
777 	}
778 
779 	if (lif->features & IONIC_ETH_HW_RX_SG) {
780 		flags |= IONIC_QCQ_F_SG;
781 		max_segs_fw = IONIC_RX_MAX_SG_ELEMS + 1;
782 	}
783 
784 	/*
785 	 * Calculate how many fragment pointers might be stored in queue.
786 	 * This is the worst-case number, so that there's enough room in
787 	 * the info array.
788 	 */
789 	max_segs = 1 + (max_mtu + RTE_PKTMBUF_HEADROOM - 1) / seg_size;
790 
791 	IONIC_PRINT(DEBUG, "rxq %u max_mtu %u seg_size %u max_segs %u",
792 		index, max_mtu, seg_size, max_segs);
793 	if (max_segs > max_segs_fw) {
794 		IONIC_PRINT(ERR, "Rx mbuf size insufficient (%d > %d avail)",
795 			max_segs, max_segs_fw);
796 		return -EINVAL;
797 	}
798 
799 	err = ionic_qcq_alloc(lif,
800 		IONIC_QTYPE_RXQ,
801 		sizeof(struct ionic_rx_qcq),
802 		socket_id,
803 		index,
804 		"rx",
805 		flags,
806 		nrxq_descs,
807 		max_segs,
808 		sizeof(struct ionic_rxq_desc),
809 		sizeof(struct ionic_rxq_comp),
810 		sizeof(struct ionic_rxq_sg_desc),
811 		(struct ionic_qcq **)&rxq);
812 	if (err)
813 		return err;
814 
815 	rxq->flags = flags;
816 	rxq->seg_size = seg_size;
817 	rxq->hdr_seg_size = hdr_seg_size;
818 	rxq->rearm_data = ionic_rx_rearm_data(lif);
819 	rxq->rearm_seg_data = ionic_rx_seg_rearm_data(lif);
820 
821 	lif->rxqcqs[index] = rxq;
822 	*rxq_out = rxq;
823 
824 	return 0;
825 }
826 
827 int
828 ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
829 		uint16_t ntxq_descs, struct ionic_tx_qcq **txq_out)
830 {
831 	struct ionic_tx_qcq *txq;
832 	uint16_t flags = 0, num_segs_fw = 1;
833 	int err;
834 
835 	if (lif->features & IONIC_ETH_HW_TX_SG) {
836 		flags |= IONIC_QCQ_F_SG;
837 		num_segs_fw = IONIC_TX_MAX_SG_ELEMS_V1 + 1;
838 	}
839 	if (lif->state & IONIC_LIF_F_Q_IN_CMB)
840 		flags |= IONIC_QCQ_F_CMB;
841 
842 	IONIC_PRINT(DEBUG, "txq %u num_segs %u", index, num_segs_fw);
843 
844 	err = ionic_qcq_alloc(lif,
845 		IONIC_QTYPE_TXQ,
846 		sizeof(struct ionic_tx_qcq),
847 		socket_id,
848 		index,
849 		"tx",
850 		flags,
851 		ntxq_descs,
852 		num_segs_fw,
853 		sizeof(struct ionic_txq_desc),
854 		sizeof(struct ionic_txq_comp),
855 		sizeof(struct ionic_txq_sg_desc_v1),
856 		(struct ionic_qcq **)&txq);
857 	if (err)
858 		return err;
859 
860 	txq->flags = flags;
861 	txq->num_segs_fw = num_segs_fw;
862 
863 	lif->txqcqs[index] = txq;
864 	*txq_out = txq;
865 
866 	return 0;
867 }
868 
869 static int
870 ionic_admin_qcq_alloc(struct ionic_lif *lif)
871 {
872 	uint16_t flags = 0;
873 	int err;
874 
875 	err = ionic_qcq_alloc(lif,
876 		IONIC_QTYPE_ADMINQ,
877 		sizeof(struct ionic_admin_qcq),
878 		rte_socket_id(),
879 		0,
880 		"admin",
881 		flags,
882 		IONIC_ADMINQ_LENGTH,
883 		1,
884 		sizeof(struct ionic_admin_cmd),
885 		sizeof(struct ionic_admin_comp),
886 		0,
887 		(struct ionic_qcq **)&lif->adminqcq);
888 	if (err)
889 		return err;
890 
891 	return 0;
892 }
893 
894 static int
895 ionic_notify_qcq_alloc(struct ionic_lif *lif)
896 {
897 	struct ionic_notify_qcq *nqcq;
898 	struct ionic_dev *idev = &lif->adapter->idev;
899 	uint16_t flags = 0;
900 	int err;
901 
902 	err = ionic_qcq_alloc(lif,
903 		IONIC_QTYPE_NOTIFYQ,
904 		sizeof(struct ionic_notify_qcq),
905 		rte_socket_id(),
906 		0,
907 		"notify",
908 		flags,
909 		IONIC_NOTIFYQ_LENGTH,
910 		1,
911 		sizeof(struct ionic_notifyq_cmd),
912 		sizeof(union ionic_notifyq_comp),
913 		0,
914 		(struct ionic_qcq **)&nqcq);
915 	if (err)
916 		return err;
917 
918 	err = ionic_intr_alloc(lif, &nqcq->intr);
919 	if (err) {
920 		ionic_qcq_free(&nqcq->qcq);
921 		return err;
922 	}
923 
924 	ionic_intr_mask_assert(idev->intr_ctrl, nqcq->intr.index,
925 		IONIC_INTR_MASK_SET);
926 
927 	lif->notifyqcq = nqcq;
928 
929 	return 0;
930 }
931 
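/*
 * Query the firmware for the properties of each queue type used by this
 * driver (descriptor sizes, SG limits, supported features), requesting at
 * most the versions listed in ionic_qtype_vers[].
 */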
932 static void
933 ionic_lif_queue_identify(struct ionic_lif *lif)
934 {
935 	struct ionic_adapter *adapter = lif->adapter;
936 	struct ionic_dev *idev = &adapter->idev;
937 	union ionic_q_identity *q_ident = &adapter->ident.txq;
938 	uint32_t q_words = RTE_DIM(q_ident->words);
939 	uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data);
940 	uint32_t i, nwords, qtype;
941 	int err;
942 
943 	for (qtype = 0; qtype < RTE_DIM(ionic_qtype_vers); qtype++) {
944 		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
945 
946 		/* Filter out the types this driver knows about */
947 		switch (qtype) {
948 		case IONIC_QTYPE_ADMINQ:
949 		case IONIC_QTYPE_NOTIFYQ:
950 		case IONIC_QTYPE_RXQ:
951 		case IONIC_QTYPE_TXQ:
952 			break;
953 		default:
954 			continue;
955 		}
956 
957 		memset(qti, 0, sizeof(*qti));
958 
959 		ionic_dev_cmd_queue_identify(idev, IONIC_LIF_TYPE_CLASSIC,
960 			qtype, ionic_qtype_vers[qtype]);
961 		err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
962 		if (err == -EINVAL) {
963 			IONIC_PRINT(ERR, "qtype %d not supported", qtype);
964 			continue;
965 		} else if (err == -EIO) {
966 			IONIC_PRINT(ERR, "q_ident failed, older FW");
967 			return;
968 		} else if (err) {
969 			IONIC_PRINT(ERR, "q_ident failed, qtype %d: %d",
970 				qtype, err);
971 			return;
972 		}
973 
974 		nwords = RTE_MIN(q_words, cmd_words);
975 		for (i = 0; i < nwords; i++)
976 			q_ident->words[i] = ioread32(&idev->dev_cmd->data[i]);
977 
978 		qti->version   = q_ident->version;
979 		qti->supported = q_ident->supported;
980 		qti->features  = rte_le_to_cpu_64(q_ident->features);
981 		qti->desc_sz   = rte_le_to_cpu_16(q_ident->desc_sz);
982 		qti->comp_sz   = rte_le_to_cpu_16(q_ident->comp_sz);
983 		qti->sg_desc_sz   = rte_le_to_cpu_16(q_ident->sg_desc_sz);
984 		qti->max_sg_elems = rte_le_to_cpu_16(q_ident->max_sg_elems);
985 		qti->sg_desc_stride =
986 			rte_le_to_cpu_16(q_ident->sg_desc_stride);
987 
988 		IONIC_PRINT(DEBUG, " qtype[%d].version = %d",
989 			qtype, qti->version);
990 		IONIC_PRINT(DEBUG, " qtype[%d].supported = %#x",
991 			qtype, qti->supported);
992 		IONIC_PRINT(DEBUG, " qtype[%d].features = %#jx",
993 			qtype, qti->features);
994 		IONIC_PRINT(DEBUG, " qtype[%d].desc_sz = %d",
995 			qtype, qti->desc_sz);
996 		IONIC_PRINT(DEBUG, " qtype[%d].comp_sz = %d",
997 			qtype, qti->comp_sz);
998 		IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_sz = %d",
999 			qtype, qti->sg_desc_sz);
1000 		IONIC_PRINT(DEBUG, " qtype[%d].max_sg_elems = %d",
1001 			qtype, qti->max_sg_elems);
1002 		IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_stride = %d",
1003 			qtype, qti->sg_desc_stride);
1004 	}
1005 }
1006 
1007 int
1008 ionic_lif_alloc(struct ionic_lif *lif)
1009 {
1010 	struct ionic_adapter *adapter = lif->adapter;
1011 	uint32_t socket_id = rte_socket_id();
1012 	int err;
1013 
1014 	/*
1015 	 * lif->name was zeroed on allocation.
1016 	 * Copy (sizeof() - 1) bytes to ensure that it is NULL terminated.
1017 	 */
1018 	memcpy(lif->name, lif->eth_dev->data->name, sizeof(lif->name) - 1);
1019 
1020 	IONIC_PRINT(DEBUG, "LIF: %s", lif->name);
1021 
1022 	ionic_lif_queue_identify(lif);
1023 
1024 	if (lif->qtype_info[IONIC_QTYPE_TXQ].version < 1) {
1025 		IONIC_PRINT(ERR, "FW too old, please upgrade");
1026 		return -ENXIO;
1027 	}
1028 
1029 	if (adapter->q_in_cmb) {
1030 		if (adapter->bars.num_bars >= 3 &&
1031 		    lif->qtype_info[IONIC_QTYPE_RXQ].version >= 2 &&
1032 		    lif->qtype_info[IONIC_QTYPE_TXQ].version >= 3) {
1033 			IONIC_PRINT(INFO, "%s enabled on %s",
1034 				PMD_IONIC_CMB_KVARG, lif->name);
1035 			lif->state |= IONIC_LIF_F_Q_IN_CMB;
1036 		} else {
1037 			IONIC_PRINT(ERR, "%s not supported on %s, disabled",
1038 				PMD_IONIC_CMB_KVARG, lif->name);
1039 		}
1040 	}
1041 
1042 	IONIC_PRINT(DEBUG, "Allocating Queue Arrays");
1043 
1044 	rte_spinlock_init(&lif->adminq_lock);
1045 	rte_spinlock_init(&lif->adminq_service_lock);
1046 
1047 	lif->kern_dbpage = adapter->idev.db_pages;
1048 	if (!lif->kern_dbpage) {
1049 		IONIC_PRINT(ERR, "Cannot map dbpage, aborting");
1050 		return -ENOMEM;
1051 	}
1052 
1053 	lif->txqcqs = rte_calloc_socket("ionic",
1054 				adapter->max_ntxqs_per_lif,
1055 				sizeof(*lif->txqcqs),
1056 				RTE_CACHE_LINE_SIZE, socket_id);
1057 	if (!lif->txqcqs) {
1058 		IONIC_PRINT(ERR, "Cannot allocate tx queues array");
1059 		return -ENOMEM;
1060 	}
1061 
1062 	lif->rxqcqs = rte_calloc_socket("ionic",
1063 				adapter->max_nrxqs_per_lif,
1064 				sizeof(*lif->rxqcqs),
1065 				RTE_CACHE_LINE_SIZE, socket_id);
1066 	if (!lif->rxqcqs) {
1067 		IONIC_PRINT(ERR, "Cannot allocate rx queues array");
1068 		return -ENOMEM;
1069 	}
1070 
1071 	IONIC_PRINT(DEBUG, "Allocating Notify Queue");
1072 
1073 	err = ionic_notify_qcq_alloc(lif);
1074 	if (err) {
1075 		IONIC_PRINT(ERR, "Cannot allocate notify queue");
1076 		return err;
1077 	}
1078 
1079 	IONIC_PRINT(DEBUG, "Allocating Admin Queue");
1080 
1081 	err = ionic_admin_qcq_alloc(lif);
1082 	if (err) {
1083 		IONIC_PRINT(ERR, "Cannot allocate admin queue");
1084 		return err;
1085 	}
1086 
1087 	IONIC_PRINT(DEBUG, "Allocating Lif Info");
1088 
1089 	lif->info_sz = RTE_ALIGN(sizeof(*lif->info), rte_mem_page_size());
1090 
1091 	lif->info_z = rte_eth_dma_zone_reserve(lif->eth_dev,
1092 		"lif_info", 0 /* queue_idx*/,
1093 		lif->info_sz, IONIC_ALIGN, socket_id);
1094 	if (!lif->info_z) {
1095 		IONIC_PRINT(ERR, "Cannot allocate lif info memory");
1096 		return -ENOMEM;
1097 	}
1098 
1099 	lif->info = lif->info_z->addr;
1100 	lif->info_pa = lif->info_z->iova;
1101 
1102 	return 0;
1103 }
1104 
1105 void
1106 ionic_lif_free(struct ionic_lif *lif)
1107 {
1108 	if (lif->notifyqcq) {
1109 		ionic_qcq_free(&lif->notifyqcq->qcq);
1110 		lif->notifyqcq = NULL;
1111 	}
1112 
1113 	if (lif->adminqcq) {
1114 		ionic_qcq_free(&lif->adminqcq->qcq);
1115 		lif->adminqcq = NULL;
1116 	}
1117 
1118 	if (lif->txqcqs) {
1119 		rte_free(lif->txqcqs);
1120 		lif->txqcqs = NULL;
1121 	}
1122 
1123 	if (lif->rxqcqs) {
1124 		rte_free(lif->rxqcqs);
1125 		lif->rxqcqs = NULL;
1126 	}
1127 
1128 	if (lif->info) {
1129 		rte_memzone_free(lif->info_z);
1130 		lif->info = NULL;
1131 	}
1132 }
1133 
1134 void
1135 ionic_lif_free_queues(struct ionic_lif *lif)
1136 {
1137 	uint32_t i;
1138 
1139 	for (i = 0; i < lif->ntxqcqs; i++) {
1140 		ionic_dev_tx_queue_release(lif->eth_dev, i);
1141 		lif->eth_dev->data->tx_queues[i] = NULL;
1142 	}
1143 	for (i = 0; i < lif->nrxqcqs; i++) {
1144 		ionic_dev_rx_queue_release(lif->eth_dev, i);
1145 		lif->eth_dev->data->rx_queues[i] = NULL;
1146 	}
1147 }
1148 
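/*
 * Program the RSS hash types, hash key, and indirection table. A NULL key
 * or indirection table keeps the currently cached values. Passing 0 for
 * types disables RSS, e.g.:
 *
 *	ionic_lif_rss_config(lif, 0, NULL, NULL);
 *
 * as done in ionic_lif_rss_teardown().
 */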
1149 int
1150 ionic_lif_rss_config(struct ionic_lif *lif,
1151 		const uint16_t types, const uint8_t *key, const uint32_t *indir)
1152 {
1153 	struct ionic_adapter *adapter = lif->adapter;
1154 	struct ionic_admin_ctx ctx = {
1155 		.pending_work = true,
1156 		.cmd.lif_setattr = {
1157 			.opcode = IONIC_CMD_LIF_SETATTR,
1158 			.attr = IONIC_LIF_ATTR_RSS,
1159 			.rss.types = rte_cpu_to_le_16(types),
1160 			.rss.addr = rte_cpu_to_le_64(lif->rss_ind_tbl_pa),
1161 		},
1162 	};
1163 	unsigned int i;
1164 	uint16_t tbl_sz =
1165 		rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz);
1166 
1167 	IONIC_PRINT_CALL();
1168 
1169 	lif->rss_types = types;
1170 
1171 	if (key)
1172 		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1173 
1174 	if (indir)
1175 		for (i = 0; i < tbl_sz; i++)
1176 			lif->rss_ind_tbl[i] = indir[i];
1177 
1178 	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1179 	       IONIC_RSS_HASH_KEY_SIZE);
1180 
1181 	return ionic_adminq_post_wait(lif, &ctx);
1182 }
1183 
1184 static int
1185 ionic_lif_rss_setup(struct ionic_lif *lif)
1186 {
1187 	struct ionic_adapter *adapter = lif->adapter;
1188 	static const uint8_t toeplitz_symmetric_key[] = {
1189 		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1190 		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1191 		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1192 		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1193 		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1194 	};
1195 	uint32_t i;
1196 	uint16_t tbl_sz =
1197 		rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz);
1198 
1199 	IONIC_PRINT_CALL();
1200 
1201 	if (!lif->rss_ind_tbl_z) {
1202 		lif->rss_ind_tbl_z = rte_eth_dma_zone_reserve(lif->eth_dev,
1203 					"rss_ind_tbl", 0 /* queue_idx */,
1204 					sizeof(*lif->rss_ind_tbl) * tbl_sz,
1205 					IONIC_ALIGN, rte_socket_id());
1206 		if (!lif->rss_ind_tbl_z) {
1207 			IONIC_PRINT(ERR, "Cannot allocate RSS indirection table");
1208 			return -ENOMEM;
1209 		}
1210 
1211 		lif->rss_ind_tbl = lif->rss_ind_tbl_z->addr;
1212 		lif->rss_ind_tbl_pa = lif->rss_ind_tbl_z->iova;
1213 	}
1214 
1215 	if (lif->rss_ind_tbl_nrxqcqs != lif->nrxqcqs) {
1216 		lif->rss_ind_tbl_nrxqcqs = lif->nrxqcqs;
1217 
1218 		/* Fill indirection table with 'default' values */
1219 		for (i = 0; i < tbl_sz; i++)
1220 			lif->rss_ind_tbl[i] = i % lif->nrxqcqs;
1221 	}
1222 
1223 	return ionic_lif_rss_config(lif, IONIC_RSS_OFFLOAD_ALL,
1224 			toeplitz_symmetric_key, NULL);
1225 }
1226 
1227 static void
1228 ionic_lif_rss_teardown(struct ionic_lif *lif)
1229 {
1230 	if (!lif->rss_ind_tbl)
1231 		return;
1232 
1233 	if (lif->rss_ind_tbl_z) {
1234 		/* Disable RSS on the NIC */
1235 		ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1236 
1237 		lif->rss_ind_tbl = NULL;
1238 		lif->rss_ind_tbl_pa = 0;
1239 		rte_memzone_free(lif->rss_ind_tbl_z);
1240 		lif->rss_ind_tbl_z = NULL;
1241 	}
1242 }
1243 
1244 void
1245 ionic_lif_txq_deinit(struct ionic_tx_qcq *txq)
1246 {
1247 	ionic_qcq_disable(&txq->qcq);
1248 
1249 	txq->flags &= ~IONIC_QCQ_F_INITED;
1250 }
1251 
1252 void
1253 ionic_lif_rxq_deinit(struct ionic_rx_qcq *rxq)
1254 {
1255 	ionic_qcq_disable(&rxq->qcq);
1256 
1257 	rxq->flags &= ~IONIC_QCQ_F_INITED;
1258 }
1259 
1260 static void
1261 ionic_lif_adminq_deinit(struct ionic_lif *lif)
1262 {
1263 	lif->adminqcq->flags &= ~IONIC_QCQ_F_INITED;
1264 }
1265 
1266 static void
1267 ionic_lif_notifyq_deinit(struct ionic_lif *lif)
1268 {
1269 	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
1270 	struct ionic_dev *idev = &lif->adapter->idev;
1271 
1272 	if (!(nqcq->flags & IONIC_QCQ_F_INITED))
1273 		return;
1274 
1275 	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
1276 		IONIC_INTR_MASK_SET);
1277 
1278 	nqcq->flags &= ~IONIC_QCQ_F_INITED;
1279 }
1280 
1281 /* This acts like ionic_napi */
1282 int
1283 ionic_qcq_service(struct ionic_qcq *qcq, int budget, ionic_cq_cb cb,
1284 		void *cb_arg)
1285 {
1286 	struct ionic_cq *cq = &qcq->cq;
1287 	uint32_t work_done;
1288 
1289 	work_done = ionic_cq_service(cq, budget, cb, cb_arg);
1290 
1291 	return work_done;
1292 }
1293 
1294 static void
1295 ionic_link_status_check(struct ionic_lif *lif)
1296 {
1297 	struct ionic_adapter *adapter = lif->adapter;
1298 	bool link_up;
1299 
1300 	lif->state &= ~IONIC_LIF_F_LINK_CHECK_NEEDED;
1301 
1302 	if (!lif->info)
1303 		return;
1304 
1305 	link_up = (lif->info->status.link_status == IONIC_PORT_OPER_STATUS_UP);
1306 
1307 	if ((link_up  && adapter->link_up) ||
1308 	    (!link_up && !adapter->link_up))
1309 		return;
1310 
1311 	if (link_up) {
1312 		adapter->link_speed =
1313 			rte_le_to_cpu_32(lif->info->status.link_speed);
1314 		IONIC_PRINT(DEBUG, "Link up - %d Mbps",
1315 			adapter->link_speed);
1316 	} else {
1317 		IONIC_PRINT(DEBUG, "Link down");
1318 	}
1319 
1320 	adapter->link_up = link_up;
1321 	ionic_dev_link_update(lif->eth_dev, 0);
1322 }
1323 
1324 static void
1325 ionic_lif_handle_fw_down(struct ionic_lif *lif)
1326 {
1327 	if (lif->state & IONIC_LIF_F_FW_RESET)
1328 		return;
1329 
1330 	lif->state |= IONIC_LIF_F_FW_RESET;
1331 
1332 	if (lif->state & IONIC_LIF_F_UP) {
1333 		IONIC_PRINT(NOTICE,
1334 			"Surprise FW stop, stopping %s", lif->name);
1335 		ionic_lif_stop(lif);
1336 	}
1337 
1338 	IONIC_PRINT(NOTICE, "FW down, %s stopped", lif->name);
1339 }
1340 
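/*
 * Per-completion callback for the notify queue: record the latest event id
 * and flag link changes and firmware resets for follow-up handling.
 */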
1341 static bool
1342 ionic_notifyq_cb(struct ionic_cq *cq, uint16_t cq_desc_index, void *cb_arg)
1343 {
1344 	union ionic_notifyq_comp *cq_desc_base = cq->base;
1345 	union ionic_notifyq_comp *cq_desc = &cq_desc_base[cq_desc_index];
1346 	struct ionic_lif *lif = cb_arg;
1347 
1348 	IONIC_PRINT(DEBUG, "Notifyq callback eid = %jd ecode = %d",
1349 		cq_desc->event.eid, cq_desc->event.ecode);
1350 
1351 	/* Have we run out of new completions to process? */
1352 	if (!(cq_desc->event.eid > lif->last_eid))
1353 		return false;
1354 
1355 	lif->last_eid = cq_desc->event.eid;
1356 
1357 	switch (cq_desc->event.ecode) {
1358 	case IONIC_EVENT_LINK_CHANGE:
1359 		IONIC_PRINT(DEBUG,
1360 			"Notifyq IONIC_EVENT_LINK_CHANGE %s "
1361 			"eid=%jd link_status=%d link_speed=%d",
1362 			lif->name,
1363 			cq_desc->event.eid,
1364 			cq_desc->link_change.link_status,
1365 			cq_desc->link_change.link_speed);
1366 
1367 		lif->state |= IONIC_LIF_F_LINK_CHECK_NEEDED;
1368 		break;
1369 
1370 	case IONIC_EVENT_RESET:
1371 		IONIC_PRINT(NOTICE,
1372 			"Notifyq IONIC_EVENT_RESET %s "
1373 			"eid=%jd, reset_code=%d state=%d",
1374 			lif->name,
1375 			cq_desc->event.eid,
1376 			cq_desc->reset.reset_code,
1377 			cq_desc->reset.state);
1378 		ionic_lif_handle_fw_down(lif);
1379 		break;
1380 
1381 	default:
1382 		IONIC_PRINT(WARNING, "Notifyq bad event ecode=%d eid=%jd",
1383 			cq_desc->event.ecode, cq_desc->event.eid);
1384 		break;
1385 	}
1386 
1387 	return true;
1388 }
1389 
1390 int
1391 ionic_notifyq_handler(struct ionic_lif *lif, int budget)
1392 {
1393 	struct ionic_dev *idev = &lif->adapter->idev;
1394 	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
1395 	uint32_t work_done;
1396 
1397 	if (!(nqcq->flags & IONIC_QCQ_F_INITED)) {
1398 		IONIC_PRINT(DEBUG, "Notifyq not yet initialized");
1399 		return -1;
1400 	}
1401 
1402 	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
1403 		IONIC_INTR_MASK_SET);
1404 
1405 	work_done = ionic_qcq_service(&nqcq->qcq, budget,
1406 				ionic_notifyq_cb, lif);
1407 
1408 	if (lif->state & IONIC_LIF_F_LINK_CHECK_NEEDED)
1409 		ionic_link_status_check(lif);
1410 
1411 	ionic_intr_credits(idev->intr_ctrl, nqcq->intr.index,
1412 		work_done, IONIC_INTR_CRED_RESET_COALESCE);
1413 
1414 	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
1415 		IONIC_INTR_MASK_CLEAR);
1416 
1417 	return 0;
1418 }
1419 
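/*
 * Initialize the admin queue via the device command channel, retrying a few
 * times if the device reports -EAGAIN.
 */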
1420 static int
1421 ionic_lif_adminq_init(struct ionic_lif *lif)
1422 {
1423 	struct ionic_dev *idev = &lif->adapter->idev;
1424 	struct ionic_admin_qcq *aqcq = lif->adminqcq;
1425 	struct ionic_queue *q = &aqcq->qcq.q;
1426 	struct ionic_q_init_comp comp;
1427 	uint32_t retries = 5;
1428 	int err;
1429 
1430 retry_adminq_init:
1431 	ionic_dev_cmd_adminq_init(idev, &aqcq->qcq);
1432 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
1433 	if (err == -EAGAIN && retries > 0) {
1434 		retries--;
1435 		rte_delay_us_block(IONIC_DEVCMD_RETRY_WAIT_US);
1436 		goto retry_adminq_init;
1437 	}
1438 	if (err)
1439 		return err;
1440 
1441 	ionic_dev_cmd_comp(idev, &comp);
1442 
1443 	q->hw_type = comp.hw_type;
1444 	q->hw_index = rte_le_to_cpu_32(comp.hw_index);
1445 	q->db = ionic_db_map(lif, q);
1446 
1447 	IONIC_PRINT(DEBUG, "adminq->hw_type %d", q->hw_type);
1448 	IONIC_PRINT(DEBUG, "adminq->hw_index %d", q->hw_index);
1449 	IONIC_PRINT(DEBUG, "adminq->db %p", q->db);
1450 
1451 	aqcq->flags |= IONIC_QCQ_F_INITED;
1452 
1453 	return 0;
1454 }
1455 
1456 static int
1457 ionic_lif_notifyq_init(struct ionic_lif *lif)
1458 {
1459 	struct ionic_dev *idev = &lif->adapter->idev;
1460 	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
1461 	struct ionic_queue *q = &nqcq->qcq.q;
1462 	uint16_t flags = IONIC_QINIT_F_ENA;
1463 	int err;
1464 
1465 	struct ionic_admin_ctx ctx = {
1466 		.pending_work = true,
1467 		.cmd.q_init = {
1468 			.opcode = IONIC_CMD_Q_INIT,
1469 			.type = q->type,
1470 			.ver = lif->qtype_info[q->type].version,
1471 			.index = rte_cpu_to_le_32(q->index),
1472 			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
1473 			.ring_size = rte_log2_u32(q->num_descs),
1474 			.ring_base = rte_cpu_to_le_64(q->base_pa),
1475 		}
1476 	};
1477 
1478 	/* Only enable an interrupt if the device supports them */
1479 	if (lif->adapter->intf->configure_intr != NULL) {
1480 		flags |= IONIC_QINIT_F_IRQ;
1481 		ctx.cmd.q_init.intr_index = rte_cpu_to_le_16(nqcq->intr.index);
1482 	}
1483 	ctx.cmd.q_init.flags = rte_cpu_to_le_16(flags);
1484 
1485 	IONIC_PRINT(DEBUG, "notifyq_init.index %d", q->index);
1486 	IONIC_PRINT(DEBUG, "notifyq_init.ring_base 0x%" PRIx64 "", q->base_pa);
1487 	IONIC_PRINT(DEBUG, "notifyq_init.ring_size %d",
1488 		ctx.cmd.q_init.ring_size);
1489 	IONIC_PRINT(DEBUG, "notifyq_init.ver %u", ctx.cmd.q_init.ver);
1490 
1491 	err = ionic_adminq_post_wait(lif, &ctx);
1492 	if (err)
1493 		return err;
1494 
1495 	q->hw_type = ctx.comp.q_init.hw_type;
1496 	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
1497 	q->db = NULL;
1498 
1499 	IONIC_PRINT(DEBUG, "notifyq->hw_type %d", q->hw_type);
1500 	IONIC_PRINT(DEBUG, "notifyq->hw_index %d", q->hw_index);
1501 	IONIC_PRINT(DEBUG, "notifyq->db %p", q->db);
1502 
1503 	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
1504 		IONIC_INTR_MASK_CLEAR);
1505 
1506 	nqcq->flags |= IONIC_QCQ_F_INITED;
1507 
1508 	return 0;
1509 }
1510 
1511 int
1512 ionic_lif_set_features(struct ionic_lif *lif)
1513 {
1514 	struct ionic_admin_ctx ctx = {
1515 		.pending_work = true,
1516 		.cmd.lif_setattr = {
1517 			.opcode = IONIC_CMD_LIF_SETATTR,
1518 			.attr = IONIC_LIF_ATTR_FEATURES,
1519 			.features = rte_cpu_to_le_64(lif->features),
1520 		},
1521 	};
1522 	int err;
1523 
1524 	err = ionic_adminq_post_wait(lif, &ctx);
1525 	if (err)
1526 		return err;
1527 
1528 	lif->hw_features = rte_le_to_cpu_64(ctx.cmd.lif_setattr.features &
1529 						ctx.comp.lif_setattr.features);
1530 
1531 	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1532 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_TX_TAG");
1533 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1534 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_STRIP");
1535 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1536 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_FILTER");
1537 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1538 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_HASH");
1539 	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1540 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_SG");
1541 	if (lif->hw_features & IONIC_ETH_HW_RX_SG)
1542 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_SG");
1543 	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1544 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_CSUM");
1545 	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1546 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_CSUM");
1547 	if (lif->hw_features & IONIC_ETH_HW_TSO)
1548 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO");
1549 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1550 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPV6");
1551 	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1552 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_ECN");
1553 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1554 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE");
1555 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1556 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE_CSUM");
1557 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1558 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP4");
1559 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1560 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP6");
1561 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1562 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP");
1563 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1564 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP_CSUM");
1565 
1566 	return 0;
1567 }
1568 
1569 int
1570 ionic_lif_txq_init(struct ionic_tx_qcq *txq)
1571 {
1572 	struct ionic_qcq *qcq = &txq->qcq;
1573 	struct ionic_queue *q = &qcq->q;
1574 	struct ionic_lif *lif = qcq->lif;
1575 	struct ionic_cq *cq = &qcq->cq;
1576 	struct ionic_admin_ctx ctx = {
1577 		.pending_work = true,
1578 		.cmd.q_init = {
1579 			.opcode = IONIC_CMD_Q_INIT,
1580 			.type = q->type,
1581 			.ver = lif->qtype_info[q->type].version,
1582 			.index = rte_cpu_to_le_32(q->index),
1583 			.flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA),
1584 			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
1585 			.ring_size = rte_log2_u32(q->num_descs),
1586 			.ring_base = rte_cpu_to_le_64(q->base_pa),
1587 			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
1588 			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
1589 		},
1590 	};
1591 	int err;
1592 
1593 	if (txq->flags & IONIC_QCQ_F_SG)
1594 		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);
1595 	if (txq->flags & IONIC_QCQ_F_CMB)
1596 		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);
1597 
1598 	IONIC_PRINT(DEBUG, "txq_init.index %d", q->index);
1599 	IONIC_PRINT(DEBUG, "txq_init.ring_base 0x%" PRIx64 "", q->base_pa);
1600 	IONIC_PRINT(DEBUG, "txq_init.ring_size %d",
1601 		ctx.cmd.q_init.ring_size);
1602 	IONIC_PRINT(DEBUG, "txq_init.ver %u", ctx.cmd.q_init.ver);
1603 
1604 	ionic_q_reset(q);
1605 	ionic_cq_reset(cq);
1606 
1607 	err = ionic_adminq_post_wait(lif, &ctx);
1608 	if (err)
1609 		return err;
1610 
1611 	q->hw_type = ctx.comp.q_init.hw_type;
1612 	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
1613 	q->db = ionic_db_map(lif, q);
1614 
1615 	IONIC_PRINT(DEBUG, "txq->hw_type %d", q->hw_type);
1616 	IONIC_PRINT(DEBUG, "txq->hw_index %d", q->hw_index);
1617 	IONIC_PRINT(DEBUG, "txq->db %p", q->db);
1618 
1619 	txq->flags |= IONIC_QCQ_F_INITED;
1620 
1621 	return 0;
1622 }
1623 
1624 int
1625 ionic_lif_rxq_init(struct ionic_rx_qcq *rxq)
1626 {
1627 	struct ionic_qcq *qcq = &rxq->qcq;
1628 	struct ionic_queue *q = &qcq->q;
1629 	struct ionic_lif *lif = qcq->lif;
1630 	struct ionic_cq *cq = &qcq->cq;
1631 	struct ionic_admin_ctx ctx = {
1632 		.pending_work = true,
1633 		.cmd.q_init = {
1634 			.opcode = IONIC_CMD_Q_INIT,
1635 			.type = q->type,
1636 			.ver = lif->qtype_info[q->type].version,
1637 			.index = rte_cpu_to_le_32(q->index),
1638 			.flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA),
1639 			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
1640 			.ring_size = rte_log2_u32(q->num_descs),
1641 			.ring_base = rte_cpu_to_le_64(q->base_pa),
1642 			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
1643 			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
1644 		},
1645 	};
1646 	int err;
1647 
1648 	if (rxq->flags & IONIC_QCQ_F_SG)
1649 		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);
1650 	if (rxq->flags & IONIC_QCQ_F_CMB)
1651 		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);
1652 
1653 	IONIC_PRINT(DEBUG, "rxq_init.index %d", q->index);
1654 	IONIC_PRINT(DEBUG, "rxq_init.ring_base 0x%" PRIx64 "", q->base_pa);
1655 	IONIC_PRINT(DEBUG, "rxq_init.ring_size %d",
1656 		ctx.cmd.q_init.ring_size);
1657 	IONIC_PRINT(DEBUG, "rxq_init.ver %u", ctx.cmd.q_init.ver);
1658 
1659 	ionic_q_reset(q);
1660 	ionic_cq_reset(cq);
1661 
1662 	err = ionic_adminq_post_wait(lif, &ctx);
1663 	if (err)
1664 		return err;
1665 
1666 	q->hw_type = ctx.comp.q_init.hw_type;
1667 	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
1668 	q->db = ionic_db_map(lif, q);
1669 
1670 	rxq->flags |= IONIC_QCQ_F_INITED;
1671 
1672 	IONIC_PRINT(DEBUG, "rxq->hw_type %d", q->hw_type);
1673 	IONIC_PRINT(DEBUG, "rxq->hw_index %d", q->hw_index);
1674 	IONIC_PRINT(DEBUG, "rxq->db %p", q->db);
1675 
1676 	return 0;
1677 }
1678 
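/*
 * Read the station MAC address assigned by the firmware (LIF_GETATTR/MAC)
 * into lif->mac_addr.
 */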
1679 static int
1680 ionic_station_set(struct ionic_lif *lif)
1681 {
1682 	struct ionic_admin_ctx ctx = {
1683 		.pending_work = true,
1684 		.cmd.lif_getattr = {
1685 			.opcode = IONIC_CMD_LIF_GETATTR,
1686 			.attr = IONIC_LIF_ATTR_MAC,
1687 		},
1688 	};
1689 	int err;
1690 
1691 	IONIC_PRINT_CALL();
1692 
1693 	err = ionic_adminq_post_wait(lif, &ctx);
1694 	if (err)
1695 		return err;
1696 
1697 	memcpy(lif->mac_addr, ctx.comp.lif_getattr.mac, RTE_ETHER_ADDR_LEN);
1698 
1699 	return 0;
1700 }
1701 
1702 static void
1703 ionic_lif_set_name(struct ionic_lif *lif)
1704 {
1705 	struct ionic_admin_ctx ctx = {
1706 		.pending_work = true,
1707 		.cmd.lif_setattr = {
1708 			.opcode = IONIC_CMD_LIF_SETATTR,
1709 			.attr = IONIC_LIF_ATTR_NAME,
1710 		},
1711 	};
1712 
1713 	memcpy(ctx.cmd.lif_setattr.name, lif->name,
1714 		sizeof(ctx.cmd.lif_setattr.name) - 1);
1715 
1716 	ionic_adminq_post_wait(lif, &ctx);
1717 }
1718 
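/*
 * Bring up the LIF on the device: LIF_INIT device command, admin and notify
 * queue init, initial feature set, RX filter tables, and station address.
 */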
1719 int
1720 ionic_lif_init(struct ionic_lif *lif)
1721 {
1722 	struct ionic_dev *idev = &lif->adapter->idev;
1723 	struct ionic_lif_init_comp comp;
1724 	uint32_t retries = 5;
1725 	int err;
1726 
1727 	memset(&lif->stats_base, 0, sizeof(lif->stats_base));
1728 
1729 retry_lif_init:
1730 	ionic_dev_cmd_lif_init(idev, lif->info_pa);
1731 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
1732 	if (err == -EAGAIN && retries > 0) {
1733 		retries--;
1734 		rte_delay_us_block(IONIC_DEVCMD_RETRY_WAIT_US);
1735 		goto retry_lif_init;
1736 	}
1737 	if (err)
1738 		return err;
1739 
1740 	ionic_dev_cmd_comp(idev, &comp);
1741 
1742 	lif->hw_index = rte_le_to_cpu_16(comp.hw_index);
1743 
1744 	err = ionic_lif_adminq_init(lif);
1745 	if (err)
1746 		return err;
1747 
1748 	err = ionic_lif_notifyq_init(lif);
1749 	if (err)
1750 		goto err_out_adminq_deinit;
1751 
1752 	/*
1753 	 * Configure initial feature set
1754 	 * This will be updated later by the dev_configure() step
1755 	 */
1756 	lif->features = IONIC_ETH_HW_RX_HASH | IONIC_ETH_HW_VLAN_RX_FILTER;
1757 
1758 	err = ionic_lif_set_features(lif);
1759 	if (err)
1760 		goto err_out_notifyq_deinit;
1761 
1762 	err = ionic_rx_filters_init(lif);
1763 	if (err)
1764 		goto err_out_notifyq_deinit;
1765 
1766 	err = ionic_station_set(lif);
1767 	if (err)
1768 		goto err_out_rx_filter_deinit;
1769 
1770 	ionic_lif_set_name(lif);
1771 
1772 	lif->state |= IONIC_LIF_F_INITED;
1773 
1774 	return 0;
1775 
1776 err_out_rx_filter_deinit:
1777 	ionic_rx_filters_deinit(lif);
1778 
1779 err_out_notifyq_deinit:
1780 	ionic_lif_notifyq_deinit(lif);
1781 
1782 err_out_adminq_deinit:
1783 	ionic_lif_adminq_deinit(lif);
1784 
1785 	return err;
1786 }
1787 
1788 void
1789 ionic_lif_deinit(struct ionic_lif *lif)
1790 {
1791 	if (!(lif->state & IONIC_LIF_F_INITED))
1792 		return;
1793 
1794 	ionic_rx_filters_deinit(lif);
1795 	ionic_lif_rss_teardown(lif);
1796 	ionic_lif_notifyq_deinit(lif);
1797 	ionic_lif_adminq_deinit(lif);
1798 
1799 	lif->state &= ~IONIC_LIF_F_INITED;
1800 }
1801 
1802 void
1803 ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)
1804 {
1805 	struct rte_eth_dev *eth_dev = lif->eth_dev;
1806 	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1807 
1808 	/*
1809 	 * IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so
1810 	 * set RTE_ETH_RX_OFFLOAD_VLAN_FILTER and ignore RTE_ETH_VLAN_FILTER_MASK
1811 	 */
1812 	rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
1813 
1814 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1815 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1816 			lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
1817 		else
1818 			lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
1819 	}
1820 }
1821 
1822 void
1823 ionic_lif_configure_rx_sg_offload(struct ionic_lif *lif)
1824 {
1825 	struct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode;
1826 
1827 	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
1828 		lif->features |= IONIC_ETH_HW_RX_SG;
1829 		lif->eth_dev->data->scattered_rx = 1;
1830 	} else {
1831 		lif->features &= ~IONIC_ETH_HW_RX_SG;
1832 		lif->eth_dev->data->scattered_rx = 0;
1833 	}
1834 }
1835 
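/*
 * Size the LIF to the ethdev configuration: clamp the firmware queue counts
 * to the configured number of RX/TX queues and translate the RX/TX offload
 * flags into ionic feature bits.
 */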
1836 void
1837 ionic_lif_configure(struct ionic_lif *lif)
1838 {
1839 	struct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode;
1840 	struct rte_eth_txmode *txmode = &lif->eth_dev->data->dev_conf.txmode;
1841 	struct ionic_identity *ident = &lif->adapter->ident;
1842 	union ionic_lif_config *cfg = &ident->lif.eth.config;
1843 	uint32_t ntxqs_per_lif =
1844 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
1845 	uint32_t nrxqs_per_lif =
1846 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]);
1847 	uint32_t nrxqs = lif->eth_dev->data->nb_rx_queues;
1848 	uint32_t ntxqs = lif->eth_dev->data->nb_tx_queues;
1849 
1850 	lif->port_id = lif->eth_dev->data->port_id;
1851 
1852 	IONIC_PRINT(DEBUG, "Configuring LIF on port %u",
1853 		lif->port_id);
1854 
1855 	if (nrxqs > 0)
1856 		nrxqs_per_lif = RTE_MIN(nrxqs_per_lif, nrxqs);
1857 
1858 	if (ntxqs > 0)
1859 		ntxqs_per_lif = RTE_MIN(ntxqs_per_lif, ntxqs);
1860 
1861 	lif->nrxqcqs = nrxqs_per_lif;
1862 	lif->ntxqcqs = ntxqs_per_lif;
1863 
1864 	/* Update the LIF configuration based on the eth_dev */
1865 
1866 	/*
1867 	 * NB: While it is true that RSS_HASH is always enabled on ionic,
1868 	 *     setting this flag unconditionally causes problems in DTS.
1869 	 * rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
1870 	 */
1871 
1872 	/* RX per-port */
1873 
1874 	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM ||
1875 	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM ||
1876 	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1877 		lif->features |= IONIC_ETH_HW_RX_CSUM;
1878 	else
1879 		lif->features &= ~IONIC_ETH_HW_RX_CSUM;
1880 
1881 	/*
1882 	 * NB: RX_SG may be enabled later during rx_queue_setup() if
1883 	 * required by the mbuf/mtu configuration
1884 	 */
1885 	ionic_lif_configure_rx_sg_offload(lif);
1886 
1887 	/* Covers VLAN_STRIP */
1888 	ionic_lif_configure_vlan_offload(lif, RTE_ETH_VLAN_STRIP_MASK);
1889 
1890 	/* TX per-port */
1891 
1892 	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
1893 	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
1894 	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
1895 	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
1896 	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
1897 		lif->features |= IONIC_ETH_HW_TX_CSUM;
1898 	else
1899 		lif->features &= ~IONIC_ETH_HW_TX_CSUM;
1900 
1901 	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
1902 		lif->features |= IONIC_ETH_HW_VLAN_TX_TAG;
1903 	else
1904 		lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG;
1905 
1906 	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
1907 		lif->features |= IONIC_ETH_HW_TX_SG;
1908 	else
1909 		lif->features &= ~IONIC_ETH_HW_TX_SG;
1910 
1911 	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
1912 		lif->features |= IONIC_ETH_HW_TSO;
1913 		lif->features |= IONIC_ETH_HW_TSO_IPV6;
1914 		lif->features |= IONIC_ETH_HW_TSO_ECN;
1915 	} else {
1916 		lif->features &= ~IONIC_ETH_HW_TSO;
1917 		lif->features &= ~IONIC_ETH_HW_TSO_IPV6;
1918 		lif->features &= ~IONIC_ETH_HW_TSO_ECN;
1919 	}
1920 }
1921 
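/*
 * Bring the LIF up: program RSS, apply a default RX mode if none is set,
 * start all non-deferred RX/TX queues, and refresh the link status.
 */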
1922 int
1923 ionic_lif_start(struct ionic_lif *lif)
1924 {
1925 	uint32_t rx_mode;
1926 	uint32_t i;
1927 	int err;
1928 
1929 	err = ionic_lif_rss_setup(lif);
1930 	if (err)
1931 		return err;
1932 
1933 	if (!lif->rx_mode) {
1934 		IONIC_PRINT(DEBUG, "Setting RX mode on %s",
1935 			lif->name);
1936 
1937 		rx_mode  = IONIC_RX_MODE_F_UNICAST;
1938 		rx_mode |= IONIC_RX_MODE_F_MULTICAST;
1939 		rx_mode |= IONIC_RX_MODE_F_BROADCAST;
1940 
1941 		ionic_set_rx_mode(lif, rx_mode);
1942 	}
1943 
1944 	IONIC_PRINT(DEBUG, "Starting %u RX queues and %u TX queues "
1945 		"on port %u",
1946 		lif->nrxqcqs, lif->ntxqcqs, lif->port_id);
1947 
1948 	for (i = 0; i < lif->nrxqcqs; i++) {
1949 		struct ionic_rx_qcq *rxq = lif->rxqcqs[i];
1950 		if (!(rxq->flags & IONIC_QCQ_F_DEFERRED)) {
1951 			err = ionic_dev_rx_queue_start(lif->eth_dev, i);
1952 
1953 			if (err)
1954 				return err;
1955 		}
1956 	}
1957 
1958 	for (i = 0; i < lif->ntxqcqs; i++) {
1959 		struct ionic_tx_qcq *txq = lif->txqcqs[i];
1960 		if (!(txq->flags & IONIC_QCQ_F_DEFERRED)) {
1961 			err = ionic_dev_tx_queue_start(lif->eth_dev, i);
1962 
1963 			if (err)
1964 				return err;
1965 		}
1966 	}
1967 
1968 	/* Carrier ON here */
1969 	lif->state |= IONIC_LIF_F_UP;
1970 
1971 	ionic_link_status_check(lif);
1972 
1973 	return 0;
1974 }
1975 
1976 int
1977 ionic_lif_identify(struct ionic_adapter *adapter)
1978 {
1979 	struct ionic_dev *idev = &adapter->idev;
1980 	struct ionic_identity *ident = &adapter->ident;
1981 	union ionic_lif_config *cfg = &ident->lif.eth.config;
1982 	uint32_t lif_words = RTE_DIM(ident->lif.words);
1983 	uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data);
1984 	uint32_t i, nwords;
1985 	int err;
1986 
1987 	ionic_dev_cmd_lif_identify(idev, IONIC_LIF_TYPE_CLASSIC,
1988 		IONIC_IDENTITY_VERSION_1);
1989 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
1990 	if (err)
1991 		return err;
1992 
1993 	nwords = RTE_MIN(lif_words, cmd_words);
1994 	for (i = 0; i < nwords; i++)
1995 		ident->lif.words[i] = ioread32(&idev->dev_cmd->data[i]);
1996 
1997 	IONIC_PRINT(INFO, "capabilities 0x%" PRIx64 " ",
1998 		rte_le_to_cpu_64(ident->lif.capabilities));
1999 
2000 	IONIC_PRINT(INFO, "eth.max_ucast_filters 0x%" PRIx32 " ",
2001 		rte_le_to_cpu_32(ident->lif.eth.max_ucast_filters));
2002 	IONIC_PRINT(INFO, "eth.max_mcast_filters 0x%" PRIx32 " ",
2003 		rte_le_to_cpu_32(ident->lif.eth.max_mcast_filters));
2004 
2005 	IONIC_PRINT(INFO, "eth.features 0x%" PRIx64 " ",
2006 		rte_le_to_cpu_64(cfg->features));
2007 	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_ADMINQ] 0x%" PRIx32 " ",
2008 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_ADMINQ]));
2009 	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] 0x%" PRIx32 " ",
2010 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_NOTIFYQ]));
2011 	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_RXQ] 0x%" PRIx32 " ",
2012 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]));
2013 	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_TXQ] 0x%" PRIx32 " ",
2014 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]));
2015 
2016 	return 0;
2017 }
2018 
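/*
 * Derive the per-LIF queue limits and the interrupt budget (one vector,
 * used by the notify queue) from the identity data; fail if the device
 * exposes fewer interrupts than required.
 */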
2019 int
2020 ionic_lifs_size(struct ionic_adapter *adapter)
2021 {
2022 	struct ionic_identity *ident = &adapter->ident;
2023 	union ionic_lif_config *cfg = &ident->lif.eth.config;
2024 	uint32_t nintrs, dev_nintrs = rte_le_to_cpu_32(ident->dev.nintrs);
2025 
2026 	adapter->max_ntxqs_per_lif =
2027 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
2028 	adapter->max_nrxqs_per_lif =
2029 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]);
2030 
2031 	nintrs = 1 /* notifyq */;
2032 
2033 	if (nintrs > dev_nintrs) {
2034 		IONIC_PRINT(ERR,
2035 			"At most %d intr supported, minimum req'd is %u",
2036 			dev_nintrs, nintrs);
2037 		return -ENOSPC;
2038 	}
2039 
2040 	adapter->nintrs = nintrs;
2041 
2042 	return 0;
2043 }
2044