xref: /dpdk/drivers/net/ionic/ionic_lif.c (revision 0f1dc8cb671203d52488fd66936f2fe6dcca03cc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2022 Advanced Micro Devices, Inc.
3  */
4 
5 #include <rte_malloc.h>
6 #include <ethdev_driver.h>
7 
8 #include "ionic.h"
9 #include "ionic_logs.h"
10 #include "ionic_lif.h"
11 #include "ionic_ethdev.h"
12 #include "ionic_rx_filter.h"
13 #include "ionic_rxtx.h"
14 
/* queuetype support level
 *
 * Highest queue-type version this driver supports, indexed by queue type.
 * Reported to the FW via ionic_lif_queue_identify(); the per-version
 * feature meanings are listed next to each entry.
 */
static const uint8_t ionic_qtype_vers[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 2,   /* 0 = Base version with CQ+SG support
				      * 1 =       ... with EQ
				      * 2 =       ... with CMB
				      */
	[IONIC_QTYPE_TXQ]     = 3,   /* 0 = Base version with CQ+SG support
				      * 1 =   ... with Tx SG version 1
				      * 2 =       ... with EQ
				      * 3 =       ... with CMB
				      */
};
29 
30 static int ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr);
31 static int ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr);
32 
33 static int
34 ionic_qcq_disable_nowait(struct ionic_qcq *qcq,
35 		struct ionic_admin_ctx *ctx)
36 {
37 	int err;
38 
39 	struct ionic_queue *q = &qcq->q;
40 	struct ionic_lif *lif = qcq->lif;
41 
42 	*ctx = (struct ionic_admin_ctx) {
43 		.pending_work = true,
44 		.cmd.q_control = {
45 			.opcode = IONIC_CMD_Q_CONTROL,
46 			.type = q->type,
47 			.index = rte_cpu_to_le_32(q->index),
48 			.oper = IONIC_Q_DISABLE,
49 		},
50 	};
51 
52 	/* Does not wait for command completion */
53 	err = ionic_adminq_post(lif, ctx);
54 	if (err)
55 		ctx->pending_work = false;
56 	return err;
57 }
58 
/*
 * Stop all rx and tx queues on the LIF and clear the UP flag.
 *
 * Queues are stopped in batches: each "firsthalf" call posts a disable
 * command without waiting, and the matching "secondhalf" call completes
 * it, so up to `chunk` disables are in flight on the adminq at once.
 */
void
ionic_lif_stop(struct ionic_lif *lif)
{
	struct rte_eth_dev *dev = lif->eth_dev;
	uint32_t i, j, chunk;

	IONIC_PRINT_CALL();

	lif->state &= ~IONIC_LIF_F_UP;

	/*
	 * Batch size is the current free space on the admin queue.
	 * NOTE(review): assumes this is nonzero here; a zero chunk would
	 * make the `i += chunk` loops below spin forever -- confirm that
	 * the adminq is drained before stop is invoked.
	 */
	chunk = ionic_adminq_space_avail(lif);

	for (i = 0; i < lif->nrxqcqs; i += chunk) {
		for (j = 0; j < chunk && i + j < lif->nrxqcqs; j++)
			ionic_dev_rx_queue_stop_firsthalf(dev, i + j);

		for (j = 0; j < chunk && i + j < lif->nrxqcqs; j++)
			ionic_dev_rx_queue_stop_secondhalf(dev, i + j);
	}

	for (i = 0; i < lif->ntxqcqs; i += chunk) {
		for (j = 0; j < chunk && i + j < lif->ntxqcqs; j++)
			ionic_dev_tx_queue_stop_firsthalf(dev, i + j);

		for (j = 0; j < chunk && i + j < lif->ntxqcqs; j++)
			ionic_dev_tx_queue_stop_secondhalf(dev, i + j);
	}
}
87 
88 void
89 ionic_lif_reset(struct ionic_lif *lif)
90 {
91 	struct ionic_dev *idev = &lif->adapter->idev;
92 	int err;
93 
94 	IONIC_PRINT_CALL();
95 
96 	ionic_dev_cmd_lif_reset(idev);
97 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
98 	if (err)
99 		IONIC_PRINT(WARNING, "Failed to reset %s", lif->name);
100 }
101 
102 static void
103 ionic_lif_get_abs_stats(const struct ionic_lif *lif, struct rte_eth_stats *stats)
104 {
105 	struct ionic_lif_stats *ls = &lif->info->stats;
106 	uint32_t i;
107 	uint32_t num_rx_q_counters = RTE_MIN(lif->nrxqcqs, (uint32_t)
108 			RTE_ETHDEV_QUEUE_STAT_CNTRS);
109 	uint32_t num_tx_q_counters = RTE_MIN(lif->ntxqcqs, (uint32_t)
110 			RTE_ETHDEV_QUEUE_STAT_CNTRS);
111 
112 	memset(stats, 0, sizeof(*stats));
113 
114 	if (ls == NULL) {
115 		IONIC_PRINT(DEBUG, "Stats on port %u not yet initialized",
116 			lif->port_id);
117 		return;
118 	}
119 
120 	/* RX */
121 
122 	stats->ipackets = ls->rx_ucast_packets +
123 		ls->rx_mcast_packets +
124 		ls->rx_bcast_packets;
125 
126 	stats->ibytes = ls->rx_ucast_bytes +
127 		ls->rx_mcast_bytes +
128 		ls->rx_bcast_bytes;
129 
130 	for (i = 0; i < lif->nrxqcqs; i++) {
131 		struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats;
132 		stats->ierrors +=
133 			rx_stats->bad_cq_status +
134 			rx_stats->bad_len;
135 	}
136 
137 	stats->imissed +=
138 		ls->rx_ucast_drop_packets +
139 		ls->rx_mcast_drop_packets +
140 		ls->rx_bcast_drop_packets;
141 
142 	stats->ierrors +=
143 		ls->rx_dma_error +
144 		ls->rx_desc_fetch_error +
145 		ls->rx_desc_data_error;
146 
147 	for (i = 0; i < num_rx_q_counters; i++) {
148 		struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats;
149 		stats->q_ipackets[i] = rx_stats->packets;
150 		stats->q_ibytes[i] = rx_stats->bytes;
151 		stats->q_errors[i] =
152 			rx_stats->bad_cq_status +
153 			rx_stats->bad_len;
154 	}
155 
156 	/* TX */
157 
158 	stats->opackets = ls->tx_ucast_packets +
159 		ls->tx_mcast_packets +
160 		ls->tx_bcast_packets;
161 
162 	stats->obytes = ls->tx_ucast_bytes +
163 		ls->tx_mcast_bytes +
164 		ls->tx_bcast_bytes;
165 
166 	for (i = 0; i < lif->ntxqcqs; i++) {
167 		struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats;
168 		stats->oerrors += tx_stats->drop;
169 	}
170 
171 	stats->oerrors +=
172 		ls->tx_ucast_drop_packets +
173 		ls->tx_mcast_drop_packets +
174 		ls->tx_bcast_drop_packets;
175 
176 	stats->oerrors +=
177 		ls->tx_dma_error +
178 		ls->tx_queue_disabled +
179 		ls->tx_desc_fetch_error +
180 		ls->tx_desc_data_error;
181 
182 	for (i = 0; i < num_tx_q_counters; i++) {
183 		struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats;
184 		stats->q_opackets[i] = tx_stats->packets;
185 		stats->q_obytes[i] = tx_stats->bytes;
186 	}
187 }
188 
189 void
190 ionic_lif_get_stats(const struct ionic_lif *lif,
191 		struct rte_eth_stats *stats)
192 {
193 	ionic_lif_get_abs_stats(lif, stats);
194 
195 	stats->ipackets  -= lif->stats_base.ipackets;
196 	stats->opackets  -= lif->stats_base.opackets;
197 	stats->ibytes    -= lif->stats_base.ibytes;
198 	stats->obytes    -= lif->stats_base.obytes;
199 	stats->imissed   -= lif->stats_base.imissed;
200 	stats->ierrors   -= lif->stats_base.ierrors;
201 	stats->oerrors   -= lif->stats_base.oerrors;
202 	stats->rx_nombuf -= lif->stats_base.rx_nombuf;
203 }
204 
205 void
206 ionic_lif_reset_stats(struct ionic_lif *lif)
207 {
208 	uint32_t i;
209 
210 	for (i = 0; i < lif->nrxqcqs; i++) {
211 		memset(&lif->rxqcqs[i]->stats, 0,
212 			sizeof(struct ionic_rx_stats));
213 		memset(&lif->txqcqs[i]->stats, 0,
214 			sizeof(struct ionic_tx_stats));
215 	}
216 
217 	ionic_lif_get_abs_stats(lif, &lif->stats_base);
218 }
219 
220 void
221 ionic_lif_get_hw_stats(struct ionic_lif *lif, struct ionic_lif_stats *stats)
222 {
223 	uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t);
224 	uint64_t *stats64 = (uint64_t *)stats;
225 	uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats;
226 	uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base;
227 
228 	for (i = 0; i < count; i++)
229 		stats64[i] = lif_stats64[i] - lif_stats64_base[i];
230 }
231 
232 void
233 ionic_lif_reset_hw_stats(struct ionic_lif *lif)
234 {
235 	uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t);
236 	uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats;
237 	uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base;
238 
239 	for (i = 0; i < count; i++)
240 		lif_stats64_base[i] = lif_stats64[i];
241 }
242 
243 static int
244 ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr)
245 {
246 	struct ionic_admin_ctx ctx = {
247 		.pending_work = true,
248 		.cmd.rx_filter_add = {
249 			.opcode = IONIC_CMD_RX_FILTER_ADD,
250 			.match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_MAC),
251 		},
252 	};
253 	int err;
254 
255 	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, RTE_ETHER_ADDR_LEN);
256 
257 	err = ionic_adminq_post_wait(lif, &ctx);
258 	if (err)
259 		return err;
260 
261 	IONIC_PRINT(INFO, "rx_filter add (id %d)",
262 		rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id));
263 
264 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx);
265 }
266 
267 static int
268 ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr)
269 {
270 	struct ionic_admin_ctx ctx = {
271 		.pending_work = true,
272 		.cmd.rx_filter_del = {
273 			.opcode = IONIC_CMD_RX_FILTER_DEL,
274 		},
275 	};
276 	struct ionic_rx_filter *f;
277 	int err;
278 
279 	IONIC_PRINT_CALL();
280 
281 	rte_spinlock_lock(&lif->rx_filters.lock);
282 
283 	f = ionic_rx_filter_by_addr(lif, addr);
284 	if (!f) {
285 		rte_spinlock_unlock(&lif->rx_filters.lock);
286 		return -ENOENT;
287 	}
288 
289 	ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id);
290 	ionic_rx_filter_free(f);
291 
292 	rte_spinlock_unlock(&lif->rx_filters.lock);
293 
294 	err = ionic_adminq_post_wait(lif, &ctx);
295 	if (err)
296 		return err;
297 
298 	IONIC_PRINT(INFO, "rx_filter del (id %d)",
299 		rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id));
300 
301 	return 0;
302 }
303 
304 int
305 ionic_dev_add_mac(struct rte_eth_dev *eth_dev,
306 		struct rte_ether_addr *mac_addr,
307 		uint32_t index __rte_unused, uint32_t pool __rte_unused)
308 {
309 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
310 
311 	IONIC_PRINT_CALL();
312 
313 	return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr);
314 }
315 
316 void
317 ionic_dev_remove_mac(struct rte_eth_dev *eth_dev, uint32_t index)
318 {
319 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
320 	struct ionic_adapter *adapter = lif->adapter;
321 	struct rte_ether_addr *mac_addr;
322 
323 	IONIC_PRINT_CALL();
324 
325 	if (index >= adapter->max_mac_addrs) {
326 		IONIC_PRINT(WARNING,
327 			"Index %u is above MAC filter limit %u",
328 			index, adapter->max_mac_addrs);
329 		return;
330 	}
331 
332 	mac_addr = &eth_dev->data->mac_addrs[index];
333 
334 	if (!rte_is_valid_assigned_ether_addr(mac_addr))
335 		return;
336 
337 	ionic_lif_addr_del(lif, (const uint8_t *)mac_addr);
338 }
339 
340 int
341 ionic_dev_set_mac(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
342 {
343 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
344 
345 	IONIC_PRINT_CALL();
346 
347 	if (mac_addr == NULL) {
348 		IONIC_PRINT(NOTICE, "New mac is null");
349 		return -1;
350 	}
351 
352 	if (!rte_is_zero_ether_addr((struct rte_ether_addr *)lif->mac_addr)) {
353 		IONIC_PRINT(INFO, "Deleting mac addr %pM",
354 			lif->mac_addr);
355 		ionic_lif_addr_del(lif, lif->mac_addr);
356 		memset(lif->mac_addr, 0, RTE_ETHER_ADDR_LEN);
357 	}
358 
359 	IONIC_PRINT(INFO, "Updating mac addr");
360 
361 	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)lif->mac_addr);
362 
363 	return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr);
364 }
365 
366 static int
367 ionic_vlan_rx_add_vid(struct ionic_lif *lif, uint16_t vid)
368 {
369 	struct ionic_admin_ctx ctx = {
370 		.pending_work = true,
371 		.cmd.rx_filter_add = {
372 			.opcode = IONIC_CMD_RX_FILTER_ADD,
373 			.match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_VLAN),
374 			.vlan.vlan = rte_cpu_to_le_16(vid),
375 		},
376 	};
377 	int err;
378 
379 	err = ionic_adminq_post_wait(lif, &ctx);
380 	if (err)
381 		return err;
382 
383 	IONIC_PRINT(INFO, "rx_filter add VLAN %d (id %d)", vid,
384 		rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id));
385 
386 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx);
387 }
388 
389 static int
390 ionic_vlan_rx_kill_vid(struct ionic_lif *lif, uint16_t vid)
391 {
392 	struct ionic_admin_ctx ctx = {
393 		.pending_work = true,
394 		.cmd.rx_filter_del = {
395 			.opcode = IONIC_CMD_RX_FILTER_DEL,
396 		},
397 	};
398 	struct ionic_rx_filter *f;
399 	int err;
400 
401 	IONIC_PRINT_CALL();
402 
403 	rte_spinlock_lock(&lif->rx_filters.lock);
404 
405 	f = ionic_rx_filter_by_vlan(lif, vid);
406 	if (!f) {
407 		rte_spinlock_unlock(&lif->rx_filters.lock);
408 		return -ENOENT;
409 	}
410 
411 	ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id);
412 	ionic_rx_filter_free(f);
413 	rte_spinlock_unlock(&lif->rx_filters.lock);
414 
415 	err = ionic_adminq_post_wait(lif, &ctx);
416 	if (err)
417 		return err;
418 
419 	IONIC_PRINT(INFO, "rx_filter del VLAN %d (id %d)", vid,
420 		rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id));
421 
422 	return 0;
423 }
424 
/*
 * ethdev vlan_filter_set callback: add (on != 0) or remove (on == 0)
 * the VLAN filter for vlan_id.
 */
int
ionic_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id,
		int on)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);

	return on ? ionic_vlan_rx_add_vid(lif, vlan_id) :
		ionic_vlan_rx_kill_vid(lif, vlan_id);
}
439 
440 static void
441 ionic_lif_rx_mode(struct ionic_lif *lif, uint32_t rx_mode)
442 {
443 	struct ionic_admin_ctx ctx = {
444 		.pending_work = true,
445 		.cmd.rx_mode_set = {
446 			.opcode = IONIC_CMD_RX_MODE_SET,
447 			.rx_mode = rte_cpu_to_le_16(rx_mode),
448 		},
449 	};
450 	int err;
451 
452 	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
453 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_UNICAST");
454 	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
455 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_MULTICAST");
456 	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
457 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_BROADCAST");
458 	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
459 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_PROMISC");
460 	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
461 		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_ALLMULTI");
462 
463 	err = ionic_adminq_post_wait(lif, &ctx);
464 	if (err)
465 		IONIC_PRINT(ERR, "Failure setting RX mode");
466 }
467 
468 static void
469 ionic_set_rx_mode(struct ionic_lif *lif, uint32_t rx_mode)
470 {
471 	if (lif->rx_mode != rx_mode) {
472 		lif->rx_mode = rx_mode;
473 		ionic_lif_rx_mode(lif, rx_mode);
474 	}
475 }
476 
477 int
478 ionic_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
479 {
480 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
481 	uint32_t rx_mode = lif->rx_mode;
482 
483 	IONIC_PRINT_CALL();
484 
485 	rx_mode |= IONIC_RX_MODE_F_PROMISC;
486 
487 	ionic_set_rx_mode(lif, rx_mode);
488 
489 	return 0;
490 }
491 
492 int
493 ionic_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
494 {
495 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
496 	uint32_t rx_mode = lif->rx_mode;
497 
498 	rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
499 
500 	ionic_set_rx_mode(lif, rx_mode);
501 
502 	return 0;
503 }
504 
505 int
506 ionic_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
507 {
508 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
509 	uint32_t rx_mode = lif->rx_mode;
510 
511 	rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
512 
513 	ionic_set_rx_mode(lif, rx_mode);
514 
515 	return 0;
516 }
517 
518 int
519 ionic_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
520 {
521 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
522 	uint32_t rx_mode = lif->rx_mode;
523 
524 	rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
525 
526 	ionic_set_rx_mode(lif, rx_mode);
527 
528 	return 0;
529 }
530 
531 int
532 ionic_lif_change_mtu(struct ionic_lif *lif, uint32_t new_mtu)
533 {
534 	struct ionic_admin_ctx ctx = {
535 		.pending_work = true,
536 		.cmd.lif_setattr = {
537 			.opcode = IONIC_CMD_LIF_SETATTR,
538 			.attr = IONIC_LIF_ATTR_MTU,
539 			.mtu = rte_cpu_to_le_32(new_mtu),
540 		},
541 	};
542 
543 	return ionic_adminq_post_wait(lif, &ctx);
544 }
545 
546 int
547 ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
548 {
549 	struct ionic_adapter *adapter = lif->adapter;
550 	struct ionic_dev *idev = &adapter->idev;
551 	unsigned long index;
552 
553 	/*
554 	 * Note: interrupt handler is called for index = 0 only
555 	 * (we use interrupts for the notifyq only anyway,
556 	 * which has index = 0)
557 	 */
558 
559 	for (index = 0; index < adapter->nintrs; index++)
560 		if (!adapter->intrs[index])
561 			break;
562 
563 	if (index == adapter->nintrs)
564 		return -ENOSPC;
565 
566 	adapter->intrs[index] = true;
567 
568 	ionic_intr_init(idev, intr, index);
569 
570 	return 0;
571 }
572 
573 static int
574 ionic_qcq_alloc(struct ionic_lif *lif,
575 		uint8_t type,
576 		size_t struct_size,
577 		uint32_t socket_id,
578 		uint32_t index,
579 		const char *type_name,
580 		uint16_t flags,
581 		uint16_t num_descs,
582 		uint16_t num_segs,
583 		uint16_t desc_size,
584 		uint16_t cq_desc_size,
585 		uint16_t sg_desc_size,
586 		struct ionic_qcq **qcq)
587 {
588 	struct ionic_qcq *new;
589 	uint32_t q_size, cq_size, sg_size, total_size;
590 	void *q_base, *cmb_q_base, *cq_base, *sg_base;
591 	rte_iova_t q_base_pa = 0;
592 	rte_iova_t cq_base_pa = 0;
593 	rte_iova_t sg_base_pa = 0;
594 	rte_iova_t cmb_q_base_pa = 0;
595 	size_t page_size = rte_mem_page_size();
596 	int err;
597 
598 	*qcq = NULL;
599 
600 	q_size  = num_descs * desc_size;
601 	cq_size = num_descs * cq_desc_size;
602 	sg_size = num_descs * sg_desc_size;
603 
604 	total_size = RTE_ALIGN(q_size, page_size) +
605 			RTE_ALIGN(cq_size, page_size);
606 	/*
607 	 * Note: aligning q_size/cq_size is not enough due to cq_base address
608 	 * aligning as q_base could be not aligned to the page.
609 	 * Adding page_size.
610 	 */
611 	total_size += page_size;
612 
613 	if (flags & IONIC_QCQ_F_SG) {
614 		total_size += RTE_ALIGN(sg_size, page_size);
615 		total_size += page_size;
616 	}
617 
618 	new = rte_zmalloc_socket("ionic", struct_size,
619 				RTE_CACHE_LINE_SIZE, socket_id);
620 	if (!new) {
621 		IONIC_PRINT(ERR, "Cannot allocate queue structure");
622 		return -ENOMEM;
623 	}
624 
625 	new->lif = lif;
626 
627 	/* Most queue types will store 1 ptr per descriptor */
628 	new->q.info = rte_calloc_socket("ionic",
629 				(uint64_t)num_descs * num_segs,
630 				sizeof(void *), page_size, socket_id);
631 	if (!new->q.info) {
632 		IONIC_PRINT(ERR, "Cannot allocate queue info");
633 		err = -ENOMEM;
634 		goto err_out_free_qcq;
635 	}
636 
637 	new->q.num_segs = num_segs;
638 	new->q.type = type;
639 
640 	err = ionic_q_init(&new->q, index, num_descs);
641 	if (err) {
642 		IONIC_PRINT(ERR, "Queue initialization failed");
643 		goto err_out_free_info;
644 	}
645 
646 	err = ionic_cq_init(&new->cq, num_descs);
647 	if (err) {
648 		IONIC_PRINT(ERR, "Completion queue initialization failed");
649 		goto err_out_free_info;
650 	}
651 
652 	new->base_z = rte_eth_dma_zone_reserve(lif->eth_dev,
653 		type_name, index /* queue_idx */,
654 		total_size, IONIC_ALIGN, socket_id);
655 
656 	if (!new->base_z) {
657 		IONIC_PRINT(ERR, "Cannot reserve queue DMA memory");
658 		err = -ENOMEM;
659 		goto err_out_free_info;
660 	}
661 
662 	new->base = new->base_z->addr;
663 	new->base_pa = new->base_z->iova;
664 
665 	q_base = new->base;
666 	q_base_pa = new->base_pa;
667 
668 	cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size, page_size);
669 	cq_base_pa = RTE_ALIGN(q_base_pa + q_size, page_size);
670 
671 	if (flags & IONIC_QCQ_F_SG) {
672 		sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size,
673 				page_size);
674 		sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size, page_size);
675 		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
676 	}
677 
678 	if (flags & IONIC_QCQ_F_CMB) {
679 		/* alloc descriptor ring from nic memory */
680 		if (lif->adapter->cmb_offset + q_size >
681 				lif->adapter->bars.bar[2].len) {
682 			IONIC_PRINT(ERR, "Cannot reserve queue from NIC mem");
683 			return -ENOMEM;
684 		}
685 		cmb_q_base = (void *)
686 			((uintptr_t)lif->adapter->bars.bar[2].vaddr +
687 			 (uintptr_t)lif->adapter->cmb_offset);
688 		/* CMB PA is a relative address */
689 		cmb_q_base_pa = lif->adapter->cmb_offset;
690 		lif->adapter->cmb_offset += q_size;
691 	} else {
692 		cmb_q_base = NULL;
693 		cmb_q_base_pa = 0;
694 	}
695 
696 	IONIC_PRINT(DEBUG, "Q-Base-PA = %#jx CQ-Base-PA = %#jx "
697 		"SG-base-PA = %#jx",
698 		q_base_pa, cq_base_pa, sg_base_pa);
699 
700 	ionic_q_map(&new->q, q_base, q_base_pa, cmb_q_base, cmb_q_base_pa);
701 	ionic_cq_map(&new->cq, cq_base, cq_base_pa);
702 
703 	*qcq = new;
704 
705 	return 0;
706 
707 err_out_free_info:
708 	rte_free(new->q.info);
709 err_out_free_qcq:
710 	rte_free(new);
711 
712 	return err;
713 }
714 
715 void
716 ionic_qcq_free(struct ionic_qcq *qcq)
717 {
718 	if (qcq->base_z) {
719 		qcq->base = NULL;
720 		qcq->base_pa = 0;
721 		rte_memzone_free(qcq->base_z);
722 		qcq->base_z = NULL;
723 	}
724 
725 	if (qcq->q.info) {
726 		rte_free(qcq->q.info);
727 		qcq->q.info = NULL;
728 	}
729 
730 	rte_free(qcq);
731 }
732 
733 static uint64_t
734 ionic_rx_rearm_data(struct ionic_lif *lif)
735 {
736 	struct rte_mbuf rxm;
737 
738 	memset(&rxm, 0, sizeof(rxm));
739 
740 	rte_mbuf_refcnt_set(&rxm, 1);
741 	rxm.data_off = RTE_PKTMBUF_HEADROOM;
742 	rxm.nb_segs = 1;
743 	rxm.port = lif->port_id;
744 
745 	rte_compiler_barrier();
746 
747 	RTE_BUILD_BUG_ON(sizeof(rxm.rearm_data[0]) != sizeof(uint64_t));
748 	return rxm.rearm_data[0];
749 }
750 
751 static uint64_t
752 ionic_rx_seg_rearm_data(struct ionic_lif *lif)
753 {
754 	struct rte_mbuf rxm;
755 
756 	memset(&rxm, 0, sizeof(rxm));
757 
758 	rte_mbuf_refcnt_set(&rxm, 1);
759 	rxm.data_off = 0;  /* no headroom */
760 	rxm.nb_segs = 1;
761 	rxm.port = lif->port_id;
762 
763 	rte_compiler_barrier();
764 
765 	RTE_BUILD_BUG_ON(sizeof(rxm.rearm_data[0]) != sizeof(uint64_t));
766 	return rxm.rearm_data[0];
767 }
768 
/*
 * Allocate an rx queue/completion-queue pair for the given queue index.
 *
 * Decides SG and CMB flags from the LIF features/state, force-enables
 * the ethdev SCATTER offload when a single mbuf from mb_pool cannot
 * hold a max-MTU frame, and sizes the info array for the worst-case
 * number of segments. Stores the new queue in lif->rxqcqs[index] and
 * *rxq_out. Returns 0, -EINVAL if the mbuf size cannot cover max MTU
 * with the FW's SG limit, or the error from ionic_qcq_alloc().
 */
int
ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
		uint16_t nrxq_descs, struct rte_mempool *mb_pool,
		struct ionic_rx_qcq **rxq_out)
{
	struct ionic_rx_qcq *rxq;
	uint16_t flags = 0, seg_size, hdr_seg_size, max_segs, max_segs_fw = 1;
	uint32_t max_mtu;
	int err;

	if (lif->state & IONIC_LIF_F_Q_IN_CMB)
		flags |= IONIC_QCQ_F_CMB;

	seg_size = rte_pktmbuf_data_room_size(mb_pool);

	/* The first mbuf needs to leave headroom */
	hdr_seg_size = seg_size - RTE_PKTMBUF_HEADROOM;

	max_mtu = rte_le_to_cpu_32(lif->adapter->ident.lif.eth.max_mtu);

	/* If mbufs are too small to hold received packets, enable SG */
	if (max_mtu > hdr_seg_size &&
	    !(lif->features & IONIC_ETH_HW_RX_SG)) {
		IONIC_PRINT(NOTICE, "Enabling RX_OFFLOAD_SCATTER");
		lif->eth_dev->data->dev_conf.rxmode.offloads |=
			RTE_ETH_RX_OFFLOAD_SCATTER;
		ionic_lif_configure_rx_sg_offload(lif);
	}

	/* SG may have just been enabled by the call above */
	if (lif->features & IONIC_ETH_HW_RX_SG) {
		flags |= IONIC_QCQ_F_SG;
		max_segs_fw = IONIC_RX_MAX_SG_ELEMS + 1;
	}

	/*
	 * Calculate how many fragment pointers might be stored in queue.
	 * This is the worst-case number, so that there's enough room in
	 * the info array.
	 */
	max_segs = 1 + (max_mtu + RTE_PKTMBUF_HEADROOM - 1) / seg_size;

	IONIC_PRINT(DEBUG, "rxq %u max_mtu %u seg_size %u max_segs %u",
		index, max_mtu, seg_size, max_segs);
	if (max_segs > max_segs_fw) {
		IONIC_PRINT(ERR, "Rx mbuf size insufficient (%d > %d avail)",
			max_segs, max_segs_fw);
		return -EINVAL;
	}

	err = ionic_qcq_alloc(lif,
		IONIC_QTYPE_RXQ,
		sizeof(struct ionic_rx_qcq),
		socket_id,
		index,
		"rx",
		flags,
		nrxq_descs,
		max_segs,
		sizeof(struct ionic_rxq_desc),
		sizeof(struct ionic_rxq_comp),
		sizeof(struct ionic_rxq_sg_desc),
		(struct ionic_qcq **)&rxq);
	if (err)
		return err;

	rxq->flags = flags;
	rxq->seg_size = seg_size;
	rxq->hdr_seg_size = hdr_seg_size;
	/* Precomputed mbuf-header templates for the rx rearm fast path */
	rxq->rearm_data = ionic_rx_rearm_data(lif);
	rxq->rearm_seg_data = ionic_rx_seg_rearm_data(lif);

	lif->rxqcqs[index] = rxq;
	*rxq_out = rxq;

	return 0;
}
845 
846 int
847 ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
848 		uint16_t ntxq_descs, struct ionic_tx_qcq **txq_out)
849 {
850 	struct ionic_tx_qcq *txq;
851 	uint16_t flags = 0, num_segs_fw = 1;
852 	int err;
853 
854 	if (lif->features & IONIC_ETH_HW_TX_SG) {
855 		flags |= IONIC_QCQ_F_SG;
856 		num_segs_fw = IONIC_TX_MAX_SG_ELEMS_V1 + 1;
857 	}
858 	if (lif->state & IONIC_LIF_F_Q_IN_CMB)
859 		flags |= IONIC_QCQ_F_CMB;
860 
861 	IONIC_PRINT(DEBUG, "txq %u num_segs %u", index, num_segs_fw);
862 
863 	err = ionic_qcq_alloc(lif,
864 		IONIC_QTYPE_TXQ,
865 		sizeof(struct ionic_tx_qcq),
866 		socket_id,
867 		index,
868 		"tx",
869 		flags,
870 		ntxq_descs,
871 		num_segs_fw,
872 		sizeof(struct ionic_txq_desc),
873 		sizeof(struct ionic_txq_comp),
874 		sizeof(struct ionic_txq_sg_desc_v1),
875 		(struct ionic_qcq **)&txq);
876 	if (err)
877 		return err;
878 
879 	txq->flags = flags;
880 	txq->num_segs_fw = num_segs_fw;
881 
882 	lif->txqcqs[index] = txq;
883 	*txq_out = txq;
884 
885 	return 0;
886 }
887 
888 static int
889 ionic_admin_qcq_alloc(struct ionic_lif *lif)
890 {
891 	uint16_t flags = 0;
892 	int err;
893 
894 	err = ionic_qcq_alloc(lif,
895 		IONIC_QTYPE_ADMINQ,
896 		sizeof(struct ionic_admin_qcq),
897 		rte_socket_id(),
898 		0,
899 		"admin",
900 		flags,
901 		IONIC_ADMINQ_LENGTH,
902 		1,
903 		sizeof(struct ionic_admin_cmd),
904 		sizeof(struct ionic_admin_comp),
905 		0,
906 		(struct ionic_qcq **)&lif->adminqcq);
907 	if (err)
908 		return err;
909 
910 	return 0;
911 }
912 
913 static int
914 ionic_notify_qcq_alloc(struct ionic_lif *lif)
915 {
916 	struct ionic_notify_qcq *nqcq;
917 	struct ionic_dev *idev = &lif->adapter->idev;
918 	uint16_t flags = 0;
919 	int err;
920 
921 	err = ionic_qcq_alloc(lif,
922 		IONIC_QTYPE_NOTIFYQ,
923 		sizeof(struct ionic_notify_qcq),
924 		rte_socket_id(),
925 		0,
926 		"notify",
927 		flags,
928 		IONIC_NOTIFYQ_LENGTH,
929 		1,
930 		sizeof(struct ionic_notifyq_cmd),
931 		sizeof(union ionic_notifyq_comp),
932 		0,
933 		(struct ionic_qcq **)&nqcq);
934 	if (err)
935 		return err;
936 
937 	err = ionic_intr_alloc(lif, &nqcq->intr);
938 	if (err) {
939 		ionic_qcq_free(&nqcq->qcq);
940 		return err;
941 	}
942 
943 	ionic_intr_mask_assert(idev->intr_ctrl, nqcq->intr.index,
944 		IONIC_INTR_MASK_SET);
945 
946 	lif->notifyqcq = nqcq;
947 
948 	return 0;
949 }
950 
951 static void
952 ionic_lif_queue_identify(struct ionic_lif *lif)
953 {
954 	struct ionic_adapter *adapter = lif->adapter;
955 	struct ionic_dev *idev = &adapter->idev;
956 	union ionic_q_identity *q_ident = &adapter->ident.txq;
957 	uint32_t q_words = RTE_DIM(q_ident->words);
958 	uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data);
959 	uint32_t i, nwords, qtype;
960 	int err;
961 
962 	for (qtype = 0; qtype < RTE_DIM(ionic_qtype_vers); qtype++) {
963 		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
964 
965 		/* Filter out the types this driver knows about */
966 		switch (qtype) {
967 		case IONIC_QTYPE_ADMINQ:
968 		case IONIC_QTYPE_NOTIFYQ:
969 		case IONIC_QTYPE_RXQ:
970 		case IONIC_QTYPE_TXQ:
971 			break;
972 		default:
973 			continue;
974 		}
975 
976 		memset(qti, 0, sizeof(*qti));
977 
978 		ionic_dev_cmd_queue_identify(idev, IONIC_LIF_TYPE_CLASSIC,
979 			qtype, ionic_qtype_vers[qtype]);
980 		err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
981 		if (err == -EINVAL) {
982 			IONIC_PRINT(ERR, "qtype %d not supported\n", qtype);
983 			continue;
984 		} else if (err == -EIO) {
985 			IONIC_PRINT(ERR, "q_ident failed, older FW\n");
986 			return;
987 		} else if (err) {
988 			IONIC_PRINT(ERR, "q_ident failed, qtype %d: %d\n",
989 				qtype, err);
990 			return;
991 		}
992 
993 		nwords = RTE_MIN(q_words, cmd_words);
994 		for (i = 0; i < nwords; i++)
995 			q_ident->words[i] = ioread32(&idev->dev_cmd->data[i]);
996 
997 		qti->version   = q_ident->version;
998 		qti->supported = q_ident->supported;
999 		qti->features  = rte_le_to_cpu_64(q_ident->features);
1000 		qti->desc_sz   = rte_le_to_cpu_16(q_ident->desc_sz);
1001 		qti->comp_sz   = rte_le_to_cpu_16(q_ident->comp_sz);
1002 		qti->sg_desc_sz   = rte_le_to_cpu_16(q_ident->sg_desc_sz);
1003 		qti->max_sg_elems = rte_le_to_cpu_16(q_ident->max_sg_elems);
1004 		qti->sg_desc_stride =
1005 			rte_le_to_cpu_16(q_ident->sg_desc_stride);
1006 
1007 		IONIC_PRINT(DEBUG, " qtype[%d].version = %d",
1008 			qtype, qti->version);
1009 		IONIC_PRINT(DEBUG, " qtype[%d].supported = %#x",
1010 			qtype, qti->supported);
1011 		IONIC_PRINT(DEBUG, " qtype[%d].features = %#jx",
1012 			qtype, qti->features);
1013 		IONIC_PRINT(DEBUG, " qtype[%d].desc_sz = %d",
1014 			qtype, qti->desc_sz);
1015 		IONIC_PRINT(DEBUG, " qtype[%d].comp_sz = %d",
1016 			qtype, qti->comp_sz);
1017 		IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_sz = %d",
1018 			qtype, qti->sg_desc_sz);
1019 		IONIC_PRINT(DEBUG, " qtype[%d].max_sg_elems = %d",
1020 			qtype, qti->max_sg_elems);
1021 		IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_stride = %d",
1022 			qtype, qti->sg_desc_stride);
1023 	}
1024 }
1025 
/*
 * Allocate the per-LIF software resources: qtype identification,
 * queue pointer arrays, notify and admin queues, and the DMA-able
 * lif_info region. Returns 0 or negative errno.
 *
 * NOTE(review): error paths return without freeing earlier allocations
 * (e.g. txqcqs when rxqcqs fails) -- presumably the caller invokes
 * ionic_lif_free() on failure; confirm against the caller.
 */
int
ionic_lif_alloc(struct ionic_lif *lif)
{
	struct ionic_adapter *adapter = lif->adapter;
	uint32_t socket_id = rte_socket_id();
	int err;

	/*
	 * lif->name was zeroed on allocation.
	 * Copy (sizeof() - 1) bytes to ensure that it is NULL terminated.
	 */
	memcpy(lif->name, lif->eth_dev->data->name, sizeof(lif->name) - 1);

	IONIC_PRINT(DEBUG, "LIF: %s", lif->name);

	ionic_lif_queue_identify(lif);

	/* Tx SG v1 (TXQ version >= 1) is the minimum this driver supports */
	if (lif->qtype_info[IONIC_QTYPE_TXQ].version < 1) {
		IONIC_PRINT(ERR, "FW too old, please upgrade");
		return -ENXIO;
	}

	/* CMB queues need bar 2 plus RXQ v2 / TXQ v3 support */
	if (adapter->q_in_cmb) {
		if (adapter->bars.num_bars >= 3 &&
		    lif->qtype_info[IONIC_QTYPE_RXQ].version >= 2 &&
		    lif->qtype_info[IONIC_QTYPE_TXQ].version >= 3) {
			IONIC_PRINT(INFO, "%s enabled on %s",
				PMD_IONIC_CMB_KVARG, lif->name);
			lif->state |= IONIC_LIF_F_Q_IN_CMB;
		} else {
			IONIC_PRINT(ERR, "%s not supported on %s, disabled",
				PMD_IONIC_CMB_KVARG, lif->name);
		}
	}

	IONIC_PRINT(DEBUG, "Allocating Lif Info");

	rte_spinlock_init(&lif->adminq_lock);
	rte_spinlock_init(&lif->adminq_service_lock);

	lif->kern_dbpage = adapter->idev.db_pages;
	if (!lif->kern_dbpage) {
		IONIC_PRINT(ERR, "Cannot map dbpage, aborting");
		return -ENOMEM;
	}

	/* Arrays of per-queue qcq pointers, sized for the adapter maxima */
	lif->txqcqs = rte_calloc_socket("ionic",
				adapter->max_ntxqs_per_lif,
				sizeof(*lif->txqcqs),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (!lif->txqcqs) {
		IONIC_PRINT(ERR, "Cannot allocate tx queues array");
		return -ENOMEM;
	}

	lif->rxqcqs = rte_calloc_socket("ionic",
				adapter->max_nrxqs_per_lif,
				sizeof(*lif->rxqcqs),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (!lif->rxqcqs) {
		IONIC_PRINT(ERR, "Cannot allocate rx queues array");
		return -ENOMEM;
	}

	IONIC_PRINT(DEBUG, "Allocating Notify Queue");

	err = ionic_notify_qcq_alloc(lif);
	if (err) {
		IONIC_PRINT(ERR, "Cannot allocate notify queue");
		return err;
	}

	IONIC_PRINT(DEBUG, "Allocating Admin Queue");

	err = ionic_admin_qcq_alloc(lif);
	if (err) {
		IONIC_PRINT(ERR, "Cannot allocate admin queue");
		return err;
	}

	IONIC_PRINT(DEBUG, "Allocating Lif Info");

	/* Page-aligned DMA region the FW writes LIF status/stats into */
	lif->info_sz = RTE_ALIGN(sizeof(*lif->info), rte_mem_page_size());

	lif->info_z = rte_eth_dma_zone_reserve(lif->eth_dev,
		"lif_info", 0 /* queue_idx*/,
		lif->info_sz, IONIC_ALIGN, socket_id);
	if (!lif->info_z) {
		IONIC_PRINT(ERR, "Cannot allocate lif info memory");
		return -ENOMEM;
	}

	lif->info = lif->info_z->addr;
	lif->info_pa = lif->info_z->iova;

	return 0;
}
1123 
1124 void
1125 ionic_lif_free(struct ionic_lif *lif)
1126 {
1127 	if (lif->notifyqcq) {
1128 		ionic_qcq_free(&lif->notifyqcq->qcq);
1129 		lif->notifyqcq = NULL;
1130 	}
1131 
1132 	if (lif->adminqcq) {
1133 		ionic_qcq_free(&lif->adminqcq->qcq);
1134 		lif->adminqcq = NULL;
1135 	}
1136 
1137 	if (lif->txqcqs) {
1138 		rte_free(lif->txqcqs);
1139 		lif->txqcqs = NULL;
1140 	}
1141 
1142 	if (lif->rxqcqs) {
1143 		rte_free(lif->rxqcqs);
1144 		lif->rxqcqs = NULL;
1145 	}
1146 
1147 	if (lif->info) {
1148 		rte_memzone_free(lif->info_z);
1149 		lif->info = NULL;
1150 	}
1151 }
1152 
1153 void
1154 ionic_lif_free_queues(struct ionic_lif *lif)
1155 {
1156 	uint32_t i;
1157 
1158 	for (i = 0; i < lif->ntxqcqs; i++) {
1159 		ionic_dev_tx_queue_release(lif->eth_dev, i);
1160 		lif->eth_dev->data->tx_queues[i] = NULL;
1161 	}
1162 	for (i = 0; i < lif->nrxqcqs; i++) {
1163 		ionic_dev_rx_queue_release(lif->eth_dev, i);
1164 		lif->eth_dev->data->rx_queues[i] = NULL;
1165 	}
1166 }
1167 
1168 int
1169 ionic_lif_rss_config(struct ionic_lif *lif,
1170 		const uint16_t types, const uint8_t *key, const uint32_t *indir)
1171 {
1172 	struct ionic_adapter *adapter = lif->adapter;
1173 	struct ionic_admin_ctx ctx = {
1174 		.pending_work = true,
1175 		.cmd.lif_setattr = {
1176 			.opcode = IONIC_CMD_LIF_SETATTR,
1177 			.attr = IONIC_LIF_ATTR_RSS,
1178 			.rss.types = rte_cpu_to_le_16(types),
1179 			.rss.addr = rte_cpu_to_le_64(lif->rss_ind_tbl_pa),
1180 		},
1181 	};
1182 	unsigned int i;
1183 	uint16_t tbl_sz =
1184 		rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz);
1185 
1186 	IONIC_PRINT_CALL();
1187 
1188 	lif->rss_types = types;
1189 
1190 	if (key)
1191 		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1192 
1193 	if (indir)
1194 		for (i = 0; i < tbl_sz; i++)
1195 			lif->rss_ind_tbl[i] = indir[i];
1196 
1197 	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1198 	       IONIC_RSS_HASH_KEY_SIZE);
1199 
1200 	return ionic_adminq_post_wait(lif, &ctx);
1201 }
1202 
/*
 * Set up RSS with a symmetric Toeplitz key and a round-robin default
 * indirection table, allocating the DMA-able table on first use.
 */
static int
ionic_lif_rss_setup(struct ionic_lif *lif)
{
	struct ionic_adapter *adapter = lif->adapter;
	/* Repeated 0x6D5A pattern: the well-known symmetric Toeplitz key */
	static const uint8_t toeplitz_symmetric_key[] = {
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
	};
	uint32_t i;
	uint16_t tbl_sz =
		rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz);

	IONIC_PRINT_CALL();

	/* Allocate the indirection table only once; it persists across restarts */
	if (!lif->rss_ind_tbl_z) {
		lif->rss_ind_tbl_z = rte_eth_dma_zone_reserve(lif->eth_dev,
					"rss_ind_tbl", 0 /* queue_idx */,
					sizeof(*lif->rss_ind_tbl) * tbl_sz,
					IONIC_ALIGN, rte_socket_id());
		if (!lif->rss_ind_tbl_z) {
			IONIC_PRINT(ERR, "OOM");
			return -ENOMEM;
		}

		lif->rss_ind_tbl = lif->rss_ind_tbl_z->addr;
		lif->rss_ind_tbl_pa = lif->rss_ind_tbl_z->iova;
	}

	/* Re-spread queues only when the Rx queue count has changed */
	if (lif->rss_ind_tbl_nrxqcqs != lif->nrxqcqs) {
		lif->rss_ind_tbl_nrxqcqs = lif->nrxqcqs;

		/* Fill indirection table with 'default' values */
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = i % lif->nrxqcqs;
	}

	return ionic_lif_rss_config(lif, IONIC_RSS_OFFLOAD_ALL,
			toeplitz_symmetric_key, NULL);
}
1245 
1246 static void
1247 ionic_lif_rss_teardown(struct ionic_lif *lif)
1248 {
1249 	if (lif->rss_ind_tbl) {
1250 		lif->rss_ind_tbl = NULL;
1251 		lif->rss_ind_tbl_pa = 0;
1252 		rte_memzone_free(lif->rss_ind_tbl_z);
1253 		lif->rss_ind_tbl_z = NULL;
1254 	}
1255 }
1256 
/*
 * Post a Q_DISABLE for this Tx queue without waiting for completion;
 * the caller must later poll/complete txq->admin_ctx.
 */
void
ionic_lif_txq_deinit_nowait(struct ionic_tx_qcq *txq)
{
	ionic_qcq_disable_nowait(&txq->qcq, &txq->admin_ctx);

	txq->flags &= ~IONIC_QCQ_F_INITED;
}
1264 
1265 void
1266 ionic_lif_txq_stats(struct ionic_tx_qcq *txq)
1267 {
1268 	struct ionic_tx_stats *stats = &txq->stats;
1269 
1270 	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
1271 		txq->qcq.q.index, stats->packets, stats->tso);
1272 	IONIC_PRINT(DEBUG, "TX queue %u comps %ju (%ju per)",
1273 		txq->qcq.q.index, stats->comps,
1274 		stats->comps ? stats->packets / stats->comps : 0);
1275 }
1276 
/*
 * Post a Q_DISABLE for this Rx queue without waiting for completion;
 * the caller must later poll/complete rxq->admin_ctx.
 */
void
ionic_lif_rxq_deinit_nowait(struct ionic_rx_qcq *rxq)
{
	ionic_qcq_disable_nowait(&rxq->qcq, &rxq->admin_ctx);

	rxq->flags &= ~IONIC_QCQ_F_INITED;
}
1284 
1285 void
1286 ionic_lif_rxq_stats(struct ionic_rx_qcq *rxq)
1287 {
1288 	struct ionic_rx_stats *stats = &rxq->stats;
1289 
1290 	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
1291 		rxq->qcq.q.index, stats->packets, stats->mtods);
1292 }
1293 
/* Mark the admin queue uninitialized; no device command is issued. */
static void
ionic_lif_adminq_deinit(struct ionic_lif *lif)
{
	lif->adminqcq->flags &= ~IONIC_QCQ_F_INITED;
}
1299 
/* Quiesce the notify queue: mask its interrupt and clear INITED. */
static void
ionic_lif_notifyq_deinit(struct ionic_lif *lif)
{
	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
	struct ionic_dev *idev = &lif->adapter->idev;

	/* Nothing to do if the queue was never initialized */
	if (!(nqcq->flags & IONIC_QCQ_F_INITED))
		return;

	/* Mask the interrupt so no further notifyq events are delivered */
	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_SET);

	nqcq->flags &= ~IONIC_QCQ_F_INITED;
}
1314 
1315 /* This acts like ionic_napi */
1316 int
1317 ionic_qcq_service(struct ionic_qcq *qcq, int budget, ionic_cq_cb cb,
1318 		void *cb_arg)
1319 {
1320 	struct ionic_cq *cq = &qcq->cq;
1321 	uint32_t work_done;
1322 
1323 	work_done = ionic_cq_service(cq, budget, cb, cb_arg);
1324 
1325 	return work_done;
1326 }
1327 
1328 static void
1329 ionic_link_status_check(struct ionic_lif *lif)
1330 {
1331 	struct ionic_adapter *adapter = lif->adapter;
1332 	bool link_up;
1333 
1334 	lif->state &= ~IONIC_LIF_F_LINK_CHECK_NEEDED;
1335 
1336 	if (!lif->info)
1337 		return;
1338 
1339 	link_up = (lif->info->status.link_status == IONIC_PORT_OPER_STATUS_UP);
1340 
1341 	if ((link_up  && adapter->link_up) ||
1342 	    (!link_up && !adapter->link_up))
1343 		return;
1344 
1345 	if (link_up) {
1346 		adapter->link_speed =
1347 			rte_le_to_cpu_32(lif->info->status.link_speed);
1348 		IONIC_PRINT(DEBUG, "Link up - %d Gbps",
1349 			adapter->link_speed);
1350 	} else {
1351 		IONIC_PRINT(DEBUG, "Link down");
1352 	}
1353 
1354 	adapter->link_up = link_up;
1355 	ionic_dev_link_update(lif->eth_dev, 0);
1356 }
1357 
1358 static void
1359 ionic_lif_handle_fw_down(struct ionic_lif *lif)
1360 {
1361 	if (lif->state & IONIC_LIF_F_FW_RESET)
1362 		return;
1363 
1364 	lif->state |= IONIC_LIF_F_FW_RESET;
1365 
1366 	if (lif->state & IONIC_LIF_F_UP) {
1367 		IONIC_PRINT(NOTICE,
1368 			"Surprise FW stop, stopping %s\n", lif->name);
1369 		ionic_lif_stop(lif);
1370 	}
1371 
1372 	IONIC_PRINT(NOTICE, "FW down, %s stopped", lif->name);
1373 }
1374 
/*
 * Per-descriptor notifyq callback. Returns true if the descriptor held
 * a new event (eid beyond lif->last_eid) that was consumed, false when
 * there are no new completions left to process.
 */
static bool
ionic_notifyq_cb(struct ionic_cq *cq, uint16_t cq_desc_index, void *cb_arg)
{
	union ionic_notifyq_comp *cq_desc_base = cq->base;
	union ionic_notifyq_comp *cq_desc = &cq_desc_base[cq_desc_index];
	struct ionic_lif *lif = cb_arg;

	IONIC_PRINT(DEBUG, "Notifyq callback eid = %jd ecode = %d",
		cq_desc->event.eid, cq_desc->event.ecode);

	/* Have we run out of new completions to process? */
	if (!(cq_desc->event.eid > lif->last_eid))
		return false;

	/* Event ids are monotonic; remember the newest one consumed */
	lif->last_eid = cq_desc->event.eid;

	switch (cq_desc->event.ecode) {
	case IONIC_EVENT_LINK_CHANGE:
		IONIC_PRINT(DEBUG,
			"Notifyq IONIC_EVENT_LINK_CHANGE %s "
			"eid=%jd link_status=%d link_speed=%d",
			lif->name,
			cq_desc->event.eid,
			cq_desc->link_change.link_status,
			cq_desc->link_change.link_speed);

		/* Defer the actual link update to the handler's epilogue */
		lif->state |= IONIC_LIF_F_LINK_CHECK_NEEDED;
		break;

	case IONIC_EVENT_RESET:
		IONIC_PRINT(NOTICE,
			"Notifyq IONIC_EVENT_RESET %s "
			"eid=%jd, reset_code=%d state=%d",
			lif->name,
			cq_desc->event.eid,
			cq_desc->reset.reset_code,
			cq_desc->reset.state);
		ionic_lif_handle_fw_down(lif);
		break;

	default:
		/* Unknown events are logged and skipped, not fatal */
		IONIC_PRINT(WARNING, "Notifyq bad event ecode=%d eid=%jd",
			cq_desc->event.ecode, cq_desc->event.eid);
		break;
	}

	return true;
}
1423 
/*
 * Service pending notifyq events, up to 'budget' completions, with the
 * notifyq interrupt masked for the duration. Performs any deferred
 * link-status update before returning interrupt credits and unmasking.
 * Returns 0 on success, -1 if the notifyq is not initialized.
 */
int
ionic_notifyq_handler(struct ionic_lif *lif, int budget)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
	uint32_t work_done;

	if (!(nqcq->flags & IONIC_QCQ_F_INITED)) {
		IONIC_PRINT(DEBUG, "Notifyq not yet initialized");
		return -1;
	}

	/* Mask the interrupt while processing to avoid re-entry */
	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_SET);

	work_done = ionic_qcq_service(&nqcq->qcq, budget,
				ionic_notifyq_cb, lif);

	/* A LINK_CHANGE event was seen during servicing */
	if (lif->state & IONIC_LIF_F_LINK_CHECK_NEEDED)
		ionic_link_status_check(lif);

	/* Return credits for the serviced completions */
	ionic_intr_credits(idev->intr_ctrl, nqcq->intr.index,
		work_done, IONIC_INTR_CRED_RESET_COALESCE);

	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_CLEAR);

	return 0;
}
1453 
1454 static int
1455 ionic_lif_adminq_init(struct ionic_lif *lif)
1456 {
1457 	struct ionic_dev *idev = &lif->adapter->idev;
1458 	struct ionic_admin_qcq *aqcq = lif->adminqcq;
1459 	struct ionic_queue *q = &aqcq->qcq.q;
1460 	struct ionic_q_init_comp comp;
1461 	uint32_t retries = 5;
1462 	int err;
1463 
1464 retry_adminq_init:
1465 	ionic_dev_cmd_adminq_init(idev, &aqcq->qcq);
1466 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
1467 	if (err == -EAGAIN && retries > 0) {
1468 		retries--;
1469 		rte_delay_us_block(IONIC_DEVCMD_RETRY_WAIT_US);
1470 		goto retry_adminq_init;
1471 	}
1472 	if (err)
1473 		return err;
1474 
1475 	ionic_dev_cmd_comp(idev, &comp);
1476 
1477 	q->hw_type = comp.hw_type;
1478 	q->hw_index = rte_le_to_cpu_32(comp.hw_index);
1479 	q->db = ionic_db_map(lif, q);
1480 
1481 	IONIC_PRINT(DEBUG, "adminq->hw_type %d", q->hw_type);
1482 	IONIC_PRINT(DEBUG, "adminq->hw_index %d", q->hw_index);
1483 	IONIC_PRINT(DEBUG, "adminq->db %p", q->db);
1484 
1485 	aqcq->flags |= IONIC_QCQ_F_INITED;
1486 
1487 	return 0;
1488 }
1489 
/*
 * Initialize the notify queue via an admin command, enabling its
 * interrupt only when the device interface supports interrupts.
 * The notifyq has no doorbell (device-driven only), so q->db is NULL.
 */
static int
ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
	struct ionic_queue *q = &nqcq->qcq.q;
	uint16_t flags = IONIC_QINIT_F_ENA;
	int err;

	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = rte_cpu_to_le_32(q->index),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.ring_base = rte_cpu_to_le_64(q->base_pa),
		}
	};

	/* Only enable an interrupt if the device supports them */
	if (lif->adapter->intf->configure_intr != NULL) {
		flags |= IONIC_QINIT_F_IRQ;
		ctx.cmd.q_init.intr_index = rte_cpu_to_le_16(nqcq->intr.index);
	}
	ctx.cmd.q_init.flags = rte_cpu_to_le_16(flags);

	IONIC_PRINT(DEBUG, "notifyq_init.index %d", q->index);
	IONIC_PRINT(DEBUG, "notifyq_init.ring_base 0x%" PRIx64 "", q->base_pa);
	IONIC_PRINT(DEBUG, "notifyq_init.ring_size %d",
		ctx.cmd.q_init.ring_size);
	IONIC_PRINT(DEBUG, "notifyq_init.ver %u", ctx.cmd.q_init.ver);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* Record the hw identity reported in the init completion */
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
	q->db = NULL;

	IONIC_PRINT(DEBUG, "notifyq->hw_type %d", q->hw_type);
	IONIC_PRINT(DEBUG, "notifyq->hw_index %d", q->hw_index);
	IONIC_PRINT(DEBUG, "notifyq->db %p", q->db);

	/* Unmask the interrupt so events can flow */
	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_CLEAR);

	nqcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
1544 
1545 int
1546 ionic_lif_set_features(struct ionic_lif *lif)
1547 {
1548 	struct ionic_admin_ctx ctx = {
1549 		.pending_work = true,
1550 		.cmd.lif_setattr = {
1551 			.opcode = IONIC_CMD_LIF_SETATTR,
1552 			.attr = IONIC_LIF_ATTR_FEATURES,
1553 			.features = rte_cpu_to_le_64(lif->features),
1554 		},
1555 	};
1556 	int err;
1557 
1558 	err = ionic_adminq_post_wait(lif, &ctx);
1559 	if (err)
1560 		return err;
1561 
1562 	lif->hw_features = rte_le_to_cpu_64(ctx.cmd.lif_setattr.features &
1563 						ctx.comp.lif_setattr.features);
1564 
1565 	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1566 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_TX_TAG");
1567 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1568 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_STRIP");
1569 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1570 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_FILTER");
1571 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1572 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_HASH");
1573 	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1574 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_SG");
1575 	if (lif->hw_features & IONIC_ETH_HW_RX_SG)
1576 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_SG");
1577 	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1578 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_CSUM");
1579 	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1580 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_CSUM");
1581 	if (lif->hw_features & IONIC_ETH_HW_TSO)
1582 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO");
1583 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1584 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPV6");
1585 	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1586 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_ECN");
1587 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1588 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE");
1589 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1590 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE_CSUM");
1591 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1592 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP4");
1593 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1594 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP6");
1595 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1596 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP");
1597 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1598 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP_CSUM");
1599 
1600 	return 0;
1601 }
1602 
/*
 * Post a Q_INIT for this Tx queue without waiting for completion.
 * The command context lives in txq->admin_ctx; the caller must call
 * ionic_lif_txq_init_done() once the completion has been polled.
 */
int
ionic_lif_txq_init_nowait(struct ionic_tx_qcq *txq)
{
	struct ionic_qcq *qcq = &txq->qcq;
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = qcq->lif;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx *ctx = &txq->admin_ctx;
	int err;

	*ctx = (struct ionic_admin_ctx) {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = rte_cpu_to_le_32(q->index),
			.flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
		},
	};

	if (txq->flags & IONIC_QCQ_F_SG)
		ctx->cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);
	/* When using controller memory buffers, the ring lives in CMB space */
	if (txq->flags & IONIC_QCQ_F_CMB) {
		ctx->cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);
		ctx->cmd.q_init.ring_base = rte_cpu_to_le_64(q->cmb_base_pa);
	} else {
		ctx->cmd.q_init.ring_base = rte_cpu_to_le_64(q->base_pa);
	}

	IONIC_PRINT(DEBUG, "txq_init.index %d", q->index);
	IONIC_PRINT(DEBUG, "txq_init.ring_base 0x%" PRIx64 "", q->base_pa);
	IONIC_PRINT(DEBUG, "txq_init.ring_size %d",
		ctx->cmd.q_init.ring_size);
	IONIC_PRINT(DEBUG, "txq_init.ver %u", ctx->cmd.q_init.ver);

	/* Reset driver-side queue/cq state before the device re-inits it */
	ionic_q_reset(q);
	ionic_cq_reset(cq);

	/* Caller responsible for calling ionic_lif_txq_init_done() */
	err = ionic_adminq_post(lif, ctx);
	if (err)
		ctx->pending_work = false;
	return err;
}
1652 
1653 void
1654 ionic_lif_txq_init_done(struct ionic_tx_qcq *txq)
1655 {
1656 	struct ionic_lif *lif = txq->qcq.lif;
1657 	struct ionic_queue *q = &txq->qcq.q;
1658 	struct ionic_admin_ctx *ctx = &txq->admin_ctx;
1659 
1660 	q->hw_type = ctx->comp.q_init.hw_type;
1661 	q->hw_index = rte_le_to_cpu_32(ctx->comp.q_init.hw_index);
1662 	q->db = ionic_db_map(lif, q);
1663 
1664 	IONIC_PRINT(DEBUG, "txq->hw_type %d", q->hw_type);
1665 	IONIC_PRINT(DEBUG, "txq->hw_index %d", q->hw_index);
1666 	IONIC_PRINT(DEBUG, "txq->db %p", q->db);
1667 
1668 	txq->flags |= IONIC_QCQ_F_INITED;
1669 }
1670 
/*
 * Post a Q_INIT for this Rx queue without waiting for completion.
 * The command context lives in rxq->admin_ctx; the caller must call
 * ionic_lif_rxq_init_done() once the completion has been polled.
 */
int
ionic_lif_rxq_init_nowait(struct ionic_rx_qcq *rxq)
{
	struct ionic_qcq *qcq = &rxq->qcq;
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = qcq->lif;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx *ctx = &rxq->admin_ctx;
	int err;

	*ctx = (struct ionic_admin_ctx) {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = rte_cpu_to_le_32(q->index),
			.flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
		},
	};

	if (rxq->flags & IONIC_QCQ_F_SG)
		ctx->cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);
	/* When using controller memory buffers, the ring lives in CMB space */
	if (rxq->flags & IONIC_QCQ_F_CMB) {
		ctx->cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);
		ctx->cmd.q_init.ring_base = rte_cpu_to_le_64(q->cmb_base_pa);
	} else {
		ctx->cmd.q_init.ring_base = rte_cpu_to_le_64(q->base_pa);
	}

	IONIC_PRINT(DEBUG, "rxq_init.index %d", q->index);
	IONIC_PRINT(DEBUG, "rxq_init.ring_base 0x%" PRIx64 "", q->base_pa);
	IONIC_PRINT(DEBUG, "rxq_init.ring_size %d",
		ctx->cmd.q_init.ring_size);
	IONIC_PRINT(DEBUG, "rxq_init.ver %u", ctx->cmd.q_init.ver);

	/* Reset driver-side queue/cq state before the device re-inits it */
	ionic_q_reset(q);
	ionic_cq_reset(cq);

	/* Caller responsible for calling ionic_lif_rxq_init_done() */
	err = ionic_adminq_post(lif, ctx);
	if (err)
		ctx->pending_work = false;
	return err;
}
1720 
1721 void
1722 ionic_lif_rxq_init_done(struct ionic_rx_qcq *rxq)
1723 {
1724 	struct ionic_lif *lif = rxq->qcq.lif;
1725 	struct ionic_queue *q = &rxq->qcq.q;
1726 	struct ionic_admin_ctx *ctx = &rxq->admin_ctx;
1727 
1728 	q->hw_type = ctx->comp.q_init.hw_type;
1729 	q->hw_index = rte_le_to_cpu_32(ctx->comp.q_init.hw_index);
1730 	q->db = ionic_db_map(lif, q);
1731 
1732 	rxq->flags |= IONIC_QCQ_F_INITED;
1733 
1734 	IONIC_PRINT(DEBUG, "rxq->hw_type %d", q->hw_type);
1735 	IONIC_PRINT(DEBUG, "rxq->hw_index %d", q->hw_index);
1736 	IONIC_PRINT(DEBUG, "rxq->db %p", q->db);
1737 }
1738 
1739 static int
1740 ionic_station_set(struct ionic_lif *lif)
1741 {
1742 	struct ionic_admin_ctx ctx = {
1743 		.pending_work = true,
1744 		.cmd.lif_getattr = {
1745 			.opcode = IONIC_CMD_LIF_GETATTR,
1746 			.attr = IONIC_LIF_ATTR_MAC,
1747 		},
1748 	};
1749 	int err;
1750 
1751 	IONIC_PRINT_CALL();
1752 
1753 	err = ionic_adminq_post_wait(lif, &ctx);
1754 	if (err)
1755 		return err;
1756 
1757 	memcpy(lif->mac_addr, ctx.comp.lif_getattr.mac, RTE_ETHER_ADDR_LEN);
1758 
1759 	return 0;
1760 }
1761 
/*
 * Push the LIF's name to the device via LIF_SETATTR.
 * Best-effort: the admin command result is not checked.
 */
static void
ionic_lif_set_name(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	/*
	 * Copy at most sizeof(name) - 1 bytes; the designated initializer
	 * above zeroed the struct, so the name field stays NUL-terminated.
	 * NOTE(review): this reads sizeof(name) - 1 bytes from lif->name
	 * regardless of its string length -- assumes lif->name is at least
	 * that large; confirm against the lif struct definition.
	 */
	memcpy(ctx.cmd.lif_setattr.name, lif->name,
		sizeof(ctx.cmd.lif_setattr.name) - 1);

	ionic_adminq_post_wait(lif, &ctx);
}
1778 
1779 int
1780 ionic_lif_init(struct ionic_lif *lif)
1781 {
1782 	struct ionic_dev *idev = &lif->adapter->idev;
1783 	struct ionic_lif_init_comp comp;
1784 	uint32_t retries = 5;
1785 	int err;
1786 
1787 	memset(&lif->stats_base, 0, sizeof(lif->stats_base));
1788 
1789 retry_lif_init:
1790 	ionic_dev_cmd_lif_init(idev, lif->info_pa);
1791 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
1792 	if (err == -EAGAIN && retries > 0) {
1793 		retries--;
1794 		rte_delay_us_block(IONIC_DEVCMD_RETRY_WAIT_US);
1795 		goto retry_lif_init;
1796 	}
1797 	if (err)
1798 		return err;
1799 
1800 	ionic_dev_cmd_comp(idev, &comp);
1801 
1802 	lif->hw_index = rte_cpu_to_le_16(comp.hw_index);
1803 
1804 	err = ionic_lif_adminq_init(lif);
1805 	if (err)
1806 		return err;
1807 
1808 	err = ionic_lif_notifyq_init(lif);
1809 	if (err)
1810 		goto err_out_adminq_deinit;
1811 
1812 	/*
1813 	 * Configure initial feature set
1814 	 * This will be updated later by the dev_configure() step
1815 	 */
1816 	lif->features = IONIC_ETH_HW_RX_HASH | IONIC_ETH_HW_VLAN_RX_FILTER;
1817 
1818 	err = ionic_lif_set_features(lif);
1819 	if (err)
1820 		goto err_out_notifyq_deinit;
1821 
1822 	err = ionic_rx_filters_init(lif);
1823 	if (err)
1824 		goto err_out_notifyq_deinit;
1825 
1826 	err = ionic_station_set(lif);
1827 	if (err)
1828 		goto err_out_rx_filter_deinit;
1829 
1830 	ionic_lif_set_name(lif);
1831 
1832 	lif->state |= IONIC_LIF_F_INITED;
1833 
1834 	return 0;
1835 
1836 err_out_rx_filter_deinit:
1837 	ionic_rx_filters_deinit(lif);
1838 
1839 err_out_notifyq_deinit:
1840 	ionic_lif_notifyq_deinit(lif);
1841 
1842 err_out_adminq_deinit:
1843 	ionic_lif_adminq_deinit(lif);
1844 
1845 	return err;
1846 }
1847 
/*
 * Undo ionic_lif_init(), tearing down in reverse order of setup.
 * No-op unless the LIF completed initialization.
 */
void
ionic_lif_deinit(struct ionic_lif *lif)
{
	if (!(lif->state & IONIC_LIF_F_INITED))
		return;

	ionic_rx_filters_deinit(lif);
	ionic_lif_rss_teardown(lif);
	ionic_lif_notifyq_deinit(lif);
	ionic_lif_adminq_deinit(lif);

	lif->state &= ~IONIC_LIF_F_INITED;
}
1861 
1862 void
1863 ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)
1864 {
1865 	struct rte_eth_dev *eth_dev = lif->eth_dev;
1866 	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1867 
1868 	/*
1869 	 * IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so
1870 	 * set RTE_ETH_RX_OFFLOAD_VLAN_FILTER and ignore RTE_ETH_VLAN_FILTER_MASK
1871 	 */
1872 	rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
1873 
1874 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1875 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1876 			lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
1877 		else
1878 			lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
1879 	}
1880 }
1881 
1882 void
1883 ionic_lif_configure_rx_sg_offload(struct ionic_lif *lif)
1884 {
1885 	struct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode;
1886 
1887 	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
1888 		lif->features |= IONIC_ETH_HW_RX_SG;
1889 		lif->eth_dev->data->scattered_rx = 1;
1890 	} else {
1891 		lif->features &= ~IONIC_ETH_HW_RX_SG;
1892 		lif->eth_dev->data->scattered_rx = 0;
1893 	}
1894 }
1895 
/*
 * Derive the LIF configuration -- queue counts and feature flags --
 * from the ethdev configuration on this port, clamping queue counts
 * to the device-identified per-LIF maximums.
 */
void
ionic_lif_configure(struct ionic_lif *lif)
{
	struct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode;
	struct rte_eth_txmode *txmode = &lif->eth_dev->data->dev_conf.txmode;
	struct ionic_identity *ident = &lif->adapter->ident;
	union ionic_lif_config *cfg = &ident->lif.eth.config;
	/* Device-reported per-LIF queue maximums */
	uint32_t ntxqs_per_lif =
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
	uint32_t nrxqs_per_lif =
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]);
	/* Application-requested queue counts */
	uint32_t nrxqs = lif->eth_dev->data->nb_rx_queues;
	uint32_t ntxqs = lif->eth_dev->data->nb_tx_queues;

	lif->port_id = lif->eth_dev->data->port_id;

	IONIC_PRINT(DEBUG, "Configuring LIF on port %u",
		lif->port_id);

	if (nrxqs > 0)
		nrxqs_per_lif = RTE_MIN(nrxqs_per_lif, nrxqs);

	if (ntxqs > 0)
		ntxqs_per_lif = RTE_MIN(ntxqs_per_lif, ntxqs);

	lif->nrxqcqs = nrxqs_per_lif;
	lif->ntxqcqs = ntxqs_per_lif;

	/* Update the LIF configuration based on the eth_dev */

	/*
	 * NB: While it is true that RSS_HASH is always enabled on ionic,
	 *     setting this flag unconditionally causes problems in DTS.
	 * rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
	 */

	/* RX per-port */

	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM ||
	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM ||
	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
		lif->features |= IONIC_ETH_HW_RX_CSUM;
	else
		lif->features &= ~IONIC_ETH_HW_RX_CSUM;

	/*
	 * NB: RX_SG may be enabled later during rx_queue_setup() if
	 * required by the mbuf/mtu configuration
	 */
	ionic_lif_configure_rx_sg_offload(lif);

	/* Covers VLAN_STRIP */
	ionic_lif_configure_vlan_offload(lif, RTE_ETH_VLAN_STRIP_MASK);

	/* TX per-port */

	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
		lif->features |= IONIC_ETH_HW_TX_CSUM;
	else
		lif->features &= ~IONIC_ETH_HW_TX_CSUM;

	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
		lif->features |= IONIC_ETH_HW_VLAN_TX_TAG;
	else
		lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG;

	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
		lif->features |= IONIC_ETH_HW_TX_SG;
	else
		lif->features &= ~IONIC_ETH_HW_TX_SG;

	/* TCP TSO implies the IPv6 and ECN TSO variants as well */
	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
		lif->features |= IONIC_ETH_HW_TSO;
		lif->features |= IONIC_ETH_HW_TSO_IPV6;
		lif->features |= IONIC_ETH_HW_TSO_ECN;
	} else {
		lif->features &= ~IONIC_ETH_HW_TSO;
		lif->features &= ~IONIC_ETH_HW_TSO_IPV6;
		lif->features &= ~IONIC_ETH_HW_TSO_ECN;
	}
}
1981 
/*
 * Bring the LIF to the UP state: program RSS, set a default Rx mode if
 * none was chosen yet, then start all Rx and Tx queues. Queue starts
 * are batched: up to 'chunk' start commands (the available adminq
 * space) are posted before their completions are awaited.
 * Returns 0 on success, -EIO if any queue failed to start.
 */
int
ionic_lif_start(struct ionic_lif *lif)
{
	struct rte_eth_dev *dev = lif->eth_dev;
	uint32_t rx_mode;
	uint32_t i, j, chunk;
	int err;
	bool fatal = false;

	err = ionic_lif_rss_setup(lif);
	if (err)
		return err;

	if (!lif->rx_mode) {
		IONIC_PRINT(DEBUG, "Setting RX mode on %s",
			lif->name);

		rx_mode  = IONIC_RX_MODE_F_UNICAST;
		rx_mode |= IONIC_RX_MODE_F_MULTICAST;
		rx_mode |= IONIC_RX_MODE_F_BROADCAST;

		ionic_set_rx_mode(lif, rx_mode);
	}

	IONIC_PRINT(DEBUG, "Starting %u RX queues and %u TX queues "
		"on port %u",
		lif->nrxqcqs, lif->ntxqcqs, lif->port_id);

	/* How many admin commands can be posted before waiting */
	chunk = ionic_adminq_space_avail(lif);

	for (i = 0; i < lif->nrxqcqs; i += chunk) {
		/* Deferred queues are started later via queue_start() */
		if (lif->rxqcqs[0]->flags & IONIC_QCQ_F_DEFERRED) {
			IONIC_PRINT(DEBUG, "Rx queue start deferred");
			break;
		}

		/* Post up to 'chunk' start commands... */
		for (j = 0; j < chunk && i + j < lif->nrxqcqs; j++) {
			err = ionic_dev_rx_queue_start_firsthalf(dev, i + j);
			if (err) {
				fatal = true;
				break;
			}
		}

		/* ...then wait for each posted command to complete */
		for (j = 0; j < chunk && i + j < lif->nrxqcqs; j++) {
			/* Commands that failed to post return immediately */
			err = ionic_dev_rx_queue_start_secondhalf(dev, i + j);
			if (err)
				/* Don't break */
				fatal = true;
		}
	}
	if (fatal)
		return -EIO;

	for (i = 0; i < lif->ntxqcqs; i += chunk) {
		/* Deferred queues are started later via queue_start() */
		if (lif->txqcqs[0]->flags & IONIC_QCQ_F_DEFERRED) {
			IONIC_PRINT(DEBUG, "Tx queue start deferred");
			break;
		}

		/* Post up to 'chunk' start commands... */
		for (j = 0; j < chunk && i + j < lif->ntxqcqs; j++) {
			err = ionic_dev_tx_queue_start_firsthalf(dev, i + j);
			if (err) {
				fatal = true;
				break;
			}
		}

		/* ...then wait for each posted command to complete */
		for (j = 0; j < chunk && i + j < lif->ntxqcqs; j++) {
			/* Commands that failed to post return immediately */
			err = ionic_dev_tx_queue_start_secondhalf(dev, i + j);
			if (err)
				/* Don't break */
				fatal = true;
		}
	}
	if (fatal)
		return -EIO;

	/* Carrier ON here */
	lif->state |= IONIC_LIF_F_UP;

	ionic_link_status_check(lif);

	return 0;
}
2069 
2070 int
2071 ionic_lif_identify(struct ionic_adapter *adapter)
2072 {
2073 	struct ionic_dev *idev = &adapter->idev;
2074 	struct ionic_identity *ident = &adapter->ident;
2075 	union ionic_lif_config *cfg = &ident->lif.eth.config;
2076 	uint32_t lif_words = RTE_DIM(ident->lif.words);
2077 	uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data);
2078 	uint32_t i, nwords;
2079 	int err;
2080 
2081 	ionic_dev_cmd_lif_identify(idev, IONIC_LIF_TYPE_CLASSIC,
2082 		IONIC_IDENTITY_VERSION_1);
2083 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
2084 	if (err)
2085 		return (err);
2086 
2087 	nwords = RTE_MIN(lif_words, cmd_words);
2088 	for (i = 0; i < nwords; i++)
2089 		ident->lif.words[i] = ioread32(&idev->dev_cmd->data[i]);
2090 
2091 	IONIC_PRINT(INFO, "capabilities 0x%" PRIx64 " ",
2092 		rte_le_to_cpu_64(ident->lif.capabilities));
2093 
2094 	IONIC_PRINT(INFO, "eth.max_ucast_filters 0x%" PRIx32 " ",
2095 		rte_le_to_cpu_32(ident->lif.eth.max_ucast_filters));
2096 	IONIC_PRINT(INFO, "eth.max_mcast_filters 0x%" PRIx32 " ",
2097 		rte_le_to_cpu_32(ident->lif.eth.max_mcast_filters));
2098 
2099 	IONIC_PRINT(INFO, "eth.features 0x%" PRIx64 " ",
2100 		rte_le_to_cpu_64(cfg->features));
2101 	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_ADMINQ] 0x%" PRIx32 " ",
2102 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_ADMINQ]));
2103 	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] 0x%" PRIx32 " ",
2104 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_NOTIFYQ]));
2105 	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_RXQ] 0x%" PRIx32 " ",
2106 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]));
2107 	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_TXQ] 0x%" PRIx32 " ",
2108 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]));
2109 
2110 	return 0;
2111 }
2112 
2113 int
2114 ionic_lifs_size(struct ionic_adapter *adapter)
2115 {
2116 	struct ionic_identity *ident = &adapter->ident;
2117 	union ionic_lif_config *cfg = &ident->lif.eth.config;
2118 	uint32_t nintrs, dev_nintrs = rte_le_to_cpu_32(ident->dev.nintrs);
2119 
2120 	adapter->max_ntxqs_per_lif =
2121 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
2122 	adapter->max_nrxqs_per_lif =
2123 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]);
2124 
2125 	nintrs = 1 /* notifyq */;
2126 
2127 	if (nintrs > dev_nintrs) {
2128 		IONIC_PRINT(ERR,
2129 			"At most %d intr supported, minimum req'd is %u",
2130 			dev_nintrs, nintrs);
2131 		return -ENOSPC;
2132 	}
2133 
2134 	adapter->nintrs = nintrs;
2135 
2136 	return 0;
2137 }
2138