xref: /dpdk/drivers/net/ionic/ionic_lif.c (revision 09442498ef736d0a96632cf8b8c15d8ca78a6468)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2022 Advanced Micro Devices, Inc.
3  */
4 
5 #include <rte_malloc.h>
6 #include <ethdev_driver.h>
7 
8 #include "ionic.h"
9 #include "ionic_logs.h"
10 #include "ionic_lif.h"
11 #include "ionic_ethdev.h"
12 #include "ionic_rx_filter.h"
13 #include "ionic_rxtx.h"
14 
/* queuetype support level: highest version of each queue type this driver
 * supports; passed to the FW during queue identification.
 */
static const uint8_t ionic_qtype_vers[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 2,   /* 0 = Base version with CQ+SG support
				      * 1 =       ... with EQ
				      * 2 =       ... with CMB
				      */
	[IONIC_QTYPE_TXQ]     = 3,   /* 0 = Base version with CQ+SG support
				      * 1 =   ... with Tx SG version 1
				      * 2 =       ... with EQ
				      * 3 =       ... with CMB
				      */
};
29 
30 static int ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr);
31 static int ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr);
32 
33 static int
34 ionic_qcq_disable(struct ionic_qcq *qcq)
35 {
36 	struct ionic_queue *q = &qcq->q;
37 	struct ionic_lif *lif = qcq->lif;
38 	struct ionic_admin_ctx ctx = {
39 		.pending_work = true,
40 		.cmd.q_control = {
41 			.opcode = IONIC_CMD_Q_CONTROL,
42 			.type = q->type,
43 			.index = rte_cpu_to_le_32(q->index),
44 			.oper = IONIC_Q_DISABLE,
45 		},
46 	};
47 
48 	return ionic_adminq_post_wait(lif, &ctx);
49 }
50 
51 void
52 ionic_lif_stop(struct ionic_lif *lif)
53 {
54 	uint32_t i;
55 
56 	IONIC_PRINT_CALL();
57 
58 	lif->state &= ~IONIC_LIF_F_UP;
59 
60 	for (i = 0; i < lif->nrxqcqs; i++) {
61 		struct ionic_rx_qcq *rxq = lif->rxqcqs[i];
62 		if (rxq->flags & IONIC_QCQ_F_INITED)
63 			(void)ionic_dev_rx_queue_stop(lif->eth_dev, i);
64 	}
65 
66 	for (i = 0; i < lif->ntxqcqs; i++) {
67 		struct ionic_tx_qcq *txq = lif->txqcqs[i];
68 		if (txq->flags & IONIC_QCQ_F_INITED)
69 			(void)ionic_dev_tx_queue_stop(lif->eth_dev, i);
70 	}
71 }
72 
73 void
74 ionic_lif_reset(struct ionic_lif *lif)
75 {
76 	struct ionic_dev *idev = &lif->adapter->idev;
77 	int err;
78 
79 	IONIC_PRINT_CALL();
80 
81 	ionic_dev_cmd_lif_reset(idev);
82 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
83 	if (err)
84 		IONIC_PRINT(WARNING, "Failed to reset %s", lif->name);
85 }
86 
87 static void
88 ionic_lif_get_abs_stats(const struct ionic_lif *lif, struct rte_eth_stats *stats)
89 {
90 	struct ionic_lif_stats *ls = &lif->info->stats;
91 	uint32_t i;
92 	uint32_t num_rx_q_counters = RTE_MIN(lif->nrxqcqs, (uint32_t)
93 			RTE_ETHDEV_QUEUE_STAT_CNTRS);
94 	uint32_t num_tx_q_counters = RTE_MIN(lif->ntxqcqs, (uint32_t)
95 			RTE_ETHDEV_QUEUE_STAT_CNTRS);
96 
97 	memset(stats, 0, sizeof(*stats));
98 
99 	if (ls == NULL) {
100 		IONIC_PRINT(DEBUG, "Stats on port %u not yet initialized",
101 			lif->port_id);
102 		return;
103 	}
104 
105 	/* RX */
106 
107 	stats->ipackets = ls->rx_ucast_packets +
108 		ls->rx_mcast_packets +
109 		ls->rx_bcast_packets;
110 
111 	stats->ibytes = ls->rx_ucast_bytes +
112 		ls->rx_mcast_bytes +
113 		ls->rx_bcast_bytes;
114 
115 	for (i = 0; i < lif->nrxqcqs; i++) {
116 		struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats;
117 		stats->ierrors +=
118 			rx_stats->bad_cq_status +
119 			rx_stats->bad_len;
120 	}
121 
122 	stats->imissed +=
123 		ls->rx_ucast_drop_packets +
124 		ls->rx_mcast_drop_packets +
125 		ls->rx_bcast_drop_packets;
126 
127 	stats->ierrors +=
128 		ls->rx_dma_error +
129 		ls->rx_desc_fetch_error +
130 		ls->rx_desc_data_error;
131 
132 	for (i = 0; i < num_rx_q_counters; i++) {
133 		struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats;
134 		stats->q_ipackets[i] = rx_stats->packets;
135 		stats->q_ibytes[i] = rx_stats->bytes;
136 		stats->q_errors[i] =
137 			rx_stats->bad_cq_status +
138 			rx_stats->bad_len;
139 	}
140 
141 	/* TX */
142 
143 	stats->opackets = ls->tx_ucast_packets +
144 		ls->tx_mcast_packets +
145 		ls->tx_bcast_packets;
146 
147 	stats->obytes = ls->tx_ucast_bytes +
148 		ls->tx_mcast_bytes +
149 		ls->tx_bcast_bytes;
150 
151 	for (i = 0; i < lif->ntxqcqs; i++) {
152 		struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats;
153 		stats->oerrors += tx_stats->drop;
154 	}
155 
156 	stats->oerrors +=
157 		ls->tx_ucast_drop_packets +
158 		ls->tx_mcast_drop_packets +
159 		ls->tx_bcast_drop_packets;
160 
161 	stats->oerrors +=
162 		ls->tx_dma_error +
163 		ls->tx_queue_disabled +
164 		ls->tx_desc_fetch_error +
165 		ls->tx_desc_data_error;
166 
167 	for (i = 0; i < num_tx_q_counters; i++) {
168 		struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats;
169 		stats->q_opackets[i] = tx_stats->packets;
170 		stats->q_obytes[i] = tx_stats->bytes;
171 	}
172 }
173 
174 void
175 ionic_lif_get_stats(const struct ionic_lif *lif,
176 		struct rte_eth_stats *stats)
177 {
178 	ionic_lif_get_abs_stats(lif, stats);
179 
180 	stats->ipackets  -= lif->stats_base.ipackets;
181 	stats->opackets  -= lif->stats_base.opackets;
182 	stats->ibytes    -= lif->stats_base.ibytes;
183 	stats->obytes    -= lif->stats_base.obytes;
184 	stats->imissed   -= lif->stats_base.imissed;
185 	stats->ierrors   -= lif->stats_base.ierrors;
186 	stats->oerrors   -= lif->stats_base.oerrors;
187 	stats->rx_nombuf -= lif->stats_base.rx_nombuf;
188 }
189 
190 void
191 ionic_lif_reset_stats(struct ionic_lif *lif)
192 {
193 	uint32_t i;
194 
195 	for (i = 0; i < lif->nrxqcqs; i++) {
196 		memset(&lif->rxqcqs[i]->stats, 0,
197 			sizeof(struct ionic_rx_stats));
198 		memset(&lif->txqcqs[i]->stats, 0,
199 			sizeof(struct ionic_tx_stats));
200 	}
201 
202 	ionic_lif_get_abs_stats(lif, &lif->stats_base);
203 }
204 
205 void
206 ionic_lif_get_hw_stats(struct ionic_lif *lif, struct ionic_lif_stats *stats)
207 {
208 	uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t);
209 	uint64_t *stats64 = (uint64_t *)stats;
210 	uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats;
211 	uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base;
212 
213 	for (i = 0; i < count; i++)
214 		stats64[i] = lif_stats64[i] - lif_stats64_base[i];
215 }
216 
217 void
218 ionic_lif_reset_hw_stats(struct ionic_lif *lif)
219 {
220 	uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t);
221 	uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats;
222 	uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base;
223 
224 	for (i = 0; i < count; i++)
225 		lif_stats64_base[i] = lif_stats64[i];
226 }
227 
228 static int
229 ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr)
230 {
231 	struct ionic_admin_ctx ctx = {
232 		.pending_work = true,
233 		.cmd.rx_filter_add = {
234 			.opcode = IONIC_CMD_RX_FILTER_ADD,
235 			.match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_MAC),
236 		},
237 	};
238 	int err;
239 
240 	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, RTE_ETHER_ADDR_LEN);
241 
242 	err = ionic_adminq_post_wait(lif, &ctx);
243 	if (err)
244 		return err;
245 
246 	IONIC_PRINT(INFO, "rx_filter add (id %d)",
247 		rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id));
248 
249 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx);
250 }
251 
252 static int
253 ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr)
254 {
255 	struct ionic_admin_ctx ctx = {
256 		.pending_work = true,
257 		.cmd.rx_filter_del = {
258 			.opcode = IONIC_CMD_RX_FILTER_DEL,
259 		},
260 	};
261 	struct ionic_rx_filter *f;
262 	int err;
263 
264 	IONIC_PRINT_CALL();
265 
266 	rte_spinlock_lock(&lif->rx_filters.lock);
267 
268 	f = ionic_rx_filter_by_addr(lif, addr);
269 	if (!f) {
270 		rte_spinlock_unlock(&lif->rx_filters.lock);
271 		return -ENOENT;
272 	}
273 
274 	ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id);
275 	ionic_rx_filter_free(f);
276 
277 	rte_spinlock_unlock(&lif->rx_filters.lock);
278 
279 	err = ionic_adminq_post_wait(lif, &ctx);
280 	if (err)
281 		return err;
282 
283 	IONIC_PRINT(INFO, "rx_filter del (id %d)",
284 		rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id));
285 
286 	return 0;
287 }
288 
289 int
290 ionic_dev_add_mac(struct rte_eth_dev *eth_dev,
291 		struct rte_ether_addr *mac_addr,
292 		uint32_t index __rte_unused, uint32_t pool __rte_unused)
293 {
294 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
295 
296 	IONIC_PRINT_CALL();
297 
298 	return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr);
299 }
300 
301 void
302 ionic_dev_remove_mac(struct rte_eth_dev *eth_dev, uint32_t index)
303 {
304 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
305 	struct ionic_adapter *adapter = lif->adapter;
306 	struct rte_ether_addr *mac_addr;
307 
308 	IONIC_PRINT_CALL();
309 
310 	if (index >= adapter->max_mac_addrs) {
311 		IONIC_PRINT(WARNING,
312 			"Index %u is above MAC filter limit %u",
313 			index, adapter->max_mac_addrs);
314 		return;
315 	}
316 
317 	mac_addr = &eth_dev->data->mac_addrs[index];
318 
319 	if (!rte_is_valid_assigned_ether_addr(mac_addr))
320 		return;
321 
322 	ionic_lif_addr_del(lif, (const uint8_t *)mac_addr);
323 }
324 
325 int
326 ionic_dev_set_mac(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
327 {
328 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
329 
330 	IONIC_PRINT_CALL();
331 
332 	if (mac_addr == NULL) {
333 		IONIC_PRINT(NOTICE, "New mac is null");
334 		return -1;
335 	}
336 
337 	if (!rte_is_zero_ether_addr((struct rte_ether_addr *)lif->mac_addr)) {
338 		IONIC_PRINT(INFO, "Deleting mac addr %pM",
339 			lif->mac_addr);
340 		ionic_lif_addr_del(lif, lif->mac_addr);
341 		memset(lif->mac_addr, 0, RTE_ETHER_ADDR_LEN);
342 	}
343 
344 	IONIC_PRINT(INFO, "Updating mac addr");
345 
346 	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)lif->mac_addr);
347 
348 	return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr);
349 }
350 
351 static int
352 ionic_vlan_rx_add_vid(struct ionic_lif *lif, uint16_t vid)
353 {
354 	struct ionic_admin_ctx ctx = {
355 		.pending_work = true,
356 		.cmd.rx_filter_add = {
357 			.opcode = IONIC_CMD_RX_FILTER_ADD,
358 			.match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_VLAN),
359 			.vlan.vlan = rte_cpu_to_le_16(vid),
360 		},
361 	};
362 	int err;
363 
364 	err = ionic_adminq_post_wait(lif, &ctx);
365 	if (err)
366 		return err;
367 
368 	IONIC_PRINT(INFO, "rx_filter add VLAN %d (id %d)", vid,
369 		rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id));
370 
371 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx);
372 }
373 
374 static int
375 ionic_vlan_rx_kill_vid(struct ionic_lif *lif, uint16_t vid)
376 {
377 	struct ionic_admin_ctx ctx = {
378 		.pending_work = true,
379 		.cmd.rx_filter_del = {
380 			.opcode = IONIC_CMD_RX_FILTER_DEL,
381 		},
382 	};
383 	struct ionic_rx_filter *f;
384 	int err;
385 
386 	IONIC_PRINT_CALL();
387 
388 	rte_spinlock_lock(&lif->rx_filters.lock);
389 
390 	f = ionic_rx_filter_by_vlan(lif, vid);
391 	if (!f) {
392 		rte_spinlock_unlock(&lif->rx_filters.lock);
393 		return -ENOENT;
394 	}
395 
396 	ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id);
397 	ionic_rx_filter_free(f);
398 	rte_spinlock_unlock(&lif->rx_filters.lock);
399 
400 	err = ionic_adminq_post_wait(lif, &ctx);
401 	if (err)
402 		return err;
403 
404 	IONIC_PRINT(INFO, "rx_filter del VLAN %d (id %d)", vid,
405 		rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id));
406 
407 	return 0;
408 }
409 
410 int
411 ionic_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id,
412 		int on)
413 {
414 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
415 	int err;
416 
417 	if (on)
418 		err = ionic_vlan_rx_add_vid(lif, vlan_id);
419 	else
420 		err = ionic_vlan_rx_kill_vid(lif, vlan_id);
421 
422 	return err;
423 }
424 
/*
 * Push a new Rx filter mode bitmask to the FW via the admin queue.
 * Each active mode flag is logged for debugging.  A FW failure is only
 * logged; callers cannot observe the error (NOTE(review): consider
 * propagating the admin queue status).
 */
static void
ionic_lif_rx_mode(struct ionic_lif *lif, uint32_t rx_mode)
{
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.rx_mode_set = {
			.opcode = IONIC_CMD_RX_MODE_SET,
			.rx_mode = rte_cpu_to_le_16(rx_mode),
		},
	};
	int err;

	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_ALLMULTI");

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		IONIC_PRINT(ERR, "Failure setting RX mode");
}
452 
453 static void
454 ionic_set_rx_mode(struct ionic_lif *lif, uint32_t rx_mode)
455 {
456 	if (lif->rx_mode != rx_mode) {
457 		lif->rx_mode = rx_mode;
458 		ionic_lif_rx_mode(lif, rx_mode);
459 	}
460 }
461 
462 int
463 ionic_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
464 {
465 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
466 	uint32_t rx_mode = lif->rx_mode;
467 
468 	IONIC_PRINT_CALL();
469 
470 	rx_mode |= IONIC_RX_MODE_F_PROMISC;
471 
472 	ionic_set_rx_mode(lif, rx_mode);
473 
474 	return 0;
475 }
476 
477 int
478 ionic_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
479 {
480 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
481 	uint32_t rx_mode = lif->rx_mode;
482 
483 	rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
484 
485 	ionic_set_rx_mode(lif, rx_mode);
486 
487 	return 0;
488 }
489 
490 int
491 ionic_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
492 {
493 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
494 	uint32_t rx_mode = lif->rx_mode;
495 
496 	rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
497 
498 	ionic_set_rx_mode(lif, rx_mode);
499 
500 	return 0;
501 }
502 
503 int
504 ionic_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
505 {
506 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
507 	uint32_t rx_mode = lif->rx_mode;
508 
509 	rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
510 
511 	ionic_set_rx_mode(lif, rx_mode);
512 
513 	return 0;
514 }
515 
516 int
517 ionic_lif_change_mtu(struct ionic_lif *lif, uint32_t new_mtu)
518 {
519 	struct ionic_admin_ctx ctx = {
520 		.pending_work = true,
521 		.cmd.lif_setattr = {
522 			.opcode = IONIC_CMD_LIF_SETATTR,
523 			.attr = IONIC_LIF_ATTR_MTU,
524 			.mtu = rte_cpu_to_le_32(new_mtu),
525 		},
526 	};
527 
528 	return ionic_adminq_post_wait(lif, &ctx);
529 }
530 
531 int
532 ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
533 {
534 	struct ionic_adapter *adapter = lif->adapter;
535 	struct ionic_dev *idev = &adapter->idev;
536 	unsigned long index;
537 
538 	/*
539 	 * Note: interrupt handler is called for index = 0 only
540 	 * (we use interrupts for the notifyq only anyway,
541 	 * which has index = 0)
542 	 */
543 
544 	for (index = 0; index < adapter->nintrs; index++)
545 		if (!adapter->intrs[index])
546 			break;
547 
548 	if (index == adapter->nintrs)
549 		return -ENOSPC;
550 
551 	adapter->intrs[index] = true;
552 
553 	ionic_intr_init(idev, intr, index);
554 
555 	return 0;
556 }
557 
558 static int
559 ionic_qcq_alloc(struct ionic_lif *lif,
560 		uint8_t type,
561 		size_t struct_size,
562 		uint32_t socket_id,
563 		uint32_t index,
564 		const char *type_name,
565 		uint16_t flags,
566 		uint16_t num_descs,
567 		uint16_t num_segs,
568 		uint16_t desc_size,
569 		uint16_t cq_desc_size,
570 		uint16_t sg_desc_size,
571 		struct ionic_qcq **qcq)
572 {
573 	struct ionic_qcq *new;
574 	uint32_t q_size, cq_size, sg_size, total_size;
575 	void *q_base, *cq_base, *sg_base;
576 	rte_iova_t q_base_pa = 0;
577 	rte_iova_t cq_base_pa = 0;
578 	rte_iova_t sg_base_pa = 0;
579 	size_t page_size = rte_mem_page_size();
580 	int err;
581 
582 	*qcq = NULL;
583 
584 	q_size  = num_descs * desc_size;
585 	cq_size = num_descs * cq_desc_size;
586 	sg_size = num_descs * sg_desc_size;
587 
588 	total_size = RTE_ALIGN(q_size, page_size) +
589 			RTE_ALIGN(cq_size, page_size);
590 	/*
591 	 * Note: aligning q_size/cq_size is not enough due to cq_base address
592 	 * aligning as q_base could be not aligned to the page.
593 	 * Adding page_size.
594 	 */
595 	total_size += page_size;
596 
597 	if (flags & IONIC_QCQ_F_SG) {
598 		total_size += RTE_ALIGN(sg_size, page_size);
599 		total_size += page_size;
600 	}
601 
602 	new = rte_zmalloc_socket("ionic", struct_size,
603 				RTE_CACHE_LINE_SIZE, socket_id);
604 	if (!new) {
605 		IONIC_PRINT(ERR, "Cannot allocate queue structure");
606 		return -ENOMEM;
607 	}
608 
609 	new->lif = lif;
610 
611 	/* Most queue types will store 1 ptr per descriptor */
612 	new->q.info = rte_calloc_socket("ionic",
613 				(uint64_t)num_descs * num_segs,
614 				sizeof(void *), page_size, socket_id);
615 	if (!new->q.info) {
616 		IONIC_PRINT(ERR, "Cannot allocate queue info");
617 		err = -ENOMEM;
618 		goto err_out_free_qcq;
619 	}
620 
621 	new->q.num_segs = num_segs;
622 	new->q.type = type;
623 
624 	err = ionic_q_init(&new->q, index, num_descs);
625 	if (err) {
626 		IONIC_PRINT(ERR, "Queue initialization failed");
627 		goto err_out_free_info;
628 	}
629 
630 	err = ionic_cq_init(&new->cq, num_descs);
631 	if (err) {
632 		IONIC_PRINT(ERR, "Completion queue initialization failed");
633 		goto err_out_free_info;
634 	}
635 
636 	new->base_z = rte_eth_dma_zone_reserve(lif->eth_dev,
637 		type_name, index /* queue_idx */,
638 		total_size, IONIC_ALIGN, socket_id);
639 
640 	if (!new->base_z) {
641 		IONIC_PRINT(ERR, "Cannot reserve queue DMA memory");
642 		err = -ENOMEM;
643 		goto err_out_free_info;
644 	}
645 
646 	new->base = new->base_z->addr;
647 	new->base_pa = new->base_z->iova;
648 
649 	q_base = new->base;
650 	q_base_pa = new->base_pa;
651 
652 	cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size, page_size);
653 	cq_base_pa = RTE_ALIGN(q_base_pa + q_size, page_size);
654 
655 	if (flags & IONIC_QCQ_F_SG) {
656 		sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size,
657 				page_size);
658 		sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size, page_size);
659 		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
660 	}
661 
662 	if (flags & IONIC_QCQ_F_CMB) {
663 		/* alloc descriptor ring from nic memory */
664 		if (lif->adapter->cmb_offset + q_size >
665 				lif->adapter->bars.bar[2].len) {
666 			IONIC_PRINT(ERR, "Cannot reserve queue from NIC mem");
667 			return -ENOMEM;
668 		}
669 		q_base = (void *)
670 			((uintptr_t)lif->adapter->bars.bar[2].vaddr +
671 			 (uintptr_t)lif->adapter->cmb_offset);
672 		/* CMB PA is a relative address */
673 		q_base_pa = lif->adapter->cmb_offset;
674 		lif->adapter->cmb_offset += q_size;
675 	}
676 
677 	IONIC_PRINT(DEBUG, "Q-Base-PA = %#jx CQ-Base-PA = %#jx "
678 		"SG-base-PA = %#jx",
679 		q_base_pa, cq_base_pa, sg_base_pa);
680 
681 	ionic_q_map(&new->q, q_base, q_base_pa);
682 	ionic_cq_map(&new->cq, cq_base, cq_base_pa);
683 
684 	*qcq = new;
685 
686 	return 0;
687 
688 err_out_free_info:
689 	rte_free(new->q.info);
690 err_out_free_qcq:
691 	rte_free(new);
692 
693 	return err;
694 }
695 
696 void
697 ionic_qcq_free(struct ionic_qcq *qcq)
698 {
699 	if (qcq->base_z) {
700 		qcq->base = NULL;
701 		qcq->base_pa = 0;
702 		rte_memzone_free(qcq->base_z);
703 		qcq->base_z = NULL;
704 	}
705 
706 	if (qcq->q.info) {
707 		rte_free(qcq->q.info);
708 		qcq->q.info = NULL;
709 	}
710 
711 	rte_free(qcq);
712 }
713 
714 static uint64_t
715 ionic_rx_rearm_data(struct ionic_lif *lif)
716 {
717 	struct rte_mbuf rxm;
718 
719 	memset(&rxm, 0, sizeof(rxm));
720 
721 	rte_mbuf_refcnt_set(&rxm, 1);
722 	rxm.data_off = RTE_PKTMBUF_HEADROOM;
723 	rxm.nb_segs = 1;
724 	rxm.port = lif->port_id;
725 
726 	rte_compiler_barrier();
727 
728 	RTE_BUILD_BUG_ON(sizeof(rxm.rearm_data[0]) != sizeof(uint64_t));
729 	return rxm.rearm_data[0];
730 }
731 
732 static uint64_t
733 ionic_rx_seg_rearm_data(struct ionic_lif *lif)
734 {
735 	struct rte_mbuf rxm;
736 
737 	memset(&rxm, 0, sizeof(rxm));
738 
739 	rte_mbuf_refcnt_set(&rxm, 1);
740 	rxm.data_off = 0;  /* no headroom */
741 	rxm.nb_segs = 1;
742 	rxm.port = lif->port_id;
743 
744 	rte_compiler_barrier();
745 
746 	RTE_BUILD_BUG_ON(sizeof(rxm.rearm_data[0]) != sizeof(uint64_t));
747 	return rxm.rearm_data[0];
748 }
749 
/*
 * Allocate one Rx queue/cq pair sized for the given mempool.
 *
 * Derives the per-segment sizes from the mempool's data room, enables
 * RX_OFFLOAD_SCATTER when a max-MTU frame cannot fit in a single mbuf,
 * and sizes the info array for the worst-case segment count.  Stores the
 * new queue in lif->rxqcqs[index] and *rxq_out.
 *
 * Returns 0 on success, -EINVAL if the mbufs are too small for the FW's
 * SG limit, or a negative errno from ionic_qcq_alloc().
 */
int
ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
		uint16_t nrxq_descs, struct rte_mempool *mb_pool,
		struct ionic_rx_qcq **rxq_out)
{
	struct ionic_rx_qcq *rxq;
	uint16_t flags = 0, seg_size, hdr_seg_size, max_segs, max_segs_fw = 1;
	uint32_t max_mtu;
	int err;

	/* Place descriptor rings in controller memory when configured */
	if (lif->state & IONIC_LIF_F_Q_IN_CMB)
		flags |= IONIC_QCQ_F_CMB;

	seg_size = rte_pktmbuf_data_room_size(mb_pool);

	/* The first mbuf needs to leave headroom */
	hdr_seg_size = seg_size - RTE_PKTMBUF_HEADROOM;

	max_mtu = rte_le_to_cpu_32(lif->adapter->ident.lif.eth.max_mtu);

	/* If mbufs are too small to hold received packets, enable SG */
	if (max_mtu > hdr_seg_size) {
		IONIC_PRINT(NOTICE, "Enabling RX_OFFLOAD_SCATTER");
		lif->eth_dev->data->dev_conf.rxmode.offloads |=
			RTE_ETH_RX_OFFLOAD_SCATTER;
		ionic_lif_configure_rx_sg_offload(lif);
	}

	/* +1 below accounts for the first (header) segment */
	if (lif->features & IONIC_ETH_HW_RX_SG) {
		flags |= IONIC_QCQ_F_SG;
		max_segs_fw = IONIC_RX_MAX_SG_ELEMS + 1;
	}

	/*
	 * Calculate how many fragment pointers might be stored in queue.
	 * This is the worst-case number, so that there's enough room in
	 * the info array.
	 */
	max_segs = 1 + (max_mtu + RTE_PKTMBUF_HEADROOM - 1) / seg_size;

	IONIC_PRINT(DEBUG, "rxq %u max_mtu %u seg_size %u max_segs %u",
		index, max_mtu, seg_size, max_segs);
	if (max_segs > max_segs_fw) {
		IONIC_PRINT(ERR, "Rx mbuf size insufficient (%d > %d avail)",
			max_segs, max_segs_fw);
		return -EINVAL;
	}

	err = ionic_qcq_alloc(lif,
		IONIC_QTYPE_RXQ,
		sizeof(struct ionic_rx_qcq),
		socket_id,
		index,
		"rx",
		flags,
		nrxq_descs,
		max_segs,
		sizeof(struct ionic_rxq_desc),
		sizeof(struct ionic_rxq_comp),
		sizeof(struct ionic_rxq_sg_desc),
		(struct ionic_qcq **)&rxq);
	if (err)
		return err;

	/* Cache datapath parameters and mbuf rearm templates */
	rxq->flags = flags;
	rxq->seg_size = seg_size;
	rxq->hdr_seg_size = hdr_seg_size;
	rxq->rearm_data = ionic_rx_rearm_data(lif);
	rxq->rearm_seg_data = ionic_rx_seg_rearm_data(lif);

	lif->rxqcqs[index] = rxq;
	*rxq_out = rxq;

	return 0;
}
825 
826 int
827 ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
828 		uint16_t ntxq_descs, struct ionic_tx_qcq **txq_out)
829 {
830 	struct ionic_tx_qcq *txq;
831 	uint16_t flags = 0, num_segs_fw = 1;
832 	int err;
833 
834 	if (lif->features & IONIC_ETH_HW_TX_SG) {
835 		flags |= IONIC_QCQ_F_SG;
836 		num_segs_fw = IONIC_TX_MAX_SG_ELEMS_V1 + 1;
837 	}
838 	if (lif->state & IONIC_LIF_F_Q_IN_CMB)
839 		flags |= IONIC_QCQ_F_CMB;
840 
841 	IONIC_PRINT(DEBUG, "txq %u num_segs %u", index, num_segs_fw);
842 
843 	err = ionic_qcq_alloc(lif,
844 		IONIC_QTYPE_TXQ,
845 		sizeof(struct ionic_tx_qcq),
846 		socket_id,
847 		index,
848 		"tx",
849 		flags,
850 		ntxq_descs,
851 		num_segs_fw,
852 		sizeof(struct ionic_txq_desc),
853 		sizeof(struct ionic_txq_comp),
854 		sizeof(struct ionic_txq_sg_desc_v1),
855 		(struct ionic_qcq **)&txq);
856 	if (err)
857 		return err;
858 
859 	txq->flags = flags;
860 	txq->num_segs_fw = num_segs_fw;
861 
862 	lif->txqcqs[index] = txq;
863 	*txq_out = txq;
864 
865 	return 0;
866 }
867 
868 static int
869 ionic_admin_qcq_alloc(struct ionic_lif *lif)
870 {
871 	uint16_t flags = 0;
872 	int err;
873 
874 	err = ionic_qcq_alloc(lif,
875 		IONIC_QTYPE_ADMINQ,
876 		sizeof(struct ionic_admin_qcq),
877 		rte_socket_id(),
878 		0,
879 		"admin",
880 		flags,
881 		IONIC_ADMINQ_LENGTH,
882 		1,
883 		sizeof(struct ionic_admin_cmd),
884 		sizeof(struct ionic_admin_comp),
885 		0,
886 		(struct ionic_qcq **)&lif->adminqcq);
887 	if (err)
888 		return err;
889 
890 	return 0;
891 }
892 
893 static int
894 ionic_notify_qcq_alloc(struct ionic_lif *lif)
895 {
896 	struct ionic_notify_qcq *nqcq;
897 	struct ionic_dev *idev = &lif->adapter->idev;
898 	uint16_t flags = 0;
899 	int err;
900 
901 	err = ionic_qcq_alloc(lif,
902 		IONIC_QTYPE_NOTIFYQ,
903 		sizeof(struct ionic_notify_qcq),
904 		rte_socket_id(),
905 		0,
906 		"notify",
907 		flags,
908 		IONIC_NOTIFYQ_LENGTH,
909 		1,
910 		sizeof(struct ionic_notifyq_cmd),
911 		sizeof(union ionic_notifyq_comp),
912 		0,
913 		(struct ionic_qcq **)&nqcq);
914 	if (err)
915 		return err;
916 
917 	err = ionic_intr_alloc(lif, &nqcq->intr);
918 	if (err) {
919 		ionic_qcq_free(&nqcq->qcq);
920 		return err;
921 	}
922 
923 	ionic_intr_mask_assert(idev->intr_ctrl, nqcq->intr.index,
924 		IONIC_INTR_MASK_SET);
925 
926 	lif->notifyqcq = nqcq;
927 
928 	return 0;
929 }
930 
931 static void
932 ionic_lif_queue_identify(struct ionic_lif *lif)
933 {
934 	struct ionic_adapter *adapter = lif->adapter;
935 	struct ionic_dev *idev = &adapter->idev;
936 	union ionic_q_identity *q_ident = &adapter->ident.txq;
937 	uint32_t q_words = RTE_DIM(q_ident->words);
938 	uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data);
939 	uint32_t i, nwords, qtype;
940 	int err;
941 
942 	for (qtype = 0; qtype < RTE_DIM(ionic_qtype_vers); qtype++) {
943 		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
944 
945 		/* Filter out the types this driver knows about */
946 		switch (qtype) {
947 		case IONIC_QTYPE_ADMINQ:
948 		case IONIC_QTYPE_NOTIFYQ:
949 		case IONIC_QTYPE_RXQ:
950 		case IONIC_QTYPE_TXQ:
951 			break;
952 		default:
953 			continue;
954 		}
955 
956 		memset(qti, 0, sizeof(*qti));
957 
958 		ionic_dev_cmd_queue_identify(idev, IONIC_LIF_TYPE_CLASSIC,
959 			qtype, ionic_qtype_vers[qtype]);
960 		err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
961 		if (err == -EINVAL) {
962 			IONIC_PRINT(ERR, "qtype %d not supported\n", qtype);
963 			continue;
964 		} else if (err == -EIO) {
965 			IONIC_PRINT(ERR, "q_ident failed, older FW\n");
966 			return;
967 		} else if (err) {
968 			IONIC_PRINT(ERR, "q_ident failed, qtype %d: %d\n",
969 				qtype, err);
970 			return;
971 		}
972 
973 		nwords = RTE_MIN(q_words, cmd_words);
974 		for (i = 0; i < nwords; i++)
975 			q_ident->words[i] = ioread32(&idev->dev_cmd->data[i]);
976 
977 		qti->version   = q_ident->version;
978 		qti->supported = q_ident->supported;
979 		qti->features  = rte_le_to_cpu_64(q_ident->features);
980 		qti->desc_sz   = rte_le_to_cpu_16(q_ident->desc_sz);
981 		qti->comp_sz   = rte_le_to_cpu_16(q_ident->comp_sz);
982 		qti->sg_desc_sz   = rte_le_to_cpu_16(q_ident->sg_desc_sz);
983 		qti->max_sg_elems = rte_le_to_cpu_16(q_ident->max_sg_elems);
984 		qti->sg_desc_stride =
985 			rte_le_to_cpu_16(q_ident->sg_desc_stride);
986 
987 		IONIC_PRINT(DEBUG, " qtype[%d].version = %d",
988 			qtype, qti->version);
989 		IONIC_PRINT(DEBUG, " qtype[%d].supported = %#x",
990 			qtype, qti->supported);
991 		IONIC_PRINT(DEBUG, " qtype[%d].features = %#jx",
992 			qtype, qti->features);
993 		IONIC_PRINT(DEBUG, " qtype[%d].desc_sz = %d",
994 			qtype, qti->desc_sz);
995 		IONIC_PRINT(DEBUG, " qtype[%d].comp_sz = %d",
996 			qtype, qti->comp_sz);
997 		IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_sz = %d",
998 			qtype, qti->sg_desc_sz);
999 		IONIC_PRINT(DEBUG, " qtype[%d].max_sg_elems = %d",
1000 			qtype, qti->max_sg_elems);
1001 		IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_stride = %d",
1002 			qtype, qti->sg_desc_stride);
1003 	}
1004 }
1005 
/*
 * Allocate the per-LIF resources: queue arrays, notify and admin queues,
 * and the DMA-able LIF info page.  Also runs queue identification and
 * validates FW support (Tx SG v1 required; CMB support gated on qtype
 * versions and BAR count).
 *
 * Returns 0 on success or a negative errno.  Resources allocated before
 * a failure are not released here; NOTE(review): presumably the caller
 * invokes ionic_lif_free() on error — verify against the caller.
 */
int
ionic_lif_alloc(struct ionic_lif *lif)
{
	struct ionic_adapter *adapter = lif->adapter;
	uint32_t socket_id = rte_socket_id();
	int err;

	/*
	 * lif->name was zeroed on allocation.
	 * Copy (sizeof() - 1) bytes to ensure that it is NULL terminated.
	 */
	memcpy(lif->name, lif->eth_dev->data->name, sizeof(lif->name) - 1);

	IONIC_PRINT(DEBUG, "LIF: %s", lif->name);

	/* Populate lif->qtype_info[] from the FW before checking versions */
	ionic_lif_queue_identify(lif);

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version < 1) {
		IONIC_PRINT(ERR, "FW too old, please upgrade");
		return -ENXIO;
	}

	/* CMB needs BAR 2 plus RXQ >= v2 and TXQ >= v3 FW support */
	if (adapter->q_in_cmb) {
		if (adapter->bars.num_bars >= 3 &&
		    lif->qtype_info[IONIC_QTYPE_RXQ].version >= 2 &&
		    lif->qtype_info[IONIC_QTYPE_TXQ].version >= 3) {
			IONIC_PRINT(INFO, "%s enabled on %s",
				PMD_IONIC_CMB_KVARG, lif->name);
			lif->state |= IONIC_LIF_F_Q_IN_CMB;
		} else {
			IONIC_PRINT(ERR, "%s not supported on %s, disabled",
				PMD_IONIC_CMB_KVARG, lif->name);
		}
	}

	/* NOTE(review): this debug message duplicates the one further down */
	IONIC_PRINT(DEBUG, "Allocating Lif Info");

	rte_spinlock_init(&lif->adminq_lock);
	rte_spinlock_init(&lif->adminq_service_lock);

	/* Doorbell page must already be mapped by the adapter */
	lif->kern_dbpage = adapter->idev.db_pages;
	if (!lif->kern_dbpage) {
		IONIC_PRINT(ERR, "Cannot map dbpage, aborting");
		return -ENOMEM;
	}

	/* Pointer arrays sized for the adapter's per-LIF queue maximums */
	lif->txqcqs = rte_calloc_socket("ionic",
				adapter->max_ntxqs_per_lif,
				sizeof(*lif->txqcqs),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (!lif->txqcqs) {
		IONIC_PRINT(ERR, "Cannot allocate tx queues array");
		return -ENOMEM;
	}

	lif->rxqcqs = rte_calloc_socket("ionic",
				adapter->max_nrxqs_per_lif,
				sizeof(*lif->rxqcqs),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (!lif->rxqcqs) {
		IONIC_PRINT(ERR, "Cannot allocate rx queues array");
		return -ENOMEM;
	}

	IONIC_PRINT(DEBUG, "Allocating Notify Queue");

	err = ionic_notify_qcq_alloc(lif);
	if (err) {
		IONIC_PRINT(ERR, "Cannot allocate notify queue");
		return err;
	}

	IONIC_PRINT(DEBUG, "Allocating Admin Queue");

	err = ionic_admin_qcq_alloc(lif);
	if (err) {
		IONIC_PRINT(ERR, "Cannot allocate admin queue");
		return err;
	}

	IONIC_PRINT(DEBUG, "Allocating Lif Info");

	/* Page-aligned DMA zone that the FW fills with status and stats */
	lif->info_sz = RTE_ALIGN(sizeof(*lif->info), rte_mem_page_size());

	lif->info_z = rte_eth_dma_zone_reserve(lif->eth_dev,
		"lif_info", 0 /* queue_idx*/,
		lif->info_sz, IONIC_ALIGN, socket_id);
	if (!lif->info_z) {
		IONIC_PRINT(ERR, "Cannot allocate lif info memory");
		return -ENOMEM;
	}

	lif->info = lif->info_z->addr;
	lif->info_pa = lif->info_z->iova;

	return 0;
}
1103 
1104 void
1105 ionic_lif_free(struct ionic_lif *lif)
1106 {
1107 	if (lif->notifyqcq) {
1108 		ionic_qcq_free(&lif->notifyqcq->qcq);
1109 		lif->notifyqcq = NULL;
1110 	}
1111 
1112 	if (lif->adminqcq) {
1113 		ionic_qcq_free(&lif->adminqcq->qcq);
1114 		lif->adminqcq = NULL;
1115 	}
1116 
1117 	if (lif->txqcqs) {
1118 		rte_free(lif->txqcqs);
1119 		lif->txqcqs = NULL;
1120 	}
1121 
1122 	if (lif->rxqcqs) {
1123 		rte_free(lif->rxqcqs);
1124 		lif->rxqcqs = NULL;
1125 	}
1126 
1127 	if (lif->info) {
1128 		rte_memzone_free(lif->info_z);
1129 		lif->info = NULL;
1130 	}
1131 }
1132 
1133 void
1134 ionic_lif_free_queues(struct ionic_lif *lif)
1135 {
1136 	uint32_t i;
1137 
1138 	for (i = 0; i < lif->ntxqcqs; i++) {
1139 		ionic_dev_tx_queue_release(lif->eth_dev, i);
1140 		lif->eth_dev->data->tx_queues[i] = NULL;
1141 	}
1142 	for (i = 0; i < lif->nrxqcqs; i++) {
1143 		ionic_dev_rx_queue_release(lif->eth_dev, i);
1144 		lif->eth_dev->data->rx_queues[i] = NULL;
1145 	}
1146 }
1147 
1148 int
1149 ionic_lif_rss_config(struct ionic_lif *lif,
1150 		const uint16_t types, const uint8_t *key, const uint32_t *indir)
1151 {
1152 	struct ionic_adapter *adapter = lif->adapter;
1153 	struct ionic_admin_ctx ctx = {
1154 		.pending_work = true,
1155 		.cmd.lif_setattr = {
1156 			.opcode = IONIC_CMD_LIF_SETATTR,
1157 			.attr = IONIC_LIF_ATTR_RSS,
1158 			.rss.types = rte_cpu_to_le_16(types),
1159 			.rss.addr = rte_cpu_to_le_64(lif->rss_ind_tbl_pa),
1160 		},
1161 	};
1162 	unsigned int i;
1163 	uint16_t tbl_sz =
1164 		rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz);
1165 
1166 	IONIC_PRINT_CALL();
1167 
1168 	lif->rss_types = types;
1169 
1170 	if (key)
1171 		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1172 
1173 	if (indir)
1174 		for (i = 0; i < tbl_sz; i++)
1175 			lif->rss_ind_tbl[i] = indir[i];
1176 
1177 	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1178 	       IONIC_RSS_HASH_KEY_SIZE);
1179 
1180 	return ionic_adminq_post_wait(lif, &ctx);
1181 }
1182 
1183 static int
1184 ionic_lif_rss_setup(struct ionic_lif *lif)
1185 {
1186 	struct ionic_adapter *adapter = lif->adapter;
1187 	static const uint8_t toeplitz_symmetric_key[] = {
1188 		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1189 		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1190 		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1191 		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1192 		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1193 	};
1194 	uint32_t i;
1195 	uint16_t tbl_sz =
1196 		rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz);
1197 
1198 	IONIC_PRINT_CALL();
1199 
1200 	if (!lif->rss_ind_tbl_z) {
1201 		lif->rss_ind_tbl_z = rte_eth_dma_zone_reserve(lif->eth_dev,
1202 					"rss_ind_tbl", 0 /* queue_idx */,
1203 					sizeof(*lif->rss_ind_tbl) * tbl_sz,
1204 					IONIC_ALIGN, rte_socket_id());
1205 		if (!lif->rss_ind_tbl_z) {
1206 			IONIC_PRINT(ERR, "OOM");
1207 			return -ENOMEM;
1208 		}
1209 
1210 		lif->rss_ind_tbl = lif->rss_ind_tbl_z->addr;
1211 		lif->rss_ind_tbl_pa = lif->rss_ind_tbl_z->iova;
1212 	}
1213 
1214 	if (lif->rss_ind_tbl_nrxqcqs != lif->nrxqcqs) {
1215 		lif->rss_ind_tbl_nrxqcqs = lif->nrxqcqs;
1216 
1217 		/* Fill indirection table with 'default' values */
1218 		for (i = 0; i < tbl_sz; i++)
1219 			lif->rss_ind_tbl[i] = i % lif->nrxqcqs;
1220 	}
1221 
1222 	return ionic_lif_rss_config(lif, IONIC_RSS_OFFLOAD_ALL,
1223 			toeplitz_symmetric_key, NULL);
1224 }
1225 
1226 static void
1227 ionic_lif_rss_teardown(struct ionic_lif *lif)
1228 {
1229 	if (!lif->rss_ind_tbl)
1230 		return;
1231 
1232 	if (lif->rss_ind_tbl_z) {
1233 		/* Disable RSS on the NIC */
1234 		ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1235 
1236 		lif->rss_ind_tbl = NULL;
1237 		lif->rss_ind_tbl_pa = 0;
1238 		rte_memzone_free(lif->rss_ind_tbl_z);
1239 		lif->rss_ind_tbl_z = NULL;
1240 	}
1241 }
1242 
/*
 * Quiesce a Tx queue: ask the FW to disable it via the admin queue,
 * then clear its INITED flag.
 * NOTE(review): the return value of ionic_qcq_disable() is ignored,
 * so deinit proceeds even if the FW command fails.
 */
void
ionic_lif_txq_deinit(struct ionic_tx_qcq *txq)
{
	ionic_qcq_disable(&txq->qcq);

	txq->flags &= ~IONIC_QCQ_F_INITED;
}
1250 
/*
 * Quiesce an Rx queue: ask the FW to disable it via the admin queue,
 * then clear its INITED flag.
 * NOTE(review): the return value of ionic_qcq_disable() is ignored,
 * so deinit proceeds even if the FW command fails.
 */
void
ionic_lif_rxq_deinit(struct ionic_rx_qcq *rxq)
{
	ionic_qcq_disable(&rxq->qcq);

	rxq->flags &= ~IONIC_QCQ_F_INITED;
}
1258 
/* Mark the admin queue as uninitialized; no FW command is issued here */
static void
ionic_lif_adminq_deinit(struct ionic_lif *lif)
{
	lif->adminqcq->flags &= ~IONIC_QCQ_F_INITED;
}
1264 
/*
 * Deinit the notify queue: mask its interrupt so no further events
 * are delivered, then clear the INITED flag.  No-op if the queue
 * was never initialized.
 */
static void
ionic_lif_notifyq_deinit(struct ionic_lif *lif)
{
	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
	struct ionic_dev *idev = &lif->adapter->idev;

	if (!(nqcq->flags & IONIC_QCQ_F_INITED))
		return;

	/* Mask the notifyq interrupt before tearing the queue down */
	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_SET);

	nqcq->flags &= ~IONIC_QCQ_F_INITED;
}
1279 
1280 /* This acts like ionic_napi */
1281 int
1282 ionic_qcq_service(struct ionic_qcq *qcq, int budget, ionic_cq_cb cb,
1283 		void *cb_arg)
1284 {
1285 	struct ionic_cq *cq = &qcq->cq;
1286 	uint32_t work_done;
1287 
1288 	work_done = ionic_cq_service(cq, budget, cb, cb_arg);
1289 
1290 	return work_done;
1291 }
1292 
/*
 * Compare the FW-reported link state in lif_info against the cached
 * adapter state, and on a change record the new state/speed and tell
 * the ethdev layer to refresh the link.  Clears the deferred
 * LINK_CHECK_NEEDED flag set by the notifyq handler.
 */
static void
ionic_link_status_check(struct ionic_lif *lif)
{
	struct ionic_adapter *adapter = lif->adapter;
	bool link_up;

	lif->state &= ~IONIC_LIF_F_LINK_CHECK_NEEDED;

	/* lif_info is DMA'd by the FW; nothing to report until it exists */
	if (!lif->info)
		return;

	link_up = (lif->info->status.link_status == IONIC_PORT_OPER_STATUS_UP);

	/* No transition: nothing to do */
	if ((link_up  && adapter->link_up) ||
	    (!link_up && !adapter->link_up))
		return;

	if (link_up) {
		adapter->link_speed =
			rte_le_to_cpu_32(lif->info->status.link_speed);
		IONIC_PRINT(DEBUG, "Link up - %d Gbps",
			adapter->link_speed);
	} else {
		IONIC_PRINT(DEBUG, "Link down");
	}

	adapter->link_up = link_up;
	ionic_dev_link_update(lif->eth_dev, 0);
}
1322 
1323 static void
1324 ionic_lif_handle_fw_down(struct ionic_lif *lif)
1325 {
1326 	if (lif->state & IONIC_LIF_F_FW_RESET)
1327 		return;
1328 
1329 	lif->state |= IONIC_LIF_F_FW_RESET;
1330 
1331 	if (lif->state & IONIC_LIF_F_UP) {
1332 		IONIC_PRINT(NOTICE,
1333 			"Surprise FW stop, stopping %s\n", lif->name);
1334 		ionic_lif_stop(lif);
1335 	}
1336 
1337 	IONIC_PRINT(NOTICE, "FW down, %s stopped", lif->name);
1338 }
1339 
/*
 * Completion-queue callback for the notify queue (see ionic_cq_cb).
 * Decodes one notifyq event descriptor and reacts to it.
 * Returns false when the descriptor's event id is not newer than the
 * last one processed (i.e. no new completions remain), true otherwise.
 */
static bool
ionic_notifyq_cb(struct ionic_cq *cq, uint16_t cq_desc_index, void *cb_arg)
{
	union ionic_notifyq_comp *cq_desc_base = cq->base;
	union ionic_notifyq_comp *cq_desc = &cq_desc_base[cq_desc_index];
	struct ionic_lif *lif = cb_arg;

	IONIC_PRINT(DEBUG, "Notifyq callback eid = %jd ecode = %d",
		cq_desc->event.eid, cq_desc->event.ecode);

	/* Have we run out of new completions to process? */
	if (!(cq_desc->event.eid > lif->last_eid))
		return false;

	lif->last_eid = cq_desc->event.eid;

	switch (cq_desc->event.ecode) {
	case IONIC_EVENT_LINK_CHANGE:
		IONIC_PRINT(DEBUG,
			"Notifyq IONIC_EVENT_LINK_CHANGE %s "
			"eid=%jd link_status=%d link_speed=%d",
			lif->name,
			cq_desc->event.eid,
			cq_desc->link_change.link_status,
			cq_desc->link_change.link_speed);

		/* Defer the actual link update to ionic_link_status_check() */
		lif->state |= IONIC_LIF_F_LINK_CHECK_NEEDED;
		break;

	case IONIC_EVENT_RESET:
		IONIC_PRINT(NOTICE,
			"Notifyq IONIC_EVENT_RESET %s "
			"eid=%jd, reset_code=%d state=%d",
			lif->name,
			cq_desc->event.eid,
			cq_desc->reset.reset_code,
			cq_desc->reset.state);
		ionic_lif_handle_fw_down(lif);
		break;

	default:
		/* Unknown events are logged and skipped, not fatal */
		IONIC_PRINT(WARNING, "Notifyq bad event ecode=%d eid=%jd",
			cq_desc->event.ecode, cq_desc->event.eid);
		break;
	}

	return true;
}
1388 
/*
 * Service up to 'budget' pending notifyq events.
 * The notifyq interrupt is masked for the duration of processing,
 * credits are returned for the work done, and the interrupt is
 * unmasked again on the way out.
 * Returns 0, or -1 if the notifyq is not yet initialized.
 */
int
ionic_notifyq_handler(struct ionic_lif *lif, int budget)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
	uint32_t work_done;

	if (!(nqcq->flags & IONIC_QCQ_F_INITED)) {
		IONIC_PRINT(DEBUG, "Notifyq not yet initialized");
		return -1;
	}

	/* Mask the interrupt while we drain the queue */
	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_SET);

	work_done = ionic_qcq_service(&nqcq->qcq, budget,
				ionic_notifyq_cb, lif);

	/* Apply any link change observed while draining events */
	if (lif->state & IONIC_LIF_F_LINK_CHECK_NEEDED)
		ionic_link_status_check(lif);

	/* Return credits for the handled events, then re-enable the intr */
	ionic_intr_credits(idev->intr_ctrl, nqcq->intr.index,
		work_done, IONIC_INTR_CRED_RESET_COALESCE);

	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_CLEAR);

	return 0;
}
1418 
1419 static int
1420 ionic_lif_adminq_init(struct ionic_lif *lif)
1421 {
1422 	struct ionic_dev *idev = &lif->adapter->idev;
1423 	struct ionic_admin_qcq *aqcq = lif->adminqcq;
1424 	struct ionic_queue *q = &aqcq->qcq.q;
1425 	struct ionic_q_init_comp comp;
1426 	uint32_t retries = 5;
1427 	int err;
1428 
1429 retry_adminq_init:
1430 	ionic_dev_cmd_adminq_init(idev, &aqcq->qcq);
1431 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
1432 	if (err == -EAGAIN && retries > 0) {
1433 		retries--;
1434 		rte_delay_us_block(IONIC_DEVCMD_RETRY_WAIT_US);
1435 		goto retry_adminq_init;
1436 	}
1437 	if (err)
1438 		return err;
1439 
1440 	ionic_dev_cmd_comp(idev, &comp);
1441 
1442 	q->hw_type = comp.hw_type;
1443 	q->hw_index = rte_le_to_cpu_32(comp.hw_index);
1444 	q->db = ionic_db_map(lif, q);
1445 
1446 	IONIC_PRINT(DEBUG, "adminq->hw_type %d", q->hw_type);
1447 	IONIC_PRINT(DEBUG, "adminq->hw_index %d", q->hw_index);
1448 	IONIC_PRINT(DEBUG, "adminq->db %p", q->db);
1449 
1450 	aqcq->flags |= IONIC_QCQ_F_INITED;
1451 
1452 	return 0;
1453 }
1454 
/*
 * Initialize the notify queue via the admin queue.  An interrupt is
 * requested only if the platform interface supports interrupt
 * configuration; otherwise the queue runs unarmed (IONIC_INTR_NONE).
 * The notifyq is FW-driven, so no doorbell is mapped (q->db = NULL).
 */
static int
ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
	struct ionic_queue *q = &nqcq->qcq.q;
	uint16_t flags = IONIC_QINIT_F_ENA;
	int err;

	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = rte_cpu_to_le_32(q->index),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.ring_base = rte_cpu_to_le_64(q->base_pa),
		}
	};

	/* Only enable an interrupt if the device supports them */
	if (lif->adapter->intf->configure_intr != NULL) {
		flags |= IONIC_QINIT_F_IRQ;
		ctx.cmd.q_init.intr_index = rte_cpu_to_le_16(nqcq->intr.index);
	}
	ctx.cmd.q_init.flags = rte_cpu_to_le_16(flags);

	IONIC_PRINT(DEBUG, "notifyq_init.index %d", q->index);
	IONIC_PRINT(DEBUG, "notifyq_init.ring_base 0x%" PRIx64 "", q->base_pa);
	IONIC_PRINT(DEBUG, "notifyq_init.ring_size %d",
		ctx.cmd.q_init.ring_size);
	IONIC_PRINT(DEBUG, "notifyq_init.ver %u", ctx.cmd.q_init.ver);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* Record the HW identity from the q_init completion */
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
	q->db = NULL;

	IONIC_PRINT(DEBUG, "notifyq->hw_type %d", q->hw_type);
	IONIC_PRINT(DEBUG, "notifyq->hw_index %d", q->hw_index);
	IONIC_PRINT(DEBUG, "notifyq->db %p", q->db);

	/* Unmask the interrupt so FW events can be delivered */
	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_CLEAR);

	nqcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
1509 
1510 int
1511 ionic_lif_set_features(struct ionic_lif *lif)
1512 {
1513 	struct ionic_admin_ctx ctx = {
1514 		.pending_work = true,
1515 		.cmd.lif_setattr = {
1516 			.opcode = IONIC_CMD_LIF_SETATTR,
1517 			.attr = IONIC_LIF_ATTR_FEATURES,
1518 			.features = rte_cpu_to_le_64(lif->features),
1519 		},
1520 	};
1521 	int err;
1522 
1523 	err = ionic_adminq_post_wait(lif, &ctx);
1524 	if (err)
1525 		return err;
1526 
1527 	lif->hw_features = rte_le_to_cpu_64(ctx.cmd.lif_setattr.features &
1528 						ctx.comp.lif_setattr.features);
1529 
1530 	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1531 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_TX_TAG");
1532 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1533 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_STRIP");
1534 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1535 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_FILTER");
1536 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1537 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_HASH");
1538 	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1539 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_SG");
1540 	if (lif->hw_features & IONIC_ETH_HW_RX_SG)
1541 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_SG");
1542 	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1543 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_CSUM");
1544 	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1545 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_CSUM");
1546 	if (lif->hw_features & IONIC_ETH_HW_TSO)
1547 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO");
1548 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1549 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPV6");
1550 	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1551 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_ECN");
1552 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1553 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE");
1554 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1555 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE_CSUM");
1556 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1557 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP4");
1558 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1559 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP6");
1560 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1561 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP");
1562 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1563 		IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP_CSUM");
1564 
1565 	return 0;
1566 }
1567 
/*
 * Initialize a Tx queue via the admin queue: reset the local ring
 * state, issue Q_INIT with the queue/CQ/SG ring addresses, then
 * record the HW identity and map the doorbell.
 * Returns 0 on success or the admin command's error code.
 */
int
ionic_lif_txq_init(struct ionic_tx_qcq *txq)
{
	struct ionic_qcq *qcq = &txq->qcq;
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = qcq->lif;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = rte_cpu_to_le_32(q->index),
			.flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.ring_base = rte_cpu_to_le_64(q->base_pa),
			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
		},
	};
	int err;

	/* Propagate the qcq's SG/CMB options to the FW command */
	if (txq->flags & IONIC_QCQ_F_SG)
		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);
	if (txq->flags & IONIC_QCQ_F_CMB)
		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);

	IONIC_PRINT(DEBUG, "txq_init.index %d", q->index);
	IONIC_PRINT(DEBUG, "txq_init.ring_base 0x%" PRIx64 "", q->base_pa);
	IONIC_PRINT(DEBUG, "txq_init.ring_size %d",
		ctx.cmd.q_init.ring_size);
	IONIC_PRINT(DEBUG, "txq_init.ver %u", ctx.cmd.q_init.ver);

	/* Reset local ring indices before the FW (re)initializes the queue */
	ionic_q_reset(q);
	ionic_cq_reset(cq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
	q->db = ionic_db_map(lif, q);

	IONIC_PRINT(DEBUG, "txq->hw_type %d", q->hw_type);
	IONIC_PRINT(DEBUG, "txq->hw_index %d", q->hw_index);
	IONIC_PRINT(DEBUG, "txq->db %p", q->db);

	txq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
1622 
/*
 * Initialize an Rx queue via the admin queue: reset the local ring
 * state, issue Q_INIT with the queue/CQ/SG ring addresses, then
 * record the HW identity and map the doorbell.
 * Returns 0 on success or the admin command's error code.
 */
int
ionic_lif_rxq_init(struct ionic_rx_qcq *rxq)
{
	struct ionic_qcq *qcq = &rxq->qcq;
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = qcq->lif;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = rte_cpu_to_le_32(q->index),
			.flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.ring_base = rte_cpu_to_le_64(q->base_pa),
			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
		},
	};
	int err;

	/* Propagate the qcq's SG/CMB options to the FW command */
	if (rxq->flags & IONIC_QCQ_F_SG)
		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);
	if (rxq->flags & IONIC_QCQ_F_CMB)
		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);

	IONIC_PRINT(DEBUG, "rxq_init.index %d", q->index);
	IONIC_PRINT(DEBUG, "rxq_init.ring_base 0x%" PRIx64 "", q->base_pa);
	IONIC_PRINT(DEBUG, "rxq_init.ring_size %d",
		ctx.cmd.q_init.ring_size);
	IONIC_PRINT(DEBUG, "rxq_init.ver %u", ctx.cmd.q_init.ver);

	/* Reset local ring indices before the FW (re)initializes the queue */
	ionic_q_reset(q);
	ionic_cq_reset(cq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
	q->db = ionic_db_map(lif, q);

	rxq->flags |= IONIC_QCQ_F_INITED;

	IONIC_PRINT(DEBUG, "rxq->hw_type %d", q->hw_type);
	IONIC_PRINT(DEBUG, "rxq->hw_index %d", q->hw_index);
	IONIC_PRINT(DEBUG, "rxq->db %p", q->db);

	return 0;
}
1677 
1678 static int
1679 ionic_station_set(struct ionic_lif *lif)
1680 {
1681 	struct ionic_admin_ctx ctx = {
1682 		.pending_work = true,
1683 		.cmd.lif_getattr = {
1684 			.opcode = IONIC_CMD_LIF_GETATTR,
1685 			.attr = IONIC_LIF_ATTR_MAC,
1686 		},
1687 	};
1688 	int err;
1689 
1690 	IONIC_PRINT_CALL();
1691 
1692 	err = ionic_adminq_post_wait(lif, &ctx);
1693 	if (err)
1694 		return err;
1695 
1696 	memcpy(lif->mac_addr, ctx.comp.lif_getattr.mac, RTE_ETHER_ADDR_LEN);
1697 
1698 	return 0;
1699 }
1700 
/*
 * Push the LIF's name to the FW via LIF_SETATTR.  Best-effort: the
 * admin command's status is deliberately ignored.
 */
static void
ionic_lif_set_name(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	/*
	 * Copy at most sizeof(name) - 1 bytes; the designated initializer
	 * above zeroed ctx, so the destination stays NUL-terminated.
	 * NOTE(review): assumes lif->name is at least that large - the
	 * fixed-size memcpy reads that many bytes regardless of strlen.
	 */
	memcpy(ctx.cmd.lif_setattr.name, lif->name,
		sizeof(ctx.cmd.lif_setattr.name) - 1);

	ionic_adminq_post_wait(lif, &ctx);
}
1717 
/*
 * Bring up the LIF: issue LIF_INIT (with retry on -EAGAIN), then
 * initialize the admin and notify queues, program the initial
 * feature set, set up Rx filters, fetch the station MAC, and push
 * the LIF name.  On failure, tears down in reverse order via the
 * goto cleanup ladder.  Sets IONIC_LIF_F_INITED on success.
 */
int
ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_lif_init_comp comp;
	uint32_t retries = 5;
	int err;

	memset(&lif->stats_base, 0, sizeof(lif->stats_base));

retry_lif_init:
	ionic_dev_cmd_lif_init(idev, lif->info_pa);
	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
	if (err == -EAGAIN && retries > 0) {
		retries--;
		rte_delay_us_block(IONIC_DEVCMD_RETRY_WAIT_US);
		goto retry_lif_init;
	}
	if (err)
		return err;

	ionic_dev_cmd_comp(idev, &comp);

	lif->hw_index = rte_cpu_to_le_16(comp.hw_index);

	/* The admin queue must exist before any adminq-based q_init */
	err = ionic_lif_adminq_init(lif);
	if (err)
		return err;

	err = ionic_lif_notifyq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	/*
	 * Configure initial feature set
	 * This will be updated later by the dev_configure() step
	 */
	lif->features = IONIC_ETH_HW_RX_HASH | IONIC_ETH_HW_VLAN_RX_FILTER;

	err = ionic_lif_set_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	err = ionic_rx_filters_init(lif);
	if (err)
		goto err_out_notifyq_deinit;

	err = ionic_station_set(lif);
	if (err)
		goto err_out_rx_filter_deinit;

	/* Best-effort; failure to set the name is not fatal */
	ionic_lif_set_name(lif);

	lif->state |= IONIC_LIF_F_INITED;

	return 0;

err_out_rx_filter_deinit:
	ionic_rx_filters_deinit(lif);

err_out_notifyq_deinit:
	ionic_lif_notifyq_deinit(lif);

err_out_adminq_deinit:
	ionic_lif_adminq_deinit(lif);

	return err;
}
1786 
1787 void
1788 ionic_lif_deinit(struct ionic_lif *lif)
1789 {
1790 	if (!(lif->state & IONIC_LIF_F_INITED))
1791 		return;
1792 
1793 	ionic_rx_filters_deinit(lif);
1794 	ionic_lif_rss_teardown(lif);
1795 	ionic_lif_notifyq_deinit(lif);
1796 	ionic_lif_adminq_deinit(lif);
1797 
1798 	lif->state &= ~IONIC_LIF_F_INITED;
1799 }
1800 
1801 void
1802 ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)
1803 {
1804 	struct rte_eth_dev *eth_dev = lif->eth_dev;
1805 	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1806 
1807 	/*
1808 	 * IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so
1809 	 * set RTE_ETH_RX_OFFLOAD_VLAN_FILTER and ignore RTE_ETH_VLAN_FILTER_MASK
1810 	 */
1811 	rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
1812 
1813 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1814 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1815 			lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
1816 		else
1817 			lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
1818 	}
1819 }
1820 
1821 void
1822 ionic_lif_configure_rx_sg_offload(struct ionic_lif *lif)
1823 {
1824 	struct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode;
1825 
1826 	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
1827 		lif->features |= IONIC_ETH_HW_RX_SG;
1828 		lif->eth_dev->data->scattered_rx = 1;
1829 	} else {
1830 		lif->features &= ~IONIC_ETH_HW_RX_SG;
1831 		lif->eth_dev->data->scattered_rx = 0;
1832 	}
1833 }
1834 
/*
 * Translate the ethdev configuration (queue counts and per-port
 * Rx/Tx offloads) into LIF queue counts and feature flags.  Called
 * from the ethdev dev_configure path; the features chosen here are
 * later pushed to the FW by ionic_lif_set_features().
 */
void
ionic_lif_configure(struct ionic_lif *lif)
{
	struct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode;
	struct rte_eth_txmode *txmode = &lif->eth_dev->data->dev_conf.txmode;
	struct ionic_identity *ident = &lif->adapter->ident;
	union ionic_lif_config *cfg = &ident->lif.eth.config;
	uint32_t ntxqs_per_lif =
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
	uint32_t nrxqs_per_lif =
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]);
	uint32_t nrxqs = lif->eth_dev->data->nb_rx_queues;
	uint32_t ntxqs = lif->eth_dev->data->nb_tx_queues;

	lif->port_id = lif->eth_dev->data->port_id;

	IONIC_PRINT(DEBUG, "Configuring LIF on port %u",
		lif->port_id);

	/* Clamp the FW-advertised queue counts to what the app requested */
	if (nrxqs > 0)
		nrxqs_per_lif = RTE_MIN(nrxqs_per_lif, nrxqs);

	if (ntxqs > 0)
		ntxqs_per_lif = RTE_MIN(ntxqs_per_lif, ntxqs);

	lif->nrxqcqs = nrxqs_per_lif;
	lif->ntxqcqs = ntxqs_per_lif;

	/* Update the LIF configuration based on the eth_dev */

	/*
	 * NB: While it is true that RSS_HASH is always enabled on ionic,
	 *     setting this flag unconditionally causes problems in DTS.
	 * rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
	 */

	/* RX per-port */

	/* Any L3/L4 Rx checksum offload enables the single HW feature */
	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM ||
	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM ||
	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
		lif->features |= IONIC_ETH_HW_RX_CSUM;
	else
		lif->features &= ~IONIC_ETH_HW_RX_CSUM;

	/*
	 * NB: RX_SG may be enabled later during rx_queue_setup() if
	 * required by the mbuf/mtu configuration
	 */
	ionic_lif_configure_rx_sg_offload(lif);

	/* Covers VLAN_STRIP */
	ionic_lif_configure_vlan_offload(lif, RTE_ETH_VLAN_STRIP_MASK);

	/* TX per-port */

	/* Any inner or outer Tx checksum offload enables the HW feature */
	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
		lif->features |= IONIC_ETH_HW_TX_CSUM;
	else
		lif->features &= ~IONIC_ETH_HW_TX_CSUM;

	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
		lif->features |= IONIC_ETH_HW_VLAN_TX_TAG;
	else
		lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG;

	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
		lif->features |= IONIC_ETH_HW_TX_SG;
	else
		lif->features &= ~IONIC_ETH_HW_TX_SG;

	/* TCP_TSO drives the IPv4, IPv6, and ECN TSO features together */
	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
		lif->features |= IONIC_ETH_HW_TSO;
		lif->features |= IONIC_ETH_HW_TSO_IPV6;
		lif->features |= IONIC_ETH_HW_TSO_ECN;
	} else {
		lif->features &= ~IONIC_ETH_HW_TSO;
		lif->features &= ~IONIC_ETH_HW_TSO_IPV6;
		lif->features &= ~IONIC_ETH_HW_TSO_ECN;
	}
}
1920 
/*
 * Start the LIF: set up RSS, program a default Rx mode if none is
 * set, start every non-deferred Rx and Tx queue, then mark the LIF
 * up and refresh the link state.
 * Returns 0 on success or the first queue/RSS error encountered
 * (queues already started are left running in that case).
 */
int
ionic_lif_start(struct ionic_lif *lif)
{
	uint32_t rx_mode;
	uint32_t i;
	int err;

	err = ionic_lif_rss_setup(lif);
	if (err)
		return err;

	/* Only program a default Rx mode if the app hasn't set one */
	if (!lif->rx_mode) {
		IONIC_PRINT(DEBUG, "Setting RX mode on %s",
			lif->name);

		rx_mode  = IONIC_RX_MODE_F_UNICAST;
		rx_mode |= IONIC_RX_MODE_F_MULTICAST;
		rx_mode |= IONIC_RX_MODE_F_BROADCAST;

		ionic_set_rx_mode(lif, rx_mode);
	}

	IONIC_PRINT(DEBUG, "Starting %u RX queues and %u TX queues "
		"on port %u",
		lif->nrxqcqs, lif->ntxqcqs, lif->port_id);

	/* DEFERRED queues are started explicitly by the app, not here */
	for (i = 0; i < lif->nrxqcqs; i++) {
		struct ionic_rx_qcq *rxq = lif->rxqcqs[i];
		if (!(rxq->flags & IONIC_QCQ_F_DEFERRED)) {
			err = ionic_dev_rx_queue_start(lif->eth_dev, i);

			if (err)
				return err;
		}
	}

	for (i = 0; i < lif->ntxqcqs; i++) {
		struct ionic_tx_qcq *txq = lif->txqcqs[i];
		if (!(txq->flags & IONIC_QCQ_F_DEFERRED)) {
			err = ionic_dev_tx_queue_start(lif->eth_dev, i);

			if (err)
				return err;
		}
	}

	/* Carrier ON here */
	lif->state |= IONIC_LIF_F_UP;

	ionic_link_status_check(lif);

	return 0;
}
1974 
1975 int
1976 ionic_lif_identify(struct ionic_adapter *adapter)
1977 {
1978 	struct ionic_dev *idev = &adapter->idev;
1979 	struct ionic_identity *ident = &adapter->ident;
1980 	union ionic_lif_config *cfg = &ident->lif.eth.config;
1981 	uint32_t lif_words = RTE_DIM(ident->lif.words);
1982 	uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data);
1983 	uint32_t i, nwords;
1984 	int err;
1985 
1986 	ionic_dev_cmd_lif_identify(idev, IONIC_LIF_TYPE_CLASSIC,
1987 		IONIC_IDENTITY_VERSION_1);
1988 	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
1989 	if (err)
1990 		return (err);
1991 
1992 	nwords = RTE_MIN(lif_words, cmd_words);
1993 	for (i = 0; i < nwords; i++)
1994 		ident->lif.words[i] = ioread32(&idev->dev_cmd->data[i]);
1995 
1996 	IONIC_PRINT(INFO, "capabilities 0x%" PRIx64 " ",
1997 		rte_le_to_cpu_64(ident->lif.capabilities));
1998 
1999 	IONIC_PRINT(INFO, "eth.max_ucast_filters 0x%" PRIx32 " ",
2000 		rte_le_to_cpu_32(ident->lif.eth.max_ucast_filters));
2001 	IONIC_PRINT(INFO, "eth.max_mcast_filters 0x%" PRIx32 " ",
2002 		rte_le_to_cpu_32(ident->lif.eth.max_mcast_filters));
2003 
2004 	IONIC_PRINT(INFO, "eth.features 0x%" PRIx64 " ",
2005 		rte_le_to_cpu_64(cfg->features));
2006 	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_ADMINQ] 0x%" PRIx32 " ",
2007 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_ADMINQ]));
2008 	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] 0x%" PRIx32 " ",
2009 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_NOTIFYQ]));
2010 	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_RXQ] 0x%" PRIx32 " ",
2011 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]));
2012 	IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_TXQ] 0x%" PRIx32 " ",
2013 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]));
2014 
2015 	return 0;
2016 }
2017 
2018 int
2019 ionic_lifs_size(struct ionic_adapter *adapter)
2020 {
2021 	struct ionic_identity *ident = &adapter->ident;
2022 	union ionic_lif_config *cfg = &ident->lif.eth.config;
2023 	uint32_t nintrs, dev_nintrs = rte_le_to_cpu_32(ident->dev.nintrs);
2024 
2025 	adapter->max_ntxqs_per_lif =
2026 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
2027 	adapter->max_nrxqs_per_lif =
2028 		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]);
2029 
2030 	nintrs = 1 /* notifyq */;
2031 
2032 	if (nintrs > dev_nintrs) {
2033 		IONIC_PRINT(ERR,
2034 			"At most %d intr supported, minimum req'd is %u",
2035 			dev_nintrs, nintrs);
2036 		return -ENOSPC;
2037 	}
2038 
2039 	adapter->nintrs = nintrs;
2040 
2041 	return 0;
2042 }
2043