1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018-2022 Advanced Micro Devices, Inc.
3 */
4
5 #include <rte_malloc.h>
6 #include <ethdev_driver.h>
7
8 #include "ionic.h"
9 #include "ionic_logs.h"
10 #include "ionic_lif.h"
11 #include "ionic_ethdev.h"
12 #include "ionic_rx_filter.h"
13 #include "ionic_rxtx.h"
14
/*
 * Queuetype support level: the highest version of each queue type that
 * this driver supports.  Passed to the firmware during queue identify
 * (see ionic_lif_queue_identify()) to negotiate the version to use.
 */
static const uint8_t ionic_qtype_vers[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */
	[IONIC_QTYPE_RXQ] = 2, /* 0 = Base version with CQ+SG support
				* 1 = ... with EQ
				* 2 = ... with CMB
				*/
	[IONIC_QTYPE_TXQ] = 3, /* 0 = Base version with CQ+SG support
				* 1 = ... with Tx SG version 1
				* 2 = ... with EQ
				* 3 = ... with CMB
				*/
};
29
30 static int ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr);
31 static int ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr);
32
33 static int
ionic_qcq_disable_nowait(struct ionic_qcq * qcq,struct ionic_admin_ctx * ctx)34 ionic_qcq_disable_nowait(struct ionic_qcq *qcq,
35 struct ionic_admin_ctx *ctx)
36 {
37 int err;
38
39 struct ionic_queue *q = &qcq->q;
40 struct ionic_lif *lif = qcq->lif;
41
42 *ctx = (struct ionic_admin_ctx) {
43 .pending_work = true,
44 .cmd.q_control = {
45 .opcode = IONIC_CMD_Q_CONTROL,
46 .type = q->type,
47 .index = rte_cpu_to_le_32(q->index),
48 .oper = IONIC_Q_DISABLE,
49 },
50 };
51
52 /* Does not wait for command completion */
53 err = ionic_adminq_post(lif, ctx);
54 if (err)
55 ctx->pending_work = false;
56 return err;
57 }
58
59 void
ionic_lif_stop(struct ionic_lif * lif)60 ionic_lif_stop(struct ionic_lif *lif)
61 {
62 struct rte_eth_dev *dev = lif->eth_dev;
63 uint32_t i, j, chunk;
64
65 IONIC_PRINT_CALL();
66
67 lif->state &= ~IONIC_LIF_F_UP;
68
69 chunk = ionic_adminq_space_avail(lif);
70
71 for (i = 0; i < lif->nrxqcqs; i += chunk) {
72 for (j = 0; j < chunk && i + j < lif->nrxqcqs; j++)
73 ionic_dev_rx_queue_stop_firsthalf(dev, i + j);
74
75 for (j = 0; j < chunk && i + j < lif->nrxqcqs; j++)
76 ionic_dev_rx_queue_stop_secondhalf(dev, i + j);
77 }
78
79 for (i = 0; i < lif->ntxqcqs; i += chunk) {
80 for (j = 0; j < chunk && i + j < lif->ntxqcqs; j++)
81 ionic_dev_tx_queue_stop_firsthalf(dev, i + j);
82
83 for (j = 0; j < chunk && i + j < lif->ntxqcqs; j++)
84 ionic_dev_tx_queue_stop_secondhalf(dev, i + j);
85 }
86 }
87
88 void
ionic_lif_reset(struct ionic_lif * lif)89 ionic_lif_reset(struct ionic_lif *lif)
90 {
91 struct ionic_dev *idev = &lif->adapter->idev;
92 int err;
93
94 IONIC_PRINT_CALL();
95
96 ionic_dev_cmd_lif_reset(idev);
97 err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
98 if (err)
99 IONIC_PRINT(WARNING, "Failed to reset %s", lif->name);
100 }
101
102 static void
ionic_lif_get_abs_stats(const struct ionic_lif * lif,struct rte_eth_stats * stats)103 ionic_lif_get_abs_stats(const struct ionic_lif *lif, struct rte_eth_stats *stats)
104 {
105 struct ionic_lif_stats *ls = &lif->info->stats;
106 uint32_t i;
107 uint32_t num_rx_q_counters = RTE_MIN(lif->nrxqcqs, (uint32_t)
108 RTE_ETHDEV_QUEUE_STAT_CNTRS);
109 uint32_t num_tx_q_counters = RTE_MIN(lif->ntxqcqs, (uint32_t)
110 RTE_ETHDEV_QUEUE_STAT_CNTRS);
111
112 memset(stats, 0, sizeof(*stats));
113
114 if (ls == NULL) {
115 IONIC_PRINT(DEBUG, "Stats on port %u not yet initialized",
116 lif->port_id);
117 return;
118 }
119
120 /* RX */
121
122 stats->ipackets = ls->rx_ucast_packets +
123 ls->rx_mcast_packets +
124 ls->rx_bcast_packets;
125
126 stats->ibytes = ls->rx_ucast_bytes +
127 ls->rx_mcast_bytes +
128 ls->rx_bcast_bytes;
129
130 for (i = 0; i < lif->nrxqcqs; i++) {
131 struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats;
132 stats->ierrors +=
133 rx_stats->bad_cq_status +
134 rx_stats->bad_len;
135 }
136
137 stats->imissed +=
138 ls->rx_ucast_drop_packets +
139 ls->rx_mcast_drop_packets +
140 ls->rx_bcast_drop_packets;
141
142 stats->ierrors +=
143 ls->rx_dma_error +
144 ls->rx_desc_fetch_error +
145 ls->rx_desc_data_error;
146
147 for (i = 0; i < num_rx_q_counters; i++) {
148 struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats;
149 stats->q_ipackets[i] = rx_stats->packets;
150 stats->q_ibytes[i] = rx_stats->bytes;
151 stats->q_errors[i] =
152 rx_stats->bad_cq_status +
153 rx_stats->bad_len;
154 }
155
156 /* TX */
157
158 stats->opackets = ls->tx_ucast_packets +
159 ls->tx_mcast_packets +
160 ls->tx_bcast_packets;
161
162 stats->obytes = ls->tx_ucast_bytes +
163 ls->tx_mcast_bytes +
164 ls->tx_bcast_bytes;
165
166 for (i = 0; i < lif->ntxqcqs; i++) {
167 struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats;
168 stats->oerrors += tx_stats->drop;
169 }
170
171 stats->oerrors +=
172 ls->tx_ucast_drop_packets +
173 ls->tx_mcast_drop_packets +
174 ls->tx_bcast_drop_packets;
175
176 stats->oerrors +=
177 ls->tx_dma_error +
178 ls->tx_queue_disabled +
179 ls->tx_desc_fetch_error +
180 ls->tx_desc_data_error;
181
182 for (i = 0; i < num_tx_q_counters; i++) {
183 struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats;
184 stats->q_opackets[i] = tx_stats->packets;
185 stats->q_obytes[i] = tx_stats->bytes;
186 }
187 }
188
189 void
ionic_lif_get_stats(const struct ionic_lif * lif,struct rte_eth_stats * stats)190 ionic_lif_get_stats(const struct ionic_lif *lif,
191 struct rte_eth_stats *stats)
192 {
193 ionic_lif_get_abs_stats(lif, stats);
194
195 stats->ipackets -= lif->stats_base.ipackets;
196 stats->opackets -= lif->stats_base.opackets;
197 stats->ibytes -= lif->stats_base.ibytes;
198 stats->obytes -= lif->stats_base.obytes;
199 stats->imissed -= lif->stats_base.imissed;
200 stats->ierrors -= lif->stats_base.ierrors;
201 stats->oerrors -= lif->stats_base.oerrors;
202 stats->rx_nombuf -= lif->stats_base.rx_nombuf;
203 }
204
205 void
ionic_lif_reset_stats(struct ionic_lif * lif)206 ionic_lif_reset_stats(struct ionic_lif *lif)
207 {
208 uint32_t i;
209
210 for (i = 0; i < lif->nrxqcqs; i++) {
211 memset(&lif->rxqcqs[i]->stats, 0,
212 sizeof(struct ionic_rx_stats));
213 memset(&lif->txqcqs[i]->stats, 0,
214 sizeof(struct ionic_tx_stats));
215 }
216
217 ionic_lif_get_abs_stats(lif, &lif->stats_base);
218 }
219
220 void
ionic_lif_get_hw_stats(struct ionic_lif * lif,struct ionic_lif_stats * stats)221 ionic_lif_get_hw_stats(struct ionic_lif *lif, struct ionic_lif_stats *stats)
222 {
223 uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t);
224 uint64_t *stats64 = (uint64_t *)stats;
225 uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats;
226 uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base;
227
228 for (i = 0; i < count; i++)
229 stats64[i] = lif_stats64[i] - lif_stats64_base[i];
230 }
231
232 void
ionic_lif_reset_hw_stats(struct ionic_lif * lif)233 ionic_lif_reset_hw_stats(struct ionic_lif *lif)
234 {
235 uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t);
236 uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats;
237 uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base;
238
239 for (i = 0; i < count; i++)
240 lif_stats64_base[i] = lif_stats64[i];
241 }
242
243 static int
ionic_lif_addr_add(struct ionic_lif * lif,const uint8_t * addr)244 ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr)
245 {
246 struct ionic_admin_ctx ctx = {
247 .pending_work = true,
248 .cmd.rx_filter_add = {
249 .opcode = IONIC_CMD_RX_FILTER_ADD,
250 .match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_MAC),
251 },
252 };
253 int err;
254
255 memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, RTE_ETHER_ADDR_LEN);
256
257 err = ionic_adminq_post_wait(lif, &ctx);
258 if (err)
259 return err;
260
261 IONIC_PRINT(INFO, "rx_filter add (id %d)",
262 rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id));
263
264 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx);
265 }
266
267 static int
ionic_lif_addr_del(struct ionic_lif * lif,const uint8_t * addr)268 ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr)
269 {
270 struct ionic_admin_ctx ctx = {
271 .pending_work = true,
272 .cmd.rx_filter_del = {
273 .opcode = IONIC_CMD_RX_FILTER_DEL,
274 },
275 };
276 struct ionic_rx_filter *f;
277 int err;
278
279 IONIC_PRINT_CALL();
280
281 rte_spinlock_lock(&lif->rx_filters.lock);
282
283 f = ionic_rx_filter_by_addr(lif, addr);
284 if (!f) {
285 rte_spinlock_unlock(&lif->rx_filters.lock);
286 return -ENOENT;
287 }
288
289 ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id);
290 ionic_rx_filter_free(f);
291
292 rte_spinlock_unlock(&lif->rx_filters.lock);
293
294 err = ionic_adminq_post_wait(lif, &ctx);
295 if (err)
296 return err;
297
298 IONIC_PRINT(INFO, "rx_filter del (id %d)",
299 rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id));
300
301 return 0;
302 }
303
304 int
ionic_dev_add_mac(struct rte_eth_dev * eth_dev,struct rte_ether_addr * mac_addr,uint32_t index __rte_unused,uint32_t pool __rte_unused)305 ionic_dev_add_mac(struct rte_eth_dev *eth_dev,
306 struct rte_ether_addr *mac_addr,
307 uint32_t index __rte_unused, uint32_t pool __rte_unused)
308 {
309 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
310
311 IONIC_PRINT_CALL();
312
313 return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr);
314 }
315
316 void
ionic_dev_remove_mac(struct rte_eth_dev * eth_dev,uint32_t index)317 ionic_dev_remove_mac(struct rte_eth_dev *eth_dev, uint32_t index)
318 {
319 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
320 struct ionic_adapter *adapter = lif->adapter;
321 struct rte_ether_addr *mac_addr;
322
323 IONIC_PRINT_CALL();
324
325 if (index >= adapter->max_mac_addrs) {
326 IONIC_PRINT(WARNING,
327 "Index %u is above MAC filter limit %u",
328 index, adapter->max_mac_addrs);
329 return;
330 }
331
332 mac_addr = ð_dev->data->mac_addrs[index];
333
334 if (!rte_is_valid_assigned_ether_addr(mac_addr))
335 return;
336
337 ionic_lif_addr_del(lif, (const uint8_t *)mac_addr);
338 }
339
340 int
ionic_dev_set_mac(struct rte_eth_dev * eth_dev,struct rte_ether_addr * mac_addr)341 ionic_dev_set_mac(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
342 {
343 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
344
345 IONIC_PRINT_CALL();
346
347 if (mac_addr == NULL) {
348 IONIC_PRINT(NOTICE, "New mac is null");
349 return -1;
350 }
351
352 if (!rte_is_zero_ether_addr((struct rte_ether_addr *)lif->mac_addr)) {
353 IONIC_PRINT(INFO, "Deleting mac addr %pM",
354 lif->mac_addr);
355 ionic_lif_addr_del(lif, lif->mac_addr);
356 memset(lif->mac_addr, 0, RTE_ETHER_ADDR_LEN);
357 }
358
359 IONIC_PRINT(INFO, "Updating mac addr");
360
361 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)lif->mac_addr);
362
363 return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr);
364 }
365
366 static int
ionic_vlan_rx_add_vid(struct ionic_lif * lif,uint16_t vid)367 ionic_vlan_rx_add_vid(struct ionic_lif *lif, uint16_t vid)
368 {
369 struct ionic_admin_ctx ctx = {
370 .pending_work = true,
371 .cmd.rx_filter_add = {
372 .opcode = IONIC_CMD_RX_FILTER_ADD,
373 .match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_VLAN),
374 .vlan.vlan = rte_cpu_to_le_16(vid),
375 },
376 };
377 int err;
378
379 err = ionic_adminq_post_wait(lif, &ctx);
380 if (err)
381 return err;
382
383 IONIC_PRINT(INFO, "rx_filter add VLAN %d (id %d)", vid,
384 rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id));
385
386 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx);
387 }
388
389 static int
ionic_vlan_rx_kill_vid(struct ionic_lif * lif,uint16_t vid)390 ionic_vlan_rx_kill_vid(struct ionic_lif *lif, uint16_t vid)
391 {
392 struct ionic_admin_ctx ctx = {
393 .pending_work = true,
394 .cmd.rx_filter_del = {
395 .opcode = IONIC_CMD_RX_FILTER_DEL,
396 },
397 };
398 struct ionic_rx_filter *f;
399 int err;
400
401 IONIC_PRINT_CALL();
402
403 rte_spinlock_lock(&lif->rx_filters.lock);
404
405 f = ionic_rx_filter_by_vlan(lif, vid);
406 if (!f) {
407 rte_spinlock_unlock(&lif->rx_filters.lock);
408 return -ENOENT;
409 }
410
411 ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id);
412 ionic_rx_filter_free(f);
413 rte_spinlock_unlock(&lif->rx_filters.lock);
414
415 err = ionic_adminq_post_wait(lif, &ctx);
416 if (err)
417 return err;
418
419 IONIC_PRINT(INFO, "rx_filter del VLAN %d (id %d)", vid,
420 rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id));
421
422 return 0;
423 }
424
425 int
ionic_dev_vlan_filter_set(struct rte_eth_dev * eth_dev,uint16_t vlan_id,int on)426 ionic_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id,
427 int on)
428 {
429 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
430 int err;
431
432 if (on)
433 err = ionic_vlan_rx_add_vid(lif, vlan_id);
434 else
435 err = ionic_vlan_rx_kill_vid(lif, vlan_id);
436
437 return err;
438 }
439
440 static void
ionic_lif_rx_mode(struct ionic_lif * lif,uint32_t rx_mode)441 ionic_lif_rx_mode(struct ionic_lif *lif, uint32_t rx_mode)
442 {
443 struct ionic_admin_ctx ctx = {
444 .pending_work = true,
445 .cmd.rx_mode_set = {
446 .opcode = IONIC_CMD_RX_MODE_SET,
447 .rx_mode = rte_cpu_to_le_16(rx_mode),
448 },
449 };
450 int err;
451
452 if (rx_mode & IONIC_RX_MODE_F_UNICAST)
453 IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_UNICAST");
454 if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
455 IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_MULTICAST");
456 if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
457 IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_BROADCAST");
458 if (rx_mode & IONIC_RX_MODE_F_PROMISC)
459 IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_PROMISC");
460 if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
461 IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_ALLMULTI");
462
463 err = ionic_adminq_post_wait(lif, &ctx);
464 if (err)
465 IONIC_PRINT(ERR, "Failure setting RX mode");
466 }
467
468 static void
ionic_set_rx_mode(struct ionic_lif * lif,uint32_t rx_mode)469 ionic_set_rx_mode(struct ionic_lif *lif, uint32_t rx_mode)
470 {
471 if (lif->rx_mode != rx_mode) {
472 lif->rx_mode = rx_mode;
473 ionic_lif_rx_mode(lif, rx_mode);
474 }
475 }
476
477 int
ionic_dev_promiscuous_enable(struct rte_eth_dev * eth_dev)478 ionic_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
479 {
480 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
481 uint32_t rx_mode = lif->rx_mode;
482
483 IONIC_PRINT_CALL();
484
485 rx_mode |= IONIC_RX_MODE_F_PROMISC;
486
487 ionic_set_rx_mode(lif, rx_mode);
488
489 return 0;
490 }
491
492 int
ionic_dev_promiscuous_disable(struct rte_eth_dev * eth_dev)493 ionic_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
494 {
495 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
496 uint32_t rx_mode = lif->rx_mode;
497
498 rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
499
500 ionic_set_rx_mode(lif, rx_mode);
501
502 return 0;
503 }
504
505 int
ionic_dev_allmulticast_enable(struct rte_eth_dev * eth_dev)506 ionic_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
507 {
508 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
509 uint32_t rx_mode = lif->rx_mode;
510
511 rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
512
513 ionic_set_rx_mode(lif, rx_mode);
514
515 return 0;
516 }
517
518 int
ionic_dev_allmulticast_disable(struct rte_eth_dev * eth_dev)519 ionic_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
520 {
521 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
522 uint32_t rx_mode = lif->rx_mode;
523
524 rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
525
526 ionic_set_rx_mode(lif, rx_mode);
527
528 return 0;
529 }
530
531 int
ionic_lif_change_mtu(struct ionic_lif * lif,uint32_t new_mtu)532 ionic_lif_change_mtu(struct ionic_lif *lif, uint32_t new_mtu)
533 {
534 struct ionic_admin_ctx ctx = {
535 .pending_work = true,
536 .cmd.lif_setattr = {
537 .opcode = IONIC_CMD_LIF_SETATTR,
538 .attr = IONIC_LIF_ATTR_MTU,
539 .mtu = rte_cpu_to_le_32(new_mtu),
540 },
541 };
542
543 /* Not needed for embedded applications */
544 if (ionic_is_embedded())
545 return 0;
546
547 return ionic_adminq_post_wait(lif, &ctx);
548 }
549
550 int
ionic_intr_alloc(struct ionic_lif * lif,struct ionic_intr_info * intr)551 ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
552 {
553 struct ionic_adapter *adapter = lif->adapter;
554 struct ionic_dev *idev = &adapter->idev;
555 unsigned long index;
556
557 /*
558 * Note: interrupt handler is called for index = 0 only
559 * (we use interrupts for the notifyq only anyway,
560 * which has index = 0)
561 */
562
563 for (index = 0; index < adapter->nintrs; index++)
564 if (!adapter->intrs[index])
565 break;
566
567 if (index == adapter->nintrs)
568 return -ENOSPC;
569
570 adapter->intrs[index] = true;
571
572 ionic_intr_init(idev, intr, index);
573
574 return 0;
575 }
576
577 static int
ionic_qcq_alloc(struct ionic_lif * lif,uint8_t type,size_t struct_size,uint32_t socket_id,uint32_t index,const char * type_name,uint16_t flags,uint16_t num_descs,uint16_t num_segs,uint16_t desc_size,uint16_t cq_desc_size,uint16_t sg_desc_size,struct ionic_qcq ** qcq)578 ionic_qcq_alloc(struct ionic_lif *lif,
579 uint8_t type,
580 size_t struct_size,
581 uint32_t socket_id,
582 uint32_t index,
583 const char *type_name,
584 uint16_t flags,
585 uint16_t num_descs,
586 uint16_t num_segs,
587 uint16_t desc_size,
588 uint16_t cq_desc_size,
589 uint16_t sg_desc_size,
590 struct ionic_qcq **qcq)
591 {
592 struct ionic_qcq *new;
593 uint32_t q_size, cq_size, sg_size, total_size;
594 void *q_base, *cmb_q_base, *cq_base, *sg_base;
595 rte_iova_t q_base_pa = 0;
596 rte_iova_t cq_base_pa = 0;
597 rte_iova_t sg_base_pa = 0;
598 rte_iova_t cmb_q_base_pa = 0;
599 size_t page_size = rte_mem_page_size();
600 int err;
601
602 *qcq = NULL;
603
604 q_size = num_descs * desc_size;
605 cq_size = num_descs * cq_desc_size;
606 sg_size = num_descs * sg_desc_size;
607
608 total_size = RTE_ALIGN(q_size, page_size) +
609 RTE_ALIGN(cq_size, page_size);
610 /*
611 * Note: aligning q_size/cq_size is not enough due to cq_base address
612 * aligning as q_base could be not aligned to the page.
613 * Adding page_size.
614 */
615 total_size += page_size;
616
617 if (flags & IONIC_QCQ_F_SG) {
618 total_size += RTE_ALIGN(sg_size, page_size);
619 total_size += page_size;
620 }
621
622 new = rte_zmalloc_socket("ionic", struct_size,
623 RTE_CACHE_LINE_SIZE, socket_id);
624 if (!new) {
625 IONIC_PRINT(ERR, "Cannot allocate queue structure");
626 return -ENOMEM;
627 }
628
629 new->lif = lif;
630
631 /* Most queue types will store 1 ptr per descriptor */
632 new->q.info = rte_calloc_socket("ionic",
633 (uint64_t)num_descs * num_segs,
634 sizeof(void *), page_size, socket_id);
635 if (!new->q.info) {
636 IONIC_PRINT(ERR, "Cannot allocate queue info");
637 err = -ENOMEM;
638 goto err_out_free_qcq;
639 }
640
641 new->q.num_segs = num_segs;
642 new->q.type = type;
643
644 err = ionic_q_init(&new->q, index, num_descs);
645 if (err) {
646 IONIC_PRINT(ERR, "Queue initialization failed");
647 goto err_out_free_info;
648 }
649
650 err = ionic_cq_init(&new->cq, num_descs);
651 if (err) {
652 IONIC_PRINT(ERR, "Completion queue initialization failed");
653 goto err_out_free_info;
654 }
655
656 new->base_z = rte_eth_dma_zone_reserve(lif->eth_dev,
657 type_name, index /* queue_idx */,
658 total_size, IONIC_ALIGN, socket_id);
659
660 if (!new->base_z) {
661 IONIC_PRINT(ERR, "Cannot reserve queue DMA memory");
662 err = -ENOMEM;
663 goto err_out_free_info;
664 }
665
666 new->base = new->base_z->addr;
667 new->base_pa = new->base_z->iova;
668
669 q_base = new->base;
670 q_base_pa = new->base_pa;
671
672 cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size, page_size);
673 cq_base_pa = RTE_ALIGN(q_base_pa + q_size, page_size);
674
675 if (flags & IONIC_QCQ_F_SG) {
676 sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size,
677 page_size);
678 sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size, page_size);
679 ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
680 }
681
682 if (flags & IONIC_QCQ_F_CMB) {
683 /* alloc descriptor ring from nic memory */
684 if (lif->adapter->cmb_offset + q_size >
685 lif->adapter->bars.bar[2].len) {
686 IONIC_PRINT(ERR, "Cannot reserve queue from NIC mem");
687 return -ENOMEM;
688 }
689 cmb_q_base = (void *)
690 ((uintptr_t)lif->adapter->bars.bar[2].vaddr +
691 (uintptr_t)lif->adapter->cmb_offset);
692 /* CMB PA is a relative address */
693 cmb_q_base_pa = lif->adapter->cmb_offset;
694 lif->adapter->cmb_offset += q_size;
695 } else {
696 cmb_q_base = NULL;
697 cmb_q_base_pa = 0;
698 }
699
700 IONIC_PRINT(DEBUG, "Q-Base-PA = %#jx CQ-Base-PA = %#jx "
701 "SG-base-PA = %#jx",
702 q_base_pa, cq_base_pa, sg_base_pa);
703
704 ionic_q_map(&new->q, q_base, q_base_pa, cmb_q_base, cmb_q_base_pa);
705 ionic_cq_map(&new->cq, cq_base, cq_base_pa);
706
707 *qcq = new;
708
709 return 0;
710
711 err_out_free_info:
712 rte_free(new->q.info);
713 err_out_free_qcq:
714 rte_free(new);
715
716 return err;
717 }
718
719 void
ionic_qcq_free(struct ionic_qcq * qcq)720 ionic_qcq_free(struct ionic_qcq *qcq)
721 {
722 if (qcq->base_z) {
723 qcq->base = NULL;
724 qcq->base_pa = 0;
725 rte_memzone_free(qcq->base_z);
726 qcq->base_z = NULL;
727 }
728
729 if (qcq->q.info) {
730 rte_free(qcq->q.info);
731 qcq->q.info = NULL;
732 }
733
734 rte_free(qcq);
735 }
736
/*
 * Build the 64-bit mbuf "rearm data" template for standard Rx mbufs:
 * refcnt = 1, default headroom, one segment, this LIF's port id.
 * The value is stored in rxq->rearm_data (see ionic_rx_qcq_alloc()),
 * presumably so the Rx path can reinitialize an mbuf header with a
 * single 64-bit store — confirm against ionic_rxtx.
 */
static uint64_t
ionic_rx_rearm_data(struct ionic_lif *lif)
{
	struct rte_mbuf rxm;

	memset(&rxm, 0, sizeof(rxm));

	rte_mbuf_refcnt_set(&rxm, 1);
	rxm.data_off = RTE_PKTMBUF_HEADROOM;
	rxm.nb_segs = 1;
	rxm.port = lif->port_id;

	/* Keep the stores above from being reordered past the read below */
	rte_compiler_barrier();

	/* rearm_data must overlay exactly one 64-bit word */
	RTE_BUILD_BUG_ON(sizeof(rxm.rearm_data[0]) != sizeof(uint64_t));
	return rxm.rearm_data[0];
}
754
/*
 * Build the 64-bit mbuf "rearm data" template for chained Rx segment
 * mbufs.  Identical to ionic_rx_rearm_data() except data_off is 0:
 * continuation segments carry no headroom.  Stored in
 * rxq->rearm_seg_data (see ionic_rx_qcq_alloc()).
 */
static uint64_t
ionic_rx_seg_rearm_data(struct ionic_lif *lif)
{
	struct rte_mbuf rxm;

	memset(&rxm, 0, sizeof(rxm));

	rte_mbuf_refcnt_set(&rxm, 1);
	rxm.data_off = 0; /* no headroom */
	rxm.nb_segs = 1;
	rxm.port = lif->port_id;

	/* Keep the stores above from being reordered past the read below */
	rte_compiler_barrier();

	/* rearm_data must overlay exactly one 64-bit word */
	RTE_BUILD_BUG_ON(sizeof(rxm.rearm_data[0]) != sizeof(uint64_t));
	return rxm.rearm_data[0];
}
772
/*
 * Allocate an Rx qcq for the given queue index and record it in
 * lif->rxqcqs[].
 *
 * Derives segment sizes from the mempool, turns on scatter
 * (RTE_ETH_RX_OFFLOAD_SCATTER plus the RX_SG feature) when one mbuf
 * cannot hold a max-MTU frame, and rejects the configuration if the
 * worst-case segment count exceeds what the firmware SG descriptor
 * can address.
 */
int
ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
		uint16_t nrxq_descs, struct rte_mempool *mb_pool,
		struct ionic_rx_qcq **rxq_out)
{
	struct ionic_rx_qcq *rxq;
	uint16_t flags = 0, seg_size, hdr_seg_size, max_segs, max_segs_fw = 1;
	uint32_t max_mtu;
	int err;

	/* Place the descriptor ring in controller memory if requested */
	if (lif->state & IONIC_LIF_F_Q_IN_CMB)
		flags |= IONIC_QCQ_F_CMB;

	seg_size = rte_pktmbuf_data_room_size(mb_pool);

	/* The first mbuf needs to leave headroom */
	hdr_seg_size = seg_size - RTE_PKTMBUF_HEADROOM;

	max_mtu = rte_le_to_cpu_32(lif->adapter->ident.lif.eth.max_mtu);

	/* If mbufs are too small to hold received packets, enable SG */
	if (max_mtu > hdr_seg_size &&
	    !(lif->features & IONIC_ETH_HW_RX_SG)) {
		IONIC_PRINT(NOTICE, "Enabling RX_OFFLOAD_SCATTER");
		lif->eth_dev->data->dev_conf.rxmode.offloads |=
			RTE_ETH_RX_OFFLOAD_SCATTER;
		ionic_lif_configure_rx_sg_offload(lif);
	}

	if (lif->features & IONIC_ETH_HW_RX_SG) {
		flags |= IONIC_QCQ_F_SG;
		/* +1 presumably for the first, non-SG segment — confirm */
		max_segs_fw = IONIC_RX_MAX_SG_ELEMS + 1;
	}

	/*
	 * Calculate how many fragment pointers might be stored in queue.
	 * This is the worst-case number, so that there's enough room in
	 * the info array.
	 */
	max_segs = 1 + (max_mtu + RTE_PKTMBUF_HEADROOM - 1) / seg_size;

	IONIC_PRINT(DEBUG, "rxq %u max_mtu %u seg_size %u max_segs %u",
		index, max_mtu, seg_size, max_segs);
	if (max_segs > max_segs_fw) {
		IONIC_PRINT(ERR, "Rx mbuf size insufficient (%d > %d avail)",
			max_segs, max_segs_fw);
		return -EINVAL;
	}

	err = ionic_qcq_alloc(lif,
		IONIC_QTYPE_RXQ,
		sizeof(struct ionic_rx_qcq),
		socket_id,
		index,
		"rx",
		flags,
		nrxq_descs,
		max_segs,
		sizeof(struct ionic_rxq_desc),
		sizeof(struct ionic_rxq_comp),
		sizeof(struct ionic_rxq_sg_desc),
		(struct ionic_qcq **)&rxq);
	if (err)
		return err;

	rxq->flags = flags;
	rxq->seg_size = seg_size;
	rxq->hdr_seg_size = hdr_seg_size;
	/* Precomputed mbuf rearm words (see ionic_rx_rearm_data()) */
	rxq->rearm_data = ionic_rx_rearm_data(lif);
	rxq->rearm_seg_data = ionic_rx_seg_rearm_data(lif);

	lif->rxqcqs[index] = rxq;
	*rxq_out = rxq;

	return 0;
}
849
850 int
ionic_tx_qcq_alloc(struct ionic_lif * lif,uint32_t socket_id,uint32_t index,uint16_t ntxq_descs,struct ionic_tx_qcq ** txq_out)851 ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
852 uint16_t ntxq_descs, struct ionic_tx_qcq **txq_out)
853 {
854 struct ionic_tx_qcq *txq;
855 uint16_t flags = 0, num_segs_fw = 1;
856 int err;
857
858 if (lif->features & IONIC_ETH_HW_TX_SG) {
859 flags |= IONIC_QCQ_F_SG;
860 num_segs_fw = IONIC_TX_MAX_SG_ELEMS_V1 + 1;
861 }
862 if (lif->state & IONIC_LIF_F_Q_IN_CMB)
863 flags |= IONIC_QCQ_F_CMB;
864
865 IONIC_PRINT(DEBUG, "txq %u num_segs %u", index, num_segs_fw);
866
867 err = ionic_qcq_alloc(lif,
868 IONIC_QTYPE_TXQ,
869 sizeof(struct ionic_tx_qcq),
870 socket_id,
871 index,
872 "tx",
873 flags,
874 ntxq_descs,
875 num_segs_fw,
876 sizeof(struct ionic_txq_desc),
877 sizeof(struct ionic_txq_comp),
878 sizeof(struct ionic_txq_sg_desc_v1),
879 (struct ionic_qcq **)&txq);
880 if (err)
881 return err;
882
883 txq->flags = flags;
884 txq->num_segs_fw = num_segs_fw;
885
886 lif->txqcqs[index] = txq;
887 *txq_out = txq;
888
889 return 0;
890 }
891
892 static int
ionic_admin_qcq_alloc(struct ionic_lif * lif)893 ionic_admin_qcq_alloc(struct ionic_lif *lif)
894 {
895 uint16_t flags = 0;
896 int err;
897
898 err = ionic_qcq_alloc(lif,
899 IONIC_QTYPE_ADMINQ,
900 sizeof(struct ionic_admin_qcq),
901 rte_socket_id(),
902 0,
903 "admin",
904 flags,
905 IONIC_ADMINQ_LENGTH,
906 1,
907 sizeof(struct ionic_admin_cmd),
908 sizeof(struct ionic_admin_comp),
909 0,
910 (struct ionic_qcq **)&lif->adminqcq);
911 if (err)
912 return err;
913
914 return 0;
915 }
916
917 static int
ionic_notify_qcq_alloc(struct ionic_lif * lif)918 ionic_notify_qcq_alloc(struct ionic_lif *lif)
919 {
920 struct ionic_notify_qcq *nqcq;
921 struct ionic_dev *idev = &lif->adapter->idev;
922 uint16_t flags = 0;
923 int err;
924
925 err = ionic_qcq_alloc(lif,
926 IONIC_QTYPE_NOTIFYQ,
927 sizeof(struct ionic_notify_qcq),
928 rte_socket_id(),
929 0,
930 "notify",
931 flags,
932 IONIC_NOTIFYQ_LENGTH,
933 1,
934 sizeof(struct ionic_notifyq_cmd),
935 sizeof(union ionic_notifyq_comp),
936 0,
937 (struct ionic_qcq **)&nqcq);
938 if (err)
939 return err;
940
941 err = ionic_intr_alloc(lif, &nqcq->intr);
942 if (err) {
943 ionic_qcq_free(&nqcq->qcq);
944 return err;
945 }
946
947 ionic_intr_mask_assert(idev->intr_ctrl, nqcq->intr.index,
948 IONIC_INTR_MASK_SET);
949
950 lif->notifyqcq = nqcq;
951
952 return 0;
953 }
954
/*
 * Query the firmware for the supported version and geometry of each
 * queue type this driver uses (adminq, notifyq, rxq, txq), filling
 * lif->qtype_info[].  On embedded platforms the query is skipped and
 * the driver's own max versions (ionic_qtype_vers) are assumed.
 *
 * On a fatal identify error the remaining qtype_info entries are left
 * zeroed (version 0); callers (e.g. ionic_lif_alloc()) check the
 * resulting versions.
 */
static void
ionic_lif_queue_identify(struct ionic_lif *lif)
{
	struct ionic_adapter *adapter = lif->adapter;
	struct ionic_dev *idev = &adapter->idev;
	/*
	 * NOTE(review): ident.txq appears to be reused as scratch space
	 * for every qtype's response, not only the txq — confirm intended.
	 */
	union ionic_q_identity *q_ident = &adapter->ident.txq;
	uint32_t q_words = RTE_DIM(q_ident->words);
	uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data);
	uint32_t i, nwords, qtype;
	int err;

	for (qtype = 0; qtype < RTE_DIM(ionic_qtype_vers); qtype++) {
		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];

		/* Filter out the types this driver knows about */
		switch (qtype) {
		case IONIC_QTYPE_ADMINQ:
		case IONIC_QTYPE_NOTIFYQ:
		case IONIC_QTYPE_RXQ:
		case IONIC_QTYPE_TXQ:
			break;
		default:
			continue;
		}

		memset(qti, 0, sizeof(*qti));

		if (ionic_is_embedded()) {
			/* When embedded, FW will always match the driver */
			qti->version = ionic_qtype_vers[qtype];
			continue;
		}

		/* On the host, query the FW for info */
		ionic_dev_cmd_queue_identify(idev, IONIC_LIF_TYPE_CLASSIC,
			qtype, ionic_qtype_vers[qtype]);
		err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
		if (err == -EINVAL) {
			/* Unsupported qtype: leave version 0, keep scanning */
			IONIC_PRINT(ERR, "qtype %d not supported", qtype);
			continue;
		} else if (err == -EIO) {
			/* FW predates queue identify: abort the whole scan */
			IONIC_PRINT(ERR, "q_ident failed, older FW");
			return;
		} else if (err) {
			IONIC_PRINT(ERR, "q_ident failed, qtype %d: %d",
				qtype, err);
			return;
		}

		/* Copy the response words out of the dev_cmd data area */
		nwords = RTE_MIN(q_words, cmd_words);
		for (i = 0; i < nwords; i++)
			q_ident->words[i] = ioread32(&idev->dev_cmd->data[i]);

		qti->version = q_ident->version;
		qti->supported = q_ident->supported;
		qti->features = rte_le_to_cpu_64(q_ident->features);
		qti->desc_sz = rte_le_to_cpu_16(q_ident->desc_sz);
		qti->comp_sz = rte_le_to_cpu_16(q_ident->comp_sz);
		qti->sg_desc_sz = rte_le_to_cpu_16(q_ident->sg_desc_sz);
		qti->max_sg_elems = rte_le_to_cpu_16(q_ident->max_sg_elems);
		qti->sg_desc_stride =
			rte_le_to_cpu_16(q_ident->sg_desc_stride);

		IONIC_PRINT(DEBUG, " qtype[%d].version = %d",
			qtype, qti->version);
		IONIC_PRINT(DEBUG, " qtype[%d].supported = %#x",
			qtype, qti->supported);
		IONIC_PRINT(DEBUG, " qtype[%d].features = %#jx",
			qtype, qti->features);
		IONIC_PRINT(DEBUG, " qtype[%d].desc_sz = %d",
			qtype, qti->desc_sz);
		IONIC_PRINT(DEBUG, " qtype[%d].comp_sz = %d",
			qtype, qti->comp_sz);
		IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_sz = %d",
			qtype, qti->sg_desc_sz);
		IONIC_PRINT(DEBUG, " qtype[%d].max_sg_elems = %d",
			qtype, qti->max_sg_elems);
		IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_stride = %d",
			qtype, qti->sg_desc_stride);
	}
}
1036
/*
 * One-time allocation of the LIF's software resources: the queue
 * pointer arrays, the notify and admin queues, and the DMA-able
 * lif_info region.  Queue-type versions are negotiated first; a TXQ
 * version below 1 means the firmware is too old to support.
 *
 * NOTE(review): on a mid-function failure, earlier allocations are
 * not freed here — presumably the caller invokes ionic_lif_free(),
 * which tolerates a partially-built LIF; confirm against the ethdev
 * init path.
 */
int
ionic_lif_alloc(struct ionic_lif *lif)
{
	struct ionic_adapter *adapter = lif->adapter;
	uint32_t socket_id = rte_socket_id();
	int err;

	/*
	 * lif->name was zeroed on allocation.
	 * Copy (sizeof() - 1) bytes to ensure that it is NULL terminated.
	 */
	memcpy(lif->name, lif->eth_dev->data->name, sizeof(lif->name) - 1);

	IONIC_PRINT(DEBUG, "LIF: %s", lif->name);

	/* Negotiate queue-type versions/geometry with the firmware */
	ionic_lif_queue_identify(lif);

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version < 1) {
		IONIC_PRINT(ERR, "FW too old, please upgrade");
		return -ENXIO;
	}

	/* CMB queues require bar 2 and new-enough rxq/txq versions */
	if (adapter->q_in_cmb) {
		if (adapter->bars.num_bars >= 3 &&
		    lif->qtype_info[IONIC_QTYPE_RXQ].version >= 2 &&
		    lif->qtype_info[IONIC_QTYPE_TXQ].version >= 3) {
			IONIC_PRINT(INFO, "%s enabled on %s",
				PMD_IONIC_CMB_KVARG, lif->name);
			lif->state |= IONIC_LIF_F_Q_IN_CMB;
		} else {
			IONIC_PRINT(ERR, "%s not supported on %s, disabled",
				PMD_IONIC_CMB_KVARG, lif->name);
		}
	}

	/*
	 * NOTE(review): this "Allocating Lif Info" message also appears
	 * before the actual lif_info allocation below; this first one
	 * looks like a copy-paste leftover.
	 */
	IONIC_PRINT(DEBUG, "Allocating Lif Info");

	rte_spinlock_init(&lif->adminq_lock);
	rte_spinlock_init(&lif->adminq_service_lock);

	lif->kern_dbpage = adapter->idev.db_pages;
	if (!lif->kern_dbpage) {
		IONIC_PRINT(ERR, "Cannot map dbpage, aborting");
		return -ENOMEM;
	}

	/* Arrays of pointers to the per-queue qcq structures */
	lif->txqcqs = rte_calloc_socket("ionic",
				adapter->max_ntxqs_per_lif,
				sizeof(*lif->txqcqs),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (!lif->txqcqs) {
		IONIC_PRINT(ERR, "Cannot allocate tx queues array");
		return -ENOMEM;
	}

	lif->rxqcqs = rte_calloc_socket("ionic",
				adapter->max_nrxqs_per_lif,
				sizeof(*lif->rxqcqs),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (!lif->rxqcqs) {
		IONIC_PRINT(ERR, "Cannot allocate rx queues array");
		return -ENOMEM;
	}

	IONIC_PRINT(DEBUG, "Allocating Notify Queue");

	err = ionic_notify_qcq_alloc(lif);
	if (err) {
		IONIC_PRINT(ERR, "Cannot allocate notify queue");
		return err;
	}

	IONIC_PRINT(DEBUG, "Allocating Admin Queue");

	err = ionic_admin_qcq_alloc(lif);
	if (err) {
		IONIC_PRINT(ERR, "Cannot allocate admin queue");
		return err;
	}

	IONIC_PRINT(DEBUG, "Allocating Lif Info");

	/* DMA-able region the firmware fills with status and stats */
	lif->info_sz = RTE_ALIGN(sizeof(*lif->info), rte_mem_page_size());

	lif->info_z = rte_eth_dma_zone_reserve(lif->eth_dev,
		"lif_info", 0 /* queue_idx*/,
		lif->info_sz, IONIC_ALIGN, socket_id);
	if (!lif->info_z) {
		IONIC_PRINT(ERR, "Cannot allocate lif info memory");
		return -ENOMEM;
	}

	lif->info = lif->info_z->addr;
	lif->info_pa = lif->info_z->iova;

	return 0;
}
1134
1135 void
ionic_lif_free(struct ionic_lif * lif)1136 ionic_lif_free(struct ionic_lif *lif)
1137 {
1138 if (lif->notifyqcq) {
1139 ionic_qcq_free(&lif->notifyqcq->qcq);
1140 lif->notifyqcq = NULL;
1141 }
1142
1143 if (lif->adminqcq) {
1144 ionic_qcq_free(&lif->adminqcq->qcq);
1145 lif->adminqcq = NULL;
1146 }
1147
1148 if (lif->txqcqs) {
1149 rte_free(lif->txqcqs);
1150 lif->txqcqs = NULL;
1151 }
1152
1153 if (lif->rxqcqs) {
1154 rte_free(lif->rxqcqs);
1155 lif->rxqcqs = NULL;
1156 }
1157
1158 if (lif->info) {
1159 rte_memzone_free(lif->info_z);
1160 lif->info = NULL;
1161 }
1162 }
1163
1164 void
ionic_lif_free_queues(struct ionic_lif * lif)1165 ionic_lif_free_queues(struct ionic_lif *lif)
1166 {
1167 uint32_t i;
1168
1169 for (i = 0; i < lif->ntxqcqs; i++) {
1170 ionic_dev_tx_queue_release(lif->eth_dev, i);
1171 lif->eth_dev->data->tx_queues[i] = NULL;
1172 }
1173 for (i = 0; i < lif->nrxqcqs; i++) {
1174 ionic_dev_rx_queue_release(lif->eth_dev, i);
1175 lif->eth_dev->data->rx_queues[i] = NULL;
1176 }
1177 }
1178
/*
 * Program the LIF's RSS configuration into the FW via LIF_SETATTR.
 * 'types' selects the hash types; a non-NULL 'key' replaces the cached
 * hash key and a non-NULL 'indir' replaces the cached indirection
 * table.  The FW reads the table through its DMA address (rss.addr),
 * so the table is updated in place before the command is posted.
 * Returns 0 on success or a negative adminq error code.
 */
int
ionic_lif_rss_config(struct ionic_lif *lif,
		const uint16_t types, const uint8_t *key, const uint32_t *indir)
{
	struct ionic_adapter *adapter = lif->adapter;
	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.types = rte_cpu_to_le_16(types),
			.rss.addr = rte_cpu_to_le_64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i;
	uint16_t tbl_sz =
		rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz);

	IONIC_PRINT_CALL();

	lif->rss_types = types;

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	/* Update the DMA-visible table the FW will fetch via rss.addr */
	if (indir)
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];

	/* The key travels inline in the command itself */
	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
		IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}
1213
/*
 * Set up default RSS for the LIF: allocate (once) the DMA-able
 * indirection table, refill it with a round-robin queue mapping
 * whenever the Rx queue count has changed, then program the table
 * and the symmetric Toeplitz key into the FW.
 */
static int
ionic_lif_rss_setup(struct ionic_lif *lif)
{
	struct ionic_adapter *adapter = lif->adapter;
	/* Symmetric key: both directions of a flow hash to the same queue */
	static const uint8_t toeplitz_symmetric_key[] = {
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
		0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
	};
	uint32_t i;
	uint16_t tbl_sz =
		rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz);

	IONIC_PRINT_CALL();

	/* Allocate the table only on first use; it persists across restarts */
	if (!lif->rss_ind_tbl_z) {
		lif->rss_ind_tbl_z = rte_eth_dma_zone_reserve(lif->eth_dev,
			"rss_ind_tbl", 0 /* queue_idx */,
			sizeof(*lif->rss_ind_tbl) * tbl_sz,
			IONIC_ALIGN, rte_socket_id());
		if (!lif->rss_ind_tbl_z) {
			IONIC_PRINT(ERR, "OOM");
			return -ENOMEM;
		}

		lif->rss_ind_tbl = lif->rss_ind_tbl_z->addr;
		lif->rss_ind_tbl_pa = lif->rss_ind_tbl_z->iova;
	}

	if (lif->rss_ind_tbl_nrxqcqs != lif->nrxqcqs) {
		lif->rss_ind_tbl_nrxqcqs = lif->nrxqcqs;

		/* Fill indirection table with 'default' values */
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = i % lif->nrxqcqs;
	}

	return ionic_lif_rss_config(lif, IONIC_RSS_OFFLOAD_ALL,
		toeplitz_symmetric_key, NULL);
}
1256
1257 static void
ionic_lif_rss_teardown(struct ionic_lif * lif)1258 ionic_lif_rss_teardown(struct ionic_lif *lif)
1259 {
1260 /* Not needed for embedded applications */
1261 if (ionic_is_embedded())
1262 return;
1263
1264 if (lif->rss_ind_tbl) {
1265 lif->rss_ind_tbl = NULL;
1266 lif->rss_ind_tbl_pa = 0;
1267 rte_memzone_free(lif->rss_ind_tbl_z);
1268 lif->rss_ind_tbl_z = NULL;
1269 }
1270 }
1271
/*
 * Post a Q_CONTROL/DISABLE admin command for the Tx queue without
 * waiting for the completion; the result is collected later through
 * txq->admin_ctx.  The INITED flag is cleared immediately.
 */
void
ionic_lif_txq_deinit_nowait(struct ionic_tx_qcq *txq)
{
	ionic_qcq_disable_nowait(&txq->qcq, &txq->admin_ctx);

	txq->flags &= ~IONIC_QCQ_F_INITED;
}
1279
1280 void
ionic_lif_txq_stats(struct ionic_tx_qcq * txq)1281 ionic_lif_txq_stats(struct ionic_tx_qcq *txq)
1282 {
1283 struct ionic_tx_stats *stats = &txq->stats;
1284
1285 IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
1286 txq->qcq.q.index, stats->packets, stats->tso);
1287 IONIC_PRINT(DEBUG, "TX queue %u comps %ju (%ju per)",
1288 txq->qcq.q.index, stats->comps,
1289 stats->comps ? stats->packets / stats->comps : 0);
1290 }
1291
/*
 * Post a Q_CONTROL/DISABLE admin command for the Rx queue without
 * waiting for the completion; the result is collected later through
 * rxq->admin_ctx.  The INITED flag is cleared immediately.
 */
void
ionic_lif_rxq_deinit_nowait(struct ionic_rx_qcq *rxq)
{
	ionic_qcq_disable_nowait(&rxq->qcq, &rxq->admin_ctx);

	rxq->flags &= ~IONIC_QCQ_F_INITED;
}
1299
1300 void
ionic_lif_rxq_stats(struct ionic_rx_qcq * rxq)1301 ionic_lif_rxq_stats(struct ionic_rx_qcq *rxq)
1302 {
1303 struct ionic_rx_stats *stats = &rxq->stats;
1304
1305 IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
1306 rxq->qcq.q.index, stats->packets, stats->mtods);
1307 }
1308
/*
 * Mark the admin queue uninitialized.  No FW command is issued here;
 * the queue is torn down as part of the overall LIF reset.
 */
static void
ionic_lif_adminq_deinit(struct ionic_lif *lif)
{
	lif->adminqcq->flags &= ~IONIC_QCQ_F_INITED;
}
1314
/*
 * Quiesce the notify queue: mask its interrupt so no further events
 * are delivered, then mark it uninitialized.  No FW command is used.
 */
static void
ionic_lif_notifyq_deinit(struct ionic_lif *lif)
{
	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
	struct ionic_dev *idev = &lif->adapter->idev;

	if (!(nqcq->flags & IONIC_QCQ_F_INITED))
		return;

	/* Mask first so no event fires while the queue is torn down */
	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_SET);

	nqcq->flags &= ~IONIC_QCQ_F_INITED;
}
1329
1330 /* This acts like ionic_napi */
1331 int
ionic_qcq_service(struct ionic_qcq * qcq,int budget,ionic_cq_cb cb,void * cb_arg)1332 ionic_qcq_service(struct ionic_qcq *qcq, int budget, ionic_cq_cb cb,
1333 void *cb_arg)
1334 {
1335 struct ionic_cq *cq = &qcq->cq;
1336 uint32_t work_done;
1337
1338 work_done = ionic_cq_service(cq, budget, cb, cb_arg);
1339
1340 return work_done;
1341 }
1342
1343 static void
ionic_link_status_check(struct ionic_lif * lif)1344 ionic_link_status_check(struct ionic_lif *lif)
1345 {
1346 struct ionic_adapter *adapter = lif->adapter;
1347 bool link_up;
1348
1349 lif->state &= ~IONIC_LIF_F_LINK_CHECK_NEEDED;
1350
1351 if (!lif->info)
1352 return;
1353
1354 link_up = (lif->info->status.link_status == IONIC_PORT_OPER_STATUS_UP);
1355
1356 if ((link_up && adapter->link_up) ||
1357 (!link_up && !adapter->link_up))
1358 return;
1359
1360 if (link_up) {
1361 adapter->link_speed =
1362 rte_le_to_cpu_32(lif->info->status.link_speed);
1363 IONIC_PRINT(DEBUG, "Link up - %d Gbps",
1364 adapter->link_speed);
1365 } else {
1366 IONIC_PRINT(DEBUG, "Link down");
1367 }
1368
1369 adapter->link_up = link_up;
1370 ionic_dev_link_update(lif->eth_dev, 0);
1371 }
1372
1373 static void
ionic_lif_handle_fw_down(struct ionic_lif * lif)1374 ionic_lif_handle_fw_down(struct ionic_lif *lif)
1375 {
1376 if (lif->state & IONIC_LIF_F_FW_RESET)
1377 return;
1378
1379 lif->state |= IONIC_LIF_F_FW_RESET;
1380
1381 if (lif->state & IONIC_LIF_F_UP) {
1382 IONIC_PRINT(NOTICE,
1383 "Surprise FW stop, stopping %s", lif->name);
1384 ionic_lif_stop(lif);
1385 }
1386
1387 IONIC_PRINT(NOTICE, "FW down, %s stopped", lif->name);
1388 }
1389
/*
 * Per-descriptor callback for the notify queue.  Event ids (eid) are
 * monotonically increasing; a descriptor whose eid is not newer than
 * the last one seen means there are no new completions, so return
 * false to stop the service loop.  Otherwise dispatch on the event
 * code and return true to keep going.
 */
static bool
ionic_notifyq_cb(struct ionic_cq *cq, uint16_t cq_desc_index, void *cb_arg)
{
	union ionic_notifyq_comp *cq_desc_base = cq->base;
	union ionic_notifyq_comp *cq_desc = &cq_desc_base[cq_desc_index];
	struct ionic_lif *lif = cb_arg;

	IONIC_PRINT(DEBUG, "Notifyq callback eid = %jd ecode = %d",
		cq_desc->event.eid, cq_desc->event.ecode);

	/* Have we run out of new completions to process? */
	if (!(cq_desc->event.eid > lif->last_eid))
		return false;

	lif->last_eid = cq_desc->event.eid;

	switch (cq_desc->event.ecode) {
	case IONIC_EVENT_LINK_CHANGE:
		IONIC_PRINT(DEBUG,
			"Notifyq IONIC_EVENT_LINK_CHANGE %s "
			"eid=%jd link_status=%d link_speed=%d",
			lif->name,
			cq_desc->event.eid,
			cq_desc->link_change.link_status,
			cq_desc->link_change.link_speed);

		/* Defer the update; the caller's service loop applies it */
		lif->state |= IONIC_LIF_F_LINK_CHECK_NEEDED;
		break;

	case IONIC_EVENT_RESET:
		IONIC_PRINT(NOTICE,
			"Notifyq IONIC_EVENT_RESET %s "
			"eid=%jd, reset_code=%d state=%d",
			lif->name,
			cq_desc->event.eid,
			cq_desc->reset.reset_code,
			cq_desc->reset.state);
		ionic_lif_handle_fw_down(lif);
		break;

	default:
		IONIC_PRINT(WARNING, "Notifyq bad event ecode=%d eid=%jd",
			cq_desc->event.ecode, cq_desc->event.eid);
		break;
	}

	return true;
}
1438
/*
 * Service the notify queue: mask its interrupt, drain up to 'budget'
 * events, run any deferred link-status check, return interrupt
 * credits, then unmask.  Returns 0, or -1 if the queue has not been
 * initialized yet.
 */
int
ionic_notifyq_handler(struct ionic_lif *lif, int budget)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
	uint32_t work_done;

	if (!(nqcq->flags & IONIC_QCQ_F_INITED)) {
		IONIC_PRINT(DEBUG, "Notifyq not yet initialized");
		return -1;
	}

	/* Mask while servicing so events cannot re-fire mid-drain */
	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_SET);

	work_done = ionic_qcq_service(&nqcq->qcq, budget,
		ionic_notifyq_cb, lif);

	/* Link-change events only set a flag; apply the update here */
	if (lif->state & IONIC_LIF_F_LINK_CHECK_NEEDED)
		ionic_link_status_check(lif);

	/* Acknowledge the serviced events before re-enabling */
	ionic_intr_credits(idev->intr_ctrl, nqcq->intr.index,
		work_done, IONIC_INTR_CRED_RESET_COALESCE);

	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_CLEAR);

	return 0;
}
1468
/*
 * Initialize the admin queue using the register-based dev-cmd channel
 * (the adminq itself is not usable yet), retrying a busy FW a few
 * times on -EAGAIN.  On success, record the HW queue ids and map the
 * doorbell from the completion.
 */
static int
ionic_lif_adminq_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_admin_qcq *aqcq = lif->adminqcq;
	struct ionic_queue *q = &aqcq->qcq.q;
	struct ionic_q_init_comp comp;
	uint32_t retries = 5;
	int err;

retry_adminq_init:
	ionic_dev_cmd_adminq_init(idev, &aqcq->qcq);
	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
	if (err == -EAGAIN && retries > 0) {
		retries--;
		rte_delay_us_block(IONIC_DEVCMD_RETRY_WAIT_US);
		goto retry_adminq_init;
	}
	if (err)
		return err;

	ionic_dev_cmd_comp(idev, &comp);

	q->hw_type = comp.hw_type;
	q->hw_index = rte_le_to_cpu_32(comp.hw_index);
	q->db = ionic_db_map(lif, q);

	IONIC_PRINT(DEBUG, "adminq->hw_type %d", q->hw_type);
	IONIC_PRINT(DEBUG, "adminq->hw_index %d", q->hw_index);
	IONIC_PRINT(DEBUG, "adminq->db %p", q->db);

	aqcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
1504
/*
 * Initialize the notify queue via a Q_INIT admin command.  The queue
 * gets an interrupt only when the platform supports one; otherwise it
 * runs with IONIC_INTR_NONE and is polled.  The notifyq has no
 * doorbell (the FW pushes events), so q->db stays NULL.
 */
static int
ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_notify_qcq *nqcq = lif->notifyqcq;
	struct ionic_queue *q = &nqcq->qcq.q;
	uint16_t flags = IONIC_QINIT_F_ENA;
	int err;

	struct ionic_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = rte_cpu_to_le_32(q->index),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.ring_base = rte_cpu_to_le_64(q->base_pa),
		}
	};

	/* Only enable an interrupt if the device supports them */
	if (lif->adapter->intf->configure_intr != NULL) {
		flags |= IONIC_QINIT_F_IRQ;
		ctx.cmd.q_init.intr_index = rte_cpu_to_le_16(nqcq->intr.index);
	}
	ctx.cmd.q_init.flags = rte_cpu_to_le_16(flags);

	IONIC_PRINT(DEBUG, "notifyq_init.index %d", q->index);
	IONIC_PRINT(DEBUG, "notifyq_init.ring_base 0x%" PRIx64 "", q->base_pa);
	IONIC_PRINT(DEBUG, "notifyq_init.ring_size %d",
		ctx.cmd.q_init.ring_size);
	IONIC_PRINT(DEBUG, "notifyq_init.ver %u", ctx.cmd.q_init.ver);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
	/* The FW delivers events; the host never rings a notifyq doorbell */
	q->db = NULL;

	IONIC_PRINT(DEBUG, "notifyq->hw_type %d", q->hw_type);
	IONIC_PRINT(DEBUG, "notifyq->hw_index %d", q->hw_index);
	IONIC_PRINT(DEBUG, "notifyq->db %p", q->db);

	ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
		IONIC_INTR_MASK_CLEAR);

	nqcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
1559
1560 int
ionic_lif_set_features(struct ionic_lif * lif)1561 ionic_lif_set_features(struct ionic_lif *lif)
1562 {
1563 struct ionic_admin_ctx ctx = {
1564 .pending_work = true,
1565 .cmd.lif_setattr = {
1566 .opcode = IONIC_CMD_LIF_SETATTR,
1567 .attr = IONIC_LIF_ATTR_FEATURES,
1568 .features = rte_cpu_to_le_64(lif->features),
1569 },
1570 };
1571 int err;
1572
1573 err = ionic_adminq_post_wait(lif, &ctx);
1574 if (err)
1575 return err;
1576
1577 lif->hw_features = rte_le_to_cpu_64(ctx.cmd.lif_setattr.features &
1578 ctx.comp.lif_setattr.features);
1579
1580 if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1581 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_TX_TAG");
1582 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1583 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_STRIP");
1584 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1585 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_FILTER");
1586 if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1587 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_HASH");
1588 if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1589 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_SG");
1590 if (lif->hw_features & IONIC_ETH_HW_RX_SG)
1591 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_SG");
1592 if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1593 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_CSUM");
1594 if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1595 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_CSUM");
1596 if (lif->hw_features & IONIC_ETH_HW_TSO)
1597 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO");
1598 if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1599 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPV6");
1600 if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1601 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_ECN");
1602 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1603 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE");
1604 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1605 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE_CSUM");
1606 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1607 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP4");
1608 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1609 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP6");
1610 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1611 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP");
1612 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1613 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP_CSUM");
1614
1615 return 0;
1616 }
1617
/*
 * Post (without waiting) a Q_INIT admin command for the Tx queue.
 * The software queue/cq state is reset before posting so it matches
 * the freshly initialized HW queue.  The completion is consumed later
 * by ionic_lif_txq_init_done(); if the post itself fails,
 * pending_work is cleared so no completion will be expected.
 */
int
ionic_lif_txq_init_nowait(struct ionic_tx_qcq *txq)
{
	struct ionic_qcq *qcq = &txq->qcq;
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = qcq->lif;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx *ctx = &txq->admin_ctx;
	int err;

	*ctx = (struct ionic_admin_ctx) {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = rte_cpu_to_le_32(q->index),
			.flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
		},
	};

	if (txq->flags & IONIC_QCQ_F_SG)
		ctx->cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);
	/* With CMB, the descriptor ring lives in controller memory */
	if (txq->flags & IONIC_QCQ_F_CMB) {
		ctx->cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);
		ctx->cmd.q_init.ring_base = rte_cpu_to_le_64(q->cmb_base_pa);
	} else {
		ctx->cmd.q_init.ring_base = rte_cpu_to_le_64(q->base_pa);
	}

	IONIC_PRINT(DEBUG, "txq_init.index %d", q->index);
	IONIC_PRINT(DEBUG, "txq_init.ring_base 0x%" PRIx64 "", q->base_pa);
	IONIC_PRINT(DEBUG, "txq_init.ring_size %d",
		ctx->cmd.q_init.ring_size);
	IONIC_PRINT(DEBUG, "txq_init.ver %u", ctx->cmd.q_init.ver);

	ionic_q_reset(q);
	ionic_cq_reset(cq);

	/* Caller responsible for calling ionic_lif_txq_init_done() */
	err = ionic_adminq_post(lif, ctx);
	if (err)
		ctx->pending_work = false;
	return err;
}
1667
1668 void
ionic_lif_txq_init_done(struct ionic_tx_qcq * txq)1669 ionic_lif_txq_init_done(struct ionic_tx_qcq *txq)
1670 {
1671 struct ionic_lif *lif = txq->qcq.lif;
1672 struct ionic_queue *q = &txq->qcq.q;
1673 struct ionic_admin_ctx *ctx = &txq->admin_ctx;
1674
1675 q->hw_type = ctx->comp.q_init.hw_type;
1676 q->hw_index = rte_le_to_cpu_32(ctx->comp.q_init.hw_index);
1677 q->db = ionic_db_map(lif, q);
1678
1679 IONIC_PRINT(DEBUG, "txq->hw_type %d", q->hw_type);
1680 IONIC_PRINT(DEBUG, "txq->hw_index %d", q->hw_index);
1681 IONIC_PRINT(DEBUG, "txq->db %p", q->db);
1682
1683 txq->flags |= IONIC_QCQ_F_INITED;
1684 }
1685
/*
 * Post (without waiting) a Q_INIT admin command for the Rx queue.
 * The software queue/cq state is reset before posting so it matches
 * the freshly initialized HW queue.  The completion is consumed later
 * by ionic_lif_rxq_init_done(); if the post itself fails,
 * pending_work is cleared so no completion will be expected.
 */
int
ionic_lif_rxq_init_nowait(struct ionic_rx_qcq *rxq)
{
	struct ionic_qcq *qcq = &rxq->qcq;
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = qcq->lif;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx *ctx = &rxq->admin_ctx;
	int err;

	*ctx = (struct ionic_admin_ctx) {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = rte_cpu_to_le_32(q->index),
			.flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
		},
	};

	if (rxq->flags & IONIC_QCQ_F_SG)
		ctx->cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);
	/* With CMB, the descriptor ring lives in controller memory */
	if (rxq->flags & IONIC_QCQ_F_CMB) {
		ctx->cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);
		ctx->cmd.q_init.ring_base = rte_cpu_to_le_64(q->cmb_base_pa);
	} else {
		ctx->cmd.q_init.ring_base = rte_cpu_to_le_64(q->base_pa);
	}

	IONIC_PRINT(DEBUG, "rxq_init.index %d", q->index);
	IONIC_PRINT(DEBUG, "rxq_init.ring_base 0x%" PRIx64 "", q->base_pa);
	IONIC_PRINT(DEBUG, "rxq_init.ring_size %d",
		ctx->cmd.q_init.ring_size);
	IONIC_PRINT(DEBUG, "rxq_init.ver %u", ctx->cmd.q_init.ver);

	ionic_q_reset(q);
	ionic_cq_reset(cq);

	/* Caller responsible for calling ionic_lif_rxq_init_done() */
	err = ionic_adminq_post(lif, ctx);
	if (err)
		ctx->pending_work = false;
	return err;
}
1735
1736 void
ionic_lif_rxq_init_done(struct ionic_rx_qcq * rxq)1737 ionic_lif_rxq_init_done(struct ionic_rx_qcq *rxq)
1738 {
1739 struct ionic_lif *lif = rxq->qcq.lif;
1740 struct ionic_queue *q = &rxq->qcq.q;
1741 struct ionic_admin_ctx *ctx = &rxq->admin_ctx;
1742
1743 q->hw_type = ctx->comp.q_init.hw_type;
1744 q->hw_index = rte_le_to_cpu_32(ctx->comp.q_init.hw_index);
1745 q->db = ionic_db_map(lif, q);
1746
1747 rxq->flags |= IONIC_QCQ_F_INITED;
1748
1749 IONIC_PRINT(DEBUG, "rxq->hw_type %d", q->hw_type);
1750 IONIC_PRINT(DEBUG, "rxq->hw_index %d", q->hw_index);
1751 IONIC_PRINT(DEBUG, "rxq->db %p", q->db);
1752 }
1753
1754 static int
ionic_station_set(struct ionic_lif * lif)1755 ionic_station_set(struct ionic_lif *lif)
1756 {
1757 struct ionic_admin_ctx ctx = {
1758 .pending_work = true,
1759 .cmd.lif_getattr = {
1760 .opcode = IONIC_CMD_LIF_GETATTR,
1761 .attr = IONIC_LIF_ATTR_MAC,
1762 },
1763 };
1764 int err;
1765
1766 IONIC_PRINT_CALL();
1767
1768 err = ionic_adminq_post_wait(lif, &ctx);
1769 if (err)
1770 return err;
1771
1772 memcpy(lif->mac_addr, ctx.comp.lif_getattr.mac, RTE_ETHER_ADDR_LEN);
1773
1774 return 0;
1775 }
1776
1777 static void
ionic_lif_set_name(struct ionic_lif * lif)1778 ionic_lif_set_name(struct ionic_lif *lif)
1779 {
1780 struct ionic_admin_ctx ctx = {
1781 .pending_work = true,
1782 .cmd.lif_setattr = {
1783 .opcode = IONIC_CMD_LIF_SETATTR,
1784 .attr = IONIC_LIF_ATTR_NAME,
1785 },
1786 };
1787
1788 /* Not needed for embedded applications */
1789 if (ionic_is_embedded())
1790 return;
1791
1792 memcpy(ctx.cmd.lif_setattr.name, lif->name,
1793 sizeof(ctx.cmd.lif_setattr.name) - 1);
1794
1795 ionic_adminq_post_wait(lif, &ctx);
1796 }
1797
/*
 * Bring the LIF to the INITED state: issue LIF_INIT over the dev-cmd
 * channel (retrying a busy FW on -EAGAIN), then initialize the admin
 * and notify queues, program the initial feature set, set up Rx
 * filter tracking, and fetch the station MAC.  On failure, pieces
 * already initialized are torn down in reverse order via the goto
 * chain.  Returns 0 on success or a negative error code.
 */
int
ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->adapter->idev;
	struct ionic_lif_init_comp comp;
	uint32_t retries = 5;
	int err;

	memset(&lif->stats_base, 0, sizeof(lif->stats_base));

retry_lif_init:
	ionic_dev_cmd_lif_init(idev, lif->info_pa);
	err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
	if (err == -EAGAIN && retries > 0) {
		retries--;
		rte_delay_us_block(IONIC_DEVCMD_RETRY_WAIT_US);
		goto retry_lif_init;
	}
	if (err)
		return err;

	ionic_dev_cmd_comp(idev, &comp);

	/*
	 * NOTE(review): comp.hw_index comes from the FW in little-endian;
	 * rte_le_to_cpu_16() would express the intent better (cf. the
	 * adminq init path), though for 16 bits both macros perform the
	 * identical byte swap.
	 */
	lif->hw_index = rte_cpu_to_le_16(comp.hw_index);

	err = ionic_lif_adminq_init(lif);
	if (err)
		return err;

	err = ionic_lif_notifyq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	/*
	 * Configure initial feature set
	 * This will be updated later by the dev_configure() step
	 */
	lif->features = IONIC_ETH_HW_RX_HASH | IONIC_ETH_HW_VLAN_RX_FILTER;

	err = ionic_lif_set_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	err = ionic_rx_filters_init(lif);
	if (err)
		goto err_out_notifyq_deinit;

	err = ionic_station_set(lif);
	if (err)
		goto err_out_rx_filter_deinit;

	ionic_lif_set_name(lif);

	lif->state |= IONIC_LIF_F_INITED;

	return 0;

err_out_rx_filter_deinit:
	ionic_rx_filters_deinit(lif);

err_out_notifyq_deinit:
	ionic_lif_notifyq_deinit(lif);

err_out_adminq_deinit:
	ionic_lif_adminq_deinit(lif);

	return err;
}
1866
1867 void
ionic_lif_deinit(struct ionic_lif * lif)1868 ionic_lif_deinit(struct ionic_lif *lif)
1869 {
1870 if (!(lif->state & IONIC_LIF_F_INITED))
1871 return;
1872
1873 ionic_rx_filters_deinit(lif);
1874 ionic_lif_rss_teardown(lif);
1875 ionic_lif_notifyq_deinit(lif);
1876 ionic_lif_adminq_deinit(lif);
1877
1878 lif->state &= ~IONIC_LIF_F_INITED;
1879 }
1880
1881 void
ionic_lif_configure_vlan_offload(struct ionic_lif * lif,int mask)1882 ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)
1883 {
1884 struct rte_eth_dev *eth_dev = lif->eth_dev;
1885 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode;
1886
1887 /*
1888 * IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so
1889 * set RTE_ETH_RX_OFFLOAD_VLAN_FILTER and ignore RTE_ETH_VLAN_FILTER_MASK
1890 */
1891 rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
1892
1893 if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1894 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1895 lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
1896 else
1897 lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
1898 }
1899 }
1900
1901 void
ionic_lif_configure_rx_sg_offload(struct ionic_lif * lif)1902 ionic_lif_configure_rx_sg_offload(struct ionic_lif *lif)
1903 {
1904 struct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode;
1905
1906 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
1907 lif->features |= IONIC_ETH_HW_RX_SG;
1908 lif->eth_dev->data->scattered_rx = 1;
1909 } else {
1910 lif->features &= ~IONIC_ETH_HW_RX_SG;
1911 lif->eth_dev->data->scattered_rx = 0;
1912 }
1913 }
1914
/*
 * Translate the ethdev configuration into LIF state: clamp the queue
 * counts to what the FW identity allows, then map the per-port Rx/Tx
 * offload flags onto lif->features.  Called from dev_configure().
 */
void
ionic_lif_configure(struct ionic_lif *lif)
{
	struct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode;
	struct rte_eth_txmode *txmode = &lif->eth_dev->data->dev_conf.txmode;
	struct ionic_identity *ident = &lif->adapter->ident;
	union ionic_lif_config *cfg = &ident->lif.eth.config;
	uint32_t ntxqs_per_lif =
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
	uint32_t nrxqs_per_lif =
		rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]);
	uint32_t nrxqs = lif->eth_dev->data->nb_rx_queues;
	uint32_t ntxqs = lif->eth_dev->data->nb_tx_queues;

	lif->port_id = lif->eth_dev->data->port_id;

	IONIC_PRINT(DEBUG, "Configuring LIF on port %u",
		lif->port_id);

	/* Use the app's queue counts, capped by the FW-reported maxima */
	if (nrxqs > 0)
		nrxqs_per_lif = RTE_MIN(nrxqs_per_lif, nrxqs);

	if (ntxqs > 0)
		ntxqs_per_lif = RTE_MIN(ntxqs_per_lif, ntxqs);

	lif->nrxqcqs = nrxqs_per_lif;
	lif->ntxqcqs = ntxqs_per_lif;

	/* Update the LIF configuration based on the eth_dev */

	/*
	 * NB: While it is true that RSS_HASH is always enabled on ionic,
	 * setting this flag unconditionally causes problems in DTS.
	 * rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
	 */

	/* RX per-port */

	if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM ||
	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM ||
	    rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
		lif->features |= IONIC_ETH_HW_RX_CSUM;
	else
		lif->features &= ~IONIC_ETH_HW_RX_CSUM;

	/*
	 * NB: RX_SG may be enabled later during rx_queue_setup() if
	 * required by the mbuf/mtu configuration
	 */
	ionic_lif_configure_rx_sg_offload(lif);

	/* Covers VLAN_STRIP */
	ionic_lif_configure_vlan_offload(lif, RTE_ETH_VLAN_STRIP_MASK);

	/* TX per-port */

	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
	    txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
		lif->features |= IONIC_ETH_HW_TX_CSUM;
	else
		lif->features &= ~IONIC_ETH_HW_TX_CSUM;

	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
		lif->features |= IONIC_ETH_HW_VLAN_TX_TAG;
	else
		lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG;

	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
		lif->features |= IONIC_ETH_HW_TX_SG;
	else
		lif->features &= ~IONIC_ETH_HW_TX_SG;

	/* TCP TSO implies IPv6 and ECN variants as well */
	if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
		lif->features |= IONIC_ETH_HW_TSO;
		lif->features |= IONIC_ETH_HW_TSO_IPV6;
		lif->features |= IONIC_ETH_HW_TSO_ECN;
	} else {
		lif->features &= ~IONIC_ETH_HW_TSO;
		lif->features &= ~IONIC_ETH_HW_TSO_IPV6;
		lif->features &= ~IONIC_ETH_HW_TSO_ECN;
	}
}
2000
/*
 * Start the LIF: program RSS, set the default Rx mode if none is set,
 * then start all Rx and Tx queues.  Queue starts are batched in
 * "chunks" sized to the adminq: for each chunk, the firsthalf calls
 * post the Q_INIT commands without waiting, and the secondhalf calls
 * collect the completions — overlapping FW work across the chunk.
 * Queues marked DEFERRED are left for the app to start individually.
 * Returns 0 on success or -EIO if any queue failed to start.
 */
int
ionic_lif_start(struct ionic_lif *lif)
{
	struct rte_eth_dev *dev = lif->eth_dev;
	uint32_t rx_mode;
	uint32_t i, j, chunk;
	int err;
	bool fatal = false;

	err = ionic_lif_rss_setup(lif);
	if (err)
		return err;

	if (!lif->rx_mode) {
		IONIC_PRINT(DEBUG, "Setting RX mode on %s",
			lif->name);

		rx_mode = IONIC_RX_MODE_F_UNICAST;
		rx_mode |= IONIC_RX_MODE_F_MULTICAST;
		rx_mode |= IONIC_RX_MODE_F_BROADCAST;

		ionic_set_rx_mode(lif, rx_mode);
	}

	IONIC_PRINT(DEBUG, "Starting %u RX queues and %u TX queues "
		"on port %u",
		lif->nrxqcqs, lif->ntxqcqs, lif->port_id);

	/* Batch size: how many commands the adminq can hold at once */
	chunk = ionic_adminq_space_avail(lif);

	for (i = 0; i < lif->nrxqcqs; i += chunk) {
		if (lif->rxqcqs[0]->flags & IONIC_QCQ_F_DEFERRED) {
			IONIC_PRINT(DEBUG, "Rx queue start deferred");
			break;
		}

		/* Post Q_INIT for each queue in the chunk (no wait) */
		for (j = 0; j < chunk && i + j < lif->nrxqcqs; j++) {
			err = ionic_dev_rx_queue_start_firsthalf(dev, i + j);
			if (err) {
				fatal = true;
				break;
			}
		}

		/* Collect the completions for the whole chunk */
		for (j = 0; j < chunk && i + j < lif->nrxqcqs; j++) {
			/* Commands that failed to post return immediately */
			err = ionic_dev_rx_queue_start_secondhalf(dev, i + j);
			if (err)
				/* Don't break */
				fatal = true;
		}
	}
	if (fatal)
		return -EIO;

	for (i = 0; i < lif->ntxqcqs; i += chunk) {
		if (lif->txqcqs[0]->flags & IONIC_QCQ_F_DEFERRED) {
			IONIC_PRINT(DEBUG, "Tx queue start deferred");
			break;
		}

		/* Post Q_INIT for each queue in the chunk (no wait) */
		for (j = 0; j < chunk && i + j < lif->ntxqcqs; j++) {
			err = ionic_dev_tx_queue_start_firsthalf(dev, i + j);
			if (err) {
				fatal = true;
				break;
			}
		}

		/* Collect the completions for the whole chunk */
		for (j = 0; j < chunk && i + j < lif->ntxqcqs; j++) {
			/* Commands that failed to post return immediately */
			err = ionic_dev_tx_queue_start_secondhalf(dev, i + j);
			if (err)
				/* Don't break */
				fatal = true;
		}
	}
	if (fatal)
		return -EIO;

	/* Carrier ON here */
	lif->state |= IONIC_LIF_F_UP;

	ionic_link_status_check(lif);

	return 0;
}
2088
2089 int
ionic_lif_identify(struct ionic_adapter * adapter)2090 ionic_lif_identify(struct ionic_adapter *adapter)
2091 {
2092 struct ionic_dev *idev = &adapter->idev;
2093 struct ionic_identity *ident = &adapter->ident;
2094 union ionic_lif_config *cfg = &ident->lif.eth.config;
2095 uint32_t lif_words = RTE_DIM(ident->lif.words);
2096 uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data);
2097 uint32_t i, nwords;
2098 int err;
2099
2100 ionic_dev_cmd_lif_identify(idev, IONIC_LIF_TYPE_CLASSIC,
2101 IONIC_IDENTITY_VERSION_1);
2102 err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
2103 if (err)
2104 return (err);
2105
2106 nwords = RTE_MIN(lif_words, cmd_words);
2107 for (i = 0; i < nwords; i++)
2108 ident->lif.words[i] = ioread32(&idev->dev_cmd->data[i]);
2109
2110 IONIC_PRINT(INFO, "capabilities 0x%" PRIx64 " ",
2111 rte_le_to_cpu_64(ident->lif.capabilities));
2112
2113 IONIC_PRINT(INFO, "eth.max_ucast_filters 0x%" PRIx32 " ",
2114 rte_le_to_cpu_32(ident->lif.eth.max_ucast_filters));
2115 IONIC_PRINT(INFO, "eth.max_mcast_filters 0x%" PRIx32 " ",
2116 rte_le_to_cpu_32(ident->lif.eth.max_mcast_filters));
2117
2118 IONIC_PRINT(INFO, "eth.features 0x%" PRIx64 " ",
2119 rte_le_to_cpu_64(cfg->features));
2120 IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_ADMINQ] 0x%" PRIx32 " ",
2121 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_ADMINQ]));
2122 IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] 0x%" PRIx32 " ",
2123 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_NOTIFYQ]));
2124 IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_RXQ] 0x%" PRIx32 " ",
2125 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]));
2126 IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_TXQ] 0x%" PRIx32 " ",
2127 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]));
2128
2129 return 0;
2130 }
2131
2132 int
ionic_lifs_size(struct ionic_adapter * adapter)2133 ionic_lifs_size(struct ionic_adapter *adapter)
2134 {
2135 struct ionic_identity *ident = &adapter->ident;
2136 union ionic_lif_config *cfg = &ident->lif.eth.config;
2137 uint32_t nintrs, dev_nintrs = rte_le_to_cpu_32(ident->dev.nintrs);
2138
2139 adapter->max_ntxqs_per_lif =
2140 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
2141 adapter->max_nrxqs_per_lif =
2142 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]);
2143
2144 nintrs = 1 /* notifyq */;
2145
2146 if (nintrs > dev_nintrs) {
2147 IONIC_PRINT(ERR,
2148 "At most %d intr supported, minimum req'd is %u",
2149 dev_nintrs, nintrs);
2150 return -ENOSPC;
2151 }
2152
2153 adapter->nintrs = nintrs;
2154
2155 return 0;
2156 }
2157