/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <stdio.h>

#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>

#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <ethdev_driver.h>
#include <rte_geneve.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"

static inline int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

static int is_zero_addr(uint8_t *addr)
{
	return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
}

static int is_mcast_addr(uint8_t *addr)
{
	return addr[0] & 1;
}

static int is_eth_addr_valid(uint8_t *addr)
{
	return !is_mcast_addr(addr) && !is_zero_addr(addr);
}

void
enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
{
	uint16_t i;

	if (!rq || !rq->mbuf_ring) {
		dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
		return;
	}

	for (i = 0; i < rq->ring.desc_count; i++) {
		if (rq->mbuf_ring[i]) {
			rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
			rq->mbuf_ring[i] = NULL;
		}
	}
}

void enic_free_wq_buf(struct rte_mbuf **buf)
{
	struct rte_mbuf *mbuf = *buf;

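	/*
	 * Per-descriptor WQ buffer free callback: each WQ descriptor
	 * carries one mbuf segment, hence rte_pktmbuf_free_seg() rather
	 * than rte_pktmbuf_free().
	 */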
	rte_pktmbuf_free_seg(mbuf);
	*buf = NULL;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	uint32_t error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			dev_err(enic, "WQ[%d] error_status %d\n", i,
				error_status);
	}

	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
		if (!enic->rq[i].in_use)
			continue;
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			dev_err(enic, "RQ[%d] error_status %d\n", i,
				error_status);
	}
}

static void enic_clear_soft_stats(struct enic *enic)
{
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	rte_atomic64_clear(&soft_stats->rx_nombuf);
	rte_atomic64_clear(&soft_stats->rx_packet_errors);
	rte_atomic64_clear(&soft_stats->tx_oversized);
}

static void enic_init_soft_stats(struct enic *enic)
{
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	rte_atomic64_init(&soft_stats->rx_nombuf);
	rte_atomic64_init(&soft_stats->rx_packet_errors);
	rte_atomic64_init(&soft_stats->tx_oversized);
	enic_clear_soft_stats(enic);
}

int enic_dev_stats_clear(struct enic *enic)
{
	int ret;

	ret = vnic_dev_stats_clear(enic->vdev);
	if (ret != 0) {
		dev_err(enic, "Error in clearing stats\n");
		return ret;
	}
	enic_clear_soft_stats(enic);

	return 0;
}

int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
{
	struct vnic_stats *stats;
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	int64_t rx_truncated;
	uint64_t rx_packet_errors;
	int ret = vnic_dev_stats_dump(enic->vdev, &stats);

	if (ret) {
		dev_err(enic, "Error in getting stats\n");
		return ret;
	}

	/* The number of truncated packets can only be calculated by
	 * subtracting a hardware counter from error packets received by
	 * the driver. Note: this causes transient inaccuracies in the
	 * ipackets count. Also, the lengths of truncated packets are
	 * counted in ibytes even though truncated packets are dropped,
	 * which can make ibytes slightly higher than it should be.
	 */
	rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
	rx_truncated = rx_packet_errors - stats->rx.rx_errors;

	r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
	r_stats->opackets = stats->tx.tx_frames_ok;

	r_stats->ibytes = stats->rx.rx_bytes_ok;
	r_stats->obytes = stats->tx.tx_bytes_ok;

	r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
	r_stats->oerrors = stats->tx.tx_errors
			   + rte_atomic64_read(&soft_stats->tx_oversized);

	r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;

	r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
	return 0;
}

int enic_del_mac_address(struct enic *enic, int mac_index)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;

	return vnic_dev_del_addr(enic->vdev, mac_addr);
}

int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
{
	int err;

	if (!is_eth_addr_valid(mac_addr)) {
		dev_err(enic, "invalid mac address\n");
		return -EINVAL;
	}

	err = vnic_dev_add_addr(enic->vdev, mac_addr);
	if (err)
		dev_err(enic, "add mac addr failed\n");
	return err;
}

void enic_free_rq_buf(struct rte_mbuf **mbuf)
{
	if (*mbuf == NULL)
		return;

	rte_pktmbuf_free(*mbuf);
	*mbuf = NULL;
}

void enic_init_vnic_resources(struct enic *enic)
{
	unsigned int error_interrupt_enable = 1;
	unsigned int error_interrupt_offset = 0;
	unsigned int rxq_interrupt_enable = 0;
	unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
	unsigned int index = 0;
	unsigned int cq_idx;
	struct vnic_rq *data_rq;

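	/*
	 * Per-RQ Rx interrupts are wired up only when the application
	 * requested them via intr_conf.rxq; each RQ's CQ then gets its own
	 * vector, assigned sequentially below.
	 */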
	if (enic->rte_dev->data->dev_conf.intr_conf.rxq)
		rxq_interrupt_enable = 1;

	for (index = 0; index < enic->rq_count; index++) {
		cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));

		vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
			cq_idx,
			error_interrupt_enable,
			error_interrupt_offset);

		data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index, enic)];
		if (data_rq->in_use)
			vnic_rq_init(data_rq,
				     cq_idx,
				     error_interrupt_enable,
				     error_interrupt_offset);
		vnic_cq_init(&enic->cq[cq_idx],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			rxq_interrupt_enable,
			1 /* cq_entry_enable */,
			0 /* cq_message_enable */,
			rxq_interrupt_offset,
			0 /* cq_message_addr */);
		if (rxq_interrupt_enable)
			rxq_interrupt_offset++;
	}

	for (index = 0; index < enic->wq_count; index++) {
		vnic_wq_init(&enic->wq[index],
			enic_cq_wq(enic, index),
			error_interrupt_enable,
			error_interrupt_offset);
		/* Compute unsupported ol flags for enic_prep_pkts() */
		enic->wq[index].tx_offload_notsup_mask =
			RTE_MBUF_F_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;

		cq_idx = enic_cq_wq(enic, index);
		vnic_cq_init(&enic->cq[cq_idx],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			0 /* interrupt_enable */,
			0 /* cq_entry_enable */,
			1 /* cq_message_enable */,
			0 /* interrupt offset */,
			(uint64_t)enic->wq[index].cqmsg_rz->iova);
	}

	for (index = 0; index < enic->intr_count; index++) {
		vnic_intr_init(&enic->intr[index],
			       enic->config.intr_timer_usec,
			       enic->config.intr_timer_type,
			       /*mask_on_assertion*/1);
	}
}

int
enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
{
	struct rte_mbuf *mb;
	struct rq_enet_desc *rqd = rq->ring.descs;
	unsigned i;
	dma_addr_t dma_addr;
	uint32_t max_rx_pktlen;
	uint16_t rq_buf_len;

	if (!rq->in_use)
		return 0;

	dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
		  rq->ring.desc_count);

	/*
	 * If *not* using scatter and the mbuf size is greater than the
	 * requested max packet size (mtu + eth overhead), then reduce the
	 * posted buffer size to max packet size. HW still receives packets
	 * larger than max packet size, but they will be truncated, which we
	 * drop in the rx handler. Not ideal, but better than returning
	 * large packets when the user is not expecting them.
	 */
	max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
	rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
	if (max_rx_pktlen < rq_buf_len && !rq->data_queue_enable)
		rq_buf_len = max_rx_pktlen;
	for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
		mb = rte_mbuf_raw_alloc(rq->mp);
		if (mb == NULL) {
			dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
				(unsigned)rq->index);
			return -ENOMEM;
		}

		mb->data_off = RTE_PKTMBUF_HEADROOM;
		dma_addr = (dma_addr_t)(mb->buf_iova
			   + RTE_PKTMBUF_HEADROOM);
		rq_enet_desc_enc(rqd, dma_addr,
				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
				: RQ_ENET_TYPE_NOT_SOP),
				rq_buf_len);
		rq->mbuf_ring[i] = mb;
	}
	/*
	 * Do not post the buffers to the NIC until we enable the RQ via
	 * enic_start_rq().
	 */
	rq->need_initial_post = true;
	/* Initialize fetch index while RQ is disabled */
	iowrite32(0, &rq->ctrl->fetch_index);
	return 0;
}

/*
 * Post the Rx buffers for the first time. enic_alloc_rx_queue_mbufs() has
 * allocated the buffers and filled the RQ descriptor ring. Just need to push
 * the post index to the NIC.
 */
static void
enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq)
{
	if (!rq->in_use || !rq->need_initial_post)
		return;

	/* make sure all prior writes are complete before doing the PIO write */
	rte_rmb();

	/* Post all but the last buffer to VIC. */
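	/*
	 * Assumption: one descriptor is left unposted so the posted index
	 * never catches up with the fetch index, which would be
	 * indistinguishable from an empty ring.
	 */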
	rq->posted_index = rq->ring.desc_count - 1;

	rq->rx_nb_hold = 0;

	dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
		enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
	iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	rte_rmb();
	rq->need_initial_post = false;
}

void *
enic_alloc_consistent(void *priv, size_t size,
	dma_addr_t *dma_handle, uint8_t *name)
{
	void *vaddr;
	const struct rte_memzone *rz;
	*dma_handle = 0;
	struct enic *enic = (struct enic *)priv;
	struct enic_memzone_entry *mze;

	rz = rte_memzone_reserve_aligned((const char *)name, size,
			SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
	if (!rz) {
		pr_err("%s : Failed to allocate memory requested for %s\n",
			__func__, name);
		return NULL;
	}

	vaddr = rz->addr;
	*dma_handle = (dma_addr_t)rz->iova;

	mze = rte_malloc("enic memzone entry",
			 sizeof(struct enic_memzone_entry), 0);

	if (!mze) {
		pr_err("%s : Failed to allocate memory for memzone list\n",
		       __func__);
		rte_memzone_free(rz);
		return NULL;
	}

	mze->rz = rz;

	rte_spinlock_lock(&enic->memzone_list_lock);
	LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
	rte_spinlock_unlock(&enic->memzone_list_lock);

	return vaddr;
}

void
enic_free_consistent(void *priv,
		     __rte_unused size_t size,
		     void *vaddr,
		     dma_addr_t dma_handle)
{
	struct enic_memzone_entry *mze;
	struct enic *enic = (struct enic *)priv;

	rte_spinlock_lock(&enic->memzone_list_lock);
	LIST_FOREACH(mze, &enic->memzone_list, entries) {
		if (mze->rz->addr == vaddr &&
		    mze->rz->iova == dma_handle)
			break;
	}
	if (mze == NULL) {
		rte_spinlock_unlock(&enic->memzone_list_lock);
		dev_warning(enic,
			"Tried to free memory, but couldn't find it in the memzone list\n");
		return;
	}
	LIST_REMOVE(mze, entries);
	rte_spinlock_unlock(&enic->memzone_list_lock);
	rte_memzone_free(mze->rz);
	rte_free(mze);
}

int enic_link_update(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	link.link_status = enic_get_link_status(enic);
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_speed = vnic_dev_port_speed(enic->vdev);

	return rte_eth_linkstatus_set(eth_dev, &link);
}

static void
enic_intr_handler(void *arg)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
	struct enic *enic = pmd_priv(dev);

	vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);

	enic_link_update(dev);
	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	enic_log_q_error(enic);
	/* Re-enable irq in case of INTx */
	rte_intr_ack(enic->pdev->intr_handle);
}

static int enic_rxq_intr_init(struct enic *enic)
{
	struct rte_intr_handle *intr_handle;
	uint32_t rxq_intr_count, i;
	int err;

	intr_handle = enic->rte_dev->intr_handle;
	if (!enic->rte_dev->data->dev_conf.intr_conf.rxq)
		return 0;
	/*
	 * Rx queue interrupts only work when we have MSI-X interrupts,
	 * one per queue. Sharing one interrupt is technically
	 * possible with VIC, but it is not worth the complications it brings.
	 */
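	/*
	 * Rx queue vectors start at ENICPMD_RXQ_INTR_OFFSET; the vectors
	 * below that offset stay reserved for link-state change and error
	 * interrupts.
	 */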
	if (!rte_intr_cap_multiple(intr_handle)) {
		dev_err(enic, "Rx queue interrupts require MSI-X interrupts"
			" (vfio-pci driver)\n");
		return -ENOTSUP;
	}
	rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET;
	err = rte_intr_efd_enable(intr_handle, rxq_intr_count);
	if (err) {
		dev_err(enic, "Failed to enable event fds for Rx queue"
			" interrupts\n");
		return err;
	}

	if (rte_intr_vec_list_alloc(intr_handle, "enic_intr_vec",
				    rxq_intr_count)) {
		dev_err(enic, "Failed to allocate intr_vec\n");
		return -ENOMEM;
	}
	for (i = 0; i < rxq_intr_count; i++)
		if (rte_intr_vec_list_index_set(intr_handle, i,
						i + ENICPMD_RXQ_INTR_OFFSET))
			return -rte_errno;
	return 0;
}

static void enic_rxq_intr_deinit(struct enic *enic)
{
	struct rte_intr_handle *intr_handle;

	intr_handle = enic->rte_dev->intr_handle;
	rte_intr_efd_disable(intr_handle);

	rte_intr_vec_list_free(intr_handle);
}

static void enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx)
{
	struct wq_enet_desc *desc;
	struct vnic_wq *wq;
	unsigned int i;

	/*
	 * Fill WQ descriptor fields that never change. Every descriptor is
	 * one packet, so set EOP. Also set CQ_ENTRY every ENIC_WQ_CQ_THRESH
	 * descriptors (i.e. request one completion update every 32 packets).
	 */
	wq = &enic->wq[queue_idx];
	desc = (struct wq_enet_desc *)wq->ring.descs;
	for (i = 0; i < wq->ring.desc_count; i++, desc++) {
		desc->header_length_flags = 1 << WQ_ENET_FLAGS_EOP_SHIFT;
		if (i % ENIC_WQ_CQ_THRESH == ENIC_WQ_CQ_THRESH - 1)
			desc->header_length_flags |=
				(1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT);
	}
}

/*
 * The 'strong' version is in enic_rxtx_vec_avx2.c. This weak version is
 * used when that file is not compiled.
 */
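/*
 * Being __rte_weak, this definition is overridden at link time by the
 * strong definition whenever the AVX2 object is built and linked in.
 */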
__rte_weak bool
enic_use_vector_rx_handler(__rte_unused struct rte_eth_dev *eth_dev)
{
	return false;
}

void enic_pick_rx_handler(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (enic->cq64) {
		ENICPMD_LOG(DEBUG, " use the normal Rx handler for 64B CQ entry");
		eth_dev->rx_pkt_burst = &enic_recv_pkts_64;
		return;
	}
	/*
	 * Preference order:
	 * 1. The vectorized handler if possible and requested.
	 * 2. The non-scatter, simplified handler if scatter Rx is not used.
	 * 3. The default handler as a fallback.
	 */
	if (enic_use_vector_rx_handler(eth_dev))
		return;
	if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) {
		ENICPMD_LOG(DEBUG, " use the non-scatter Rx handler");
		eth_dev->rx_pkt_burst = &enic_noscatter_recv_pkts;
	} else {
		ENICPMD_LOG(DEBUG, " use the normal Rx handler");
		eth_dev->rx_pkt_burst = &enic_recv_pkts;
	}
}

/* Secondary process uses this to set the Tx handler */
void enic_pick_tx_handler(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (enic->use_simple_tx_handler) {
		ENICPMD_LOG(DEBUG, " use the simple tx handler");
		eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
	} else {
		ENICPMD_LOG(DEBUG, " use the default tx handler");
		eth_dev->tx_pkt_burst = &enic_xmit_pkts;
	}
}

int enic_enable(struct enic *enic)
{
	unsigned int index;
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	uint64_t simple_tx_offloads;
	uintptr_t p;

	if (enic->enable_avx2_rx) {
		struct rte_mbuf mb_def = { .buf_addr = 0 };

		/*
		 * mbuf_initializer contains const-after-init fields of
		 * receive mbufs (i.e. 64 bits of fields from rearm_data).
		 * It is currently used by the vectorized handler.
		 */
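		/*
		 * A single 64-bit store of this template through rearm_data
		 * resets data_off, refcnt, nb_segs, and port on each
		 * recycled mbuf in the Rx path.
		 */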
		mb_def.nb_segs = 1;
		mb_def.data_off = RTE_PKTMBUF_HEADROOM;
		mb_def.port = enic->port_id;
		rte_mbuf_refcnt_set(&mb_def, 1);
		rte_compiler_barrier();
		p = (uintptr_t)&mb_def.rearm_data;
		enic->mbuf_initializer = *(uint64_t *)p;
	}

	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

	/* vnic notification of link status has already been turned on in
	 * enic_dev_init() which is called during probe time. Here we are
	 * just turning on interrupt vector 0 if needed.
	 */
	if (eth_dev->data->dev_conf.intr_conf.lsc)
		vnic_dev_notify_set(enic->vdev, 0);

	err = enic_rxq_intr_init(enic);
	if (err)
		return err;

	/* Initialize flowman if not already initialized during probe */
	if (enic->fm == NULL && enic_fm_init(enic))
		dev_warning(enic, "Init of flowman failed.\n");

	for (index = 0; index < enic->rq_count; index++) {
		err = enic_alloc_rx_queue_mbufs(enic,
			&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
		if (err) {
			dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
			return err;
		}
		err = enic_alloc_rx_queue_mbufs(enic,
			&enic->rq[enic_rte_rq_idx_to_data_idx(index, enic)]);
		if (err) {
			/* release the allocated mbufs for the sop rq */
			enic_rxmbuf_queue_release(enic,
				&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);

			dev_err(enic, "Failed to alloc data RX queue mbufs\n");
			return err;
		}
	}

	/*
	 * Use the simple TX handler if possible. Only checksum offloads
	 * and vlan insertion are supported.
	 */
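	/*
	 * Requesting any Tx offload outside this set (TSO, for example)
	 * falls back to the default handler.
	 */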
	simple_tx_offloads = enic->tx_offload_capa &
		(RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
		 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		 RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
	if ((eth_dev->data->dev_conf.txmode.offloads &
	     ~simple_tx_offloads) == 0) {
		ENICPMD_LOG(DEBUG, " use the simple tx handler");
		eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
		for (index = 0; index < enic->wq_count; index++)
			enic_prep_wq_for_simple_tx(enic, index);
		enic->use_simple_tx_handler = 1;
	} else {
		ENICPMD_LOG(DEBUG, " use the default tx handler");
		eth_dev->tx_pkt_burst = &enic_xmit_pkts;
	}

	enic_pick_rx_handler(eth_dev);

	for (index = 0; index < enic->wq_count; index++)
		enic_start_wq(enic, index);
	for (index = 0; index < enic->rq_count; index++)
		enic_start_rq(enic, index);

	vnic_dev_add_addr(enic->vdev, enic->mac_addr);

	vnic_dev_enable_wait(enic->vdev);

	/* Register and enable error interrupt */
	rte_intr_callback_register(enic->pdev->intr_handle,
		enic_intr_handler, (void *)enic->rte_dev);

	rte_intr_enable(enic->pdev->intr_handle);
	/* Unmask LSC interrupt */
	vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);

	return 0;
}

int enic_alloc_intr_resources(struct enic *enic)
{
	int err;
	unsigned int i;

	dev_info(enic, "vNIC resources used: "\
		"wq %d rq %d cq %d intr %d\n",
		enic->wq_count, enic_vnic_rq_count(enic),
		enic->cq_count, enic->intr_count);

	for (i = 0; i < enic->intr_count; i++) {
		err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
		if (err) {
			enic_free_vnic_resources(enic);
			return err;
		}
	}
	return 0;
}

void enic_free_rq(void *rxq)
{
	struct vnic_rq *rq_sop, *rq_data;
	struct enic *enic;

	if (rxq == NULL)
		return;

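	/*
	 * rxq points at the start-of-packet (SOP) RQ; its companion data
	 * RQ, used only in scatter mode, is reached via data_queue_idx.
	 */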
	rq_sop = (struct vnic_rq *)rxq;
	enic = vnic_dev_priv(rq_sop->vdev);
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	if (rq_sop->free_mbufs) {
		struct rte_mbuf **mb;
		int i;

		mb = rq_sop->free_mbufs;
		for (i = ENIC_RX_BURST_MAX - rq_sop->num_free_mbufs;
		     i < ENIC_RX_BURST_MAX; i++)
			rte_pktmbuf_free(mb[i]);
		rte_free(rq_sop->free_mbufs);
		rq_sop->free_mbufs = NULL;
		rq_sop->num_free_mbufs = 0;
	}

	enic_rxmbuf_queue_release(enic, rq_sop);
	if (rq_data->in_use)
		enic_rxmbuf_queue_release(enic, rq_data);

	rte_free(rq_sop->mbuf_ring);
	if (rq_data->in_use)
		rte_free(rq_data->mbuf_ring);

	rq_sop->mbuf_ring = NULL;
	rq_data->mbuf_ring = NULL;

	vnic_rq_free(rq_sop);
	if (rq_data->in_use)
		vnic_rq_free(rq_data);

	vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);

	rq_sop->in_use = 0;
	rq_data->in_use = 0;
}

void enic_start_wq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev_data *data = enic->dev_data;
	vnic_wq_enable(&enic->wq[queue_idx]);
	data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev_data *data = enic->dev_data;
	int ret;

	ret = vnic_wq_disable(&enic->wq[queue_idx]);
	if (ret)
		return ret;

	data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev_data *data = enic->dev_data;
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	if (rq_data->in_use) {
		vnic_rq_enable(rq_data);
		enic_initial_post_rx(enic, rq_data);
	}
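	/*
	 * Assumption: the data RQ must be live before the SOP RQ is
	 * enabled, so the barrier orders the two enables; otherwise HW
	 * could start a scatter packet with no data ring behind it.
	 */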
	rte_mb();
	vnic_rq_enable(rq_sop);
	enic_initial_post_rx(enic, rq_sop);
	data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev_data *data = enic->dev_data;
	int ret1 = 0, ret2 = 0;
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	ret2 = vnic_rq_disable(rq_sop);
	rte_mb();
	if (rq_data->in_use)
		ret1 = vnic_rq_disable(rq_data);

	if (ret2)
		return ret2;
	else if (ret1)
		return ret1;

	data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, struct rte_mempool *mp,
	uint16_t nb_desc, uint16_t free_thresh)
{
	struct enic_vf_representor *vf;
	int rc;
	uint16_t sop_queue_idx;
	uint16_t data_queue_idx;
	uint16_t cq_idx;
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	unsigned int mbuf_size, mbufs_per_pkt;
	unsigned int nb_sop_desc, nb_data_desc;
	uint16_t min_sop, max_sop, min_data, max_data;
	uint32_t max_rx_pktlen;

	/*
	 * Representor uses a reserved PF queue. Translate representor
	 * queue number to PF queue number.
	 */
	if (enic_is_vf_rep(enic)) {
		RTE_ASSERT(queue_idx == 0);
		vf = VF_ENIC_TO_VF_REP(enic);
		sop_queue_idx = vf->pf_rq_sop_idx;
		data_queue_idx = vf->pf_rq_data_idx;
		enic = vf->pf;
		queue_idx = sop_queue_idx;
	} else {
		sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
		data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx, enic);
	}
	cq_idx = enic_cq_rq(enic, sop_queue_idx);
	rq_sop = &enic->rq[sop_queue_idx];
	rq_data = &enic->rq[data_queue_idx];
	rq_sop->is_sop = 1;
	rq_sop->data_queue_idx = data_queue_idx;
	rq_data->is_sop = 0;
	rq_data->data_queue_idx = 0;
	rq_sop->socket_id = socket_id;
	rq_sop->mp = mp;
	rq_data->socket_id = socket_id;
	rq_data->mp = mp;
	rq_sop->in_use = 1;
	rq_sop->rx_free_thresh = free_thresh;
	rq_data->rx_free_thresh = free_thresh;
	dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
		  free_thresh);

	mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
			       RTE_PKTMBUF_HEADROOM);
	/* max_rx_pktlen includes the ethernet header and CRC. */
	max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);

	if (enic->rte_dev->data->dev_conf.rxmode.offloads &
	    RTE_ETH_RX_OFFLOAD_SCATTER) {
		dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
		/* ceil((max pkt len)/mbuf_size) */
		mbufs_per_pkt = (max_rx_pktlen + mbuf_size - 1) / mbuf_size;
	} else {
		dev_info(enic, "Scatter rx mode disabled\n");
		mbufs_per_pkt = 1;
		if (max_rx_pktlen > mbuf_size) {
			dev_warning(enic, "The maximum Rx packet size (%u) is"
				    " larger than the mbuf size (%u), and"
				    " scatter is disabled. Larger packets will"
				    " be truncated.\n",
				    max_rx_pktlen, mbuf_size);
		}
	}

	if (mbufs_per_pkt > 1) {
		dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
		rq_sop->data_queue_enable = 1;
		rq_data->in_use = 1;
		/*
		 * HW does not directly support MTU. HW always
		 * receives packet sizes up to the "max" MTU.
		 * If not using scatter, we can achieve the effect of dropping
		 * larger packets by reducing the size of posted buffers.
		 * See enic_alloc_rx_queue_mbufs().
		 */
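		/*
		 * In scatter mode the posted-buffer trick does not apply,
		 * so an MTU below the max cannot be enforced; warn instead.
		 */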
		if (enic->rte_dev->data->mtu < enic->max_mtu) {
			dev_warning(enic,
				"mtu is ignored when scatter rx mode is in use.\n");
		}
	} else {
		dev_info(enic, "Rq %u Scatter rx mode not being used\n",
			 queue_idx);
		rq_sop->data_queue_enable = 0;
		rq_data->in_use = 0;
	}

	/* The number of descriptors has to be a multiple of 32 */
	nb_sop_desc = (nb_desc / mbufs_per_pkt) & ENIC_ALIGN_DESCS_MASK;
	nb_data_desc = (nb_desc - nb_sop_desc) & ENIC_ALIGN_DESCS_MASK;

	rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
	rq_data->max_mbufs_per_pkt = mbufs_per_pkt;

	if (mbufs_per_pkt > 1) {
		min_sop = ENIC_RX_BURST_MAX;
		max_sop = ((enic->config.rq_desc_count /
			    (mbufs_per_pkt - 1)) & ENIC_ALIGN_DESCS_MASK);
		min_data = min_sop * (mbufs_per_pkt - 1);
		max_data = enic->config.rq_desc_count;
	} else {
		min_sop = ENIC_RX_BURST_MAX;
		max_sop = enic->config.rq_desc_count;
		min_data = 0;
		max_data = 0;
	}

	if (nb_desc < (min_sop + min_data)) {
		dev_warning(enic,
			    "Number of rx descs too low, adjusting to minimum\n");
		nb_sop_desc = min_sop;
		nb_data_desc = min_data;
	} else if (nb_desc > (max_sop + max_data)) {
		dev_warning(enic,
			    "Number of rx_descs too high, adjusting to maximum\n");
		nb_sop_desc = max_sop;
		nb_data_desc = max_data;
	}
	if (mbufs_per_pkt > 1) {
		dev_info(enic, "For max packet size %u and mbuf size %u valid"
			 " rx descriptor range is %u to %u\n",
			 max_rx_pktlen, mbuf_size, min_sop + min_data,
			 max_sop + max_data);
	}
	dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
		 nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);

	/* Allocate sop queue resources */
	rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
			   nb_sop_desc, sizeof(struct rq_enet_desc));
	if (rc) {
		dev_err(enic, "error in allocation of sop rq\n");
		goto err_exit;
	}
	nb_sop_desc = rq_sop->ring.desc_count;

	if (rq_data->in_use) {
		/* Allocate data queue resources */
		rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
				   nb_data_desc,
				   sizeof(struct rq_enet_desc));
		if (rc) {
			dev_err(enic, "error in allocation of data rq\n");
			goto err_free_rq_sop;
		}
		nb_data_desc = rq_data->ring.desc_count;
	}
	/* Enable 64B CQ entry if requested */
	if (enic->cq64 && vnic_dev_set_cq_entry_size(enic->vdev,
		sop_queue_idx, VNIC_RQ_CQ_ENTRY_SIZE_64)) {
		dev_err(enic, "failed to enable 64B CQ entry on sop rq\n");
		goto err_free_rq_data;
	}
	if (rq_data->in_use && enic->cq64 &&
	    vnic_dev_set_cq_entry_size(enic->vdev, data_queue_idx,
		VNIC_RQ_CQ_ENTRY_SIZE_64)) {
		dev_err(enic, "failed to enable 64B CQ entry on data rq\n");
		goto err_free_rq_data;
	}

	rc = vnic_cq_alloc(enic->vdev, &enic->cq[cq_idx], cq_idx,
			   socket_id, nb_sop_desc + nb_data_desc,
			   enic->cq64 ? sizeof(struct cq_enet_rq_desc_64) :
			   sizeof(struct cq_enet_rq_desc));
	if (rc) {
		dev_err(enic, "error in allocation of cq for rq\n");
		goto err_free_rq_data;
	}

	/* Allocate the mbuf rings */
	rq_sop->mbuf_ring = (struct rte_mbuf **)
		rte_zmalloc_socket("rq->mbuf_ring",
				   sizeof(struct rte_mbuf *) * nb_sop_desc,
				   RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
	if (rq_sop->mbuf_ring == NULL)
		goto err_free_cq;

	if (rq_data->in_use) {
		rq_data->mbuf_ring = (struct rte_mbuf **)
			rte_zmalloc_socket("rq->mbuf_ring",
				sizeof(struct rte_mbuf *) * nb_data_desc,
				RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
		if (rq_data->mbuf_ring == NULL)
			goto err_free_sop_mbuf;
	}

	rq_sop->free_mbufs = (struct rte_mbuf **)
		rte_zmalloc_socket("rq->free_mbufs",
				   sizeof(struct rte_mbuf *) *
				   ENIC_RX_BURST_MAX,
				   RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
	if (rq_sop->free_mbufs == NULL)
		goto err_free_data_mbuf;
	rq_sop->num_free_mbufs = 0;

	rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */

	return 0;

err_free_data_mbuf:
	rte_free(rq_data->mbuf_ring);
err_free_sop_mbuf:
	rte_free(rq_sop->mbuf_ring);
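	/* Error labels unwind allocations in reverse order. */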
err_free_cq:
	/* cleanup on error */
	vnic_cq_free(&enic->cq[cq_idx]);
err_free_rq_data:
	if (rq_data->in_use)
		vnic_rq_free(rq_data);
err_free_rq_sop:
	vnic_rq_free(rq_sop);
err_exit:
	return -ENOMEM;
}

void enic_free_wq(void *txq)
{
	struct vnic_wq *wq;
	struct enic *enic;

	if (txq == NULL)
		return;

	wq = (struct vnic_wq *)txq;
	enic = vnic_dev_priv(wq->vdev);
	rte_memzone_free(wq->cqmsg_rz);
	vnic_wq_free(wq);
	vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
}

int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, uint16_t nb_desc)
{
	struct enic_vf_representor *vf;
	int err;
	struct vnic_wq *wq;
	unsigned int cq_index;
	char name[RTE_MEMZONE_NAMESIZE];
	static int instance;

	/*
	 * Representor uses a reserved PF queue. Translate representor
	 * queue number to PF queue number.
	 */
	if (enic_is_vf_rep(enic)) {
		RTE_ASSERT(queue_idx == 0);
		vf = VF_ENIC_TO_VF_REP(enic);
		queue_idx = vf->pf_wq_idx;
		cq_index = vf->pf_wq_cq_idx;
		enic = vf->pf;
	} else {
		cq_index = enic_cq_wq(enic, queue_idx);
	}
	wq = &enic->wq[queue_idx];
	wq->socket_id = socket_id;
	/*
	 * rte_eth_tx_queue_setup() checks min, max, and alignment. So just
	 * print an info message for diagnostics.
	 */
	dev_info(enic, "TX Queues - effective number of descs:%d\n", nb_desc);

	/* Allocate queue resources */
	err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
		nb_desc,
		sizeof(struct wq_enet_desc));
	if (err) {
		dev_err(enic, "error in allocation of wq\n");
		return err;
	}

	err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
		socket_id, nb_desc,
		sizeof(struct cq_enet_wq_desc));
	if (err) {
		vnic_wq_free(wq);
		dev_err(enic, "error in allocation of cq for wq\n");
	}

	/* set up CQ message */
	snprintf((char *)name, sizeof(name),
		 "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
		 instance++);

	wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
			sizeof(uint32_t), SOCKET_ID_ANY,
			RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
	if (!wq->cqmsg_rz)
		return -ENOMEM;

	return err;
}

int enic_disable(struct enic *enic)
{
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}
	enic_rxq_intr_deinit(enic);
	rte_intr_disable(enic->pdev->intr_handle);
	rte_intr_callback_unregister(enic->pdev->intr_handle,
				     enic_intr_handler,
				     (void *)enic->rte_dev);

	vnic_dev_disable(enic->vdev);

	enic_fm_destroy(enic);

	if (!enic_is_sriov_vf(enic))
		vnic_dev_del_addr(enic->vdev, enic->mac_addr);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
		if (enic->rq[i].in_use) {
			err = vnic_rq_disable(&enic->rq[i]);
			if (err)
				return err;
		}
	}

set the interrupt vector to -1 11407be78d02SJosh Soref * to disable interrupts. We are not disabling link notifications, 114153fa8cc0SNelson Escobar * though, as we want the polling of link status to continue working. 114253fa8cc0SNelson Escobar */ 114353fa8cc0SNelson Escobar if (enic->rte_dev->data->dev_conf.intr_conf.lsc) 114453fa8cc0SNelson Escobar vnic_dev_notify_set(enic->vdev, -1); 114553fa8cc0SNelson Escobar 114672f3de30SBruce Richardson vnic_dev_set_reset_flag(enic->vdev, 1); 114772f3de30SBruce Richardson 114872f3de30SBruce Richardson for (i = 0; i < enic->wq_count; i++) 114972f3de30SBruce Richardson vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); 1150947d860cSJohn Daley 1151856d7ba7SNelson Escobar for (i = 0; i < enic_vnic_rq_count(enic); i++) 1152856d7ba7SNelson Escobar if (enic->rq[i].in_use) 115372f3de30SBruce Richardson vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); 115472f3de30SBruce Richardson for (i = 0; i < enic->cq_count; i++) 115572f3de30SBruce Richardson vnic_cq_clean(&enic->cq[i]); 11560f872d31SHyong Youb Kim for (i = 0; i < enic->intr_count; i++) 11570f872d31SHyong Youb Kim vnic_intr_clean(&enic->intr[i]); 115872f3de30SBruce Richardson 115972f3de30SBruce Richardson return 0; 116072f3de30SBruce Richardson } 116172f3de30SBruce Richardson 116272f3de30SBruce Richardson static int enic_dev_wait(struct vnic_dev *vdev, 116372f3de30SBruce Richardson int (*start)(struct vnic_dev *, int), 116472f3de30SBruce Richardson int (*finished)(struct vnic_dev *, int *), 116572f3de30SBruce Richardson int arg) 116672f3de30SBruce Richardson { 116772f3de30SBruce Richardson int done; 116872f3de30SBruce Richardson int err; 116972f3de30SBruce Richardson int i; 117072f3de30SBruce Richardson 117172f3de30SBruce Richardson err = start(vdev, arg); 117272f3de30SBruce Richardson if (err) 117372f3de30SBruce Richardson return err; 117472f3de30SBruce Richardson 117572f3de30SBruce Richardson /* Wait for func to complete...2 seconds max */ 117672f3de30SBruce Richardson for (i = 0; i < 2000; i++) { 117772f3de30SBruce Richardson err = finished(vdev, &done); 117872f3de30SBruce Richardson if (err) 117972f3de30SBruce Richardson return err; 118072f3de30SBruce Richardson if (done) 118172f3de30SBruce Richardson return 0; 118272f3de30SBruce Richardson usleep(1000); 118372f3de30SBruce Richardson } 118472f3de30SBruce Richardson return -ETIMEDOUT; 118572f3de30SBruce Richardson } 118672f3de30SBruce Richardson 118772f3de30SBruce Richardson static int enic_dev_open(struct enic *enic) 118872f3de30SBruce Richardson { 118972f3de30SBruce Richardson int err; 1190fe26a3bbSHyong Youb Kim int flags = CMD_OPENF_IG_DESCCACHE; 119172f3de30SBruce Richardson 119272f3de30SBruce Richardson err = enic_dev_wait(enic->vdev, vnic_dev_open, 1193fe26a3bbSHyong Youb Kim vnic_dev_open_done, flags); 119472f3de30SBruce Richardson if (err) 119572f3de30SBruce Richardson dev_err(enic_get_dev(enic), 119672f3de30SBruce Richardson "vNIC device open failed, err %d\n", err); 119772f3de30SBruce Richardson 119872f3de30SBruce Richardson return err; 119972f3de30SBruce Richardson } 120072f3de30SBruce Richardson 1201c2fec27bSHyong Youb Kim static int enic_set_rsskey(struct enic *enic, uint8_t *user_key) 120272f3de30SBruce Richardson { 120372f3de30SBruce Richardson dma_addr_t rss_key_buf_pa; 120472f3de30SBruce Richardson union vnic_rss_key *rss_key_buf_va = NULL; 1205c2fec27bSHyong Youb Kim int err, i; 120604e8ec74SJohn Daley uint8_t name[RTE_MEMZONE_NAMESIZE]; 120772f3de30SBruce Richardson 1208c2fec27bSHyong Youb Kim RTE_ASSERT(user_key != NULL); 
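	/*
	 * Layout note: the VIC takes the RSS key as padded 10-byte
	 * subkeys (union vnic_rss_key), so the copy loop below scatters
	 * the flat user key as key[i / 10].b[i % 10]. For example,
	 * user_key[0..9] fills key[0].b[0..9] and user_key[10..19]
	 * fills key[1].b[0..9].
	 */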
1209846ac76cSJohn Daley snprintf((char *)name, sizeof(name), "rss_key-%s", enic->bdf_name); 121072f3de30SBruce Richardson rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key), 121172f3de30SBruce Richardson &rss_key_buf_pa, name); 121272f3de30SBruce Richardson if (!rss_key_buf_va) 121372f3de30SBruce Richardson return -ENOMEM; 121472f3de30SBruce Richardson 1215c2fec27bSHyong Youb Kim for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) 1216c2fec27bSHyong Youb Kim rss_key_buf_va->key[i / 10].b[i % 10] = user_key[i]; 121772f3de30SBruce Richardson 121872f3de30SBruce Richardson err = enic_set_rss_key(enic, 121972f3de30SBruce Richardson rss_key_buf_pa, 122072f3de30SBruce Richardson sizeof(union vnic_rss_key)); 122172f3de30SBruce Richardson 1222c2fec27bSHyong Youb Kim /* Save for later queries */ 1223c2fec27bSHyong Youb Kim if (!err) { 1224c2fec27bSHyong Youb Kim rte_memcpy(&enic->rss_key, rss_key_buf_va, 1225c2fec27bSHyong Youb Kim sizeof(union vnic_rss_key)); 1226c2fec27bSHyong Youb Kim } 1227da5f560bSNelson Escobar enic_free_consistent(enic, sizeof(union vnic_rss_key), 122872f3de30SBruce Richardson rss_key_buf_va, rss_key_buf_pa); 122972f3de30SBruce Richardson 123072f3de30SBruce Richardson return err; 123172f3de30SBruce Richardson } 123272f3de30SBruce Richardson 1233c2fec27bSHyong Youb Kim int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu) 123472f3de30SBruce Richardson { 123572f3de30SBruce Richardson dma_addr_t rss_cpu_buf_pa; 123672f3de30SBruce Richardson union vnic_rss_cpu *rss_cpu_buf_va = NULL; 123772f3de30SBruce Richardson int err; 123804e8ec74SJohn Daley uint8_t name[RTE_MEMZONE_NAMESIZE]; 123972f3de30SBruce Richardson 1240846ac76cSJohn Daley snprintf((char *)name, sizeof(name), "rss_cpu-%s", enic->bdf_name); 124172f3de30SBruce Richardson rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu), 124272f3de30SBruce Richardson &rss_cpu_buf_pa, name); 124372f3de30SBruce Richardson if (!rss_cpu_buf_va) 124472f3de30SBruce Richardson return -ENOMEM; 124572f3de30SBruce Richardson 1246c2fec27bSHyong Youb Kim rte_memcpy(rss_cpu_buf_va, rss_cpu, sizeof(union vnic_rss_cpu)); 124772f3de30SBruce Richardson 124872f3de30SBruce Richardson err = enic_set_rss_cpu(enic, 124972f3de30SBruce Richardson rss_cpu_buf_pa, 125072f3de30SBruce Richardson sizeof(union vnic_rss_cpu)); 125172f3de30SBruce Richardson 1252da5f560bSNelson Escobar enic_free_consistent(enic, sizeof(union vnic_rss_cpu), 125372f3de30SBruce Richardson rss_cpu_buf_va, rss_cpu_buf_pa); 125472f3de30SBruce Richardson 1255c2fec27bSHyong Youb Kim /* Save for later queries */ 1256c2fec27bSHyong Youb Kim if (!err) 1257c2fec27bSHyong Youb Kim rte_memcpy(&enic->rss_cpu, rss_cpu, sizeof(union vnic_rss_cpu)); 125872f3de30SBruce Richardson return err; 125972f3de30SBruce Richardson } 126072f3de30SBruce Richardson 126104e8ec74SJohn Daley static int enic_set_niccfg(struct enic *enic, uint8_t rss_default_cpu, 126204e8ec74SJohn Daley uint8_t rss_hash_type, uint8_t rss_hash_bits, uint8_t rss_base_cpu, 126304e8ec74SJohn Daley uint8_t rss_enable) 126472f3de30SBruce Richardson { 126504e8ec74SJohn Daley const uint8_t tso_ipid_split_en = 0; 126672f3de30SBruce Richardson int err; 126772f3de30SBruce Richardson 126872f3de30SBruce Richardson err = enic_set_nic_cfg(enic, 126972f3de30SBruce Richardson rss_default_cpu, rss_hash_type, 127072f3de30SBruce Richardson rss_hash_bits, rss_base_cpu, 127172f3de30SBruce Richardson rss_enable, tso_ipid_split_en, 127272f3de30SBruce Richardson enic->ig_vlan_strip_en); 127372f3de30SBruce 
Richardson 127472f3de30SBruce Richardson return err; 127572f3de30SBruce Richardson } 127672f3de30SBruce Richardson 1277c2fec27bSHyong Youb Kim /* Initialize RSS with defaults, called from dev_configure */ 1278c2fec27bSHyong Youb Kim int enic_init_rss_nic_cfg(struct enic *enic) 127972f3de30SBruce Richardson { 1280c2fec27bSHyong Youb Kim static uint8_t default_rss_key[] = { 1281c2fec27bSHyong Youb Kim 85, 67, 83, 97, 119, 101, 115, 111, 109, 101, 1282c2fec27bSHyong Youb Kim 80, 65, 76, 79, 117, 110, 105, 113, 117, 101, 1283c2fec27bSHyong Youb Kim 76, 73, 78, 85, 88, 114, 111, 99, 107, 115, 1284c2fec27bSHyong Youb Kim 69, 78, 73, 67, 105, 115, 99, 111, 111, 108, 1285c2fec27bSHyong Youb Kim }; 1286c2fec27bSHyong Youb Kim struct rte_eth_rss_conf rss_conf; 1287c2fec27bSHyong Youb Kim union vnic_rss_cpu rss_cpu; 1288c2fec27bSHyong Youb Kim int ret, i; 128972f3de30SBruce Richardson 1290c2fec27bSHyong Youb Kim rss_conf = enic->rte_dev->data->dev_conf.rx_adv_conf.rss_conf; 1291c2fec27bSHyong Youb Kim /* 1292c2fec27bSHyong Youb Kim * If setting key for the first time, and the user gives us none, then 1293c2fec27bSHyong Youb Kim * push the default key to NIC. 1294c2fec27bSHyong Youb Kim */ 1295c2fec27bSHyong Youb Kim if (rss_conf.rss_key == NULL) { 1296c2fec27bSHyong Youb Kim rss_conf.rss_key = default_rss_key; 1297c2fec27bSHyong Youb Kim rss_conf.rss_key_len = ENIC_RSS_HASH_KEY_SIZE; 129872f3de30SBruce Richardson } 1299c2fec27bSHyong Youb Kim ret = enic_set_rss_conf(enic, &rss_conf); 1300c2fec27bSHyong Youb Kim if (ret) { 1301c2fec27bSHyong Youb Kim dev_err(enic, "Failed to configure RSS\n"); 1302c2fec27bSHyong Youb Kim return ret; 130372f3de30SBruce Richardson } 1304c2fec27bSHyong Youb Kim if (enic->rss_enable) { 1305c2fec27bSHyong Youb Kim /* If enabling RSS, use the default reta */ 1306c2fec27bSHyong Youb Kim for (i = 0; i < ENIC_RSS_RETA_SIZE; i++) { 1307c2fec27bSHyong Youb Kim rss_cpu.cpu[i / 4].b[i % 4] = 1308c2fec27bSHyong Youb Kim enic_rte_rq_idx_to_sop_idx(i % enic->rq_count); 130972f3de30SBruce Richardson } 1310c2fec27bSHyong Youb Kim ret = enic_set_rss_reta(enic, &rss_cpu); 1311c2fec27bSHyong Youb Kim if (ret) 1312c2fec27bSHyong Youb Kim dev_err(enic, "Failed to set RSS indirection table\n"); 1313c2fec27bSHyong Youb Kim } 1314c2fec27bSHyong Youb Kim return ret; 131572f3de30SBruce Richardson } 131672f3de30SBruce Richardson 131772f3de30SBruce Richardson int enic_setup_finish(struct enic *enic) 131872f3de30SBruce Richardson { 131965b5434dSJohn Daley enic_init_soft_stats(enic); 132065b5434dSJohn Daley 132139cf83f1SHyong Youb Kim /* switchdev: enable promisc mode on PF */ 132239cf83f1SHyong Youb Kim if (enic->switchdev_mode) { 132339cf83f1SHyong Youb Kim vnic_dev_packet_filter(enic->vdev, 132439cf83f1SHyong Youb Kim 0 /* directed */, 132539cf83f1SHyong Youb Kim 0 /* multicast */, 132639cf83f1SHyong Youb Kim 0 /* broadcast */, 132739cf83f1SHyong Youb Kim 1 /* promisc */, 132839cf83f1SHyong Youb Kim 0 /* allmulti */); 132939cf83f1SHyong Youb Kim enic->promisc = 1; 133039cf83f1SHyong Youb Kim enic->allmulti = 0; 133139cf83f1SHyong Youb Kim return 0; 133239cf83f1SHyong Youb Kim } 133372f3de30SBruce Richardson /* Default conf */ 133472f3de30SBruce Richardson vnic_dev_packet_filter(enic->vdev, 133572f3de30SBruce Richardson 1 /* directed */, 133672f3de30SBruce Richardson 1 /* multicast */, 133772f3de30SBruce Richardson 1 /* broadcast */, 133872f3de30SBruce Richardson 0 /* promisc */, 133972f3de30SBruce Richardson 1 /* allmulti */); 134072f3de30SBruce Richardson 134172f3de30SBruce Richardson 
enic->promisc = 0; 134272f3de30SBruce Richardson enic->allmulti = 1; 134372f3de30SBruce Richardson 134472f3de30SBruce Richardson return 0; 134572f3de30SBruce Richardson } 134672f3de30SBruce Richardson 1347c2fec27bSHyong Youb Kim static int enic_rss_conf_valid(struct enic *enic, 1348c2fec27bSHyong Youb Kim struct rte_eth_rss_conf *rss_conf) 1349c2fec27bSHyong Youb Kim { 1350c2fec27bSHyong Youb Kim /* RSS is disabled per VIC settings. Ignore rss_conf. */ 1351c2fec27bSHyong Youb Kim if (enic->flow_type_rss_offloads == 0) 1352c2fec27bSHyong Youb Kim return 0; 1353c2fec27bSHyong Youb Kim if (rss_conf->rss_key != NULL && 1354c2fec27bSHyong Youb Kim rss_conf->rss_key_len != ENIC_RSS_HASH_KEY_SIZE) { 1355c2fec27bSHyong Youb Kim dev_err(enic, "Given rss_key is %d bytes, it must be %d\n", 1356c2fec27bSHyong Youb Kim rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE); 1357c2fec27bSHyong Youb Kim return -EINVAL; 1358c2fec27bSHyong Youb Kim } 1359c2fec27bSHyong Youb Kim if (rss_conf->rss_hf != 0 && 1360c2fec27bSHyong Youb Kim (rss_conf->rss_hf & enic->flow_type_rss_offloads) == 0) { 1361c2fec27bSHyong Youb Kim dev_err(enic, "Given rss_hf contains none of the supported" 1362c2fec27bSHyong Youb Kim " types\n"); 1363c2fec27bSHyong Youb Kim return -EINVAL; 1364c2fec27bSHyong Youb Kim } 1365c2fec27bSHyong Youb Kim return 0; 1366c2fec27bSHyong Youb Kim } 1367c2fec27bSHyong Youb Kim 1368c2fec27bSHyong Youb Kim /* Set hash type and key according to rss_conf */ 1369c2fec27bSHyong Youb Kim int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf) 1370c2fec27bSHyong Youb Kim { 1371c2fec27bSHyong Youb Kim struct rte_eth_dev *eth_dev; 1372c2fec27bSHyong Youb Kim uint64_t rss_hf; 137304e8ec74SJohn Daley uint8_t rss_hash_type; 137404e8ec74SJohn Daley uint8_t rss_enable; 1375c2fec27bSHyong Youb Kim int ret; 1376c2fec27bSHyong Youb Kim 1377c2fec27bSHyong Youb Kim RTE_ASSERT(rss_conf != NULL); 1378c2fec27bSHyong Youb Kim ret = enic_rss_conf_valid(enic, rss_conf); 1379c2fec27bSHyong Youb Kim if (ret) { 1380c2fec27bSHyong Youb Kim dev_err(enic, "RSS configuration (rss_conf) is invalid\n"); 1381c2fec27bSHyong Youb Kim return ret; 1382c2fec27bSHyong Youb Kim } 1383c2fec27bSHyong Youb Kim 1384c2fec27bSHyong Youb Kim eth_dev = enic->rte_dev; 1385c2fec27bSHyong Youb Kim rss_hash_type = 0; 1386c2fec27bSHyong Youb Kim rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads; 1387c2fec27bSHyong Youb Kim if (enic->rq_count > 1 && 1388295968d1SFerruh Yigit (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) && 1389c2fec27bSHyong Youb Kim rss_hf != 0) { 1390c2fec27bSHyong Youb Kim rss_enable = 1; 1391295968d1SFerruh Yigit if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | 1392295968d1SFerruh Yigit RTE_ETH_RSS_NONFRAG_IPV4_OTHER)) 1393c2fec27bSHyong Youb Kim rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4; 1394295968d1SFerruh Yigit if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) 1395c2fec27bSHyong Youb Kim rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4; 1396295968d1SFerruh Yigit if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) { 139794c35189SHyong Youb Kim rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4; 13985bc989e6SHyong Youb Kim if (enic->udp_rss_weak) { 13999bd04182SJohn Daley /* 140094c35189SHyong Youb Kim * 'TCP' is not a typo. The "weak" version of 140194c35189SHyong Youb Kim * UDP RSS requires both the TCP and UDP bits 140294c35189SHyong Youb Kim * be set. It does enable TCP RSS as well. 
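 * For example, requesting only RTE_ETH_RSS_NONFRAG_IPV4_UDP on
 * such a NIC ends up setting both NIC_CFG_RSS_HASH_TYPE_UDP_IPV4
 * and NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 here.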
14039bd04182SJohn Daley */ 14049bd04182SJohn Daley rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4; 14059bd04182SJohn Daley } 140694c35189SHyong Youb Kim } 1407295968d1SFerruh Yigit if (rss_hf & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_IPV6_EX | 1408295968d1SFerruh Yigit RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER)) 1409c2fec27bSHyong Youb Kim rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6; 1410295968d1SFerruh Yigit if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX)) 1411c2fec27bSHyong Youb Kim rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; 1412295968d1SFerruh Yigit if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX)) { 141394c35189SHyong Youb Kim rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6; 14145bc989e6SHyong Youb Kim if (enic->udp_rss_weak) 14159bd04182SJohn Daley rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; 14169bd04182SJohn Daley } 1417c2fec27bSHyong Youb Kim } else { 1418c2fec27bSHyong Youb Kim rss_enable = 0; 1419c2fec27bSHyong Youb Kim rss_hf = 0; 1420c2fec27bSHyong Youb Kim } 1421c2fec27bSHyong Youb Kim 1422c2fec27bSHyong Youb Kim /* Set the hash key if provided */ 1423c2fec27bSHyong Youb Kim if (rss_enable && rss_conf->rss_key) { 1424c2fec27bSHyong Youb Kim ret = enic_set_rsskey(enic, rss_conf->rss_key); 1425c2fec27bSHyong Youb Kim if (ret) { 1426c2fec27bSHyong Youb Kim dev_err(enic, "Failed to set RSS key\n"); 1427c2fec27bSHyong Youb Kim return ret; 1428c2fec27bSHyong Youb Kim } 1429c2fec27bSHyong Youb Kim } 1430c2fec27bSHyong Youb Kim 1431c2fec27bSHyong Youb Kim ret = enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, rss_hash_type, 1432c2fec27bSHyong Youb Kim ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU, 1433c2fec27bSHyong Youb Kim rss_enable); 1434c2fec27bSHyong Youb Kim if (!ret) { 1435c2fec27bSHyong Youb Kim enic->rss_hf = rss_hf; 1436c2fec27bSHyong Youb Kim enic->rss_hash_type = rss_hash_type; 1437c2fec27bSHyong Youb Kim enic->rss_enable = rss_enable; 14385bc989e6SHyong Youb Kim } else { 14395bc989e6SHyong Youb Kim dev_err(enic, "Failed to update RSS configurations." 14405bc989e6SHyong Youb Kim " hash=0x%x\n", rss_hash_type); 1441c2fec27bSHyong Youb Kim } 14425bc989e6SHyong Youb Kim return ret; 1443c2fec27bSHyong Youb Kim } 1444c2fec27bSHyong Youb Kim 1445c2fec27bSHyong Youb Kim int enic_set_vlan_strip(struct enic *enic) 1446c2fec27bSHyong Youb Kim { 1447c2fec27bSHyong Youb Kim /* 1448c2fec27bSHyong Youb Kim * Unfortunately, VLAN strip on/off and RSS on/off are configured 1449c2fec27bSHyong Youb Kim * together. So, re-do niccfg, preserving the current RSS settings. 
1450c2fec27bSHyong Youb Kim */ 1451c2fec27bSHyong Youb Kim return enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, enic->rss_hash_type, 1452c2fec27bSHyong Youb Kim ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU, 1453c2fec27bSHyong Youb Kim enic->rss_enable); 1454c2fec27bSHyong Youb Kim } 1455c2fec27bSHyong Youb Kim 14569039c812SAndrew Rybchenko int enic_add_packet_filter(struct enic *enic) 145772f3de30SBruce Richardson { 145839cf83f1SHyong Youb Kim /* switchdev ignores packet filters */ 145939cf83f1SHyong Youb Kim if (enic->switchdev_mode) { 146039cf83f1SHyong Youb Kim ENICPMD_LOG(DEBUG, " switchdev: ignore packet filter"); 146139cf83f1SHyong Youb Kim return 0; 146239cf83f1SHyong Youb Kim } 146372f3de30SBruce Richardson /* Args -> directed, multicast, broadcast, promisc, allmulti */ 14649039c812SAndrew Rybchenko return vnic_dev_packet_filter(enic->vdev, 1, 1, 1, 146572f3de30SBruce Richardson enic->promisc, enic->allmulti); 146672f3de30SBruce Richardson } 146772f3de30SBruce Richardson 146872f3de30SBruce Richardson int enic_get_link_status(struct enic *enic) 146972f3de30SBruce Richardson { 147072f3de30SBruce Richardson return vnic_dev_link_status(enic->vdev); 147172f3de30SBruce Richardson } 147272f3de30SBruce Richardson 147372f3de30SBruce Richardson static void enic_dev_deinit(struct enic *enic) 147472f3de30SBruce Richardson { 1475c98779abSNelson Escobar /* stop link status checking */ 1476c98779abSNelson Escobar vnic_dev_notify_unset(enic->vdev); 1477c98779abSNelson Escobar 14787f34bb52SHyong Youb Kim /* mac_addrs is freed by rte_eth_dev_release_port() */ 14796c45c330SHyong Youb Kim rte_free(enic->cq); 14800f872d31SHyong Youb Kim rte_free(enic->intr); 14816c45c330SHyong Youb Kim rte_free(enic->rq); 14826c45c330SHyong Youb Kim rte_free(enic->wq); 148372f3de30SBruce Richardson } 148472f3de30SBruce Richardson 148572f3de30SBruce Richardson 148672f3de30SBruce Richardson int enic_set_vnic_res(struct enic *enic) 148772f3de30SBruce Richardson { 148872f3de30SBruce Richardson struct rte_eth_dev *eth_dev = enic->rte_dev; 1489b6d5fd2eSJohn Daley int rc = 0; 14900f872d31SHyong Youb Kim unsigned int required_rq, required_wq, required_cq, required_intr; 149172f3de30SBruce Richardson 14926c45c330SHyong Youb Kim /* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. 
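 * Each eth_dev Rx queue is backed by a start-of-packet (SOP) RQ plus
 * a companion data RQ. The data RQ holds non-first segments and is
 * left marked unused when Rx scatter is off, but the vNIC resource is
 * reserved either way, hence the factor of two below.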
*/ 14936c45c330SHyong Youb Kim required_rq = eth_dev->data->nb_rx_queues * 2; 14946c45c330SHyong Youb Kim required_wq = eth_dev->data->nb_tx_queues; 14956c45c330SHyong Youb Kim required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues; 14960f872d31SHyong Youb Kim required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */ 14970f872d31SHyong Youb Kim if (eth_dev->data->dev_conf.intr_conf.rxq) { 14980f872d31SHyong Youb Kim required_intr += eth_dev->data->nb_rx_queues; 14990f872d31SHyong Youb Kim } 1500edd08548SHyong Youb Kim ENICPMD_LOG(DEBUG, "Required queues for PF: rq %u wq %u cq %u", 1501edd08548SHyong Youb Kim required_rq, required_wq, required_cq); 1502edd08548SHyong Youb Kim if (enic->vf_required_rq) { 1503edd08548SHyong Youb Kim /* Queues needed for VF representors */ 1504edd08548SHyong Youb Kim required_rq += enic->vf_required_rq; 1505edd08548SHyong Youb Kim required_wq += enic->vf_required_wq; 1506edd08548SHyong Youb Kim required_cq += enic->vf_required_cq; 1507edd08548SHyong Youb Kim ENICPMD_LOG(DEBUG, "Required queues for VF representors: rq %u wq %u cq %u", 1508edd08548SHyong Youb Kim enic->vf_required_rq, enic->vf_required_wq, 1509edd08548SHyong Youb Kim enic->vf_required_cq); 1510edd08548SHyong Youb Kim } 15116c45c330SHyong Youb Kim 15126c45c330SHyong Youb Kim if (enic->conf_rq_count < required_rq) { 1513856d7ba7SNelson Escobar dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n", 1514856d7ba7SNelson Escobar eth_dev->data->nb_rx_queues, 15156c45c330SHyong Youb Kim required_rq, enic->conf_rq_count); 1516b6d5fd2eSJohn Daley rc = -EINVAL; 1517b6d5fd2eSJohn Daley } 15186c45c330SHyong Youb Kim if (enic->conf_wq_count < required_wq) { 1519b6d5fd2eSJohn Daley dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n", 1520ce93d3c3SNelson Escobar eth_dev->data->nb_tx_queues, enic->conf_wq_count); 1521b6d5fd2eSJohn Daley rc = -EINVAL; 152272f3de30SBruce Richardson } 152372f3de30SBruce Richardson 15246c45c330SHyong Youb Kim if (enic->conf_cq_count < required_cq) { 1525b6d5fd2eSJohn Daley dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n", 15266c45c330SHyong Youb Kim required_cq, enic->conf_cq_count); 1527b6d5fd2eSJohn Daley rc = -EINVAL; 1528b6d5fd2eSJohn Daley } 15290f872d31SHyong Youb Kim if (enic->conf_intr_count < required_intr) { 15300f872d31SHyong Youb Kim dev_err(dev, "Not enough Interrupts to support Rx queue" 15310f872d31SHyong Youb Kim " interrupts. 
Required:%u, Configured:%u\n", 15320f872d31SHyong Youb Kim required_intr, enic->conf_intr_count); 15330f872d31SHyong Youb Kim rc = -EINVAL; 15340f872d31SHyong Youb Kim } 1535b6d5fd2eSJohn Daley 1536b6d5fd2eSJohn Daley if (rc == 0) { 153772f3de30SBruce Richardson enic->rq_count = eth_dev->data->nb_rx_queues; 153872f3de30SBruce Richardson enic->wq_count = eth_dev->data->nb_tx_queues; 1539b6d5fd2eSJohn Daley enic->cq_count = enic->rq_count + enic->wq_count; 15400f872d31SHyong Youb Kim enic->intr_count = required_intr; 154172f3de30SBruce Richardson } 154272f3de30SBruce Richardson 1543b6d5fd2eSJohn Daley return rc; 154472f3de30SBruce Richardson } 154572f3de30SBruce Richardson 1546c3e09182SJohn Daley /* Initialize the completion queue for an RQ */ 1547c3e09182SJohn Daley static int 1548c3e09182SJohn Daley enic_reinit_rq(struct enic *enic, unsigned int rq_idx) 1549c3e09182SJohn Daley { 1550c3e09182SJohn Daley struct vnic_rq *sop_rq, *data_rq; 1551ea5f15b1SJohn Daley unsigned int cq_idx; 1552c3e09182SJohn Daley int rc = 0; 1553c3e09182SJohn Daley 1554aa07bf8fSJohn Daley sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)]; 1555285fd7c4SJohn Daley data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx, enic)]; 1556edd08548SHyong Youb Kim cq_idx = enic_cq_rq(enic, rq_idx); 1557c3e09182SJohn Daley 1558c3e09182SJohn Daley vnic_cq_clean(&enic->cq[cq_idx]); 1559c3e09182SJohn Daley vnic_cq_init(&enic->cq[cq_idx], 1560c3e09182SJohn Daley 0 /* flow_control_enable */, 1561c3e09182SJohn Daley 1 /* color_enable */, 1562c3e09182SJohn Daley 0 /* cq_head */, 1563c3e09182SJohn Daley 0 /* cq_tail */, 1564c3e09182SJohn Daley 1 /* cq_tail_color */, 1565c3e09182SJohn Daley 0 /* interrupt_enable */, 1566c3e09182SJohn Daley 1 /* cq_entry_enable */, 1567c3e09182SJohn Daley 0 /* cq_message_enable */, 1568c3e09182SJohn Daley 0 /* interrupt offset */, 1569c3e09182SJohn Daley 0 /* cq_message_addr */); 1570c3e09182SJohn Daley 1571c3e09182SJohn Daley 1572aa07bf8fSJohn Daley vnic_rq_init_start(sop_rq, enic_cq_rq(enic, 1573aa07bf8fSJohn Daley enic_rte_rq_idx_to_sop_idx(rq_idx)), 0, 1574aa07bf8fSJohn Daley sop_rq->ring.desc_count - 1, 1, 0); 1575c3e09182SJohn Daley if (data_rq->in_use) { 1576c3e09182SJohn Daley vnic_rq_init_start(data_rq, 1577aa07bf8fSJohn Daley enic_cq_rq(enic, 1578285fd7c4SJohn Daley enic_rte_rq_idx_to_data_idx(rq_idx, enic)), 1579285fd7c4SJohn Daley 0, data_rq->ring.desc_count - 1, 1, 0); 1580c3e09182SJohn Daley } 1581c3e09182SJohn Daley 1582c3e09182SJohn Daley rc = enic_alloc_rx_queue_mbufs(enic, sop_rq); 1583c3e09182SJohn Daley if (rc) 1584c3e09182SJohn Daley return rc; 1585c3e09182SJohn Daley 1586c3e09182SJohn Daley if (data_rq->in_use) { 1587c3e09182SJohn Daley rc = enic_alloc_rx_queue_mbufs(enic, data_rq); 1588c3e09182SJohn Daley if (rc) { 1589c3e09182SJohn Daley enic_rxmbuf_queue_release(enic, sop_rq); 1590c3e09182SJohn Daley return rc; 1591c3e09182SJohn Daley } 1592c3e09182SJohn Daley } 1593c3e09182SJohn Daley 1594c3e09182SJohn Daley return 0; 1595c3e09182SJohn Daley } 1596c3e09182SJohn Daley 1597396a6d71SJohn Daley /* The Cisco NIC can send and receive packets up to a max packet size 1598396a6d71SJohn Daley * determined by the NIC type and firmware. There is also an MTU 1599396a6d71SJohn Daley * configured into the NIC via the CIMC/UCSM management interface 1600396a6d71SJohn Daley * which can be overridden by this function (up to the max packet size). 1601396a6d71SJohn Daley * Depending on the network setup, doing so may cause packet drops 1602396a6d71SJohn Daley * and unexpected behavior. 
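 * For example, raising the MTU here to a jumbo size while CIMC/UCSM
 * provisioned 1500 may be accepted by the adapter yet still lead to
 * drops elsewhere in a fabric sized for the smaller value.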
1603396a6d71SJohn Daley */ 1604396a6d71SJohn Daley int enic_set_mtu(struct enic *enic, uint16_t new_mtu) 1605396a6d71SJohn Daley { 1606c3e09182SJohn Daley unsigned int rq_idx; 1607c3e09182SJohn Daley struct vnic_rq *rq; 1608c3e09182SJohn Daley int rc = 0; 1609396a6d71SJohn Daley uint16_t old_mtu; /* previous setting */ 1610396a6d71SJohn Daley uint16_t config_mtu; /* Value configured into NIC via CIMC/UCSM */ 1611396a6d71SJohn Daley struct rte_eth_dev *eth_dev = enic->rte_dev; 1612396a6d71SJohn Daley 1613396a6d71SJohn Daley old_mtu = eth_dev->data->mtu; 1614396a6d71SJohn Daley config_mtu = enic->config.mtu; 1615396a6d71SJohn Daley 16160e804034SJohn Daley if (rte_eal_process_type() != RTE_PROC_PRIMARY) 16170e804034SJohn Daley return -E_RTE_SECONDARY; 16180e804034SJohn Daley 1619396a6d71SJohn Daley if (new_mtu > enic->max_mtu) { 1620396a6d71SJohn Daley dev_err(enic, 1621396a6d71SJohn Daley "MTU not updated: requested (%u) greater than max (%u)\n", 1622396a6d71SJohn Daley new_mtu, enic->max_mtu); 1623396a6d71SJohn Daley return -EINVAL; 1624396a6d71SJohn Daley } 1625396a6d71SJohn Daley if (new_mtu < ENIC_MIN_MTU) { 1626396a6d71SJohn Daley dev_info(enic, 1627396a6d71SJohn Daley "MTU not updated: requested (%u) less than min (%u)\n", 1628396a6d71SJohn Daley new_mtu, ENIC_MIN_MTU); 1629396a6d71SJohn Daley return -EINVAL; 1630396a6d71SJohn Daley } 1631396a6d71SJohn Daley if (new_mtu > config_mtu) 1632396a6d71SJohn Daley dev_warning(enic, 1633396a6d71SJohn Daley "MTU (%u) is greater than value configured in NIC (%u)\n", 1634396a6d71SJohn Daley new_mtu, config_mtu); 1635396a6d71SJohn Daley 163695faa2a9SHyong Youb Kim /* 163795faa2a9SHyong Youb Kim * If the device has not started (enic_enable), nothing to do. 163895faa2a9SHyong Youb Kim * Later, enic_enable() will set up RQs reflecting the new maximum 163995faa2a9SHyong Youb Kim * packet length. 164095faa2a9SHyong Youb Kim */ 164195faa2a9SHyong Youb Kim if (!eth_dev->data->dev_started) 164295faa2a9SHyong Youb Kim goto set_mtu_done; 164395faa2a9SHyong Youb Kim 164495faa2a9SHyong Youb Kim /* 164595faa2a9SHyong Youb Kim * The device has started, re-do RQs on the fly. In the process, we 164695faa2a9SHyong Youb Kim * pick up the new maximum packet length. 164795faa2a9SHyong Youb Kim * 164895faa2a9SHyong Youb Kim * Some applications rely on the ability to change MTU without stopping 164995faa2a9SHyong Youb Kim * the device. So keep this behavior for now. 1650c3e09182SJohn Daley */ 1651c3e09182SJohn Daley rte_spinlock_lock(&enic->mtu_lock); 1652c3e09182SJohn Daley 1653c3e09182SJohn Daley /* Stop traffic on all RQs */ 1654c3e09182SJohn Daley for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) { 1655c3e09182SJohn Daley rq = &enic->rq[rq_idx]; 1656c3e09182SJohn Daley if (rq->is_sop && rq->in_use) { 1657aa07bf8fSJohn Daley rc = enic_stop_rq(enic, 1658aa07bf8fSJohn Daley enic_sop_rq_idx_to_rte_idx(rq_idx)); 1659c3e09182SJohn Daley if (rc) { 1660c3e09182SJohn Daley dev_err(enic, "Failed to stop Rq %u\n", rq_idx); 1661c3e09182SJohn Daley goto set_mtu_done; 1662c3e09182SJohn Daley } 1663c3e09182SJohn Daley } 1664c3e09182SJohn Daley } 1665c3e09182SJohn Daley 166698a7ea33SJerin Jacob /* replace Rx function with a no-op to avoid getting stale pkts */ 1667*a41f593fSFerruh Yigit eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy; 1668bcd68b68SHyong Youb Kim rte_eth_fp_ops[enic->port_id].rx_pkt_burst = eth_dev->rx_pkt_burst; 1669c3e09182SJohn Daley rte_mb(); 1670c3e09182SJohn Daley 1671c3e09182SJohn Daley /* Allow time for threads to exit the real Rx function. 
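 * The 100 ms sleep below is a heuristic. There is no explicit
 * handshake with data-plane lcores, so this makes it unlikely, not
 * impossible, that a burst call is still in flight when the RQs are
 * reallocated.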
*/ 1672c3e09182SJohn Daley usleep(100000); 1673c3e09182SJohn Daley 1674c3e09182SJohn Daley /* now it is safe to reconfigure the RQs */ 1675c3e09182SJohn Daley 1676396a6d71SJohn Daley 1677c3e09182SJohn Daley /* free and reallocate RQs with the new MTU */ 1678c3e09182SJohn Daley for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) { 1679aa07bf8fSJohn Daley rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)]; 168033a2d659SJohn Daley if (!rq->in_use) 168133a2d659SJohn Daley continue; 1682c3e09182SJohn Daley 1683c3e09182SJohn Daley enic_free_rq(rq); 1684c3e09182SJohn Daley rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp, 1685ce16fd70SJohn Daley rq->tot_nb_desc, rq->rx_free_thresh); 1686c3e09182SJohn Daley if (rc) { 1687c3e09182SJohn Daley dev_err(enic, 1688c3e09182SJohn Daley "Fatal MTU alloc error- No traffic will pass\n"); 1689c3e09182SJohn Daley goto set_mtu_done; 1690c3e09182SJohn Daley } 1691c3e09182SJohn Daley 1692c3e09182SJohn Daley rc = enic_reinit_rq(enic, rq_idx); 1693c3e09182SJohn Daley if (rc) { 1694c3e09182SJohn Daley dev_err(enic, 1695c3e09182SJohn Daley "Fatal MTU RQ reinit- No traffic will pass\n"); 1696c3e09182SJohn Daley goto set_mtu_done; 1697c3e09182SJohn Daley } 1698c3e09182SJohn Daley } 1699c3e09182SJohn Daley 1700c3e09182SJohn Daley /* put back the real receive function */ 1701c3e09182SJohn Daley rte_mb(); 1702e92a4b41SHyong Youb Kim enic_pick_rx_handler(eth_dev); 1703bcd68b68SHyong Youb Kim rte_eth_fp_ops[enic->port_id].rx_pkt_burst = eth_dev->rx_pkt_burst; 1704c3e09182SJohn Daley rte_mb(); 1705c3e09182SJohn Daley 1706c3e09182SJohn Daley /* restart Rx traffic */ 1707c3e09182SJohn Daley for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) { 1708aa07bf8fSJohn Daley rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)]; 1709c3e09182SJohn Daley if (rq->is_sop && rq->in_use) 1710c3e09182SJohn Daley enic_start_rq(enic, rq_idx); 1711c3e09182SJohn Daley } 1712c3e09182SJohn Daley 1713c3e09182SJohn Daley set_mtu_done: 1714396a6d71SJohn Daley dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu); 1715c3e09182SJohn Daley rte_spinlock_unlock(&enic->mtu_lock); 1716c3e09182SJohn Daley return rc; 1717396a6d71SJohn Daley } 1718396a6d71SJohn Daley 171961c7b522SJohn Daley static void 172061c7b522SJohn Daley enic_disable_overlay_offload(struct enic *enic) 172161c7b522SJohn Daley { 172261c7b522SJohn Daley /* 172361c7b522SJohn Daley * Disabling fails if the feature is provisioned but 172461c7b522SJohn Daley * not enabled. So ignore result and do not log error. 
172561c7b522SJohn Daley */ 172661c7b522SJohn Daley if (enic->vxlan) { 172761c7b522SJohn Daley vnic_dev_overlay_offload_ctrl(enic->vdev, 172861c7b522SJohn Daley OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_DISABLE); 172961c7b522SJohn Daley } 173061c7b522SJohn Daley if (enic->geneve) { 173161c7b522SJohn Daley vnic_dev_overlay_offload_ctrl(enic->vdev, 173261c7b522SJohn Daley OVERLAY_FEATURE_GENEVE, OVERLAY_OFFLOAD_DISABLE); 173361c7b522SJohn Daley } 173461c7b522SJohn Daley } 173561c7b522SJohn Daley 173661c7b522SJohn Daley static int 173761c7b522SJohn Daley enic_enable_overlay_offload(struct enic *enic) 173861c7b522SJohn Daley { 173961c7b522SJohn Daley if (enic->vxlan && vnic_dev_overlay_offload_ctrl(enic->vdev, 174061c7b522SJohn Daley OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_ENABLE) != 0) { 174161c7b522SJohn Daley dev_err(NULL, "failed to enable VXLAN offload\n"); 174261c7b522SJohn Daley return -EINVAL; 174361c7b522SJohn Daley } 174461c7b522SJohn Daley if (enic->geneve && vnic_dev_overlay_offload_ctrl(enic->vdev, 174561c7b522SJohn Daley OVERLAY_FEATURE_GENEVE, OVERLAY_OFFLOAD_ENABLE) != 0) { 174661c7b522SJohn Daley dev_err(NULL, "failed to enable Geneve offload\n"); 174761c7b522SJohn Daley return -EINVAL; 174861c7b522SJohn Daley } 174961c7b522SJohn Daley enic->tx_offload_capa |= 1750295968d1SFerruh Yigit RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | 1751295968d1SFerruh Yigit (enic->geneve ? RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO : 0) | 1752295968d1SFerruh Yigit (enic->vxlan ? RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO : 0); 175361c7b522SJohn Daley enic->tx_offload_mask |= 1754daa02b5cSOlivier Matz RTE_MBUF_F_TX_OUTER_IPV6 | 1755daa02b5cSOlivier Matz RTE_MBUF_F_TX_OUTER_IPV4 | 1756daa02b5cSOlivier Matz RTE_MBUF_F_TX_OUTER_IP_CKSUM | 1757daa02b5cSOlivier Matz RTE_MBUF_F_TX_TUNNEL_MASK; 175861c7b522SJohn Daley enic->overlay_offload = true; 175961c7b522SJohn Daley 176061c7b522SJohn Daley if (enic->vxlan && enic->geneve) 176161c7b522SJohn Daley dev_info(NULL, "Overlay offload is enabled (VxLAN, Geneve)\n"); 176261c7b522SJohn Daley else if (enic->vxlan) 176361c7b522SJohn Daley dev_info(NULL, "Overlay offload is enabled (VxLAN)\n"); 176461c7b522SJohn Daley else 176561c7b522SJohn Daley dev_info(NULL, "Overlay offload is enabled (Geneve)\n"); 176661c7b522SJohn Daley 176761c7b522SJohn Daley return 0; 176861c7b522SJohn Daley } 176961c7b522SJohn Daley 177061c7b522SJohn Daley static int 177161c7b522SJohn Daley enic_reset_overlay_port(struct enic *enic) 177261c7b522SJohn Daley { 177361c7b522SJohn Daley if (enic->vxlan) { 177461c7b522SJohn Daley enic->vxlan_port = RTE_VXLAN_DEFAULT_PORT; 177561c7b522SJohn Daley /* 177661c7b522SJohn Daley * Reset the vxlan port to the default, as the NIC firmware 177761c7b522SJohn Daley * does not reset it automatically and keeps the old setting. 
177861c7b522SJohn Daley 	 */
177961c7b522SJohn Daley 		if (vnic_dev_overlay_offload_cfg(enic->vdev,
178061c7b522SJohn Daley 			OVERLAY_CFG_VXLAN_PORT_UPDATE,
178161c7b522SJohn Daley 			RTE_VXLAN_DEFAULT_PORT)) {
178261c7b522SJohn Daley 			dev_err(enic, "failed to update vxlan port\n");
178361c7b522SJohn Daley 			return -EINVAL;
178461c7b522SJohn Daley 		}
178561c7b522SJohn Daley 	}
178661c7b522SJohn Daley 	if (enic->geneve) {
178761c7b522SJohn Daley 		enic->geneve_port = RTE_GENEVE_DEFAULT_PORT;
178861c7b522SJohn Daley 		if (vnic_dev_overlay_offload_cfg(enic->vdev,
178961c7b522SJohn Daley 			OVERLAY_CFG_GENEVE_PORT_UPDATE,
179061c7b522SJohn Daley 			RTE_GENEVE_DEFAULT_PORT)) {
179161c7b522SJohn Daley 			dev_err(enic, "failed to update geneve port\n");
179261c7b522SJohn Daley 			return -EINVAL;
179361c7b522SJohn Daley 		}
179461c7b522SJohn Daley 	}
179561c7b522SJohn Daley 	return 0;
179661c7b522SJohn Daley }
179761c7b522SJohn Daley
179872f3de30SBruce Richardson static int enic_dev_init(struct enic *enic)
179972f3de30SBruce Richardson {
180072f3de30SBruce Richardson 	int err;
180172f3de30SBruce Richardson 	struct rte_eth_dev *eth_dev = enic->rte_dev;
180272f3de30SBruce Richardson
180372f3de30SBruce Richardson 	vnic_dev_intr_coal_timer_info_default(enic->vdev);
180472f3de30SBruce Richardson
180572f3de30SBruce Richardson 	/* Get vNIC configuration
180672f3de30SBruce Richardson 	*/
180772f3de30SBruce Richardson 	err = enic_get_vnic_config(enic);
180872f3de30SBruce Richardson 	if (err) {
180972f3de30SBruce Richardson 		dev_err(dev, "Get vNIC configuration failed, aborting\n");
181072f3de30SBruce Richardson 		return err;
181172f3de30SBruce Richardson 	}
181272f3de30SBruce Richardson
1813b16e60abSNelson Escobar 	/* Get available resource counts */
1814b16e60abSNelson Escobar 	enic_get_res_counts(enic);
1815b16e60abSNelson Escobar 	if (enic->conf_rq_count == 1) {
1816b16e60abSNelson Escobar 		dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n");
1817b16e60abSNelson Escobar 		dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n");
1818b16e60abSNelson Escobar 		dev_err(enic, "See the ENIC PMD guide for more information.\n");
1819b16e60abSNelson Escobar 		return -EINVAL;
1820b16e60abSNelson Escobar 	}
18216c45c330SHyong Youb Kim 	/* Queue counts may be zeros. rte_zmalloc returns NULL in that case.
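 * The checks further down therefore treat a NULL pointer as fatal
 * only when the corresponding configured count is nonzero.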
*/ 18226c45c330SHyong Youb Kim enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) * 18236c45c330SHyong Youb Kim enic->conf_cq_count, 8); 18240f872d31SHyong Youb Kim enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) * 18250f872d31SHyong Youb Kim enic->conf_intr_count, 8); 18266c45c330SHyong Youb Kim enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) * 18276c45c330SHyong Youb Kim enic->conf_rq_count, 8); 18286c45c330SHyong Youb Kim enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) * 18296c45c330SHyong Youb Kim enic->conf_wq_count, 8); 18306c45c330SHyong Youb Kim if (enic->conf_cq_count > 0 && enic->cq == NULL) { 18316c45c330SHyong Youb Kim dev_err(enic, "failed to allocate vnic_cq, aborting.\n"); 18326c45c330SHyong Youb Kim return -1; 18336c45c330SHyong Youb Kim } 18340f872d31SHyong Youb Kim if (enic->conf_intr_count > 0 && enic->intr == NULL) { 18350f872d31SHyong Youb Kim dev_err(enic, "failed to allocate vnic_intr, aborting.\n"); 18360f872d31SHyong Youb Kim return -1; 18370f872d31SHyong Youb Kim } 18386c45c330SHyong Youb Kim if (enic->conf_rq_count > 0 && enic->rq == NULL) { 18396c45c330SHyong Youb Kim dev_err(enic, "failed to allocate vnic_rq, aborting.\n"); 18406c45c330SHyong Youb Kim return -1; 18416c45c330SHyong Youb Kim } 18426c45c330SHyong Youb Kim if (enic->conf_wq_count > 0 && enic->wq == NULL) { 18436c45c330SHyong Youb Kim dev_err(enic, "failed to allocate vnic_wq, aborting.\n"); 18446c45c330SHyong Youb Kim return -1; 18456c45c330SHyong Youb Kim } 1846b16e60abSNelson Escobar 18478d496995SHyong Youb Kim eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", 18486d13ea8eSOlivier Matz sizeof(struct rte_ether_addr) * 18498d496995SHyong Youb Kim ENIC_UNICAST_PERFECT_FILTERS, 0); 185072f3de30SBruce Richardson if (!eth_dev->data->mac_addrs) { 185172f3de30SBruce Richardson dev_err(enic, "mac addr storage alloc failed, aborting.\n"); 185272f3de30SBruce Richardson return -1; 185372f3de30SBruce Richardson } 1854538da7a1SOlivier Matz rte_ether_addr_copy((struct rte_ether_addr *)enic->mac_addr, 1855bbab3d97SJohn Daley eth_dev->data->mac_addrs); 185672f3de30SBruce Richardson 185772f3de30SBruce Richardson vnic_dev_set_reset_flag(enic->vdev, 0); 185872f3de30SBruce Richardson 18596ced1376SJohn Daley LIST_INIT(&enic->flows); 18606ced1376SJohn Daley 1861c98779abSNelson Escobar /* set up link status checking */ 1862c98779abSNelson Escobar vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */ 1863c98779abSNelson Escobar 186493fb21fdSHyong Youb Kim enic->overlay_offload = false; 1865308b514bSHyong Youb Kim /* 186661c7b522SJohn Daley * First, explicitly disable overlay offload as the setting is 186761c7b522SJohn Daley * sticky, and resetting vNIC may not disable it. 1868308b514bSHyong Youb Kim */ 186961c7b522SJohn Daley enic_disable_overlay_offload(enic); 187061c7b522SJohn Daley /* Then, enable overlay offload according to vNIC flags */ 187161c7b522SJohn Daley if (!enic->disable_overlay && (enic->vxlan || enic->geneve)) { 187261c7b522SJohn Daley err = enic_enable_overlay_offload(enic); 187361c7b522SJohn Daley if (err) { 187461c7b522SJohn Daley dev_info(NULL, "failed to enable overlay offload\n"); 187561c7b522SJohn Daley return err; 1876c02a96fcSHyong Youb Kim } 1877c02a96fcSHyong Youb Kim } 1878af3a1628SHyong Youb Kim /* 187961c7b522SJohn Daley * Reset the vxlan/geneve port if HW parsing is available. It 1880af3a1628SHyong Youb Kim * is always enabled regardless of overlay offload 1881af3a1628SHyong Youb Kim * enable/disable. 
1882af3a1628SHyong Youb Kim */ 188361c7b522SJohn Daley err = enic_reset_overlay_port(enic); 188461c7b522SJohn Daley if (err) 188561c7b522SJohn Daley return err; 188693fb21fdSHyong Youb Kim 188739cf83f1SHyong Youb Kim if (enic_fm_init(enic)) 188839cf83f1SHyong Youb Kim dev_warning(enic, "Init of flowman failed.\n"); 188972f3de30SBruce Richardson return 0; 189072f3de30SBruce Richardson } 189172f3de30SBruce Richardson 189239cf83f1SHyong Youb Kim static void lock_devcmd(void *priv) 189339cf83f1SHyong Youb Kim { 189439cf83f1SHyong Youb Kim struct enic *enic = priv; 189539cf83f1SHyong Youb Kim 189639cf83f1SHyong Youb Kim rte_spinlock_lock(&enic->devcmd_lock); 189739cf83f1SHyong Youb Kim } 189839cf83f1SHyong Youb Kim 189939cf83f1SHyong Youb Kim static void unlock_devcmd(void *priv) 190039cf83f1SHyong Youb Kim { 190139cf83f1SHyong Youb Kim struct enic *enic = priv; 190239cf83f1SHyong Youb Kim 190339cf83f1SHyong Youb Kim rte_spinlock_unlock(&enic->devcmd_lock); 190439cf83f1SHyong Youb Kim } 190539cf83f1SHyong Youb Kim 190672f3de30SBruce Richardson int enic_probe(struct enic *enic) 190772f3de30SBruce Richardson { 190872f3de30SBruce Richardson struct rte_pci_device *pdev = enic->pdev; 190972f3de30SBruce Richardson int err = -1; 191072f3de30SBruce Richardson 1911d0c98d9eSJohn Daley dev_debug(enic, "Initializing ENIC PMD\n"); 191272f3de30SBruce Richardson 19130e804034SJohn Daley /* if this is a secondary process the hardware is already initialized */ 19140e804034SJohn Daley if (rte_eal_process_type() != RTE_PROC_PRIMARY) 19150e804034SJohn Daley return 0; 19160e804034SJohn Daley 191772f3de30SBruce Richardson enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr; 191872f3de30SBruce Richardson enic->bar0.len = pdev->mem_resource[0].len; 191972f3de30SBruce Richardson 192072f3de30SBruce Richardson /* Register vNIC device */ 192172f3de30SBruce Richardson enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1); 192272f3de30SBruce Richardson if (!enic->vdev) { 192372f3de30SBruce Richardson dev_err(enic, "vNIC registration failed, aborting\n"); 192472f3de30SBruce Richardson goto err_out; 192572f3de30SBruce Richardson } 192672f3de30SBruce Richardson 1927da5f560bSNelson Escobar LIST_INIT(&enic->memzone_list); 1928da5f560bSNelson Escobar rte_spinlock_init(&enic->memzone_list_lock); 1929da5f560bSNelson Escobar 193072f3de30SBruce Richardson vnic_register_cbacks(enic->vdev, 193172f3de30SBruce Richardson enic_alloc_consistent, 193272f3de30SBruce Richardson enic_free_consistent); 193372f3de30SBruce Richardson 19348d782f3fSHyong Youb Kim /* 1935d74111a9SJohn Daley * Allocate the consistent memory for stats upfront so both primary and 1936d74111a9SJohn Daley * secondary processes can dump stats. 
19378d782f3fSHyong Youb Kim 	 */
19388d782f3fSHyong Youb Kim 	err = vnic_dev_alloc_stats_mem(enic->vdev);
19398d782f3fSHyong Youb Kim 	if (err) {
19408d782f3fSHyong Youb Kim 		dev_err(enic, "Failed to allocate stats memory, aborting\n");
19418d782f3fSHyong Youb Kim 		goto err_out_unregister;
19428d782f3fSHyong Youb Kim 	}
194372f3de30SBruce Richardson 	/* Issue device open to get device in known state */
194472f3de30SBruce Richardson 	err = enic_dev_open(enic);
194572f3de30SBruce Richardson 	if (err) {
194672f3de30SBruce Richardson 		dev_err(enic, "vNIC dev open failed, aborting\n");
194772f3de30SBruce Richardson 		goto err_out_unregister;
194872f3de30SBruce Richardson 	}
194972f3de30SBruce Richardson
195072f3de30SBruce Richardson 	/* Set ingress vlan rewrite mode before vnic initialization */
1951e39c2756SHyong Youb Kim 	dev_debug(enic, "Set ig_vlan_rewrite_mode=%u\n",
1952e39c2756SHyong Youb Kim 		  enic->ig_vlan_rewrite_mode);
195372f3de30SBruce Richardson 	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
1954e39c2756SHyong Youb Kim 		enic->ig_vlan_rewrite_mode);
195572f3de30SBruce Richardson 	if (err) {
195672f3de30SBruce Richardson 		dev_err(enic,
195772f3de30SBruce Richardson 			"Failed to set ingress vlan rewrite mode, aborting.\n");
195872f3de30SBruce Richardson 		goto err_out_dev_close;
195972f3de30SBruce Richardson 	}
196072f3de30SBruce Richardson
196172f3de30SBruce Richardson 	/* Issue device init to initialize the vnic-to-switch link.
196272f3de30SBruce Richardson 	 * We'll start with carrier off and wait for link UP
196372f3de30SBruce Richardson 	 * notification later to turn on carrier.  We don't need
196472f3de30SBruce Richardson 	 * to wait here for the vnic-to-switch link initialization
196572f3de30SBruce Richardson 	 * to complete; link UP notification is the indication that
196672f3de30SBruce Richardson 	 * the process is complete.
196772f3de30SBruce Richardson 	 */
196872f3de30SBruce Richardson
196972f3de30SBruce Richardson 	err = vnic_dev_init(enic->vdev, 0);
197072f3de30SBruce Richardson 	if (err) {
197172f3de30SBruce Richardson 		dev_err(enic, "vNIC dev init failed, aborting\n");
197272f3de30SBruce Richardson 		goto err_out_dev_close;
197372f3de30SBruce Richardson 	}
197472f3de30SBruce Richardson
197572f3de30SBruce Richardson 	err = enic_dev_init(enic);
197672f3de30SBruce Richardson 	if (err) {
197772f3de30SBruce Richardson 		dev_err(enic, "Device initialization failed, aborting\n");
197872f3de30SBruce Richardson 		goto err_out_dev_close;
197972f3de30SBruce Richardson 	}
198072f3de30SBruce Richardson
198139cf83f1SHyong Youb Kim 	/* Use a PF spinlock to serialize devcmd from PF and VF representors */
198239cf83f1SHyong Youb Kim 	if (enic->switchdev_mode) {
198339cf83f1SHyong Youb Kim 		rte_spinlock_init(&enic->devcmd_lock);
198439cf83f1SHyong Youb Kim 		vnic_register_lock(enic->vdev, lock_devcmd, unlock_devcmd);
198539cf83f1SHyong Youb Kim 	}
198672f3de30SBruce Richardson 	return 0;
198772f3de30SBruce Richardson
198872f3de30SBruce Richardson err_out_dev_close:
198972f3de30SBruce Richardson 	vnic_dev_close(enic->vdev);
199072f3de30SBruce Richardson err_out_unregister:
199172f3de30SBruce Richardson 	vnic_dev_unregister(enic->vdev);
199272f3de30SBruce Richardson err_out:
199372f3de30SBruce Richardson 	return err;
199472f3de30SBruce Richardson }
199572f3de30SBruce Richardson
199672f3de30SBruce Richardson void enic_remove(struct enic *enic)
199772f3de30SBruce Richardson {
199872f3de30SBruce Richardson 	enic_dev_deinit(enic);
199972f3de30SBruce Richardson 	vnic_dev_close(enic->vdev);
200072f3de30SBruce Richardson 	vnic_dev_unregister(enic->vdev);
200172f3de30SBruce Richardson }
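/*
 * Rough lifecycle of the entry points above as driven from
 * enic_ethdev.c (the ordering is an approximation for orientation,
 * not a precise call graph):
 *
 *   enic_probe()            - PCI probe: register and open the vNIC
 *   enic_set_vnic_res()     - dev_configure: size RQ/WQ/CQ/interrupts
 *   enic_init_rss_nic_cfg() - dev_configure: push default RSS settings
 *   enic_alloc_wq()         - per-Tx-queue setup
 *   enic_setup_finish()     - device start: program the packet filter
 *   enic_disable()          - device stop: quiesce and clean queues
 *   enic_remove()           - PCI remove: deinit and unregister
 */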