xref: /dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c (revision c4bcc342c8ee37b4692e79e7fac816df4f55d8ec)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <fcntl.h>
42 #include <inttypes.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46 
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_string_fns.h>
61 #include <rte_malloc.h>
62 #include <rte_dev.h>
63 
64 #include "base/vmxnet3_defs.h"
65 
66 #include "vmxnet3_ring.h"
67 #include "vmxnet3_logs.h"
68 #include "vmxnet3_ethdev.h"
69 
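/* Set to 1 to compile in ECR system-event handling (see vmxnet3_process_events()). */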
70 #define PROCESS_SYS_EVENTS 0
71 
72 static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
73 static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
74 static int vmxnet3_dev_start(struct rte_eth_dev *dev);
75 static void vmxnet3_dev_stop(struct rte_eth_dev *dev);
76 static void vmxnet3_dev_close(struct rte_eth_dev *dev);
77 static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
78 static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
79 static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
80 static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
81 static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
82 static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
83 				int wait_to_complete);
84 static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
85 				struct rte_eth_stats *stats);
86 static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
87 				struct rte_eth_dev_info *dev_info);
88 static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
89 				       uint16_t vid, int on);
90 static void vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
91 static void vmxnet3_dev_vlan_offload_set_clear(struct rte_eth_dev *dev,
92 						int mask, int clear);
93 
94 #if PROCESS_SYS_EVENTS == 1
95 static void vmxnet3_process_events(struct vmxnet3_hw *);
96 #endif
97 /*
98  * The set of PCI devices this driver supports
99  */
100 static const struct rte_pci_id pci_id_vmxnet3_map[] = {
101 
102 #define RTE_PCI_DEV_ID_DECL_VMXNET3(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
103 #include "rte_pci_dev_ids.h"
104 
105 { .vendor_id = 0, /* sentinel */ },
106 };
107 
108 static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
109 	.dev_configure        = vmxnet3_dev_configure,
110 	.dev_start            = vmxnet3_dev_start,
111 	.dev_stop             = vmxnet3_dev_stop,
112 	.dev_close            = vmxnet3_dev_close,
113 	.promiscuous_enable   = vmxnet3_dev_promiscuous_enable,
114 	.promiscuous_disable  = vmxnet3_dev_promiscuous_disable,
115 	.allmulticast_enable  = vmxnet3_dev_allmulticast_enable,
116 	.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
117 	.link_update          = vmxnet3_dev_link_update,
118 	.stats_get            = vmxnet3_dev_stats_get,
119 	.dev_infos_get        = vmxnet3_dev_info_get,
120 	.vlan_filter_set      = vmxnet3_dev_vlan_filter_set,
121 	.vlan_offload_set     = vmxnet3_dev_vlan_offload_set,
122 	.rx_queue_setup       = vmxnet3_dev_rx_queue_setup,
123 	.rx_queue_release     = vmxnet3_dev_rx_queue_release,
124 	.tx_queue_setup       = vmxnet3_dev_tx_queue_setup,
125 	.tx_queue_release     = vmxnet3_dev_tx_queue_release,
126 };
127 
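/*
 * Reserve a memzone named "<pci_drv.name>_<port_id>_<post_string>", or return
 * the existing zone of that name; callers pass the zone's physical address to
 * the device.
 */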
128 static const struct rte_memzone *
129 gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
130 		const char *post_string, int socket_id, uint16_t align)
131 {
132 	char z_name[RTE_MEMZONE_NAMESIZE];
133 	const struct rte_memzone *mz;
134 
135 	snprintf(z_name, sizeof(z_name), "%s_%d_%s",
136 					dev->driver->pci_drv.name, dev->data->port_id, post_string);
137 
138 	mz = rte_memzone_lookup(z_name);
139 	if (mz)
140 		return mz;
141 
142 	return rte_memzone_reserve_aligned(z_name, size,
143 			socket_id, 0, align);
144 }
145 
146 /**
147  * Atomically reads the link status information from global
148  * structure rte_eth_dev.
149  *
150  * @param dev
151  *   Pointer to the structure rte_eth_dev to read from.
152  * @param link Pointer to the buffer to be filled with the link status.
153  *
154  * @return
155  *   - On success, zero.
156  *   - On failure, negative value.
157  */
158 
159 static int
160 vmxnet3_dev_atomic_read_link_status(struct rte_eth_dev *dev,
161 				    struct rte_eth_link *link)
162 {
163 	struct rte_eth_link *dst = link;
164 	struct rte_eth_link *src = &(dev->data->dev_link);
165 
166 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
167 				*(uint64_t *)src) == 0)
168 		return -1;
169 
170 	return 0;
171 }
172 
173 /**
174  * Atomically writes the link status information into global
175  * structure rte_eth_dev.
176  *
177  * @param dev
178  *   Pointer to the structure rte_eth_dev to write to.
179  * @param link Pointer to the buffer holding the link status to write.
180  *
181  * @return
182  *   - On success, zero.
183  *   - On failure, negative value.
184  */
185 static int
186 vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
187 				     struct rte_eth_link *link)
188 {
189 	struct rte_eth_link *dst = &(dev->data->dev_link);
190 	struct rte_eth_link *src = link;
191 
192 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
193 					*(uint64_t *)src) == 0)
194 		return -1;
195 
196 	return 0;
197 }
198 
199 /*
200  * Disable all interrupts; modeled on vmxnet3_disable_intr() in the Linux vmxnet3 driver.
201  */
202 static void
203 vmxnet3_disable_intr(struct vmxnet3_hw *hw)
204 {
205 	int i;
206 
207 	PMD_INIT_FUNC_TRACE();
208 
209 	hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
210 	for (i = 0; i < VMXNET3_MAX_INTRS; i++)
211 		VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
212 }
213 
214 /*
215  * Per-port initialization routine; returns 0 on success.
216  */
217 static int
218 eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
219 {
220 	struct rte_pci_device *pci_dev;
221 	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
222 	uint32_t mac_hi, mac_lo, ver;
223 
224 	PMD_INIT_FUNC_TRACE();
225 
226 	eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
227 	eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
228 	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
229 	pci_dev = eth_dev->pci_dev;
230 
231 	/*
232 	 * For secondary processes, we don't initialize any further as the
233 	 * primary process has already done this work.
234 	 */
235 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
236 		return 0;
237 
238 	/* Vendor and Device ID need to be set before init of shared code */
239 	hw->device_id = pci_dev->id.device_id;
240 	hw->vendor_id = pci_dev->id.vendor_id;
241 	hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
242 	hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;
243 
244 	hw->num_rx_queues = 1;
245 	hw->num_tx_queues = 1;
246 	hw->bufs_per_pkt = 1;
247 
248 	/* Check h/w version compatibility with driver. */
249 	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
250 	PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);
251 	if (ver & 0x1)
252 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1);
253 	else {
254 		PMD_INIT_LOG(ERR, "Incompatible h/w version, should be 0x1");
255 		return -EIO;
256 	}
257 
258 	/* Check UPT version compatibility with driver. */
259 	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
260 	PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
261 	if (ver & 0x1)
262 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
263 	else {
264 		PMD_INIT_LOG(ERR, "Incompatible UPT version.");
265 		return -EIO;
266 	}
267 
268 	/* Getting MAC Address */
269 	mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
270 	mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
271 	memcpy(hw->perm_addr, &mac_lo, 4);
272 	memcpy(hw->perm_addr + 4, &mac_hi, 2);
273 
274 	/* Allocate memory for storing MAC addresses */
275 	eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
276 					       VMXNET3_MAX_MAC_ADDRS, 0);
277 	if (eth_dev->data->mac_addrs == NULL) {
278 		PMD_INIT_LOG(ERR,
279 			     "Failed to allocate %d bytes needed to store MAC addresses",
280 			     ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
281 		return -ENOMEM;
282 	}
283 	/* Copy the permanent MAC address */
284 	ether_addr_copy((struct ether_addr *) hw->perm_addr,
285 			&eth_dev->data->mac_addrs[0]);
286 
287 	PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
288 		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
289 		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
290 
291 	/* Put device in Quiesce Mode */
292 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
293 
294 	return 0;
295 }
296 
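/*
 * PMD descriptor: binds the PCI id table to the per-port init routine;
 * dev_private_size is the amount of per-port private data (struct vmxnet3_hw)
 * the ethdev layer allocates for each device.
 */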
297 static struct eth_driver rte_vmxnet3_pmd = {
298 	.pci_drv = {
299 		.name = "rte_vmxnet3_pmd",
300 		.id_table = pci_id_vmxnet3_map,
301 		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
302 	},
303 	.eth_dev_init = eth_vmxnet3_dev_init,
304 	.dev_private_size = sizeof(struct vmxnet3_hw),
305 };
306 
307 /*
308  * Driver initialization routine.
309  * Invoked once at EAL init time.
310  * Register itself as the [Poll Mode] Driver of Virtual PCI VMXNET3 devices.
311  */
312 static int
313 rte_vmxnet3_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
314 {
315 	PMD_INIT_FUNC_TRACE();
316 
317 	rte_eth_driver_register(&rte_vmxnet3_pmd);
318 	return 0;
319 }
320 
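/*
 * Ethdev configure callback: validate the requested queue counts and reserve
 * the memzones shared with the device (Vmxnet3_DriverShared, the Tx/Rx queue
 * descriptors and, when RSS is requested, the RSS configuration).
 */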
321 static int
322 vmxnet3_dev_configure(struct rte_eth_dev *dev)
323 {
324 	const struct rte_memzone *mz;
325 	struct vmxnet3_hw *hw = dev->data->dev_private;
326 	size_t size;
327 
328 	PMD_INIT_FUNC_TRACE();
329 
330 	if (dev->data->nb_rx_queues > UINT8_MAX ||
331 	    dev->data->nb_tx_queues > UINT8_MAX)
332 		return -EINVAL;
333 
334 	size = dev->data->nb_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
335 		dev->data->nb_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
336 
337 	if (size > UINT16_MAX)
338 		return -EINVAL;
339 
340 	hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
341 	hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
342 
343 	/*
344 	 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
345 	 * on current socket
346 	 */
347 	mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
348 			      "shared", rte_socket_id(), 8);
349 
350 	if (mz == NULL) {
351 		PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
352 		return -ENOMEM;
353 	}
354 	memset(mz->addr, 0, mz->len);
355 
356 	hw->shared = mz->addr;
357 	hw->sharedPA = mz->phys_addr;
358 
359 	/*
360 	 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
361 	 * on current socket
362 	 */
363 	mz = gpa_zone_reserve(dev, size, "queuedesc",
364 			      rte_socket_id(), VMXNET3_QUEUE_DESC_ALIGN);
365 	if (mz == NULL) {
366 		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
367 		return -ENOMEM;
368 	}
369 	memset(mz->addr, 0, mz->len);
370 
371 	hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
372 	hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);
373 
374 	hw->queueDescPA = mz->phys_addr;
375 	hw->queue_desc_len = (uint16_t)size;
376 
377 	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
378 
379 		/* Allocate memory structure for UPT1_RSSConf and configure */
380 		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf), "rss_conf",
381 				      rte_socket_id(), RTE_CACHE_LINE_SIZE);
382 		if (mz == NULL) {
383 			PMD_INIT_LOG(ERR,
384 				     "ERROR: Creating rss_conf structure zone");
385 			return -ENOMEM;
386 		}
387 		memset(mz->addr, 0, mz->len);
388 
389 		hw->rss_conf = mz->addr;
390 		hw->rss_confPA = mz->phys_addr;
391 	}
392 
393 	return 0;
394 }
395 
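/*
 * Populate the Vmxnet3_DriverShared area read by the device: guest OS info,
 * MTU, queue descriptor address/length, per-queue ring addresses and sizes,
 * rx filter mode, offload features (checksum, RSS, VLAN) and the MAC address.
 * Called from dev_start before VMXNET3_CMD_ACTIVATE_DEV is issued.
 */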
396 static int
397 vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
398 {
399 	struct rte_eth_conf port_conf = dev->data->dev_conf;
400 	struct vmxnet3_hw *hw = dev->data->dev_private;
401 	Vmxnet3_DriverShared *shared = hw->shared;
402 	Vmxnet3_DSDevRead *devRead = &shared->devRead;
403 	uint32_t *mac_ptr;
404 	uint32_t val, i;
405 	int ret, mask;
406 
407 	shared->magic = VMXNET3_REV1_MAGIC;
408 	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
409 
410 	/* Setting up Guest OS information */
411 	devRead->misc.driverInfo.gos.gosBits   = sizeof(void *) == 4 ?
412 		VMXNET3_GOS_BITS_32 :
413 		VMXNET3_GOS_BITS_64;
414 	devRead->misc.driverInfo.gos.gosType   = VMXNET3_GOS_TYPE_LINUX;
415 	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
416 	devRead->misc.driverInfo.uptVerSpt     = 1;
417 
418 	devRead->misc.mtu = rte_le_to_cpu_32(dev->data->mtu);
419 	devRead->misc.queueDescPA  = hw->queueDescPA;
420 	devRead->misc.queueDescLen = hw->queue_desc_len;
421 	devRead->misc.numTxQueues  = hw->num_tx_queues;
422 	devRead->misc.numRxQueues  = hw->num_rx_queues;
423 
424 	/*
425 	 * Set the number of interrupts to 1.
426 	 * The PMD disables all interrupts, but at least one interrupt is
427 	 * required to activate the device and to handle link events, so it
428 	 * is disabled later, after device activation, if needed.
429 	 */
430 	devRead->intrConf.numIntrs = 1;
431 	devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
432 
433 	for (i = 0; i < hw->num_tx_queues; i++) {
434 		Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
435 		vmxnet3_tx_queue_t *txq  = dev->data->tx_queues[i];
436 
437 		tqd->ctrl.txNumDeferred  = 0;
438 		tqd->ctrl.txThreshold    = 1;
439 		tqd->conf.txRingBasePA   = txq->cmd_ring.basePA;
440 		tqd->conf.compRingBasePA = txq->comp_ring.basePA;
441 		tqd->conf.dataRingBasePA = txq->data_ring.basePA;
442 
443 		tqd->conf.txRingSize   = txq->cmd_ring.size;
444 		tqd->conf.compRingSize = txq->comp_ring.size;
445 		tqd->conf.dataRingSize = txq->data_ring.size;
446 		tqd->conf.intrIdx      = txq->comp_ring.intr_idx;
447 		tqd->status.stopped    = TRUE;
448 		tqd->status.error      = 0;
449 		memset(&tqd->stats, 0, sizeof(tqd->stats));
450 	}
451 
452 	for (i = 0; i < hw->num_rx_queues; i++) {
453 		Vmxnet3_RxQueueDesc *rqd  = &hw->rqd_start[i];
454 		vmxnet3_rx_queue_t *rxq   = dev->data->rx_queues[i];
455 
456 		rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
457 		rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
458 		rqd->conf.compRingBasePA  = rxq->comp_ring.basePA;
459 
460 		rqd->conf.rxRingSize[0]   = rxq->cmd_ring[0].size;
461 		rqd->conf.rxRingSize[1]   = rxq->cmd_ring[1].size;
462 		rqd->conf.compRingSize    = rxq->comp_ring.size;
463 		rqd->conf.intrIdx         = rxq->comp_ring.intr_idx;
464 		rqd->status.stopped       = TRUE;
465 		rqd->status.error         = 0;
466 		memset(&rqd->stats, 0, sizeof(rqd->stats));
467 	}
468 
469 	/* Start with all VMXNET3_RXM_xxx rx-mode bits cleared */
470 	devRead->rxFilterConf.rxMode = 0;
471 
472 	/* Setting up feature flags */
473 	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
474 		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
475 
476 	if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
477 		ret = vmxnet3_rss_configure(dev);
478 		if (ret != VMXNET3_SUCCESS)
479 			return ret;
480 
481 		devRead->misc.uptFeatures |= VMXNET3_F_RSS;
482 		devRead->rssConfDesc.confVer = 1;
483 		devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
484 		devRead->rssConfDesc.confPA  = hw->rss_confPA;
485 	}
486 
487 	mask = 0;
488 	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
489 		mask |= ETH_VLAN_STRIP_MASK;
490 
491 	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
492 		mask |= ETH_VLAN_FILTER_MASK;
493 
494 	vmxnet3_dev_vlan_offload_set_clear(dev, mask, 1);
495 
496 	PMD_INIT_LOG(DEBUG,
497 		     "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
498 		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
499 		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
500 
501 	/* Write MAC Address back to device */
502 	mac_ptr = (uint32_t *)hw->perm_addr;
503 	val = *mac_ptr;
504 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);
505 
506 	val = (hw->perm_addr[5] << 8) | hw->perm_addr[4];
507 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
508 
509 	return VMXNET3_SUCCESS;
510 }
511 
512 /*
513  * Configure the device link speed and set up the link.
514  * Must be called after eth_vmxnet3_dev_init(); otherwise it might fail.
515  * Returns 0 on success.
516  */
517 static int
518 vmxnet3_dev_start(struct rte_eth_dev *dev)
519 {
520 	int status, ret;
521 	struct vmxnet3_hw *hw = dev->data->dev_private;
522 
523 	PMD_INIT_FUNC_TRACE();
524 
525 	ret = vmxnet3_setup_driver_shared(dev);
526 	if (ret != VMXNET3_SUCCESS)
527 		return ret;
528 
529 	/* Exchange shared data with device */
530 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
531 			       VMXNET3_GET_ADDR_LO(hw->sharedPA));
532 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
533 			       VMXNET3_GET_ADDR_HI(hw->sharedPA));
534 
535 	/* Activate device by register write */
536 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
537 	status = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
538 
539 	if (status != 0) {
540 		PMD_INIT_LOG(ERR, "Device activation in %s(): UNSUCCESSFUL", __func__);
541 		return -1;
542 	}
543 
544 	/* Disable interrupts */
545 	vmxnet3_disable_intr(hw);
546 
547 	/*
548 	 * Load RX queues with blank mbufs and update next2fill index for device
549 	 * Update RxMode of the device
550 	 */
551 	ret = vmxnet3_dev_rxtx_init(dev);
552 	if (ret != VMXNET3_SUCCESS) {
553 		PMD_INIT_LOG(ERR, "Device receive init in %s: UNSUCCESSFUL", __func__);
554 		return ret;
555 	}
556 
557 	/* Set the proper Rx mode and issue the Rx mode update command */
558 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
559 
560 	/*
561 	 * Don't need to handle events for now
562 	 */
563 #if PROCESS_SYS_EVENTS == 1
564 	uint32_t events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
565 	PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events);
566 	vmxnet3_process_events(hw);
567 #endif
568 	return status;
569 }
570 
571 /*
572  * Stop device: disable rx and tx functions to allow for reconfiguring.
573  */
574 static void
575 vmxnet3_dev_stop(struct rte_eth_dev *dev)
576 {
577 	struct rte_eth_link link;
578 	struct vmxnet3_hw *hw = dev->data->dev_private;
579 
580 	PMD_INIT_FUNC_TRACE();
581 
582 	if (hw->adapter_stopped == TRUE) {
583 		PMD_INIT_LOG(DEBUG, "Device already closed.");
584 		return;
585 	}
586 
587 	/* disable interrupts */
588 	vmxnet3_disable_intr(hw);
589 
590 	/* quiesce the device first */
591 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
592 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
593 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);
594 
595 	/* reset the device */
596 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
597 	PMD_INIT_LOG(DEBUG, "Device reset.");
598 	hw->adapter_stopped = FALSE;
599 
600 	vmxnet3_dev_clear_queues(dev);
601 
602 	/* Clear recorded link status */
603 	memset(&link, 0, sizeof(link));
604 	vmxnet3_dev_atomic_write_link_status(dev, &link);
605 }
606 
607 /*
608  * Reset and stop device.
609  */
610 static void
611 vmxnet3_dev_close(struct rte_eth_dev *dev)
612 {
613 	struct vmxnet3_hw *hw = dev->data->dev_private;
614 
615 	PMD_INIT_FUNC_TRACE();
616 
617 	vmxnet3_dev_stop(dev);
618 	hw->adapter_stopped = TRUE;
619 }
620 
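/*
 * Retrieve statistics: VMXNET3_CMD_GET_STATS asks the device to refresh the
 * UPT1_TxStats/UPT1_RxStats blocks in the queue descriptors, which are then
 * summed into the rte_eth_stats counters.
 */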
621 static void
622 vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
623 {
624 	unsigned int i;
625 	struct vmxnet3_hw *hw = dev->data->dev_private;
626 
627 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
628 
629 	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
630 	for (i = 0; i < hw->num_tx_queues; i++) {
631 		struct UPT1_TxStats *txStats = &hw->tqd_start[i].stats;
632 
633 		stats->q_opackets[i] = txStats->ucastPktsTxOK +
634 			txStats->mcastPktsTxOK +
635 			txStats->bcastPktsTxOK;
636 		stats->q_obytes[i] = txStats->ucastBytesTxOK +
637 			txStats->mcastBytesTxOK +
638 			txStats->bcastBytesTxOK;
639 
640 		stats->opackets += stats->q_opackets[i];
641 		stats->obytes += stats->q_obytes[i];
642 		stats->oerrors += txStats->pktsTxError +
643 			txStats->pktsTxDiscard;
644 	}
645 
646 	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
647 	for (i = 0; i < hw->num_rx_queues; i++) {
648 		struct UPT1_RxStats *rxStats = &hw->rqd_start[i].stats;
649 
650 		stats->q_ipackets[i] = rxStats->ucastPktsRxOK +
651 			rxStats->mcastPktsRxOK +
652 			rxStats->bcastPktsRxOK;
653 
654 		stats->q_ibytes[i] = rxStats->ucastBytesRxOK +
655 			rxStats->mcastBytesRxOK +
656 			rxStats->bcastBytesRxOK;
657 
658 		stats->ipackets += stats->q_ipackets[i];
659 		stats->ibytes += stats->q_ibytes[i];
660 
661 		stats->q_errors[i] = rxStats->pktsRxError;
662 		stats->ierrors += rxStats->pktsRxError;
663 		stats->imcasts += rxStats->mcastPktsRxOK;
664 		stats->rx_nombuf += rxStats->pktsRxOutOfBuf;
665 	}
666 }
667 
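/*
 * Report device capabilities: queue limits, buffer/frame sizes, MAC address
 * slots, default Tx queue flags and the supported RSS offload types.
 */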
668 static void
669 vmxnet3_dev_info_get(__attribute__((unused))struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
670 {
671 	dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
672 	dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
673 	dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
674 	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
675 	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
676 
677 	dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
678 						ETH_TXQ_FLAGS_NOOFFLOADS;
679 	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
680 }
681 
682 /* Returns 0 if the link status changed, -1 if it did not change */
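/*
 * The low bit of the VMXNET3_CMD_GET_LINK result indicates link-up; when the
 * link is up, it is always reported as 10 Gb/s full duplex.
 */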
683 static int
684 vmxnet3_dev_link_update(struct rte_eth_dev *dev, __attribute__((unused)) int wait_to_complete)
685 {
686 	struct vmxnet3_hw *hw = dev->data->dev_private;
687 	struct rte_eth_link old, link;
688 	uint32_t ret;
689 
690 	if (dev->data->dev_started == 0)
691 		return -1; /* Link status doesn't change for stopped dev */
692 
693 	memset(&link, 0, sizeof(link));
694 	vmxnet3_dev_atomic_read_link_status(dev, &old);
695 
696 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
697 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
698 
699 	if (ret & 0x1) {
700 		link.link_status = 1;
701 		link.link_duplex = ETH_LINK_FULL_DUPLEX;
702 		link.link_speed = ETH_LINK_SPEED_10000;
703 	}
704 
705 	vmxnet3_dev_atomic_write_link_status(dev, &link);
706 
707 	return (old.link_status == link.link_status) ? -1 : 0;
708 }
709 
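/*
 * Illustrative use: enabling promiscuous mode amounts to
 * vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1), which ORs the bit into
 * rxConf->rxMode and then issues VMXNET3_CMD_UPDATE_RX_MODE.
 */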
710 /* Update the rx mode through the Vmxnet3_DriverShared structure in the adapter */
711 static void
712 vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
713 {
714 	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
715 
716 	if (set)
717 		rxConf->rxMode = rxConf->rxMode | feature;
718 	else
719 		rxConf->rxMode = rxConf->rxMode & (~feature);
720 
721 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
722 }
723 
724 /* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
725 static void
726 vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
727 {
728 	struct vmxnet3_hw *hw = dev->data->dev_private;
729 	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
730 
731 	memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
732 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
733 
734 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
735 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
736 }
737 
738 /* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
739 static void
740 vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
741 {
742 	struct vmxnet3_hw *hw = dev->data->dev_private;
743 	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
744 
745 	memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
746 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
747 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
748 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
749 }
750 
751 /* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
752 static void
753 vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
754 {
755 	struct vmxnet3_hw *hw = dev->data->dev_private;
756 
757 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
758 }
759 
760 /* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
761 static void
762 vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
763 {
764 	struct vmxnet3_hw *hw = dev->data->dev_private;
765 
766 	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
767 }
768 
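/*
 * hw->shadow_vfta mirrors the VLAN filter table so that it can be restored
 * after leaving promiscuous mode (see vmxnet3_dev_promiscuous_disable above)
 * and re-applied by vmxnet3_dev_vlan_offload_set_clear().
 */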
769 /* Enable/disable filter on vlan */
770 static int
771 vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
772 {
773 	struct vmxnet3_hw *hw = dev->data->dev_private;
774 	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
775 	uint32_t *vf_table = rxConf->vfTable;
776 
777 	/* save state for restore */
778 	if (on)
779 		VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
780 	else
781 		VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);
782 
783 	/* don't change the active filter if in promiscuous mode */
784 	if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
785 		return 0;
786 
787 	/* set in hardware */
788 	if (on)
789 		VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
790 	else
791 		VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);
792 
793 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
794 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
795 	return 0;
796 }
797 
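/*
 * Apply VLAN stripping/filtering settings. When 'clear' is non-zero the shadow
 * VLAN table is reinitialized (filtering enabled: allow only untagged packets
 * until filters are added; filtering disabled: allow everything); the
 * resulting table is then copied into the device filter table.
 */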
798 static void
799 vmxnet3_dev_vlan_offload_set_clear(struct rte_eth_dev *dev,
800 				   int mask, int clear)
801 {
802 	struct vmxnet3_hw *hw = dev->data->dev_private;
803 	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
804 	uint32_t *vf_table = devRead->rxFilterConf.vfTable;
805 
806 	if (mask & ETH_VLAN_STRIP_MASK)
807 		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
808 	else
809 		devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
810 
811 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
812 			       VMXNET3_CMD_UPDATE_FEATURE);
813 
814 	if (mask & ETH_VLAN_FILTER_MASK) {
815 		if (clear) {
816 			memset(hw->shadow_vfta, 0,
817 			       VMXNET3_VFT_TABLE_SIZE);
818 			/* allow untagged pkts */
819 			VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);
820 		}
821 		memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
822 	} else {
823 		/* allow any pkts -- no filtering */
824 		if (clear)
825 			memset(hw->shadow_vfta, 0xff, VMXNET3_VFT_TABLE_SIZE);
826 		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
827 	}
828 
829 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
830 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
831 }
832 
833 static void
834 vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
835 {
836 	vmxnet3_dev_vlan_offload_set_clear(dev, mask, 0);
837 }
838 
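/*
 * Event processing is compiled out by default (PROCESS_SYS_EVENTS is 0); the
 * code below is kept as a reference for handling ECR link, queue-error and
 * debug events.
 */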
839 #if PROCESS_SYS_EVENTS == 1
840 static void
841 vmxnet3_process_events(struct vmxnet3_hw *hw)
842 {
843 	uint32_t events = hw->shared->ecr;
844 
845 	if (!events) {
846 		PMD_INIT_LOG(ERR, "No events to process in %s()", __func__);
847 		return;
848 	}
849 
850 	/*
851 	 * ECR bits when written with 1b are cleared. Hence write
852 	 * events back to ECR so that the bits which were set will be reset.
853 	 */
854 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);
855 
856 	/* Check if link state has changed */
857 	if (events & VMXNET3_ECR_LINK)
858 		PMD_INIT_LOG(ERR,
859 			     "Process events in %s(): VMXNET3_ECR_LINK event", __func__);
860 
861 	/* Check if there is an error on xmit/recv queues */
862 	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
863 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS);
864 
865 		if (hw->tqd_start->status.stopped)
866 			PMD_INIT_LOG(ERR, "tq error 0x%x",
867 				     hw->tqd_start->status.error);
868 
869 		if (hw->rqd_start->status.stopped)
870 			PMD_INIT_LOG(ERR, "rq error 0x%x",
871 				     hw->rqd_start->status.error);
872 
873 		/* TODO: the device has to be reset here, */
874 		/* but queue-error recovery is not implemented yet. */
875 	}
876 
877 	if (events & VMXNET3_ECR_DIC)
878 		PMD_INIT_LOG(ERR, "Device implementation change event.");
879 
880 	if (events & VMXNET3_ECR_DEBUG)
881 		PMD_INIT_LOG(ERR, "Debug event generated by device.");
882 
883 }
884 #endif
885 
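/*
 * Top-level driver entry: PMD_REGISTER_DRIVER() adds rte_vmxnet3_driver to the
 * EAL driver list so that rte_vmxnet3_pmd_init() is invoked at EAL init time.
 */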
886 static struct rte_driver rte_vmxnet3_driver = {
887 	.type = PMD_PDEV,
888 	.init = rte_vmxnet3_pmd_init,
889 };
890 
891 PMD_REGISTER_DRIVER(rte_vmxnet3_driver);
892